author     David Daney <ddaney@caviumnetworks.com>    2009-01-08 19:46:40 -0500
committer  Ralf Baechle <ralf@linux-mips.org>         2009-01-11 04:57:21 -0500
commit     5b3b16880f404ca54126210ca86141cceeafc0cf (patch)
tree       f69d30450a923782534d4ae257f20aace0a0be74 /arch
parent     58f07778ce9d32c22cecb1d8ef348001f0e705c9 (diff)
MIPS: Add Cavium OCTEON processor support files to arch/mips/cavium-octeon.
These are the rest of the new files needed to add OCTEON processor
support to the Linux kernel. Other than Makefile and Kconfig which
should be obvious, we have:
csrc-octeon.c -- Clock source driver for OCTEON.
dma-octeon.c -- Helper functions for mapping DMA memory.
flash_setup.c -- Register on-board flash with the MTD subsystem.
octeon-irq.c -- OCTEON interrupt controller management.
octeon-memcpy.S -- Optimized memcpy() implementation.
serial.c -- Register 8250 platform driver and early console.
setup.c -- Early architecture initialization.
smp.c -- OCTEON SMP support.
octeon_switch.S -- Scheduler context switch for OCTEON.
c-octeon.c -- OCTEON cache controller support.
cex-oct.S -- OCTEON cache exception handler.
asm/mach-cavium-octeon/*.h -- Architecture include files.
Signed-off-by: Tomaso Paoletti <tpaoletti@caviumnetworks.com>
Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
create mode 100644 arch/mips/cavium-octeon/Kconfig
create mode 100644 arch/mips/cavium-octeon/Makefile
create mode 100644 arch/mips/cavium-octeon/csrc-octeon.c
create mode 100644 arch/mips/cavium-octeon/dma-octeon.c
create mode 100644 arch/mips/cavium-octeon/flash_setup.c
create mode 100644 arch/mips/cavium-octeon/octeon-irq.c
create mode 100644 arch/mips/cavium-octeon/octeon-memcpy.S
create mode 100644 arch/mips/cavium-octeon/serial.c
create mode 100644 arch/mips/cavium-octeon/setup.c
create mode 100644 arch/mips/cavium-octeon/smp.c
create mode 100644 arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
create mode 100644 arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
create mode 100644 arch/mips/include/asm/mach-cavium-octeon/irq.h
create mode 100644 arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
create mode 100644 arch/mips/include/asm/mach-cavium-octeon/war.h
create mode 100644 arch/mips/include/asm/octeon/octeon.h
create mode 100644 arch/mips/kernel/octeon_switch.S
create mode 100644 arch/mips/mm/c-octeon.c
create mode 100644 arch/mips/mm/cex-oct.S
Diffstat (limited to 'arch')
-rw-r--r--  arch/mips/cavium-octeon/Kconfig                                     85
-rw-r--r--  arch/mips/cavium-octeon/Makefile                                    16
-rw-r--r--  arch/mips/cavium-octeon/csrc-octeon.c                               58
-rw-r--r--  arch/mips/cavium-octeon/dma-octeon.c                                32
-rw-r--r--  arch/mips/cavium-octeon/flash_setup.c                               84
-rw-r--r--  arch/mips/cavium-octeon/octeon-irq.c                               497
-rw-r--r--  arch/mips/cavium-octeon/octeon-memcpy.S                            521
-rw-r--r--  arch/mips/cavium-octeon/serial.c                                   136
-rw-r--r--  arch/mips/cavium-octeon/setup.c                                    929
-rw-r--r--  arch/mips/cavium-octeon/smp.c                                      211
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h    78
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h            64
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/irq.h                     244
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h       131
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/war.h                      26
-rw-r--r--  arch/mips/include/asm/octeon/octeon.h                              248
-rw-r--r--  arch/mips/kernel/octeon_switch.S                                   506
-rw-r--r--  arch/mips/mm/c-octeon.c                                            307
-rw-r--r--  arch/mips/mm/cex-oct.S                                              70
19 files changed, 4243 insertions, 0 deletions
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
new file mode 100644
index 000000000000..094c17e38e16
--- /dev/null
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -0,0 +1,85 @@
1 | config CAVIUM_OCTEON_SPECIFIC_OPTIONS | ||
2 | bool "Enable Octeon specific options" | ||
3 | depends on CPU_CAVIUM_OCTEON | ||
4 | default "y" | ||
5 | |||
6 | config CAVIUM_OCTEON_2ND_KERNEL | ||
7 | bool "Build the kernel to be used as a 2nd kernel on the same chip" | ||
8 | depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS | ||
9 | default "n" | ||
10 | help | ||
11 | This option configures this kernel to be linked at a different | ||
12 | address and use the 2nd uart for output. This allows a kernel built | ||
13 | with this option to be run at the same time as one built without this | ||
14 | option. | ||
15 | |||
16 | config CAVIUM_OCTEON_HW_FIX_UNALIGNED | ||
17 | bool "Enable hardware fixups of unaligned loads and stores" | ||
18 | depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS | ||
19 | default "y" | ||
20 | help | ||
21 | Configure the Octeon hardware to automatically fix unaligned loads | ||
22 | and stores. Normally unaligned accesses are fixed using a kernel | ||
23 | exception handler. This option enables the hardware automatic fixups, | ||
24 | which require only an extra 3 cycles. Disable this option if you | ||
25 | are running code that relies on address exceptions on unaligned | ||
26 | accesses. | ||
27 | |||
28 | config CAVIUM_OCTEON_CVMSEG_SIZE | ||
29 | int "Number of L1 cache lines reserved for CVMSEG memory" | ||
30 | depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS | ||
31 | range 0 54 | ||
32 | default 1 | ||
33 | help | ||
34 | CVMSEG LM is a segment that accesses portions of the dcache as a | ||
35 | local memory; the larger CVMSEG is, the smaller the cache is. | ||
36 | This selects the size of CVMSEG LM, which is in cache blocks. The | ||
37 | legal range is from zero to 54 cache blocks (i.e. CVMSEG LM is | ||
38 | between zero and 6912 bytes). | ||
39 | |||
40 | config CAVIUM_OCTEON_LOCK_L2 | ||
41 | bool "Lock often used kernel code in the L2" | ||
42 | depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS | ||
43 | default "y" | ||
44 | help | ||
45 | Enable locking parts of the kernel into the L2 cache. | ||
46 | |||
47 | config CAVIUM_OCTEON_LOCK_L2_TLB | ||
48 | bool "Lock the TLB handler in L2" | ||
49 | depends on CAVIUM_OCTEON_LOCK_L2 | ||
50 | default "y" | ||
51 | help | ||
52 | Lock the low level TLB fast path into L2. | ||
53 | |||
54 | config CAVIUM_OCTEON_LOCK_L2_EXCEPTION | ||
55 | bool "Lock the exception handler in L2" | ||
56 | depends on CAVIUM_OCTEON_LOCK_L2 | ||
57 | default "y" | ||
58 | help | ||
59 | Lock the low level exception handler into L2. | ||
60 | |||
61 | config CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT | ||
62 | bool "Lock the interrupt handler in L2" | ||
63 | depends on CAVIUM_OCTEON_LOCK_L2 | ||
64 | default "y" | ||
65 | help | ||
66 | Lock the low level interrupt handler into L2. | ||
67 | |||
68 | config CAVIUM_OCTEON_LOCK_L2_INTERRUPT | ||
69 | bool "Lock the 2nd level interrupt handler in L2" | ||
70 | depends on CAVIUM_OCTEON_LOCK_L2 | ||
71 | default "y" | ||
72 | help | ||
73 | Lock the 2nd level interrupt handler in L2. | ||
74 | |||
75 | config CAVIUM_OCTEON_LOCK_L2_MEMCPY | ||
76 | bool "Lock memcpy() in L2" | ||
77 | depends on CAVIUM_OCTEON_LOCK_L2 | ||
78 | default "y" | ||
79 | help | ||
80 | Lock the kernel's implementation of memcpy() into L2. | ||
81 | |||
82 | config ARCH_SPARSEMEM_ENABLE | ||
83 | def_bool y | ||
84 | select SPARSEMEM_STATIC | ||
85 | depends on CPU_CAVIUM_OCTEON | ||
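For context on the CVMSEG range above: assuming the usual 128-byte Octeon L1 dcache line, the option counts whole dcache lines, so the configurable maximum works out to 54 * 128 = 6912 bytes, and the default of 1 reserves a single 128-byte line.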
diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile
new file mode 100644
index 000000000000..1c2a7faf5881
--- /dev/null
+++ b/arch/mips/cavium-octeon/Makefile
@@ -0,0 +1,16 @@
1 | # | ||
2 | # Makefile for the Cavium Octeon specific kernel interface routines | ||
3 | # under Linux. | ||
4 | # | ||
5 | # This file is subject to the terms and conditions of the GNU General Public | ||
6 | # License. See the file "COPYING" in the main directory of this archive | ||
7 | # for more details. | ||
8 | # | ||
9 | # Copyright (C) 2005-2008 Cavium Networks | ||
10 | # | ||
11 | |||
12 | obj-y := setup.o serial.o octeon-irq.o csrc-octeon.o | ||
13 | obj-y += dma-octeon.o flash_setup.o | ||
14 | obj-y += octeon-memcpy.o | ||
15 | |||
16 | obj-$(CONFIG_SMP) += smp.o | ||
diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c
new file mode 100644
index 000000000000..70fd92c31657
--- /dev/null
+++ b/arch/mips/cavium-octeon/csrc-octeon.c
@@ -0,0 +1,58 @@
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2007 by Ralf Baechle | ||
7 | */ | ||
8 | #include <linux/clocksource.h> | ||
9 | #include <linux/init.h> | ||
10 | |||
11 | #include <asm/time.h> | ||
12 | |||
13 | #include <asm/octeon/octeon.h> | ||
14 | #include <asm/octeon/cvmx-ipd-defs.h> | ||
15 | |||
16 | /* | ||
17 | * Set the current core's cvmcount counter to the value of the | ||
18 | * IPD_CLK_COUNT. We do this on all cores as they are brought | ||
19 | * on-line. This allows for a read from a local cpu register to | ||
20 | * access a synchronized counter. | ||
21 | * | ||
22 | */ | ||
23 | void octeon_init_cvmcount(void) | ||
24 | { | ||
25 | unsigned long flags; | ||
26 | unsigned loops = 2; | ||
27 | |||
28 | /* Clobber loops so GCC will not unroll the following while loop. */ | ||
29 | asm("" : "+r" (loops)); | ||
30 | |||
31 | local_irq_save(flags); | ||
32 | /* | ||
33 | * Loop several times so we are executing from the cache, | ||
34 | * which should give more deterministic timing. | ||
35 | */ | ||
36 | while (loops--) | ||
37 | write_c0_cvmcount(cvmx_read_csr(CVMX_IPD_CLK_COUNT)); | ||
38 | local_irq_restore(flags); | ||
39 | } | ||
40 | |||
41 | static cycle_t octeon_cvmcount_read(void) | ||
42 | { | ||
43 | return read_c0_cvmcount(); | ||
44 | } | ||
45 | |||
46 | static struct clocksource clocksource_mips = { | ||
47 | .name = "OCTEON_CVMCOUNT", | ||
48 | .read = octeon_cvmcount_read, | ||
49 | .mask = CLOCKSOURCE_MASK(64), | ||
50 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
51 | }; | ||
52 | |||
53 | void __init plat_time_init(void) | ||
54 | { | ||
55 | clocksource_mips.rating = 300; | ||
56 | clocksource_set_clock(&clocksource_mips, mips_hpt_frequency); | ||
57 | clocksource_register(&clocksource_mips); | ||
58 | } | ||
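The plat_time_init() above passes mips_hpt_frequency (the IPD clock rate) to clocksource_set_clock(), which picks the mult/shift pair later used to turn raw cvmcount ticks into nanoseconds. A rough stand-alone sketch of that conversion follows; sketch_set_clock() is a hypothetical stand-in, not the kernel helper, and the 500 MHz figure is only an assumed example rate.

#include <stdint.h>
#include <stdio.h>

/*
 * Pick the largest shift for which mult = (10^9 << shift) / hz still
 * fits in 32 bits, so that ns = (ticks * mult) >> shift converts
 * counter ticks to nanoseconds without overflowing the multiplier.
 */
static void sketch_set_clock(uint64_t hz, uint32_t *mult, uint32_t *shift)
{
	uint64_t tmp = 0;
	uint32_t sft;

	for (sft = 32; sft > 0; sft--) {
		tmp = (1000000000ull << sft) / hz;
		if (tmp <= 0xffffffffull)
			break;
	}
	*mult = (uint32_t)tmp;
	*shift = sft;
}

int main(void)
{
	uint32_t mult, shift;
	uint64_t ticks = 500000;	/* 1 ms worth of ticks at an assumed 500 MHz */

	sketch_set_clock(500000000ull, &mult, &shift);
	printf("mult=%u shift=%u -> %llu ns\n", mult, shift,
	       (unsigned long long)((ticks * mult) >> shift));	/* prints 1000000 ns */
	return 0;
}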
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
new file mode 100644
index 000000000000..01b1ef94b361
--- /dev/null
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -0,0 +1,32 @@
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> | ||
7 | * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org> | ||
8 | * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com> | ||
9 | * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. | ||
10 | * IP32 changes by Ilya. | ||
11 | * Cavium Networks: Create new dma setup for Cavium Networks Octeon based on | ||
12 | * the kernel's original. | ||
13 | */ | ||
14 | #include <linux/types.h> | ||
15 | #include <linux/mm.h> | ||
16 | |||
17 | #include <dma-coherence.h> | ||
18 | |||
19 | dma_addr_t octeon_map_dma_mem(struct device *dev, void *ptr, size_t size) | ||
20 | { | ||
21 | /* Without PCI/PCIe this function can be called for Octeon internal | ||
22 | devices such as USB. These devices all support 64bit addressing */ | ||
23 | mb(); | ||
24 | return virt_to_phys(ptr); | ||
25 | } | ||
26 | |||
27 | void octeon_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr) | ||
28 | { | ||
29 | /* Without PCI/PCIe this function can be called for Octeon internal | ||
30 | * devices such as USB. These devices all support 64bit addressing */ | ||
31 | return; | ||
32 | } | ||
diff --git a/arch/mips/cavium-octeon/flash_setup.c b/arch/mips/cavium-octeon/flash_setup.c
new file mode 100644
index 000000000000..553d36cbcc42
--- /dev/null
+++ b/arch/mips/cavium-octeon/flash_setup.c
@@ -0,0 +1,84 @@
1 | /* | ||
2 | * Octeon Bootbus flash setup | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 2007, 2008 Cavium Networks | ||
9 | */ | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/mtd/mtd.h> | ||
12 | #include <linux/mtd/map.h> | ||
13 | #include <linux/mtd/partitions.h> | ||
14 | |||
15 | #include <asm/octeon/octeon.h> | ||
16 | |||
17 | static struct map_info flash_map; | ||
18 | static struct mtd_info *mymtd; | ||
19 | #ifdef CONFIG_MTD_PARTITIONS | ||
20 | static int nr_parts; | ||
21 | static struct mtd_partition *parts; | ||
22 | static const char *part_probe_types[] = { | ||
23 | "cmdlinepart", | ||
24 | #ifdef CONFIG_MTD_REDBOOT_PARTS | ||
25 | "RedBoot", | ||
26 | #endif | ||
27 | NULL | ||
28 | }; | ||
29 | #endif | ||
30 | |||
31 | /** | ||
31 | * Module/driver initialization. | ||
33 | * | ||
34 | * Returns Zero on success | ||
35 | */ | ||
36 | static int __init flash_init(void) | ||
37 | { | ||
38 | /* | ||
39 | * Read the bootbus region 0 setup to determine the base | ||
40 | * address of the flash. | ||
41 | */ | ||
42 | union cvmx_mio_boot_reg_cfgx region_cfg; | ||
43 | region_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(0)); | ||
44 | if (region_cfg.s.en) { | ||
45 | /* | ||
46 | * The bootloader always takes the flash and sets its | ||
47 | * address so the entire flash fits below | ||
48 | * 0x1fc00000. This way the flash aliases to | ||
49 | * 0x1fc00000 for booting. Software can access the | ||
50 | * full flash at the true address, while core boot can | ||
51 | * access 4MB. | ||
52 | */ | ||
53 | /* Use this name so old part lines work */ | ||
54 | flash_map.name = "phys_mapped_flash"; | ||
55 | flash_map.phys = region_cfg.s.base << 16; | ||
56 | flash_map.size = 0x1fc00000 - flash_map.phys; | ||
57 | flash_map.bankwidth = 1; | ||
58 | flash_map.virt = ioremap(flash_map.phys, flash_map.size); | ||
59 | pr_notice("Bootbus flash: Setting flash for %luMB flash at " | ||
60 | "0x%08lx\n", flash_map.size >> 20, flash_map.phys); | ||
61 | simple_map_init(&flash_map); | ||
62 | mymtd = do_map_probe("cfi_probe", &flash_map); | ||
63 | if (mymtd) { | ||
64 | mymtd->owner = THIS_MODULE; | ||
65 | |||
66 | #ifdef CONFIG_MTD_PARTITIONS | ||
67 | nr_parts = parse_mtd_partitions(mymtd, | ||
68 | part_probe_types, | ||
69 | &parts, 0); | ||
70 | if (nr_parts > 0) | ||
71 | add_mtd_partitions(mymtd, parts, nr_parts); | ||
72 | else | ||
73 | add_mtd_device(mymtd); | ||
74 | #else | ||
75 | add_mtd_device(mymtd); | ||
76 | #endif | ||
77 | } else { | ||
78 | pr_err("Failed to register MTD device for flash\n"); | ||
79 | } | ||
80 | } | ||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | late_initcall(flash_init); | ||
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
new file mode 100644
index 000000000000..fc72984a5dae
--- /dev/null
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -0,0 +1,497 @@
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2004-2008 Cavium Networks | ||
7 | */ | ||
8 | #include <linux/irq.h> | ||
9 | #include <linux/interrupt.h> | ||
10 | #include <linux/hardirq.h> | ||
11 | |||
12 | #include <asm/octeon/octeon.h> | ||
13 | |||
14 | DEFINE_RWLOCK(octeon_irq_ciu0_rwlock); | ||
15 | DEFINE_RWLOCK(octeon_irq_ciu1_rwlock); | ||
16 | DEFINE_SPINLOCK(octeon_irq_msi_lock); | ||
17 | |||
18 | static void octeon_irq_core_ack(unsigned int irq) | ||
19 | { | ||
20 | unsigned int bit = irq - OCTEON_IRQ_SW0; | ||
21 | /* | ||
22 | * We don't need to disable IRQs to make these atomic since | ||
23 | * they are already disabled earlier in the low level | ||
24 | * interrupt code. | ||
25 | */ | ||
26 | clear_c0_status(0x100 << bit); | ||
27 | /* The two user interrupts must be cleared manually. */ | ||
28 | if (bit < 2) | ||
29 | clear_c0_cause(0x100 << bit); | ||
30 | } | ||
31 | |||
32 | static void octeon_irq_core_eoi(unsigned int irq) | ||
33 | { | ||
34 | irq_desc_t *desc = irq_desc + irq; | ||
35 | unsigned int bit = irq - OCTEON_IRQ_SW0; | ||
36 | /* | ||
37 | * If an IRQ is being processed while we are disabling it the | ||
38 | * handler will attempt to unmask the interrupt after it has | ||
39 | * been disabled. | ||
40 | */ | ||
41 | if (desc->status & IRQ_DISABLED) | ||
42 | return; | ||
43 | |||
44 | /* There is a race here. We should fix it. */ | ||
45 | |||
46 | /* | ||
47 | * We don't need to disable IRQs to make these atomic since | ||
48 | * they are already disabled earlier in the low level | ||
49 | * interrupt code. | ||
50 | */ | ||
51 | set_c0_status(0x100 << bit); | ||
52 | } | ||
53 | |||
54 | static void octeon_irq_core_enable(unsigned int irq) | ||
55 | { | ||
56 | unsigned long flags; | ||
57 | unsigned int bit = irq - OCTEON_IRQ_SW0; | ||
58 | |||
59 | /* | ||
60 | * We need to disable interrupts to make sure our updates are | ||
61 | * atomic. | ||
62 | */ | ||
63 | local_irq_save(flags); | ||
64 | set_c0_status(0x100 << bit); | ||
65 | local_irq_restore(flags); | ||
66 | } | ||
67 | |||
68 | static void octeon_irq_core_disable_local(unsigned int irq) | ||
69 | { | ||
70 | unsigned long flags; | ||
71 | unsigned int bit = irq - OCTEON_IRQ_SW0; | ||
72 | /* | ||
73 | * We need to disable interrupts to make sure our updates are | ||
74 | * atomic. | ||
75 | */ | ||
76 | local_irq_save(flags); | ||
77 | clear_c0_status(0x100 << bit); | ||
78 | local_irq_restore(flags); | ||
79 | } | ||
80 | |||
81 | static void octeon_irq_core_disable(unsigned int irq) | ||
82 | { | ||
83 | #ifdef CONFIG_SMP | ||
84 | on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local, | ||
85 | (void *) (long) irq, 1); | ||
86 | #else | ||
87 | octeon_irq_core_disable_local(irq); | ||
88 | #endif | ||
89 | } | ||
90 | |||
91 | static struct irq_chip octeon_irq_chip_core = { | ||
92 | .name = "Core", | ||
93 | .enable = octeon_irq_core_enable, | ||
94 | .disable = octeon_irq_core_disable, | ||
95 | .ack = octeon_irq_core_ack, | ||
96 | .eoi = octeon_irq_core_eoi, | ||
97 | }; | ||
98 | |||
99 | |||
100 | static void octeon_irq_ciu0_ack(unsigned int irq) | ||
101 | { | ||
102 | /* | ||
103 | * In order to avoid any locking accessing the CIU, we | ||
104 | * acknowledge CIU interrupts by disabling all of them. This | ||
105 | * way we can use a per core register and avoid any out of | ||
106 | * core locking requirements. This has the side effect that | ||
107 | * CIU interrupts can't be processed recursively. | ||
108 | * | ||
109 | * We don't need to disable IRQs to make these atomic since | ||
110 | * they are already disabled earlier in the low level | ||
111 | * interrupt code. | ||
112 | */ | ||
113 | clear_c0_status(0x100 << 2); | ||
114 | } | ||
115 | |||
116 | static void octeon_irq_ciu0_eoi(unsigned int irq) | ||
117 | { | ||
118 | /* | ||
119 | * Enable all CIU interrupts again. We don't need to disable | ||
120 | * IRQs to make these atomic since they are already disabled | ||
121 | * earlier in the low level interrupt code. | ||
122 | */ | ||
123 | set_c0_status(0x100 << 2); | ||
124 | } | ||
125 | |||
126 | static void octeon_irq_ciu0_enable(unsigned int irq) | ||
127 | { | ||
128 | int coreid = cvmx_get_core_num(); | ||
129 | unsigned long flags; | ||
130 | uint64_t en0; | ||
131 | int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ | ||
132 | |||
133 | /* | ||
134 | * A read lock is used here to make sure only one core is ever | ||
135 | * updating the CIU enable bits at a time. During an enable | ||
136 | * the cores don't interfere with each other. During a disable | ||
137 | * the write lock stops any enables that might cause a | ||
138 | * problem. | ||
139 | */ | ||
140 | read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags); | ||
141 | en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | ||
142 | en0 |= 1ull << bit; | ||
143 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); | ||
144 | cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | ||
145 | read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags); | ||
146 | } | ||
147 | |||
148 | static void octeon_irq_ciu0_disable(unsigned int irq) | ||
149 | { | ||
150 | int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ | ||
151 | unsigned long flags; | ||
152 | uint64_t en0; | ||
153 | #ifdef CONFIG_SMP | ||
154 | int cpu; | ||
155 | write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags); | ||
156 | for_each_online_cpu(cpu) { | ||
157 | int coreid = cpu_logical_map(cpu); | ||
158 | en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | ||
159 | en0 &= ~(1ull << bit); | ||
160 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); | ||
161 | } | ||
162 | /* | ||
163 | * We need to do a read after the last update to make sure all | ||
164 | * of them are done. | ||
165 | */ | ||
166 | cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2)); | ||
167 | write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags); | ||
168 | #else | ||
169 | int coreid = cvmx_get_core_num(); | ||
170 | local_irq_save(flags); | ||
171 | en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | ||
172 | en0 &= ~(1ull << bit); | ||
173 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); | ||
174 | cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | ||
175 | local_irq_restore(flags); | ||
176 | #endif | ||
177 | } | ||
178 | |||
179 | #ifdef CONFIG_SMP | ||
180 | static void octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest) | ||
181 | { | ||
182 | int cpu; | ||
183 | int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ | ||
184 | |||
185 | write_lock(&octeon_irq_ciu0_rwlock); | ||
186 | for_each_online_cpu(cpu) { | ||
187 | int coreid = cpu_logical_map(cpu); | ||
188 | uint64_t en0 = | ||
189 | cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | ||
190 | if (cpumask_test_cpu(cpu, dest)) | ||
191 | en0 |= 1ull << bit; | ||
192 | else | ||
193 | en0 &= ~(1ull << bit); | ||
194 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); | ||
195 | } | ||
196 | /* | ||
197 | * We need to do a read after the last update to make sure all | ||
198 | * of them are done. | ||
199 | */ | ||
200 | cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2)); | ||
201 | write_unlock(&octeon_irq_ciu0_rwlock); | ||
202 | } | ||
203 | #endif | ||
204 | |||
205 | static struct irq_chip octeon_irq_chip_ciu0 = { | ||
206 | .name = "CIU0", | ||
207 | .enable = octeon_irq_ciu0_enable, | ||
208 | .disable = octeon_irq_ciu0_disable, | ||
209 | .ack = octeon_irq_ciu0_ack, | ||
210 | .eoi = octeon_irq_ciu0_eoi, | ||
211 | #ifdef CONFIG_SMP | ||
212 | .set_affinity = octeon_irq_ciu0_set_affinity, | ||
213 | #endif | ||
214 | }; | ||
215 | |||
216 | |||
217 | static void octeon_irq_ciu1_ack(unsigned int irq) | ||
218 | { | ||
219 | /* | ||
220 | * In order to avoid any locking accessing the CIU, we | ||
221 | * acknowledge CIU interrupts by disabling all of them. This | ||
222 | * way we can use a per core register and avoid any out of | ||
223 | * core locking requirements. This has the side effect that | ||
224 | * CIU interrupts can't be processed recursively. We don't | ||
225 | * need to disable IRQs to make these atomic since they are | ||
226 | * already disabled earlier in the low level interrupt code. | ||
227 | */ | ||
228 | clear_c0_status(0x100 << 3); | ||
229 | } | ||
230 | |||
231 | static void octeon_irq_ciu1_eoi(unsigned int irq) | ||
232 | { | ||
233 | /* | ||
234 | * Enable all CIU interrupts again. We don't need to disable | ||
235 | * IRQs to make these atomic since they are already disabled | ||
236 | * earlier in the low level interrupt code. | ||
237 | */ | ||
238 | set_c0_status(0x100 << 3); | ||
239 | } | ||
240 | |||
241 | static void octeon_irq_ciu1_enable(unsigned int irq) | ||
242 | { | ||
243 | int coreid = cvmx_get_core_num(); | ||
244 | unsigned long flags; | ||
245 | uint64_t en1; | ||
246 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ | ||
247 | |||
248 | /* | ||
249 | * A read lock is used here to make sure only one core is ever | ||
250 | * updating the CIU enable bits at a time. During an enable | ||
251 | * the cores don't interfere with each other. During a disable | ||
252 | * the write lock stops any enables that might cause a | ||
253 | * problem. | ||
254 | */ | ||
255 | read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags); | ||
256 | en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | ||
257 | en1 |= 1ull << bit; | ||
258 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); | ||
259 | cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | ||
260 | read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags); | ||
261 | } | ||
262 | |||
263 | static void octeon_irq_ciu1_disable(unsigned int irq) | ||
264 | { | ||
265 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ | ||
266 | unsigned long flags; | ||
267 | uint64_t en1; | ||
268 | #ifdef CONFIG_SMP | ||
269 | int cpu; | ||
270 | write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags); | ||
271 | for_each_online_cpu(cpu) { | ||
272 | int coreid = cpu_logical_map(cpu); | ||
273 | en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | ||
274 | en1 &= ~(1ull << bit); | ||
275 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); | ||
276 | } | ||
277 | /* | ||
278 | * We need to do a read after the last update to make sure all | ||
279 | * of them are done. | ||
280 | */ | ||
281 | cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1)); | ||
282 | write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags); | ||
283 | #else | ||
284 | int coreid = cvmx_get_core_num(); | ||
285 | local_irq_save(flags); | ||
286 | en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | ||
287 | en1 &= ~(1ull << bit); | ||
288 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); | ||
289 | cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | ||
290 | local_irq_restore(flags); | ||
291 | #endif | ||
292 | } | ||
293 | |||
294 | #ifdef CONFIG_SMP | ||
295 | static void octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *dest) | ||
296 | { | ||
297 | int cpu; | ||
298 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ | ||
299 | |||
300 | write_lock(&octeon_irq_ciu1_rwlock); | ||
301 | for_each_online_cpu(cpu) { | ||
302 | int coreid = cpu_logical_map(cpu); | ||
303 | uint64_t en1 = | ||
304 | cvmx_read_csr(CVMX_CIU_INTX_EN1 | ||
305 | (coreid * 2 + 1)); | ||
306 | if (cpumask_test_cpu(cpu, dest)) | ||
307 | en1 |= 1ull << bit; | ||
308 | else | ||
309 | en1 &= ~(1ull << bit); | ||
310 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); | ||
311 | } | ||
312 | /* | ||
313 | * We need to do a read after the last update to make sure all | ||
314 | * of them are done. | ||
315 | */ | ||
316 | cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1)); | ||
317 | write_unlock(&octeon_irq_ciu1_rwlock); | ||
318 | } | ||
319 | #endif | ||
320 | |||
321 | static struct irq_chip octeon_irq_chip_ciu1 = { | ||
322 | .name = "CIU1", | ||
323 | .enable = octeon_irq_ciu1_enable, | ||
324 | .disable = octeon_irq_ciu1_disable, | ||
325 | .ack = octeon_irq_ciu1_ack, | ||
326 | .eoi = octeon_irq_ciu1_eoi, | ||
327 | #ifdef CONFIG_SMP | ||
328 | .set_affinity = octeon_irq_ciu1_set_affinity, | ||
329 | #endif | ||
330 | }; | ||
331 | |||
332 | #ifdef CONFIG_PCI_MSI | ||
333 | |||
334 | static void octeon_irq_msi_ack(unsigned int irq) | ||
335 | { | ||
336 | if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) { | ||
337 | /* These chips have PCI */ | ||
338 | cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV, | ||
339 | 1ull << (irq - OCTEON_IRQ_MSI_BIT0)); | ||
340 | } else { | ||
341 | /* | ||
342 | * These chips have PCIe. Thankfully the ACK doesn't | ||
343 | * need any locking. | ||
344 | */ | ||
345 | cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0, | ||
346 | 1ull << (irq - OCTEON_IRQ_MSI_BIT0)); | ||
347 | } | ||
348 | } | ||
349 | |||
350 | static void octeon_irq_msi_eoi(unsigned int irq) | ||
351 | { | ||
352 | /* Nothing needed */ | ||
353 | } | ||
354 | |||
355 | static void octeon_irq_msi_enable(unsigned int irq) | ||
356 | { | ||
357 | if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) { | ||
358 | /* | ||
359 | * Octeon PCI doesn't have the ability to mask/unmask | ||
360 | * MSI interrupts individually. Instead of | ||
361 | * masking/unmasking them in groups of 16, we simply | ||
362 | * assume MSI devices are well behaved. MSI | ||
363 | * interrupts are always enabled and the ACK is assumed | ||
364 | * to be enough. | ||
365 | */ | ||
366 | } else { | ||
367 | /* These chips have PCIe. Note that we only support | ||
368 | * the first 64 MSI interrupts. Unfortunately all the | ||
369 | * MSI enables are in the same register. We use | ||
370 | * MSI0's lock to control access to them all. | ||
371 | */ | ||
372 | uint64_t en; | ||
373 | unsigned long flags; | ||
374 | spin_lock_irqsave(&octeon_irq_msi_lock, flags); | ||
375 | en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0); | ||
376 | en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0); | ||
377 | cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en); | ||
378 | cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0); | ||
379 | spin_unlock_irqrestore(&octeon_irq_msi_lock, flags); | ||
380 | } | ||
381 | } | ||
382 | |||
383 | static void octeon_irq_msi_disable(unsigned int irq) | ||
384 | { | ||
385 | if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) { | ||
386 | /* See comment in enable */ | ||
387 | } else { | ||
388 | /* | ||
389 | * These chips have PCIe. Note that we only support | ||
390 | * the first 64 MSI interrupts. Unfortunately all the | ||
391 | * MSI enables are in the same register. We use | ||
392 | * MSI0's lock to control access to them all. | ||
393 | */ | ||
394 | uint64_t en; | ||
395 | unsigned long flags; | ||
396 | spin_lock_irqsave(&octeon_irq_msi_lock, flags); | ||
397 | en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0); | ||
398 | en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0)); | ||
399 | cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en); | ||
400 | cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0); | ||
401 | spin_unlock_irqrestore(&octeon_irq_msi_lock, flags); | ||
402 | } | ||
403 | } | ||
404 | |||
405 | static struct irq_chip octeon_irq_chip_msi = { | ||
406 | .name = "MSI", | ||
407 | .enable = octeon_irq_msi_enable, | ||
408 | .disable = octeon_irq_msi_disable, | ||
409 | .ack = octeon_irq_msi_ack, | ||
410 | .eoi = octeon_irq_msi_eoi, | ||
411 | }; | ||
412 | #endif | ||
413 | |||
414 | void __init arch_init_irq(void) | ||
415 | { | ||
416 | int irq; | ||
417 | |||
418 | #ifdef CONFIG_SMP | ||
419 | /* Set the default affinity to the boot cpu. */ | ||
420 | cpumask_clear(irq_default_affinity); | ||
421 | cpumask_set_cpu(smp_processor_id(), irq_default_affinity); | ||
422 | #endif | ||
423 | |||
424 | if (NR_IRQS < OCTEON_IRQ_LAST) | ||
425 | pr_err("octeon_irq_init: NR_IRQS is set too low\n"); | ||
426 | |||
427 | /* 0 - 15 reserved for i8259 master and slave controller. */ | ||
428 | |||
429 | /* 17 - 23 Mips internal */ | ||
430 | for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) { | ||
431 | set_irq_chip_and_handler(irq, &octeon_irq_chip_core, | ||
432 | handle_percpu_irq); | ||
433 | } | ||
434 | |||
435 | /* 24 - 87 CIU_INT_SUM0 */ | ||
436 | for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) { | ||
437 | set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu0, | ||
438 | handle_percpu_irq); | ||
439 | } | ||
440 | |||
441 | /* 88 - 151 CIU_INT_SUM1 */ | ||
442 | for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) { | ||
443 | set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu1, | ||
444 | handle_percpu_irq); | ||
445 | } | ||
446 | |||
447 | #ifdef CONFIG_PCI_MSI | ||
448 | /* 152 - 215 PCI/PCIe MSI interrupts */ | ||
449 | for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) { | ||
450 | set_irq_chip_and_handler(irq, &octeon_irq_chip_msi, | ||
451 | handle_percpu_irq); | ||
452 | } | ||
453 | #endif | ||
454 | set_c0_status(0x300 << 2); | ||
455 | } | ||
456 | |||
457 | asmlinkage void plat_irq_dispatch(void) | ||
458 | { | ||
459 | const unsigned long core_id = cvmx_get_core_num(); | ||
460 | const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2); | ||
461 | const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2); | ||
462 | const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1; | ||
463 | const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1); | ||
464 | unsigned long cop0_cause; | ||
465 | unsigned long cop0_status; | ||
466 | uint64_t ciu_en; | ||
467 | uint64_t ciu_sum; | ||
468 | |||
469 | while (1) { | ||
470 | cop0_cause = read_c0_cause(); | ||
471 | cop0_status = read_c0_status(); | ||
472 | cop0_cause &= cop0_status; | ||
473 | cop0_cause &= ST0_IM; | ||
474 | |||
475 | if (unlikely(cop0_cause & STATUSF_IP2)) { | ||
476 | ciu_sum = cvmx_read_csr(ciu_sum0_address); | ||
477 | ciu_en = cvmx_read_csr(ciu_en0_address); | ||
478 | ciu_sum &= ciu_en; | ||
479 | if (likely(ciu_sum)) | ||
480 | do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1); | ||
481 | else | ||
482 | spurious_interrupt(); | ||
483 | } else if (unlikely(cop0_cause & STATUSF_IP3)) { | ||
484 | ciu_sum = cvmx_read_csr(ciu_sum1_address); | ||
485 | ciu_en = cvmx_read_csr(ciu_en1_address); | ||
486 | ciu_sum &= ciu_en; | ||
487 | if (likely(ciu_sum)) | ||
488 | do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1); | ||
489 | else | ||
490 | spurious_interrupt(); | ||
491 | } else if (likely(cop0_cause)) { | ||
492 | do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); | ||
493 | } else { | ||
494 | break; | ||
495 | } | ||
496 | } | ||
497 | } | ||
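To make the dispatch arithmetic at the end of plat_irq_dispatch() above concrete, here is a small stand-alone sketch of how the highest pending bit in CIU_INT_SUM0 becomes a Linux IRQ number. The fls64_sketch() helper and the base value of 24 (taken from the "24 - 87 CIU_INT_SUM0" comment) are illustrative stand-ins, not kernel code.

#include <stdint.h>
#include <stdio.h>

#define IRQ_WORKQ0_BASE 24	/* per the "24 - 87 CIU_INT_SUM0" comment above */

/* Stand-in for the kernel's fls64(): 1-based index of the most significant set bit. */
static int fls64_sketch(uint64_t x)
{
	int n = 0;

	while (x) {
		n++;
		x >>= 1;
	}
	return n;
}

int main(void)
{
	uint64_t ciu_sum = 1ull << 3;	/* pretend summary bit 3 is the highest enabled, pending source */
	int irq = fls64_sketch(ciu_sum) + IRQ_WORKQ0_BASE - 1;

	printf("would dispatch IRQ %d\n", irq);	/* fls64 -> 4, so 4 + 24 - 1 = 27 */
	return 0;
}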
diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
new file mode 100644
index 000000000000..88e0cddca205
--- /dev/null
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -0,0 +1,521 @@
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Unified implementation of memcpy, memmove and the __copy_user backend. | ||
7 | * | ||
8 | * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org) | ||
9 | * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc. | ||
10 | * Copyright (C) 2002 Broadcom, Inc. | ||
11 | * memcpy/copy_user author: Mark Vandevoorde | ||
12 | * | ||
13 | * Mnemonic names for arguments to memcpy/__copy_user | ||
14 | */ | ||
15 | |||
16 | #include <asm/asm.h> | ||
17 | #include <asm/asm-offsets.h> | ||
18 | #include <asm/regdef.h> | ||
19 | |||
20 | #define dst a0 | ||
21 | #define src a1 | ||
22 | #define len a2 | ||
23 | |||
24 | /* | ||
25 | * Spec | ||
26 | * | ||
27 | * memcpy copies len bytes from src to dst and sets v0 to dst. | ||
28 | * It assumes that | ||
29 | * - src and dst don't overlap | ||
30 | * - src is readable | ||
31 | * - dst is writable | ||
32 | * memcpy uses the standard calling convention | ||
33 | * | ||
34 | * __copy_user copies up to len bytes from src to dst and sets a2 (len) to | ||
35 | * the number of uncopied bytes due to an exception caused by a read or write. | ||
36 | * __copy_user assumes that src and dst don't overlap, and that the call is | ||
37 | * implementing one of the following: | ||
38 | * copy_to_user | ||
39 | * - src is readable (no exceptions when reading src) | ||
40 | * copy_from_user | ||
41 | * - dst is writable (no exceptions when writing dst) | ||
42 | * __copy_user uses a non-standard calling convention; see | ||
43 | * arch/mips/include/asm/uaccess.h | ||
44 | * | ||
45 | * When an exception happens on a load, the handler must | ||
46 | * ensure that all of the destination buffer is overwritten to prevent | ||
47 | * leaking information to user mode programs. | ||
48 | */ | ||
49 | |||
50 | /* | ||
51 | * Implementation | ||
52 | */ | ||
53 | |||
54 | /* | ||
55 | * The exception handler for loads requires that: | ||
56 | * 1- AT contain the address of the byte just past the end of the source | ||
57 | * of the copy, | ||
58 | * 2- src_entry <= src < AT, and | ||
59 | * 3- (dst - src) == (dst_entry - src_entry), | ||
60 | * The _entry suffix denotes values when __copy_user was called. | ||
61 | * | ||
62 | * (1) is set up by uaccess.h and maintained by not writing AT in copy_user | ||
63 | * (2) is met by incrementing src by the number of bytes copied | ||
64 | * (3) is met by not doing loads between a pair of increments of dst and src | ||
65 | * | ||
66 | * The exception handlers for stores adjust len (if necessary) and return. | ||
67 | * These handlers do not need to overwrite any data. | ||
68 | * | ||
69 | * For __rmemcpy and memmove an exception is always a kernel bug, therefore | ||
70 | * they're not protected. | ||
71 | */ | ||
72 | |||
73 | #define EXC(inst_reg,addr,handler) \ | ||
74 | 9: inst_reg, addr; \ | ||
75 | .section __ex_table,"a"; \ | ||
76 | PTR 9b, handler; \ | ||
77 | .previous | ||
78 | |||
79 | /* | ||
80 | * Only on the 64-bit kernel can we make use of 64-bit registers. | ||
81 | */ | ||
82 | #ifdef CONFIG_64BIT | ||
83 | #define USE_DOUBLE | ||
84 | #endif | ||
85 | |||
86 | #ifdef USE_DOUBLE | ||
87 | |||
88 | #define LOAD ld | ||
89 | #define LOADL ldl | ||
90 | #define LOADR ldr | ||
91 | #define STOREL sdl | ||
92 | #define STORER sdr | ||
93 | #define STORE sd | ||
94 | #define ADD daddu | ||
95 | #define SUB dsubu | ||
96 | #define SRL dsrl | ||
97 | #define SRA dsra | ||
98 | #define SLL dsll | ||
99 | #define SLLV dsllv | ||
100 | #define SRLV dsrlv | ||
101 | #define NBYTES 8 | ||
102 | #define LOG_NBYTES 3 | ||
103 | |||
104 | /* | ||
105 | * As we are sharing the code base with the mips32 tree (which uses the o32 ABI | ||
106 | * register definitions), we need to redefine the register definitions from | ||
107 | * the n64 ABI register naming to the o32 ABI register naming. | ||
108 | */ | ||
109 | #undef t0 | ||
110 | #undef t1 | ||
111 | #undef t2 | ||
112 | #undef t3 | ||
113 | #define t0 $8 | ||
114 | #define t1 $9 | ||
115 | #define t2 $10 | ||
116 | #define t3 $11 | ||
117 | #define t4 $12 | ||
118 | #define t5 $13 | ||
119 | #define t6 $14 | ||
120 | #define t7 $15 | ||
121 | |||
122 | #else | ||
123 | |||
124 | #define LOAD lw | ||
125 | #define LOADL lwl | ||
126 | #define LOADR lwr | ||
127 | #define STOREL swl | ||
128 | #define STORER swr | ||
129 | #define STORE sw | ||
130 | #define ADD addu | ||
131 | #define SUB subu | ||
132 | #define SRL srl | ||
133 | #define SLL sll | ||
134 | #define SRA sra | ||
135 | #define SLLV sllv | ||
136 | #define SRLV srlv | ||
137 | #define NBYTES 4 | ||
138 | #define LOG_NBYTES 2 | ||
139 | |||
140 | #endif /* USE_DOUBLE */ | ||
141 | |||
142 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
143 | #define LDFIRST LOADR | ||
144 | #define LDREST LOADL | ||
145 | #define STFIRST STORER | ||
146 | #define STREST STOREL | ||
147 | #define SHIFT_DISCARD SLLV | ||
148 | #else | ||
149 | #define LDFIRST LOADL | ||
150 | #define LDREST LOADR | ||
151 | #define STFIRST STOREL | ||
152 | #define STREST STORER | ||
153 | #define SHIFT_DISCARD SRLV | ||
154 | #endif | ||
155 | |||
156 | #define FIRST(unit) ((unit)*NBYTES) | ||
157 | #define REST(unit) (FIRST(unit)+NBYTES-1) | ||
158 | #define UNIT(unit) FIRST(unit) | ||
159 | |||
160 | #define ADDRMASK (NBYTES-1) | ||
161 | |||
162 | .text | ||
163 | .set noreorder | ||
164 | .set noat | ||
165 | |||
166 | /* | ||
167 | * A combined memcpy/__copy_user | ||
168 | * __copy_user sets len to 0 for success; else to an upper bound of | ||
169 | * the number of uncopied bytes. | ||
170 | * memcpy sets v0 to dst. | ||
171 | */ | ||
172 | .align 5 | ||
173 | LEAF(memcpy) /* a0=dst a1=src a2=len */ | ||
174 | move v0, dst /* return value */ | ||
175 | __memcpy: | ||
176 | FEXPORT(__copy_user) | ||
177 | /* | ||
178 | * Note: dst & src may be unaligned, len may be 0 | ||
179 | * Temps | ||
180 | */ | ||
181 | # | ||
182 | # Octeon doesn't care if the destination is unaligned. The hardware | ||
183 | # can fix it faster than we can special case the assembly. | ||
184 | # | ||
185 | pref 0, 0(src) | ||
186 | sltu t0, len, NBYTES # Check if < 1 word | ||
187 | bnez t0, copy_bytes_checklen | ||
188 | and t0, src, ADDRMASK # Check if src unaligned | ||
189 | bnez t0, src_unaligned | ||
190 | sltu t0, len, 4*NBYTES # Check if < 4 words | ||
191 | bnez t0, less_than_4units | ||
192 | sltu t0, len, 8*NBYTES # Check if < 8 words | ||
193 | bnez t0, less_than_8units | ||
194 | sltu t0, len, 16*NBYTES # Check if < 16 words | ||
195 | bnez t0, cleanup_both_aligned | ||
196 | sltu t0, len, 128+1 # Check if len < 129 | ||
197 | bnez t0, 1f # Skip prefetch if len is too short | ||
198 | sltu t0, len, 256+1 # Check if len < 257 | ||
199 | bnez t0, 1f # Skip prefetch if len is too short | ||
200 | pref 0, 128(src) # We must not prefetch invalid addresses | ||
201 | # | ||
202 | # This is where we loop if there is more than 128 bytes left | ||
203 | 2: pref 0, 256(src) # We must not prefetch invalid addresses | ||
204 | # | ||
205 | # This is where we loop if we can't prefetch anymore | ||
206 | 1: | ||
207 | EXC( LOAD t0, UNIT(0)(src), l_exc) | ||
208 | EXC( LOAD t1, UNIT(1)(src), l_exc_copy) | ||
209 | EXC( LOAD t2, UNIT(2)(src), l_exc_copy) | ||
210 | EXC( LOAD t3, UNIT(3)(src), l_exc_copy) | ||
211 | SUB len, len, 16*NBYTES | ||
212 | EXC( STORE t0, UNIT(0)(dst), s_exc_p16u) | ||
213 | EXC( STORE t1, UNIT(1)(dst), s_exc_p15u) | ||
214 | EXC( STORE t2, UNIT(2)(dst), s_exc_p14u) | ||
215 | EXC( STORE t3, UNIT(3)(dst), s_exc_p13u) | ||
216 | EXC( LOAD t0, UNIT(4)(src), l_exc_copy) | ||
217 | EXC( LOAD t1, UNIT(5)(src), l_exc_copy) | ||
218 | EXC( LOAD t2, UNIT(6)(src), l_exc_copy) | ||
219 | EXC( LOAD t3, UNIT(7)(src), l_exc_copy) | ||
220 | EXC( STORE t0, UNIT(4)(dst), s_exc_p12u) | ||
221 | EXC( STORE t1, UNIT(5)(dst), s_exc_p11u) | ||
222 | EXC( STORE t2, UNIT(6)(dst), s_exc_p10u) | ||
223 | ADD src, src, 16*NBYTES | ||
224 | EXC( STORE t3, UNIT(7)(dst), s_exc_p9u) | ||
225 | ADD dst, dst, 16*NBYTES | ||
226 | EXC( LOAD t0, UNIT(-8)(src), l_exc_copy) | ||
227 | EXC( LOAD t1, UNIT(-7)(src), l_exc_copy) | ||
228 | EXC( LOAD t2, UNIT(-6)(src), l_exc_copy) | ||
229 | EXC( LOAD t3, UNIT(-5)(src), l_exc_copy) | ||
230 | EXC( STORE t0, UNIT(-8)(dst), s_exc_p8u) | ||
231 | EXC( STORE t1, UNIT(-7)(dst), s_exc_p7u) | ||
232 | EXC( STORE t2, UNIT(-6)(dst), s_exc_p6u) | ||
233 | EXC( STORE t3, UNIT(-5)(dst), s_exc_p5u) | ||
234 | EXC( LOAD t0, UNIT(-4)(src), l_exc_copy) | ||
235 | EXC( LOAD t1, UNIT(-3)(src), l_exc_copy) | ||
236 | EXC( LOAD t2, UNIT(-2)(src), l_exc_copy) | ||
237 | EXC( LOAD t3, UNIT(-1)(src), l_exc_copy) | ||
238 | EXC( STORE t0, UNIT(-4)(dst), s_exc_p4u) | ||
239 | EXC( STORE t1, UNIT(-3)(dst), s_exc_p3u) | ||
240 | EXC( STORE t2, UNIT(-2)(dst), s_exc_p2u) | ||
241 | EXC( STORE t3, UNIT(-1)(dst), s_exc_p1u) | ||
242 | sltu t0, len, 256+1 # See if we can prefetch more | ||
243 | beqz t0, 2b | ||
244 | sltu t0, len, 128 # See if we can loop more time | ||
245 | beqz t0, 1b | ||
246 | nop | ||
247 | # | ||
248 | # Jump here if there are less than 16*NBYTES left. | ||
249 | # | ||
250 | cleanup_both_aligned: | ||
251 | beqz len, done | ||
252 | sltu t0, len, 8*NBYTES | ||
253 | bnez t0, less_than_8units | ||
254 | nop | ||
255 | EXC( LOAD t0, UNIT(0)(src), l_exc) | ||
256 | EXC( LOAD t1, UNIT(1)(src), l_exc_copy) | ||
257 | EXC( LOAD t2, UNIT(2)(src), l_exc_copy) | ||
258 | EXC( LOAD t3, UNIT(3)(src), l_exc_copy) | ||
259 | SUB len, len, 8*NBYTES | ||
260 | EXC( STORE t0, UNIT(0)(dst), s_exc_p8u) | ||
261 | EXC( STORE t1, UNIT(1)(dst), s_exc_p7u) | ||
262 | EXC( STORE t2, UNIT(2)(dst), s_exc_p6u) | ||
263 | EXC( STORE t3, UNIT(3)(dst), s_exc_p5u) | ||
264 | EXC( LOAD t0, UNIT(4)(src), l_exc_copy) | ||
265 | EXC( LOAD t1, UNIT(5)(src), l_exc_copy) | ||
266 | EXC( LOAD t2, UNIT(6)(src), l_exc_copy) | ||
267 | EXC( LOAD t3, UNIT(7)(src), l_exc_copy) | ||
268 | EXC( STORE t0, UNIT(4)(dst), s_exc_p4u) | ||
269 | EXC( STORE t1, UNIT(5)(dst), s_exc_p3u) | ||
270 | EXC( STORE t2, UNIT(6)(dst), s_exc_p2u) | ||
271 | EXC( STORE t3, UNIT(7)(dst), s_exc_p1u) | ||
272 | ADD src, src, 8*NBYTES | ||
273 | beqz len, done | ||
274 | ADD dst, dst, 8*NBYTES | ||
275 | # | ||
276 | # Jump here if there are less than 8*NBYTES left. | ||
277 | # | ||
278 | less_than_8units: | ||
279 | sltu t0, len, 4*NBYTES | ||
280 | bnez t0, less_than_4units | ||
281 | nop | ||
282 | EXC( LOAD t0, UNIT(0)(src), l_exc) | ||
283 | EXC( LOAD t1, UNIT(1)(src), l_exc_copy) | ||
284 | EXC( LOAD t2, UNIT(2)(src), l_exc_copy) | ||
285 | EXC( LOAD t3, UNIT(3)(src), l_exc_copy) | ||
286 | SUB len, len, 4*NBYTES | ||
287 | EXC( STORE t0, UNIT(0)(dst), s_exc_p4u) | ||
288 | EXC( STORE t1, UNIT(1)(dst), s_exc_p3u) | ||
289 | EXC( STORE t2, UNIT(2)(dst), s_exc_p2u) | ||
290 | EXC( STORE t3, UNIT(3)(dst), s_exc_p1u) | ||
291 | ADD src, src, 4*NBYTES | ||
292 | beqz len, done | ||
293 | ADD dst, dst, 4*NBYTES | ||
294 | # | ||
295 | # Jump here if there are less than 4*NBYTES left. This means | ||
296 | # we may need to copy up to 3 NBYTES words. | ||
297 | # | ||
298 | less_than_4units: | ||
299 | sltu t0, len, 1*NBYTES | ||
300 | bnez t0, copy_bytes_checklen | ||
301 | nop | ||
302 | # | ||
303 | # 1) Copy NBYTES, then check length again | ||
304 | # | ||
305 | EXC( LOAD t0, 0(src), l_exc) | ||
306 | SUB len, len, NBYTES | ||
307 | sltu t1, len, 8 | ||
308 | EXC( STORE t0, 0(dst), s_exc_p1u) | ||
309 | ADD src, src, NBYTES | ||
310 | bnez t1, copy_bytes_checklen | ||
311 | ADD dst, dst, NBYTES | ||
312 | # | ||
313 | # 2) Copy NBYTES, then check length again | ||
314 | # | ||
315 | EXC( LOAD t0, 0(src), l_exc) | ||
316 | SUB len, len, NBYTES | ||
317 | sltu t1, len, 8 | ||
318 | EXC( STORE t0, 0(dst), s_exc_p1u) | ||
319 | ADD src, src, NBYTES | ||
320 | bnez t1, copy_bytes_checklen | ||
321 | ADD dst, dst, NBYTES | ||
322 | # | ||
323 | # 3) Copy NBYTES, then check length again | ||
324 | # | ||
325 | EXC( LOAD t0, 0(src), l_exc) | ||
326 | SUB len, len, NBYTES | ||
327 | ADD src, src, NBYTES | ||
328 | ADD dst, dst, NBYTES | ||
329 | b copy_bytes_checklen | ||
330 | EXC( STORE t0, -8(dst), s_exc_p1u) | ||
331 | |||
332 | src_unaligned: | ||
333 | #define rem t8 | ||
334 | SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter | ||
335 | beqz t0, cleanup_src_unaligned | ||
336 | and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES | ||
337 | 1: | ||
338 | /* | ||
339 | * Avoid consecutive LD*'s to the same register since some mips | ||
340 | * implementations can't issue them in the same cycle. | ||
341 | * It's OK to load FIRST(N+1) before REST(N) because the two addresses | ||
342 | * are to the same unit (unless src is aligned, but it's not). | ||
343 | */ | ||
344 | EXC( LDFIRST t0, FIRST(0)(src), l_exc) | ||
345 | EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy) | ||
346 | SUB len, len, 4*NBYTES | ||
347 | EXC( LDREST t0, REST(0)(src), l_exc_copy) | ||
348 | EXC( LDREST t1, REST(1)(src), l_exc_copy) | ||
349 | EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy) | ||
350 | EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy) | ||
351 | EXC( LDREST t2, REST(2)(src), l_exc_copy) | ||
352 | EXC( LDREST t3, REST(3)(src), l_exc_copy) | ||
353 | ADD src, src, 4*NBYTES | ||
354 | EXC( STORE t0, UNIT(0)(dst), s_exc_p4u) | ||
355 | EXC( STORE t1, UNIT(1)(dst), s_exc_p3u) | ||
356 | EXC( STORE t2, UNIT(2)(dst), s_exc_p2u) | ||
357 | EXC( STORE t3, UNIT(3)(dst), s_exc_p1u) | ||
358 | bne len, rem, 1b | ||
359 | ADD dst, dst, 4*NBYTES | ||
360 | |||
361 | cleanup_src_unaligned: | ||
362 | beqz len, done | ||
363 | and rem, len, NBYTES-1 # rem = len % NBYTES | ||
364 | beq rem, len, copy_bytes | ||
365 | nop | ||
366 | 1: | ||
367 | EXC( LDFIRST t0, FIRST(0)(src), l_exc) | ||
368 | EXC( LDREST t0, REST(0)(src), l_exc_copy) | ||
369 | SUB len, len, NBYTES | ||
370 | EXC( STORE t0, 0(dst), s_exc_p1u) | ||
371 | ADD src, src, NBYTES | ||
372 | bne len, rem, 1b | ||
373 | ADD dst, dst, NBYTES | ||
374 | |||
375 | copy_bytes_checklen: | ||
376 | beqz len, done | ||
377 | nop | ||
378 | copy_bytes: | ||
379 | /* 0 < len < NBYTES */ | ||
380 | #define COPY_BYTE(N) \ | ||
381 | EXC( lb t0, N(src), l_exc); \ | ||
382 | SUB len, len, 1; \ | ||
383 | beqz len, done; \ | ||
384 | EXC( sb t0, N(dst), s_exc_p1) | ||
385 | |||
386 | COPY_BYTE(0) | ||
387 | COPY_BYTE(1) | ||
388 | #ifdef USE_DOUBLE | ||
389 | COPY_BYTE(2) | ||
390 | COPY_BYTE(3) | ||
391 | COPY_BYTE(4) | ||
392 | COPY_BYTE(5) | ||
393 | #endif | ||
394 | EXC( lb t0, NBYTES-2(src), l_exc) | ||
395 | SUB len, len, 1 | ||
396 | jr ra | ||
397 | EXC( sb t0, NBYTES-2(dst), s_exc_p1) | ||
398 | done: | ||
399 | jr ra | ||
400 | nop | ||
401 | END(memcpy) | ||
402 | |||
403 | l_exc_copy: | ||
404 | /* | ||
405 | * Copy bytes from src until faulting load address (or until a | ||
406 | * lb faults) | ||
407 | * | ||
408 | * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28) | ||
409 | * may be more than a byte beyond the last address. | ||
410 | * Hence, the lb below may get an exception. | ||
411 | * | ||
412 | * Assumes src < THREAD_BUADDR($28) | ||
413 | */ | ||
414 | LOAD t0, TI_TASK($28) | ||
415 | nop | ||
416 | LOAD t0, THREAD_BUADDR(t0) | ||
417 | 1: | ||
418 | EXC( lb t1, 0(src), l_exc) | ||
419 | ADD src, src, 1 | ||
420 | sb t1, 0(dst) # can't fault -- we're copy_from_user | ||
421 | bne src, t0, 1b | ||
422 | ADD dst, dst, 1 | ||
423 | l_exc: | ||
424 | LOAD t0, TI_TASK($28) | ||
425 | nop | ||
426 | LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address | ||
427 | nop | ||
428 | SUB len, AT, t0 # len number of uncopied bytes | ||
429 | /* | ||
430 | * Here's where we rely on src and dst being incremented in tandem, | ||
431 | * See (3) above. | ||
432 | * dst += (fault addr - src) to put dst at first byte to clear | ||
433 | */ | ||
434 | ADD dst, t0 # compute start address in a1 | ||
435 | SUB dst, src | ||
436 | /* | ||
437 | * Clear len bytes starting at dst. Can't call __bzero because it | ||
438 | * might modify len. An inefficient loop for these rare times... | ||
439 | */ | ||
440 | beqz len, done | ||
441 | SUB src, len, 1 | ||
442 | 1: sb zero, 0(dst) | ||
443 | ADD dst, dst, 1 | ||
444 | bnez src, 1b | ||
445 | SUB src, src, 1 | ||
446 | jr ra | ||
447 | nop | ||
448 | |||
449 | |||
450 | #define SEXC(n) \ | ||
451 | s_exc_p ## n ## u: \ | ||
452 | jr ra; \ | ||
453 | ADD len, len, n*NBYTES | ||
454 | |||
455 | SEXC(16) | ||
456 | SEXC(15) | ||
457 | SEXC(14) | ||
458 | SEXC(13) | ||
459 | SEXC(12) | ||
460 | SEXC(11) | ||
461 | SEXC(10) | ||
462 | SEXC(9) | ||
463 | SEXC(8) | ||
464 | SEXC(7) | ||
465 | SEXC(6) | ||
466 | SEXC(5) | ||
467 | SEXC(4) | ||
468 | SEXC(3) | ||
469 | SEXC(2) | ||
470 | SEXC(1) | ||
471 | |||
472 | s_exc_p1: | ||
473 | jr ra | ||
474 | ADD len, len, 1 | ||
475 | s_exc: | ||
476 | jr ra | ||
477 | nop | ||
478 | |||
479 | .align 5 | ||
480 | LEAF(memmove) | ||
481 | ADD t0, a0, a2 | ||
482 | ADD t1, a1, a2 | ||
483 | sltu t0, a1, t0 # dst + len <= src -> memcpy | ||
484 | sltu t1, a0, t1 # dst >= src + len -> memcpy | ||
485 | and t0, t1 | ||
486 | beqz t0, __memcpy | ||
487 | move v0, a0 /* return value */ | ||
488 | beqz a2, r_out | ||
489 | END(memmove) | ||
490 | |||
491 | /* fall through to __rmemcpy */ | ||
492 | LEAF(__rmemcpy) /* a0=dst a1=src a2=len */ | ||
493 | sltu t0, a1, a0 | ||
494 | beqz t0, r_end_bytes_up # src >= dst | ||
495 | nop | ||
496 | ADD a0, a2 # dst = dst + len | ||
497 | ADD a1, a2 # src = src + len | ||
498 | |||
499 | r_end_bytes: | ||
500 | lb t0, -1(a1) | ||
501 | SUB a2, a2, 0x1 | ||
502 | sb t0, -1(a0) | ||
503 | SUB a1, a1, 0x1 | ||
504 | bnez a2, r_end_bytes | ||
505 | SUB a0, a0, 0x1 | ||
506 | |||
507 | r_out: | ||
508 | jr ra | ||
509 | move a2, zero | ||
510 | |||
511 | r_end_bytes_up: | ||
512 | lb t0, (a1) | ||
513 | SUB a2, a2, 0x1 | ||
514 | sb t0, (a0) | ||
515 | ADD a1, a1, 0x1 | ||
516 | bnez a2, r_end_bytes_up | ||
517 | ADD a0, a0, 0x1 | ||
518 | |||
519 | jr ra | ||
520 | move a2, zero | ||
521 | END(__rmemcpy) | ||
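The __copy_user contract spelled out in the header comment above (on return, len holds the number of bytes left uncopied after a fault) is what the generic copy_to_user()/copy_from_user() wrappers build on. A minimal, hedged sketch of how a caller typically consumes that return value:

#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

/* Sketch only: any non-zero return means some bytes were left uncopied. */
static int fetch_from_user(void *dst, const void __user *src, size_t len)
{
	if (copy_from_user(dst, src, len))
		return -EFAULT;
	return 0;
}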
diff --git a/arch/mips/cavium-octeon/serial.c b/arch/mips/cavium-octeon/serial.c
new file mode 100644
index 000000000000..8240728d485a
--- /dev/null
+++ b/arch/mips/cavium-octeon/serial.c
@@ -0,0 +1,136 @@
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2004-2007 Cavium Networks | ||
7 | */ | ||
8 | #include <linux/console.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/platform_device.h> | ||
12 | #include <linux/serial.h> | ||
13 | #include <linux/serial_8250.h> | ||
14 | #include <linux/serial_reg.h> | ||
15 | #include <linux/tty.h> | ||
16 | |||
17 | #include <asm/time.h> | ||
18 | |||
19 | #include <asm/octeon/octeon.h> | ||
20 | |||
21 | #ifdef CONFIG_GDB_CONSOLE | ||
22 | #define DEBUG_UART 0 | ||
23 | #else | ||
24 | #define DEBUG_UART 1 | ||
25 | #endif | ||
26 | |||
27 | unsigned int octeon_serial_in(struct uart_port *up, int offset) | ||
28 | { | ||
29 | int rv = cvmx_read_csr((uint64_t)(up->membase + (offset << 3))); | ||
30 | if (offset == UART_IIR && (rv & 0xf) == 7) { | ||
31 | /* Busy interrupt, read the USR (39) and try again. */ | ||
32 | cvmx_read_csr((uint64_t)(up->membase + (39 << 3))); | ||
33 | rv = cvmx_read_csr((uint64_t)(up->membase + (offset << 3))); | ||
34 | } | ||
35 | return rv; | ||
36 | } | ||
37 | |||
38 | void octeon_serial_out(struct uart_port *up, int offset, int value) | ||
39 | { | ||
40 | /* | ||
41 | * If bits 6 or 7 of the OCTEON UART's LCR are set, it quits | ||
42 | * working. | ||
43 | */ | ||
44 | if (offset == UART_LCR) | ||
45 | value &= 0x9f; | ||
46 | cvmx_write_csr((uint64_t)(up->membase + (offset << 3)), (u8)value); | ||
47 | } | ||
48 | |||
49 | /* | ||
50 | * Allocated in .bss, so it is all zeroed. | ||
51 | */ | ||
52 | #define OCTEON_MAX_UARTS 3 | ||
53 | static struct plat_serial8250_port octeon_uart8250_data[OCTEON_MAX_UARTS + 1]; | ||
54 | static struct platform_device octeon_uart8250_device = { | ||
55 | .name = "serial8250", | ||
56 | .id = PLAT8250_DEV_PLATFORM, | ||
57 | .dev = { | ||
58 | .platform_data = octeon_uart8250_data, | ||
59 | }, | ||
60 | }; | ||
61 | |||
62 | static void __init octeon_uart_set_common(struct plat_serial8250_port *p) | ||
63 | { | ||
64 | p->flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE; | ||
65 | p->type = PORT_OCTEON; | ||
66 | p->iotype = UPIO_MEM; | ||
67 | p->regshift = 3; /* I/O addresses are every 8 bytes */ | ||
68 | p->uartclk = mips_hpt_frequency; | ||
69 | p->serial_in = octeon_serial_in; | ||
70 | p->serial_out = octeon_serial_out; | ||
71 | } | ||
72 | |||
73 | static int __init octeon_serial_init(void) | ||
74 | { | ||
75 | int enable_uart0; | ||
76 | int enable_uart1; | ||
77 | int enable_uart2; | ||
78 | struct plat_serial8250_port *p; | ||
79 | |||
80 | #ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL | ||
81 | /* | ||
82 | * If we are configured to run as the second of two kernels, | ||
83 | * disable uart0 and enable uart1. Uart0 is owned by the first | ||
84 | * kernel | ||
85 | */ | ||
86 | enable_uart0 = 0; | ||
87 | enable_uart1 = 1; | ||
88 | #else | ||
89 | /* | ||
90 | * We are configured for the first kernel. We'll enable uart0 | ||
91 | * if the bootloader told us to use 0, otherwise we'll enable | ||
92 | * uart 1. | ||
93 | */ | ||
94 | enable_uart0 = (octeon_get_boot_uart() == 0); | ||
95 | enable_uart1 = (octeon_get_boot_uart() == 1); | ||
96 | #ifdef CONFIG_KGDB | ||
97 | enable_uart1 = 1; | ||
98 | #endif | ||
99 | #endif | ||
100 | |||
101 | /* Right now CN52XX is the only chip with a third uart */ | ||
102 | enable_uart2 = OCTEON_IS_MODEL(OCTEON_CN52XX); | ||
103 | |||
104 | p = octeon_uart8250_data; | ||
105 | if (enable_uart0) { | ||
106 | /* Add a ttyS device for hardware uart 0 */ | ||
107 | octeon_uart_set_common(p); | ||
108 | p->membase = (void *) CVMX_MIO_UARTX_RBR(0); | ||
109 | p->mapbase = CVMX_MIO_UARTX_RBR(0) & ((1ull << 49) - 1); | ||
110 | p->irq = OCTEON_IRQ_UART0; | ||
111 | p++; | ||
112 | } | ||
113 | |||
114 | if (enable_uart1) { | ||
115 | /* Add a ttyS device for hardware uart 1 */ | ||
116 | octeon_uart_set_common(p); | ||
117 | p->membase = (void *) CVMX_MIO_UARTX_RBR(1); | ||
118 | p->mapbase = CVMX_MIO_UARTX_RBR(1) & ((1ull << 49) - 1); | ||
119 | p->irq = OCTEON_IRQ_UART1; | ||
120 | p++; | ||
121 | } | ||
122 | if (enable_uart2) { | ||
123 | /* Add a ttyS device for hardware uart 2 */ | ||
124 | octeon_uart_set_common(p); | ||
125 | p->membase = (void *) CVMX_MIO_UART2_RBR; | ||
126 | p->mapbase = CVMX_MIO_UART2_RBR & ((1ull << 49) - 1); | ||
127 | p->irq = OCTEON_IRQ_UART2; | ||
128 | p++; | ||
129 | } | ||
130 | |||
131 | BUG_ON(p > &octeon_uart8250_data[OCTEON_MAX_UARTS]); | ||
132 | |||
133 | return platform_device_register(&octeon_uart8250_device); | ||
134 | } | ||
135 | |||
136 | device_initcall(octeon_serial_init); | ||
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
new file mode 100644
index 000000000000..e085feddb4a4
--- /dev/null
+++ b/arch/mips/cavium-octeon/setup.c
@@ -0,0 +1,929 @@
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2004-2007 Cavium Networks | ||
7 | * Copyright (C) 2008 Wind River Systems | ||
8 | */ | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/console.h> | ||
11 | #include <linux/delay.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <linux/irq.h> | ||
15 | #include <linux/serial.h> | ||
16 | #include <linux/types.h> | ||
17 | #include <linux/string.h> /* for memset */ | ||
18 | #include <linux/serial.h> | ||
19 | #include <linux/tty.h> | ||
20 | #include <linux/time.h> | ||
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/serial_core.h> | ||
23 | #include <linux/serial_8250.h> | ||
24 | #include <linux/string.h> | ||
25 | |||
26 | #include <asm/processor.h> | ||
27 | #include <asm/reboot.h> | ||
28 | #include <asm/smp-ops.h> | ||
29 | #include <asm/system.h> | ||
30 | #include <asm/irq_cpu.h> | ||
31 | #include <asm/mipsregs.h> | ||
32 | #include <asm/bootinfo.h> | ||
33 | #include <asm/sections.h> | ||
34 | #include <asm/time.h> | ||
35 | |||
36 | #include <asm/octeon/octeon.h> | ||
37 | |||
38 | #ifdef CONFIG_CAVIUM_DECODE_RSL | ||
39 | extern void cvmx_interrupt_rsl_decode(void); | ||
40 | extern int __cvmx_interrupt_ecc_report_single_bit_errors; | ||
41 | extern void cvmx_interrupt_rsl_enable(void); | ||
42 | #endif | ||
43 | |||
44 | extern struct plat_smp_ops octeon_smp_ops; | ||
45 | |||
46 | #ifdef CONFIG_PCI | ||
47 | extern void pci_console_init(const char *arg); | ||
48 | #endif | ||
49 | |||
50 | #ifdef CONFIG_CAVIUM_RESERVE32 | ||
51 | extern uint64_t octeon_reserve32_memory; | ||
52 | #endif | ||
53 | static unsigned long long MAX_MEMORY = 512ull << 20; | ||
54 | |||
55 | struct octeon_boot_descriptor *octeon_boot_desc_ptr; | ||
56 | |||
57 | struct cvmx_bootinfo *octeon_bootinfo; | ||
58 | EXPORT_SYMBOL(octeon_bootinfo); | ||
59 | |||
60 | #ifdef CONFIG_CAVIUM_RESERVE32 | ||
61 | uint64_t octeon_reserve32_memory; | ||
62 | EXPORT_SYMBOL(octeon_reserve32_memory); | ||
63 | #endif | ||
64 | |||
65 | static int octeon_uart; | ||
66 | |||
67 | extern asmlinkage void handle_int(void); | ||
68 | extern asmlinkage void plat_irq_dispatch(void); | ||
69 | |||
70 | /** | ||
71 | * Return non-zero if we are currently running in the Octeon simulator | ||
72 | * | ||
73 | * Returns non-zero under the simulator, zero on real hardware | ||
74 | */ | ||
75 | int octeon_is_simulation(void) | ||
76 | { | ||
77 | return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM; | ||
78 | } | ||
79 | EXPORT_SYMBOL(octeon_is_simulation); | ||
80 | |||
81 | /** | ||
82 | * Return true if Octeon is in PCI Host mode. This means | ||
83 | * Linux can control the PCI bus. | ||
84 | * | ||
85 | * Returns Non zero if Octeon in host mode. | ||
86 | */ | ||
87 | int octeon_is_pci_host(void) | ||
88 | { | ||
89 | #ifdef CONFIG_PCI | ||
90 | return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST; | ||
91 | #else | ||
92 | return 0; | ||
93 | #endif | ||
94 | } | ||
95 | |||
96 | /** | ||
97 | * Get the clock rate of Octeon | ||
98 | * | ||
99 | * Returns Clock rate in Hz | ||
100 | */ | ||
101 | uint64_t octeon_get_clock_rate(void) | ||
102 | { | ||
103 | if (octeon_is_simulation()) | ||
104 | octeon_bootinfo->eclock_hz = 6000000; | ||
105 | return octeon_bootinfo->eclock_hz; | ||
106 | } | ||
107 | EXPORT_SYMBOL(octeon_get_clock_rate); | ||
108 | |||
109 | /** | ||
110 | * Write to the LCD display connected to the bootbus. This display | ||
111 | * exists on most Cavium evaluation boards. If it doesn't exist, then | ||
112 | * this function doesn't do anything. | ||
113 | * | ||
114 | * @s: String to write | ||
115 | */ | ||
116 | void octeon_write_lcd(const char *s) | ||
117 | { | ||
118 | if (octeon_bootinfo->led_display_base_addr) { | ||
119 | void __iomem *lcd_address = | ||
120 | ioremap_nocache(octeon_bootinfo->led_display_base_addr, | ||
121 | 8); | ||
122 | int i; | ||
123 | for (i = 0; i < 8; i++, s++) { | ||
124 | if (*s) | ||
125 | iowrite8(*s, lcd_address + i); | ||
126 | else | ||
127 | iowrite8(' ', lcd_address + i); | ||
128 | } | ||
129 | iounmap(lcd_address); | ||
130 | } | ||
131 | } | ||
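A brief usage note: the loop always touches all eight display positions, writing a space whenever the byte it reads is NUL, so in practice shorter strings come out space padded (note, though, that s keeps advancing past the terminator, so this relies on the following bytes being zero as well). prom_init() later uses it like this:

        octeon_write_lcd("LinuxSMP");    /* exactly eight characters, one per position */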
132 | |||
133 | /** | ||
134 | * Return the console uart passed by the bootloader | ||
135 | * | ||
136 | * Returns uart (0 or 1) | ||
137 | */ | ||
138 | int octeon_get_boot_uart(void) | ||
139 | { | ||
140 | int uart; | ||
141 | #ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL | ||
142 | uart = 1; | ||
143 | #else | ||
144 | uart = (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ? | ||
145 | 1 : 0; | ||
146 | #endif | ||
147 | return uart; | ||
148 | } | ||
149 | |||
150 | /** | ||
151 | * Get the coremask Linux was booted on. | ||
152 | * | ||
153 | * Returns Core mask | ||
154 | */ | ||
155 | int octeon_get_boot_coremask(void) | ||
156 | { | ||
157 | return octeon_boot_desc_ptr->core_mask; | ||
158 | } | ||
159 | |||
160 | /** | ||
161 | * Check the hardware BIST results for a CPU | ||
162 | */ | ||
163 | void octeon_check_cpu_bist(void) | ||
164 | { | ||
165 | const int coreid = cvmx_get_core_num(); | ||
166 | unsigned long long mask; | ||
167 | unsigned long long bist_val; | ||
168 | |||
169 | /* Check BIST results for COP0 registers */ | ||
170 | mask = 0x1f00000000ull; | ||
171 | bist_val = read_octeon_c0_icacheerr(); | ||
172 | if (bist_val & mask) | ||
173 | pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n", | ||
174 | coreid, bist_val); | ||
175 | |||
176 | bist_val = read_octeon_c0_dcacheerr(); | ||
177 | if (bist_val & 1) | ||
178 | pr_err("Core%d L1 Dcache parity error: " | ||
179 | "CacheErr(dcache) = 0x%llx\n", | ||
180 | coreid, bist_val); | ||
181 | |||
182 | mask = 0xfc00000000000000ull; | ||
183 | bist_val = read_c0_cvmmemctl(); | ||
184 | if (bist_val & mask) | ||
185 | pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n", | ||
186 | coreid, bist_val); | ||
187 | |||
188 | write_octeon_c0_dcacheerr(0); | ||
189 | } | ||
190 | |||
191 | #ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB | ||
192 | /** | ||
193 | * Called on every core to setup the wired tlb entry needed | ||
194 | * if CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB is set. | ||
195 | * | ||
196 | */ | ||
197 | static void octeon_hal_setup_per_cpu_reserved32(void *unused) | ||
198 | { | ||
199 | /* | ||
200 | * The config has selected to wire the reserve32 memory for all | ||
201 | * userspace applications. We need to put a wired TLB entry in for each | ||
202 | * 512MB of reserve32 memory. We only handle double 256MB pages here, | ||
203 | * so reserve32 must be a multiple of 512MB. | ||
204 | */ | ||
205 | uint32_t size = CONFIG_CAVIUM_RESERVE32; | ||
206 | uint32_t entrylo0 = | ||
207 | 0x7 | ((octeon_reserve32_memory & ((1ul << 40) - 1)) >> 6); | ||
208 | uint32_t entrylo1 = entrylo0 + (256 << 14); | ||
209 | uint32_t entryhi = (0x80000000UL - (CONFIG_CAVIUM_RESERVE32 << 20)); | ||
210 | while (size >= 512) { | ||
211 | #if 0 | ||
212 | pr_info("CPU%d: Adding double wired TLB entry for 0x%lx\n", | ||
213 | smp_processor_id(), entryhi); | ||
214 | #endif | ||
215 | add_wired_entry(entrylo0, entrylo1, entryhi, PM_256M); | ||
216 | entrylo0 += 512 << 14; | ||
217 | entrylo1 += 512 << 14; | ||
218 | entryhi += 512 << 20; | ||
219 | size -= 512; | ||
220 | } | ||
221 | } | ||
222 | #endif /* CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB */ | ||
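To make the EntryLo/EntryHi arithmetic above concrete, here is a worked example under assumed values: a 512 MB reserve32 region whose named block landed at physical address 0x20000000 (the real address comes from the bootmem allocator at boot, so these numbers are illustrative only):

        /*
         * Assumed: octeon_reserve32_memory = 0x20000000,
         *          CONFIG_CAVIUM_RESERVE32 = 512
         *
         *   entrylo0 = 0x7 | (0x20000000 >> 6)  = 0x00800007
         *   entrylo1 = entrylo0 + (256 << 14)   = 0x00c00007   (256 MB further on)
         *   entryhi  = 0x80000000 - (512 << 20) = 0x60000000
         *
         * The loop runs once (size == 512) and installs a single double
         * 256 MB wired entry mapping virtual 0x60000000-0x7fffffff onto
         * physical 0x20000000-0x3fffffff; the low 0x7 bits mark the entry
         * global, valid and dirty.
         */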
223 | |||
224 | /** | ||
225 | * Called to release the named block which was used to make sure | ||
226 | * that nobody used the memory for something else during | ||
227 | * init. Now we'll free it so userspace apps can use this | ||
228 | * memory region with bootmem_alloc. | ||
229 | * | ||
230 | * This function is called only once from prom_free_prom_memory(). | ||
231 | */ | ||
232 | void octeon_hal_setup_reserved32(void) | ||
233 | { | ||
234 | #ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB | ||
235 | on_each_cpu(octeon_hal_setup_per_cpu_reserved32, NULL, 0, 1); | ||
236 | #endif | ||
237 | } | ||
238 | |||
239 | /** | ||
240 | * Reboot Octeon | ||
241 | * | ||
242 | * @command: Command to pass to the bootloader. Currently ignored. | ||
243 | */ | ||
244 | static void octeon_restart(char *command) | ||
245 | { | ||
246 | /* Disable all watchdogs before soft reset. They don't get cleared */ | ||
247 | #ifdef CONFIG_SMP | ||
248 | int cpu; | ||
249 | for_each_online_cpu(cpu) | ||
250 | cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0); | ||
251 | #else | ||
252 | cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0); | ||
253 | #endif | ||
254 | |||
255 | mb(); | ||
256 | while (1) | ||
257 | cvmx_write_csr(CVMX_CIU_SOFT_RST, 1); | ||
258 | } | ||
259 | |||
260 | |||
261 | /** | ||
262 | * Permanently stop a core. | ||
263 | * | ||
264 | * @arg: Ignored. | ||
265 | */ | ||
266 | static void octeon_kill_core(void *arg) | ||
267 | { | ||
268 | mb(); | ||
269 | if (octeon_is_simulation()) { | ||
270 | /* The simulator needs the watchdog to stop for dead cores */ | ||
271 | cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0); | ||
272 | /* A break instruction causes the simulator to stop a core */ | ||
273 | asm volatile ("sync\nbreak"); | ||
274 | } | ||
275 | } | ||
276 | |||
277 | |||
278 | /** | ||
279 | * Halt the system | ||
280 | */ | ||
281 | static void octeon_halt(void) | ||
282 | { | ||
283 | smp_call_function(octeon_kill_core, NULL, 0); | ||
284 | |||
285 | switch (octeon_bootinfo->board_type) { | ||
286 | case CVMX_BOARD_TYPE_NAO38: | ||
287 | /* Driving a 1 to GPIO 12 shuts off this board */ | ||
288 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1); | ||
289 | cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000); | ||
290 | break; | ||
291 | default: | ||
292 | octeon_write_lcd("PowerOff"); | ||
293 | break; | ||
294 | } | ||
295 | |||
296 | octeon_kill_core(NULL); | ||
297 | } | ||
298 | |||
299 | #if 0 | ||
300 | /** | ||
301 | * Platform time init specifics. | ||
302 | * Nothing special is required, but the hook must exist. | ||
303 | */ | ||
304 | void __init plat_time_init(void) | ||
305 | { | ||
306 | /* Nothing special here, but we are required to have one */ | ||
307 | } | ||
308 | |||
309 | #endif | ||
310 | |||
311 | /** | ||
312 | * Handle all the error condition interrupts that might occur. | ||
313 | * | ||
314 | */ | ||
315 | #ifdef CONFIG_CAVIUM_DECODE_RSL | ||
316 | static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id) | ||
317 | { | ||
318 | cvmx_interrupt_rsl_decode(); | ||
319 | return IRQ_HANDLED; | ||
320 | } | ||
321 | #endif | ||
322 | |||
323 | /** | ||
324 | * Return a string representing the system type | ||
325 | * | ||
326 | * Returns the board type plus the chip model as a string | ||
327 | */ | ||
328 | const char *octeon_board_type_string(void) | ||
329 | { | ||
330 | static char name[80]; | ||
331 | sprintf(name, "%s (%s)", | ||
332 | cvmx_board_type_to_string(octeon_bootinfo->board_type), | ||
333 | octeon_model_get_string(read_c0_prid())); | ||
334 | return name; | ||
335 | } | ||
336 | |||
337 | const char *get_system_type(void) | ||
338 | __attribute__ ((alias("octeon_board_type_string"))); | ||
339 | |||
340 | void octeon_user_io_init(void) | ||
341 | { | ||
342 | union octeon_cvmemctl cvmmemctl; | ||
343 | union cvmx_iob_fau_timeout fau_timeout; | ||
344 | union cvmx_pow_nw_tim nm_tim; | ||
345 | uint64_t cvmctl; | ||
346 | |||
347 | /* Get the current settings for CP0_CVMMEMCTL_REG */ | ||
348 | cvmmemctl.u64 = read_c0_cvmmemctl(); | ||
349 | /* R/W If set, marked write-buffer entries time out the same | ||
350 | * as other entries; if clear, marked write-buffer entries | ||
351 | * use the maximum timeout. */ | ||
352 | cvmmemctl.s.dismarkwblongto = 1; | ||
353 | /* R/W If set, a merged store does not clear the write-buffer | ||
354 | * entry timeout state. */ | ||
355 | cvmmemctl.s.dismrgclrwbto = 0; | ||
356 | /* R/W Two bits that are the MSBs of the resultant CVMSEG LM | ||
357 | * word location for an IOBDMA. The other 8 bits come from the | ||
358 | * SCRADDR field of the IOBDMA. */ | ||
359 | cvmmemctl.s.iobdmascrmsb = 0; | ||
360 | /* R/W If set, SYNCWS and SYNCS only order marked stores; if | ||
361 | * clear, SYNCWS and SYNCS only order unmarked | ||
362 | * stores. SYNCWSMARKED has no effect when DISSYNCWS is | ||
363 | * set. */ | ||
364 | cvmmemctl.s.syncwsmarked = 0; | ||
365 | /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */ | ||
366 | cvmmemctl.s.dissyncws = 0; | ||
367 | /* R/W If set, no stall happens on write buffer full. */ | ||
368 | if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) | ||
369 | cvmmemctl.s.diswbfst = 1; | ||
370 | else | ||
371 | cvmmemctl.s.diswbfst = 0; | ||
372 | /* R/W If set (and SX set), supervisor-level loads/stores can | ||
373 | * use XKPHYS addresses with <48>==0 */ | ||
374 | cvmmemctl.s.xkmemenas = 0; | ||
375 | |||
376 | /* R/W If set (and UX set), user-level loads/stores can use | ||
377 | * XKPHYS addresses with VA<48>==0 */ | ||
378 | cvmmemctl.s.xkmemenau = 0; | ||
379 | |||
380 | /* R/W If set (and SX set), supervisor-level loads/stores can | ||
381 | * use XKPHYS addresses with VA<48>==1 */ | ||
382 | cvmmemctl.s.xkioenas = 0; | ||
383 | |||
384 | /* R/W If set (and UX set), user-level loads/stores can use | ||
385 | * XKPHYS addresses with VA<48>==1 */ | ||
386 | cvmmemctl.s.xkioenau = 0; | ||
387 | |||
388 | /* R/W If set, all stores act as SYNCW (NOMERGE must be set | ||
389 | * when this is set) RW, reset to 0. */ | ||
390 | cvmmemctl.s.allsyncw = 0; | ||
391 | |||
392 | /* R/W If set, no stores merge, and all stores reach the | ||
393 | * coherent bus in order. */ | ||
394 | cvmmemctl.s.nomerge = 0; | ||
395 | /* R/W Selects the bit in the counter used for DID time-outs: | ||
396 | * 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = 2^14. Actual time-out is | ||
397 | * between 1x and 2x this interval. For example, with | ||
398 | * DIDTTO=3, expiration interval is between 16K and 32K. */ | ||
399 | cvmmemctl.s.didtto = 0; | ||
400 | /* R/W If set, the (mem) CSR clock never turns off. */ | ||
401 | cvmmemctl.s.csrckalwys = 0; | ||
402 | /* R/W If set, mclk never turns off. */ | ||
403 | cvmmemctl.s.mclkalwys = 0; | ||
404 | /* R/W Selects the bit in the counter used for write buffer | ||
405 | * flush time-outs: (WBFLT+11) is the bit position in an | ||
406 | * internal counter used to determine expiration. The write | ||
407 | * buffer expires between 1x and 2x this interval. For | ||
408 | * example, with WBFLT = 0, a write buffer expires between 2K | ||
409 | * and 4K cycles after the write buffer entry is allocated. */ | ||
410 | cvmmemctl.s.wbfltime = 0; | ||
411 | /* R/W If set, do not put Istream in the L2 cache. */ | ||
412 | cvmmemctl.s.istrnol2 = 0; | ||
413 | /* R/W The write buffer threshold. */ | ||
414 | cvmmemctl.s.wbthresh = 10; | ||
415 | /* R/W If set, CVMSEG is available for loads/stores in | ||
416 | * kernel/debug mode. */ | ||
417 | #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 | ||
418 | cvmmemctl.s.cvmsegenak = 1; | ||
419 | #else | ||
420 | cvmmemctl.s.cvmsegenak = 0; | ||
421 | #endif | ||
422 | /* R/W If set, CVMSEG is available for loads/stores in | ||
423 | * supervisor mode. */ | ||
424 | cvmmemctl.s.cvmsegenas = 0; | ||
425 | /* R/W If set, CVMSEG is available for loads/stores in user | ||
426 | * mode. */ | ||
427 | cvmmemctl.s.cvmsegenau = 0; | ||
428 | /* R/W Size of local memory in cache blocks, 54 (6912 bytes) | ||
429 | * is max legal value. */ | ||
430 | cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE; | ||
431 | |||
432 | |||
433 | if (smp_processor_id() == 0) | ||
434 | pr_notice("CVMSEG size: %d cache lines (%d bytes)\n", | ||
435 | CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE, | ||
436 | CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128); | ||
437 | |||
438 | write_c0_cvmmemctl(cvmmemctl.u64); | ||
439 | |||
440 | /* Move the performance counter interrupts to IRQ 6 */ | ||
441 | cvmctl = read_c0_cvmctl(); | ||
442 | cvmctl &= ~(7 << 7); | ||
443 | cvmctl |= 6 << 7; | ||
444 | write_c0_cvmctl(cvmctl); | ||
445 | |||
446 | /* Set a default for the hardware timeouts */ | ||
447 | fau_timeout.u64 = 0; | ||
448 | fau_timeout.s.tout_val = 0xfff; | ||
449 | /* Disable tagwait FAU timeout */ | ||
450 | fau_timeout.s.tout_enb = 0; | ||
451 | cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64); | ||
452 | |||
453 | nm_tim.u64 = 0; | ||
454 | /* 4096 cycles */ | ||
455 | nm_tim.s.nw_tim = 3; | ||
456 | cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64); | ||
457 | |||
458 | write_octeon_c0_icacheerr(0); | ||
459 | write_c0_derraddr1(0); | ||
460 | } | ||
461 | |||
462 | /** | ||
463 | * Early entry point for arch setup | ||
464 | */ | ||
465 | void __init prom_init(void) | ||
466 | { | ||
467 | struct cvmx_sysinfo *sysinfo; | ||
468 | const int coreid = cvmx_get_core_num(); | ||
469 | int i; | ||
470 | int argc; | ||
471 | struct uart_port octeon_port; | ||
472 | #ifdef CONFIG_CAVIUM_RESERVE32 | ||
473 | int64_t addr = -1; | ||
474 | #endif | ||
475 | /* | ||
476 | * The bootloader passes a pointer to the boot descriptor in | ||
477 | * $a3, this is available as fw_arg3. | ||
478 | */ | ||
479 | octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3; | ||
480 | octeon_bootinfo = | ||
481 | cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr); | ||
482 | cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr)); | ||
483 | |||
484 | /* | ||
485 | * Only enable the LED controller if we're running on a CN38XX, CN58XX, | ||
486 | * or CN56XX. The CN30XX and CN31XX don't have an LED controller. | ||
487 | */ | ||
488 | if (!octeon_is_simulation() && | ||
489 | octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) { | ||
490 | cvmx_write_csr(CVMX_LED_EN, 0); | ||
491 | cvmx_write_csr(CVMX_LED_PRT, 0); | ||
492 | cvmx_write_csr(CVMX_LED_DBG, 0); | ||
493 | cvmx_write_csr(CVMX_LED_PRT_FMT, 0); | ||
494 | cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32); | ||
495 | cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32); | ||
496 | cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0); | ||
497 | cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0); | ||
498 | cvmx_write_csr(CVMX_LED_EN, 1); | ||
499 | } | ||
500 | #ifdef CONFIG_CAVIUM_RESERVE32 | ||
501 | /* | ||
502 | * We need to temporarily allocate all memory in the reserve32 | ||
503 | * region. This makes sure the kernel doesn't allocate this | ||
504 | * memory when it is getting memory from the | ||
505 | * bootloader. Later, after the memory allocations are | ||
506 | * complete, the reserve32 will be freed. | ||
507 | */ | ||
508 | #ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB | ||
509 | if (CONFIG_CAVIUM_RESERVE32 & 0x1ff) | ||
510 | pr_err("CAVIUM_RESERVE32 isn't a multiple of 512MB. " | ||
511 | "This is required if CAVIUM_RESERVE32_USE_WIRED_TLB " | ||
512 | "is set\n"); | ||
513 | else | ||
514 | addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20, | ||
515 | 0, 0, 512 << 20, | ||
516 | "CAVIUM_RESERVE32", 0); | ||
517 | #else | ||
518 | /* | ||
519 | * Allocate memory for RESERVE32 aligned on a 2MB boundary. This | ||
520 | * is in case we later use hugetlb entries with it. | ||
521 | */ | ||
522 | addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20, | ||
523 | 0, 0, 2 << 20, | ||
524 | "CAVIUM_RESERVE32", 0); | ||
525 | #endif | ||
526 | if (addr < 0) | ||
527 | pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n"); | ||
528 | else | ||
529 | octeon_reserve32_memory = addr; | ||
530 | #endif | ||
531 | |||
532 | #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2 | ||
533 | if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) { | ||
534 | pr_info("Skipping L2 locking due to reduced L2 cache size\n"); | ||
535 | } else { | ||
536 | uint32_t ebase = read_c0_ebase() & 0x3ffff000; | ||
537 | #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB | ||
538 | /* TLB refill */ | ||
539 | cvmx_l2c_lock_mem_region(ebase, 0x100); | ||
540 | #endif | ||
541 | #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION | ||
542 | /* General exception */ | ||
543 | cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80); | ||
544 | #endif | ||
545 | #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT | ||
546 | /* Interrupt handler */ | ||
547 | cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80); | ||
548 | #endif | ||
549 | #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT | ||
550 | cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100); | ||
551 | cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80); | ||
552 | #endif | ||
553 | #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY | ||
554 | cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480); | ||
555 | #endif | ||
556 | } | ||
557 | #endif | ||
558 | |||
559 | sysinfo = cvmx_sysinfo_get(); | ||
560 | memset(sysinfo, 0, sizeof(*sysinfo)); | ||
561 | sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20; | ||
562 | sysinfo->phy_mem_desc_ptr = | ||
563 | cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr); | ||
564 | sysinfo->core_mask = octeon_bootinfo->core_mask; | ||
565 | sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr; | ||
566 | sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz; | ||
567 | sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2; | ||
568 | sysinfo->board_type = octeon_bootinfo->board_type; | ||
569 | sysinfo->board_rev_major = octeon_bootinfo->board_rev_major; | ||
570 | sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor; | ||
571 | memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base, | ||
572 | sizeof(sysinfo->mac_addr_base)); | ||
573 | sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count; | ||
574 | memcpy(sysinfo->board_serial_number, | ||
575 | octeon_bootinfo->board_serial_number, | ||
576 | sizeof(sysinfo->board_serial_number)); | ||
577 | sysinfo->compact_flash_common_base_addr = | ||
578 | octeon_bootinfo->compact_flash_common_base_addr; | ||
579 | sysinfo->compact_flash_attribute_base_addr = | ||
580 | octeon_bootinfo->compact_flash_attribute_base_addr; | ||
581 | sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr; | ||
582 | sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz; | ||
583 | sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags; | ||
584 | |||
585 | |||
586 | octeon_check_cpu_bist(); | ||
587 | |||
588 | octeon_uart = octeon_get_boot_uart(); | ||
589 | |||
590 | /* | ||
591 | * Disable All CIU Interrupts. The ones we need will be | ||
592 | * enabled later. Read the SUM register so we know the write | ||
593 | * completed. | ||
594 | */ | ||
595 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0); | ||
596 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0); | ||
597 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0); | ||
598 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0); | ||
599 | cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2))); | ||
600 | |||
601 | #ifdef CONFIG_SMP | ||
602 | octeon_write_lcd("LinuxSMP"); | ||
603 | #else | ||
604 | octeon_write_lcd("Linux"); | ||
605 | #endif | ||
606 | |||
607 | #ifdef CONFIG_CAVIUM_GDB | ||
608 | /* | ||
609 | * When debugging the linux kernel, force the cores to enter | ||
610 | * the debug exception handler to break in. | ||
611 | */ | ||
612 | if (octeon_get_boot_debug_flag()) { | ||
613 | cvmx_write_csr(CVMX_CIU_DINT, 1 << cvmx_get_core_num()); | ||
614 | cvmx_read_csr(CVMX_CIU_DINT); | ||
615 | } | ||
616 | #endif | ||
617 | |||
618 | /* | ||
619 | * BIST should always be enabled when doing a soft reset. L2 | ||
620 | * Cache locking for instance is not cleared unless BIST is | ||
621 | * enabled. Unfortunately, due to chip erratum G-200 for | ||
622 | * CN38XX and CN31XX, BIST must be disabled on these parts. | ||
623 | */ | ||
624 | if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) || | ||
625 | OCTEON_IS_MODEL(OCTEON_CN31XX)) | ||
626 | cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0); | ||
627 | else | ||
628 | cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1); | ||
629 | |||
630 | /* Default to 64MB in the simulator to speed things up */ | ||
631 | if (octeon_is_simulation()) | ||
632 | MAX_MEMORY = 64ull << 20; | ||
633 | |||
634 | arcs_cmdline[0] = 0; | ||
635 | argc = octeon_boot_desc_ptr->argc; | ||
636 | for (i = 0; i < argc; i++) { | ||
637 | const char *arg = | ||
638 | cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]); | ||
639 | if ((strncmp(arg, "MEM=", 4) == 0) || | ||
640 | (strncmp(arg, "mem=", 4) == 0)) { | ||
641 | sscanf(arg + 4, "%llu", &MAX_MEMORY); | ||
642 | MAX_MEMORY <<= 20; | ||
643 | if (MAX_MEMORY == 0) | ||
644 | MAX_MEMORY = 32ull << 30; | ||
645 | } else if (strcmp(arg, "ecc_verbose") == 0) { | ||
646 | #ifdef CONFIG_CAVIUM_REPORT_SINGLE_BIT_ECC | ||
647 | __cvmx_interrupt_ecc_report_single_bit_errors = 1; | ||
648 | pr_notice("Reporting of single bit ECC errors is " | ||
649 | "turned on\n"); | ||
650 | #endif | ||
651 | } else if (strlen(arcs_cmdline) + strlen(arg) + 1 < | ||
652 | sizeof(arcs_cmdline) - 1) { | ||
653 | strcat(arcs_cmdline, " "); | ||
654 | strcat(arcs_cmdline, arg); | ||
655 | } | ||
656 | } | ||
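For reference, the MEM=/mem= handling above works out as follows (argument values are illustrative):

        mem=512    ->  MAX_MEMORY = 512 << 20       (512 MB)
        mem=2048   ->  MAX_MEMORY = 2048 << 20      (2 GB)
        mem=0      ->  MAX_MEMORY = 32ull << 30     (32 GB, effectively no limit)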
657 | |||
658 | if (strstr(arcs_cmdline, "console=") == NULL) { | ||
659 | #ifdef CONFIG_GDB_CONSOLE | ||
660 | strcat(arcs_cmdline, " console=gdb"); | ||
661 | #else | ||
662 | #ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL | ||
663 | strcat(arcs_cmdline, " console=ttyS0,115200"); | ||
664 | #else | ||
665 | if (octeon_uart == 1) | ||
666 | strcat(arcs_cmdline, " console=ttyS1,115200"); | ||
667 | else | ||
668 | strcat(arcs_cmdline, " console=ttyS0,115200"); | ||
669 | #endif | ||
670 | #endif | ||
671 | } | ||
672 | |||
673 | if (octeon_is_simulation()) { | ||
674 | /* | ||
675 | * The simulator uses an mtdram device pre-filled with | ||
676 | * the filesystem. Also specify the calibration delay | ||
677 | * to avoid calculating it every time. | ||
678 | */ | ||
679 | strcat(arcs_cmdline, " rw root=1f00" | ||
680 | " lpj=60176 slram=root,0x40000000,+1073741824"); | ||
681 | } | ||
682 | |||
683 | mips_hpt_frequency = octeon_get_clock_rate(); | ||
684 | |||
685 | octeon_init_cvmcount(); | ||
686 | |||
687 | _machine_restart = octeon_restart; | ||
688 | _machine_halt = octeon_halt; | ||
689 | |||
690 | memset(&octeon_port, 0, sizeof(octeon_port)); | ||
691 | /* | ||
692 | * For early_serial_setup we don't set the port type or | ||
693 | * UPF_FIXED_TYPE. | ||
694 | */ | ||
695 | octeon_port.flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ; | ||
696 | octeon_port.iotype = UPIO_MEM; | ||
697 | /* I/O addresses are every 8 bytes */ | ||
698 | octeon_port.regshift = 3; | ||
699 | /* Clock rate of the chip */ | ||
700 | octeon_port.uartclk = mips_hpt_frequency; | ||
701 | octeon_port.fifosize = 64; | ||
702 | octeon_port.mapbase = 0x0001180000000800ull + (1024 * octeon_uart); | ||
703 | octeon_port.membase = cvmx_phys_to_ptr(octeon_port.mapbase); | ||
704 | octeon_port.serial_in = octeon_serial_in; | ||
705 | octeon_port.serial_out = octeon_serial_out; | ||
706 | #ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL | ||
707 | octeon_port.line = 0; | ||
708 | #else | ||
709 | octeon_port.line = octeon_uart; | ||
710 | #endif | ||
711 | octeon_port.irq = 42 + octeon_uart; | ||
712 | early_serial_setup(&octeon_port); | ||
713 | |||
714 | octeon_user_io_init(); | ||
715 | register_smp_ops(&octeon_smp_ops); | ||
716 | } | ||
717 | |||
718 | void __init plat_mem_setup(void) | ||
719 | { | ||
720 | uint64_t mem_alloc_size; | ||
721 | uint64_t total; | ||
722 | int64_t memory; | ||
723 | |||
724 | total = 0; | ||
725 | |||
726 | /* First add the init memory we will be returning. */ | ||
727 | memory = __pa_symbol(&__init_begin) & PAGE_MASK; | ||
728 | mem_alloc_size = (__pa_symbol(&__init_end) & PAGE_MASK) - memory; | ||
729 | if (mem_alloc_size > 0) { | ||
730 | add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM); | ||
731 | total += mem_alloc_size; | ||
732 | } | ||
733 | |||
734 | /* | ||
735 | * The MIPS memory init uses the first memory location for | ||
736 | * some memory vectors. When SPARSEMEM is in use, it doesn't | ||
737 | * verify that the size is big enough for the final | ||
738 | * vectors. Making the smallest chunk 4MB seems to be enough | ||
739 | * to work consistently. | ||
740 | */ | ||
741 | mem_alloc_size = 4 << 20; | ||
742 | if (mem_alloc_size > MAX_MEMORY) | ||
743 | mem_alloc_size = MAX_MEMORY; | ||
744 | |||
745 | /* | ||
746 | * When allocating memory, we want incrementing addresses from | ||
747 | * bootmem_alloc so the code in add_memory_region can merge | ||
748 | * regions next to each other. | ||
749 | */ | ||
750 | cvmx_bootmem_lock(); | ||
751 | while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX) | ||
752 | && (total < MAX_MEMORY)) { | ||
753 | #if defined(CONFIG_64BIT) || defined(CONFIG_64BIT_PHYS_ADDR) | ||
754 | memory = cvmx_bootmem_phy_alloc(mem_alloc_size, | ||
755 | __pa_symbol(&__init_end), -1, | ||
756 | 0x100000, | ||
757 | CVMX_BOOTMEM_FLAG_NO_LOCKING); | ||
758 | #elif defined(CONFIG_HIGHMEM) | ||
759 | memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 1ull << 31, | ||
760 | 0x100000, | ||
761 | CVMX_BOOTMEM_FLAG_NO_LOCKING); | ||
762 | #else | ||
763 | memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 512 << 20, | ||
764 | 0x100000, | ||
765 | CVMX_BOOTMEM_FLAG_NO_LOCKING); | ||
766 | #endif | ||
767 | if (memory >= 0) { | ||
768 | /* | ||
769 | * This function automatically merges address | ||
770 | * regions next to each other if they are | ||
771 | * received in incrementing order. | ||
772 | */ | ||
773 | add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM); | ||
774 | total += mem_alloc_size; | ||
775 | } else { | ||
776 | break; | ||
777 | } | ||
778 | } | ||
779 | cvmx_bootmem_unlock(); | ||
780 | |||
781 | #ifdef CONFIG_CAVIUM_RESERVE32 | ||
782 | /* | ||
783 | * Now that we've allocated the kernel memory it is safe to | ||
784 | * free the reserved region. We free it here so that builtin | ||
785 | * drivers can use the memory. | ||
786 | */ | ||
787 | if (octeon_reserve32_memory) | ||
788 | cvmx_bootmem_free_named("CAVIUM_RESERVE32"); | ||
789 | #endif /* CONFIG_CAVIUM_RESERVE32 */ | ||
790 | |||
791 | if (total == 0) | ||
792 | panic("Unable to allocate memory from " | ||
793 | "cvmx_bootmem_phy_alloc\n"); | ||
794 | } | ||
795 | |||
796 | |||
797 | int prom_putchar(char c) | ||
798 | { | ||
799 | uint64_t lsrval; | ||
800 | |||
801 | /* Spin until there is room */ | ||
802 | do { | ||
803 | lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart)); | ||
804 | } while ((lsrval & 0x20) == 0); | ||
805 | |||
806 | /* Write the byte */ | ||
807 | cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c); | ||
808 | return 1; | ||
809 | } | ||
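prom_putchar() is the single-byte hook the MIPS early console code calls before the real 8250 driver is up. A minimal sketch of how a caller could emit a whole string with it (hypothetical helper, not part of this patch):

        static void prom_puts(const char *s)
        {
                while (*s) {
                        if (*s == '\n')
                                prom_putchar('\r');  /* CR-LF for raw terminals */
                        prom_putchar(*s++);
                }
        }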
810 | |||
811 | void prom_free_prom_memory(void) | ||
812 | { | ||
813 | #ifdef CONFIG_CAVIUM_DECODE_RSL | ||
814 | cvmx_interrupt_rsl_enable(); | ||
815 | |||
816 | /* Add an interrupt handler for general failures. */ | ||
817 | if (request_irq(OCTEON_IRQ_RML, octeon_rlm_interrupt, IRQF_SHARED, | ||
818 | "RML/RSL", octeon_rlm_interrupt)) { | ||
819 | panic("Unable to request_irq(OCTEON_IRQ_RML)\n"); | ||
820 | } | ||
821 | #endif | ||
822 | |||
823 | /* This call is here so that it is performed after any TLB | ||
824 | initializations. It needs to be after these in case the | ||
825 | CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB option is set */ | ||
826 | octeon_hal_setup_reserved32(); | ||
827 | } | ||
828 | |||
829 | static struct octeon_cf_data octeon_cf_data; | ||
830 | |||
831 | static int __init octeon_cf_device_init(void) | ||
832 | { | ||
833 | union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg; | ||
834 | unsigned long base_ptr, region_base, region_size; | ||
835 | struct platform_device *pd; | ||
836 | struct resource cf_resources[3]; | ||
837 | unsigned int num_resources; | ||
838 | int i; | ||
839 | int ret = 0; | ||
840 | |||
841 | /* Setup octeon-cf platform device if present. */ | ||
842 | base_ptr = 0; | ||
843 | if (octeon_bootinfo->major_version == 1 | ||
844 | && octeon_bootinfo->minor_version >= 1) { | ||
845 | if (octeon_bootinfo->compact_flash_common_base_addr) | ||
846 | base_ptr = | ||
847 | octeon_bootinfo->compact_flash_common_base_addr; | ||
848 | } else { | ||
849 | base_ptr = 0x1d000800; | ||
850 | } | ||
851 | |||
852 | if (!base_ptr) | ||
853 | return ret; | ||
854 | |||
855 | /* Find CS0 region. */ | ||
856 | for (i = 0; i < 8; i++) { | ||
857 | mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i)); | ||
858 | region_base = mio_boot_reg_cfg.s.base << 16; | ||
859 | region_size = (mio_boot_reg_cfg.s.size + 1) << 16; | ||
860 | if (mio_boot_reg_cfg.s.en && base_ptr >= region_base | ||
861 | && base_ptr < region_base + region_size) | ||
862 | break; | ||
863 | } | ||
864 | if (i >= 7) { | ||
865 | /* i and i + 1 are CS0 and CS1, both must be less than 8. */ | ||
866 | goto out; | ||
867 | } | ||
868 | octeon_cf_data.base_region = i; | ||
869 | octeon_cf_data.is16bit = mio_boot_reg_cfg.s.width; | ||
870 | octeon_cf_data.base_region_bias = base_ptr - region_base; | ||
871 | memset(cf_resources, 0, sizeof(cf_resources)); | ||
872 | num_resources = 0; | ||
873 | cf_resources[num_resources].flags = IORESOURCE_MEM; | ||
874 | cf_resources[num_resources].start = region_base; | ||
875 | cf_resources[num_resources].end = region_base + region_size - 1; | ||
876 | num_resources++; | ||
877 | |||
878 | |||
879 | if (!(base_ptr & 0xfffful)) { | ||
880 | /* | ||
881 | * Boot loader signals availability of DMA (true_ide | ||
882 | * mode) by setting low order bits of base_ptr to | ||
883 | * zero. | ||
884 | */ | ||
885 | |||
886 | /* Assume that CS1 immediately follows. */ | ||
887 | mio_boot_reg_cfg.u64 = | ||
888 | cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i + 1)); | ||
889 | region_base = mio_boot_reg_cfg.s.base << 16; | ||
890 | region_size = (mio_boot_reg_cfg.s.size + 1) << 16; | ||
891 | if (!mio_boot_reg_cfg.s.en) | ||
892 | goto out; | ||
893 | |||
894 | cf_resources[num_resources].flags = IORESOURCE_MEM; | ||
895 | cf_resources[num_resources].start = region_base; | ||
896 | cf_resources[num_resources].end = region_base + region_size - 1; | ||
897 | num_resources++; | ||
898 | |||
899 | octeon_cf_data.dma_engine = 0; | ||
900 | cf_resources[num_resources].flags = IORESOURCE_IRQ; | ||
901 | cf_resources[num_resources].start = OCTEON_IRQ_BOOTDMA; | ||
902 | cf_resources[num_resources].end = OCTEON_IRQ_BOOTDMA; | ||
903 | num_resources++; | ||
904 | } else { | ||
905 | octeon_cf_data.dma_engine = -1; | ||
906 | } | ||
907 | |||
908 | pd = platform_device_alloc("pata_octeon_cf", -1); | ||
909 | if (!pd) { | ||
910 | ret = -ENOMEM; | ||
911 | goto out; | ||
912 | } | ||
913 | pd->dev.platform_data = &octeon_cf_data; | ||
914 | |||
915 | ret = platform_device_add_resources(pd, cf_resources, num_resources); | ||
916 | if (ret) | ||
917 | goto fail; | ||
918 | |||
919 | ret = platform_device_add(pd); | ||
920 | if (ret) | ||
921 | goto fail; | ||
922 | |||
923 | return ret; | ||
924 | fail: | ||
925 | platform_device_put(pd); | ||
926 | out: | ||
927 | return ret; | ||
928 | } | ||
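To make the chip-select scan in octeon_cf_device_init() concrete, here is a worked example with assumed register contents (illustrative values, not from a real board): suppose the search matches at i = 2 with MIO_BOOT_REG_CFG2 reading back en = 1, base = 0x1d00 and size = 0x3. Then:

        region_base = 0x1d00 << 16    = 0x1d000000
        region_size = (0x3 + 1) << 16 = 0x00040000   (256 KB)

The default base_ptr of 0x1d000800 falls inside [0x1d000000, 0x1d03ffff], so base_region = 2 and base_region_bias = 0x800.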
929 | device_initcall(octeon_cf_device_init); | ||
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
new file mode 100644
index 000000000000..24e0ad63980a
--- /dev/null
+++ b/arch/mips/cavium-octeon/smp.c
@@ -0,0 +1,211 @@
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2004-2008 Cavium Networks | ||
7 | */ | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/delay.h> | ||
10 | #include <linux/smp.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/kernel_stat.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/module.h> | ||
15 | |||
16 | #include <asm/mmu_context.h> | ||
17 | #include <asm/system.h> | ||
18 | #include <asm/time.h> | ||
19 | |||
20 | #include <asm/octeon/octeon.h> | ||
21 | |||
22 | volatile unsigned long octeon_processor_boot = 0xff; | ||
23 | volatile unsigned long octeon_processor_sp; | ||
24 | volatile unsigned long octeon_processor_gp; | ||
25 | |||
26 | static irqreturn_t mailbox_interrupt(int irq, void *dev_id) | ||
27 | { | ||
28 | const int coreid = cvmx_get_core_num(); | ||
29 | uint64_t action; | ||
30 | |||
31 | /* Load the mailbox register to figure out what we're supposed to do */ | ||
32 | action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)); | ||
33 | |||
34 | /* Clear the mailbox to clear the interrupt */ | ||
35 | cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action); | ||
36 | |||
37 | if (action & SMP_CALL_FUNCTION) | ||
38 | smp_call_function_interrupt(); | ||
39 | |||
40 | /* Check if we've been told to flush the icache */ | ||
41 | if (action & SMP_ICACHE_FLUSH) | ||
42 | asm volatile ("synci 0($0)\n"); | ||
43 | return IRQ_HANDLED; | ||
44 | } | ||
45 | |||
46 | /** | ||
47 | * Cause the function described by call_data to be executed on the passed | ||
48 | * cpu. When the function has finished, increment the finished field of | ||
49 | * call_data. | ||
50 | */ | ||
51 | void octeon_send_ipi_single(int cpu, unsigned int action) | ||
52 | { | ||
53 | int coreid = cpu_logical_map(cpu); | ||
54 | /* | ||
55 | pr_info("SMP: Mailbox send cpu=%d, coreid=%d, action=%u\n", cpu, | ||
56 | coreid, action); | ||
57 | */ | ||
58 | cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action); | ||
59 | } | ||
60 | |||
61 | static inline void octeon_send_ipi_mask(cpumask_t mask, unsigned int action) | ||
62 | { | ||
63 | unsigned int i; | ||
64 | |||
65 | for_each_cpu_mask(i, mask) | ||
66 | octeon_send_ipi_single(i, action); | ||
67 | } | ||
68 | |||
69 | /** | ||
70 | * Detect available CPUs, populate cpu_possible_map | ||
71 | */ | ||
72 | static void octeon_smp_setup(void) | ||
73 | { | ||
74 | const int coreid = cvmx_get_core_num(); | ||
75 | int cpus; | ||
76 | int id; | ||
77 | |||
78 | int core_mask = octeon_get_boot_coremask(); | ||
79 | |||
80 | cpus_clear(cpu_possible_map); | ||
81 | __cpu_number_map[coreid] = 0; | ||
82 | __cpu_logical_map[0] = coreid; | ||
83 | cpu_set(0, cpu_possible_map); | ||
84 | |||
85 | cpus = 1; | ||
86 | for (id = 0; id < 16; id++) { | ||
87 | if ((id != coreid) && (core_mask & (1 << id))) { | ||
88 | cpu_set(cpus, cpu_possible_map); | ||
89 | __cpu_number_map[id] = cpus; | ||
90 | __cpu_logical_map[cpus] = id; | ||
91 | cpus++; | ||
92 | } | ||
93 | } | ||
94 | } | ||
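A worked example of the mapping octeon_smp_setup() builds, under an assumed boot configuration: core_mask = 0x0d (cores 0, 2 and 3) with Linux started on core 0. The loop above then produces:

        cpu 0 -> core 0   (__cpu_number_map[0] = 0, __cpu_logical_map[0] = 0)
        cpu 1 -> core 2   (__cpu_number_map[2] = 1, __cpu_logical_map[1] = 2)
        cpu 2 -> core 3   (__cpu_number_map[3] = 2, __cpu_logical_map[2] = 3)

and cpu_possible_map ends up with CPUs 0, 1 and 2 set.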
95 | |||
96 | /** | ||
97 | * Firmware CPU startup hook | ||
98 | * | ||
99 | */ | ||
100 | static void octeon_boot_secondary(int cpu, struct task_struct *idle) | ||
101 | { | ||
102 | int count; | ||
103 | |||
104 | pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu, | ||
105 | cpu_logical_map(cpu)); | ||
106 | |||
107 | octeon_processor_sp = __KSTK_TOS(idle); | ||
108 | octeon_processor_gp = (unsigned long)(task_thread_info(idle)); | ||
109 | octeon_processor_boot = cpu_logical_map(cpu); | ||
110 | mb(); | ||
111 | |||
112 | count = 10000; | ||
113 | while (octeon_processor_sp && count) { | ||
114 | /* Waiting for processor to get the SP and GP */ | ||
115 | udelay(1); | ||
116 | count--; | ||
117 | } | ||
118 | if (count == 0) | ||
119 | pr_err("Secondary boot timeout\n"); | ||
120 | } | ||
121 | |||
122 | /** | ||
123 | * After we've done initial boot, this function is called to allow the | ||
124 | * board code to clean up state, if needed | ||
125 | */ | ||
126 | static void octeon_init_secondary(void) | ||
127 | { | ||
128 | const int coreid = cvmx_get_core_num(); | ||
129 | union cvmx_ciu_intx_sum0 interrupt_enable; | ||
130 | |||
131 | octeon_check_cpu_bist(); | ||
132 | octeon_init_cvmcount(); | ||
133 | /* | ||
134 | pr_info("SMP: CPU%d (CoreId %lu) started\n", cpu, coreid); | ||
135 | */ | ||
136 | /* Enable Mailbox interrupts to this core. These are the only | ||
137 | interrupts allowed on line 3 */ | ||
138 | cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), 0xffffffff); | ||
139 | interrupt_enable.u64 = 0; | ||
140 | interrupt_enable.s.mbox = 0x3; | ||
141 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), interrupt_enable.u64); | ||
142 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0); | ||
143 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0); | ||
144 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0); | ||
145 | /* Enable core interrupt processing for 2,3 and 7 */ | ||
146 | set_c0_status(0x8c01); | ||
147 | } | ||
148 | |||
149 | /** | ||
150 | * Callout to firmware before smp_init | ||
151 | * | ||
152 | */ | ||
153 | void octeon_prepare_cpus(unsigned int max_cpus) | ||
154 | { | ||
155 | cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff); | ||
156 | if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_SHARED, | ||
157 | "mailbox0", mailbox_interrupt)) { | ||
158 | panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n"); | ||
159 | } | ||
160 | if (request_irq(OCTEON_IRQ_MBOX1, mailbox_interrupt, IRQF_SHARED, | ||
161 | "mailbox1", mailbox_interrupt)) { | ||
162 | panic("Cannot request_irq(OCTEON_IRQ_MBOX1)\n"); | ||
163 | } | ||
164 | } | ||
165 | |||
166 | /** | ||
167 | * Last chance for the board code to finish SMP initialization before | ||
168 | * the CPU is "online". | ||
169 | */ | ||
170 | static void octeon_smp_finish(void) | ||
171 | { | ||
172 | #ifdef CONFIG_CAVIUM_GDB | ||
173 | unsigned long tmp; | ||
174 | /* Pulse the MCD0 signal on Ctrl-C to stop all the cores, and leave | ||
175 | MCD0 unmasked on this core so we know the signal is received by | ||
176 | someone. */ | ||
177 | asm volatile ("dmfc0 %0, $22\n" | ||
178 | "ori %0, %0, 0x9100\n" "dmtc0 %0, $22\n" : "=r" (tmp)); | ||
179 | #endif | ||
180 | |||
181 | octeon_user_io_init(); | ||
182 | |||
183 | /* to generate the first CPU timer interrupt */ | ||
184 | write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ); | ||
185 | } | ||
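The write_c0_compare() line arms the first CP0 timer interrupt one jiffy into the future. For instance, with an assumed 500 MHz core clock (mips_hpt_frequency = 500000000) and HZ = 100:

        500000000 / 100 = 5000000 cycles  ->  first tick 10 ms after this core comes up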
186 | |||
187 | /** | ||
188 | * Hook for after all CPUs are online | ||
189 | */ | ||
190 | static void octeon_cpus_done(void) | ||
191 | { | ||
192 | #ifdef CONFIG_CAVIUM_GDB | ||
193 | unsigned long tmp; | ||
194 | /* Pulse the MCD0 signal on Ctrl-C to stop all the cores, and leave | ||
195 | MCD0 unmasked on this core so we know the signal is received by | ||
196 | someone. */ | ||
197 | asm volatile ("dmfc0 %0, $22\n" | ||
198 | "ori %0, %0, 0x9100\n" "dmtc0 %0, $22\n" : "=r" (tmp)); | ||
199 | #endif | ||
200 | } | ||
201 | |||
202 | struct plat_smp_ops octeon_smp_ops = { | ||
203 | .send_ipi_single = octeon_send_ipi_single, | ||
204 | .send_ipi_mask = octeon_send_ipi_mask, | ||
205 | .init_secondary = octeon_init_secondary, | ||
206 | .smp_finish = octeon_smp_finish, | ||
207 | .cpus_done = octeon_cpus_done, | ||
208 | .boot_secondary = octeon_boot_secondary, | ||
209 | .smp_setup = octeon_smp_setup, | ||
210 | .prepare_cpus = octeon_prepare_cpus, | ||
211 | }; | ||
diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
new file mode 100644
index 000000000000..04ce6e6569da
--- /dev/null
+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
@@ -0,0 +1,78 @@
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2004 Cavium Networks | ||
7 | */ | ||
8 | #ifndef __ASM_MACH_CAVIUM_OCTEON_CPU_FEATURE_OVERRIDES_H | ||
9 | #define __ASM_MACH_CAVIUM_OCTEON_CPU_FEATURE_OVERRIDES_H | ||
10 | |||
11 | #include <linux/types.h> | ||
12 | #include <asm/mipsregs.h> | ||
13 | |||
14 | /* | ||
15 | * Cavium Octeons are MIPS64v2 processors | ||
16 | */ | ||
17 | #define cpu_dcache_line_size() 128 | ||
18 | #define cpu_icache_line_size() 128 | ||
19 | |||
20 | |||
21 | #define cpu_has_4kex 1 | ||
22 | #define cpu_has_3k_cache 0 | ||
23 | #define cpu_has_4k_cache 0 | ||
24 | #define cpu_has_tx39_cache 0 | ||
25 | #define cpu_has_fpu 0 | ||
26 | #define cpu_has_counter 1 | ||
27 | #define cpu_has_watch 1 | ||
28 | #define cpu_has_divec 1 | ||
29 | #define cpu_has_vce 0 | ||
30 | #define cpu_has_cache_cdex_p 0 | ||
31 | #define cpu_has_cache_cdex_s 0 | ||
32 | #define cpu_has_prefetch 1 | ||
33 | |||
34 | /* | ||
35 | * We should disable LL/SC on non-SMP systems, as it is faster to | ||
36 | * disable interrupts for atomic access than to use LL/SC. | ||
37 | * Unfortunately we cannot, as this breaks asm/futex.h. | ||
38 | */ | ||
39 | #define cpu_has_llsc 1 | ||
40 | #define cpu_has_vtag_icache 1 | ||
41 | #define cpu_has_dc_aliases 0 | ||
42 | #define cpu_has_ic_fills_f_dc 0 | ||
43 | #define cpu_has_64bits 1 | ||
44 | #define cpu_has_octeon_cache 1 | ||
45 | #define cpu_has_saa octeon_has_saa() | ||
46 | #define cpu_has_mips32r1 0 | ||
47 | #define cpu_has_mips32r2 0 | ||
48 | #define cpu_has_mips64r1 0 | ||
49 | #define cpu_has_mips64r2 1 | ||
50 | #define cpu_has_dsp 0 | ||
51 | #define cpu_has_mipsmt 0 | ||
52 | #define cpu_has_userlocal 0 | ||
53 | #define cpu_has_vint 0 | ||
54 | #define cpu_has_veic 0 | ||
55 | #define ARCH_HAS_READ_CURRENT_TIMER 1 | ||
56 | #define ARCH_HAS_IRQ_PER_CPU 1 | ||
57 | #define ARCH_HAS_SPINLOCK_PREFETCH 1 | ||
58 | #define spin_lock_prefetch(x) prefetch(x) | ||
59 | #define PREFETCH_STRIDE 128 | ||
60 | |||
61 | static inline int read_current_timer(unsigned long *result) | ||
62 | { | ||
63 | asm volatile ("rdhwr %0,$31\n" | ||
64 | #ifndef CONFIG_64BIT | ||
65 | "\tsll %0, 0" | ||
66 | #endif | ||
67 | : "=r" (*result)); | ||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | static inline int octeon_has_saa(void) | ||
72 | { | ||
73 | int id; | ||
74 | asm volatile ("mfc0 %0, $15,0" : "=r" (id)); | ||
75 | return id >= 0x000d0300; | ||
76 | } | ||
77 | |||
78 | #endif | ||
diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
new file mode 100644
index 000000000000..f30fce92aabb
--- /dev/null
+++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
@@ -0,0 +1,64 @@
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org> | ||
7 | * | ||
8 | * | ||
9 | * Similar to mach-generic/dma-coherence.h except | ||
10 | * plat_device_is_coherent is hard-coded to return 1. | ||
11 | * | ||
12 | */ | ||
13 | #ifndef __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H | ||
14 | #define __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H | ||
15 | |||
16 | struct device; | ||
17 | |||
18 | dma_addr_t octeon_map_dma_mem(struct device *, void *, size_t); | ||
19 | void octeon_unmap_dma_mem(struct device *, dma_addr_t); | ||
20 | |||
21 | static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, | ||
22 | size_t size) | ||
23 | { | ||
24 | return octeon_map_dma_mem(dev, addr, size); | ||
25 | } | ||
26 | |||
27 | static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, | ||
28 | struct page *page) | ||
29 | { | ||
30 | return octeon_map_dma_mem(dev, page_address(page), PAGE_SIZE); | ||
31 | } | ||
32 | |||
33 | static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr) | ||
34 | { | ||
35 | return dma_addr; | ||
36 | } | ||
37 | |||
38 | static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr) | ||
39 | { | ||
40 | octeon_unmap_dma_mem(dev, dma_addr); | ||
41 | } | ||
42 | |||
43 | static inline int plat_dma_supported(struct device *dev, u64 mask) | ||
44 | { | ||
45 | return 1; | ||
46 | } | ||
47 | |||
48 | static inline void plat_extra_sync_for_device(struct device *dev) | ||
49 | { | ||
50 | mb(); | ||
51 | } | ||
52 | |||
53 | static inline int plat_device_is_coherent(struct device *dev) | ||
54 | { | ||
55 | return 1; | ||
56 | } | ||
57 | |||
58 | static inline int plat_dma_mapping_error(struct device *dev, | ||
59 | dma_addr_t dma_addr) | ||
60 | { | ||
61 | return dma_addr == -1; | ||
62 | } | ||
63 | |||
64 | #endif /* __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H */ | ||
diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h
new file mode 100644
index 000000000000..d32220fbf4f1
--- /dev/null
+++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h
@@ -0,0 +1,244 @@
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2004-2008 Cavium Networks | ||
7 | */ | ||
8 | #ifndef __OCTEON_IRQ_H__ | ||
9 | #define __OCTEON_IRQ_H__ | ||
10 | |||
11 | #define NR_IRQS OCTEON_IRQ_LAST | ||
12 | #define MIPS_CPU_IRQ_BASE OCTEON_IRQ_SW0 | ||
13 | |||
14 | /* 0 - 7 represent the i8259 master */ | ||
15 | #define OCTEON_IRQ_I8259M0 0 | ||
16 | #define OCTEON_IRQ_I8259M1 1 | ||
17 | #define OCTEON_IRQ_I8259M2 2 | ||
18 | #define OCTEON_IRQ_I8259M3 3 | ||
19 | #define OCTEON_IRQ_I8259M4 4 | ||
20 | #define OCTEON_IRQ_I8259M5 5 | ||
21 | #define OCTEON_IRQ_I8259M6 6 | ||
22 | #define OCTEON_IRQ_I8259M7 7 | ||
23 | /* 8 - 15 represent the i8259 slave */ | ||
24 | #define OCTEON_IRQ_I8259S0 8 | ||
25 | #define OCTEON_IRQ_I8259S1 9 | ||
26 | #define OCTEON_IRQ_I8259S2 10 | ||
27 | #define OCTEON_IRQ_I8259S3 11 | ||
28 | #define OCTEON_IRQ_I8259S4 12 | ||
29 | #define OCTEON_IRQ_I8259S5 13 | ||
30 | #define OCTEON_IRQ_I8259S6 14 | ||
31 | #define OCTEON_IRQ_I8259S7 15 | ||
32 | /* 16 - 23 represent the 8 MIPS standard interrupt sources */ | ||
33 | #define OCTEON_IRQ_SW0 16 | ||
34 | #define OCTEON_IRQ_SW1 17 | ||
35 | #define OCTEON_IRQ_CIU0 18 | ||
36 | #define OCTEON_IRQ_CIU1 19 | ||
37 | #define OCTEON_IRQ_CIU4 20 | ||
38 | #define OCTEON_IRQ_5 21 | ||
39 | #define OCTEON_IRQ_PERF 22 | ||
40 | #define OCTEON_IRQ_TIMER 23 | ||
41 | /* 24 - 87 represent the sources in CIU_INTX_EN0 */ | ||
42 | #define OCTEON_IRQ_WORKQ0 24 | ||
43 | #define OCTEON_IRQ_WORKQ1 25 | ||
44 | #define OCTEON_IRQ_WORKQ2 26 | ||
45 | #define OCTEON_IRQ_WORKQ3 27 | ||
46 | #define OCTEON_IRQ_WORKQ4 28 | ||
47 | #define OCTEON_IRQ_WORKQ5 29 | ||
48 | #define OCTEON_IRQ_WORKQ6 30 | ||
49 | #define OCTEON_IRQ_WORKQ7 31 | ||
50 | #define OCTEON_IRQ_WORKQ8 32 | ||
51 | #define OCTEON_IRQ_WORKQ9 33 | ||
52 | #define OCTEON_IRQ_WORKQ10 34 | ||
53 | #define OCTEON_IRQ_WORKQ11 35 | ||
54 | #define OCTEON_IRQ_WORKQ12 36 | ||
55 | #define OCTEON_IRQ_WORKQ13 37 | ||
56 | #define OCTEON_IRQ_WORKQ14 38 | ||
57 | #define OCTEON_IRQ_WORKQ15 39 | ||
58 | #define OCTEON_IRQ_GPIO0 40 | ||
59 | #define OCTEON_IRQ_GPIO1 41 | ||
60 | #define OCTEON_IRQ_GPIO2 42 | ||
61 | #define OCTEON_IRQ_GPIO3 43 | ||
62 | #define OCTEON_IRQ_GPIO4 44 | ||
63 | #define OCTEON_IRQ_GPIO5 45 | ||
64 | #define OCTEON_IRQ_GPIO6 46 | ||
65 | #define OCTEON_IRQ_GPIO7 47 | ||
66 | #define OCTEON_IRQ_GPIO8 48 | ||
67 | #define OCTEON_IRQ_GPIO9 49 | ||
68 | #define OCTEON_IRQ_GPIO10 50 | ||
69 | #define OCTEON_IRQ_GPIO11 51 | ||
70 | #define OCTEON_IRQ_GPIO12 52 | ||
71 | #define OCTEON_IRQ_GPIO13 53 | ||
72 | #define OCTEON_IRQ_GPIO14 54 | ||
73 | #define OCTEON_IRQ_GPIO15 55 | ||
74 | #define OCTEON_IRQ_MBOX0 56 | ||
75 | #define OCTEON_IRQ_MBOX1 57 | ||
76 | #define OCTEON_IRQ_UART0 58 | ||
77 | #define OCTEON_IRQ_UART1 59 | ||
78 | #define OCTEON_IRQ_PCI_INT0 60 | ||
79 | #define OCTEON_IRQ_PCI_INT1 61 | ||
80 | #define OCTEON_IRQ_PCI_INT2 62 | ||
81 | #define OCTEON_IRQ_PCI_INT3 63 | ||
82 | #define OCTEON_IRQ_PCI_MSI0 64 | ||
83 | #define OCTEON_IRQ_PCI_MSI1 65 | ||
84 | #define OCTEON_IRQ_PCI_MSI2 66 | ||
85 | #define OCTEON_IRQ_PCI_MSI3 67 | ||
86 | #define OCTEON_IRQ_RESERVED68 68 /* Summary of CIU_INT_SUM1 */ | ||
87 | #define OCTEON_IRQ_TWSI 69 | ||
88 | #define OCTEON_IRQ_RML 70 | ||
89 | #define OCTEON_IRQ_TRACE 71 | ||
90 | #define OCTEON_IRQ_GMX_DRP0 72 | ||
91 | #define OCTEON_IRQ_GMX_DRP1 73 | ||
92 | #define OCTEON_IRQ_IPD_DRP 74 | ||
93 | #define OCTEON_IRQ_KEY_ZERO 75 | ||
94 | #define OCTEON_IRQ_TIMER0 76 | ||
95 | #define OCTEON_IRQ_TIMER1 77 | ||
96 | #define OCTEON_IRQ_TIMER2 78 | ||
97 | #define OCTEON_IRQ_TIMER3 79 | ||
98 | #define OCTEON_IRQ_USB0 80 | ||
99 | #define OCTEON_IRQ_PCM 81 | ||
100 | #define OCTEON_IRQ_MPI 82 | ||
101 | #define OCTEON_IRQ_TWSI2 83 | ||
102 | #define OCTEON_IRQ_POWIQ 84 | ||
103 | #define OCTEON_IRQ_IPDPPTHR 85 | ||
104 | #define OCTEON_IRQ_MII0 86 | ||
105 | #define OCTEON_IRQ_BOOTDMA 87 | ||
106 | /* 88 - 151 represent the sources in CIU_INTX_EN1 */ | ||
107 | #define OCTEON_IRQ_WDOG0 88 | ||
108 | #define OCTEON_IRQ_WDOG1 89 | ||
109 | #define OCTEON_IRQ_WDOG2 90 | ||
110 | #define OCTEON_IRQ_WDOG3 91 | ||
111 | #define OCTEON_IRQ_WDOG4 92 | ||
112 | #define OCTEON_IRQ_WDOG5 93 | ||
113 | #define OCTEON_IRQ_WDOG6 94 | ||
114 | #define OCTEON_IRQ_WDOG7 95 | ||
115 | #define OCTEON_IRQ_WDOG8 96 | ||
116 | #define OCTEON_IRQ_WDOG9 97 | ||
117 | #define OCTEON_IRQ_WDOG10 98 | ||
118 | #define OCTEON_IRQ_WDOG11 99 | ||
119 | #define OCTEON_IRQ_WDOG12 100 | ||
120 | #define OCTEON_IRQ_WDOG13 101 | ||
121 | #define OCTEON_IRQ_WDOG14 102 | ||
122 | #define OCTEON_IRQ_WDOG15 103 | ||
123 | #define OCTEON_IRQ_UART2 104 | ||
124 | #define OCTEON_IRQ_USB1 105 | ||
125 | #define OCTEON_IRQ_MII1 106 | ||
126 | #define OCTEON_IRQ_RESERVED107 107 | ||
127 | #define OCTEON_IRQ_RESERVED108 108 | ||
128 | #define OCTEON_IRQ_RESERVED109 109 | ||
129 | #define OCTEON_IRQ_RESERVED110 110 | ||
130 | #define OCTEON_IRQ_RESERVED111 111 | ||
131 | #define OCTEON_IRQ_RESERVED112 112 | ||
132 | #define OCTEON_IRQ_RESERVED113 113 | ||
133 | #define OCTEON_IRQ_RESERVED114 114 | ||
134 | #define OCTEON_IRQ_RESERVED115 115 | ||
135 | #define OCTEON_IRQ_RESERVED116 116 | ||
136 | #define OCTEON_IRQ_RESERVED117 117 | ||
137 | #define OCTEON_IRQ_RESERVED118 118 | ||
138 | #define OCTEON_IRQ_RESERVED119 119 | ||
139 | #define OCTEON_IRQ_RESERVED120 120 | ||
140 | #define OCTEON_IRQ_RESERVED121 121 | ||
141 | #define OCTEON_IRQ_RESERVED122 122 | ||
142 | #define OCTEON_IRQ_RESERVED123 123 | ||
143 | #define OCTEON_IRQ_RESERVED124 124 | ||
144 | #define OCTEON_IRQ_RESERVED125 125 | ||
145 | #define OCTEON_IRQ_RESERVED126 126 | ||
146 | #define OCTEON_IRQ_RESERVED127 127 | ||
147 | #define OCTEON_IRQ_RESERVED128 128 | ||
148 | #define OCTEON_IRQ_RESERVED129 129 | ||
149 | #define OCTEON_IRQ_RESERVED130 130 | ||
150 | #define OCTEON_IRQ_RESERVED131 131 | ||
151 | #define OCTEON_IRQ_RESERVED132 132 | ||
152 | #define OCTEON_IRQ_RESERVED133 133 | ||
153 | #define OCTEON_IRQ_RESERVED134 134 | ||
154 | #define OCTEON_IRQ_RESERVED135 135 | ||
155 | #define OCTEON_IRQ_RESERVED136 136 | ||
156 | #define OCTEON_IRQ_RESERVED137 137 | ||
157 | #define OCTEON_IRQ_RESERVED138 138 | ||
158 | #define OCTEON_IRQ_RESERVED139 139 | ||
159 | #define OCTEON_IRQ_RESERVED140 140 | ||
160 | #define OCTEON_IRQ_RESERVED141 141 | ||
161 | #define OCTEON_IRQ_RESERVED142 142 | ||
162 | #define OCTEON_IRQ_RESERVED143 143 | ||
163 | #define OCTEON_IRQ_RESERVED144 144 | ||
164 | #define OCTEON_IRQ_RESERVED145 145 | ||
165 | #define OCTEON_IRQ_RESERVED146 146 | ||
166 | #define OCTEON_IRQ_RESERVED147 147 | ||
167 | #define OCTEON_IRQ_RESERVED148 148 | ||
168 | #define OCTEON_IRQ_RESERVED149 149 | ||
169 | #define OCTEON_IRQ_RESERVED150 150 | ||
170 | #define OCTEON_IRQ_RESERVED151 151 | ||
171 | |||
172 | #ifdef CONFIG_PCI_MSI | ||
173 | /* 152 - 215 represent the MSI interrupts 0-63 */ | ||
174 | #define OCTEON_IRQ_MSI_BIT0 152 | ||
175 | #define OCTEON_IRQ_MSI_BIT1 153 | ||
176 | #define OCTEON_IRQ_MSI_BIT2 154 | ||
177 | #define OCTEON_IRQ_MSI_BIT3 155 | ||
178 | #define OCTEON_IRQ_MSI_BIT4 156 | ||
179 | #define OCTEON_IRQ_MSI_BIT5 157 | ||
180 | #define OCTEON_IRQ_MSI_BIT6 158 | ||
181 | #define OCTEON_IRQ_MSI_BIT7 159 | ||
182 | #define OCTEON_IRQ_MSI_BIT8 160 | ||
183 | #define OCTEON_IRQ_MSI_BIT9 161 | ||
184 | #define OCTEON_IRQ_MSI_BIT10 162 | ||
185 | #define OCTEON_IRQ_MSI_BIT11 163 | ||
186 | #define OCTEON_IRQ_MSI_BIT12 164 | ||
187 | #define OCTEON_IRQ_MSI_BIT13 165 | ||
188 | #define OCTEON_IRQ_MSI_BIT14 166 | ||
189 | #define OCTEON_IRQ_MSI_BIT15 167 | ||
190 | #define OCTEON_IRQ_MSI_BIT16 168 | ||
191 | #define OCTEON_IRQ_MSI_BIT17 169 | ||
192 | #define OCTEON_IRQ_MSI_BIT18 170 | ||
193 | #define OCTEON_IRQ_MSI_BIT19 171 | ||
194 | #define OCTEON_IRQ_MSI_BIT20 172 | ||
195 | #define OCTEON_IRQ_MSI_BIT21 173 | ||
196 | #define OCTEON_IRQ_MSI_BIT22 174 | ||
197 | #define OCTEON_IRQ_MSI_BIT23 175 | ||
198 | #define OCTEON_IRQ_MSI_BIT24 176 | ||
199 | #define OCTEON_IRQ_MSI_BIT25 177 | ||
200 | #define OCTEON_IRQ_MSI_BIT26 178 | ||
201 | #define OCTEON_IRQ_MSI_BIT27 179 | ||
202 | #define OCTEON_IRQ_MSI_BIT28 180 | ||
203 | #define OCTEON_IRQ_MSI_BIT29 181 | ||
204 | #define OCTEON_IRQ_MSI_BIT30 182 | ||
205 | #define OCTEON_IRQ_MSI_BIT31 183 | ||
206 | #define OCTEON_IRQ_MSI_BIT32 184 | ||
207 | #define OCTEON_IRQ_MSI_BIT33 185 | ||
208 | #define OCTEON_IRQ_MSI_BIT34 186 | ||
209 | #define OCTEON_IRQ_MSI_BIT35 187 | ||
210 | #define OCTEON_IRQ_MSI_BIT36 188 | ||
211 | #define OCTEON_IRQ_MSI_BIT37 189 | ||
212 | #define OCTEON_IRQ_MSI_BIT38 190 | ||
213 | #define OCTEON_IRQ_MSI_BIT39 191 | ||
214 | #define OCTEON_IRQ_MSI_BIT40 192 | ||
215 | #define OCTEON_IRQ_MSI_BIT41 193 | ||
216 | #define OCTEON_IRQ_MSI_BIT42 194 | ||
217 | #define OCTEON_IRQ_MSI_BIT43 195 | ||
218 | #define OCTEON_IRQ_MSI_BIT44 196 | ||
219 | #define OCTEON_IRQ_MSI_BIT45 197 | ||
220 | #define OCTEON_IRQ_MSI_BIT46 198 | ||
221 | #define OCTEON_IRQ_MSI_BIT47 199 | ||
222 | #define OCTEON_IRQ_MSI_BIT48 200 | ||
223 | #define OCTEON_IRQ_MSI_BIT49 201 | ||
224 | #define OCTEON_IRQ_MSI_BIT50 202 | ||
225 | #define OCTEON_IRQ_MSI_BIT51 203 | ||
226 | #define OCTEON_IRQ_MSI_BIT52 204 | ||
227 | #define OCTEON_IRQ_MSI_BIT53 205 | ||
228 | #define OCTEON_IRQ_MSI_BIT54 206 | ||
229 | #define OCTEON_IRQ_MSI_BIT55 207 | ||
230 | #define OCTEON_IRQ_MSI_BIT56 208 | ||
231 | #define OCTEON_IRQ_MSI_BIT57 209 | ||
232 | #define OCTEON_IRQ_MSI_BIT58 210 | ||
233 | #define OCTEON_IRQ_MSI_BIT59 211 | ||
234 | #define OCTEON_IRQ_MSI_BIT60 212 | ||
235 | #define OCTEON_IRQ_MSI_BIT61 213 | ||
236 | #define OCTEON_IRQ_MSI_BIT62 214 | ||
237 | #define OCTEON_IRQ_MSI_BIT63 215 | ||
238 | |||
239 | #define OCTEON_IRQ_LAST 216 | ||
240 | #else | ||
241 | #define OCTEON_IRQ_LAST 152 | ||
242 | #endif | ||
243 | |||
244 | #endif | ||
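The reserved and MSI ranges above are laid out so that MSI vector n always maps to Linux IRQ 152 + n, which is why OCTEON_IRQ_LAST grows from 152 to 216 when CONFIG_PCI_MSI is enabled. A minimal sketch of that mapping, using only the defines above (the helper name is hypothetical and not part of the patch):

        /* Hypothetical helper illustrating the fixed MSI-to-IRQ arithmetic
         * implied by the defines above: MSI vectors 0-63 occupy IRQs 152-215
         * when CONFIG_PCI_MSI is enabled. */
        static inline int octeon_msi_bit_to_irq(int msi_bit)
        {
                if (msi_bit < 0 || msi_bit > 63)
                        return -1;              /* out of range */
                return OCTEON_IRQ_MSI_BIT0 + msi_bit;   /* 152 + n */
        }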
diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h new file mode 100644 index 000000000000..0b2b5eb22e9b --- /dev/null +++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h | |||
@@ -0,0 +1,131 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2005-2008 Cavium Networks, Inc | ||
7 | */ | ||
8 | #ifndef __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H | ||
9 | #define __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H | ||
10 | |||
11 | |||
12 | #define CP0_CYCLE_COUNTER $9, 6 | ||
13 | #define CP0_CVMCTL_REG $9, 7 | ||
14 | #define CP0_CVMMEMCTL_REG $11,7 | ||
15 | #define CP0_PRID_REG $15, 0 | ||
16 | #define CP0_PRID_OCTEON_PASS1 0x000d0000 | ||
17 | #define CP0_PRID_OCTEON_CN30XX 0x000d0200 | ||
18 | |||
19 | .macro kernel_entry_setup | ||
20 | # Registers set by bootloader: | ||
21 | # (only 32 bits set by bootloader, all addresses are physical | ||
22 | # addresses, and need to have the appropriate memory region set | ||
23 | # by the kernel) | ||
24 | # a0 = argc | ||
25 | # a1 = argv (kseg0 compat addr) | ||
26 | # a2 = 1 if init core, zero otherwise | ||
27 | # a3 = address of boot descriptor block | ||
28 | .set push | ||
29 | .set arch=octeon | ||
30 | # Read the cavium mem control register | ||
31 | dmfc0 v0, CP0_CVMMEMCTL_REG | ||
32 | # Clear the lower 6 bits, the CVMSEG size | ||
33 | dins v0, $0, 0, 6 | ||
34 | ori v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE | ||
35 | dmtc0 v0, CP0_CVMMEMCTL_REG # Write the cavium mem control register | ||
36 | dmfc0 v0, CP0_CVMCTL_REG # Read the cavium control register | ||
37 | #ifdef CONFIG_CAVIUM_OCTEON_HW_FIX_UNALIGNED | ||
38 | # Disable unaligned load/store support but leave HW fixup enabled | ||
39 | or v0, v0, 0x5001 | ||
40 | xor v0, v0, 0x1001 | ||
41 | #else | ||
42 | # Disable unaligned load/store and HW fixup support | ||
43 | or v0, v0, 0x5001 | ||
44 | xor v0, v0, 0x5001 | ||
45 | #endif | ||
46 | # Read the processor ID register | ||
47 | mfc0 v1, CP0_PRID_REG | ||
48 | # Disable instruction prefetching (Octeon Pass1 errata) | ||
49 | or v0, v0, 0x2000 | ||
50 | # Skip reenable of prefetching for Octeon Pass1 | ||
51 | beq v1, CP0_PRID_OCTEON_PASS1, skip | ||
52 | nop | ||
53 | # Reenable instruction prefetching, not on Pass1 | ||
54 | xor v0, v0, 0x2000 | ||
55 | # Strip off pass number off of processor id | ||
56 | srl v1, 8 | ||
57 | sll v1, 8 | ||
58 | # CN30XX needs some extra stuff turned off for better performance | ||
59 | bne v1, CP0_PRID_OCTEON_CN30XX, skip | ||
60 | nop | ||
61 | # CN30XX Use random Icache replacement | ||
62 | or v0, v0, 0x400 | ||
63 | # CN30XX Disable instruction prefetching | ||
64 | or v0, v0, 0x2000 | ||
65 | skip: | ||
66 | # Write the cavium control register | ||
67 | dmtc0 v0, CP0_CVMCTL_REG | ||
68 | sync | ||
69 | # Flush dcache after config change | ||
70 | cache 9, 0($0) | ||
71 | # Get my core id | ||
72 | rdhwr v0, $0 | ||
73 | # Jump the master to kernel_entry | ||
74 | bne a2, zero, octeon_main_processor | ||
75 | nop | ||
76 | |||
77 | #ifdef CONFIG_SMP | ||
78 | |||
79 | # | ||
80 | # All cores other than the master need to wait here for SMP bootstrap | ||
81 | # to begin | ||
82 | # | ||
83 | |||
84 | # This is the variable where the next core to boot is stored | ||
85 | PTR_LA t0, octeon_processor_boot | ||
86 | octeon_spin_wait_boot: | ||
87 | # Get the core id of the next to be booted | ||
88 | LONG_L t1, (t0) | ||
89 | # Keep looping if it isn't me | ||
90 | bne t1, v0, octeon_spin_wait_boot | ||
91 | nop | ||
92 | # Get my GP from the global variable | ||
93 | PTR_LA t0, octeon_processor_gp | ||
94 | LONG_L gp, (t0) | ||
95 | # Get my SP from the global variable | ||
96 | PTR_LA t0, octeon_processor_sp | ||
97 | LONG_L sp, (t0) | ||
98 | # Set the SP global variable to zero so the master knows we've started | ||
99 | LONG_S zero, (t0) | ||
100 | #ifdef __OCTEON__ | ||
101 | syncw | ||
102 | syncw | ||
103 | #else | ||
104 | sync | ||
105 | #endif | ||
106 | # Jump to the normal Linux SMP entry point | ||
107 | j smp_bootstrap | ||
108 | nop | ||
109 | #else /* CONFIG_SMP */ | ||
110 | |||
111 | # | ||
112 | # Someone tried to boot SMP with a non-SMP kernel. All extra cores | ||
113 | # will halt here. | ||
114 | # | ||
115 | octeon_wait_forever: | ||
116 | wait | ||
117 | b octeon_wait_forever | ||
118 | nop | ||
119 | |||
120 | #endif /* CONFIG_SMP */ | ||
121 | octeon_main_processor: | ||
122 | .set pop | ||
123 | .endm | ||
124 | |||
125 | /* | ||
126 | * Do SMP slave processor setup necessary before we can safely execute C code. | ||
127 | */ | ||
128 | .macro smp_slave_setup | ||
129 | .endm | ||
130 | |||
131 | #endif /* __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H */ | ||
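The spin loop above implies a three-word handshake with the boot core: the master publishes a secondary's stack pointer, global pointer and core id through octeon_processor_sp, octeon_processor_gp and octeon_processor_boot, and the secondary acknowledges by storing zero back to octeon_processor_sp before jumping to smp_bootstrap. A hedged sketch of the master side is shown below; the real implementation belongs to smp.c (not shown in this hunk), and the function name here is illustrative only.

        #include <linux/sched.h>
        #include <asm/processor.h>

        extern unsigned long octeon_processor_boot;  /* core id allowed to boot next */
        extern unsigned long octeon_processor_sp;    /* its initial stack pointer */
        extern unsigned long octeon_processor_gp;    /* its initial global pointer */

        /* Illustrative boot-core side of the handshake the assembly above waits on. */
        static void example_release_secondary(unsigned int coreid,
                                              struct task_struct *idle)
        {
                octeon_processor_sp = __KSTK_TOS(idle);
                octeon_processor_gp = (unsigned long)task_thread_info(idle);
                /* Publish the core id last; exactly one spinning core matches it. */
                octeon_processor_boot = coreid;
                mb();

                /* The secondary stores zero to octeon_processor_sp once it has
                 * loaded its sp/gp, so waiting here confirms it really started. */
                while (octeon_processor_sp)
                        cpu_relax();
        }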
diff --git a/arch/mips/include/asm/mach-cavium-octeon/war.h b/arch/mips/include/asm/mach-cavium-octeon/war.h new file mode 100644 index 000000000000..c4712d7cc81d --- /dev/null +++ b/arch/mips/include/asm/mach-cavium-octeon/war.h | |||
@@ -0,0 +1,26 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org> | ||
7 | * Copyright (C) 2008 Cavium Networks <support@caviumnetworks.com> | ||
8 | */ | ||
9 | #ifndef __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H | ||
10 | #define __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H | ||
11 | |||
12 | #define R4600_V1_INDEX_ICACHEOP_WAR 0 | ||
13 | #define R4600_V1_HIT_CACHEOP_WAR 0 | ||
14 | #define R4600_V2_HIT_CACHEOP_WAR 0 | ||
15 | #define R5432_CP0_INTERRUPT_WAR 0 | ||
16 | #define BCM1250_M3_WAR 0 | ||
17 | #define SIBYTE_1956_WAR 0 | ||
18 | #define MIPS4K_ICACHE_REFILL_WAR 0 | ||
19 | #define MIPS_CACHE_SYNC_WAR 0 | ||
20 | #define TX49XX_ICACHE_INDEX_INV_WAR 0 | ||
21 | #define RM9000_CDEX_SMP_WAR 0 | ||
22 | #define ICACHE_REFILLS_WORKAROUND_WAR 0 | ||
23 | #define R10000_LLSC_WAR 0 | ||
24 | #define MIPS34K_MISSED_ITLB_WAR 0 | ||
25 | |||
26 | #endif /* __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H */ | ||
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h new file mode 100644 index 000000000000..edc676084cda --- /dev/null +++ b/arch/mips/include/asm/octeon/octeon.h | |||
@@ -0,0 +1,248 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2004-2008 Cavium Networks | ||
7 | */ | ||
8 | #ifndef __ASM_OCTEON_OCTEON_H | ||
9 | #define __ASM_OCTEON_OCTEON_H | ||
10 | |||
11 | #include "cvmx.h" | ||
12 | |||
13 | extern uint64_t octeon_bootmem_alloc_range_phys(uint64_t size, | ||
14 | uint64_t alignment, | ||
15 | uint64_t min_addr, | ||
16 | uint64_t max_addr, | ||
17 | int do_locking); | ||
18 | extern void *octeon_bootmem_alloc(uint64_t size, uint64_t alignment, | ||
19 | int do_locking); | ||
20 | extern void *octeon_bootmem_alloc_range(uint64_t size, uint64_t alignment, | ||
21 | uint64_t min_addr, uint64_t max_addr, | ||
22 | int do_locking); | ||
23 | extern void *octeon_bootmem_alloc_named(uint64_t size, uint64_t alignment, | ||
24 | char *name); | ||
25 | extern void *octeon_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, | ||
26 | uint64_t max_addr, uint64_t align, | ||
27 | char *name); | ||
28 | extern void *octeon_bootmem_alloc_named_address(uint64_t size, uint64_t address, | ||
29 | char *name); | ||
30 | extern int octeon_bootmem_free_named(char *name); | ||
31 | extern void octeon_bootmem_lock(void); | ||
32 | extern void octeon_bootmem_unlock(void); | ||
33 | |||
34 | extern int octeon_is_simulation(void); | ||
35 | extern int octeon_is_pci_host(void); | ||
36 | extern int octeon_usb_is_ref_clk(void); | ||
37 | extern uint64_t octeon_get_clock_rate(void); | ||
38 | extern const char *octeon_board_type_string(void); | ||
39 | extern const char *octeon_get_pci_interrupts(void); | ||
40 | extern int octeon_get_southbridge_interrupt(void); | ||
41 | extern int octeon_get_boot_coremask(void); | ||
42 | extern int octeon_get_boot_num_arguments(void); | ||
43 | extern const char *octeon_get_boot_argument(int arg); | ||
44 | extern void octeon_hal_setup_reserved32(void); | ||
45 | extern void octeon_user_io_init(void); | ||
46 | struct octeon_cop2_state; | ||
47 | extern unsigned long octeon_crypto_enable(struct octeon_cop2_state *state); | ||
48 | extern void octeon_crypto_disable(struct octeon_cop2_state *state, | ||
49 | unsigned long flags); | ||
50 | |||
51 | extern void octeon_init_cvmcount(void); | ||
52 | |||
53 | #define OCTEON_ARGV_MAX_ARGS 64 | ||
54 | #define OCTOEN_SERIAL_LEN 20 | ||
55 | |||
56 | struct octeon_boot_descriptor { | ||
57 | /* Start of block referenced by assembly code - do not change! */ | ||
58 | uint32_t desc_version; | ||
59 | uint32_t desc_size; | ||
60 | uint64_t stack_top; | ||
61 | uint64_t heap_base; | ||
62 | uint64_t heap_end; | ||
63 | /* Only used by bootloader */ | ||
64 | uint64_t entry_point; | ||
65 | uint64_t desc_vaddr; | ||
66 | /* End of this block referenced by assembly code - do not change! */ | ||
67 | uint32_t exception_base_addr; | ||
68 | uint32_t stack_size; | ||
69 | uint32_t heap_size; | ||
70 | /* Argument count for the application. */ | ||
71 | uint32_t argc; | ||
72 | uint32_t argv[OCTEON_ARGV_MAX_ARGS]; | ||
73 | |||
74 | #define BOOT_FLAG_INIT_CORE (1 << 0) | ||
75 | #define OCTEON_BL_FLAG_DEBUG (1 << 1) | ||
76 | #define OCTEON_BL_FLAG_NO_MAGIC (1 << 2) | ||
77 | /* If set, use uart1 for console */ | ||
78 | #define OCTEON_BL_FLAG_CONSOLE_UART1 (1 << 3) | ||
79 | /* If set, use PCI console */ | ||
80 | #define OCTEON_BL_FLAG_CONSOLE_PCI (1 << 4) | ||
81 | /* Call exit on break on serial port */ | ||
82 | #define OCTEON_BL_FLAG_BREAK (1 << 5) | ||
83 | |||
84 | uint32_t flags; | ||
85 | uint32_t core_mask; | ||
86 | /* DRAM size in megabytes. */ | ||
87 | uint32_t dram_size; | ||
88 | /* physical address of free memory descriptor block. */ | ||
89 | uint32_t phy_mem_desc_addr; | ||
90 | /* used to pass flags from app to debugger. */ | ||
91 | uint32_t debugger_flags_base_addr; | ||
92 | /* CPU clock speed, in Hz. */ | ||
93 | uint32_t eclock_hz; | ||
94 | /* DRAM clock speed, in Hz. */ | ||
95 | uint32_t dclock_hz; | ||
96 | /* SPI4 clock in Hz. */ | ||
97 | uint32_t spi_clock_hz; | ||
98 | uint16_t board_type; | ||
99 | uint8_t board_rev_major; | ||
100 | uint8_t board_rev_minor; | ||
101 | uint16_t chip_type; | ||
102 | uint8_t chip_rev_major; | ||
103 | uint8_t chip_rev_minor; | ||
104 | char board_serial_number[OCTOEN_SERIAL_LEN]; | ||
105 | uint8_t mac_addr_base[6]; | ||
106 | uint8_t mac_addr_count; | ||
107 | uint64_t cvmx_desc_vaddr; | ||
108 | }; | ||
109 | |||
110 | union octeon_cvmemctl { | ||
111 | uint64_t u64; | ||
112 | struct { | ||
113 | /* RO 1 = BIST fail, 0 = BIST pass */ | ||
114 | uint64_t tlbbist:1; | ||
115 | /* RO 1 = BIST fail, 0 = BIST pass */ | ||
116 | uint64_t l1cbist:1; | ||
117 | /* RO 1 = BIST fail, 0 = BIST pass */ | ||
118 | uint64_t l1dbist:1; | ||
119 | /* RO 1 = BIST fail, 0 = BIST pass */ | ||
120 | uint64_t dcmbist:1; | ||
121 | /* RO 1 = BIST fail, 0 = BIST pass */ | ||
122 | uint64_t ptgbist:1; | ||
123 | /* RO 1 = BIST fail, 0 = BIST pass */ | ||
124 | uint64_t wbfbist:1; | ||
125 | /* Reserved */ | ||
126 | uint64_t reserved:22; | ||
127 | /* R/W If set, marked write-buffer entries time out | ||
128 | * the same as other entries; if clear, marked | ||
129 | * write-buffer entries use the maximum timeout. */ | ||
130 | uint64_t dismarkwblongto:1; | ||
131 | /* R/W If set, a merged store does not clear the | ||
132 | * write-buffer entry timeout state. */ | ||
133 | uint64_t dismrgclrwbto:1; | ||
134 | /* R/W Two bits that are the MSBs of the resultant | ||
135 | * CVMSEG LM word location for an IOBDMA. The other 8 | ||
136 | * bits come from the SCRADDR field of the IOBDMA. */ | ||
137 | uint64_t iobdmascrmsb:2; | ||
138 | /* R/W If set, SYNCWS and SYNCS only order marked | ||
139 | * stores; if clear, SYNCWS and SYNCS only order | ||
140 | * unmarked stores. SYNCWSMARKED has no effect when | ||
141 | * DISSYNCWS is set. */ | ||
142 | uint64_t syncwsmarked:1; | ||
143 | /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as | ||
144 | * SYNC. */ | ||
145 | uint64_t dissyncws:1; | ||
146 | /* R/W If set, no stall happens on write buffer | ||
147 | * full. */ | ||
148 | uint64_t diswbfst:1; | ||
149 | /* R/W If set (and SX set), supervisor-level | ||
150 | * loads/stores can use XKPHYS addresses with | ||
151 | * VA<48>==0 */ | ||
152 | uint64_t xkmemenas:1; | ||
153 | /* R/W If set (and UX set), user-level loads/stores | ||
154 | * can use XKPHYS addresses with VA<48>==0 */ | ||
155 | uint64_t xkmemenau:1; | ||
156 | /* R/W If set (and SX set), supervisor-level | ||
157 | * loads/stores can use XKPHYS addresses with | ||
158 | * VA<48>==1 */ | ||
159 | uint64_t xkioenas:1; | ||
160 | /* R/W If set (and UX set), user-level loads/stores | ||
161 | * can use XKPHYS addresses with VA<48>==1 */ | ||
162 | uint64_t xkioenau:1; | ||
163 | /* R/W If set, all stores act as SYNCW (NOMERGE must | ||
164 | * be set when this is set) RW, reset to 0. */ | ||
165 | uint64_t allsyncw:1; | ||
166 | /* R/W If set, no stores merge, and all stores reach | ||
167 | * the coherent bus in order. */ | ||
168 | uint64_t nomerge:1; | ||
169 | /* R/W Selects the bit in the counter used for DID | ||
170 | * time-outs: 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = | ||
171 | * 2^14. Actual time-out is between 1x and 2x this | ||
172 | * interval. For example, with DIDTTO=3, expiration | ||
173 | * interval is between 16K and 32K. */ | ||
174 | uint64_t didtto:2; | ||
175 | /* R/W If set, the (mem) CSR clock never turns off. */ | ||
176 | uint64_t csrckalwys:1; | ||
177 | /* R/W If set, mclk never turns off. */ | ||
178 | uint64_t mclkalwys:1; | ||
179 | /* R/W Selects the bit in the counter used for write | ||
180 | * buffer flush time-outs: (WBFLT+11) is the bit | ||
181 | * position in an internal counter used to determine | ||
182 | * expiration. The write buffer expires between 1x and | ||
183 | * 2x this interval. For example, with WBFLT = 0, a | ||
184 | * write buffer expires between 2K and 4K cycles after | ||
185 | * the write buffer entry is allocated. */ | ||
186 | uint64_t wbfltime:3; | ||
187 | /* R/W If set, do not put Istream in the L2 cache. */ | ||
188 | uint64_t istrnol2:1; | ||
189 | /* R/W The write buffer threshold. */ | ||
190 | uint64_t wbthresh:4; | ||
191 | /* Reserved */ | ||
192 | uint64_t reserved2:2; | ||
193 | /* R/W If set, CVMSEG is available for loads/stores in | ||
194 | * kernel/debug mode. */ | ||
195 | uint64_t cvmsegenak:1; | ||
196 | /* R/W If set, CVMSEG is available for loads/stores in | ||
197 | * supervisor mode. */ | ||
198 | uint64_t cvmsegenas:1; | ||
199 | /* R/W If set, CVMSEG is available for loads/stores in | ||
200 | * user mode. */ | ||
201 | uint64_t cvmsegenau:1; | ||
202 | /* R/W Size of local memory in cache blocks, 54 (6912 | ||
203 | * bytes) is max legal value. */ | ||
204 | uint64_t lmemsz:6; | ||
205 | } s; | ||
206 | }; | ||
207 | |||
208 | struct octeon_cf_data { | ||
209 | unsigned long base_region_bias; | ||
210 | unsigned int base_region; /* The chip select region used by CF */ | ||
211 | int is16bit; /* 0 - 8bit, !0 - 16bit */ | ||
212 | int dma_engine; /* -1 for no DMA */ | ||
213 | }; | ||
214 | |||
215 | extern void octeon_write_lcd(const char *s); | ||
216 | extern void octeon_check_cpu_bist(void); | ||
217 | extern int octeon_get_boot_debug_flag(void); | ||
218 | extern int octeon_get_boot_uart(void); | ||
219 | |||
220 | struct uart_port; | ||
221 | extern unsigned int octeon_serial_in(struct uart_port *, int); | ||
222 | extern void octeon_serial_out(struct uart_port *, int, int); | ||
223 | |||
224 | /** | ||
225 | * Write a 32-bit value to the Octeon NPI register space | ||
226 | * | ||
227 | * @address: Address to write to | ||
228 | * @val: Value to write | ||
229 | */ | ||
230 | static inline void octeon_npi_write32(uint64_t address, uint32_t val) | ||
231 | { | ||
232 | cvmx_write64_uint32(address ^ 4, val); | ||
233 | cvmx_read64_uint32(address ^ 4); | ||
234 | } | ||
235 | |||
236 | |||
237 | /** | ||
238 | * Read a 32-bit value from the Octeon NPI register space | ||
239 | * | ||
240 | * @address: Address to read | ||
241 | * Returns the value read | ||
242 | */ | ||
243 | static inline uint32_t octeon_npi_read32(uint64_t address) | ||
244 | { | ||
245 | return cvmx_read64_uint32(address ^ 4); | ||
246 | } | ||
247 | |||
248 | #endif /* __ASM_OCTEON_OCTEON_H */ | ||
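The union octeon_cvmemctl lets C code pick apart the same CvmMemCtl fields that kernel-entry-init.h manipulates bit by bit. As one example, the CVMSEG size could be read back as below; read_c0_cvmmemctl() is an assumed accessor for the dmfc0 $11,7 read used elsewhere in this patch, not something this header defines.

        /* Illustrative only: decode the local-memory (CVMSEG) size from CvmMemCtl.
         * read_c0_cvmmemctl() is an assumed wrapper around the $11,7 CP0 register. */
        static inline unsigned int octeon_cvmseg_bytes(void)
        {
                union octeon_cvmemctl memctl;

                memctl.u64 = read_c0_cvmmemctl();
                /* lmemsz counts 128-byte cache blocks; 54 blocks (6912 bytes) is
                 * the maximum legal value, per the field comment above. */
                return memctl.s.lmemsz * 128;
        }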
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S new file mode 100644 index 000000000000..d52389672b06 --- /dev/null +++ b/arch/mips/kernel/octeon_switch.S | |||
@@ -0,0 +1,506 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle | ||
7 | * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) | ||
8 | * Copyright (C) 1994, 1995, 1996, by Andreas Busse | ||
9 | * Copyright (C) 1999 Silicon Graphics, Inc. | ||
10 | * Copyright (C) 2000 MIPS Technologies, Inc. | ||
11 | * written by Carsten Langgaard, carstenl@mips.com | ||
12 | */ | ||
13 | #include <asm/asm.h> | ||
14 | #include <asm/cachectl.h> | ||
15 | #include <asm/fpregdef.h> | ||
16 | #include <asm/mipsregs.h> | ||
17 | #include <asm/asm-offsets.h> | ||
18 | #include <asm/page.h> | ||
19 | #include <asm/pgtable-bits.h> | ||
20 | #include <asm/regdef.h> | ||
21 | #include <asm/stackframe.h> | ||
22 | #include <asm/thread_info.h> | ||
23 | |||
24 | #include <asm/asmmacro.h> | ||
25 | |||
26 | /* | ||
27 | * Offset to the current process status flags, the first 32 bytes of the | ||
28 | * Offset to the current process status flags; the first 32 bytes of the | ||
29 | */ | ||
30 | #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) | ||
31 | |||
32 | /* | ||
33 | * task_struct *resume(task_struct *prev, task_struct *next, | ||
34 | * struct thread_info *next_ti) | ||
35 | */ | ||
36 | .align 7 | ||
37 | LEAF(resume) | ||
38 | .set arch=octeon | ||
39 | #ifndef CONFIG_CPU_HAS_LLSC | ||
40 | sw zero, ll_bit | ||
41 | #endif | ||
42 | mfc0 t1, CP0_STATUS | ||
43 | LONG_S t1, THREAD_STATUS(a0) | ||
44 | cpu_save_nonscratch a0 | ||
45 | LONG_S ra, THREAD_REG31(a0) | ||
46 | |||
47 | /* check if we need to save COP2 registers */ | ||
48 | PTR_L t2, TASK_THREAD_INFO(a0) | ||
49 | LONG_L t0, ST_OFF(t2) | ||
50 | bbit0 t0, 30, 1f | ||
51 | |||
52 | /* Disable COP2 in the stored process state */ | ||
53 | li t1, ST0_CU2 | ||
54 | xor t0, t1 | ||
55 | LONG_S t0, ST_OFF(t2) | ||
56 | |||
57 | /* Enable COP2 so we can save it */ | ||
58 | mfc0 t0, CP0_STATUS | ||
59 | or t0, t1 | ||
60 | mtc0 t0, CP0_STATUS | ||
61 | |||
62 | /* Save COP2 */ | ||
63 | daddu a0, THREAD_CP2 | ||
64 | jal octeon_cop2_save | ||
65 | dsubu a0, THREAD_CP2 | ||
66 | |||
67 | /* Disable COP2 now that we are done */ | ||
68 | mfc0 t0, CP0_STATUS | ||
69 | li t1, ST0_CU2 | ||
70 | xor t0, t1 | ||
71 | mtc0 t0, CP0_STATUS | ||
72 | |||
73 | 1: | ||
74 | #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 | ||
75 | /* Check if we need to store CVMSEG state */ | ||
76 | mfc0 t0, $11,7 /* CvmMemCtl */ | ||
77 | bbit0 t0, 6, 3f /* Is user access enabled? */ | ||
78 | |||
79 | /* Store the CVMSEG state */ | ||
80 | /* Extract the size of CVMSEG */ | ||
81 | andi t0, 0x3f | ||
82 | /* Multiply by (cache line size / sizeof(long) / 2) */ | ||
83 | sll t0, 7-LONGLOG-1 | ||
84 | li t1, -32768 /* Base address of CVMSEG */ | ||
85 | LONG_ADDI t2, a0, THREAD_CVMSEG /* Where to store CVMSEG to */ | ||
86 | synciobdma | ||
87 | 2: | ||
88 | .set noreorder | ||
89 | LONG_L t8, 0(t1) /* Load from CVMSEG */ | ||
90 | subu t0, 1 /* Decrement loop var */ | ||
91 | LONG_L t9, LONGSIZE(t1)/* Load from CVMSEG */ | ||
92 | LONG_ADDU t1, LONGSIZE*2 /* Increment loc in CVMSEG */ | ||
93 | LONG_S t8, 0(t2) /* Store CVMSEG to thread storage */ | ||
94 | LONG_ADDU t2, LONGSIZE*2 /* Increment loc in thread storage */ | ||
95 | bnez t0, 2b /* Loop until we've copied it all */ | ||
96 | LONG_S t9, -LONGSIZE(t2)/* Store CVMSEG to thread storage */ | ||
97 | .set reorder | ||
98 | |||
99 | /* Disable access to CVMSEG */ | ||
100 | mfc0 t0, $11,7 /* CvmMemCtl */ | ||
101 | xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */ | ||
102 | mtc0 t0, $11,7 /* CvmMemCtl */ | ||
103 | #endif | ||
104 | 3: | ||
105 | /* | ||
106 | * The order of restoring the registers takes care of the race | ||
107 | * updating $28, $29 and kernelsp without disabling ints. | ||
108 | */ | ||
109 | move $28, a2 | ||
110 | cpu_restore_nonscratch a1 | ||
111 | |||
112 | #if (_THREAD_SIZE - 32) < 0x8000 | ||
113 | PTR_ADDIU t0, $28, _THREAD_SIZE - 32 | ||
114 | #else | ||
115 | PTR_LI t0, _THREAD_SIZE - 32 | ||
116 | PTR_ADDU t0, $28 | ||
117 | #endif | ||
118 | set_saved_sp t0, t1, t2 | ||
119 | |||
120 | mfc0 t1, CP0_STATUS /* Do we really need this? */ | ||
121 | li a3, 0xff01 | ||
122 | and t1, a3 | ||
123 | LONG_L a2, THREAD_STATUS(a1) | ||
124 | nor a3, $0, a3 | ||
125 | and a2, a3 | ||
126 | or a2, t1 | ||
127 | mtc0 a2, CP0_STATUS | ||
128 | move v0, a0 | ||
129 | jr ra | ||
130 | END(resume) | ||
131 | |||
132 | /* | ||
133 | * void octeon_cop2_save(struct octeon_cop2_state *a0) | ||
134 | */ | ||
135 | .align 7 | ||
136 | LEAF(octeon_cop2_save) | ||
137 | |||
138 | dmfc0 t9, $9,7 /* CvmCtl register. */ | ||
139 | |||
140 | /* Save the COP2 CRC state */ | ||
141 | dmfc2 t0, 0x0201 | ||
142 | dmfc2 t1, 0x0202 | ||
143 | dmfc2 t2, 0x0200 | ||
144 | sd t0, OCTEON_CP2_CRC_IV(a0) | ||
145 | sd t1, OCTEON_CP2_CRC_LENGTH(a0) | ||
146 | sd t2, OCTEON_CP2_CRC_POLY(a0) | ||
147 | /* Skip next instructions if CvmCtl[NODFA_CP2] set */ | ||
148 | bbit1 t9, 28, 1f | ||
149 | |||
150 | /* Save the LLM state */ | ||
151 | dmfc2 t0, 0x0402 | ||
152 | dmfc2 t1, 0x040A | ||
153 | sd t0, OCTEON_CP2_LLM_DAT(a0) | ||
154 | sd t1, OCTEON_CP2_LLM_DAT+8(a0) | ||
155 | |||
156 | 1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */ | ||
157 | |||
158 | /* Save the COP2 crypto state */ | ||
159 | /* this part is mostly common to both pass 1 and later revisions */ | ||
160 | dmfc2 t0, 0x0084 | ||
161 | dmfc2 t1, 0x0080 | ||
162 | dmfc2 t2, 0x0081 | ||
163 | dmfc2 t3, 0x0082 | ||
164 | sd t0, OCTEON_CP2_3DES_IV(a0) | ||
165 | dmfc2 t0, 0x0088 | ||
166 | sd t1, OCTEON_CP2_3DES_KEY(a0) | ||
167 | dmfc2 t1, 0x0111 /* only necessary for pass 1 */ | ||
168 | sd t2, OCTEON_CP2_3DES_KEY+8(a0) | ||
169 | dmfc2 t2, 0x0102 | ||
170 | sd t3, OCTEON_CP2_3DES_KEY+16(a0) | ||
171 | dmfc2 t3, 0x0103 | ||
172 | sd t0, OCTEON_CP2_3DES_RESULT(a0) | ||
173 | dmfc2 t0, 0x0104 | ||
174 | sd t1, OCTEON_CP2_AES_INP0(a0) /* only necessary for pass 1 */ | ||
175 | dmfc2 t1, 0x0105 | ||
176 | sd t2, OCTEON_CP2_AES_IV(a0) | ||
177 | dmfc2 t2, 0x0106 | ||
178 | sd t3, OCTEON_CP2_AES_IV+8(a0) | ||
179 | dmfc2 t3, 0x0107 | ||
180 | sd t0, OCTEON_CP2_AES_KEY(a0) | ||
181 | dmfc2 t0, 0x0110 | ||
182 | sd t1, OCTEON_CP2_AES_KEY+8(a0) | ||
183 | dmfc2 t1, 0x0100 | ||
184 | sd t2, OCTEON_CP2_AES_KEY+16(a0) | ||
185 | dmfc2 t2, 0x0101 | ||
186 | sd t3, OCTEON_CP2_AES_KEY+24(a0) | ||
187 | mfc0 t3, $15,0 /* Get the processor ID register */ | ||
188 | sd t0, OCTEON_CP2_AES_KEYLEN(a0) | ||
189 | li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ | ||
190 | sd t1, OCTEON_CP2_AES_RESULT(a0) | ||
191 | sd t2, OCTEON_CP2_AES_RESULT+8(a0) | ||
192 | /* Skip to the Pass1 version of the remainder of the COP2 state */ | ||
193 | beq t3, t0, 2f | ||
194 | |||
195 | /* the non-pass1 state when !CvmCtl[NOCRYPTO] */ | ||
196 | dmfc2 t1, 0x0240 | ||
197 | dmfc2 t2, 0x0241 | ||
198 | dmfc2 t3, 0x0242 | ||
199 | dmfc2 t0, 0x0243 | ||
200 | sd t1, OCTEON_CP2_HSH_DATW(a0) | ||
201 | dmfc2 t1, 0x0244 | ||
202 | sd t2, OCTEON_CP2_HSH_DATW+8(a0) | ||
203 | dmfc2 t2, 0x0245 | ||
204 | sd t3, OCTEON_CP2_HSH_DATW+16(a0) | ||
205 | dmfc2 t3, 0x0246 | ||
206 | sd t0, OCTEON_CP2_HSH_DATW+24(a0) | ||
207 | dmfc2 t0, 0x0247 | ||
208 | sd t1, OCTEON_CP2_HSH_DATW+32(a0) | ||
209 | dmfc2 t1, 0x0248 | ||
210 | sd t2, OCTEON_CP2_HSH_DATW+40(a0) | ||
211 | dmfc2 t2, 0x0249 | ||
212 | sd t3, OCTEON_CP2_HSH_DATW+48(a0) | ||
213 | dmfc2 t3, 0x024A | ||
214 | sd t0, OCTEON_CP2_HSH_DATW+56(a0) | ||
215 | dmfc2 t0, 0x024B | ||
216 | sd t1, OCTEON_CP2_HSH_DATW+64(a0) | ||
217 | dmfc2 t1, 0x024C | ||
218 | sd t2, OCTEON_CP2_HSH_DATW+72(a0) | ||
219 | dmfc2 t2, 0x024D | ||
220 | sd t3, OCTEON_CP2_HSH_DATW+80(a0) | ||
221 | dmfc2 t3, 0x024E | ||
222 | sd t0, OCTEON_CP2_HSH_DATW+88(a0) | ||
223 | dmfc2 t0, 0x0250 | ||
224 | sd t1, OCTEON_CP2_HSH_DATW+96(a0) | ||
225 | dmfc2 t1, 0x0251 | ||
226 | sd t2, OCTEON_CP2_HSH_DATW+104(a0) | ||
227 | dmfc2 t2, 0x0252 | ||
228 | sd t3, OCTEON_CP2_HSH_DATW+112(a0) | ||
229 | dmfc2 t3, 0x0253 | ||
230 | sd t0, OCTEON_CP2_HSH_IVW(a0) | ||
231 | dmfc2 t0, 0x0254 | ||
232 | sd t1, OCTEON_CP2_HSH_IVW+8(a0) | ||
233 | dmfc2 t1, 0x0255 | ||
234 | sd t2, OCTEON_CP2_HSH_IVW+16(a0) | ||
235 | dmfc2 t2, 0x0256 | ||
236 | sd t3, OCTEON_CP2_HSH_IVW+24(a0) | ||
237 | dmfc2 t3, 0x0257 | ||
238 | sd t0, OCTEON_CP2_HSH_IVW+32(a0) | ||
239 | dmfc2 t0, 0x0258 | ||
240 | sd t1, OCTEON_CP2_HSH_IVW+40(a0) | ||
241 | dmfc2 t1, 0x0259 | ||
242 | sd t2, OCTEON_CP2_HSH_IVW+48(a0) | ||
243 | dmfc2 t2, 0x025E | ||
244 | sd t3, OCTEON_CP2_HSH_IVW+56(a0) | ||
245 | dmfc2 t3, 0x025A | ||
246 | sd t0, OCTEON_CP2_GFM_MULT(a0) | ||
247 | dmfc2 t0, 0x025B | ||
248 | sd t1, OCTEON_CP2_GFM_MULT+8(a0) | ||
249 | sd t2, OCTEON_CP2_GFM_POLY(a0) | ||
250 | sd t3, OCTEON_CP2_GFM_RESULT(a0) | ||
251 | sd t0, OCTEON_CP2_GFM_RESULT+8(a0) | ||
252 | jr ra | ||
253 | |||
254 | 2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */ | ||
255 | dmfc2 t3, 0x0040 | ||
256 | dmfc2 t0, 0x0041 | ||
257 | dmfc2 t1, 0x0042 | ||
258 | dmfc2 t2, 0x0043 | ||
259 | sd t3, OCTEON_CP2_HSH_DATW(a0) | ||
260 | dmfc2 t3, 0x0044 | ||
261 | sd t0, OCTEON_CP2_HSH_DATW+8(a0) | ||
262 | dmfc2 t0, 0x0045 | ||
263 | sd t1, OCTEON_CP2_HSH_DATW+16(a0) | ||
264 | dmfc2 t1, 0x0046 | ||
265 | sd t2, OCTEON_CP2_HSH_DATW+24(a0) | ||
266 | dmfc2 t2, 0x0048 | ||
267 | sd t3, OCTEON_CP2_HSH_DATW+32(a0) | ||
268 | dmfc2 t3, 0x0049 | ||
269 | sd t0, OCTEON_CP2_HSH_DATW+40(a0) | ||
270 | dmfc2 t0, 0x004A | ||
271 | sd t1, OCTEON_CP2_HSH_DATW+48(a0) | ||
272 | sd t2, OCTEON_CP2_HSH_IVW(a0) | ||
273 | sd t3, OCTEON_CP2_HSH_IVW+8(a0) | ||
274 | sd t0, OCTEON_CP2_HSH_IVW+16(a0) | ||
275 | |||
276 | 3: /* pass 1 or CvmCtl[NOCRYPTO] set */ | ||
277 | jr ra | ||
278 | END(octeon_cop2_save) | ||
279 | |||
280 | /* | ||
281 | * void octeon_cop2_restore(struct octeon_cop2_state *a0) | ||
282 | */ | ||
283 | .align 7 | ||
284 | .set push | ||
285 | .set noreorder | ||
286 | LEAF(octeon_cop2_restore) | ||
287 | /* First cache line was prefetched before the call */ | ||
288 | pref 4, 128(a0) | ||
289 | dmfc0 t9, $9,7 /* CvmCtl register. */ | ||
290 | |||
291 | pref 4, 256(a0) | ||
292 | ld t0, OCTEON_CP2_CRC_IV(a0) | ||
293 | pref 4, 384(a0) | ||
294 | ld t1, OCTEON_CP2_CRC_LENGTH(a0) | ||
295 | ld t2, OCTEON_CP2_CRC_POLY(a0) | ||
296 | |||
297 | /* Restore the COP2 CRC state */ | ||
298 | dmtc2 t0, 0x0201 | ||
299 | dmtc2 t1, 0x1202 | ||
300 | bbit1 t9, 28, 2f /* Skip LLM if CvmCtl[NODFA_CP2] is set */ | ||
301 | dmtc2 t2, 0x4200 | ||
302 | |||
303 | /* Restore the LLM state */ | ||
304 | ld t0, OCTEON_CP2_LLM_DAT(a0) | ||
305 | ld t1, OCTEON_CP2_LLM_DAT+8(a0) | ||
306 | dmtc2 t0, 0x0402 | ||
307 | dmtc2 t1, 0x040A | ||
308 | |||
309 | 2: | ||
310 | bbit1 t9, 26, done_restore /* done if CvmCtl[NOCRYPTO] set */ | ||
311 | nop | ||
312 | |||
313 | /* Restore the COP2 crypto state common to pass 1 and pass 2 */ | ||
314 | ld t0, OCTEON_CP2_3DES_IV(a0) | ||
315 | ld t1, OCTEON_CP2_3DES_KEY(a0) | ||
316 | ld t2, OCTEON_CP2_3DES_KEY+8(a0) | ||
317 | dmtc2 t0, 0x0084 | ||
318 | ld t0, OCTEON_CP2_3DES_KEY+16(a0) | ||
319 | dmtc2 t1, 0x0080 | ||
320 | ld t1, OCTEON_CP2_3DES_RESULT(a0) | ||
321 | dmtc2 t2, 0x0081 | ||
322 | ld t2, OCTEON_CP2_AES_INP0(a0) /* only really needed for pass 1 */ | ||
323 | dmtc2 t0, 0x0082 | ||
324 | ld t0, OCTEON_CP2_AES_IV(a0) | ||
325 | dmtc2 t1, 0x0098 | ||
326 | ld t1, OCTEON_CP2_AES_IV+8(a0) | ||
327 | dmtc2 t2, 0x010A /* only really needed for pass 1 */ | ||
328 | ld t2, OCTEON_CP2_AES_KEY(a0) | ||
329 | dmtc2 t0, 0x0102 | ||
330 | ld t0, OCTEON_CP2_AES_KEY+8(a0) | ||
331 | dmtc2 t1, 0x0103 | ||
332 | ld t1, OCTEON_CP2_AES_KEY+16(a0) | ||
333 | dmtc2 t2, 0x0104 | ||
334 | ld t2, OCTEON_CP2_AES_KEY+24(a0) | ||
335 | dmtc2 t0, 0x0105 | ||
336 | ld t0, OCTEON_CP2_AES_KEYLEN(a0) | ||
337 | dmtc2 t1, 0x0106 | ||
338 | ld t1, OCTEON_CP2_AES_RESULT(a0) | ||
339 | dmtc2 t2, 0x0107 | ||
340 | ld t2, OCTEON_CP2_AES_RESULT+8(a0) | ||
341 | mfc0 t3, $15,0 /* Get the processor ID register */ | ||
342 | dmtc2 t0, 0x0110 | ||
343 | li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ | ||
344 | dmtc2 t1, 0x0100 | ||
345 | bne t0, t3, 3f /* Skip the next stuff for non-pass1 */ | ||
346 | dmtc2 t2, 0x0101 | ||
347 | |||
348 | /* this code is specific for pass 1 */ | ||
349 | ld t0, OCTEON_CP2_HSH_DATW(a0) | ||
350 | ld t1, OCTEON_CP2_HSH_DATW+8(a0) | ||
351 | ld t2, OCTEON_CP2_HSH_DATW+16(a0) | ||
352 | dmtc2 t0, 0x0040 | ||
353 | ld t0, OCTEON_CP2_HSH_DATW+24(a0) | ||
354 | dmtc2 t1, 0x0041 | ||
355 | ld t1, OCTEON_CP2_HSH_DATW+32(a0) | ||
356 | dmtc2 t2, 0x0042 | ||
357 | ld t2, OCTEON_CP2_HSH_DATW+40(a0) | ||
358 | dmtc2 t0, 0x0043 | ||
359 | ld t0, OCTEON_CP2_HSH_DATW+48(a0) | ||
360 | dmtc2 t1, 0x0044 | ||
361 | ld t1, OCTEON_CP2_HSH_IVW(a0) | ||
362 | dmtc2 t2, 0x0045 | ||
363 | ld t2, OCTEON_CP2_HSH_IVW+8(a0) | ||
364 | dmtc2 t0, 0x0046 | ||
365 | ld t0, OCTEON_CP2_HSH_IVW+16(a0) | ||
366 | dmtc2 t1, 0x0048 | ||
367 | dmtc2 t2, 0x0049 | ||
368 | b done_restore /* unconditional branch */ | ||
369 | dmtc2 t0, 0x004A | ||
370 | |||
371 | 3: /* this is post-pass1 code */ | ||
372 | ld t2, OCTEON_CP2_HSH_DATW(a0) | ||
373 | ld t0, OCTEON_CP2_HSH_DATW+8(a0) | ||
374 | ld t1, OCTEON_CP2_HSH_DATW+16(a0) | ||
375 | dmtc2 t2, 0x0240 | ||
376 | ld t2, OCTEON_CP2_HSH_DATW+24(a0) | ||
377 | dmtc2 t0, 0x0241 | ||
378 | ld t0, OCTEON_CP2_HSH_DATW+32(a0) | ||
379 | dmtc2 t1, 0x0242 | ||
380 | ld t1, OCTEON_CP2_HSH_DATW+40(a0) | ||
381 | dmtc2 t2, 0x0243 | ||
382 | ld t2, OCTEON_CP2_HSH_DATW+48(a0) | ||
383 | dmtc2 t0, 0x0244 | ||
384 | ld t0, OCTEON_CP2_HSH_DATW+56(a0) | ||
385 | dmtc2 t1, 0x0245 | ||
386 | ld t1, OCTEON_CP2_HSH_DATW+64(a0) | ||
387 | dmtc2 t2, 0x0246 | ||
388 | ld t2, OCTEON_CP2_HSH_DATW+72(a0) | ||
389 | dmtc2 t0, 0x0247 | ||
390 | ld t0, OCTEON_CP2_HSH_DATW+80(a0) | ||
391 | dmtc2 t1, 0x0248 | ||
392 | ld t1, OCTEON_CP2_HSH_DATW+88(a0) | ||
393 | dmtc2 t2, 0x0249 | ||
394 | ld t2, OCTEON_CP2_HSH_DATW+96(a0) | ||
395 | dmtc2 t0, 0x024A | ||
396 | ld t0, OCTEON_CP2_HSH_DATW+104(a0) | ||
397 | dmtc2 t1, 0x024B | ||
398 | ld t1, OCTEON_CP2_HSH_DATW+112(a0) | ||
399 | dmtc2 t2, 0x024C | ||
400 | ld t2, OCTEON_CP2_HSH_IVW(a0) | ||
401 | dmtc2 t0, 0x024D | ||
402 | ld t0, OCTEON_CP2_HSH_IVW+8(a0) | ||
403 | dmtc2 t1, 0x024E | ||
404 | ld t1, OCTEON_CP2_HSH_IVW+16(a0) | ||
405 | dmtc2 t2, 0x0250 | ||
406 | ld t2, OCTEON_CP2_HSH_IVW+24(a0) | ||
407 | dmtc2 t0, 0x0251 | ||
408 | ld t0, OCTEON_CP2_HSH_IVW+32(a0) | ||
409 | dmtc2 t1, 0x0252 | ||
410 | ld t1, OCTEON_CP2_HSH_IVW+40(a0) | ||
411 | dmtc2 t2, 0x0253 | ||
412 | ld t2, OCTEON_CP2_HSH_IVW+48(a0) | ||
413 | dmtc2 t0, 0x0254 | ||
414 | ld t0, OCTEON_CP2_HSH_IVW+56(a0) | ||
415 | dmtc2 t1, 0x0255 | ||
416 | ld t1, OCTEON_CP2_GFM_MULT(a0) | ||
417 | dmtc2 t2, 0x0256 | ||
418 | ld t2, OCTEON_CP2_GFM_MULT+8(a0) | ||
419 | dmtc2 t0, 0x0257 | ||
420 | ld t0, OCTEON_CP2_GFM_POLY(a0) | ||
421 | dmtc2 t1, 0x0258 | ||
422 | ld t1, OCTEON_CP2_GFM_RESULT(a0) | ||
423 | dmtc2 t2, 0x0259 | ||
424 | ld t2, OCTEON_CP2_GFM_RESULT+8(a0) | ||
425 | dmtc2 t0, 0x025E | ||
426 | dmtc2 t1, 0x025A | ||
427 | dmtc2 t2, 0x025B | ||
428 | |||
429 | done_restore: | ||
430 | jr ra | ||
431 | nop | ||
432 | END(octeon_cop2_restore) | ||
433 | .set pop | ||
434 | |||
435 | /* | ||
436 | * void octeon_mult_save() | ||
437 | * sp is assumed to point to a struct pt_regs | ||
438 | * | ||
439 | * NOTE: This is called in SAVE_SOME in stackframe.h. It can only | ||
440 | * safely modify k0 and k1. | ||
441 | */ | ||
442 | .align 7 | ||
443 | .set push | ||
444 | .set noreorder | ||
445 | LEAF(octeon_mult_save) | ||
446 | dmfc0 k0, $9,7 /* CvmCtl register. */ | ||
447 | bbit1 k0, 27, 1f /* Skip CvmCtl[NOMUL] */ | ||
448 | nop | ||
449 | |||
450 | /* Save the multiplier state */ | ||
451 | v3mulu k0, $0, $0 | ||
452 | v3mulu k1, $0, $0 | ||
453 | sd k0, PT_MTP(sp) /* PT_MTP has P0 */ | ||
454 | v3mulu k0, $0, $0 | ||
455 | sd k1, PT_MTP+8(sp) /* PT_MTP+8 has P1 */ | ||
456 | ori k1, $0, 1 | ||
457 | v3mulu k1, k1, $0 | ||
458 | sd k0, PT_MTP+16(sp) /* PT_MTP+16 has P2 */ | ||
459 | v3mulu k0, $0, $0 | ||
460 | sd k1, PT_MPL(sp) /* PT_MPL has MPL0 */ | ||
461 | v3mulu k1, $0, $0 | ||
462 | sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */ | ||
463 | jr ra | ||
464 | sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */ | ||
465 | |||
466 | 1: /* Resume here if CvmCtl[NOMUL] */ | ||
467 | jr ra | ||
468 | END(octeon_mult_save) | ||
469 | .set pop | ||
470 | |||
471 | /* | ||
472 | * void octeon_mult_restore() | ||
473 | * sp is assumed to point to a struct pt_regs | ||
474 | * | ||
475 | * NOTE: This is called in RESTORE_SOME in stackframe.h. | ||
476 | */ | ||
477 | .align 7 | ||
478 | .set push | ||
479 | .set noreorder | ||
480 | LEAF(octeon_mult_restore) | ||
481 | dmfc0 k1, $9,7 /* CvmCtl register. */ | ||
482 | ld v0, PT_MPL(sp) /* MPL0 */ | ||
483 | ld v1, PT_MPL+8(sp) /* MPL1 */ | ||
484 | ld k0, PT_MPL+16(sp) /* MPL2 */ | ||
485 | bbit1 k1, 27, 1f /* Skip CvmCtl[NOMUL] */ | ||
486 | /* Normally falls through, so no time wasted here */ | ||
487 | nop | ||
488 | |||
489 | /* Restore the multiplier state */ | ||
490 | ld k1, PT_MTP+16(sp) /* P2 */ | ||
491 | MTM0 v0 /* MPL0 */ | ||
492 | ld v0, PT_MTP+8(sp) /* P1 */ | ||
493 | MTM1 v1 /* MPL1 */ | ||
494 | ld v1, PT_MTP(sp) /* P0 */ | ||
495 | MTM2 k0 /* MPL2 */ | ||
496 | MTP2 k1 /* P2 */ | ||
497 | MTP1 v0 /* P1 */ | ||
498 | jr ra | ||
499 | MTP0 v1 /* P0 */ | ||
500 | |||
501 | 1: /* Resume here if CvmCtl[NOMUL] */ | ||
502 | jr ra | ||
503 | nop | ||
504 | END(octeon_mult_restore) | ||
505 | .set pop | ||
506 | |||
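The CVMSEG save loop in resume() sizes its copy with "sll t0, 7-LONGLOG-1": on a 64-bit kernel LONGLOG is 3, so the block count from CvmMemCtl[LMEMSZ] is shifted left by 3, and with two longs stored per iteration that works out to exactly 128 bytes per cache block. A plain-C rendering of the same arithmetic, purely for illustration:

        /* C equivalent of the CVMSEG save loop above, assuming a 64-bit kernel
         * (sizeof(long) == 8) and 128-byte cache blocks.  Illustrative only;
         * the real copy runs in assembly inside resume(). */
        static void cvmseg_save_sketch(unsigned long *dst, unsigned long cvmmemctl)
        {
                unsigned long blocks = cvmmemctl & 0x3f;      /* CvmMemCtl[LMEMSZ] */
                unsigned long iters  = blocks << (7 - 3 - 1); /* 2 longs per pass */
                volatile unsigned long *src =
                        (volatile unsigned long *)(long)-32768;  /* CVMSEG base */

                while (iters--) {
                        dst[0] = src[0];
                        dst[1] = src[1];
                        src += 2;
                        dst += 2;
                }
        }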
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c new file mode 100644 index 000000000000..44d01a0a8490 --- /dev/null +++ b/arch/mips/mm/c-octeon.c | |||
@@ -0,0 +1,307 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2005-2007 Cavium Networks | ||
7 | */ | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/sched.h> | ||
11 | #include <linux/mm.h> | ||
12 | #include <linux/bitops.h> | ||
13 | #include <linux/cpu.h> | ||
14 | #include <linux/io.h> | ||
15 | |||
16 | #include <asm/bcache.h> | ||
17 | #include <asm/bootinfo.h> | ||
18 | #include <asm/cacheops.h> | ||
19 | #include <asm/cpu-features.h> | ||
20 | #include <asm/page.h> | ||
21 | #include <asm/pgtable.h> | ||
22 | #include <asm/r4kcache.h> | ||
23 | #include <asm/system.h> | ||
24 | #include <asm/mmu_context.h> | ||
25 | #include <asm/war.h> | ||
26 | |||
27 | #include <asm/octeon/octeon.h> | ||
28 | |||
29 | unsigned long long cache_err_dcache[NR_CPUS]; | ||
30 | |||
31 | /** | ||
32 | * Octeon automatically flushes the dcache on TLB changes, so | ||
33 | * from Linux's viewpoint it acts much like a physically | ||
34 | * tagged cache. No flushing is needed. | ||
35 | * | ||
36 | */ | ||
37 | static void octeon_flush_data_cache_page(unsigned long addr) | ||
38 | { | ||
39 | /* Nothing to do */ | ||
40 | } | ||
41 | |||
42 | static inline void octeon_local_flush_icache(void) | ||
43 | { | ||
44 | asm volatile ("synci 0($0)"); | ||
45 | } | ||
46 | |||
47 | /* | ||
48 | * Flush local I-cache for the specified range. | ||
49 | */ | ||
50 | static void local_octeon_flush_icache_range(unsigned long start, | ||
51 | unsigned long end) | ||
52 | { | ||
53 | octeon_local_flush_icache(); | ||
54 | } | ||
55 | |||
56 | /** | ||
57 | * Flush caches as necessary for all cores affected by a | ||
58 | * vma. If no vma is supplied, all cores are flushed. | ||
59 | * | ||
60 | * @vma: VMA to flush or NULL to flush all icaches. | ||
61 | */ | ||
62 | static void octeon_flush_icache_all_cores(struct vm_area_struct *vma) | ||
63 | { | ||
64 | extern void octeon_send_ipi_single(int cpu, unsigned int action); | ||
65 | #ifdef CONFIG_SMP | ||
66 | int cpu; | ||
67 | cpumask_t mask; | ||
68 | #endif | ||
69 | |||
70 | mb(); | ||
71 | octeon_local_flush_icache(); | ||
72 | #ifdef CONFIG_SMP | ||
73 | preempt_disable(); | ||
74 | cpu = smp_processor_id(); | ||
75 | |||
76 | /* | ||
77 | * If we have a vma structure, we only need to worry about | ||
78 | * cores it has been used on | ||
79 | */ | ||
80 | if (vma) | ||
81 | mask = vma->vm_mm->cpu_vm_mask; | ||
82 | else | ||
83 | mask = cpu_online_map; | ||
84 | cpu_clear(cpu, mask); | ||
85 | for_each_cpu_mask(cpu, mask) | ||
86 | octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH); | ||
87 | |||
88 | preempt_enable(); | ||
89 | #endif | ||
90 | } | ||
91 | |||
92 | |||
93 | /** | ||
94 | * Called to flush the icache on all cores | ||
95 | */ | ||
96 | static void octeon_flush_icache_all(void) | ||
97 | { | ||
98 | octeon_flush_icache_all_cores(NULL); | ||
99 | } | ||
100 | |||
101 | |||
102 | /** | ||
103 | * Called to flush all memory associated with a memory | ||
104 | * context. | ||
105 | * | ||
106 | * @mm: Memory context to flush | ||
107 | */ | ||
108 | static void octeon_flush_cache_mm(struct mm_struct *mm) | ||
109 | { | ||
110 | /* | ||
111 | * According to the R4K version of this file, CPUs without | ||
112 | * dcache aliases don't need to do anything here | ||
113 | */ | ||
114 | } | ||
115 | |||
116 | |||
117 | /** | ||
118 | * Flush a range of kernel addresses out of the icache | ||
119 | * | ||
120 | */ | ||
121 | static void octeon_flush_icache_range(unsigned long start, unsigned long end) | ||
122 | { | ||
123 | octeon_flush_icache_all_cores(NULL); | ||
124 | } | ||
125 | |||
126 | |||
127 | /** | ||
128 | * Flush the icache for a trampoline. These are used for interrupt | ||
129 | * and exception hooking. | ||
130 | * | ||
131 | * @addr: Address to flush | ||
132 | */ | ||
133 | static void octeon_flush_cache_sigtramp(unsigned long addr) | ||
134 | { | ||
135 | struct vm_area_struct *vma; | ||
136 | |||
137 | vma = find_vma(current->mm, addr); | ||
138 | octeon_flush_icache_all_cores(vma); | ||
139 | } | ||
140 | |||
141 | |||
142 | /** | ||
143 | * Flush a range out of a vma | ||
144 | * | ||
145 | * @vma: VMA to flush | ||
146 | * @start: Start of the address range to flush | ||
147 | * @end: End of the address range to flush | ||
148 | */ | ||
149 | static void octeon_flush_cache_range(struct vm_area_struct *vma, | ||
150 | unsigned long start, unsigned long end) | ||
151 | { | ||
152 | if (vma->vm_flags & VM_EXEC) | ||
153 | octeon_flush_icache_all_cores(vma); | ||
154 | } | ||
155 | |||
156 | |||
157 | /** | ||
158 | * Flush a specific page of a vma | ||
159 | * | ||
160 | * @vma: VMA to flush page for | ||
161 | * @page: Page to flush | ||
162 | * @pfn: Page frame number of @page | ||
163 | */ | ||
164 | static void octeon_flush_cache_page(struct vm_area_struct *vma, | ||
165 | unsigned long page, unsigned long pfn) | ||
166 | { | ||
167 | if (vma->vm_flags & VM_EXEC) | ||
168 | octeon_flush_icache_all_cores(vma); | ||
169 | } | ||
170 | |||
171 | |||
172 | /** | ||
173 | * Probe Octeon's caches | ||
174 | * | ||
175 | */ | ||
176 | static void __devinit probe_octeon(void) | ||
177 | { | ||
178 | unsigned long icache_size; | ||
179 | unsigned long dcache_size; | ||
180 | unsigned int config1; | ||
181 | struct cpuinfo_mips *c = ¤t_cpu_data; | ||
182 | |||
183 | switch (c->cputype) { | ||
184 | case CPU_CAVIUM_OCTEON: | ||
185 | config1 = read_c0_config1(); | ||
186 | c->icache.linesz = 2 << ((config1 >> 19) & 7); | ||
187 | c->icache.sets = 64 << ((config1 >> 22) & 7); | ||
188 | c->icache.ways = 1 + ((config1 >> 16) & 7); | ||
189 | c->icache.flags |= MIPS_CACHE_VTAG; | ||
190 | icache_size = | ||
191 | c->icache.sets * c->icache.ways * c->icache.linesz; | ||
192 | c->icache.waybit = ffs(icache_size / c->icache.ways) - 1; | ||
193 | c->dcache.linesz = 128; | ||
194 | if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) | ||
195 | c->dcache.sets = 1; /* CN3XXX has one Dcache set */ | ||
196 | else | ||
197 | c->dcache.sets = 2; /* CN5XXX has two Dcache sets */ | ||
198 | c->dcache.ways = 64; | ||
199 | dcache_size = | ||
200 | c->dcache.sets * c->dcache.ways * c->dcache.linesz; | ||
201 | c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1; | ||
202 | c->options |= MIPS_CPU_PREFETCH; | ||
203 | break; | ||
204 | |||
205 | default: | ||
206 | panic("Unsupported Cavium Networks CPU type\n"); | ||
207 | break; | ||
208 | } | ||
209 | |||
210 | /* compute a couple of other cache variables */ | ||
211 | c->icache.waysize = icache_size / c->icache.ways; | ||
212 | c->dcache.waysize = dcache_size / c->dcache.ways; | ||
213 | |||
214 | c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways); | ||
215 | c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways); | ||
216 | |||
217 | if (smp_processor_id() == 0) { | ||
218 | pr_notice("Primary instruction cache %ldkB, %s, %d way, " | ||
219 | "%d sets, linesize %d bytes.\n", | ||
220 | icache_size >> 10, | ||
221 | cpu_has_vtag_icache ? | ||
222 | "virtually tagged" : "physically tagged", | ||
223 | c->icache.ways, c->icache.sets, c->icache.linesz); | ||
224 | |||
225 | pr_notice("Primary data cache %ldkB, %d-way, %d sets, " | ||
226 | "linesize %d bytes.\n", | ||
227 | dcache_size >> 10, c->dcache.ways, | ||
228 | c->dcache.sets, c->dcache.linesz); | ||
229 | } | ||
230 | } | ||
231 | |||
232 | |||
233 | /** | ||
234 | * Setup the Octeon cache flush routines | ||
235 | * | ||
236 | */ | ||
237 | void __devinit octeon_cache_init(void) | ||
238 | { | ||
239 | extern unsigned long ebase; | ||
240 | extern char except_vec2_octeon; | ||
241 | |||
242 | memcpy((void *)(ebase + 0x100), &except_vec2_octeon, 0x80); | ||
243 | octeon_flush_cache_sigtramp(ebase + 0x100); | ||
244 | |||
245 | probe_octeon(); | ||
246 | |||
247 | shm_align_mask = PAGE_SIZE - 1; | ||
248 | |||
249 | flush_cache_all = octeon_flush_icache_all; | ||
250 | __flush_cache_all = octeon_flush_icache_all; | ||
251 | flush_cache_mm = octeon_flush_cache_mm; | ||
252 | flush_cache_page = octeon_flush_cache_page; | ||
253 | flush_cache_range = octeon_flush_cache_range; | ||
254 | flush_cache_sigtramp = octeon_flush_cache_sigtramp; | ||
255 | flush_icache_all = octeon_flush_icache_all; | ||
256 | flush_data_cache_page = octeon_flush_data_cache_page; | ||
257 | flush_icache_range = octeon_flush_icache_range; | ||
258 | local_flush_icache_range = local_octeon_flush_icache_range; | ||
259 | |||
260 | build_clear_page(); | ||
261 | build_copy_page(); | ||
262 | } | ||
263 | |||
264 | /** | ||
265 | * Handle a cache error exception | ||
266 | */ | ||
267 | |||
268 | static void cache_parity_error_octeon(int non_recoverable) | ||
269 | { | ||
270 | unsigned long coreid = cvmx_get_core_num(); | ||
271 | uint64_t icache_err = read_octeon_c0_icacheerr(); | ||
272 | |||
273 | pr_err("Cache error exception:\n"); | ||
274 | pr_err("cp0_errorepc == %lx\n", read_c0_errorepc()); | ||
275 | if (icache_err & 1) { | ||
276 | pr_err("CacheErr (Icache) == %llx\n", | ||
277 | (unsigned long long)icache_err); | ||
278 | write_octeon_c0_icacheerr(0); | ||
279 | } | ||
280 | if (cache_err_dcache[coreid] & 1) { | ||
281 | pr_err("CacheErr (Dcache) == %llx\n", | ||
282 | (unsigned long long)cache_err_dcache[coreid]); | ||
283 | cache_err_dcache[coreid] = 0; | ||
284 | } | ||
285 | |||
286 | if (non_recoverable) | ||
287 | panic("Can't handle cache error: nested exception"); | ||
288 | } | ||
289 | |||
290 | /** | ||
291 | * Called when the exception is recoverable | ||
292 | */ | ||
293 | |||
294 | asmlinkage void cache_parity_error_octeon_recoverable(void) | ||
295 | { | ||
296 | cache_parity_error_octeon(0); | ||
297 | } | ||
298 | |||
299 | /** | ||
300 | * Called when the exception is not recoverable | ||
301 | */ | ||
302 | |||
303 | asmlinkage void cache_parity_error_octeon_non_recoverable(void) | ||
304 | { | ||
305 | cache_parity_error_octeon(1); | ||
306 | } | ||
307 | |||
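probe_octeon() computes the instruction-cache size from the standard MIPS Config1 fields: line size from IL, sets per way from IS, associativity from IA. A stand-alone example of the same decode is below, with a made-up Config1 value that is not tied to any particular OCTEON model:

        #include <stdio.h>

        /* Same Config1 decode as probe_octeon(): line size from IL (bits 21:19),
         * sets per way from IS (bits 24:22), ways from IA (bits 18:16). */
        static unsigned long icache_bytes(unsigned int config1)
        {
                unsigned int linesz = 2 << ((config1 >> 19) & 7);
                unsigned int sets   = 64 << ((config1 >> 22) & 7);
                unsigned int ways   = 1 + ((config1 >> 16) & 7);

                return (unsigned long)sets * ways * linesz;
        }

        int main(void)
        {
                /* IS = 1 (128 sets), IL = 5 (64-byte lines), IA = 3 (4 ways) -> 32 KB */
                unsigned int config1 = (1u << 22) | (5u << 19) | (3u << 16);

                printf("icache: %lu bytes\n", icache_bytes(config1));
                return 0;
        }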
diff --git a/arch/mips/mm/cex-oct.S b/arch/mips/mm/cex-oct.S new file mode 100644 index 000000000000..3db8553fcd34 --- /dev/null +++ b/arch/mips/mm/cex-oct.S | |||
@@ -0,0 +1,70 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2006 Cavium Networks | ||
7 | * Cache error handler | ||
8 | */ | ||
9 | |||
10 | #include <asm/asm.h> | ||
11 | #include <asm/regdef.h> | ||
12 | #include <asm/mipsregs.h> | ||
13 | #include <asm/stackframe.h> | ||
14 | |||
15 | /* | ||
16 | * Handle cache error. Indicate to the second level handler whether | ||
17 | * the exception is recoverable. | ||
18 | */ | ||
19 | LEAF(except_vec2_octeon) | ||
20 | |||
21 | .set push | ||
22 | .set mips64r2 | ||
23 | .set noreorder | ||
24 | .set noat | ||
25 | |||
26 | |||
27 | /* due to an erratum we need to read the COP0 CacheErr (Dcache) | ||
28 | * before any cache/DRAM access */ | ||
29 | |||
30 | rdhwr k0, $0 /* get core_id */ | ||
31 | PTR_LA k1, cache_err_dcache | ||
32 | sll k0, k0, 3 | ||
33 | PTR_ADDU k1, k0, k1 /* k1 = &cache_err_dcache[core_id] */ | ||
34 | |||
35 | dmfc0 k0, CP0_CACHEERR, 1 | ||
36 | sd k0, (k1) | ||
37 | dmtc0 $0, CP0_CACHEERR, 1 | ||
38 | |||
39 | /* check whether this is a nested exception */ | ||
40 | mfc0 k1, CP0_STATUS | ||
41 | andi k1, k1, ST0_EXL | ||
42 | beqz k1, 1f | ||
43 | nop | ||
44 | j cache_parity_error_octeon_non_recoverable | ||
45 | nop | ||
46 | |||
47 | /* exception is recoverable */ | ||
48 | 1: j handle_cache_err | ||
49 | nop | ||
50 | |||
51 | .set pop | ||
52 | END(except_vec2_octeon) | ||
53 | |||
54 | /* We need to jump to handle_cache_err so that the previous handler | ||
55 | * can fit within 0x80 bytes. We also move from 0xFFFFFFFFAXXXXXXX | ||
56 | * space (uncached) to the 0xFFFFFFFF8XXXXXXX space (cached). */ | ||
57 | LEAF(handle_cache_err) | ||
58 | .set push | ||
59 | .set noreorder | ||
60 | .set noat | ||
61 | |||
62 | SAVE_ALL | ||
63 | KMODE | ||
64 | jal cache_parity_error_octeon_recoverable | ||
65 | nop | ||
66 | j ret_from_exception | ||
67 | nop | ||
68 | |||
69 | .set pop | ||
70 | END(handle_cache_err) | ||
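The "sll k0, k0, 3" in except_vec2_octeon turns the hardware core number into a byte offset because each cache_err_dcache[] slot is a 64-bit value; the C handler in c-octeon.c later tests and clears that per-core slot. The same address computation in C, purely for illustration:

        extern unsigned long long cache_err_dcache[];   /* defined in c-octeon.c */

        /* Illustrative only: same address math as the assembly above. */
        static unsigned long long *cache_err_slot(unsigned int core_id)
        {
                /* core_id << 3 is a byte offset; 8 bytes per 64-bit array entry. */
                return (unsigned long long *)((char *)cache_err_dcache + (core_id << 3));
                /* Equivalent to &cache_err_dcache[core_id]. */
        }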