aboutsummaryrefslogtreecommitdiffstats
path: root/arch/c6x/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'arch/c6x/kernel')
-rw-r--r--arch/c6x/kernel/Makefile12
-rw-r--r--arch/c6x/kernel/asm-offsets.c123
-rw-r--r--arch/c6x/kernel/c6x_ksyms.c66
-rw-r--r--arch/c6x/kernel/devicetree.c53
-rw-r--r--arch/c6x/kernel/dma.c153
-rw-r--r--arch/c6x/kernel/entry.S803
-rw-r--r--arch/c6x/kernel/head.S84
-rw-r--r--arch/c6x/kernel/irq.c728
-rw-r--r--arch/c6x/kernel/module.c123
-rw-r--r--arch/c6x/kernel/process.c265
-rw-r--r--arch/c6x/kernel/ptrace.c187
-rw-r--r--arch/c6x/kernel/setup.c510
-rw-r--r--arch/c6x/kernel/signal.c377
-rw-r--r--arch/c6x/kernel/soc.c91
-rw-r--r--arch/c6x/kernel/switch_to.S74
-rw-r--r--arch/c6x/kernel/sys_c6x.c74
-rw-r--r--arch/c6x/kernel/time.c65
-rw-r--r--arch/c6x/kernel/traps.c423
-rw-r--r--arch/c6x/kernel/vectors.S81
-rw-r--r--arch/c6x/kernel/vmlinux.lds.S162
20 files changed, 4454 insertions, 0 deletions
diff --git a/arch/c6x/kernel/Makefile b/arch/c6x/kernel/Makefile
new file mode 100644
index 000000000000..580a515a9443
--- /dev/null
+++ b/arch/c6x/kernel/Makefile
@@ -0,0 +1,12 @@
1#
2# Makefile for arch/c6x/kernel/
3#
4
5extra-y := head.o vmlinux.lds
6
7obj-y := process.o traps.o irq.o signal.o ptrace.o
8obj-y += setup.o sys_c6x.o time.o devicetree.o
9obj-y += switch_to.o entry.o vectors.o c6x_ksyms.o
10obj-y += soc.o dma.o
11
12obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/c6x/kernel/asm-offsets.c b/arch/c6x/kernel/asm-offsets.c
new file mode 100644
index 000000000000..759ad6d207b6
--- /dev/null
+++ b/arch/c6x/kernel/asm-offsets.c
@@ -0,0 +1,123 @@
1/*
2 * Generate definitions needed by assembly language modules.
3 * This code generates raw asm output which is post-processed
4 * to extract and format the required data.
5 */
6
7#include <linux/sched.h>
8#include <linux/thread_info.h>
9#include <asm/procinfo.h>
10#include <linux/kbuild.h>
11#include <linux/unistd.h>
12
13void foo(void)
14{
15 OFFSET(REGS_A16, pt_regs, a16);
16 OFFSET(REGS_A17, pt_regs, a17);
17 OFFSET(REGS_A18, pt_regs, a18);
18 OFFSET(REGS_A19, pt_regs, a19);
19 OFFSET(REGS_A20, pt_regs, a20);
20 OFFSET(REGS_A21, pt_regs, a21);
21 OFFSET(REGS_A22, pt_regs, a22);
22 OFFSET(REGS_A23, pt_regs, a23);
23 OFFSET(REGS_A24, pt_regs, a24);
24 OFFSET(REGS_A25, pt_regs, a25);
25 OFFSET(REGS_A26, pt_regs, a26);
26 OFFSET(REGS_A27, pt_regs, a27);
27 OFFSET(REGS_A28, pt_regs, a28);
28 OFFSET(REGS_A29, pt_regs, a29);
29 OFFSET(REGS_A30, pt_regs, a30);
30 OFFSET(REGS_A31, pt_regs, a31);
31
32 OFFSET(REGS_B16, pt_regs, b16);
33 OFFSET(REGS_B17, pt_regs, b17);
34 OFFSET(REGS_B18, pt_regs, b18);
35 OFFSET(REGS_B19, pt_regs, b19);
36 OFFSET(REGS_B20, pt_regs, b20);
37 OFFSET(REGS_B21, pt_regs, b21);
38 OFFSET(REGS_B22, pt_regs, b22);
39 OFFSET(REGS_B23, pt_regs, b23);
40 OFFSET(REGS_B24, pt_regs, b24);
41 OFFSET(REGS_B25, pt_regs, b25);
42 OFFSET(REGS_B26, pt_regs, b26);
43 OFFSET(REGS_B27, pt_regs, b27);
44 OFFSET(REGS_B28, pt_regs, b28);
45 OFFSET(REGS_B29, pt_regs, b29);
46 OFFSET(REGS_B30, pt_regs, b30);
47 OFFSET(REGS_B31, pt_regs, b31);
48
49 OFFSET(REGS_A0, pt_regs, a0);
50 OFFSET(REGS_A1, pt_regs, a1);
51 OFFSET(REGS_A2, pt_regs, a2);
52 OFFSET(REGS_A3, pt_regs, a3);
53 OFFSET(REGS_A4, pt_regs, a4);
54 OFFSET(REGS_A5, pt_regs, a5);
55 OFFSET(REGS_A6, pt_regs, a6);
56 OFFSET(REGS_A7, pt_regs, a7);
57 OFFSET(REGS_A8, pt_regs, a8);
58 OFFSET(REGS_A9, pt_regs, a9);
59 OFFSET(REGS_A10, pt_regs, a10);
60 OFFSET(REGS_A11, pt_regs, a11);
61 OFFSET(REGS_A12, pt_regs, a12);
62 OFFSET(REGS_A13, pt_regs, a13);
63 OFFSET(REGS_A14, pt_regs, a14);
64 OFFSET(REGS_A15, pt_regs, a15);
65
66 OFFSET(REGS_B0, pt_regs, b0);
67 OFFSET(REGS_B1, pt_regs, b1);
68 OFFSET(REGS_B2, pt_regs, b2);
69 OFFSET(REGS_B3, pt_regs, b3);
70 OFFSET(REGS_B4, pt_regs, b4);
71 OFFSET(REGS_B5, pt_regs, b5);
72 OFFSET(REGS_B6, pt_regs, b6);
73 OFFSET(REGS_B7, pt_regs, b7);
74 OFFSET(REGS_B8, pt_regs, b8);
75 OFFSET(REGS_B9, pt_regs, b9);
76 OFFSET(REGS_B10, pt_regs, b10);
77 OFFSET(REGS_B11, pt_regs, b11);
78 OFFSET(REGS_B12, pt_regs, b12);
79 OFFSET(REGS_B13, pt_regs, b13);
80 OFFSET(REGS_DP, pt_regs, dp);
81 OFFSET(REGS_SP, pt_regs, sp);
82
83 OFFSET(REGS_TSR, pt_regs, tsr);
84 OFFSET(REGS_ORIG_A4, pt_regs, orig_a4);
85
86 DEFINE(REGS__END, sizeof(struct pt_regs));
87 BLANK();
88
89 OFFSET(THREAD_PC, thread_struct, pc);
90 OFFSET(THREAD_B15_14, thread_struct, b15_14);
91 OFFSET(THREAD_A15_14, thread_struct, a15_14);
92 OFFSET(THREAD_B13_12, thread_struct, b13_12);
93 OFFSET(THREAD_A13_12, thread_struct, a13_12);
94 OFFSET(THREAD_B11_10, thread_struct, b11_10);
95 OFFSET(THREAD_A11_10, thread_struct, a11_10);
96 OFFSET(THREAD_RICL_ICL, thread_struct, ricl_icl);
97 BLANK();
98
99 OFFSET(TASK_STATE, task_struct, state);
100 BLANK();
101
102 OFFSET(THREAD_INFO_FLAGS, thread_info, flags);
103 OFFSET(THREAD_INFO_PREEMPT_COUNT, thread_info, preempt_count);
104 BLANK();
105
106 /* These would be unneccessary if we ran asm files
107 * through the preprocessor.
108 */
109 DEFINE(KTHREAD_SIZE, THREAD_SIZE);
110 DEFINE(KTHREAD_SHIFT, THREAD_SHIFT);
111 DEFINE(KTHREAD_START_SP, THREAD_START_SP);
112 DEFINE(ENOSYS_, ENOSYS);
113 DEFINE(NR_SYSCALLS_, __NR_syscalls);
114
115 DEFINE(_TIF_SYSCALL_TRACE, (1<<TIF_SYSCALL_TRACE));
116 DEFINE(_TIF_NOTIFY_RESUME, (1<<TIF_NOTIFY_RESUME));
117 DEFINE(_TIF_SIGPENDING, (1<<TIF_SIGPENDING));
118 DEFINE(_TIF_NEED_RESCHED, (1<<TIF_NEED_RESCHED));
119 DEFINE(_TIF_POLLING_NRFLAG, (1<<TIF_POLLING_NRFLAG));
120
121 DEFINE(_TIF_ALLWORK_MASK, TIF_ALLWORK_MASK);
122 DEFINE(_TIF_WORK_MASK, TIF_WORK_MASK);
123}
diff --git a/arch/c6x/kernel/c6x_ksyms.c b/arch/c6x/kernel/c6x_ksyms.c
new file mode 100644
index 000000000000..0ba3e0bba3e1
--- /dev/null
+++ b/arch/c6x/kernel/c6x_ksyms.c
@@ -0,0 +1,66 @@
1/*
2 * Port on Texas Instruments TMS320C6x architecture
3 *
4 * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
5 * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12#include <linux/module.h>
13#include <asm/checksum.h>
14#include <linux/io.h>
15
16/*
17 * libgcc functions - used internally by the compiler...
18 */
19extern int __c6xabi_divi(int dividend, int divisor);
20EXPORT_SYMBOL(__c6xabi_divi);
21
22extern unsigned __c6xabi_divu(unsigned dividend, unsigned divisor);
23EXPORT_SYMBOL(__c6xabi_divu);
24
25extern int __c6xabi_remi(int dividend, int divisor);
26EXPORT_SYMBOL(__c6xabi_remi);
27
28extern unsigned __c6xabi_remu(unsigned dividend, unsigned divisor);
29EXPORT_SYMBOL(__c6xabi_remu);
30
31extern int __c6xabi_divremi(int dividend, int divisor);
32EXPORT_SYMBOL(__c6xabi_divremi);
33
34extern unsigned __c6xabi_divremu(unsigned dividend, unsigned divisor);
35EXPORT_SYMBOL(__c6xabi_divremu);
36
37extern unsigned long long __c6xabi_mpyll(unsigned long long src1,
38 unsigned long long src2);
39EXPORT_SYMBOL(__c6xabi_mpyll);
40
41extern long long __c6xabi_negll(long long src);
42EXPORT_SYMBOL(__c6xabi_negll);
43
44extern unsigned long long __c6xabi_llshl(unsigned long long src1, uint src2);
45EXPORT_SYMBOL(__c6xabi_llshl);
46
47extern long long __c6xabi_llshr(long long src1, uint src2);
48EXPORT_SYMBOL(__c6xabi_llshr);
49
50extern unsigned long long __c6xabi_llshru(unsigned long long src1, uint src2);
51EXPORT_SYMBOL(__c6xabi_llshru);
52
53extern void __c6xabi_strasgi(int *dst, const int *src, unsigned cnt);
54EXPORT_SYMBOL(__c6xabi_strasgi);
55
56extern void __c6xabi_push_rts(void);
57EXPORT_SYMBOL(__c6xabi_push_rts);
58
59extern void __c6xabi_pop_rts(void);
60EXPORT_SYMBOL(__c6xabi_pop_rts);
61
62extern void __c6xabi_strasgi_64plus(int *dst, const int *src, unsigned cnt);
63EXPORT_SYMBOL(__c6xabi_strasgi_64plus);
64
65/* lib functions */
66EXPORT_SYMBOL(memcpy);
diff --git a/arch/c6x/kernel/devicetree.c b/arch/c6x/kernel/devicetree.c
new file mode 100644
index 000000000000..bdb56f09d0ac
--- /dev/null
+++ b/arch/c6x/kernel/devicetree.c
@@ -0,0 +1,53 @@
1/*
2 * Architecture specific OF callbacks.
3 *
4 * Copyright (C) 2011 Texas Instruments Incorporated
5 * Author: Mark Salter <msalter@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12#include <linux/init.h>
13#include <linux/of.h>
14#include <linux/of_fdt.h>
15#include <linux/initrd.h>
16#include <linux/memblock.h>
17
18void __init early_init_devtree(void *params)
19{
20 /* Setup flat device-tree pointer */
21 initial_boot_params = params;
22
23 /* Retrieve various informations from the /chosen node of the
24 * device-tree, including the platform type, initrd location and
25 * size and more ...
26 */
27 of_scan_flat_dt(early_init_dt_scan_chosen, c6x_command_line);
28
29 /* Scan memory nodes and rebuild MEMBLOCKs */
30 of_scan_flat_dt(early_init_dt_scan_root, NULL);
31 of_scan_flat_dt(early_init_dt_scan_memory, NULL);
32}
33
34
35#ifdef CONFIG_BLK_DEV_INITRD
36void __init early_init_dt_setup_initrd_arch(unsigned long start,
37 unsigned long end)
38{
39 initrd_start = (unsigned long)__va(start);
40 initrd_end = (unsigned long)__va(end);
41 initrd_below_start_ok = 1;
42}
43#endif
44
45void __init early_init_dt_add_memory_arch(u64 base, u64 size)
46{
47 c6x_add_memory(base, size);
48}
49
50void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
51{
52 return __va(memblock_alloc(size, align));
53}
diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c
new file mode 100644
index 000000000000..ab7b12de144d
--- /dev/null
+++ b/arch/c6x/kernel/dma.c
@@ -0,0 +1,153 @@
1/*
2 * Copyright (C) 2011 Texas Instruments Incorporated
3 * Author: Mark Salter <msalter@redhat.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <linux/module.h>
10#include <linux/dma-mapping.h>
11#include <linux/mm.h>
12#include <linux/mm_types.h>
13#include <linux/scatterlist.h>
14
15#include <asm/cacheflush.h>
16
17static void c6x_dma_sync(dma_addr_t handle, size_t size,
18 enum dma_data_direction dir)
19{
20 unsigned long paddr = handle;
21
22 BUG_ON(!valid_dma_direction(dir));
23
24 switch (dir) {
25 case DMA_FROM_DEVICE:
26 L2_cache_block_invalidate(paddr, paddr + size);
27 break;
28 case DMA_TO_DEVICE:
29 L2_cache_block_writeback(paddr, paddr + size);
30 break;
31 case DMA_BIDIRECTIONAL:
32 L2_cache_block_writeback_invalidate(paddr, paddr + size);
33 break;
34 default:
35 break;
36 }
37}
38
39dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
40 enum dma_data_direction dir)
41{
42 dma_addr_t addr = virt_to_phys(ptr);
43
44 c6x_dma_sync(addr, size, dir);
45
46 debug_dma_map_page(dev, virt_to_page(ptr),
47 (unsigned long)ptr & ~PAGE_MASK, size,
48 dir, addr, true);
49 return addr;
50}
51EXPORT_SYMBOL(dma_map_single);
52
53
54void dma_unmap_single(struct device *dev, dma_addr_t handle,
55 size_t size, enum dma_data_direction dir)
56{
57 c6x_dma_sync(handle, size, dir);
58
59 debug_dma_unmap_page(dev, handle, size, dir, true);
60}
61EXPORT_SYMBOL(dma_unmap_single);
62
63
64int dma_map_sg(struct device *dev, struct scatterlist *sglist,
65 int nents, enum dma_data_direction dir)
66{
67 struct scatterlist *sg;
68 int i;
69
70 for_each_sg(sglist, sg, nents, i)
71 sg->dma_address = dma_map_single(dev, sg_virt(sg), sg->length,
72 dir);
73
74 debug_dma_map_sg(dev, sglist, nents, nents, dir);
75
76 return nents;
77}
78EXPORT_SYMBOL(dma_map_sg);
79
80
81void dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
82 int nents, enum dma_data_direction dir)
83{
84 struct scatterlist *sg;
85 int i;
86
87 for_each_sg(sglist, sg, nents, i)
88 dma_unmap_single(dev, sg_dma_address(sg), sg->length, dir);
89
90 debug_dma_unmap_sg(dev, sglist, nents, dir);
91}
92EXPORT_SYMBOL(dma_unmap_sg);
93
94void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
95 size_t size, enum dma_data_direction dir)
96{
97 c6x_dma_sync(handle, size, dir);
98
99 debug_dma_sync_single_for_cpu(dev, handle, size, dir);
100}
101EXPORT_SYMBOL(dma_sync_single_for_cpu);
102
103
104void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
105 size_t size, enum dma_data_direction dir)
106{
107 c6x_dma_sync(handle, size, dir);
108
109 debug_dma_sync_single_for_device(dev, handle, size, dir);
110}
111EXPORT_SYMBOL(dma_sync_single_for_device);
112
113
114void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
115 int nents, enum dma_data_direction dir)
116{
117 struct scatterlist *sg;
118 int i;
119
120 for_each_sg(sglist, sg, nents, i)
121 dma_sync_single_for_cpu(dev, sg_dma_address(sg),
122 sg->length, dir);
123
124 debug_dma_sync_sg_for_cpu(dev, sglist, nents, dir);
125}
126EXPORT_SYMBOL(dma_sync_sg_for_cpu);
127
128
129void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
130 int nents, enum dma_data_direction dir)
131{
132 struct scatterlist *sg;
133 int i;
134
135 for_each_sg(sglist, sg, nents, i)
136 dma_sync_single_for_device(dev, sg_dma_address(sg),
137 sg->length, dir);
138
139 debug_dma_sync_sg_for_device(dev, sglist, nents, dir);
140}
141EXPORT_SYMBOL(dma_sync_sg_for_device);
142
143
144/* Number of entries preallocated for DMA-API debugging */
145#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
146
147static int __init dma_init(void)
148{
149 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
150
151 return 0;
152}
153fs_initcall(dma_init);
diff --git a/arch/c6x/kernel/entry.S b/arch/c6x/kernel/entry.S
new file mode 100644
index 000000000000..3e977ccda827
--- /dev/null
+++ b/arch/c6x/kernel/entry.S
@@ -0,0 +1,803 @@
1;
2; Port on Texas Instruments TMS320C6x architecture
3;
4; Copyright (C) 2004-2011 Texas Instruments Incorporated
5; Author: Aurelien Jacquiot (aurelien.jacquiot@virtuallogix.com)
6; Updated for 2.6.34: Mark Salter <msalter@redhat.com>
7;
8; This program is free software; you can redistribute it and/or modify
9; it under the terms of the GNU General Public License version 2 as
10; published by the Free Software Foundation.
11;
12
13#include <linux/sys.h>
14#include <linux/linkage.h>
15#include <asm/thread_info.h>
16#include <asm/asm-offsets.h>
17#include <asm/unistd.h>
18#include <asm/errno.h>
19
20; Registers naming
21#define DP B14
22#define SP B15
23
24#ifndef CONFIG_PREEMPT
25#define resume_kernel restore_all
26#endif
27
28 .altmacro
29
30 .macro MASK_INT reg
31 MVC .S2 CSR,reg
32 CLR .S2 reg,0,0,reg
33 MVC .S2 reg,CSR
34 .endm
35
36 .macro UNMASK_INT reg
37 MVC .S2 CSR,reg
38 SET .S2 reg,0,0,reg
39 MVC .S2 reg,CSR
40 .endm
41
42 .macro GET_THREAD_INFO reg
43 SHR .S1X SP,THREAD_SHIFT,reg
44 SHL .S1 reg,THREAD_SHIFT,reg
45 .endm
46
47 ;;
48 ;; This defines the normal kernel pt_regs layout.
49 ;;
50 .macro SAVE_ALL __rp __tsr
51 STW .D2T2 B0,*SP--[2] ; save original B0
52 MVKL .S2 current_ksp,B0
53 MVKH .S2 current_ksp,B0
54 LDW .D2T2 *B0,B1 ; KSP
55
56 NOP 3
57 STW .D2T2 B1,*+SP[1] ; save original B1
58 XOR .D2 SP,B1,B0 ; (SP ^ KSP)
59 LDW .D2T2 *+SP[1],B1 ; restore B0/B1
60 LDW .D2T2 *++SP[2],B0
61 SHR .S2 B0,THREAD_SHIFT,B0 ; 0 if already using kstack
62 [B0] STDW .D2T2 SP:DP,*--B1[1] ; user: save user sp/dp kstack
63 [B0] MV .S2 B1,SP ; and switch to kstack
64||[!B0] STDW .D2T2 SP:DP,*--SP[1] ; kernel: save on current stack
65
66 SUBAW .D2 SP,2,SP
67
68 ADD .D1X SP,-8,A15
69 || STDW .D2T1 A15:A14,*SP--[16] ; save A15:A14
70
71 STDW .D2T2 B13:B12,*SP--[1]
72 || STDW .D1T1 A13:A12,*A15--[1]
73 || MVC .S2 __rp,B13
74
75 STDW .D2T2 B11:B10,*SP--[1]
76 || STDW .D1T1 A11:A10,*A15--[1]
77 || MVC .S2 CSR,B12
78
79 STDW .D2T2 B9:B8,*SP--[1]
80 || STDW .D1T1 A9:A8,*A15--[1]
81 || MVC .S2 RILC,B11
82 STDW .D2T2 B7:B6,*SP--[1]
83 || STDW .D1T1 A7:A6,*A15--[1]
84 || MVC .S2 ILC,B10
85
86 STDW .D2T2 B5:B4,*SP--[1]
87 || STDW .D1T1 A5:A4,*A15--[1]
88
89 STDW .D2T2 B3:B2,*SP--[1]
90 || STDW .D1T1 A3:A2,*A15--[1]
91 || MVC .S2 __tsr,B5
92
93 STDW .D2T2 B1:B0,*SP--[1]
94 || STDW .D1T1 A1:A0,*A15--[1]
95 || MV .S1X B5,A5
96
97 STDW .D2T2 B31:B30,*SP--[1]
98 || STDW .D1T1 A31:A30,*A15--[1]
99 STDW .D2T2 B29:B28,*SP--[1]
100 || STDW .D1T1 A29:A28,*A15--[1]
101 STDW .D2T2 B27:B26,*SP--[1]
102 || STDW .D1T1 A27:A26,*A15--[1]
103 STDW .D2T2 B25:B24,*SP--[1]
104 || STDW .D1T1 A25:A24,*A15--[1]
105 STDW .D2T2 B23:B22,*SP--[1]
106 || STDW .D1T1 A23:A22,*A15--[1]
107 STDW .D2T2 B21:B20,*SP--[1]
108 || STDW .D1T1 A21:A20,*A15--[1]
109 STDW .D2T2 B19:B18,*SP--[1]
110 || STDW .D1T1 A19:A18,*A15--[1]
111 STDW .D2T2 B17:B16,*SP--[1]
112 || STDW .D1T1 A17:A16,*A15--[1]
113
114 STDW .D2T2 B13:B12,*SP--[1] ; save PC and CSR
115
116 STDW .D2T2 B11:B10,*SP--[1] ; save RILC and ILC
117 STDW .D2T1 A5:A4,*SP--[1] ; save TSR and orig A4
118
119 ;; We left an unused word on the stack just above pt_regs.
120 ;; It is used to save whether or not this frame is due to
121 ;; a syscall. It is cleared here, but the syscall handler
122 ;; sets it to a non-zero value.
123 MVK .L2 0,B1
124 STW .D2T2 B1,*+SP(REGS__END+8) ; clear syscall flag
125 .endm
126
127 .macro RESTORE_ALL __rp __tsr
128 LDDW .D2T2 *++SP[1],B9:B8 ; get TSR (B9)
129 LDDW .D2T2 *++SP[1],B11:B10 ; get RILC (B11) and ILC (B10)
130 LDDW .D2T2 *++SP[1],B13:B12 ; get PC (B13) and CSR (B12)
131
132 ADDAW .D1X SP,30,A15
133
134 LDDW .D1T1 *++A15[1],A17:A16
135 || LDDW .D2T2 *++SP[1],B17:B16
136 LDDW .D1T1 *++A15[1],A19:A18
137 || LDDW .D2T2 *++SP[1],B19:B18
138 LDDW .D1T1 *++A15[1],A21:A20
139 || LDDW .D2T2 *++SP[1],B21:B20
140 LDDW .D1T1 *++A15[1],A23:A22
141 || LDDW .D2T2 *++SP[1],B23:B22
142 LDDW .D1T1 *++A15[1],A25:A24
143 || LDDW .D2T2 *++SP[1],B25:B24
144 LDDW .D1T1 *++A15[1],A27:A26
145 || LDDW .D2T2 *++SP[1],B27:B26
146 LDDW .D1T1 *++A15[1],A29:A28
147 || LDDW .D2T2 *++SP[1],B29:B28
148 LDDW .D1T1 *++A15[1],A31:A30
149 || LDDW .D2T2 *++SP[1],B31:B30
150
151 LDDW .D1T1 *++A15[1],A1:A0
152 || LDDW .D2T2 *++SP[1],B1:B0
153
154 LDDW .D1T1 *++A15[1],A3:A2
155 || LDDW .D2T2 *++SP[1],B3:B2
156 || MVC .S2 B9,__tsr
157 LDDW .D1T1 *++A15[1],A5:A4
158 || LDDW .D2T2 *++SP[1],B5:B4
159 || MVC .S2 B11,RILC
160 LDDW .D1T1 *++A15[1],A7:A6
161 || LDDW .D2T2 *++SP[1],B7:B6
162 || MVC .S2 B10,ILC
163
164 LDDW .D1T1 *++A15[1],A9:A8
165 || LDDW .D2T2 *++SP[1],B9:B8
166 || MVC .S2 B13,__rp
167
168 LDDW .D1T1 *++A15[1],A11:A10
169 || LDDW .D2T2 *++SP[1],B11:B10
170 || MVC .S2 B12,CSR
171
172 LDDW .D1T1 *++A15[1],A13:A12
173 || LDDW .D2T2 *++SP[1],B13:B12
174
175 MV .D2X A15,SP
176 || MVKL .S1 current_ksp,A15
177 MVKH .S1 current_ksp,A15
178 || ADDAW .D1X SP,6,A14
179 STW .D1T1 A14,*A15 ; save kernel stack pointer
180
181 LDDW .D2T1 *++SP[1],A15:A14
182
183 B .S2 __rp ; return from interruption
184 LDDW .D2T2 *+SP[1],SP:DP
185 NOP 4
186 .endm
187
188 .section .text
189
190 ;;
191 ;; Jump to schedule() then return to ret_from_exception
192 ;;
193_reschedule:
194#ifdef CONFIG_C6X_BIG_KERNEL
195 MVKL .S1 schedule,A0
196 MVKH .S1 schedule,A0
197 B .S2X A0
198#else
199 B .S1 schedule
200#endif
201 ADDKPC .S2 ret_from_exception,B3,4
202
203 ;;
204 ;; Called before syscall handler when process is being debugged
205 ;;
206tracesys_on:
207#ifdef CONFIG_C6X_BIG_KERNEL
208 MVKL .S1 syscall_trace_entry,A0
209 MVKH .S1 syscall_trace_entry,A0
210 B .S2X A0
211#else
212 B .S1 syscall_trace_entry
213#endif
214 ADDKPC .S2 ret_from_syscall_trace,B3,3
215 ADD .S1X 8,SP,A4
216
217ret_from_syscall_trace:
218 ;; tracing returns (possibly new) syscall number
219 MV .D2X A4,B0
220 || MVK .S2 __NR_syscalls,B1
221 CMPLTU .L2 B0,B1,B1
222
223 [!B1] BNOP .S2 ret_from_syscall_function,5
224 || MVK .S1 -ENOSYS,A4
225
226 ;; reload syscall args from (possibly modified) stack frame
227 ;; and get syscall handler addr from sys_call_table:
228 LDW .D2T2 *+SP(REGS_B4+8),B4
229 || MVKL .S2 sys_call_table,B1
230 LDW .D2T1 *+SP(REGS_A6+8),A6
231 || MVKH .S2 sys_call_table,B1
232 LDW .D2T2 *+B1[B0],B0
233 || MVKL .S2 ret_from_syscall_function,B3
234 LDW .D2T2 *+SP(REGS_B6+8),B6
235 || MVKH .S2 ret_from_syscall_function,B3
236 LDW .D2T1 *+SP(REGS_A8+8),A8
237 LDW .D2T2 *+SP(REGS_B8+8),B8
238 NOP
239 ; B0 = sys_call_table[__NR_*]
240 BNOP .S2 B0,5 ; branch to syscall handler
241 || LDW .D2T1 *+SP(REGS_ORIG_A4+8),A4
242
243syscall_exit_work:
244 AND .D1 _TIF_SYSCALL_TRACE,A2,A0
245 [!A0] BNOP .S1 work_pending,5
246 [A0] B .S2 syscall_trace_exit
247 ADDKPC .S2 resume_userspace,B3,1
248 MVC .S2 CSR,B1
249 SET .S2 B1,0,0,B1
250 MVC .S2 B1,CSR ; enable ints
251
252work_pending:
253 AND .D1 _TIF_NEED_RESCHED,A2,A0
254 [!A0] BNOP .S1 work_notifysig,5
255
256work_resched:
257#ifdef CONFIG_C6X_BIG_KERNEL
258 MVKL .S1 schedule,A1
259 MVKH .S1 schedule,A1
260 B .S2X A1
261#else
262 B .S2 schedule
263#endif
264 ADDKPC .S2 work_rescheduled,B3,4
265work_rescheduled:
266 ;; make sure we don't miss an interrupt setting need_resched or
267 ;; sigpending between sampling and the rti
268 MASK_INT B2
269 GET_THREAD_INFO A12
270 LDW .D1T1 *+A12(THREAD_INFO_FLAGS),A2
271 MVK .S1 _TIF_WORK_MASK,A1
272 MVK .S1 _TIF_NEED_RESCHED,A3
273 NOP 2
274 AND .D1 A1,A2,A0
275 || AND .S1 A3,A2,A1
276 [!A0] BNOP .S1 restore_all,5
277 [A1] BNOP .S1 work_resched,5
278
279work_notifysig:
280 B .S2 do_notify_resume
281 LDW .D2T1 *+SP(REGS__END+8),A6 ; syscall flag
282 ADDKPC .S2 resume_userspace,B3,1
283 ADD .S1X 8,SP,A4 ; pt_regs pointer is first arg
284 MV .D2X A2,B4 ; thread_info flags is second arg
285
286 ;;
287 ;; On C64x+, the return way from exception and interrupt
288 ;; is a little bit different
289 ;;
290ENTRY(ret_from_exception)
291#ifdef CONFIG_PREEMPT
292 MASK_INT B2
293#endif
294
295ENTRY(ret_from_interrupt)
296 ;;
297 ;; Check if we are comming from user mode.
298 ;;
299 LDW .D2T2 *+SP(REGS_TSR+8),B0
300 MVK .S2 0x40,B1
301 NOP 3
302 AND .D2 B0,B1,B0
303 [!B0] BNOP .S2 resume_kernel,5
304
305resume_userspace:
306 ;; make sure we don't miss an interrupt setting need_resched or
307 ;; sigpending between sampling and the rti
308 MASK_INT B2
309 GET_THREAD_INFO A12
310 LDW .D1T1 *+A12(THREAD_INFO_FLAGS),A2
311 MVK .S1 _TIF_WORK_MASK,A1
312 MVK .S1 _TIF_NEED_RESCHED,A3
313 NOP 2
314 AND .D1 A1,A2,A0
315 [A0] BNOP .S1 work_pending,5
316 BNOP .S1 restore_all,5
317
318 ;;
319 ;; System call handling
320 ;; B0 = syscall number (in sys_call_table)
321 ;; A4,B4,A6,B6,A8,B8 = arguments of the syscall function
322 ;; A4 is the return value register
323 ;;
324system_call_saved:
325 MVK .L2 1,B2
326 STW .D2T2 B2,*+SP(REGS__END+8) ; set syscall flag
327 MVC .S2 B2,ECR ; ack the software exception
328
329 UNMASK_INT B2 ; re-enable global IT
330
331system_call_saved_noack:
332 ;; Check system call number
333 MVK .S2 __NR_syscalls,B1
334#ifdef CONFIG_C6X_BIG_KERNEL
335 || MVKL .S1 sys_ni_syscall,A0
336#endif
337 CMPLTU .L2 B0,B1,B1
338#ifdef CONFIG_C6X_BIG_KERNEL
339 || MVKH .S1 sys_ni_syscall,A0
340#endif
341
342 ;; Check for ptrace
343 GET_THREAD_INFO A12
344
345#ifdef CONFIG_C6X_BIG_KERNEL
346 [!B1] B .S2X A0
347#else
348 [!B1] B .S2 sys_ni_syscall
349#endif
350 [!B1] ADDKPC .S2 ret_from_syscall_function,B3,4
351
352 ;; Get syscall handler addr from sys_call_table
353 ;; call tracesys_on or call syscall handler
354 LDW .D1T1 *+A12(THREAD_INFO_FLAGS),A2
355 || MVKL .S2 sys_call_table,B1
356 MVKH .S2 sys_call_table,B1
357 LDW .D2T2 *+B1[B0],B0
358 NOP 2
359 ; A2 = thread_info flags
360 AND .D1 _TIF_SYSCALL_TRACE,A2,A2
361 [A2] BNOP .S1 tracesys_on,5
362 ;; B0 = _sys_call_table[__NR_*]
363 B .S2 B0
364 ADDKPC .S2 ret_from_syscall_function,B3,4
365
366ret_from_syscall_function:
367 STW .D2T1 A4,*+SP(REGS_A4+8) ; save return value in A4
368 ; original A4 is in orig_A4
369syscall_exit:
370 ;; make sure we don't miss an interrupt setting need_resched or
371 ;; sigpending between sampling and the rti
372 MASK_INT B2
373 LDW .D1T1 *+A12(THREAD_INFO_FLAGS),A2
374 MVK .S1 _TIF_ALLWORK_MASK,A1
375 NOP 3
376 AND .D1 A1,A2,A2 ; check for work to do
377 [A2] BNOP .S1 syscall_exit_work,5
378
379restore_all:
380 RESTORE_ALL NRP,NTSR
381
382 ;;
383 ;; After a fork we jump here directly from resume,
384 ;; so that A4 contains the previous task structure.
385 ;;
386ENTRY(ret_from_fork)
387#ifdef CONFIG_C6X_BIG_KERNEL
388 MVKL .S1 schedule_tail,A0
389 MVKH .S1 schedule_tail,A0
390 B .S2X A0
391#else
392 B .S2 schedule_tail
393#endif
394 ADDKPC .S2 ret_from_fork_2,B3,4
395ret_from_fork_2:
396 ;; return 0 in A4 for child process
397 GET_THREAD_INFO A12
398 BNOP .S2 syscall_exit,3
399 MVK .L2 0,B0
400 STW .D2T2 B0,*+SP(REGS_A4+8)
401ENDPROC(ret_from_fork)
402
403 ;;
404 ;; These are the interrupt handlers, responsible for calling __do_IRQ()
405 ;; int6 is used for syscalls (see _system_call entry)
406 ;;
407 .macro SAVE_ALL_INT
408 SAVE_ALL IRP,ITSR
409 .endm
410
411 .macro CALL_INT int
412#ifdef CONFIG_C6X_BIG_KERNEL
413 MVKL .S1 c6x_do_IRQ,A0
414 MVKH .S1 c6x_do_IRQ,A0
415 BNOP .S2X A0,1
416 MVK .S1 int,A4
417 ADDAW .D2 SP,2,B4
418 MVKL .S2 ret_from_interrupt,B3
419 MVKH .S2 ret_from_interrupt,B3
420#else
421 CALLP .S2 c6x_do_IRQ,B3
422 || MVK .S1 int,A4
423 || ADDAW .D2 SP,2,B4
424 B .S1 ret_from_interrupt
425 NOP 5
426#endif
427 .endm
428
429ENTRY(_int4_handler)
430 SAVE_ALL_INT
431 CALL_INT 4
432ENDPROC(_int4_handler)
433
434ENTRY(_int5_handler)
435 SAVE_ALL_INT
436 CALL_INT 5
437ENDPROC(_int5_handler)
438
439ENTRY(_int6_handler)
440 SAVE_ALL_INT
441 CALL_INT 6
442ENDPROC(_int6_handler)
443
444ENTRY(_int7_handler)
445 SAVE_ALL_INT
446 CALL_INT 7
447ENDPROC(_int7_handler)
448
449ENTRY(_int8_handler)
450 SAVE_ALL_INT
451 CALL_INT 8
452ENDPROC(_int8_handler)
453
454ENTRY(_int9_handler)
455 SAVE_ALL_INT
456 CALL_INT 9
457ENDPROC(_int9_handler)
458
459ENTRY(_int10_handler)
460 SAVE_ALL_INT
461 CALL_INT 10
462ENDPROC(_int10_handler)
463
464ENTRY(_int11_handler)
465 SAVE_ALL_INT
466 CALL_INT 11
467ENDPROC(_int11_handler)
468
469ENTRY(_int12_handler)
470 SAVE_ALL_INT
471 CALL_INT 12
472ENDPROC(_int12_handler)
473
474ENTRY(_int13_handler)
475 SAVE_ALL_INT
476 CALL_INT 13
477ENDPROC(_int13_handler)
478
479ENTRY(_int14_handler)
480 SAVE_ALL_INT
481 CALL_INT 14
482ENDPROC(_int14_handler)
483
484ENTRY(_int15_handler)
485 SAVE_ALL_INT
486 CALL_INT 15
487ENDPROC(_int15_handler)
488
489 ;;
490 ;; Handler for uninitialized and spurious interrupts
491 ;;
492ENTRY(_bad_interrupt)
493 B .S2 IRP
494 NOP 5
495ENDPROC(_bad_interrupt)
496
497 ;;
498 ;; Entry for NMI/exceptions/syscall
499 ;;
500ENTRY(_nmi_handler)
501 SAVE_ALL NRP,NTSR
502
503 MVC .S2 EFR,B2
504 CMPEQ .L2 1,B2,B2
505 || MVC .S2 TSR,B1
506 CLR .S2 B1,10,10,B1
507 MVC .S2 B1,TSR
508#ifdef CONFIG_C6X_BIG_KERNEL
509 [!B2] MVKL .S1 process_exception,A0
510 [!B2] MVKH .S1 process_exception,A0
511 [!B2] B .S2X A0
512#else
513 [!B2] B .S2 process_exception
514#endif
515 [B2] B .S2 system_call_saved
516 [!B2] ADDAW .D2 SP,2,B1
517 [!B2] MV .D1X B1,A4
518 ADDKPC .S2 ret_from_trap,B3,2
519
520ret_from_trap:
521 MV .D2X A4,B0
522 [!B0] BNOP .S2 ret_from_exception,5
523
524#ifdef CONFIG_C6X_BIG_KERNEL
525 MVKL .S2 system_call_saved_noack,B3
526 MVKH .S2 system_call_saved_noack,B3
527#endif
528 LDW .D2T2 *+SP(REGS_B0+8),B0
529 LDW .D2T1 *+SP(REGS_A4+8),A4
530 LDW .D2T2 *+SP(REGS_B4+8),B4
531 LDW .D2T1 *+SP(REGS_A6+8),A6
532 LDW .D2T2 *+SP(REGS_B6+8),B6
533 LDW .D2T1 *+SP(REGS_A8+8),A8
534#ifdef CONFIG_C6X_BIG_KERNEL
535 || B .S2 B3
536#else
537 || B .S2 system_call_saved_noack
538#endif
539 LDW .D2T2 *+SP(REGS_B8+8),B8
540 NOP 4
541ENDPROC(_nmi_handler)
542
543 ;;
544 ;; Jump to schedule() then return to ret_from_isr
545 ;;
546#ifdef CONFIG_PREEMPT
547resume_kernel:
548 GET_THREAD_INFO A12
549 LDW .D1T1 *+A12(THREAD_INFO_PREEMPT_COUNT),A1
550 NOP 4
551 [A1] BNOP .S2 restore_all,5
552
553preempt_schedule:
554 GET_THREAD_INFO A2
555 LDW .D1T1 *+A2(THREAD_INFO_FLAGS),A1
556#ifdef CONFIG_C6X_BIG_KERNEL
557 MVKL .S2 preempt_schedule_irq,B0
558 MVKH .S2 preempt_schedule_irq,B0
559 NOP 2
560#else
561 NOP 4
562#endif
563 AND .D1 _TIF_NEED_RESCHED,A1,A1
564 [!A1] BNOP .S2 restore_all,5
565#ifdef CONFIG_C6X_BIG_KERNEL
566 B .S2 B0
567#else
568 B .S2 preempt_schedule_irq
569#endif
570 ADDKPC .S2 preempt_schedule,B3,4
571#endif /* CONFIG_PREEMPT */
572
573ENTRY(enable_exception)
574 DINT
575 MVC .S2 TSR,B0
576 MVC .S2 B3,NRP
577 MVK .L2 0xc,B1
578 OR .D2 B0,B1,B0
579 MVC .S2 B0,TSR ; Set GEE and XEN in TSR
580 B .S2 NRP
581 NOP 5
582ENDPROC(enable_exception)
583
584ENTRY(sys_sigaltstack)
585#ifdef CONFIG_C6X_BIG_KERNEL
586 MVKL .S1 do_sigaltstack,A0 ; branch to do_sigaltstack
587 MVKH .S1 do_sigaltstack,A0
588 B .S2X A0
589#else
590 B .S2 do_sigaltstack
591#endif
592 LDW .D2T1 *+SP(REGS_SP+8),A6
593 NOP 4
594ENDPROC(sys_sigaltstack)
595
596 ;; kernel_execve
597ENTRY(kernel_execve)
598 MVK .S2 __NR_execve,B0
599 SWE
600 BNOP .S2 B3,5
601ENDPROC(kernel_execve)
602
603 ;;
604 ;; Special system calls
605 ;; return address is in B3
606 ;;
607ENTRY(sys_clone)
608 ADD .D1X SP,8,A4
609#ifdef CONFIG_C6X_BIG_KERNEL
610 || MVKL .S1 sys_c6x_clone,A0
611 MVKH .S1 sys_c6x_clone,A0
612 BNOP .S2X A0,5
613#else
614 || B .S2 sys_c6x_clone
615 NOP 5
616#endif
617ENDPROC(sys_clone)
618
619ENTRY(sys_rt_sigreturn)
620 ADD .D1X SP,8,A4
621#ifdef CONFIG_C6X_BIG_KERNEL
622 || MVKL .S1 do_rt_sigreturn,A0
623 MVKH .S1 do_rt_sigreturn,A0
624 BNOP .S2X A0,5
625#else
626 || B .S2 do_rt_sigreturn
627 NOP 5
628#endif
629ENDPROC(sys_rt_sigreturn)
630
631ENTRY(sys_execve)
632 ADDAW .D2 SP,2,B6 ; put regs addr in 4th parameter
633 ; & adjust regs stack addr
634 LDW .D2T2 *+SP(REGS_B4+8),B4
635
636 ;; c6x_execve(char *name, char **argv,
637 ;; char **envp, struct pt_regs *regs)
638#ifdef CONFIG_C6X_BIG_KERNEL
639 || MVKL .S1 sys_c6x_execve,A0
640 MVKH .S1 sys_c6x_execve,A0
641 B .S2X A0
642#else
643 || B .S2 sys_c6x_execve
644#endif
645 STW .D2T2 B3,*SP--[2]
646 ADDKPC .S2 ret_from_c6x_execve,B3,3
647
648ret_from_c6x_execve:
649 LDW .D2T2 *++SP[2],B3
650 NOP 4
651 BNOP .S2 B3,5
652ENDPROC(sys_execve)
653
654ENTRY(sys_pread_c6x)
655 MV .D2X A8,B7
656#ifdef CONFIG_C6X_BIG_KERNEL
657 || MVKL .S1 sys_pread64,A0
658 MVKH .S1 sys_pread64,A0
659 BNOP .S2X A0,5
660#else
661 || B .S2 sys_pread64
662 NOP 5
663#endif
664ENDPROC(sys_pread_c6x)
665
666ENTRY(sys_pwrite_c6x)
667 MV .D2X A8,B7
668#ifdef CONFIG_C6X_BIG_KERNEL
669 || MVKL .S1 sys_pwrite64,A0
670 MVKH .S1 sys_pwrite64,A0
671 BNOP .S2X A0,5
672#else
673 || B .S2 sys_pwrite64
674 NOP 5
675#endif
676ENDPROC(sys_pwrite_c6x)
677
678;; On Entry
679;; A4 - path
680;; B4 - offset_lo (LE), offset_hi (BE)
681;; A6 - offset_lo (BE), offset_hi (LE)
682ENTRY(sys_truncate64_c6x)
683#ifdef CONFIG_CPU_BIG_ENDIAN
684 MV .S2 B4,B5
685 MV .D2X A6,B4
686#else
687 MV .D2X A6,B5
688#endif
689#ifdef CONFIG_C6X_BIG_KERNEL
690 || MVKL .S1 sys_truncate64,A0
691 MVKH .S1 sys_truncate64,A0
692 BNOP .S2X A0,5
693#else
694 || B .S2 sys_truncate64
695 NOP 5
696#endif
697ENDPROC(sys_truncate64_c6x)
698
699;; On Entry
700;; A4 - fd
701;; B4 - offset_lo (LE), offset_hi (BE)
702;; A6 - offset_lo (BE), offset_hi (LE)
703ENTRY(sys_ftruncate64_c6x)
704#ifdef CONFIG_CPU_BIG_ENDIAN
705 MV .S2 B4,B5
706 MV .D2X A6,B4
707#else
708 MV .D2X A6,B5
709#endif
710#ifdef CONFIG_C6X_BIG_KERNEL
711 || MVKL .S1 sys_ftruncate64,A0
712 MVKH .S1 sys_ftruncate64,A0
713 BNOP .S2X A0,5
714#else
715 || B .S2 sys_ftruncate64
716 NOP 5
717#endif
718ENDPROC(sys_ftruncate64_c6x)
719
720#ifdef __ARCH_WANT_SYSCALL_OFF_T
721;; On Entry
722;; A4 - fd
723;; B4 - offset_lo (LE), offset_hi (BE)
724;; A6 - offset_lo (BE), offset_hi (LE)
725;; B6 - len
726;; A8 - advice
727ENTRY(sys_fadvise64_c6x)
728#ifdef CONFIG_C6X_BIG_KERNEL
729 MVKL .S1 sys_fadvise64,A0
730 MVKH .S1 sys_fadvise64,A0
731 BNOP .S2X A0,2
732#else
733 B .S2 sys_fadvise64
734 NOP 2
735#endif
736#ifdef CONFIG_CPU_BIG_ENDIAN
737 MV .L2 B4,B5
738 || MV .D2X A6,B4
739#else
740 MV .D2X A6,B5
741#endif
742 MV .D1X B6,A6
743 MV .D2X A8,B6
744#endif
745ENDPROC(sys_fadvise64_c6x)
746
747;; On Entry
748;; A4 - fd
749;; B4 - offset_lo (LE), offset_hi (BE)
750;; A6 - offset_lo (BE), offset_hi (LE)
751;; B6 - len_lo (LE), len_hi (BE)
752;; A8 - len_lo (BE), len_hi (LE)
753;; B8 - advice
754ENTRY(sys_fadvise64_64_c6x)
755#ifdef CONFIG_C6X_BIG_KERNEL
756 MVKL .S1 sys_fadvise64_64,A0
757 MVKH .S1 sys_fadvise64_64,A0
758 BNOP .S2X A0,2
759#else
760 B .S2 sys_fadvise64_64
761 NOP 2
762#endif
763#ifdef CONFIG_CPU_BIG_ENDIAN
764 MV .L2 B4,B5
765 || MV .D2X A6,B4
766 MV .L1 A8,A6
767 || MV .D1X B6,A7
768#else
769 MV .D2X A6,B5
770 MV .L1 A8,A7
771 || MV .D1X B6,A6
772#endif
773 MV .L2 B8,B6
774ENDPROC(sys_fadvise64_64_c6x)
775
776;; On Entry
777;; A4 - fd
778;; B4 - mode
779;; A6 - offset_hi
780;; B6 - offset_lo
781;; A8 - len_hi
782;; B8 - len_lo
783ENTRY(sys_fallocate_c6x)
784#ifdef CONFIG_C6X_BIG_KERNEL
785 MVKL .S1 sys_fallocate,A0
786 MVKH .S1 sys_fallocate,A0
787 BNOP .S2X A0,1
788#else
789 B .S2 sys_fallocate
790 NOP
791#endif
792 MV .D1 A6,A7
793 MV .D1X B6,A6
794 MV .D2X A8,B7
795 MV .D2 B8,B6
796ENDPROC(sys_fallocate_c6x)
797
798 ;; put this in .neardata for faster access when using DSBT mode
799 .section .neardata,"aw",@progbits
800 .global current_ksp
801 .hidden current_ksp
802current_ksp:
803 .word init_thread_union + THREAD_START_SP
diff --git a/arch/c6x/kernel/head.S b/arch/c6x/kernel/head.S
new file mode 100644
index 000000000000..133eab6edf6b
--- /dev/null
+++ b/arch/c6x/kernel/head.S
@@ -0,0 +1,84 @@
1;
2; Port on Texas Instruments TMS320C6x architecture
3;
4; Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
5; Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
6;
7; This program is free software; you can redistribute it and/or modify
8; it under the terms of the GNU General Public License version 2 as
9; published by the Free Software Foundation.
10;
11#include <linux/linkage.h>
12#include <linux/of_fdt.h>
13#include <asm/asm-offsets.h>
14
	__HEAD
ENTRY(_c_int00)
	;; Save bootloader-provided magic (A4) and pointer (B4) in
	;; registers that survive the setup code below
	MV	.S1	A4,A10
	MV	.S2	B4,B10
	MVKL	.S2	__bss_start,B5
	MVKH	.S2	__bss_start,B5
	MVKL	.S2	__bss_stop,B6
	MVKH	.S2	__bss_stop,B6
	SUB	.L2	B6,B5,B6		; bss size in bytes

	;; Set the stack pointer from current_ksp (init task's stack top)
	MVKL	.S2	current_ksp,B0
	MVKH	.S2	current_ksp,B0
	LDW	.D2T2	*B0,B15

	;; clear bss, one doubleword store per iteration
	SHR	.S2	B6,3,B0			; number of dwords to clear
	ZERO	.L2	B13
	ZERO	.L2	B12
bss_loop:
	BDEC	.S2	bss_loop,B0		; branch+decrement while B0 >= 0
	NOP	3
	CMPLT	.L2	B0,0,B1
 [!B1]	STDW	.D2T2	B13:B12,*B5++[1]

	NOP	4
	AND	.D2	~7,B15,B15		; 8-byte align the stack pointer

	;; Clear GIE and PGIE in CSR and in all the saved task-state
	;; registers so interrupts stay off until the kernel is ready
	MVC	.S2	CSR,B2
	CLR	.S2	B2,0,1,B2
	MVC	.S2	B2,CSR
	MVC	.S2	TSR,B2
	CLR	.S2	B2,0,1,B2
	MVC	.S2	B2,TSR
	MVC	.S2	ITSR,B2
	CLR	.S2	B2,0,1,B2
	MVC	.S2	B2,ITSR
	MVC	.S2	NTSR,B2
	CLR	.S2	B2,0,1,B2
	MVC	.S2	B2,NTSR

	;; pass DTB pointer to machine_init (or zero if magic didn't match)
	MVKL	.S1	OF_DT_HEADER,A0
	MVKH	.S1	OF_DT_HEADER,A0
	CMPEQ	.L1	A10,A0,A0
 [A0]	MV	.S1X	B10,A4
 [!A0]	MVK	.S1	0,A4

#ifdef CONFIG_C6X_BIG_KERNEL
	;; far call: load the full address and set the return address by hand
	MVKL	.S1	machine_init,A0
	MVKH	.S1	machine_init,A0
	B	.S2X	A0
	ADDKPC	.S2	0f,B3,4
0:
#else
	CALLP	.S2	machine_init,B3
#endif

	;; Jump to Linux init (does not return)
#ifdef CONFIG_C6X_BIG_KERNEL
	MVKL	.S1	start_kernel,A0
	MVKH	.S1	start_kernel,A0
	B	.S2X	A0
#else
	B	.S2	start_kernel
#endif
	NOP	5
L1:	BNOP	.S2	L1,5			; spin here if start_kernel returns
diff --git a/arch/c6x/kernel/irq.c b/arch/c6x/kernel/irq.c
new file mode 100644
index 000000000000..0929e4b2b244
--- /dev/null
+++ b/arch/c6x/kernel/irq.c
@@ -0,0 +1,728 @@
1/*
2 * Copyright (C) 2011 Texas Instruments Incorporated
3 *
4 * This borrows heavily from powerpc version, which is:
5 *
6 * Derived from arch/i386/kernel/irq.c
7 * Copyright (C) 1992 Linus Torvalds
8 * Adapted from arch/i386 by Gary Thomas
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
11 * Copyright (C) 1996-2001 Cort Dougan
12 * Adapted for Power Macintosh by Paul Mackerras
13 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/debugfs.h>

#include <asm/megamod-pic.h>
30
/* Spurious-interrupt counter, reported as "Err" in /proc/interrupts. */
unsigned long irq_err_count;

/* Serializes read-modify-write of the core IER control register. */
static DEFINE_RAW_SPINLOCK(core_irq_lock);
34
35static void mask_core_irq(struct irq_data *data)
36{
37 unsigned int prio = data->irq;
38
39 BUG_ON(prio < 4 || prio >= NR_PRIORITY_IRQS);
40
41 raw_spin_lock(&core_irq_lock);
42 and_creg(IER, ~(1 << prio));
43 raw_spin_unlock(&core_irq_lock);
44}
45
46static void unmask_core_irq(struct irq_data *data)
47{
48 unsigned int prio = data->irq;
49
50 raw_spin_lock(&core_irq_lock);
51 or_creg(IER, 1 << prio);
52 raw_spin_unlock(&core_irq_lock);
53}
54
/* irq_chip for the core priority interrupt lines, masked via IER. */
static struct irq_chip core_chip = {
	.name		= "core",
	.irq_mask	= mask_core_irq,
	.irq_unmask	= unmask_core_irq,
};
60
61asmlinkage void c6x_do_IRQ(unsigned int prio, struct pt_regs *regs)
62{
63 struct pt_regs *old_regs = set_irq_regs(regs);
64
65 irq_enter();
66
67 BUG_ON(prio < 4 || prio >= NR_PRIORITY_IRQS);
68
69 generic_handle_irq(prio);
70
71 irq_exit();
72
73 set_irq_regs(old_regs);
74}
75
/* irq_host for the core PIC, created in init_IRQ(). */
static struct irq_host *core_host;
77
78static int core_host_map(struct irq_host *h, unsigned int virq,
79 irq_hw_number_t hw)
80{
81 if (hw < 4 || hw >= NR_PRIORITY_IRQS)
82 return -EINVAL;
83
84 irq_set_status_flags(virq, IRQ_LEVEL);
85 irq_set_chip_and_handler(virq, &core_chip, handle_level_irq);
86 return 0;
87}
88
/* No xlate op: intspec cell 0 is taken directly as the hw priority. */
static struct irq_host_ops core_host_ops = {
	.map = core_host_map,
};
92
/*
 * Architecture IRQ init: mask all core priority interrupts, register
 * the core PIC as the default irq host, then bring up the SoC-level
 * megamod PIC and clear any latched interrupt flags.
 */
void __init init_IRQ(void)
{
	struct device_node *np;

	/* Mask all priority IRQs (IER bits 4-15) */
	and_creg(IER, ~0xfff0);

	np = of_find_compatible_node(NULL, NULL, "ti,c64x+core-pic");
	if (np != NULL) {
		/* create the core host */
		core_host = irq_alloc_host(np, IRQ_HOST_MAP_PRIORITY, 0,
					   &core_host_ops, 0);
		if (core_host)
			irq_set_default_host(core_host);
		of_node_put(np);
	}

	printk(KERN_INFO "Core interrupt controller initialized\n");

	/* now we're ready for other SoC controllers */
	megamod_pic_init();

	/* Clear all general IRQ flags (ICR bits 4-15) */
	set_creg(ICR, 0xfff0);
}
118
119void ack_bad_irq(int irq)
120{
121 printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
122 irq_err_count++;
123}
124
/*
 * /proc/interrupts: the only arch-specific line is the spurious
 * interrupt ("Err") counter.
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
	return 0;
}
130
/*
 * IRQ controller and virtual interrupts
 */

/* The main irq map itself is an array of NR_IRQ entries containing the
 * associate host and irq number. An entry with a host of NULL is free.
 * An entry can be allocated if it's free, the allocator always then sets
 * hwirq first to the host's invalid irq number and then fills ops.
 */
struct irq_map_entry {
	irq_hw_number_t hwirq;	/* hw number within the owning host */
	struct irq_host *host;	/* NULL == slot free */
};

static LIST_HEAD(irq_hosts);		  /* all registered irq hosts */
static DEFINE_RAW_SPINLOCK(irq_big_lock); /* protects irq_map/irq_hosts */
static DEFINE_MUTEX(revmap_trees_mutex);  /* serializes radix-tree updates */
static struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS; /* see irq_set_virq_count() */
static struct irq_host *irq_default_host; /* fallback when host arg is NULL */
151
/* Return the hw interrupt number behind an irq_data. */
irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
	return irq_map[d->irq].hwirq;
}
EXPORT_SYMBOL_GPL(irqd_to_hwirq);

/* Return the hw interrupt number a virq is mapped to. */
irq_hw_number_t virq_to_hw(unsigned int virq)
{
	return irq_map[virq].hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

/* True if @virq currently belongs to @host. */
bool virq_is_host(unsigned int virq, struct irq_host *host)
{
	return irq_map[virq].host == host;
}
EXPORT_SYMBOL_GPL(virq_is_host);
169
170static int default_irq_host_match(struct irq_host *h, struct device_node *np)
171{
172 return h->of_node != NULL && h->of_node == np;
173}
174
/*
 * irq_alloc_host - allocate and register an irq_host
 * @of_node:	device tree node this host translates interrupts for
 * @revmap_type: IRQ_HOST_MAP_{PRIORITY,LINEAR,TREE,NOMAP} reverse-map style
 * @revmap_arg:	for IRQ_HOST_MAP_LINEAR, size of the linear revmap table
 * @ops:	host callbacks; ops->match is defaulted when NULL
 * @inval_irq:	hwirq value meaning "no interrupt" for this host
 *
 * Returns the new host, or NULL on allocation failure or when a second
 * PRIORITY host is registered.  A PRIORITY host gets all priority virqs
 * mapped immediately.
 */
struct irq_host *irq_alloc_host(struct device_node *of_node,
				unsigned int revmap_type,
				unsigned int revmap_arg,
				struct irq_host_ops *ops,
				irq_hw_number_t inval_irq)
{
	struct irq_host *host;
	unsigned int size = sizeof(struct irq_host);
	unsigned int i;
	unsigned int *rmap;
	unsigned long flags;

	/* Allocate structure and revmap table if using linear mapping */
	if (revmap_type == IRQ_HOST_MAP_LINEAR)
		size += revmap_arg * sizeof(unsigned int);
	host = kzalloc(size, GFP_KERNEL);
	if (host == NULL)
		return NULL;

	/* Fill structure */
	host->revmap_type = revmap_type;
	host->inval_irq = inval_irq;
	host->ops = ops;
	host->of_node = of_node_get(of_node);

	if (host->ops->match == NULL)
		host->ops->match = default_irq_host_match;

	raw_spin_lock_irqsave(&irq_big_lock, flags);

	/* Check for the priority controller.  Slot 0 of irq_map doubles
	 * as the "a priority host already exists" flag: only one may
	 * register.
	 */
	if (revmap_type == IRQ_HOST_MAP_PRIORITY) {
		if (irq_map[0].host != NULL) {
			raw_spin_unlock_irqrestore(&irq_big_lock, flags);
			of_node_put(host->of_node);
			kfree(host);
			return NULL;
		}
		irq_map[0].host = host;
	}

	list_add(&host->link, &irq_hosts);
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);

	/* Additional setups per revmap type */
	switch (revmap_type) {
	case IRQ_HOST_MAP_PRIORITY:
		/* 0 is always the invalid number for priority */
		host->inval_irq = 0;
		/* setup us as the host for all priority interrupts */
		for (i = 1; i < NR_PRIORITY_IRQS; i++) {
			irq_map[i].hwirq = i;
			smp_wmb();	/* publish hwirq before host */
			irq_map[i].host = host;
			smp_wmb();

			ops->map(host, i, i);
		}
		break;
	case IRQ_HOST_MAP_LINEAR:
		/* revmap table was allocated right behind the host struct */
		rmap = (unsigned int *)(host + 1);
		for (i = 0; i < revmap_arg; i++)
			rmap[i] = NO_IRQ;
		host->revmap_data.linear.size = revmap_arg;
		smp_wmb();	/* publish size before the table pointer */
		host->revmap_data.linear.revmap = rmap;
		break;
	case IRQ_HOST_MAP_TREE:
		INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
		break;
	default:
		break;
	}

	pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

	return host;
}
253
/*
 * irq_find_host - find the registered irq_host matching a DT node
 *
 * Walks the host list under irq_big_lock using each host's match op.
 * Returns NULL when no host matches.
 */
struct irq_host *irq_find_host(struct device_node *node)
{
	struct irq_host *h, *found = NULL;
	unsigned long flags;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 */
	raw_spin_lock_irqsave(&irq_big_lock, flags);
	list_for_each_entry(h, &irq_hosts, link)
		if (h->ops->match(h, node)) {
			found = h;
			break;
		}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);
274
/*
 * Set the host used when mapping/lookup functions are passed a NULL
 * host argument.
 */
void irq_set_default_host(struct irq_host *host)
{
	pr_debug("irq: Default host set to @0x%p\n", host);

	irq_default_host = host;
}
281
282void irq_set_virq_count(unsigned int count)
283{
284 pr_debug("irq: Trying to set virq count to %d\n", count);
285
286 BUG_ON(count < NR_PRIORITY_IRQS);
287 if (count < NR_IRQS)
288 irq_virq_count = count;
289}
290
/*
 * Second stage of mapping creation: allocate the irq_desc for @virq,
 * record the hwirq in irq_map and call the host's map op.  On failure
 * the virq (already reserved by the caller) is released again.
 * Returns 0 on success, -1 on failure.
 */
static int irq_setup_virq(struct irq_host *host, unsigned int virq,
			  irq_hw_number_t hwirq)
{
	int res;

	res = irq_alloc_desc_at(virq, 0);
	if (res != virq) {
		pr_debug("irq: -> allocating desc failed\n");
		goto error;
	}

	/* map it */
	smp_wmb();
	irq_map[virq].hwirq = hwirq;
	smp_mb();	/* hwirq must be visible before the map op runs */

	if (host->ops->map(host, virq, hwirq)) {
		pr_debug("irq: -> mapping failed, freeing\n");
		goto errdesc;
	}

	/* mapped interrupts may now be requested */
	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	return 0;

errdesc:
	irq_free_descs(virq, 1);
error:
	irq_free_virt(virq, 1);
	return -1;
}
322
/*
 * irq_create_direct_mapping - allocate a virq for a NOMAP host
 *
 * For hosts without a fixed hw numbering: the allocated virq doubles
 * as the hwirq.  Returns NO_IRQ on failure.
 */
unsigned int irq_create_direct_mapping(struct irq_host *host)
{
	unsigned int virq;

	if (host == NULL)
		host = irq_default_host;

	BUG_ON(host == NULL);
	WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);

	virq = irq_alloc_virt(host, 1, 0);
	if (virq == NO_IRQ) {
		pr_debug("irq: create_direct virq allocation failed\n");
		return NO_IRQ;
	}

	pr_debug("irq: create_direct obtained virq %d\n", virq);

	if (irq_setup_virq(host, virq, virq))
		return NO_IRQ;

	return virq;
}
346
/*
 * irq_create_mapping - map a hw interrupt to a virq on the given host
 *
 * Returns an existing mapping when there is one, otherwise reserves a
 * virq (preferring hwirq % irq_virq_count as a slot hint) and sets it
 * up via irq_setup_virq().  Returns NO_IRQ on failure.
 */
unsigned int irq_create_mapping(struct irq_host *host,
				irq_hw_number_t hwirq)
{
	unsigned int virq, hint;

	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);

	/* Look for default host if nececssary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL) {
		printk(KERN_WARNING "irq_create_mapping called for"
		       " NULL host, hwirq=%lx\n", hwirq);
		WARN_ON(1);
		return NO_IRQ;
	}
	pr_debug("irq: -> using host @%p\n", host);

	/* Check if mapping already exists */
	virq = irq_find_mapping(host, hwirq);
	if (virq != NO_IRQ) {
		pr_debug("irq: -> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Allocate a virtual interrupt number */
	hint = hwirq % irq_virq_count;
	virq = irq_alloc_virt(host, 1, hint);
	if (virq == NO_IRQ) {
		pr_debug("irq: -> virq allocation failed\n");
		return NO_IRQ;
	}

	if (irq_setup_virq(host, virq, hwirq))
		return NO_IRQ;

	pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n",
		 hwirq, host->of_node ? host->of_node->full_name : "null", virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
389
390unsigned int irq_create_of_mapping(struct device_node *controller,
391 const u32 *intspec, unsigned int intsize)
392{
393 struct irq_host *host;
394 irq_hw_number_t hwirq;
395 unsigned int type = IRQ_TYPE_NONE;
396 unsigned int virq;
397
398 if (controller == NULL)
399 host = irq_default_host;
400 else
401 host = irq_find_host(controller);
402 if (host == NULL) {
403 printk(KERN_WARNING "irq: no irq host found for %s !\n",
404 controller->full_name);
405 return NO_IRQ;
406 }
407
408 /* If host has no translation, then we assume interrupt line */
409 if (host->ops->xlate == NULL)
410 hwirq = intspec[0];
411 else {
412 if (host->ops->xlate(host, controller, intspec, intsize,
413 &hwirq, &type))
414 return NO_IRQ;
415 }
416
417 /* Create mapping */
418 virq = irq_create_mapping(host, hwirq);
419 if (virq == NO_IRQ)
420 return virq;
421
422 /* Set type if specified and different than the current one */
423 if (type != IRQ_TYPE_NONE &&
424 type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
425 irq_set_irq_type(virq, type);
426 return virq;
427}
428EXPORT_SYMBOL_GPL(irq_create_of_mapping);
429
/*
 * irq_dispose_mapping - tear down a virq created by irq_create_mapping
 *
 * Unmaps the interrupt from its host, clears the reverse map entry and
 * releases the virq and its descriptor.  Priority interrupts are
 * permanent and silently ignored.
 */
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_host *host;
	irq_hw_number_t hwirq;

	if (virq == NO_IRQ)
		return;

	/* Never unmap priority interrupts */
	if (virq < NR_PRIORITY_IRQS)
		return;

	host = irq_map[virq].host;
	if (WARN_ON(host == NULL))
		return;

	/* block new request_irq() callers first */
	irq_set_status_flags(virq, IRQ_NOREQUEST);

	/* remove chip and handler */
	irq_set_chip_and_handler(virq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(virq);

	/* Tell the PIC about it */
	if (host->ops->unmap)
		host->ops->unmap(host, virq);
	smp_mb();

	/* Clear reverse map */
	hwirq = irq_map[virq].hwirq;
	switch (host->revmap_type) {
	case IRQ_HOST_MAP_LINEAR:
		if (hwirq < host->revmap_data.linear.size)
			host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
		break;
	case IRQ_HOST_MAP_TREE:
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&host->revmap_data.tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
		break;
	}

	/* Destroy map */
	smp_mb();
	irq_map[virq].hwirq = host->inval_irq;

	irq_free_descs(virq, 1);
	/* Free it */
	irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
482
483unsigned int irq_find_mapping(struct irq_host *host,
484 irq_hw_number_t hwirq)
485{
486 unsigned int i;
487 unsigned int hint = hwirq % irq_virq_count;
488
489 /* Look for default host if nececssary */
490 if (host == NULL)
491 host = irq_default_host;
492 if (host == NULL)
493 return NO_IRQ;
494
495 /* Slow path does a linear search of the map */
496 i = hint;
497 do {
498 if (irq_map[i].host == host &&
499 irq_map[i].hwirq == hwirq)
500 return i;
501 i++;
502 if (i >= irq_virq_count)
503 i = 4;
504 } while (i != hint);
505 return NO_IRQ;
506}
507EXPORT_SYMBOL_GPL(irq_find_mapping);
508
/*
 * Fast reverse lookup for IRQ_HOST_MAP_TREE hosts: hwirq -> virq via
 * the host's radix tree, falling back to the linear slow path.
 */
unsigned int irq_radix_revmap_lookup(struct irq_host *host,
				     irq_hw_number_t hwirq)
{
	struct irq_map_entry *ptr;
	unsigned int virq;

	if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE))
		return irq_find_mapping(host, hwirq);

	/*
	 * The ptr returned references the static global irq_map.
	 * but freeing an irq can delete nodes along the path to
	 * do the lookup via call_rcu.
	 */
	rcu_read_lock();
	ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
	rcu_read_unlock();

	/*
	 * If found in radix tree, then fine.
	 * Else fallback to linear lookup - this should not happen in practice
	 * as it means that we failed to insert the node in the radix tree.
	 */
	if (ptr)
		virq = ptr - irq_map;	/* entry index == virq */
	else
		virq = irq_find_mapping(host, hwirq);

	return virq;
}
539
/*
 * Record @virq in the host's radix reverse map, keyed by @hwirq.
 * No-op for NO_IRQ or for hosts that are not IRQ_HOST_MAP_TREE.
 */
void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
			     irq_hw_number_t hwirq)
{
	if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE))
		return;

	if (virq != NO_IRQ) {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&host->revmap_data.tree, hwirq,
				  &irq_map[virq]);
		mutex_unlock(&revmap_trees_mutex);
	}
}
553
/*
 * Fast reverse lookup for IRQ_HOST_MAP_LINEAR hosts: hwirq -> virq via
 * the per-host table, lazily filled from the slow path.
 */
unsigned int irq_linear_revmap(struct irq_host *host,
			       irq_hw_number_t hwirq)
{
	unsigned int *revmap;

	if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR))
		return irq_find_mapping(host, hwirq);

	/* Check revmap bounds */
	if (unlikely(hwirq >= host->revmap_data.linear.size))
		return irq_find_mapping(host, hwirq);

	/* Check if revmap was allocated */
	revmap = host->revmap_data.linear.revmap;
	if (unlikely(revmap == NULL))
		return irq_find_mapping(host, hwirq);

	/* Fill up revmap with slow path if no mapping found */
	if (unlikely(revmap[hwirq] == NO_IRQ))
		revmap[hwirq] = irq_find_mapping(host, hwirq);

	return revmap[hwirq];
}
577
/*
 * irq_alloc_virt - reserve @count consecutive free slots in irq_map
 * @host: host that will own the slots
 * @count: number of consecutive virqs wanted
 * @hint: preferred slot for a single-irq allocation
 *
 * Priority slots (below NR_PRIORITY_IRQS) are never handed out.
 * Returns the first reserved virq, or NO_IRQ if there is no room.
 */
unsigned int irq_alloc_virt(struct irq_host *host,
			    unsigned int count,
			    unsigned int hint)
{
	unsigned long flags;
	unsigned int i, j, found = NO_IRQ;

	if (count == 0 || count > (irq_virq_count - NR_PRIORITY_IRQS))
		return NO_IRQ;

	raw_spin_lock_irqsave(&irq_big_lock, flags);

	/* Use hint for 1 interrupt if any */
	if (count == 1 && hint >= NR_PRIORITY_IRQS &&
	    hint < irq_virq_count && irq_map[hint].host == NULL) {
		found = hint;
		goto hint_found;
	}

	/* Look for count consecutive numbers in the allocatable
	 * (non-legacy) space; j counts the length of the current
	 * free run.
	 */
	for (i = NR_PRIORITY_IRQS, j = 0; i < irq_virq_count; i++) {
		if (irq_map[i].host != NULL)
			j = 0;
		else
			j++;

		if (j == count) {
			found = i - count + 1;
			break;
		}
	}
	if (found == NO_IRQ) {
		raw_spin_unlock_irqrestore(&irq_big_lock, flags);
		return NO_IRQ;
	}
 hint_found:
	/* claim the run: hwirq is set to "invalid" before publishing host */
	for (i = found; i < (found + count); i++) {
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = host;
	}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}
624
/*
 * irq_free_virt - release virqs reserved with irq_alloc_virt
 *
 * Priority interrupts are never freed; out-of-range requests are
 * clamped (after a WARN) rather than rejected outright.
 */
void irq_free_virt(unsigned int virq, unsigned int count)
{
	unsigned long flags;
	unsigned int i;

	WARN_ON(virq < NR_PRIORITY_IRQS);
	WARN_ON(count == 0 || (virq + count) > irq_virq_count);

	/* clamp the range so it starts above the priority interrupts */
	if (virq < NR_PRIORITY_IRQS) {
		if (virq + count < NR_PRIORITY_IRQS)
			return;
		count -= NR_PRIORITY_IRQS - virq;
		virq = NR_PRIORITY_IRQS;
	}

	/* clamp the range so it ends within the virq space */
	if (count > irq_virq_count || virq > irq_virq_count - count) {
		if (virq > irq_virq_count)
			return;
		count = irq_virq_count - virq;
	}

	raw_spin_lock_irqsave(&irq_big_lock, flags);
	for (i = virq; i < (virq + count); i++) {
		struct irq_host *host;

		/* mark hwirq invalid before releasing the slot */
		host = irq_map[i].host;
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = NULL;
	}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
}
657
658#ifdef CONFIG_VIRQ_DEBUG
/*
 * Dump the virq -> (hwirq, chip, chip data, host) table for every
 * interrupt with a registered action.  Backs the debugfs
 * "virq_mapping" file.
 */
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	const char *p;
	static const char none[] = "none";
	void *data;
	int i;

	seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
		   "chip name", "chip data", "host name");

	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		/* only show interrupts that have a handler installed */
		if (desc->action && desc->action->handler) {
			struct irq_chip *chip;

			seq_printf(m, "%5d ", i);
			seq_printf(m, "0x%05lx ", irq_map[i].hwirq);

			chip = irq_desc_get_chip(desc);
			if (chip && chip->name)
				p = chip->name;
			else
				p = none;
			seq_printf(m, "%-15s ", p);

			data = irq_desc_get_chip_data(desc);
			seq_printf(m, "0x%16p ", data);

			if (irq_map[i].host && irq_map[i].host->of_node)
				p = irq_map[i].host->of_node->full_name;
			else
				p = none;
			seq_printf(m, "%s\n", p);
		}

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}
706
/* seq_file boilerplate for the debugfs "virq_mapping" file. */
static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
718
719static int __init irq_debugfs_init(void)
720{
721 if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
722 NULL, &virq_debug_fops) == NULL)
723 return -ENOMEM;
724
725 return 0;
726}
727device_initcall(irq_debugfs_init);
728#endif /* CONFIG_VIRQ_DEBUG */
diff --git a/arch/c6x/kernel/module.c b/arch/c6x/kernel/module.c
new file mode 100644
index 000000000000..5fc03f18f56c
--- /dev/null
+++ b/arch/c6x/kernel/module.c
@@ -0,0 +1,123 @@
1/*
2 * Port on Texas Instruments TMS320C6x architecture
3 *
4 * Copyright (C) 2005, 2009, 2010, 2011 Texas Instruments Incorporated
5 * Author: Thomas Charleux (thomas.charleux@jaluna.com)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12#include <linux/moduleloader.h>
13#include <linux/elf.h>
14#include <linux/vmalloc.h>
15#include <linux/kernel.h>
16
/*
 * Patch a PC-relative branch displacement into the opcode at @ip.
 * @dest is the branch target, @maskbits the width of the signed
 * displacement field, @shift its bit position within the opcode.
 *
 * The displacement is counted in 32-bit words relative to the 32-byte
 * aligned address containing the instruction (the fetch packet —
 * standard C6x PC-relative addressing).  Returns 0 on success, -1 if
 * @dest is out of range for the field.
 */
static inline int fixup_pcr(u32 *ip, Elf32_Addr dest, u32 maskbits, int shift)
{
	u32 opcode;
	long ep = (long)ip & ~31;		/* 32-byte aligned base */
	long delta = ((long)dest - ep) >> 2;	/* displacement in words */
	long mask = (1 << maskbits) - 1;

	/* delta must fit in a maskbits-wide signed field */
	if ((delta >> (maskbits - 1)) == 0 ||
	    (delta >> (maskbits - 1)) == -1) {
		opcode = *ip;
		opcode &= ~(mask << shift);
		opcode |= ((delta & mask) << shift);
		*ip = opcode;

		pr_debug("REL PCR_S%d[%p] dest[%p] opcode[%08x]\n",
			 maskbits, ip, (void *)dest, opcode);

		return 0;
	}
	pr_err("PCR_S%d reloc %p -> %p out of range!\n",
	       maskbits, ip, (void *)dest);

	return -1;
}
41
42/*
43 * apply a RELA relocation
44 */
/*
 * apply a RELA relocation
 *
 * Handles absolute (ABS32/16/8), MVK-immediate (ABS_L16/ABS_H16) and
 * PC-relative branch (PCR_S21/S12/S10) relocation types for C6x
 * kernel modules.  Returns 0 on success, -ENOEXEC for out-of-range or
 * unknown relocations.
 */
int apply_relocate_add(Elf32_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	Elf32_Rela *rel = (void *) sechdrs[relsec].sh_addr;
	Elf_Sym *sym;
	u32 *location, opcode;
	unsigned int i;
	Elf32_Addr v;
	Elf_Addr offset = 0;

	pr_debug("Applying relocate section %u to %u with offset 0x%x\n",
		 relsec, sechdrs[relsec].sh_info, offset);

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset - offset;

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		/* this is the adjustment to be made */
		v = sym->st_value + rel[i].r_addend;

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_C6000_ABS32:
			pr_debug("RELA ABS32: [%p] = 0x%x\n", location, v);
			*location = v;
			break;
		case R_C6000_ABS16:
			pr_debug("RELA ABS16: [%p] = 0x%x\n", location, v);
			*(u16 *)location = v;
			break;
		case R_C6000_ABS8:
			pr_debug("RELA ABS8: [%p] = 0x%x\n", location, v);
			*(u8 *)location = v;
			break;
		case R_C6000_ABS_L16:
			/* low 16 bits of v go into the 16-bit immediate
			 * field occupying opcode bits 7-22 */
			opcode = *location;
			opcode &= ~0x7fff80;
			opcode |= ((v & 0xffff) << 7);
			pr_debug("RELA ABS_L16[%p] v[0x%x] opcode[0x%x]\n",
				 location, v, opcode);
			*location = opcode;
			break;
		case R_C6000_ABS_H16:
			/* high 16 bits of v into the same field; >> 9
			 * lines bit 16 of v up with opcode bit 7 */
			opcode = *location;
			opcode &= ~0x7fff80;
			opcode |= ((v >> 9) & 0x7fff80);
			pr_debug("RELA ABS_H16[%p] v[0x%x] opcode[0x%x]\n",
				 location, v, opcode);
			*location = opcode;
			break;
		case R_C6000_PCR_S21:
			if (fixup_pcr(location, v, 21, 7))
				return -ENOEXEC;
			break;
		case R_C6000_PCR_S12:
			if (fixup_pcr(location, v, 12, 16))
				return -ENOEXEC;
			break;
		case R_C6000_PCR_S10:
			if (fixup_pcr(location, v, 10, 13))
				return -ENOEXEC;
			break;
		default:
			pr_err("module %s: Unknown RELA relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}

	return 0;
}
diff --git a/arch/c6x/kernel/process.c b/arch/c6x/kernel/process.c
new file mode 100644
index 000000000000..7ca8c41b03cd
--- /dev/null
+++ b/arch/c6x/kernel/process.c
@@ -0,0 +1,265 @@
1/*
2 * Port on Texas Instruments TMS320C6x architecture
3 *
4 * Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
5 * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12#include <linux/module.h>
13#include <linux/unistd.h>
14#include <linux/ptrace.h>
15#include <linux/init_task.h>
16#include <linux/tick.h>
17#include <linux/mqueue.h>
18#include <linux/syscalls.h>
19#include <linux/reboot.h>
20
21#include <asm/syscalls.h>
22
/* hooks for board specific support */
void (*c6x_restart)(void);	/* checked by machine_restart() */
void (*c6x_halt)(void);		/* checked by machine_halt() */

extern asmlinkage void ret_from_fork(void);

static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);

/*
 * Initial thread structure.
 */
union thread_union init_thread_union __init_task_data = {
	INIT_THREAD_INFO(init_task)
};

/*
 * Initial task structure.
 */
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

/*
 * power off function, if any
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
50
/*
 * Enable interrupts and enter the hardware idle state atomically.
 */
static void c6x_idle(void)
{
	unsigned long tmp;

	/*
	 * Put local_irq_enable and idle in same execute packet
	 * to make them atomic and avoid race to idle with
	 * interrupts enabled.
	 */
	asm volatile (" mvc .s2 CSR,%0\n"	/* read CSR */
		      " or .d2 1,%0,%0\n"	/* set GIE */
		      " mvc .s2 %0,CSR\n"	/* write back... */
		      "|| idle\n"		/* ...idling in parallel */
		      : "=b"(tmp));
}
66
/*
 * The idle loop for C64x.  need_resched() is checked with interrupts
 * disabled; c6x_idle() re-enables them in the same execute packet as
 * the idle instruction so a wakeup cannot be lost.
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();
		while (1) {
			local_irq_disable();
			if (need_resched()) {
				local_irq_enable();
				break;
			}
			c6x_idle(); /* enables local irqs */
		}
		rcu_idle_exit();
		tick_nohz_idle_exit();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
92
93static void halt_loop(void)
94{
95 printk(KERN_EMERG "System Halted, OK to turn off power\n");
96 local_irq_disable();
97 while (1)
98 asm volatile("idle\n");
99}
100
/* Reboot: give the board hook a chance, then spin in halt_loop(). */
void machine_restart(char *__unused)
{
	if (c6x_restart)
		c6x_restart();
	halt_loop();
}

void machine_halt(void)
{
	if (c6x_halt)
		c6x_halt();
	halt_loop();
}

void machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();
	halt_loop();
}

/*
 * Entry point for kernel threads: call fn(arg) and exit with its
 * return value.  The unused first argument absorbs the zeroed A4 the
 * child receives from copy_thread(); arg/fn arrive via b4/a6 as set
 * up in kernel_thread().
 */
static void kernel_thread_helper(int dummy, void *arg, int (*fn)(void *))
{
	do_exit(fn(arg));
}
126
/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	struct pt_regs regs;

	/*
	 * copy_thread sets a4 to zero (child return from fork)
	 * so we can't just set things up to directly return to
	 * fn.  The child instead starts in kernel_thread_helper,
	 * with arg in b4 and fn in a6.
	 */
	memset(&regs, 0, sizeof(regs));
	regs.b4 = (unsigned long) arg;
	regs.a6 = (unsigned long) fn;
	regs.pc = (unsigned long) kernel_thread_helper;
	local_save_flags(regs.csr);
	regs.csr |= 1;		/* interrupts enabled in the child */
	regs.tsr = 5;	/* Set GEE and GIE in TSR */

	/* Ok, create the new process.. usp of -1 marks a kernel thread
	 * for copy_thread() */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, -1, &regs,
		       0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
152
void flush_thread(void)
{
	/* no arch-specific state to reset on exec */
}

void exit_thread(void)
{
	/* no arch-specific state to release */
}

/*
 * clone syscall: flags in A4, new stack pointer in B4 (0 means keep
 * the parent's sp), parent/child tid pointers in A6/B6.
 */
SYSCALL_DEFINE1(c6x_clone, struct pt_regs *, regs)
{
	unsigned long clone_flags;
	unsigned long newsp;

	/* syscall puts clone_flags in A4 and usp in B4 */
	clone_flags = regs->orig_a4;
	if (regs->b4)
		newsp = regs->b4;
	else
		newsp = regs->sp;

	return do_fork(clone_flags, newsp, regs, 0, (int __user *)regs->a6,
		       (int __user *)regs->b6);
}
176
/*
 * Do necessary setup to start up a newly executed thread: point the
 * registers at the new program's entry and stack and drop to user mode.
 */
void start_thread(struct pt_regs *regs, unsigned int pc, unsigned long usp)
{
	/*
	 * The binfmt loader will setup a "full" stack, but the C6X
	 * operates an "empty" stack. So we adjust the usp so that
	 * argc doesn't get destroyed if an interrupt is taken before
	 * it is read from the stack.
	 *
	 * NB: Library startup code needs to match this.
	 */
	usp -= 8;

	set_fs(USER_DS);
	regs->pc = pc;
	regs->sp = usp;
	regs->tsr |= 0x40; /* set user mode */
	current->thread.usp = usp;
}
198
/*
 * Copy a new thread context in its stack.  A usp of -1 denotes a
 * kernel thread (see kernel_thread()); the child always sees a4 == 0
 * as its fork return value.
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long ustk_size,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;

	childregs = task_pt_regs(p);

	*childregs = *regs;
	childregs->a4 = 0;	/* child returns 0 from fork */

	if (usp == -1)
		/* case of  __kernel_thread: we return to supervisor space */
		childregs->sp = (unsigned long)(childregs + 1);
	else
		/* Otherwise use the given stack */
		childregs->sp = usp;

	/* Set usp/ksp */
	p->thread.usp = childregs->sp;
	/* switch_to uses stack to save/restore 14 callee-saved regs */
	thread_saved_ksp(p) = (unsigned long)childregs - 8;
	p->thread.pc = (unsigned int) ret_from_fork;
	p->thread.wchan	= (unsigned long) ret_from_fork;
#ifdef __DSBT__
	{
		unsigned long dp;

		/* propagate the parent's data pointer (B14) to the child */
		asm volatile ("mv .S2 b14,%0\n" : "=b"(dp));

		thread_saved_dp(p) = dp;
		if (usp == -1)
			childregs->dp = dp;
	}
#endif
	return 0;
}
239
/*
 * c6x_execve() executes a new program.
 */
SYSCALL_DEFINE4(c6x_execve, const char __user *, name,
		const char __user *const __user *, argv,
		const char __user *const __user *, envp,
		struct pt_regs *, regs)
{
	int error;
	char *filename;

	/* copy the path into kernel space; getname returns ERR_PTR on
	 * failure */
	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;

	error = do_execve(filename, argv, envp, regs);
	putname(filename);
out:
	return error;
}
261
/* Report the task's saved wait channel (thread.wchan). */
unsigned long get_wchan(struct task_struct *p)
{
	return p->thread.wchan;
}
diff --git a/arch/c6x/kernel/ptrace.c b/arch/c6x/kernel/ptrace.c
new file mode 100644
index 000000000000..3c494e84444d
--- /dev/null
+++ b/arch/c6x/kernel/ptrace.c
@@ -0,0 +1,187 @@
1/*
2 * Port on Texas Instruments TMS320C6x architecture
3 *
4 * Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
5 * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
6 *
7 * Updated for 2.6.34: Mark Salter <msalter@redhat.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13#include <linux/ptrace.h>
14#include <linux/tracehook.h>
15#include <linux/regset.h>
16#include <linux/elf.h>
17
18#include <asm/cacheflush.h>
19
20#define PT_REG_SIZE (sizeof(struct pt_regs))
21
22/*
23 * Called by kernel/ptrace.c when detaching.
24 */
25void ptrace_disable(struct task_struct *child)
26{
27 /* nothing to do */
28}
29
30/*
31 * Get a register number from live pt_regs for the specified task.
32 */
33static inline long get_reg(struct task_struct *task, int regno)
34{
35 long *addr = (long *)task_pt_regs(task);
36
37 if (regno == PT_TSR || regno == PT_CSR)
38 return 0;
39
40 return addr[regno];
41}
42
43/*
44 * Write contents of register REGNO in task TASK.
45 */
46static inline int put_reg(struct task_struct *task,
47 int regno,
48 unsigned long data)
49{
50 unsigned long *addr = (unsigned long *)task_pt_regs(task);
51
52 if (regno != PT_TSR && regno != PT_CSR)
53 addr[regno] = data;
54
55 return 0;
56}
57
58/* regset get/set implementations */
59
60static int gpr_get(struct task_struct *target,
61 const struct user_regset *regset,
62 unsigned int pos, unsigned int count,
63 void *kbuf, void __user *ubuf)
64{
65 struct pt_regs *regs = task_pt_regs(target);
66
67 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
68 regs,
69 0, sizeof(*regs));
70}
71
72static int gpr_set(struct task_struct *target,
73 const struct user_regset *regset,
74 unsigned int pos, unsigned int count,
75 const void *kbuf, const void __user *ubuf)
76{
77 int ret;
78 struct pt_regs *regs = task_pt_regs(target);
79
80 /* Don't copyin TSR or CSR */
81 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
82 &regs,
83 0, PT_TSR * sizeof(long));
84 if (ret)
85 return ret;
86
87 ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
88 PT_TSR * sizeof(long),
89 (PT_TSR + 1) * sizeof(long));
90 if (ret)
91 return ret;
92
93 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
94 &regs,
95 (PT_TSR + 1) * sizeof(long),
96 PT_CSR * sizeof(long));
97 if (ret)
98 return ret;
99
100 ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
101 PT_CSR * sizeof(long),
102 (PT_CSR + 1) * sizeof(long));
103 if (ret)
104 return ret;
105
106 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
107 &regs,
108 (PT_CSR + 1) * sizeof(long), -1);
109 return ret;
110}
111
/* Regsets exported by this architecture: general-purpose registers only. */
enum c6x_regset {
	REGSET_GPR,
};

static const struct user_regset c6x_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = gpr_get,
		.set = gpr_set
	},
};

/* Native regset view used for core dumps and PTRACE_GETREGSET/SETREGSET. */
static const struct user_regset_view user_c6x_native_view = {
	.name = "tic6x",
	.e_machine = EM_TI_C6000,
	.regsets = c6x_regsets,
	.n = ARRAY_SIZE(c6x_regsets),
};

/* There is only the one (native) view on this architecture. */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_c6x_native_view;
}
138
139/*
140 * Perform ptrace request
141 */
142long arch_ptrace(struct task_struct *child, long request,
143 unsigned long addr, unsigned long data)
144{
145 int ret = 0;
146
147 switch (request) {
148 /*
149 * write the word at location addr.
150 */
151 case PTRACE_POKETEXT:
152 ret = generic_ptrace_pokedata(child, addr, data);
153 if (ret == 0 && request == PTRACE_POKETEXT)
154 flush_icache_range(addr, addr + 4);
155 break;
156 default:
157 ret = ptrace_request(child, request, addr, data);
158 break;
159 }
160
161 return ret;
162}
163
164/*
165 * handle tracing of system call entry
166 * - return the revised system call number or ULONG_MAX to cause ENOSYS
167 */
168asmlinkage unsigned long syscall_trace_entry(struct pt_regs *regs)
169{
170 if (tracehook_report_syscall_entry(regs))
171 /* tracing decided this syscall should not happen, so
172 * We'll return a bogus call number to get an ENOSYS
173 * error, but leave the original number in
174 * regs->orig_a4
175 */
176 return ULONG_MAX;
177
178 return regs->b0;
179}
180
181/*
182 * handle tracing of system call exit
183 */
184asmlinkage void syscall_trace_exit(struct pt_regs *regs)
185{
186 tracehook_report_syscall_exit(regs, 0);
187}
diff --git a/arch/c6x/kernel/setup.c b/arch/c6x/kernel/setup.c
new file mode 100644
index 000000000000..0c07921747f4
--- /dev/null
+++ b/arch/c6x/kernel/setup.c
@@ -0,0 +1,510 @@
1/*
2 * Port on Texas Instruments TMS320C6x architecture
3 *
4 * Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
5 * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/dma-mapping.h>
12#include <linux/memblock.h>
13#include <linux/seq_file.h>
14#include <linux/bootmem.h>
15#include <linux/clkdev.h>
16#include <linux/initrd.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/of_fdt.h>
20#include <linux/string.h>
21#include <linux/errno.h>
22#include <linux/cache.h>
23#include <linux/delay.h>
24#include <linux/sched.h>
25#include <linux/clk.h>
26#include <linux/cpu.h>
27#include <linux/fs.h>
28#include <linux/of.h>
29
30
31#include <asm/sections.h>
32#include <asm/div64.h>
33#include <asm/setup.h>
34#include <asm/dscr.h>
35#include <asm/clock.h>
36#include <asm/soc.h>
37
/* SoC "model" string from the device tree (see get_cpuinfo()). */
static const char *c6x_soc_name;

/* Number of "cpu" nodes found in the device tree. */
int c6x_num_cores;
EXPORT_SYMBOL_GPL(c6x_num_cores);

unsigned int c6x_silicon_rev;
EXPORT_SYMBOL_GPL(c6x_silicon_rev);

/*
 * Device status register. This holds information
 * about device configuration needed by some drivers.
 */
unsigned int c6x_devstat;
EXPORT_SYMBOL_GPL(c6x_devstat);

/*
 * Some SoCs have fuse registers holding a unique MAC
 * address. This is parsed out of the device tree with
 * the resulting MAC being held here.
 */
unsigned char c6x_fuse_mac[6];

/* Bounds of memory usable by the kernel; finalized in setup_arch(). */
unsigned long memory_start;
unsigned long memory_end;

/* Bounds of the RAM bank containing PAGE_OFFSET (see c6x_add_memory()). */
unsigned long ram_start;
unsigned long ram_end;

/* Uncached memory for DMA consistent use (memdma=) */
static unsigned long dma_start __initdata;
static unsigned long dma_size __initdata;

char c6x_command_line[COMMAND_LINE_SIZE];

#if defined(CONFIG_CMDLINE_BOOL)
static const char default_command_line[COMMAND_LINE_SIZE] __section(.cmdline) =
	CONFIG_CMDLINE;
#endif

/* Per-CPU identification data reported through /proc/cpuinfo. */
struct cpuinfo_c6x {
	const char *cpu_name;
	const char *cpu_voltage;
	const char *mmu;
	const char *fpu;
	char *cpu_rev;
	unsigned int core_id;
	char __cpu_rev[5];	/* backing store when cpu_rev is formatted */
};

static DEFINE_PER_CPU(struct cpuinfo_c6x, cpu_data);

/* Core-clock ticks per ns, pre-scaled by C6X_NDELAY_SCALE (for delay loops
 * -- presumably consumed by asm/delay.h; confirm). */
unsigned int ticks_per_ns_scaled;
EXPORT_SYMBOL(ticks_per_ns_scaled);

unsigned int c6x_core_freq;
93
/*
 * Identify the CPU from the CSR control register, compute the scaled
 * ticks-per-ns delay constant from the core clock, count CPU cores and
 * read the SoC model from the device tree, then log a summary line.
 */
static void __init get_cpuinfo(void)
{
	unsigned cpu_id, rev_id, csr;
	struct clk *coreclk = clk_get_sys(NULL, "core");
	unsigned long core_khz;
	u64 tmp;
	struct cpuinfo_c6x *p;
	struct device_node *node, *np;

	p = &per_cpu(cpu_data, smp_processor_id());

	if (!IS_ERR(coreclk))
		c6x_core_freq = clk_get_rate(coreclk);
	else {
		printk(KERN_WARNING
		       "Cannot find core clock frequency. Using 700MHz\n");
		c6x_core_freq = 700000000;
	}

	core_khz = c6x_core_freq / 1000;

	/* ticks_per_ns_scaled = (core_khz << C6X_NDELAY_SCALE) / 1e6 */
	tmp = (uint64_t)core_khz << C6X_NDELAY_SCALE;
	do_div(tmp, 1000000);
	ticks_per_ns_scaled = tmp;

	/* CSR[31:24] = CPU ID, CSR[23:16] = revision ID */
	csr = get_creg(CSR);
	cpu_id = csr >> 24;
	rev_id = (csr >> 16) & 0xff;

	p->mmu = "none";
	p->fpu = "none";
	p->cpu_voltage = "unknown";

	switch (cpu_id) {
	case 0:
		p->cpu_name = "C67x";
		p->fpu = "yes";
		break;
	case 2:
		p->cpu_name = "C62x";
		break;
	case 8:
		p->cpu_name = "C64x";
		break;
	case 12:
		p->cpu_name = "C64x";
		break;
	case 16:
		p->cpu_name = "C64x+";
		p->cpu_voltage = "1.2";
		break;
	default:
		p->cpu_name = "unknown";
		break;
	}

	if (cpu_id < 16) {
		/*
		 * NOTE(review): rev_id is masked to 8 bits above, so the
		 * 0x201/0x202/0x801 cases below can never match -- either
		 * the mask or these case values looks wrong; confirm
		 * against the CSR field layout in the TI CPU reference.
		 */
		switch (rev_id) {
		case 0x1:
			if (cpu_id > 8) {
				p->cpu_rev = "DM640/DM641/DM642/DM643";
				p->cpu_voltage = "1.2 - 1.4";
			} else {
				p->cpu_rev = "C6201";
				p->cpu_voltage = "2.5";
			}
			break;
		case 0x2:
			p->cpu_rev = "C6201B/C6202/C6211";
			p->cpu_voltage = "1.8";
			break;
		case 0x3:
			p->cpu_rev = "C6202B/C6203/C6204/C6205";
			p->cpu_voltage = "1.5";
			break;
		case 0x201:
			p->cpu_rev = "C6701 revision 0 (early CPU)";
			p->cpu_voltage = "1.8";
			break;
		case 0x202:
			p->cpu_rev = "C6701/C6711/C6712";
			p->cpu_voltage = "1.8";
			break;
		case 0x801:
			p->cpu_rev = "C64x";
			p->cpu_voltage = "1.5";
			break;
		default:
			p->cpu_rev = "unknown";
		}
	} else {
		p->cpu_rev = p->__cpu_rev;
		/* NOTE(review): formats cpu_id, not rev_id -- confirm intent */
		snprintf(p->__cpu_rev, sizeof(p->__cpu_rev), "0x%x", cpu_id);
	}

	p->core_id = get_coreid();

	/* count the "cpu" children of /cpus to learn the core count */
	node = of_find_node_by_name(NULL, "cpus");
	if (node) {
		for_each_child_of_node(node, np)
			if (!strcmp("cpu", np->name))
				++c6x_num_cores;
		of_node_put(node);
	}

	node = of_find_node_by_name(NULL, "soc");
	if (node) {
		if (of_property_read_string(node, "model", &c6x_soc_name))
			c6x_soc_name = "unknown";
		of_node_put(node);
	} else
		c6x_soc_name = "unknown";

	printk(KERN_INFO "CPU%d: %s rev %s, %s volts, %uMHz\n",
	       p->core_id, p->cpu_name, p->cpu_rev,
	       p->cpu_voltage, c6x_core_freq / 1000000);
}
211
212/*
213 * Early parsing of the command line
214 */
215static u32 mem_size __initdata;
216
217/* "mem=" parsing. */
218static int __init early_mem(char *p)
219{
220 if (!p)
221 return -EINVAL;
222
223 mem_size = memparse(p, &p);
224 /* don't remove all of memory when handling "mem={invalid}" */
225 if (mem_size == 0)
226 return -EINVAL;
227
228 return 0;
229}
230early_param("mem", early_mem);
231
232/* "memdma=<size>[@<address>]" parsing. */
233static int __init early_memdma(char *p)
234{
235 if (!p)
236 return -EINVAL;
237
238 dma_size = memparse(p, &p);
239 if (*p == '@')
240 dma_start = memparse(p, &p);
241
242 return 0;
243}
244early_param("memdma", early_memdma);
245
246int __init c6x_add_memory(phys_addr_t start, unsigned long size)
247{
248 static int ram_found __initdata;
249
250 /* We only handle one bank (the one with PAGE_OFFSET) for now */
251 if (ram_found)
252 return -EINVAL;
253
254 if (start > PAGE_OFFSET || PAGE_OFFSET >= (start + size))
255 return 0;
256
257 ram_start = start;
258 ram_end = start + size;
259
260 ram_found = 1;
261 return 0;
262}
263
264/*
265 * Do early machine setup and device tree parsing. This is called very
266 * early on the boot process.
267 */
268notrace void __init machine_init(unsigned long dt_ptr)
269{
270 struct boot_param_header *dtb = __va(dt_ptr);
271 struct boot_param_header *fdt = (struct boot_param_header *)_fdt_start;
272
273 /* interrupts must be masked */
274 set_creg(IER, 2);
275
276 /*
277 * Set the Interrupt Service Table (IST) to the beginning of the
278 * vector table.
279 */
280 set_ist(_vectors_start);
281
282 lockdep_init();
283
284 /*
285 * dtb is passed in from bootloader.
286 * fdt is linked in blob.
287 */
288 if (dtb && dtb != fdt)
289 fdt = dtb;
290
291 /* Do some early initialization based on the flat device tree */
292 early_init_devtree(fdt);
293
294 /* parse_early_param needs a boot_command_line */
295 strlcpy(boot_command_line, c6x_command_line, COMMAND_LINE_SIZE);
296 parse_early_param();
297}
298
/*
 * Finish architecture setup: size memory, reserve kernel/initrd/DMA
 * regions, hand memory to the bootmem allocator, configure caching of
 * external RAM, and probe early SoC facilities (DSCR, clocks).
 */
void __init setup_arch(char **cmdline_p)
{
	int bootmap_size;
	struct memblock_region *reg;

	printk(KERN_INFO "Initializing kernel\n");

	/* Initialize command line */
	*cmdline_p = c6x_command_line;

	memory_end = ram_end;
	memory_end &= ~(PAGE_SIZE - 1);

	/* honor a "mem=" limit if it falls below the end of RAM */
	if (mem_size && (PAGE_OFFSET + PAGE_ALIGN(mem_size)) < memory_end)
		memory_end = PAGE_OFFSET + PAGE_ALIGN(mem_size);

	/* add block that this kernel can use */
	memblock_add(PAGE_OFFSET, memory_end - PAGE_OFFSET);

	/* reserve kernel text/data/bss */
	memblock_reserve(PAGE_OFFSET,
			 PAGE_ALIGN((unsigned long)&_end - PAGE_OFFSET));

	if (dma_size) {
		/* align to cacheability granularity */
		dma_size = CACHE_REGION_END(dma_size);

		/* default placement: at the top of kernel memory */
		if (!dma_start)
			dma_start = memory_end - dma_size;

		/* align to cacheability granularity */
		dma_start = CACHE_REGION_START(dma_start);

		/* reserve DMA memory taken from kernel memory */
		if (memblock_is_region_memory(dma_start, dma_size))
			memblock_reserve(dma_start, dma_size);
	}

	/* free memory for allocators starts just past the kernel image */
	memory_start = PAGE_ALIGN((unsigned int) &_end);

	printk(KERN_INFO "Memory Start=%08lx, Memory End=%08lx\n",
	       memory_start, memory_end);

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * Reserve initrd memory if in kernel memory.
	 */
	if (initrd_start < initrd_end)
		if (memblock_is_region_memory(initrd_start,
					      initrd_end - initrd_start))
			memblock_reserve(initrd_start,
					 initrd_end - initrd_start);
#endif

	init_mm.start_code = (unsigned long) &_stext;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = memory_start;
	init_mm.brk = memory_start;

	/*
	 * Give all the memory to the bootmap allocator, tell it to put the
	 * boot mem_map at the start of memory
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0),
					 memory_start >> PAGE_SHIFT,
					 PAGE_OFFSET >> PAGE_SHIFT,
					 memory_end >> PAGE_SHIFT);
	memblock_reserve(memory_start, bootmap_size);

	unflatten_device_tree();

	c6x_cache_init();

	/* Set the whole external memory as non-cacheable */
	disable_caching(ram_start, ram_end - 1);

	/* Set caching of external RAM used by Linux */
	for_each_memblock(memory, reg)
		enable_caching(CACHE_REGION_START(reg->base),
			       CACHE_REGION_START(reg->base + reg->size - 1));

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * Enable caching for initrd which falls outside kernel memory.
	 */
	if (initrd_start < initrd_end) {
		if (!memblock_is_region_memory(initrd_start,
					       initrd_end - initrd_start))
			enable_caching(CACHE_REGION_START(initrd_start),
				       CACHE_REGION_START(initrd_end - 1));
	}
#endif

	/*
	 * Disable caching for dma coherent memory taken from kernel memory.
	 */
	if (dma_size && memblock_is_region_memory(dma_start, dma_size))
		disable_caching(dma_start,
				CACHE_REGION_START(dma_start + dma_size - 1));

	/* Initialize the coherent memory allocator */
	coherent_mem_init(dma_start, dma_size);

	/*
	 * Free all memory as a starting point.
	 */
	free_bootmem(PAGE_OFFSET, memory_end - PAGE_OFFSET);

	/*
	 * Then reserve memory which is already being used.
	 */
	for_each_memblock(reserved, reg) {
		pr_debug("reserved - 0x%08x-0x%08x\n",
			 (u32) reg->base, (u32) reg->size);
		reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
	}

	max_low_pfn = PFN_DOWN(memory_end);
	min_low_pfn = PFN_UP(memory_start);
	max_mapnr = max_low_pfn - min_low_pfn;

	/* Get kmalloc into gear */
	paging_init();

	/*
	 * Probe for Device State Configuration Registers.
	 * We have to do this early in case timer needs to be enabled
	 * through DSCR.
	 */
	dscr_probe();

	/* We do this early for timer and core clock frequency */
	c64x_setup_clocks();

	/* Get CPU info */
	get_cpuinfo();

#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
}
440
/*
 * /proc/cpuinfo iterator helpers.  CPU number n is encoded as the
 * opaque pointer (n + 1) so that CPU 0 does not map to NULL, which
 * would terminate the seq_file iteration (see c_start()).
 */
#define cpu_to_ptr(n) ((void *)((long)(n)+1))
#define ptr_to_cpu(p) ((long)(p) - 1)

/* Emit one CPU's /proc/cpuinfo entry (SoC-wide header before CPU 0). */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	int n = ptr_to_cpu(v);
	struct cpuinfo_c6x *p = &per_cpu(cpu_data, n);

	if (n == 0) {
		seq_printf(m,
			   "soc\t\t: %s\n"
			   "soc revision\t: 0x%x\n"
			   "soc cores\t: %d\n",
			   c6x_soc_name, c6x_silicon_rev, c6x_num_cores);
	}

	seq_printf(m,
		   "\n"
		   "processor\t: %d\n"
		   "cpu\t\t: %s\n"
		   "core revision\t: %s\n"
		   "core voltage\t: %s\n"
		   "core id\t\t: %d\n"
		   "mmu\t\t: %s\n"
		   "fpu\t\t: %s\n"
		   "cpu MHz\t\t: %u\n"
		   "bogomips\t: %lu.%02lu\n\n",
		   n,
		   p->cpu_name, p->cpu_rev, p->cpu_voltage,
		   p->core_id, p->mmu, p->fpu,
		   (c6x_core_freq + 500000) / 1000000,
		   (loops_per_jiffy/(500000/HZ)),
		   (loops_per_jiffy/(5000/HZ))%100);

	return 0;
}
477
478static void *c_start(struct seq_file *m, loff_t *pos)
479{
480 return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
481}
482static void *c_next(struct seq_file *m, void *v, loff_t *pos)
483{
484 ++*pos;
485 return NULL;
486}
487static void c_stop(struct seq_file *m, void *v)
488{
489}
490
491const struct seq_operations cpuinfo_op = {
492 c_start,
493 c_stop,
494 c_next,
495 show_cpuinfo
496};
497
static struct cpu cpu_devices[NR_CPUS];

/* Register a CPU device with the driver core for each present CPU. */
static int __init topology_init(void)
{
	int i;

	for_each_present_cpu(i)
		register_cpu(&cpu_devices[i], i);

	return 0;
}

subsys_initcall(topology_init);
diff --git a/arch/c6x/kernel/signal.c b/arch/c6x/kernel/signal.c
new file mode 100644
index 000000000000..304f675826e9
--- /dev/null
+++ b/arch/c6x/kernel/signal.c
@@ -0,0 +1,377 @@
1/*
2 * Port on Texas Instruments TMS320C6x architecture
3 *
4 * Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
5 * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
6 *
7 * Updated for 2.6.34: Mark Salter <msalter@redhat.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/kernel.h>
15#include <linux/uaccess.h>
16#include <linux/syscalls.h>
17#include <linux/tracehook.h>
18
19#include <asm/ucontext.h>
20#include <asm/cacheflush.h>
21
22
/* All signals a process may block (SIGKILL/SIGSTOP never can be). */
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

/*
 * Do a signal return, undo the signal stack.
 */

#define RETCODE_SIZE (9 << 2)	/* 9 instructions = 36 bytes */

/* Layout of the frame pushed on the user stack by setup_rt_frame(). */
struct rt_sigframe {
	struct siginfo __user *pinfo;	/* -> info below */
	void __user *puc;		/* -> uc below */
	struct siginfo info;
	struct ucontext uc;
	unsigned long retcode[RETCODE_SIZE >> 2];	/* sigreturn trampoline */
};
38
/*
 * Restore user register state from a sigcontext.  Returns the OR of
 * all __get_user() results: 0 on success, nonzero on any fault.
 */
static int restore_sigcontext(struct pt_regs *regs,
			      struct sigcontext __user *sc)
{
	int err = 0;

	/* The access_ok check was done by caller, so use __get_user here */
#define COPY(x) (err |= __get_user(regs->x, &sc->sc_##x))

	COPY(sp); COPY(a4); COPY(b4); COPY(a6); COPY(b6); COPY(a8); COPY(b8);
	COPY(a0); COPY(a1); COPY(a2); COPY(a3); COPY(a5); COPY(a7); COPY(a9);
	COPY(b0); COPY(b1); COPY(b2); COPY(b3); COPY(b5); COPY(b7); COPY(b9);

	COPY(a16); COPY(a17); COPY(a18); COPY(a19);
	COPY(a20); COPY(a21); COPY(a22); COPY(a23);
	COPY(a24); COPY(a25); COPY(a26); COPY(a27);
	COPY(a28); COPY(a29); COPY(a30); COPY(a31);
	COPY(b16); COPY(b17); COPY(b18); COPY(b19);
	COPY(b20); COPY(b21); COPY(b22); COPY(b23);
	COPY(b24); COPY(b25); COPY(b26); COPY(b27);
	COPY(b28); COPY(b29); COPY(b30); COPY(b31);

	COPY(csr); COPY(pc);

#undef COPY

	return err;
}
66
/*
 * rt_sigreturn syscall: tear down the signal frame built by
 * setup_rt_frame(), restore the blocked-signal mask and register
 * state, and return the restored a4 (syscall return register).
 */
asmlinkage int do_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	sigset_t set;

	/*
	 * Since we stacked the signal on a dword boundary,
	 * 'sp' should be dword aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->sp & 7)
		goto badframe;

	/* setup_rt_frame() set sp = frame - 8, so the frame is at sp + 8 */
	frame = (struct rt_sigframe __user *) ((unsigned long) regs->sp + 8);

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	/* never allow SIGKILL/SIGSTOP to end up blocked */
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
		goto badframe;

	return regs->a4;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
102
/*
 * Save user register state (and the blocked mask) into a sigcontext.
 * Returns the OR of all __put_user() results: 0 on success.
 */
static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
			    unsigned long mask)
{
	int err = 0;

	err |= __put_user(mask, &sc->sc_mask);

	/* The access_ok check was done by caller, so use __put_user here */
#define COPY(x) (err |= __put_user(regs->x, &sc->sc_##x))

	COPY(sp); COPY(a4); COPY(b4); COPY(a6); COPY(b6); COPY(a8); COPY(b8);
	COPY(a0); COPY(a1); COPY(a2); COPY(a3); COPY(a5); COPY(a7); COPY(a9);
	COPY(b0); COPY(b1); COPY(b2); COPY(b3); COPY(b5); COPY(b7); COPY(b9);

	COPY(a16); COPY(a17); COPY(a18); COPY(a19);
	COPY(a20); COPY(a21); COPY(a22); COPY(a23);
	COPY(a24); COPY(a25); COPY(a26); COPY(a27);
	COPY(a28); COPY(a29); COPY(a30); COPY(a31);
	COPY(b16); COPY(b17); COPY(b18); COPY(b19);
	COPY(b20); COPY(b21); COPY(b22); COPY(b23);
	COPY(b24); COPY(b25); COPY(b26); COPY(b27);
	COPY(b28); COPY(b29); COPY(b30); COPY(b31);

	COPY(csr); COPY(pc);

#undef COPY

	return err;
}
132
133static inline void __user *get_sigframe(struct k_sigaction *ka,
134 struct pt_regs *regs,
135 unsigned long framesize)
136{
137 unsigned long sp = regs->sp;
138
139 /*
140 * This is the X/Open sanctioned signal stack switching.
141 */
142 if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(sp) == 0)
143 sp = current->sas_ss_sp + current->sas_ss_size;
144
145 /*
146 * No matter what happens, 'sp' must be dword
147 * aligned. Otherwise, nasty things will happen
148 */
149 return (void __user *)((sp - framesize) & ~7);
150}
151
/*
 * Build an rt signal frame on the user stack and redirect the user
 * context so the handler runs next, returning through the in-frame
 * sigreturn trampoline.  Returns 0 on success, -EFAULT (after forcing
 * SIGSEGV) on any fault while writing the frame.
 */
static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
			  sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	unsigned long __user *retcode;
	int err = 0;

	frame = get_sigframe(ka, regs, sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto segv_and_exit;

	err |= __put_user(&frame->info, &frame->pinfo);
	err |= __put_user(&frame->uc, &frame->puc);
	err |= copy_siginfo_to_user(&frame->info, info);

	/* Clear all the bits of the ucontext we don't use. */
	err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));

	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	/* Set up to return from userspace */
	retcode = (unsigned long __user *) &frame->retcode;

	/* The access_ok check was done above, so use __put_user here */
#define COPY(x) (err |= __put_user(x, retcode++))

	COPY(0x0000002AUL | (__NR_rt_sigreturn << 7));
					/* MVK __NR_rt_sigreturn,B0 */
	COPY(0x10000000UL);	/* SWE */
	COPY(0x00006000UL);	/* NOP 4 */
	COPY(0x00006000UL);	/* NOP 4 */
	COPY(0x00006000UL);	/* NOP 4 */
	COPY(0x00006000UL);	/* NOP 4 */
	COPY(0x00006000UL);	/* NOP 4 */
	COPY(0x00006000UL);	/* NOP 4 */
	COPY(0x00006000UL);	/* NOP 4 */

#undef COPY

	if (err)
		goto segv_and_exit;

	/* the trampoline is executed, so the icache must see it */
	flush_icache_range((unsigned long) &frame->retcode,
			   (unsigned long) &frame->retcode + RETCODE_SIZE);

	retcode = (unsigned long __user *) &frame->retcode;

	/* Change user context to branch to signal handler */
	regs->sp = (unsigned long) frame - 8;
	regs->b3 = (unsigned long) retcode;	/* return address -> trampoline */
	regs->pc = (unsigned long) ka->sa.sa_handler;

	/* Give the signal number to the handler */
	regs->a4 = signr;

	/*
	 * For realtime signals we must also set the second and third
	 * arguments for the signal handler.
	 * -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
	 */
	regs->b4 = (unsigned long)&frame->info;
	regs->a6 = (unsigned long)&frame->uc;

	return 0;

segv_and_exit:
	force_sigsegv(signr, current);
	return -EFAULT;
}
223
/*
 * Adjust the interrupted syscall's return state for restart semantics.
 * Backing pc up by 4 re-executes the trapping instruction.
 *
 * NOTE(review): nothing in this file calls this helper -- the same
 * restart logic is duplicated inline in handle_signal() and
 * do_signal().  Confirm whether it is dead code.
 */
static inline void
handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
{
	switch (regs->a4) {
	case -ERESTARTNOHAND:
		if (!has_handler)
			goto do_restart;
		regs->a4 = -EINTR;
		break;

	case -ERESTARTSYS:
		if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
			regs->a4 = -EINTR;
			break;
		}
		/* fallthrough */
	case -ERESTARTNOINTR:
do_restart:
		regs->a4 = regs->orig_a4;
		regs->pc -= 4;
		break;
	}
}
247
248/*
249 * handle the actual delivery of a signal to userspace
250 */
251static int handle_signal(int sig,
252 siginfo_t *info, struct k_sigaction *ka,
253 sigset_t *oldset, struct pt_regs *regs,
254 int syscall)
255{
256 int ret;
257
258 /* Are we from a system call? */
259 if (syscall) {
260 /* If so, check system call restarting.. */
261 switch (regs->a4) {
262 case -ERESTART_RESTARTBLOCK:
263 case -ERESTARTNOHAND:
264 regs->a4 = -EINTR;
265 break;
266
267 case -ERESTARTSYS:
268 if (!(ka->sa.sa_flags & SA_RESTART)) {
269 regs->a4 = -EINTR;
270 break;
271 }
272
273 /* fallthrough */
274 case -ERESTARTNOINTR:
275 regs->a4 = regs->orig_a4;
276 regs->pc -= 4;
277 }
278 }
279
280 /* Set up the stack frame */
281 ret = setup_rt_frame(sig, ka, info, oldset, regs);
282 if (ret == 0) {
283 spin_lock_irq(&current->sighand->siglock);
284 sigorsets(&current->blocked, &current->blocked,
285 &ka->sa.sa_mask);
286 if (!(ka->sa.sa_flags & SA_NODEFER))
287 sigaddset(&current->blocked, sig);
288 recalc_sigpending();
289 spin_unlock_irq(&current->sighand->siglock);
290 }
291
292 return ret;
293}
294
295/*
296 * handle a potential signal
297 */
298static void do_signal(struct pt_regs *regs, int syscall)
299{
300 struct k_sigaction ka;
301 siginfo_t info;
302 sigset_t *oldset;
303 int signr;
304
305 /* we want the common case to go fast, which is why we may in certain
306 * cases get here from kernel mode */
307 if (!user_mode(regs))
308 return;
309
310 if (test_thread_flag(TIF_RESTORE_SIGMASK))
311 oldset = &current->saved_sigmask;
312 else
313 oldset = &current->blocked;
314
315 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
316 if (signr > 0) {
317 if (handle_signal(signr, &info, &ka, oldset,
318 regs, syscall) == 0) {
319 /* a signal was successfully delivered; the saved
320 * sigmask will have been stored in the signal frame,
321 * and will be restored by sigreturn, so we can simply
322 * clear the TIF_RESTORE_SIGMASK flag */
323 if (test_thread_flag(TIF_RESTORE_SIGMASK))
324 clear_thread_flag(TIF_RESTORE_SIGMASK);
325
326 tracehook_signal_handler(signr, &info, &ka, regs, 0);
327 }
328
329 return;
330 }
331
332 /* did we come from a system call? */
333 if (syscall) {
334 /* restart the system call - no handlers present */
335 switch (regs->a4) {
336 case -ERESTARTNOHAND:
337 case -ERESTARTSYS:
338 case -ERESTARTNOINTR:
339 regs->a4 = regs->orig_a4;
340 regs->pc -= 4;
341 break;
342
343 case -ERESTART_RESTARTBLOCK:
344 regs->a4 = regs->orig_a4;
345 regs->b0 = __NR_restart_syscall;
346 regs->pc -= 4;
347 break;
348 }
349 }
350
351 /* if there's no signal to deliver, we just put the saved sigmask
352 * back */
353 if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
354 clear_thread_flag(TIF_RESTORE_SIGMASK);
355 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
356 }
357}
358
359/*
360 * notification of userspace execution resumption
361 * - triggered by current->work.notify_resume
362 */
363asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags,
364 int syscall)
365{
366 /* deal with pending signal delivery */
367 if (thread_info_flags & ((1 << TIF_SIGPENDING) |
368 (1 << TIF_RESTORE_SIGMASK)))
369 do_signal(regs, syscall);
370
371 if (thread_info_flags & (1 << TIF_NOTIFY_RESUME)) {
372 clear_thread_flag(TIF_NOTIFY_RESUME);
373 tracehook_notify_resume(regs);
374 if (current->replacement_session_keyring)
375 key_replace_session_keyring();
376 }
377}
diff --git a/arch/c6x/kernel/soc.c b/arch/c6x/kernel/soc.c
new file mode 100644
index 000000000000..dd45bc39af0e
--- /dev/null
+++ b/arch/c6x/kernel/soc.c
@@ -0,0 +1,91 @@
1/*
2 * Miscellaneous SoC-specific hooks.
3 *
4 * Copyright (C) 2011 Texas Instruments Incorporated
5 * Author: Mark Salter <msalter@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/module.h>
12#include <linux/ctype.h>
13#include <linux/etherdevice.h>
14#include <asm/system.h>
15#include <asm/setup.h>
16#include <asm/soc.h>
17
18struct soc_ops soc_ops;
19
20int soc_get_exception(void)
21{
22 if (!soc_ops.get_exception)
23 return -1;
24 return soc_ops.get_exception();
25}
26
27void soc_assert_event(unsigned int evt)
28{
29 if (soc_ops.assert_event)
30 soc_ops.assert_event(evt);
31}
32
33static u8 cmdline_mac[6];
34
35static int __init get_mac_addr_from_cmdline(char *str)
36{
37 int count, i, val;
38
39 for (count = 0; count < 6 && *str; count++, str += 3) {
40 if (!isxdigit(str[0]) || !isxdigit(str[1]))
41 return 0;
42 if (str[2] != ((count < 5) ? ':' : '\0'))
43 return 0;
44
45 for (i = 0, val = 0; i < 2; i++) {
46 val = val << 4;
47 val |= isdigit(str[i]) ?
48 str[i] - '0' : toupper(str[i]) - 'A' + 10;
49 }
50 cmdline_mac[count] = val;
51 }
52 return 1;
53}
54__setup("emac_addr=", get_mac_addr_from_cmdline);
55
56/*
57 * Setup the MAC address for SoC ethernet devices.
58 *
59 * Before calling this function, the ethernet driver will have
60 * initialized the addr with local-mac-address from the device
61 * tree (if found). Allow command line to override, but not
62 * the fused address.
63 */
64int soc_mac_addr(unsigned int index, u8 *addr)
65{
66 int i, have_dt_mac = 0, have_cmdline_mac = 0, have_fuse_mac = 0;
67
68 for (i = 0; i < 6; i++) {
69 if (cmdline_mac[i])
70 have_cmdline_mac = 1;
71 if (c6x_fuse_mac[i])
72 have_fuse_mac = 1;
73 if (addr[i])
74 have_dt_mac = 1;
75 }
76
77 /* cmdline overrides all */
78 if (have_cmdline_mac)
79 memcpy(addr, cmdline_mac, 6);
80 else if (!have_dt_mac) {
81 if (have_fuse_mac)
82 memcpy(addr, c6x_fuse_mac, 6);
83 else
84 random_ether_addr(addr);
85 }
86
87 /* adjust for specific EMAC device */
88 addr[5] += index * c6x_num_cores;
89 return 1;
90}
91EXPORT_SYMBOL_GPL(soc_mac_addr);
diff --git a/arch/c6x/kernel/switch_to.S b/arch/c6x/kernel/switch_to.S
new file mode 100644
index 000000000000..09177ed0fa5c
--- /dev/null
+++ b/arch/c6x/kernel/switch_to.S
@@ -0,0 +1,74 @@
1/*
2 * Copyright (C) 2011 Texas Instruments Incorporated
3 * Author: Mark Salter (msalter@redhat.com)
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/linkage.h>
11#include <asm/asm-offsets.h>
12
#define SP	B15

	/*
	 * void __switch_to(struct thread_info *prev,
	 *		    struct thread_info *next,
	 *		    struct task_struct *tsk) ;
	 *
	 * A4 = prev, B4 = next (first two C arguments; see the ; prev
	 * and ; next notes below).  Saves prev's callee-saved register
	 * pairs, SP and the ILC/RILC loop-count registers into its
	 * thread struct, then loads the same state for next and
	 * branches to next's saved PC.  A6 (tsk) is passed through in
	 * A4 as this function's return value.
	 */
ENTRY(__switch_to)
	LDDW	.D2T2	*+B4(THREAD_B15_14),B7:B6
	|| MV	.L2X	A4,B5	; prev
	|| MV	.L1X	B4,A5	; next
	|| MVC	.S2	RILC,B1

	STW	.D2T2	B3,*+B5(THREAD_PC)
	|| STDW	.D1T1	A13:A12,*+A4(THREAD_A13_12)
	|| MVC	.S2	ILC,B0

	LDW	.D2T2	*+B4(THREAD_PC),B3
	|| LDDW	.D1T1	*+A5(THREAD_A13_12),A13:A12

	STDW	.D1T1	A11:A10,*+A4(THREAD_A11_10)
	|| STDW	.D2T2	B1:B0,*+B5(THREAD_RICL_ICL)
#ifndef __DSBT__
	|| MVKL	.S2	current_ksp,B1
#endif

	STDW	.D2T2	B15:B14,*+B5(THREAD_B15_14)
	|| STDW	.D1T1	A15:A14,*+A4(THREAD_A15_14)
#ifndef __DSBT__
	|| MVKH	.S2	current_ksp,B1
#endif

	;; Switch to next SP
	MV	.S2	B7,SP
#ifdef __DSBT__
	|| STW	.D2T2	B7,*+B14(current_ksp)
#else
	|| STW	.D2T2	B7,*B1
	|| MV	.L2	B6,B14
#endif
	|| LDDW	.D1T1	*+A5(THREAD_RICL_ICL),A1:A0

	STDW	.D2T2	B11:B10,*+B5(THREAD_B11_10)
	|| LDDW	.D1T1	*+A5(THREAD_A15_14),A15:A14

	STDW	.D2T2	B13:B12,*+B5(THREAD_B13_12)
	|| LDDW	.D1T1	*+A5(THREAD_A11_10),A11:A10

	B	.S2	B3	; return in next E1
	|| LDDW	.D2T2	*+B4(THREAD_B13_12),B13:B12

	;; branch delay slots: finish loads and restore ILC/RILC
	LDDW	.D2T2	*+B4(THREAD_B11_10),B11:B10
	NOP

	MV	.L2X	A0,B0
	|| MV	.S1	A6,A4	; tsk becomes the return value

	MVC	.S2	B0,ILC
	|| MV	.L2X	A1,B1

	MVC	.S2	B1,RILC
ENDPROC(__switch_to)
diff --git a/arch/c6x/kernel/sys_c6x.c b/arch/c6x/kernel/sys_c6x.c
new file mode 100644
index 000000000000..3e9bdfbee8ad
--- /dev/null
+++ b/arch/c6x/kernel/sys_c6x.c
@@ -0,0 +1,74 @@
1/*
2 * Port on Texas Instruments TMS320C6x architecture
3 *
4 * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
5 * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/module.h>
12#include <linux/syscalls.h>
13#include <linux/uaccess.h>
14
15#include <asm/syscalls.h>
16
17#ifdef CONFIG_ACCESS_CHECK
18int _access_ok(unsigned long addr, unsigned long size)
19{
20 if (!size)
21 return 1;
22
23 if (!addr || addr > (0xffffffffUL - (size - 1)))
24 goto _bad_access;
25
26 if (segment_eq(get_fs(), KERNEL_DS))
27 return 1;
28
29 if (memory_start <= addr && (addr + size - 1) < memory_end)
30 return 1;
31
32_bad_access:
33 pr_debug("Bad access attempt: pid[%d] addr[%08lx] size[0x%lx]\n",
34 current->pid, addr, size);
35 return 0;
36}
37EXPORT_SYMBOL(_access_ok);
38#endif
39
40/* sys_cache_sync -- sync caches over given range */
41asmlinkage int sys_cache_sync(unsigned long s, unsigned long e)
42{
43 L1D_cache_block_writeback_invalidate(s, e);
44 L1P_cache_block_invalidate(s, e);
45
46 return 0;
47}
48
49/* Provide the actual syscall number to call mapping. */
50#undef __SYSCALL
51#define __SYSCALL(nr, call) [nr] = (call),
52
53/*
54 * Use trampolines
55 */
56#define sys_pread64 sys_pread_c6x
57#define sys_pwrite64 sys_pwrite_c6x
58#define sys_truncate64 sys_truncate64_c6x
59#define sys_ftruncate64 sys_ftruncate64_c6x
60#define sys_fadvise64 sys_fadvise64_c6x
61#define sys_fadvise64_64 sys_fadvise64_64_c6x
62#define sys_fallocate sys_fallocate_c6x
63
64/* Use sys_mmap_pgoff directly */
65#define sys_mmap2 sys_mmap_pgoff
66
67/*
68 * Note that we can't include <linux/unistd.h> here since the header
69 * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
70 */
71void *sys_call_table[__NR_syscalls] = {
72 [0 ... __NR_syscalls-1] = sys_ni_syscall,
73#include <asm/unistd.h>
74};
diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c
new file mode 100644
index 000000000000..4c9f136165f7
--- /dev/null
+++ b/arch/c6x/kernel/time.c
@@ -0,0 +1,65 @@
1/*
2 * Port on Texas Instruments TMS320C6x architecture
3 *
4 * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
5 * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/kernel.h>
13#include <linux/clocksource.h>
14#include <linux/errno.h>
15#include <linux/sched.h>
16#include <linux/param.h>
17#include <linux/string.h>
18#include <linux/mm.h>
19#include <linux/interrupt.h>
20#include <linux/timex.h>
21#include <linux/profile.h>
22
23#include <asm/timer64.h>
24
25static u32 sched_clock_multiplier;
26#define SCHED_CLOCK_SHIFT 16
27
28static cycle_t tsc_read(struct clocksource *cs)
29{
30 return get_cycles();
31}
32
33static struct clocksource clocksource_tsc = {
34 .name = "timestamp",
35 .rating = 300,
36 .read = tsc_read,
37 .mask = CLOCKSOURCE_MASK(64),
38 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
39};
40
41/*
42 * scheduler clock - returns current time in nanoseconds.
43 */
44u64 sched_clock(void)
45{
46 u64 tsc = get_cycles();
47
48 return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT;
49}
50
51void time_init(void)
52{
53 u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT;
54
55 do_div(tmp, c6x_core_freq);
56 sched_clock_multiplier = tmp;
57
58 clocksource_register_hz(&clocksource_tsc, c6x_core_freq);
59
60 /* write anything into TSCL to enable counting */
61 set_creg(TSCL, 0);
62
63 /* probe for timer64 event timer */
64 timer64_init();
65}
diff --git a/arch/c6x/kernel/traps.c b/arch/c6x/kernel/traps.c
new file mode 100644
index 000000000000..f50e3edd6dad
--- /dev/null
+++ b/arch/c6x/kernel/traps.c
@@ -0,0 +1,423 @@
1/*
2 * Port on Texas Instruments TMS320C6x architecture
3 *
4 * Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
5 * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/module.h>
12#include <linux/ptrace.h>
13#include <linux/kallsyms.h>
14#include <linux/bug.h>
15
16#include <asm/soc.h>
17#include <asm/traps.h>
18
19int (*c6x_nmi_handler)(struct pt_regs *regs);
20
21void __init trap_init(void)
22{
23 ack_exception(EXCEPT_TYPE_NXF);
24 ack_exception(EXCEPT_TYPE_EXC);
25 ack_exception(EXCEPT_TYPE_IXF);
26 ack_exception(EXCEPT_TYPE_SXF);
27 enable_exception();
28}
29
30void show_regs(struct pt_regs *regs)
31{
32 pr_err("\n");
33 pr_err("PC: %08lx SP: %08lx\n", regs->pc, regs->sp);
34 pr_err("Status: %08lx ORIG_A4: %08lx\n", regs->csr, regs->orig_a4);
35 pr_err("A0: %08lx B0: %08lx\n", regs->a0, regs->b0);
36 pr_err("A1: %08lx B1: %08lx\n", regs->a1, regs->b1);
37 pr_err("A2: %08lx B2: %08lx\n", regs->a2, regs->b2);
38 pr_err("A3: %08lx B3: %08lx\n", regs->a3, regs->b3);
39 pr_err("A4: %08lx B4: %08lx\n", regs->a4, regs->b4);
40 pr_err("A5: %08lx B5: %08lx\n", regs->a5, regs->b5);
41 pr_err("A6: %08lx B6: %08lx\n", regs->a6, regs->b6);
42 pr_err("A7: %08lx B7: %08lx\n", regs->a7, regs->b7);
43 pr_err("A8: %08lx B8: %08lx\n", regs->a8, regs->b8);
44 pr_err("A9: %08lx B9: %08lx\n", regs->a9, regs->b9);
45 pr_err("A10: %08lx B10: %08lx\n", regs->a10, regs->b10);
46 pr_err("A11: %08lx B11: %08lx\n", regs->a11, regs->b11);
47 pr_err("A12: %08lx B12: %08lx\n", regs->a12, regs->b12);
48 pr_err("A13: %08lx B13: %08lx\n", regs->a13, regs->b13);
49 pr_err("A14: %08lx B14: %08lx\n", regs->a14, regs->dp);
50 pr_err("A15: %08lx B15: %08lx\n", regs->a15, regs->sp);
51 pr_err("A16: %08lx B16: %08lx\n", regs->a16, regs->b16);
52 pr_err("A17: %08lx B17: %08lx\n", regs->a17, regs->b17);
53 pr_err("A18: %08lx B18: %08lx\n", regs->a18, regs->b18);
54 pr_err("A19: %08lx B19: %08lx\n", regs->a19, regs->b19);
55 pr_err("A20: %08lx B20: %08lx\n", regs->a20, regs->b20);
56 pr_err("A21: %08lx B21: %08lx\n", regs->a21, regs->b21);
57 pr_err("A22: %08lx B22: %08lx\n", regs->a22, regs->b22);
58 pr_err("A23: %08lx B23: %08lx\n", regs->a23, regs->b23);
59 pr_err("A24: %08lx B24: %08lx\n", regs->a24, regs->b24);
60 pr_err("A25: %08lx B25: %08lx\n", regs->a25, regs->b25);
61 pr_err("A26: %08lx B26: %08lx\n", regs->a26, regs->b26);
62 pr_err("A27: %08lx B27: %08lx\n", regs->a27, regs->b27);
63 pr_err("A28: %08lx B28: %08lx\n", regs->a28, regs->b28);
64 pr_err("A29: %08lx B29: %08lx\n", regs->a29, regs->b29);
65 pr_err("A30: %08lx B30: %08lx\n", regs->a30, regs->b30);
66 pr_err("A31: %08lx B31: %08lx\n", regs->a31, regs->b31);
67}
68
69void dump_stack(void)
70{
71 unsigned long stack;
72
73 show_stack(current, &stack);
74}
75EXPORT_SYMBOL(dump_stack);
76
77
78void die(char *str, struct pt_regs *fp, int nr)
79{
80 console_verbose();
81 pr_err("%s: %08x\n", str, nr);
82 show_regs(fp);
83
84 pr_err("Process %s (pid: %d, stackpage=%08lx)\n",
85 current->comm, current->pid, (PAGE_SIZE +
86 (unsigned long) current));
87
88 dump_stack();
89 while (1)
90 ;
91}
92
93static void die_if_kernel(char *str, struct pt_regs *fp, int nr)
94{
95 if (user_mode(fp))
96 return;
97
98 die(str, fp, nr);
99}
100
101
102/* Internal exceptions */
103static struct exception_info iexcept_table[10] = {
104 { "Oops - instruction fetch", SIGBUS, BUS_ADRERR },
105 { "Oops - fetch packet", SIGBUS, BUS_ADRERR },
106 { "Oops - execute packet", SIGILL, ILL_ILLOPC },
107 { "Oops - undefined instruction", SIGILL, ILL_ILLOPC },
108 { "Oops - resource conflict", SIGILL, ILL_ILLOPC },
109 { "Oops - resource access", SIGILL, ILL_PRVREG },
110 { "Oops - privilege", SIGILL, ILL_PRVOPC },
111 { "Oops - loops buffer", SIGILL, ILL_ILLOPC },
112 { "Oops - software exception", SIGILL, ILL_ILLTRP },
113 { "Oops - unknown exception", SIGILL, ILL_ILLOPC }
114};
115
116/* External exceptions */
117static struct exception_info eexcept_table[128] = {
118 { "Oops - external exception", SIGBUS, BUS_ADRERR },
119 { "Oops - external exception", SIGBUS, BUS_ADRERR },
120 { "Oops - external exception", SIGBUS, BUS_ADRERR },
121 { "Oops - external exception", SIGBUS, BUS_ADRERR },
122 { "Oops - external exception", SIGBUS, BUS_ADRERR },
123 { "Oops - external exception", SIGBUS, BUS_ADRERR },
124 { "Oops - external exception", SIGBUS, BUS_ADRERR },
125 { "Oops - external exception", SIGBUS, BUS_ADRERR },
126 { "Oops - external exception", SIGBUS, BUS_ADRERR },
127 { "Oops - external exception", SIGBUS, BUS_ADRERR },
128 { "Oops - external exception", SIGBUS, BUS_ADRERR },
129 { "Oops - external exception", SIGBUS, BUS_ADRERR },
130 { "Oops - external exception", SIGBUS, BUS_ADRERR },
131 { "Oops - external exception", SIGBUS, BUS_ADRERR },
132 { "Oops - external exception", SIGBUS, BUS_ADRERR },
133 { "Oops - external exception", SIGBUS, BUS_ADRERR },
134 { "Oops - external exception", SIGBUS, BUS_ADRERR },
135 { "Oops - external exception", SIGBUS, BUS_ADRERR },
136 { "Oops - external exception", SIGBUS, BUS_ADRERR },
137 { "Oops - external exception", SIGBUS, BUS_ADRERR },
138 { "Oops - external exception", SIGBUS, BUS_ADRERR },
139 { "Oops - external exception", SIGBUS, BUS_ADRERR },
140 { "Oops - external exception", SIGBUS, BUS_ADRERR },
141 { "Oops - external exception", SIGBUS, BUS_ADRERR },
142 { "Oops - external exception", SIGBUS, BUS_ADRERR },
143 { "Oops - external exception", SIGBUS, BUS_ADRERR },
144 { "Oops - external exception", SIGBUS, BUS_ADRERR },
145 { "Oops - external exception", SIGBUS, BUS_ADRERR },
146 { "Oops - external exception", SIGBUS, BUS_ADRERR },
147 { "Oops - external exception", SIGBUS, BUS_ADRERR },
148 { "Oops - external exception", SIGBUS, BUS_ADRERR },
149 { "Oops - external exception", SIGBUS, BUS_ADRERR },
150
151 { "Oops - external exception", SIGBUS, BUS_ADRERR },
152 { "Oops - external exception", SIGBUS, BUS_ADRERR },
153 { "Oops - external exception", SIGBUS, BUS_ADRERR },
154 { "Oops - external exception", SIGBUS, BUS_ADRERR },
155 { "Oops - external exception", SIGBUS, BUS_ADRERR },
156 { "Oops - external exception", SIGBUS, BUS_ADRERR },
157 { "Oops - external exception", SIGBUS, BUS_ADRERR },
158 { "Oops - external exception", SIGBUS, BUS_ADRERR },
159 { "Oops - external exception", SIGBUS, BUS_ADRERR },
160 { "Oops - external exception", SIGBUS, BUS_ADRERR },
161 { "Oops - external exception", SIGBUS, BUS_ADRERR },
162 { "Oops - external exception", SIGBUS, BUS_ADRERR },
163 { "Oops - external exception", SIGBUS, BUS_ADRERR },
164 { "Oops - external exception", SIGBUS, BUS_ADRERR },
165 { "Oops - external exception", SIGBUS, BUS_ADRERR },
166 { "Oops - external exception", SIGBUS, BUS_ADRERR },
167 { "Oops - external exception", SIGBUS, BUS_ADRERR },
168 { "Oops - external exception", SIGBUS, BUS_ADRERR },
169 { "Oops - external exception", SIGBUS, BUS_ADRERR },
170 { "Oops - external exception", SIGBUS, BUS_ADRERR },
171 { "Oops - external exception", SIGBUS, BUS_ADRERR },
172 { "Oops - external exception", SIGBUS, BUS_ADRERR },
173 { "Oops - external exception", SIGBUS, BUS_ADRERR },
174 { "Oops - external exception", SIGBUS, BUS_ADRERR },
175 { "Oops - external exception", SIGBUS, BUS_ADRERR },
176 { "Oops - external exception", SIGBUS, BUS_ADRERR },
177 { "Oops - external exception", SIGBUS, BUS_ADRERR },
178 { "Oops - external exception", SIGBUS, BUS_ADRERR },
179 { "Oops - external exception", SIGBUS, BUS_ADRERR },
180 { "Oops - external exception", SIGBUS, BUS_ADRERR },
181 { "Oops - external exception", SIGBUS, BUS_ADRERR },
182 { "Oops - external exception", SIGBUS, BUS_ADRERR },
183
184 { "Oops - external exception", SIGBUS, BUS_ADRERR },
185 { "Oops - external exception", SIGBUS, BUS_ADRERR },
186 { "Oops - external exception", SIGBUS, BUS_ADRERR },
187 { "Oops - external exception", SIGBUS, BUS_ADRERR },
188 { "Oops - external exception", SIGBUS, BUS_ADRERR },
189 { "Oops - external exception", SIGBUS, BUS_ADRERR },
190 { "Oops - external exception", SIGBUS, BUS_ADRERR },
191 { "Oops - external exception", SIGBUS, BUS_ADRERR },
192 { "Oops - external exception", SIGBUS, BUS_ADRERR },
193 { "Oops - external exception", SIGBUS, BUS_ADRERR },
194 { "Oops - external exception", SIGBUS, BUS_ADRERR },
195 { "Oops - external exception", SIGBUS, BUS_ADRERR },
196 { "Oops - external exception", SIGBUS, BUS_ADRERR },
197 { "Oops - external exception", SIGBUS, BUS_ADRERR },
198 { "Oops - external exception", SIGBUS, BUS_ADRERR },
199 { "Oops - external exception", SIGBUS, BUS_ADRERR },
200 { "Oops - external exception", SIGBUS, BUS_ADRERR },
201 { "Oops - external exception", SIGBUS, BUS_ADRERR },
202 { "Oops - external exception", SIGBUS, BUS_ADRERR },
203 { "Oops - external exception", SIGBUS, BUS_ADRERR },
204 { "Oops - external exception", SIGBUS, BUS_ADRERR },
205 { "Oops - external exception", SIGBUS, BUS_ADRERR },
206 { "Oops - external exception", SIGBUS, BUS_ADRERR },
207 { "Oops - external exception", SIGBUS, BUS_ADRERR },
208 { "Oops - external exception", SIGBUS, BUS_ADRERR },
209 { "Oops - external exception", SIGBUS, BUS_ADRERR },
210 { "Oops - external exception", SIGBUS, BUS_ADRERR },
211 { "Oops - external exception", SIGBUS, BUS_ADRERR },
212 { "Oops - external exception", SIGBUS, BUS_ADRERR },
213 { "Oops - external exception", SIGBUS, BUS_ADRERR },
214 { "Oops - external exception", SIGBUS, BUS_ADRERR },
215 { "Oops - external exception", SIGBUS, BUS_ADRERR },
216
217 { "Oops - external exception", SIGBUS, BUS_ADRERR },
218 { "Oops - external exception", SIGBUS, BUS_ADRERR },
219 { "Oops - external exception", SIGBUS, BUS_ADRERR },
220 { "Oops - external exception", SIGBUS, BUS_ADRERR },
221 { "Oops - external exception", SIGBUS, BUS_ADRERR },
222 { "Oops - external exception", SIGBUS, BUS_ADRERR },
223 { "Oops - external exception", SIGBUS, BUS_ADRERR },
224 { "Oops - external exception", SIGBUS, BUS_ADRERR },
225 { "Oops - external exception", SIGBUS, BUS_ADRERR },
226 { "Oops - external exception", SIGBUS, BUS_ADRERR },
227 { "Oops - external exception", SIGBUS, BUS_ADRERR },
228 { "Oops - external exception", SIGBUS, BUS_ADRERR },
229 { "Oops - external exception", SIGBUS, BUS_ADRERR },
230 { "Oops - external exception", SIGBUS, BUS_ADRERR },
231 { "Oops - external exception", SIGBUS, BUS_ADRERR },
232 { "Oops - external exception", SIGBUS, BUS_ADRERR },
233 { "Oops - external exception", SIGBUS, BUS_ADRERR },
234 { "Oops - external exception", SIGBUS, BUS_ADRERR },
235 { "Oops - external exception", SIGBUS, BUS_ADRERR },
236 { "Oops - external exception", SIGBUS, BUS_ADRERR },
237 { "Oops - external exception", SIGBUS, BUS_ADRERR },
238 { "Oops - external exception", SIGBUS, BUS_ADRERR },
239 { "Oops - external exception", SIGBUS, BUS_ADRERR },
240 { "Oops - CPU memory protection fault", SIGSEGV, SEGV_ACCERR },
241 { "Oops - CPU memory protection fault in L1P", SIGSEGV, SEGV_ACCERR },
242 { "Oops - DMA memory protection fault in L1P", SIGSEGV, SEGV_ACCERR },
243 { "Oops - CPU memory protection fault in L1D", SIGSEGV, SEGV_ACCERR },
244 { "Oops - DMA memory protection fault in L1D", SIGSEGV, SEGV_ACCERR },
245 { "Oops - CPU memory protection fault in L2", SIGSEGV, SEGV_ACCERR },
246 { "Oops - DMA memory protection fault in L2", SIGSEGV, SEGV_ACCERR },
247 { "Oops - EMC CPU memory protection fault", SIGSEGV, SEGV_ACCERR },
248 { "Oops - EMC bus error", SIGBUS, BUS_ADRERR }
249};
250
251static void do_trap(struct exception_info *except_info, struct pt_regs *regs)
252{
253 unsigned long addr = instruction_pointer(regs);
254 siginfo_t info;
255
256 if (except_info->code != TRAP_BRKPT)
257 pr_err("TRAP: %s PC[0x%lx] signo[%d] code[%d]\n",
258 except_info->kernel_str, regs->pc,
259 except_info->signo, except_info->code);
260
261 die_if_kernel(except_info->kernel_str, regs, addr);
262
263 info.si_signo = except_info->signo;
264 info.si_errno = 0;
265 info.si_code = except_info->code;
266 info.si_addr = (void __user *)addr;
267
268 force_sig_info(except_info->signo, &info, current);
269}
270
271/*
272 * Process an internal exception (non maskable)
273 */
274static int process_iexcept(struct pt_regs *regs)
275{
276 unsigned int iexcept_report = get_iexcept();
277 unsigned int iexcept_num;
278
279 ack_exception(EXCEPT_TYPE_IXF);
280
281 pr_err("IEXCEPT: PC[0x%lx]\n", regs->pc);
282
283 while (iexcept_report) {
284 iexcept_num = __ffs(iexcept_report);
285 iexcept_report &= ~(1 << iexcept_num);
286 set_iexcept(iexcept_report);
287 if (*(unsigned int *)regs->pc == BKPT_OPCODE) {
288 /* This is a breakpoint */
289 struct exception_info bkpt_exception = {
290 "Oops - undefined instruction",
291 SIGTRAP, TRAP_BRKPT
292 };
293 do_trap(&bkpt_exception, regs);
294 iexcept_report &= ~(0xFF);
295 set_iexcept(iexcept_report);
296 continue;
297 }
298
299 do_trap(&iexcept_table[iexcept_num], regs);
300 }
301 return 0;
302}
303
304/*
305 * Process an external exception (maskable)
306 */
307static void process_eexcept(struct pt_regs *regs)
308{
309 int evt;
310
311 pr_err("EEXCEPT: PC[0x%lx]\n", regs->pc);
312
313 while ((evt = soc_get_exception()) >= 0)
314 do_trap(&eexcept_table[evt], regs);
315
316 ack_exception(EXCEPT_TYPE_EXC);
317}
318
319/*
320 * Main exception processing
321 */
322asmlinkage int process_exception(struct pt_regs *regs)
323{
324 unsigned int type;
325 unsigned int type_num;
326 unsigned int ie_num = 9; /* default is unknown exception */
327
328 while ((type = get_except_type()) != 0) {
329 type_num = fls(type) - 1;
330
331 switch (type_num) {
332 case EXCEPT_TYPE_NXF:
333 ack_exception(EXCEPT_TYPE_NXF);
334 if (c6x_nmi_handler)
335 (c6x_nmi_handler)(regs);
336 else
337 pr_alert("NMI interrupt!\n");
338 break;
339
340 case EXCEPT_TYPE_IXF:
341 if (process_iexcept(regs))
342 return 1;
343 break;
344
345 case EXCEPT_TYPE_EXC:
346 process_eexcept(regs);
347 break;
348
349 case EXCEPT_TYPE_SXF:
350 ie_num = 8;
351 default:
352 ack_exception(type_num);
353 do_trap(&iexcept_table[ie_num], regs);
354 break;
355 }
356 }
357 return 0;
358}
359
360static int kstack_depth_to_print = 48;
361
362static void show_trace(unsigned long *stack, unsigned long *endstack)
363{
364 unsigned long addr;
365 int i;
366
367 pr_debug("Call trace:");
368 i = 0;
369 while (stack + 1 <= endstack) {
370 addr = *stack++;
371 /*
372 * If the address is either in the text segment of the
373 * kernel, or in the region which contains vmalloc'ed
374 * memory, it *may* be the address of a calling
375 * routine; if so, print it so that someone tracing
376 * down the cause of the crash will be able to figure
377 * out the call path that was taken.
378 */
379 if (__kernel_text_address(addr)) {
380#ifndef CONFIG_KALLSYMS
381 if (i % 5 == 0)
382 pr_debug("\n ");
383#endif
384 pr_debug(" [<%08lx>]", addr);
385 print_symbol(" %s\n", addr);
386 i++;
387 }
388 }
389 pr_debug("\n");
390}
391
392void show_stack(struct task_struct *task, unsigned long *stack)
393{
394 unsigned long *p, *endstack;
395 int i;
396
397 if (!stack) {
398 if (task && task != current)
399 /* We know this is a kernel stack,
400 so this is the start/end */
401 stack = (unsigned long *)thread_saved_ksp(task);
402 else
403 stack = (unsigned long *)&stack;
404 }
405 endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1)
406 & -THREAD_SIZE);
407
408 pr_debug("Stack from %08lx:", (unsigned long)stack);
409 for (i = 0, p = stack; i < kstack_depth_to_print; i++) {
410 if (p + 1 > endstack)
411 break;
412 if (i % 8 == 0)
413 pr_cont("\n ");
414 pr_cont(" %08lx", *p++);
415 }
416 pr_cont("\n");
417 show_trace(stack, endstack);
418}
419
420int is_valid_bugaddr(unsigned long addr)
421{
422 return __kernel_text_address(addr);
423}
diff --git a/arch/c6x/kernel/vectors.S b/arch/c6x/kernel/vectors.S
new file mode 100644
index 000000000000..c95c66fc71e8
--- /dev/null
+++ b/arch/c6x/kernel/vectors.S
@@ -0,0 +1,81 @@
1;
2; Port on Texas Instruments TMS320C6x architecture
3;
4; Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
5; Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
6;
7; This program is free software; you can redistribute it and/or modify
8; it under the terms of the GNU General Public License version 2 as
9; published by the Free Software Foundation.
10;
11; This section handles all the interrupt vector routines.
12; At RESET the processor sets up the DRAM timing parameters and
13; branches to the label _c_int00 which handles initialization for the C code.
14;
15
16#define ALIGNMENT 5
17
18 .macro IRQVEC name, handler
19 .align ALIGNMENT
20 .hidden \name
21 .global \name
22\name:
23#ifdef CONFIG_C6X_BIG_KERNEL
24 STW .D2T1 A0,*B15--[2]
25 || MVKL .S1 \handler,A0
26 MVKH .S1 \handler,A0
27 B .S2X A0
28 LDW .D2T1 *++B15[2],A0
29 NOP 4
30 NOP
31 NOP
32 .endm
33#else /* CONFIG_C6X_BIG_KERNEL */
34 B .S2 \handler
35 NOP
36 NOP
37 NOP
38 NOP
39 NOP
40 NOP
41 NOP
42 .endm
43#endif /* CONFIG_C6X_BIG_KERNEL */
44
45 .sect ".vectors","ax"
46 .align ALIGNMENT
47 .global RESET
48 .hidden RESET
49RESET:
50#ifdef CONFIG_C6X_BIG_KERNEL
51 MVKL .S1 _c_int00,A0 ; branch to _c_int00
52 MVKH .S1 _c_int00,A0
53 B .S2X A0
54#else
55 B .S2 _c_int00
56 NOP
57 NOP
58#endif
59 NOP
60 NOP
61 NOP
62 NOP
63 NOP
64
65
66 IRQVEC NMI,_nmi_handler ; NMI interrupt
67 IRQVEC AINT,_bad_interrupt ; reserved
68 IRQVEC MSGINT,_bad_interrupt ; reserved
69
70 IRQVEC INT4,_int4_handler
71 IRQVEC INT5,_int5_handler
72 IRQVEC INT6,_int6_handler
73 IRQVEC INT7,_int7_handler
74 IRQVEC INT8,_int8_handler
75 IRQVEC INT9,_int9_handler
76 IRQVEC INT10,_int10_handler
77 IRQVEC INT11,_int11_handler
78 IRQVEC INT12,_int12_handler
79 IRQVEC INT13,_int13_handler
80 IRQVEC INT14,_int14_handler
81 IRQVEC INT15,_int15_handler
diff --git a/arch/c6x/kernel/vmlinux.lds.S b/arch/c6x/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..1d81c4c129ec
--- /dev/null
+++ b/arch/c6x/kernel/vmlinux.lds.S
@@ -0,0 +1,162 @@
1/*
2 * ld script for the c6x kernel
3 *
4 * Copyright (C) 2010, 2011 Texas Instruments Incorporated
5 * Mark Salter <msalter@redhat.com>
6 */
7#include <asm-generic/vmlinux.lds.h>
8#include <asm/thread_info.h>
9#include <asm/page.h>
10
11ENTRY(_c_int00)
12
13#if defined(CONFIG_CPU_BIG_ENDIAN)
14jiffies = jiffies_64 + 4;
15#else
16jiffies = jiffies_64;
17#endif
18
19#define READONLY_SEGMENT_START \
20 . = PAGE_OFFSET;
21#define READWRITE_SEGMENT_START \
22 . = ALIGN(128); \
23 _data_lma = .;
24
25SECTIONS
26{
27 /*
28 * Start kernel read only segment
29 */
30 READONLY_SEGMENT_START
31
32 .vectors :
33 {
34 _vectors_start = .;
35 *(.vectors)
36 . = ALIGN(0x400);
37 _vectors_end = .;
38 }
39
40 . = ALIGN(0x1000);
41 .cmdline :
42 {
43 *(.cmdline)
44 }
45
46 /*
47 * This section contains data which may be shared with other
48 * cores. It needs to be a fixed offset from PAGE_OFFSET
49 * regardless of kernel configuration.
50 */
51 .virtio_ipc_dev :
52 {
53 *(.virtio_ipc_dev)
54 }
55
56 . = ALIGN(PAGE_SIZE);
57 .init :
58 {
59 _stext = .;
60 _sinittext = .;
61 HEAD_TEXT
62 INIT_TEXT
63 _einittext = .;
64 }
65
66 __init_begin = _stext;
67 INIT_DATA_SECTION(16)
68
69 PERCPU_SECTION(128)
70
71 . = ALIGN(PAGE_SIZE);
72 __init_end = .;
73
74 .text :
75 {
76 _text = .;
77 TEXT_TEXT
78 SCHED_TEXT
79 LOCK_TEXT
80 IRQENTRY_TEXT
81 KPROBES_TEXT
82 *(.fixup)
83 *(.gnu.warning)
84 }
85
86 EXCEPTION_TABLE(16)
87 NOTES
88
89 RO_DATA_SECTION(PAGE_SIZE)
90 .const :
91 {
92 *(.const .const.* .gnu.linkonce.r.*)
93 *(.switch)
94 }
95
96 . = ALIGN (8) ;
97 __fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET)
98 {
99 _fdt_start = . ; /* place for fdt blob */
100 *(__fdt_blob) ; /* Any link-placed DTB */
101 BYTE(0); /* section always has contents */
102 . = _fdt_start + 0x4000; /* Pad up to 16kbyte */
103 _fdt_end = . ;
104 }
105
106 _etext = .;
107
108 /*
109 * Start kernel read-write segment.
110 */
111 READWRITE_SEGMENT_START
112 _sdata = .;
113
114 .fardata : AT(ADDR(.fardata) - LOAD_OFFSET)
115 {
116 INIT_TASK_DATA(THREAD_SIZE)
117 NOSAVE_DATA
118 PAGE_ALIGNED_DATA(PAGE_SIZE)
119 CACHELINE_ALIGNED_DATA(128)
120 READ_MOSTLY_DATA(128)
121 DATA_DATA
122 CONSTRUCTORS
123 *(.data1)
124 *(.fardata .fardata.*)
125 *(.data.debug_bpt)
126 }
127
128 .neardata ALIGN(8) : AT(ADDR(.neardata) - LOAD_OFFSET)
129 {
130 *(.neardata2 .neardata2.* .gnu.linkonce.s2.*)
131 *(.neardata .neardata.* .gnu.linkonce.s.*)
132 . = ALIGN(8);
133 }
134
135 _edata = .;
136
137 __bss_start = .;
138 SBSS(8)
139 BSS(8)
140 .far :
141 {
142 . = ALIGN(8);
143 *(.dynfar)
144 *(.far .far.* .gnu.linkonce.b.*)
145 . = ALIGN(8);
146 }
147 __bss_stop = .;
148
149 _end = .;
150
151 DWARF_DEBUG
152
153 /DISCARD/ :
154 {
155 EXIT_TEXT
156 EXIT_DATA
157 EXIT_CALL
158 *(.discard)
159 *(.discard.*)
160 *(.interp)
161 }
162}