Diffstat (limited to 'arch/tile/kernel')
-rw-r--r--	arch/tile/kernel/entry.S	|   2
-rw-r--r--	arch/tile/kernel/intvec_32.S	|  24
-rw-r--r--	arch/tile/kernel/intvec_64.S	|  52
-rw-r--r--	arch/tile/kernel/module.c	|   2
-rw-r--r--	arch/tile/kernel/proc.c	|   4
-rw-r--r--	arch/tile/kernel/process.c	|   3
-rw-r--r--	arch/tile/kernel/setup.c	|  28
-rw-r--r--	arch/tile/kernel/single_step.c	|  31
-rw-r--r--	arch/tile/kernel/smp.c	|   8
-rw-r--r--	arch/tile/kernel/smpboot.c	|   2
-rw-r--r--	arch/tile/kernel/stack.c	| 232
-rw-r--r--	arch/tile/kernel/traps.c	|  15
12 files changed, 243 insertions(+), 160 deletions(-)
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 431e9ae60488..ec91568df880 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -85,6 +85,7 @@ STD_ENTRY(cpu_idle_on_new_stack)
 /* Loop forever on a nap during SMP boot. */
 STD_ENTRY(smp_nap)
 	nap
+	nop	/* avoid provoking the icache prefetch with a jump */
 	j smp_nap /* we are not architecturally guaranteed not to exit nap */
 	jrp lr /* clue in the backtracer */
 	STD_ENDPROC(smp_nap)
@@ -105,5 +106,6 @@ STD_ENTRY(_cpu_idle)
 	.global _cpu_idle_nap
 _cpu_idle_nap:
 	nap
+	nop	/* avoid provoking the icache prefetch with a jump */
 	jrp lr
 	STD_ENDPROC(_cpu_idle)
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index aecc8ed5f39b..5d56a1ef5ba5 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -799,6 +799,10 @@ handle_interrupt:
  * This routine takes a boolean in r30 indicating if this is an NMI.
  * If so, we also expect a boolean in r31 indicating whether to
  * re-enable the oprofile interrupts.
+ *
+ * Note that .Lresume_userspace is jumped to directly in several
+ * places, and we need to make sure r30 is set correctly in those
+ * callers as well.
  */
 STD_ENTRY(interrupt_return)
 	/* If we're resuming to kernel space, don't check thread flags. */
@@ -1237,7 +1241,10 @@ handle_syscall:
 	bzt	r30, 1f
 	jal	do_syscall_trace
 	FEEDBACK_REENTER(handle_syscall)
-1:	j	.Lresume_userspace /* jump into middle of interrupt_return */
+1:	{
+	 movei	r30, 0 /* not an NMI */
+	 j	.Lresume_userspace /* jump into middle of interrupt_return */
+	}
 
 .Linvalid_syscall:
 	/* Report an invalid syscall back to the user program */
@@ -1246,7 +1253,10 @@ handle_syscall:
 	 movei	r28, -ENOSYS
 	}
 	sw	r29, r28
-	j	.Lresume_userspace /* jump into middle of interrupt_return */
+	{
+	 movei	r30, 0 /* not an NMI */
+	 j	.Lresume_userspace /* jump into middle of interrupt_return */
+	}
 	STD_ENDPROC(handle_syscall)
 
 /* Return the address for oprofile to suppress in backtraces. */
@@ -1262,7 +1272,10 @@ STD_ENTRY(ret_from_fork)
 	jal	sim_notify_fork
 	jal	schedule_tail
 	FEEDBACK_REENTER(ret_from_fork)
-	j	.Lresume_userspace /* jump into middle of interrupt_return */
+	{
+	 movei	r30, 0 /* not an NMI */
+	 j	.Lresume_userspace /* jump into middle of interrupt_return */
+	}
 	STD_ENDPROC(ret_from_fork)
 
 /*
@@ -1376,7 +1389,10 @@ handle_ill:
 
 	jal	send_sigtrap /* issue a SIGTRAP */
 	FEEDBACK_REENTER(handle_ill)
-	j	.Lresume_userspace /* jump into middle of interrupt_return */
+	{
+	 movei	r30, 0 /* not an NMI */
+	 j	.Lresume_userspace /* jump into middle of interrupt_return */
+	}
 
 .Ldispatch_normal_ill:
 	{
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 79c93e10ba27..49d9d6621682 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -22,6 +22,7 @@
 #include <asm/irqflags.h>
 #include <asm/asm-offsets.h>
 #include <asm/types.h>
+#include <asm/signal.h>
 #include <hv/hypervisor.h>
 #include <arch/abi.h>
 #include <arch/interrupts.h>
@@ -605,6 +606,10 @@ handle_interrupt:
  * This routine takes a boolean in r30 indicating if this is an NMI.
  * If so, we also expect a boolean in r31 indicating whether to
  * re-enable the oprofile interrupts.
+ *
+ * Note that .Lresume_userspace is jumped to directly in several
+ * places, and we need to make sure r30 is set correctly in those
+ * callers as well.
  */
 STD_ENTRY(interrupt_return)
 	/* If we're resuming to kernel space, don't check thread flags. */
@@ -1039,11 +1044,28 @@ handle_syscall:
 
 	/* Do syscall trace again, if requested. */
 	ld	r30, r31
-	andi	r30, r30, _TIF_SYSCALL_TRACE
-	beqzt	r30, 1f
+	andi	r0, r30, _TIF_SYSCALL_TRACE
+	{
+	 andi	r0, r30, _TIF_SINGLESTEP
+	 beqzt	r0, 1f
+	}
 	jal	do_syscall_trace
 	FEEDBACK_REENTER(handle_syscall)
-1:	j	.Lresume_userspace /* jump into middle of interrupt_return */
+	andi	r0, r30, _TIF_SINGLESTEP
+
+1:	beqzt	r0, 2f
+
+	/* Single stepping -- notify ptrace. */
+	{
+	 movei	r0, SIGTRAP
+	 jal	ptrace_notify
+	}
+	FEEDBACK_REENTER(handle_syscall)
+
+2:	{
+	 movei	r30, 0 /* not an NMI */
+	 j	.Lresume_userspace /* jump into middle of interrupt_return */
+	}
 
 .Lcompat_syscall:
 	/*
@@ -1077,7 +1099,10 @@ handle_syscall:
 	 movei	r28, -ENOSYS
 	}
 	st	r29, r28
-	j	.Lresume_userspace /* jump into middle of interrupt_return */
+	{
+	 movei	r30, 0 /* not an NMI */
+	 j	.Lresume_userspace /* jump into middle of interrupt_return */
+	}
 	STD_ENDPROC(handle_syscall)
 
 /* Return the address for oprofile to suppress in backtraces. */
@@ -1093,7 +1118,10 @@ STD_ENTRY(ret_from_fork)
 	jal	sim_notify_fork
 	jal	schedule_tail
 	FEEDBACK_REENTER(ret_from_fork)
-	j	.Lresume_userspace
+	{
+	 movei	r30, 0 /* not an NMI */
+	 j	.Lresume_userspace /* jump into middle of interrupt_return */
+	}
 	STD_ENDPROC(ret_from_fork)
 
 /* Various stub interrupt handlers and syscall handlers */
@@ -1156,6 +1184,18 @@ int_unalign:
 	push_extra_callee_saves r0
 	j	do_trap
 
+/* Fill the return address stack with nonzero entries. */
+STD_ENTRY(fill_ra_stack)
+	{
+	 move	r0, lr
+	 jal	1f
+	}
+1:	jal	2f
+2:	jal	3f
+3:	jal	4f
+4:	jrp	r0
+	STD_ENDPROC(fill_ra_stack)
+
 /* Include .intrpt1 array of interrupt vectors */
 .section ".intrpt1", "ax"
 
@@ -1166,7 +1206,7 @@ int_unalign:
 #define do_hardwall_trap bad_intr
 #endif
 
-	int_hand     INT_MEM_ERROR, MEM_ERROR, bad_intr
+	int_hand     INT_MEM_ERROR, MEM_ERROR, do_trap
 	int_hand     INT_SINGLE_STEP_3, SINGLE_STEP_3, bad_intr
 #if CONFIG_KERNEL_PL == 2
 	int_hand     INT_SINGLE_STEP_2, SINGLE_STEP_2, gx_singlestep_handle
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index b90ab9925674..98d476920106 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -67,6 +67,8 @@ void *module_alloc(unsigned long size)
 	area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END);
 	if (!area)
 		goto error;
+	area->nr_pages = npages;
+	area->pages = pages;
 
 	if (map_vm_area(area, prot_rwx, &pages)) {
 		vunmap(area->addr);
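The two added assignments are the substance of this hunk: a vm_struct built by hand with __get_vm_area()/map_vm_area() records no backing pages, so a later vfree() of the module region would unmap the virtual range but leak the physical pages. A minimal sketch of the pattern, using the generic VMALLOC_START/VMALLOC_END bounds in place of the tile-specific MEM_MODULE_START/MEM_MODULE_END (the helper below is illustrative, not this file's actual code):

    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    /* Illustrative only: map pre-allocated pages and record them in the
     * vm_struct so vfree() can hand them back to the page allocator. */
    static void *map_module_pages(struct page **pages, int npages,
                                  unsigned long size, pgprot_t prot)
    {
            struct vm_struct *area;

            area = __get_vm_area(size, VM_ALLOC, VMALLOC_START, VMALLOC_END);
            if (!area)
                    return NULL;
            area->nr_pages = npages;  /* the fix: without these two lines */
            area->pages = pages;      /* vfree() would leak every page    */

            if (map_vm_area(area, prot, &pages)) {
                    vunmap(area->addr);
                    return NULL;
            }
            return area->addr;
    }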
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c
index 7a9327046404..446a7f52cc11 100644
--- a/arch/tile/kernel/proc.c
+++ b/arch/tile/kernel/proc.c
@@ -146,7 +146,6 @@ static ctl_table unaligned_table[] = {
 	},
 	{}
 };
-#endif
 
 static struct ctl_path tile_path[] = {
 	{ .procname = "tile" },
@@ -155,10 +154,9 @@ static struct ctl_path tile_path[] = {
 
 static int __init proc_sys_tile_init(void)
 {
-#ifndef __tilegx__  /* FIXME: GX: no support for unaligned access yet */
 	register_sysctl_paths(tile_path, unaligned_table);
-#endif
 	return 0;
 }
 
 arch_initcall(proc_sys_tile_init);
+#endif
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 30caecac94dc..2d5ef617bb39 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -28,6 +28,7 @@
 #include <linux/tracehook.h>
 #include <linux/signal.h>
 #include <asm/stack.h>
+#include <asm/switch_to.h>
 #include <asm/homecache.h>
 #include <asm/syscalls.h>
 #include <asm/traps.h>
@@ -285,7 +286,7 @@ struct task_struct *validate_current(void)
 	static struct task_struct corrupt = { .comm = "<corrupt>" };
 	struct task_struct *tsk = current;
 	if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
-		     (void *)tsk > high_memory ||
+		     (high_memory && (void *)tsk > high_memory) ||
 		     ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
 		pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
 		tsk = &corrupt;
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 92a94f4920ad..bff23f476110 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -103,13 +103,11 @@ unsigned long __initdata pci_reserve_end_pfn = -1U;
 
 static int __init setup_maxmem(char *str)
 {
-	long maxmem_mb;
-	if (str == NULL || strict_strtol(str, 0, &maxmem_mb) != 0 ||
-	    maxmem_mb == 0)
+	unsigned long long maxmem;
+	if (str == NULL || (maxmem = memparse(str, NULL)) == 0)
 		return -EINVAL;
 
-	maxmem_pfn = (maxmem_mb >> (HPAGE_SHIFT - 20)) <<
-		(HPAGE_SHIFT - PAGE_SHIFT);
+	maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
 	pr_info("Forcing RAM used to no more than %dMB\n",
 		maxmem_pfn >> (20 - PAGE_SHIFT));
 	return 0;
@@ -119,14 +117,15 @@ early_param("maxmem", setup_maxmem);
 static int __init setup_maxnodemem(char *str)
 {
 	char *endp;
-	long maxnodemem_mb, node;
+	unsigned long long maxnodemem;
+	long node;
 
 	node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
-	if (node >= MAX_NUMNODES || *endp != ':' ||
-	    strict_strtol(endp+1, 0, &maxnodemem_mb) != 0)
+	if (node >= MAX_NUMNODES || *endp != ':')
 		return -EINVAL;
 
-	maxnodemem_pfn[node] = (maxnodemem_mb >> (HPAGE_SHIFT - 20)) <<
+	maxnodemem = memparse(endp+1, NULL);
+	maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) <<
 		(HPAGE_SHIFT - PAGE_SHIFT);
 	pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
 		node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
@@ -913,6 +912,13 @@ void __cpuinit setup_cpu(int boot)
 
 #ifdef CONFIG_BLK_DEV_INITRD
 
+/*
+ * Note that the kernel can potentially support other compression
+ * techniques than gz, though we don't do so by default.  If we ever
+ * decide to do so we can either look for other filename extensions,
+ * or just allow a file with this name to be compressed with an
+ * arbitrary compressor (somewhat counterintuitively).
+ */
 static int __initdata set_initramfs_file;
 static char __initdata initramfs_file[128] = "initramfs.cpio.gz";
 
@@ -928,9 +934,9 @@ static int __init setup_initramfs_file(char *str)
 early_param("initramfs_file", setup_initramfs_file);
 
 /*
- * We look for an additional "initramfs.cpio.gz" file in the hvfs.
+ * We look for an "initramfs.cpio.gz" file in the hvfs.
  * If there is one, we allocate some memory for it and it will be
- * unpacked to the initramfs after any built-in initramfs_data.
+ * unpacked to the initramfs.
  */
 static void __init load_hv_initrd(void)
 {
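The two parsing hunks above swap strict_strtol() of a megabyte count for memparse(), so "maxmem=512M", "maxmem=2G", and "maxnodemem=1:4G" all work; note a bare number is now read as bytes, not megabytes. A small userspace model of the new arithmetic (the suffix handling mimics memparse(); the HPAGE_SHIFT/PAGE_SHIFT values are illustrative, not taken from this diff):

    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal model of the kernel's memparse(): a number with an optional
     * K/M/G suffix.  (The real memparse also accepts larger suffixes.) */
    static unsigned long long memparse_model(const char *s)
    {
            char *end;
            unsigned long long v = strtoull(s, &end, 0);
            switch (*end) {
            case 'G': case 'g': v <<= 30; break;
            case 'M': case 'm': v <<= 20; break;
            case 'K': case 'k': v <<= 10; break;
            }
            return v;
    }

    int main(void)
    {
            /* Assumed example values: 16MB huge pages, 64KB base pages. */
            int HPAGE_SHIFT = 24, PAGE_SHIFT = 16;
            unsigned long long maxmem = memparse_model("512M");
            unsigned long maxmem_pfn =
                    (unsigned long)(maxmem >> HPAGE_SHIFT) <<
                    (HPAGE_SHIFT - PAGE_SHIFT);
            printf("maxmem_pfn = %lu (%lluMB)\n", maxmem_pfn, maxmem >> 20);
            return 0;
    }

Rounding down to a whole number of huge pages, as the kernel expression does, means "maxmem=512M" yields 8192 base-page frames under these assumed shifts.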
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index bc1eb586e24d..9efbc1391b3c 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -153,6 +153,25 @@ static tile_bundle_bits rewrite_load_store_unaligned(
 	if (((unsigned long)addr % size) == 0)
 		return bundle;
 
+	/*
+	 * Return SIGBUS with the unaligned address, if requested.
+	 * Note that we return SIGBUS even for completely invalid addresses
+	 * as long as they are in fact unaligned; this matches what the
+	 * tilepro hardware would be doing, if it could provide us with the
+	 * actual bad address in an SPR, which it doesn't.
+	 */
+	if (unaligned_fixup == 0) {
+		siginfo_t info = {
+			.si_signo = SIGBUS,
+			.si_code = BUS_ADRALN,
+			.si_addr = addr
+		};
+		trace_unhandled_signal("unaligned trap", regs,
+				       (unsigned long)addr, SIGBUS);
+		force_sig_info(info.si_signo, &info, current);
+		return (tilepro_bundle_bits) 0;
+	}
+
 #ifndef __LITTLE_ENDIAN
 # error We assume little-endian representation with copy_xx_user size 2 here
 #endif
@@ -192,18 +211,6 @@ static tile_bundle_bits rewrite_load_store_unaligned(
 		return (tile_bundle_bits) 0;
 	}
 
-	if (unaligned_fixup == 0) {
-		siginfo_t info = {
-			.si_signo = SIGBUS,
-			.si_code = BUS_ADRALN,
-			.si_addr = addr
-		};
-		trace_unhandled_signal("unaligned trap", regs,
-				       (unsigned long)addr, SIGBUS);
-		force_sig_info(info.si_signo, &info, current);
-		return (tile_bundle_bits) 0;
-	}
-
 	if (unaligned_printk || unaligned_fixup_count == 0) {
 		pr_info("Process %d/%s: PC %#lx: Fixup of"
 			" unaligned %s at %#lx.\n",
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index a44e103c5a63..91da0f721958 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -103,7 +103,7 @@ static void smp_stop_cpu_interrupt(void)
 	set_cpu_online(smp_processor_id(), 0);
 	arch_local_irq_disable_all();
 	for (;;)
-		asm("nap");
+		asm("nap; nop");
 }
 
 /* This function calls the 'stop' function on all other CPUs in the system. */
109 | /* This function calls the 'stop' function on all other CPUs in the system. */ | 109 | /* This function calls the 'stop' function on all other CPUs in the system. */ |
@@ -113,6 +113,12 @@ void smp_send_stop(void) | |||
113 | send_IPI_allbutself(MSG_TAG_STOP_CPU); | 113 | send_IPI_allbutself(MSG_TAG_STOP_CPU); |
114 | } | 114 | } |
115 | 115 | ||
116 | /* On panic, just wait; we may get an smp_send_stop() later on. */ | ||
117 | void panic_smp_self_stop(void) | ||
118 | { | ||
119 | while (1) | ||
120 | asm("nap; nop"); | ||
121 | } | ||
116 | 122 | ||
117 | /* | 123 | /* |
118 | * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages. | 124 | * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages. |
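panic_smp_self_stop() overrides a weak generic symbol; without the override, a panicking tile CPU would busy-spin instead of napping. The generic fallback in kernel/panic.c of this era is roughly the following (reconstructed from memory, not part of this diff):

    /* Generic default the tile version above replaces: keep the CPU
     * spinning cheaply until a possible smp_send_stop() arrives. */
    void __weak panic_smp_self_stop(void)
    {
            while (1)
                    cpu_relax();
    }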
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index b949edcec200..172aef7d3159 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -196,6 +196,8 @@ void __cpuinit online_secondary(void)
 	/* This must be done before setting cpu_online_mask */
 	wmb();
 
+	notify_cpu_starting(smp_processor_id());
+
 	/*
 	 * We need to hold call_lock, so there is no inconsistency
 	 * between the time smp_call_function() determines number of
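Calling notify_cpu_starting() here fires the CPU_STARTING notifier chain on the new CPU before it is marked online, which subsystems that keep per-cpu state rely on. A hypothetical consumer, to show what now gets invoked on tile (demo_cpu_notify and demo_cpu_nb are invented for illustration):

    #include <linux/cpu.h>
    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/notifier.h>

    /* Runs on the incoming CPU, just before it is marked online. */
    static int __cpuinit demo_cpu_notify(struct notifier_block *nb,
                                         unsigned long action, void *hcpu)
    {
            if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
                    pr_info("cpu %ld about to go online\n", (long)hcpu);
            return NOTIFY_OK;
    }

    static struct notifier_block demo_cpu_nb __cpuinitdata = {
            .notifier_call = demo_cpu_notify,
    };

    /* register_cpu_notifier(&demo_cpu_nb) from an initcall hooks it up. */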
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 37ee4d037e0b..b2f44c28dda6 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -21,10 +21,12 @@
 #include <linux/stacktrace.h>
 #include <linux/uaccess.h>
 #include <linux/mmzone.h>
+#include <linux/dcache.h>
+#include <linux/fs.h>
 #include <asm/backtrace.h>
 #include <asm/page.h>
-#include <asm/tlbflush.h>
 #include <asm/ucontext.h>
+#include <asm/switch_to.h>
 #include <asm/sigframe.h>
 #include <asm/stack.h>
 #include <arch/abi.h>
@@ -44,72 +46,23 @@ static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
 	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
 }
 
-/* Is address valid for reading? */
-static int valid_address(struct KBacktraceIterator *kbt, unsigned long address)
-{
-	HV_PTE *l1_pgtable = kbt->pgtable;
-	HV_PTE *l2_pgtable;
-	unsigned long pfn;
-	HV_PTE pte;
-	struct page *page;
-
-	if (l1_pgtable == NULL)
-		return 0;	/* can't read user space in other tasks */
-
-#ifdef CONFIG_64BIT
-	/* Find the real l1_pgtable by looking in the l0_pgtable. */
-	pte = l1_pgtable[HV_L0_INDEX(address)];
-	if (!hv_pte_get_present(pte))
-		return 0;
-	pfn = hv_pte_get_pfn(pte);
-	if (pte_huge(pte)) {
-		if (!pfn_valid(pfn)) {
-			pr_err("L0 huge page has bad pfn %#lx\n", pfn);
-			return 0;
-		}
-		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
-	}
-	page = pfn_to_page(pfn);
-	BUG_ON(PageHighMem(page));   /* No HIGHMEM on 64-bit. */
-	l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
-#endif
-	pte = l1_pgtable[HV_L1_INDEX(address)];
-	if (!hv_pte_get_present(pte))
-		return 0;
-	pfn = hv_pte_get_pfn(pte);
-	if (pte_huge(pte)) {
-		if (!pfn_valid(pfn)) {
-			pr_err("huge page has bad pfn %#lx\n", pfn);
-			return 0;
-		}
-		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
-	}
-
-	page = pfn_to_page(pfn);
-	if (PageHighMem(page)) {
-		pr_err("L2 page table not in LOWMEM (%#llx)\n",
-		       HV_PFN_TO_CPA(pfn));
-		return 0;
-	}
-	l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
-	pte = l2_pgtable[HV_L2_INDEX(address)];
-	return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
-}
-
 /* Callback for backtracer; basically a glorified memcpy */
 static bool read_memory_func(void *result, unsigned long address,
 			     unsigned int size, void *vkbt)
 {
 	int retval;
 	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
+
+	if (address == 0)
+		return 0;
 	if (__kernel_text_address(address)) {
 		/* OK to read kernel code. */
 	} else if (address >= PAGE_OFFSET) {
 		/* We only tolerate kernel-space reads of this task's stack */
 		if (!in_kernel_stack(kbt, address))
 			return 0;
-	} else if (!valid_address(kbt, address)) {
-		return 0;	/* invalid user-space address */
+	} else if (!kbt->is_current) {
+		return 0;	/* can't read from other user address spaces */
 	}
 	pagefault_disable();
 	retval = __copy_from_user_inatomic(result,
@@ -127,6 +80,8 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 	unsigned long sp = kbt->it.sp;
 	struct pt_regs *p;
 
+	if (sp % sizeof(long) != 0)
+		return NULL;
 	if (!in_kernel_stack(kbt, sp))
 		return NULL;
 	if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
@@ -169,27 +124,27 @@ static int is_sigreturn(unsigned long pc)
 }
 
 /* Return a pt_regs pointer for a valid signal handler frame */
-static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
+static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
+				      struct rt_sigframe* kframe)
 {
 	BacktraceIterator *b = &kbt->it;
 
-	if (b->pc == VDSO_BASE) {
-		struct rt_sigframe *frame;
-		unsigned long sigframe_top =
-			b->sp + sizeof(struct rt_sigframe) - 1;
-		if (!valid_address(kbt, b->sp) ||
-		    !valid_address(kbt, sigframe_top)) {
-			if (kbt->verbose)
-				pr_err("  (odd signal: sp %#lx?)\n",
-				       (unsigned long)(b->sp));
+	if (b->pc == VDSO_BASE && b->sp < PAGE_OFFSET &&
+	    b->sp % sizeof(long) == 0) {
+		int retval;
+		pagefault_disable();
+		retval = __copy_from_user_inatomic(
+			kframe, (void __user __force *)b->sp,
+			sizeof(*kframe));
+		pagefault_enable();
+		if (retval != 0 ||
+		    (unsigned int)(kframe->info.si_signo) >= _NSIG)
 			return NULL;
-		}
-		frame = (struct rt_sigframe *)b->sp;
 		if (kbt->verbose) {
 			pr_err("  <received signal %d>\n",
-			       frame->info.si_signo);
+			       kframe->info.si_signo);
 		}
-		return (struct pt_regs *)&frame->uc.uc_mcontext;
+		return (struct pt_regs *)&kframe->uc.uc_mcontext;
 	}
 	return NULL;
 }
@@ -202,10 +157,11 @@ static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
 static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
 {
 	struct pt_regs *p;
+	struct rt_sigframe kframe;
 
 	p = valid_fault_handler(kbt);
 	if (p == NULL)
-		p = valid_sigframe(kbt);
+		p = valid_sigframe(kbt, &kframe);
 	if (p == NULL)
 		return 0;
 	backtrace_init(&kbt->it, read_memory_func, kbt,
@@ -265,41 +221,19 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
 
 	/*
 	 * Set up callback information.  We grab the kernel stack base
-	 * so we will allow reads of that address range, and if we're
-	 * asking about the current process we grab the page table
-	 * so we can check user accesses before trying to read them.
-	 * We flush the TLB to avoid any weird skew issues.
+	 * so we will allow reads of that address range.
 	 */
-	is_current = (t == NULL);
+	is_current = (t == NULL || t == current);
 	kbt->is_current = is_current;
 	if (is_current)
 		t = validate_current();
 	kbt->task = t;
-	kbt->pgtable = NULL;
 	kbt->verbose = 0;	/* override in caller if desired */
 	kbt->profile = 0;	/* override in caller if desired */
 	kbt->end = KBT_ONGOING;
-	kbt->new_context = 0;
-	if (is_current) {
-		HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
-		if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) {
-			/*
-			 * Not just an optimization: this also allows
-			 * this to work at all before va/pa mappings
-			 * are set up.
-			 */
-			kbt->pgtable = swapper_pg_dir;
-		} else {
-			struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa));
-			if (!PageHighMem(page))
-				kbt->pgtable = __va(pgdir_pa);
-			else
-				pr_err("page table not in LOWMEM"
-				       " (%#llx)\n", pgdir_pa);
-		}
-		local_flush_tlb_all();
+	kbt->new_context = 1;
+	if (is_current)
 		validate_stack(regs);
-	}
 
 	if (regs == NULL) {
 		if (is_current || t->state == TASK_RUNNING) {
@@ -345,6 +279,78 @@ void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
 }
 EXPORT_SYMBOL(KBacktraceIterator_next);
 
+static void describe_addr(struct KBacktraceIterator *kbt,
+			  unsigned long address,
+			  int have_mmap_sem, char *buf, size_t bufsize)
+{
+	struct vm_area_struct *vma;
+	size_t namelen, remaining;
+	unsigned long size, offset, adjust;
+	char *p, *modname;
+	const char *name;
+	int rc;
+
+	/*
+	 * Look one byte back for every caller frame (i.e. those that
+	 * aren't a new context) so we look up symbol data for the
+	 * call itself, not the following instruction, which may be on
+	 * a different line (or in a different function).
+	 */
+	adjust = !kbt->new_context;
+	address -= adjust;
+
+	if (address >= PAGE_OFFSET) {
+		/* Handle kernel symbols. */
+		BUG_ON(bufsize < KSYM_NAME_LEN);
+		name = kallsyms_lookup(address, &size, &offset,
+				       &modname, buf);
+		if (name == NULL) {
+			buf[0] = '\0';
+			return;
+		}
+		namelen = strlen(buf);
+		remaining = (bufsize - 1) - namelen;
+		p = buf + namelen;
+		rc = snprintf(p, remaining, "+%#lx/%#lx ",
+			      offset + adjust, size);
+		if (modname && rc < remaining)
+			snprintf(p + rc, remaining - rc, "[%s] ", modname);
+		buf[bufsize-1] = '\0';
+		return;
+	}
+
+	/* If we don't have the mmap_sem, we can't show any more info. */
+	buf[0] = '\0';
+	if (!have_mmap_sem)
+		return;
+
+	/* Find vma info. */
+	vma = find_vma(kbt->task->mm, address);
+	if (vma == NULL || address < vma->vm_start) {
+		snprintf(buf, bufsize, "[unmapped address] ");
+		return;
+	}
+
+	if (vma->vm_file) {
+		char *s;
+		p = d_path(&vma->vm_file->f_path, buf, bufsize);
+		if (IS_ERR(p))
+			p = "?";
+		s = strrchr(p, '/');
+		if (s)
+			p = s+1;
+	} else {
+		p = "anon";
+	}
+
+	/* Generate a string description of the vma info. */
+	namelen = strlen(p);
+	remaining = (bufsize - 1) - namelen;
+	memmove(buf, p, namelen);
+	snprintf(buf + namelen, remaining, "[%lx+%lx] ",
+		 vma->vm_start, vma->vm_end - vma->vm_start);
+}
+
 /*
  * This method wraps the backtracer's more generic support.
  * It is only invoked from the architecture-specific code; show_stack()
@@ -353,6 +359,7 @@ EXPORT_SYMBOL(KBacktraceIterator_next);
 void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 {
 	int i;
+	int have_mmap_sem = 0;
 
 	if (headers) {
 		/*
@@ -369,31 +376,16 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 	kbt->verbose = 1;
 	i = 0;
 	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
-		char *modname;
-		const char *name;
-		unsigned long address = kbt->it.pc;
-		unsigned long offset, size;
 		char namebuf[KSYM_NAME_LEN+100];
+		unsigned long address = kbt->it.pc;
 
-		if (address >= PAGE_OFFSET)
-			name = kallsyms_lookup(address, &size, &offset,
-					       &modname, namebuf);
-		else
-			name = NULL;
-
-		if (!name)
-			namebuf[0] = '\0';
-		else {
-			size_t namelen = strlen(namebuf);
-			size_t remaining = (sizeof(namebuf) - 1) - namelen;
-			char *p = namebuf + namelen;
-			int rc = snprintf(p, remaining, "+%#lx/%#lx ",
-					  offset, size);
-			if (modname && rc < remaining)
-				snprintf(p + rc, remaining - rc,
-					 "[%s] ", modname);
-			namebuf[sizeof(namebuf)-1] = '\0';
-		}
+		/* Try to acquire the mmap_sem as we pass into userspace. */
+		if (address < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm)
+			have_mmap_sem =
+				down_read_trylock(&kbt->task->mm->mmap_sem);
+
+		describe_addr(kbt, address, have_mmap_sem,
+			      namebuf, sizeof(namebuf));
 
 		pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
 		       i++, address, namebuf, (unsigned long)(kbt->it.sp));
@@ -408,6 +400,8 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 		pr_err("Stack dump stopped; next frame identical to this one\n");
 	if (headers)
 		pr_err("Stack dump complete\n");
+	if (have_mmap_sem)
+		up_read(&kbt->task->mm->mmap_sem);
 }
 EXPORT_SYMBOL(tile_show_stack);
 
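The tile_show_stack() changes only ever trylock mmap_sem: a stack dump can run from oops or interrupt context, where blocking on the semaphore (or re-acquiring one the dying task already holds) would deadlock, so describe_addr() degrades to a bare address when the lock is unavailable. The pattern in isolation (a sketch with an invented helper, not code from this file):

    #include <linux/mm.h>
    #include <linux/sched.h>

    /* Take mmap_sem only opportunistically; never sleep for it here. */
    static int describe_user_pc(struct task_struct *tsk, unsigned long pc,
                                char *buf, size_t bufsize)
    {
            int have_sem = tsk->mm &&
                           down_read_trylock(&tsk->mm->mmap_sem);

            if (!have_sem) {
                    buf[0] = '\0';  /* degrade gracefully: no vma info */
                    return 0;
            }
            /* ... find_vma(tsk->mm, pc), d_path(), as in describe_addr() ... */
            up_read(&tsk->mm->mmap_sem);
            return 1;
    }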
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 2bb6602a1ee7..73cff814ac57 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -200,7 +200,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 {
 	siginfo_t info = { 0 };
 	int signo, code;
-	unsigned long address;
+	unsigned long address = 0;
 	bundle_bits instr;
 
 	/* Re-enable interrupts. */
@@ -223,6 +223,10 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 	}
 
 	switch (fault_num) {
+	case INT_MEM_ERROR:
+		signo = SIGBUS;
+		code = BUS_OBJERR;
+		break;
 	case INT_ILL:
 		if (copy_from_user(&instr, (void __user *)regs->pc,
 				   sizeof(instr))) {
@@ -289,7 +293,10 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 		address = regs->pc;
 		break;
 #ifdef __tilegx__
-	case INT_ILL_TRANS:
+	case INT_ILL_TRANS: {
+		/* Avoid a hardware erratum with the return address stack. */
+		fill_ra_stack();
+
 		signo = SIGSEGV;
 		code = SEGV_MAPERR;
 		if (reason & SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK)
@@ -297,6 +304,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 	else
 		address = 0;  /* FIXME: GX: single-step for address */
 	break;
+	}
 #endif
 	default:
 		panic("Unexpected do_trap interrupt number %d", fault_num);
@@ -308,7 +316,8 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 	info.si_addr = (void __user *)address;
 	if (signo == SIGILL)
 		info.si_trapno = fault_num;
-	trace_unhandled_signal("trap", regs, address, signo);
+	if (signo != SIGTRAP)
+		trace_unhandled_signal("trap", regs, address, signo);
 	force_sig_info(signo, &info, current);
 }
 
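With MEM_ERROR now routed to do_trap (see the intvec_64.S hunk), a user process sees that fault as an ordinary catchable SIGBUS with si_code == BUS_OBJERR, instead of landing in the bad_intr catch-all. From userspace the new mapping looks like this (generic POSIX example, not tile-specific):

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Inspect the si_code do_trap() now fills in for memory errors. */
    static void on_sigbus(int sig, siginfo_t *si, void *uc)
    {
            fprintf(stderr, "SIGBUS at %p, si_code=%d (%s)\n",
                    si->si_addr, si->si_code,
                    si->si_code == BUS_OBJERR ? "BUS_OBJERR" :
                    si->si_code == BUS_ADRALN ? "BUS_ADRALN" : "other");
            _exit(1);
    }

    int main(void)
    {
            struct sigaction sa = { .sa_sigaction = on_sigbus,
                                    .sa_flags = SA_SIGINFO };

            sigaction(SIGBUS, &sa, NULL);
            /* ... run workload ... */
            return 0;
    }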