aboutsummaryrefslogtreecommitdiffstats
path: root/arch/m32r/kernel
diff options
context:
space:
mode:
authorArnd Bergmann <arnd@arndb.de>2018-03-07 15:36:19 -0500
committerArnd Bergmann <arnd@arndb.de>2018-03-09 17:20:00 -0500
commit553b085c2075f6a4a2591108554f830fa61e881f (patch)
tree68d63911f2c12e0fb9fa23498df9300442a88f92 /arch/m32r/kernel
parentfd8773f9f544955f6f47dc2ac3ab85ad64376b7f (diff)
arch: remove m32r port
The Mitsubishi/Renesas m32r architecture has been around for many years, but the Linux port has been obsolete for a very long time as well, with the last significant updates done for linux-2.6.14. While some m32r microcontrollers are still being marketed by Renesas, those are apparently no longer possible to support, mainly due to the lack of an external memory interface. Hirokazu Takata was the maintainer until the architecture got marked Orphaned in 2014. Link: http://www.linux-m32r.org/ Link: https://www.renesas.com/en-eu/products/microcontrollers-microprocessors/m32r.html Cc: Hirokazu Takata <takata@linux-m32r.org> Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/m32r/kernel')
-rw-r--r--arch/m32r/kernel/.gitignore1
-rw-r--r--arch/m32r/kernel/Makefile12
-rw-r--r--arch/m32r/kernel/align.c585
-rw-r--r--arch/m32r/kernel/asm-offsets.c15
-rw-r--r--arch/m32r/kernel/entry.S553
-rw-r--r--arch/m32r/kernel/head.S284
-rw-r--r--arch/m32r/kernel/irq.c44
-rw-r--r--arch/m32r/kernel/m32r_ksyms.c89
-rw-r--r--arch/m32r/kernel/module.c203
-rw-r--r--arch/m32r/kernel/process.c154
-rw-r--r--arch/m32r/kernel/ptrace.c708
-rw-r--r--arch/m32r/kernel/setup.c424
-rw-r--r--arch/m32r/kernel/signal.c336
-rw-r--r--arch/m32r/kernel/smp.c836
-rw-r--r--arch/m32r/kernel/smpboot.c627
-rw-r--r--arch/m32r/kernel/sys_m32r.c91
-rw-r--r--arch/m32r/kernel/syscall_table.S328
-rw-r--r--arch/m32r/kernel/time.c199
-rw-r--r--arch/m32r/kernel/traps.c324
-rw-r--r--arch/m32r/kernel/vmlinux.lds.S79
20 files changed, 0 insertions, 5892 deletions
diff --git a/arch/m32r/kernel/.gitignore b/arch/m32r/kernel/.gitignore
deleted file mode 100644
index c5f676c3c224..000000000000
--- a/arch/m32r/kernel/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
1vmlinux.lds
diff --git a/arch/m32r/kernel/Makefile b/arch/m32r/kernel/Makefile
deleted file mode 100644
index bd94dca51596..000000000000
--- a/arch/m32r/kernel/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
1# SPDX-License-Identifier: GPL-2.0
2#
3# Makefile for the Linux/M32R kernel.
4#
5
6extra-y := head.o vmlinux.lds
7
8obj-y := process.o entry.o traps.o align.o irq.o setup.o time.o \
9 m32r_ksyms.o sys_m32r.o signal.o ptrace.o
10
11obj-$(CONFIG_SMP) += smp.o smpboot.o
12obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/m32r/kernel/align.c b/arch/m32r/kernel/align.c
deleted file mode 100644
index 2919a6647aff..000000000000
--- a/arch/m32r/kernel/align.c
+++ /dev/null
@@ -1,585 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * align.c - address exception handler for M32R
4 *
5 * Copyright (c) 2003 Hitoshi Yamamoto
6 */
7
8#include <asm/ptrace.h>
9#include <linux/uaccess.h>
10
11static int get_reg(struct pt_regs *regs, int nr)
12{
13 int val;
14
15 if (nr < 4)
16 val = *(unsigned long *)(&regs->r0 + nr);
17 else if (nr < 7)
18 val = *(unsigned long *)(&regs->r4 + (nr - 4));
19 else if (nr < 13)
20 val = *(unsigned long *)(&regs->r7 + (nr - 7));
21 else
22 val = *(unsigned long *)(&regs->fp + (nr - 13));
23
24 return val;
25}
26
27static void set_reg(struct pt_regs *regs, int nr, int val)
28{
29 if (nr < 4)
30 *(unsigned long *)(&regs->r0 + nr) = val;
31 else if (nr < 7)
32 *(unsigned long *)(&regs->r4 + (nr - 4)) = val;
33 else if (nr < 13)
34 *(unsigned long *)(&regs->r7 + (nr - 7)) = val;
35 else
36 *(unsigned long *)(&regs->fp + (nr - 13)) = val;
37}
38
39#define REG1(insn) (((insn) & 0x0f00) >> 8)
40#define REG2(insn) ((insn) & 0x000f)
41#define PSW_BC 0x100
42
43/* O- instruction */
44#define ISA_LD1 0x20c0 /* ld Rdest, @Rsrc */
45#define ISA_LD2 0x20e0 /* ld Rdest, @Rsrc+ */
46#define ISA_LDH 0x20a0 /* ldh Rdest, @Rsrc */
47#define ISA_LDUH 0x20b0 /* lduh Rdest, @Rsrc */
48#define ISA_ST1 0x2040 /* st Rsrc1, @Rsrc2 */
49#define ISA_ST2 0x2060 /* st Rsrc1, @+Rsrc2 */
50#define ISA_ST3 0x2070 /* st Rsrc1, @-Rsrc2 */
51#define ISA_STH1 0x2020 /* sth Rsrc1, @Rsrc2 */
52#define ISA_STH2 0x2030 /* sth Rsrc1, @Rsrc2+ */
53
54#ifdef CONFIG_ISA_DUAL_ISSUE
55
56/* OS instruction */
57#define ISA_ADD 0x00a0 /* add Rdest, Rsrc */
58#define ISA_ADDI 0x4000 /* addi Rdest, #imm8 */
59#define ISA_ADDX 0x0090 /* addx Rdest, Rsrc */
60#define ISA_AND 0x00c0 /* and Rdest, Rsrc */
61#define ISA_CMP 0x0040 /* cmp Rsrc1, Rsrc2 */
62#define ISA_CMPEQ 0x0060 /* cmpeq Rsrc1, Rsrc2 */
63#define ISA_CMPU 0x0050 /* cmpu Rsrc1, Rsrc2 */
64#define ISA_CMPZ 0x0070 /* cmpz Rsrc */
65#define ISA_LDI 0x6000 /* ldi Rdest, #imm8 */
66#define ISA_MV 0x1080 /* mv Rdest, Rsrc */
67#define ISA_NEG 0x0030 /* neg Rdest, Rsrc */
68#define ISA_NOP 0x7000 /* nop */
69#define ISA_NOT 0x00b0 /* not Rdest, Rsrc */
70#define ISA_OR 0x00e0 /* or Rdest, Rsrc */
71#define ISA_SUB 0x0020 /* sub Rdest, Rsrc */
72#define ISA_SUBX 0x0010 /* subx Rdest, Rsrc */
73#define ISA_XOR 0x00d0 /* xor Rdest, Rsrc */
74
75/* -S instruction */
76#define ISA_MUL 0x1060 /* mul Rdest, Rsrc */
77#define ISA_MULLO_A0 0x3010 /* mullo Rsrc1, Rsrc2, A0 */
78#define ISA_MULLO_A1 0x3090 /* mullo Rsrc1, Rsrc2, A1 */
79#define ISA_MVFACMI_A0 0x50f2 /* mvfacmi Rdest, A0 */
80#define ISA_MVFACMI_A1 0x50f6 /* mvfacmi Rdest, A1 */
81
82static int emu_addi(unsigned short insn, struct pt_regs *regs)
83{
84 char imm = (char)(insn & 0xff);
85 int dest = REG1(insn);
86 int val;
87
88 val = get_reg(regs, dest);
89 val += imm;
90 set_reg(regs, dest, val);
91
92 return 0;
93}
94
95static int emu_ldi(unsigned short insn, struct pt_regs *regs)
96{
97 char imm = (char)(insn & 0xff);
98
99 set_reg(regs, REG1(insn), (int)imm);
100
101 return 0;
102}
103
104static int emu_add(unsigned short insn, struct pt_regs *regs)
105{
106 int dest = REG1(insn);
107 int src = REG2(insn);
108 int val;
109
110 val = get_reg(regs, dest);
111 val += get_reg(regs, src);
112 set_reg(regs, dest, val);
113
114 return 0;
115}
116
117static int emu_addx(unsigned short insn, struct pt_regs *regs)
118{
119 int dest = REG1(insn);
120 unsigned int val, tmp;
121
122 val = regs->psw & PSW_BC ? 1 : 0;
123 tmp = get_reg(regs, dest);
124 val += tmp;
125 val += (unsigned int)get_reg(regs, REG2(insn));
126 set_reg(regs, dest, val);
127
128 /* C bit set */
129 if (val < tmp)
130 regs->psw |= PSW_BC;
131 else
132 regs->psw &= ~(PSW_BC);
133
134 return 0;
135}
136
137static int emu_and(unsigned short insn, struct pt_regs *regs)
138{
139 int dest = REG1(insn);
140 int val;
141
142 val = get_reg(regs, dest);
143 val &= get_reg(regs, REG2(insn));
144 set_reg(regs, dest, val);
145
146 return 0;
147}
148
149static int emu_cmp(unsigned short insn, struct pt_regs *regs)
150{
151 if (get_reg(regs, REG1(insn)) < get_reg(regs, REG2(insn)))
152 regs->psw |= PSW_BC;
153 else
154 regs->psw &= ~(PSW_BC);
155
156 return 0;
157}
158
159static int emu_cmpeq(unsigned short insn, struct pt_regs *regs)
160{
161 if (get_reg(regs, REG1(insn)) == get_reg(regs, REG2(insn)))
162 regs->psw |= PSW_BC;
163 else
164 regs->psw &= ~(PSW_BC);
165
166 return 0;
167}
168
169static int emu_cmpu(unsigned short insn, struct pt_regs *regs)
170{
171 if ((unsigned int)get_reg(regs, REG1(insn))
172 < (unsigned int)get_reg(regs, REG2(insn)))
173 regs->psw |= PSW_BC;
174 else
175 regs->psw &= ~(PSW_BC);
176
177 return 0;
178}
179
180static int emu_cmpz(unsigned short insn, struct pt_regs *regs)
181{
182 if (!get_reg(regs, REG2(insn)))
183 regs->psw |= PSW_BC;
184 else
185 regs->psw &= ~(PSW_BC);
186
187 return 0;
188}
189
190static int emu_mv(unsigned short insn, struct pt_regs *regs)
191{
192 int val;
193
194 val = get_reg(regs, REG2(insn));
195 set_reg(regs, REG1(insn), val);
196
197 return 0;
198}
199
200static int emu_neg(unsigned short insn, struct pt_regs *regs)
201{
202 int val;
203
204 val = get_reg(regs, REG2(insn));
205 set_reg(regs, REG1(insn), 0 - val);
206
207 return 0;
208}
209
210static int emu_not(unsigned short insn, struct pt_regs *regs)
211{
212 int val;
213
214 val = get_reg(regs, REG2(insn));
215 set_reg(regs, REG1(insn), ~val);
216
217 return 0;
218}
219
220static int emu_or(unsigned short insn, struct pt_regs *regs)
221{
222 int dest = REG1(insn);
223 int val;
224
225 val = get_reg(regs, dest);
226 val |= get_reg(regs, REG2(insn));
227 set_reg(regs, dest, val);
228
229 return 0;
230}
231
232static int emu_sub(unsigned short insn, struct pt_regs *regs)
233{
234 int dest = REG1(insn);
235 int val;
236
237 val = get_reg(regs, dest);
238 val -= get_reg(regs, REG2(insn));
239 set_reg(regs, dest, val);
240
241 return 0;
242}
243
244static int emu_subx(unsigned short insn, struct pt_regs *regs)
245{
246 int dest = REG1(insn);
247 unsigned int val, tmp;
248
249 val = tmp = get_reg(regs, dest);
250 val -= (unsigned int)get_reg(regs, REG2(insn));
251 val -= regs->psw & PSW_BC ? 1 : 0;
252 set_reg(regs, dest, val);
253
254 /* C bit set */
255 if (val > tmp)
256 regs->psw |= PSW_BC;
257 else
258 regs->psw &= ~(PSW_BC);
259
260 return 0;
261}
262
263static int emu_xor(unsigned short insn, struct pt_regs *regs)
264{
265 int dest = REG1(insn);
266 unsigned int val;
267
268 val = (unsigned int)get_reg(regs, dest);
269 val ^= (unsigned int)get_reg(regs, REG2(insn));
270 set_reg(regs, dest, val);
271
272 return 0;
273}
274
275static int emu_mul(unsigned short insn, struct pt_regs *regs)
276{
277 int dest = REG1(insn);
278 int reg1, reg2;
279
280 reg1 = get_reg(regs, dest);
281 reg2 = get_reg(regs, REG2(insn));
282
283 __asm__ __volatile__ (
284 "mul %0, %1; \n\t"
285 : "+r" (reg1) : "r" (reg2)
286 );
287
288 set_reg(regs, dest, reg1);
289
290 return 0;
291}
292
293static int emu_mullo_a0(unsigned short insn, struct pt_regs *regs)
294{
295 int reg1, reg2;
296
297 reg1 = get_reg(regs, REG1(insn));
298 reg2 = get_reg(regs, REG2(insn));
299
300 __asm__ __volatile__ (
301 "mullo %0, %1, a0; \n\t"
302 "mvfachi %0, a0; \n\t"
303 "mvfaclo %1, a0; \n\t"
304 : "+r" (reg1), "+r" (reg2)
305 );
306
307 regs->acc0h = reg1;
308 regs->acc0l = reg2;
309
310 return 0;
311}
312
313static int emu_mullo_a1(unsigned short insn, struct pt_regs *regs)
314{
315 int reg1, reg2;
316
317 reg1 = get_reg(regs, REG1(insn));
318 reg2 = get_reg(regs, REG2(insn));
319
320 __asm__ __volatile__ (
321 "mullo %0, %1, a0; \n\t"
322 "mvfachi %0, a0; \n\t"
323 "mvfaclo %1, a0; \n\t"
324 : "+r" (reg1), "+r" (reg2)
325 );
326
327 regs->acc1h = reg1;
328 regs->acc1l = reg2;
329
330 return 0;
331}
332
333static int emu_mvfacmi_a0(unsigned short insn, struct pt_regs *regs)
334{
335 unsigned long val;
336
337 val = (regs->acc0h << 16) | (regs->acc0l >> 16);
338 set_reg(regs, REG1(insn), (int)val);
339
340 return 0;
341}
342
343static int emu_mvfacmi_a1(unsigned short insn, struct pt_regs *regs)
344{
345 unsigned long val;
346
347 val = (regs->acc1h << 16) | (regs->acc1l >> 16);
348 set_reg(regs, REG1(insn), (int)val);
349
350 return 0;
351}
352
353static int emu_m32r2(unsigned short insn, struct pt_regs *regs)
354{
355 int res = -1;
356
357 if ((insn & 0x7fff) == ISA_NOP) /* nop */
358 return 0;
359
360 switch(insn & 0x7000) {
361 case ISA_ADDI: /* addi Rdest, #imm8 */
362 res = emu_addi(insn, regs);
363 break;
364 case ISA_LDI: /* ldi Rdest, #imm8 */
365 res = emu_ldi(insn, regs);
366 break;
367 default:
368 break;
369 }
370
371 if (!res)
372 return 0;
373
374 switch(insn & 0x70f0) {
375 case ISA_ADD: /* add Rdest, Rsrc */
376 res = emu_add(insn, regs);
377 break;
378 case ISA_ADDX: /* addx Rdest, Rsrc */
379 res = emu_addx(insn, regs);
380 break;
381 case ISA_AND: /* and Rdest, Rsrc */
382 res = emu_and(insn, regs);
383 break;
384 case ISA_CMP: /* cmp Rsrc1, Rsrc2 */
385 res = emu_cmp(insn, regs);
386 break;
387 case ISA_CMPEQ: /* cmpeq Rsrc1, Rsrc2 */
388 res = emu_cmpeq(insn, regs);
389 break;
390 case ISA_CMPU: /* cmpu Rsrc1, Rsrc2 */
391 res = emu_cmpu(insn, regs);
392 break;
393 case ISA_CMPZ: /* cmpz Rsrc */
394 res = emu_cmpz(insn, regs);
395 break;
396 case ISA_MV: /* mv Rdest, Rsrc */
397 res = emu_mv(insn, regs);
398 break;
399 case ISA_NEG: /* neg Rdest, Rsrc */
400 res = emu_neg(insn, regs);
401 break;
402 case ISA_NOT: /* not Rdest, Rsrc */
403 res = emu_not(insn, regs);
404 break;
405 case ISA_OR: /* or Rdest, Rsrc */
406 res = emu_or(insn, regs);
407 break;
408 case ISA_SUB: /* sub Rdest, Rsrc */
409 res = emu_sub(insn, regs);
410 break;
411 case ISA_SUBX: /* subx Rdest, Rsrc */
412 res = emu_subx(insn, regs);
413 break;
414 case ISA_XOR: /* xor Rdest, Rsrc */
415 res = emu_xor(insn, regs);
416 break;
417 case ISA_MUL: /* mul Rdest, Rsrc */
418 res = emu_mul(insn, regs);
419 break;
420 case ISA_MULLO_A0: /* mullo Rsrc1, Rsrc2 */
421 res = emu_mullo_a0(insn, regs);
422 break;
423 case ISA_MULLO_A1: /* mullo Rsrc1, Rsrc2 */
424 res = emu_mullo_a1(insn, regs);
425 break;
426 default:
427 break;
428 }
429
430 if (!res)
431 return 0;
432
433 switch(insn & 0x70ff) {
434 case ISA_MVFACMI_A0: /* mvfacmi Rdest */
435 res = emu_mvfacmi_a0(insn, regs);
436 break;
437 case ISA_MVFACMI_A1: /* mvfacmi Rdest */
438 res = emu_mvfacmi_a1(insn, regs);
439 break;
440 default:
441 break;
442 }
443
444 return res;
445}
446
447#endif /* CONFIG_ISA_DUAL_ISSUE */
448
449/*
450 * ld : ?010 dest 1100 src
451 * 0010 dest 1110 src : ld Rdest, @Rsrc+
452 * ldh : ?010 dest 1010 src
453 * lduh : ?010 dest 1011 src
454 * st : ?010 src1 0100 src2
455 * 0010 src1 0110 src2 : st Rsrc1, @+Rsrc2
456 * 0010 src1 0111 src2 : st Rsrc1, @-Rsrc2
457 * sth : ?010 src1 0010 src2
458 */
459
460static int insn_check(unsigned long insn, struct pt_regs *regs,
461 unsigned char **ucp)
462{
463 int res = 0;
464
465 /*
466 * 32bit insn
467 * ld Rdest, @(disp16, Rsrc)
468 * st Rdest, @(disp16, Rsrc)
469 */
470 if (insn & 0x80000000) { /* 32bit insn */
471 *ucp += (short)(insn & 0x0000ffff);
472 regs->bpc += 4;
473 } else { /* 16bit insn */
474#ifdef CONFIG_ISA_DUAL_ISSUE
475 /* parallel exec check */
476 if (!(regs->bpc & 0x2) && insn & 0x8000) {
477 res = emu_m32r2((unsigned short)insn, regs);
478 regs->bpc += 4;
479 } else
480#endif /* CONFIG_ISA_DUAL_ISSUE */
481 regs->bpc += 2;
482 }
483
484 return res;
485}
486
487static int emu_ld(unsigned long insn32, struct pt_regs *regs)
488{
489 unsigned char *ucp;
490 unsigned long val;
491 unsigned short insn16;
492 int size, src;
493
494 insn16 = insn32 >> 16;
495 src = REG2(insn16);
496 ucp = (unsigned char *)get_reg(regs, src);
497
498 if (insn_check(insn32, regs, &ucp))
499 return -1;
500
501 size = insn16 & 0x0040 ? 4 : 2;
502 if (copy_from_user(&val, ucp, size))
503 return -1;
504
505 if (size == 2)
506 val >>= 16;
507
508 /* ldh sign check */
509 if ((insn16 & 0x00f0) == 0x00a0 && (val & 0x8000))
510 val |= 0xffff0000;
511
512 set_reg(regs, REG1(insn16), val);
513
514 /* ld increment check */
515 if ((insn16 & 0xf0f0) == ISA_LD2) /* ld Rdest, @Rsrc+ */
516 set_reg(regs, src, (unsigned long)(ucp + 4));
517
518 return 0;
519}
520
521static int emu_st(unsigned long insn32, struct pt_regs *regs)
522{
523 unsigned char *ucp;
524 unsigned long val;
525 unsigned short insn16;
526 int size, src2;
527
528 insn16 = insn32 >> 16;
529 src2 = REG2(insn16);
530
531 ucp = (unsigned char *)get_reg(regs, src2);
532
533 if (insn_check(insn32, regs, &ucp))
534 return -1;
535
536 size = insn16 & 0x0040 ? 4 : 2;
537 val = get_reg(regs, REG1(insn16));
538 if (size == 2)
539 val <<= 16;
540
541 /* st inc/dec check */
542 if ((insn16 & 0xf0e0) == 0x2060) {
543 if (insn16 & 0x0010)
544 ucp -= 4;
545 else
546 ucp += 4;
547
548 set_reg(regs, src2, (unsigned long)ucp);
549 }
550
551 if (copy_to_user(ucp, &val, size))
552 return -1;
553
554 /* sth inc check */
555 if ((insn16 & 0xf0f0) == ISA_STH2) {
556 ucp += 2;
557 set_reg(regs, src2, (unsigned long)ucp);
558 }
559
560 return 0;
561}
562
563int handle_unaligned_access(unsigned long insn32, struct pt_regs *regs)
564{
565 unsigned short insn16;
566 int res;
567
568 insn16 = insn32 >> 16;
569
570 /* ld or st check */
571 if ((insn16 & 0x7000) != 0x2000)
572 return -1;
573
574 /* insn alignment check */
575 if ((insn16 & 0x8000) && (regs->bpc & 3))
576 return -1;
577
578 if (insn16 & 0x0080) /* ld */
579 res = emu_ld(insn32, regs);
580 else /* st */
581 res = emu_st(insn32, regs);
582
583 return res;
584}
585
diff --git a/arch/m32r/kernel/asm-offsets.c b/arch/m32r/kernel/asm-offsets.c
deleted file mode 100644
index 7cb90b459e07..000000000000
--- a/arch/m32r/kernel/asm-offsets.c
+++ /dev/null
@@ -1,15 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/thread_info.h>
3#include <linux/kbuild.h>
4
5int foo(void)
6{
7 OFFSET(TI_TASK, thread_info, task);
8 OFFSET(TI_FLAGS, thread_info, flags);
9 OFFSET(TI_STATUS, thread_info, status);
10 OFFSET(TI_CPU, thread_info, cpu);
11 OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
12 OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
13
14 return 0;
15}
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
deleted file mode 100644
index bbf48f2aa2a7..000000000000
--- a/arch/m32r/kernel/entry.S
+++ /dev/null
@@ -1,553 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * linux/arch/m32r/kernel/entry.S
4 *
5 * Copyright (c) 2001, 2002 Hirokazu Takata, Hitoshi Yamamoto, H. Kondo
6 * Copyright (c) 2003 Hitoshi Yamamoto
7 * Copyright (c) 2004 Hirokazu Takata <takata at linux-m32r.org>
8 *
9 * Taken from i386 version.
10 * Copyright (C) 1991, 1992 Linus Torvalds
11 */
12
13/*
14 * entry.S contains the system-call and fault low-level handling routines.
15 * This also contains the timer-interrupt handler, as well as all interrupts
16 * and faults that can result in a task-switch.
17 *
18 * NOTE: This code handles signal-recognition, which happens every time
19 * after a timer-interrupt and after each system call.
20 *
21 * Stack layout in 'ret_from_system_call':
22 * ptrace needs to have all regs on the stack.
23 * if the order here is changed, it needs to be
24 * updated in fork.c:copy_thread, signal.c:do_signal,
25 * ptrace.c and ptrace.h
26 *
27 * M32R/M32Rx/M32R2
28 * @(sp) - r4
29 * @(0x04,sp) - r5
30 * @(0x08,sp) - r6
31 * @(0x0c,sp) - *pt_regs
32 * @(0x10,sp) - r0
33 * @(0x14,sp) - r1
34 * @(0x18,sp) - r2
35 * @(0x1c,sp) - r3
36 * @(0x20,sp) - r7
37 * @(0x24,sp) - r8
38 * @(0x28,sp) - r9
39 * @(0x2c,sp) - r10
40 * @(0x30,sp) - r11
41 * @(0x34,sp) - r12
42 * @(0x38,sp) - syscall_nr
43 * @(0x3c,sp) - acc0h
44 * @(0x40,sp) - acc0l
45 * @(0x44,sp) - acc1h ; ISA_DSP_LEVEL2 only
46 * @(0x48,sp) - acc1l ; ISA_DSP_LEVEL2 only
47 * @(0x4c,sp) - psw
48 * @(0x50,sp) - bpc
49 * @(0x54,sp) - bbpsw
50 * @(0x58,sp) - bbpc
51 * @(0x5c,sp) - spu (cr3)
52 * @(0x60,sp) - fp (r13)
53 * @(0x64,sp) - lr (r14)
54 * @(0x68,sp) - spi (cr2)
55 * @(0x6c,sp) - orig_r0
56 */
57
58#include <linux/linkage.h>
59#include <asm/irq.h>
60#include <asm/unistd.h>
61#include <asm/assembler.h>
62#include <asm/thread_info.h>
63#include <asm/errno.h>
64#include <asm/segment.h>
65#include <asm/smp.h>
66#include <asm/page.h>
67#include <asm/m32r.h>
68#include <asm/mmu_context.h>
69#include <asm/asm-offsets.h>
70
71#if !defined(CONFIG_MMU)
72#define sys_madvise sys_ni_syscall
73#define sys_readahead sys_ni_syscall
74#define sys_mprotect sys_ni_syscall
75#define sys_msync sys_ni_syscall
76#define sys_mlock sys_ni_syscall
77#define sys_munlock sys_ni_syscall
78#define sys_mlockall sys_ni_syscall
79#define sys_munlockall sys_ni_syscall
80#define sys_mremap sys_ni_syscall
81#define sys_mincore sys_ni_syscall
82#define sys_remap_file_pages sys_ni_syscall
83#endif /* CONFIG_MMU */
84
85#define R4(reg) @reg
86#define R5(reg) @(0x04,reg)
87#define R6(reg) @(0x08,reg)
88#define PTREGS(reg) @(0x0C,reg)
89#define R0(reg) @(0x10,reg)
90#define R1(reg) @(0x14,reg)
91#define R2(reg) @(0x18,reg)
92#define R3(reg) @(0x1C,reg)
93#define R7(reg) @(0x20,reg)
94#define R8(reg) @(0x24,reg)
95#define R9(reg) @(0x28,reg)
96#define R10(reg) @(0x2C,reg)
97#define R11(reg) @(0x30,reg)
98#define R12(reg) @(0x34,reg)
99#define SYSCALL_NR(reg) @(0x38,reg)
100#define ACC0H(reg) @(0x3C,reg)
101#define ACC0L(reg) @(0x40,reg)
102#define ACC1H(reg) @(0x44,reg)
103#define ACC1L(reg) @(0x48,reg)
104#define PSW(reg) @(0x4C,reg)
105#define BPC(reg) @(0x50,reg)
106#define BBPSW(reg) @(0x54,reg)
107#define BBPC(reg) @(0x58,reg)
108#define SPU(reg) @(0x5C,reg)
109#define FP(reg) @(0x60,reg) /* FP = R13 */
110#define LR(reg) @(0x64,reg)
111#define SP(reg) @(0x68,reg)
112#define ORIG_R0(reg) @(0x6C,reg)
113
114#define nr_syscalls ((syscall_table_size)/4)
115
116#ifdef CONFIG_PREEMPT
117#define preempt_stop(x) DISABLE_INTERRUPTS(x)
118#else
119#define preempt_stop(x)
120#define resume_kernel restore_all
121#endif
122
123/* how to get the thread information struct from ASM */
124#define GET_THREAD_INFO(reg) GET_THREAD_INFO reg
125 .macro GET_THREAD_INFO reg
126 ldi \reg, #-THREAD_SIZE
127 and \reg, sp
128 .endm
129
130ENTRY(ret_from_kernel_thread)
131 pop r0
132 bl schedule_tail
133 GET_THREAD_INFO(r8)
134 ld r0, R0(r8)
135 ld r1, R1(r8)
136 jl r1
137 bra syscall_exit
138
139ENTRY(ret_from_fork)
140 pop r0
141 bl schedule_tail
142 GET_THREAD_INFO(r8)
143 bra syscall_exit
144
145/*
146 * Return to user mode is not as complex as all this looks,
147 * but we want the default path for a system call return to
148 * go as quickly as possible which is why some of this is
149 * less clear than it otherwise should be.
150 */
151
152 ; userspace resumption stub bypassing syscall exit tracing
153 ALIGN
154ret_from_exception:
155 preempt_stop(r4)
156ret_from_intr:
157 ld r4, PSW(sp)
158#ifdef CONFIG_ISA_M32R2
159 and3 r4, r4, #0x8800 ; check BSM and BPM bits
160#else
161 and3 r4, r4, #0x8000 ; check BSM bit
162#endif
163 beqz r4, resume_kernel
164resume_userspace:
165 DISABLE_INTERRUPTS(r4) ; make sure we don't miss an interrupt
166 ; setting need_resched or sigpending
167 ; between sampling and the iret
168 GET_THREAD_INFO(r8)
169 ld r9, @(TI_FLAGS, r8)
170 and3 r4, r9, #_TIF_WORK_MASK ; is there any work to be done on
171 ; int/exception return?
172 bnez r4, work_pending
173 bra restore_all
174
175#ifdef CONFIG_PREEMPT
176ENTRY(resume_kernel)
177 GET_THREAD_INFO(r8)
178 ld r9, @(TI_PRE_COUNT, r8) ; non-zero preempt_count ?
179 bnez r9, restore_all
180need_resched:
181 ld r9, @(TI_FLAGS, r8) ; need_resched set ?
182 and3 r4, r9, #_TIF_NEED_RESCHED
183 beqz r4, restore_all
184 ld r4, PSW(sp) ; interrupts off (exception path) ?
185 and3 r4, r4, #0x4000
186 beqz r4, restore_all
187 bl preempt_schedule_irq
188 bra need_resched
189#endif
190
191 ; system call handler stub
192ENTRY(system_call)
193 SWITCH_TO_KERNEL_STACK
194 SAVE_ALL
195 ENABLE_INTERRUPTS(r4) ; Enable interrupt
196 st sp, PTREGS(sp) ; implicit pt_regs parameter
197 cmpui r7, #NR_syscalls
198 bnc syscall_badsys
199 st r7, SYSCALL_NR(sp) ; syscall_nr
200 ; system call tracing in operation
201 GET_THREAD_INFO(r8)
202 ld r9, @(TI_FLAGS, r8)
203 and3 r4, r9, #_TIF_SYSCALL_TRACE
204 bnez r4, syscall_trace_entry
205syscall_call:
206 slli r7, #2 ; table jump for the system call
207 LDIMM (r4, sys_call_table)
208 add r7, r4
209 ld r7, @r7
210 jl r7 ; execute system call
211 st r0, R0(sp) ; save the return value
212syscall_exit:
213 DISABLE_INTERRUPTS(r4) ; make sure we don't miss an interrupt
214 ; setting need_resched or sigpending
215 ; between sampling and the iret
216 ld r9, @(TI_FLAGS, r8)
217 and3 r4, r9, #_TIF_ALLWORK_MASK ; current->work
218 bnez r4, syscall_exit_work
219restore_all:
220 RESTORE_ALL
221
222 # perform work that needs to be done immediately before resumption
223 # r9 : flags
224 ALIGN
225work_pending:
226 and3 r4, r9, #_TIF_NEED_RESCHED
227 beqz r4, work_notifysig
228work_resched:
229 bl schedule
230 DISABLE_INTERRUPTS(r4) ; make sure we don't miss an interrupt
231 ; setting need_resched or sigpending
232 ; between sampling and the iret
233 ld r9, @(TI_FLAGS, r8)
234 and3 r4, r9, #_TIF_WORK_MASK ; is there any work to be done other
235 ; than syscall tracing?
236 beqz r4, restore_all
237 and3 r4, r4, #_TIF_NEED_RESCHED
238 bnez r4, work_resched
239
240work_notifysig: ; deal with pending signals and
241 ; notify-resume requests
242 mv r0, sp ; arg1 : struct pt_regs *regs
243 mv r1, r9 ; arg2 : __u32 thread_info_flags
244 bl do_notify_resume
245 bra resume_userspace
246
247 ; perform syscall exit tracing
248 ALIGN
249syscall_trace_entry:
250 ldi r4, #-ENOSYS
251 st r4, R0(sp)
252 bl do_syscall_trace
253 ld r0, ORIG_R0(sp)
254 ld r1, R1(sp)
255 ld r2, R2(sp)
256 ld r3, R3(sp)
257 ld r4, R4(sp)
258 ld r5, R5(sp)
259 ld r6, R6(sp)
260 ld r7, SYSCALL_NR(sp)
261 cmpui r7, #NR_syscalls
262 bc syscall_call
263 bra syscall_exit
264
265 ; perform syscall exit tracing
266 ALIGN
267syscall_exit_work:
268 ld r9, @(TI_FLAGS, r8)
269 and3 r4, r9, #_TIF_SYSCALL_TRACE
270 beqz r4, work_pending
271 ENABLE_INTERRUPTS(r4) ; could let do_syscall_trace() call
272 ; schedule() instead
273 bl do_syscall_trace
274 bra resume_userspace
275
276 ALIGN
277syscall_fault:
278 SAVE_ALL
279 GET_THREAD_INFO(r8)
280 ldi r4, #-EFAULT
281 st r4, R0(sp)
282 bra resume_userspace
283
284 ALIGN
285syscall_badsys:
286 ldi r4, #-ENOSYS
287 st r4, R0(sp)
288 bra resume_userspace
289
290 .global eit_vector
291
292 .equ ei_vec_table, eit_vector + 0x0200
293
294/*
295 * EI handler routine
296 */
297ENTRY(ei_handler)
298#if defined(CONFIG_CHIP_M32700)
299 ; WORKAROUND: force to clear SM bit and use the kernel stack (SPI).
300 SWITCH_TO_KERNEL_STACK
301#endif
302 SAVE_ALL
303 mv r1, sp ; arg1(regs)
304 ; get ICU status
305 seth r0, #shigh(M32R_ICU_ISTS_ADDR)
306 ld r0, @(low(M32R_ICU_ISTS_ADDR),r0)
307 push r0
308#if defined(CONFIG_SMP)
309 /*
310 * If IRQ == 0 --> Nothing to do, Not write IMASK
311 * If IRQ == IPI --> Do IPI handler, Not write IMASK
312 * If IRQ != 0, IPI --> Do do_IRQ(), Write IMASK
313 */
314 slli r0, #4
315 srli r0, #24 ; r0(irq_num<<2)
316 ;; IRQ exist check
317#if defined(CONFIG_CHIP_M32700)
318 /* WORKAROUND: IMASK bug M32700-TS1, TS2 chip. */
319 bnez r0, 0f
320 ld24 r14, #0x00070000
321 seth r0, #shigh(M32R_ICU_IMASK_ADDR)
322 st r14, @(low(M32R_ICU_IMASK_ADDR),r0)
323 bra 1f
324 .fillinsn
3250:
326#endif /* CONFIG_CHIP_M32700 */
327 beqz r0, 1f ; if (!irq_num) goto exit
328 ;; IPI check
329 cmpi r0, #(M32R_IRQ_IPI0<<2) ; ISN < IPI0 check
330 bc 2f
331 cmpi r0, #((M32R_IRQ_IPI7+1)<<2) ; ISN > IPI7 check
332 bnc 2f
333 LDIMM (r2, ei_vec_table)
334 add r2, r0
335 ld r2, @r2
336 beqz r2, 1f ; if (no IPI handler) goto exit
337 mv r0, r1 ; arg0(regs)
338 jl r2
339 .fillinsn
3401:
341 addi sp, #4
342 bra restore_all
343 .fillinsn
3442:
345 srli r0, #2
346#else /* not CONFIG_SMP */
347 srli r0, #22 ; r0(irq)
348#endif /* not CONFIG_SMP */
349
350#if defined(CONFIG_PLAT_HAS_INT1ICU)
351 add3 r2, r0, #-(M32R_IRQ_INT1) ; INT1# interrupt
352 bnez r2, 3f
353 seth r0, #shigh(M32R_INT1ICU_ISTS)
354 lduh r0, @(low(M32R_INT1ICU_ISTS),r0) ; bit10-6 : ISN
355 slli r0, #21
356 srli r0, #27 ; ISN
357 addi r0, #(M32R_INT1ICU_IRQ_BASE)
358 bra check_end
359 .fillinsn
3603:
361#endif /* CONFIG_PLAT_HAS_INT1ICU */
362#if defined(CONFIG_PLAT_HAS_INT0ICU)
363 add3 r2, r0, #-(M32R_IRQ_INT0) ; INT0# interrupt
364 bnez r2, 4f
365 seth r0, #shigh(M32R_INT0ICU_ISTS)
366 lduh r0, @(low(M32R_INT0ICU_ISTS),r0) ; bit10-6 : ISN
367 slli r0, #21
368 srli r0, #27 ; ISN
369 add3 r0, r0, #(M32R_INT0ICU_IRQ_BASE)
370 bra check_end
371 .fillinsn
3724:
373#endif /* CONFIG_PLAT_HAS_INT0ICU */
374#if defined(CONFIG_PLAT_HAS_INT2ICU)
375 add3 r2, r0, #-(M32R_IRQ_INT2) ; INT2# interrupt
376 bnez r2, 5f
377 seth r0, #shigh(M32R_INT2ICU_ISTS)
378 lduh r0, @(low(M32R_INT2ICU_ISTS),r0) ; bit10-6 : ISN
379 slli r0, #21
380 srli r0, #27 ; ISN
381 add3 r0, r0, #(M32R_INT2ICU_IRQ_BASE)
382 ; bra check_end
383 .fillinsn
3845:
385#endif /* CONFIG_PLAT_HAS_INT2ICU */
386
387check_end:
388 bl do_IRQ
389 pop r14
390 seth r0, #shigh(M32R_ICU_IMASK_ADDR)
391 st r14, @(low(M32R_ICU_IMASK_ADDR),r0)
392 bra ret_from_intr
393
394/*
395 * Default EIT handler
396 */
397 ALIGN
398int_msg:
399 .asciz "Unknown interrupt\n"
400 .byte 0
401
402ENTRY(default_eit_handler)
403 push r0
404 mvfc r0, psw
405 push r1
406 push r2
407 push r3
408 push r0
409 LDIMM (r0, __KERNEL_DS)
410 mv r0, r1
411 mv r0, r2
412 LDIMM (r0, int_msg)
413 bl printk
414 pop r0
415 pop r3
416 pop r2
417 pop r1
418 mvtc r0, psw
419 pop r0
420infinit:
421 bra infinit
422
423#ifdef CONFIG_MMU
424/*
425 * Access Exception handler
426 */
427ENTRY(ace_handler)
428 SWITCH_TO_KERNEL_STACK
429 SAVE_ALL
430
431 seth r2, #shigh(MMU_REG_BASE) /* Check status register */
432 ld r4, @(low(MESTS_offset),r2)
433 st r4, @(low(MESTS_offset),r2)
434 srl3 r1, r4, #4
435#ifdef CONFIG_CHIP_M32700
436 and3 r1, r1, #0x0000ffff
437 ; WORKAROUND: ignore TME bit for the M32700(TS1).
438#endif /* CONFIG_CHIP_M32700 */
439 beqz r1, inst
440oprand:
441 ld r2, @(low(MDEVA_offset),r2) ; set address
442 srli r1, #1
443 bra 1f
444inst:
445 and3 r1, r4, #2
446 srli r1, #1
447 or3 r1, r1, #8
448 mvfc r2, bpc ; set address
449 .fillinsn
4501:
451 mvfc r3, psw
452 mv r0, sp
453 and3 r3, r3, 0x800
454 srli r3, #9
455 or r1, r3
456 /*
457 * do_page_fault():
458 * r0 : struct pt_regs *regs
459 * r1 : unsigned long error-code
460 * r2 : unsigned long address
461 * error-code:
462 * +------+------+------+------+
463 * | bit3 | bit2 | bit1 | bit0 |
464 * +------+------+------+------+
465 * bit 3 == 0:means data, 1:means instruction
466 * bit 2 == 0:means kernel, 1:means user-mode
467 * bit 1 == 0:means read, 1:means write
468 * bit 0 == 0:means no page found 1:means protection fault
469 *
470 */
471 bl do_page_fault
472 bra ret_from_intr
473#endif /* CONFIG_MMU */
474
475
476ENTRY(alignment_check)
477 /* void alignment_check(int error_code) */
478 SWITCH_TO_KERNEL_STACK
479 SAVE_ALL
480 ldi r1, #0x30 ; error_code
481 mv r0, sp ; pt_regs
482 bl do_alignment_check
483error_code:
484 bra ret_from_exception
485
486ENTRY(rie_handler)
487 /* void rie_handler(int error_code) */
488 SWITCH_TO_KERNEL_STACK
489 SAVE_ALL
490 ldi r1, #0x20 ; error_code
491 mv r0, sp ; pt_regs
492 bl do_rie_handler
493 bra error_code
494
495ENTRY(pie_handler)
496 /* void pie_handler(int error_code) */
497 SWITCH_TO_KERNEL_STACK
498 SAVE_ALL
499 ldi r1, #0 ; error_code ; FIXME
500 mv r0, sp ; pt_regs
501 bl do_pie_handler
502 bra error_code
503
504ENTRY(debug_trap)
505 /* void debug_trap(void) */
506 .global withdraw_debug_trap
507 SWITCH_TO_KERNEL_STACK
508 SAVE_ALL
509 mv r0, sp ; pt_regs
510 bl withdraw_debug_trap
511 ldi r1, #0 ; error_code
512 mv r0, sp ; pt_regs
513 bl do_debug_trap
514 bra error_code
515
516ENTRY(ill_trap)
517 /* void ill_trap(void) */
518 SWITCH_TO_KERNEL_STACK
519 SAVE_ALL
520 ldi r1, #0 ; error_code ; FIXME
521 mv r0, sp ; pt_regs
522 bl do_ill_trap
523 bra error_code
524
525ENTRY(cache_flushing_handler)
526 /* void _flush_cache_all(void); */
527 .global _flush_cache_all
528 SWITCH_TO_KERNEL_STACK
529 push r0
530 push r1
531 push r2
532 push r3
533 push r4
534 push r5
535 push r6
536 push r7
537 push lr
538 bl _flush_cache_all
539 pop lr
540 pop r7
541 pop r6
542 pop r5
543 pop r4
544 pop r3
545 pop r2
546 pop r1
547 pop r0
548 rte
549
550 .section .rodata,"a"
551#include "syscall_table.S"
552
553syscall_table_size=(.-sys_call_table)
diff --git a/arch/m32r/kernel/head.S b/arch/m32r/kernel/head.S
deleted file mode 100644
index 1f040973df1c..000000000000
--- a/arch/m32r/kernel/head.S
+++ /dev/null
@@ -1,284 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * linux/arch/m32r/kernel/head.S
4 *
5 * M32R startup code.
6 *
7 * Copyright (c) 2001, 2002 Hiroyuki Kondo, Hirokazu Takata,
8 * Hitoshi Yamamoto
9 */
10
11#include <linux/init.h>
12__INIT
13__INITDATA
14
15 .text
16#include <linux/linkage.h>
17#include <asm/segment.h>
18#include <asm/page.h>
19#include <asm/pgtable.h>
20#include <asm/assembler.h>
21#include <asm/m32r.h>
22#include <asm/mmu_context.h>
23
24/*
25 * References to members of the boot_cpu_data structure.
26 */
27__HEAD
28 .global start_kernel
29 .global __bss_start
30 .global _end
31ENTRY(stext)
32ENTRY(_stext)
33 /* Setup up the stack pointer */
34 LDIMM (r0, spi_stack_top)
35 LDIMM (r1, spu_stack_top)
36 mvtc r0, spi
37 mvtc r1, spu
38
39 /* Initilalize PSW */
40 ldi r0, #0x0000 /* use SPI, disable EI */
41 mvtc r0, psw
42
43 /* Set up the stack pointer */
44 LDIMM (r0, stack_start)
45 ld r0, @r0
46 mvtc r0, spi
47
48/*
49 * Clear BSS first so that there are no surprises...
50 */
51#ifdef CONFIG_ISA_DUAL_ISSUE
52
53 LDIMM (r2, __bss_start)
54 LDIMM (r3, _end)
55 sub r3, r2 ; BSS size in bytes
56 ; R4 = BSS size in longwords (rounded down)
57 mv r4, r3 || ldi r1, #0
58 srli r4, #4 || addi r2, #-4
59 beqz r4, .Lendloop1
60.Lloop1:
61#ifndef CONFIG_CHIP_M32310
62 ; Touch memory for the no-write-allocating cache.
63 ld r0, @(4,r2)
64#endif
65 st r1, @+r2 || addi r4, #-1
66 st r1, @+r2
67 st r1, @+r2
68 st r1, @+r2 || cmpeq r1, r4 ; R4 = 0?
69 bnc .Lloop1
70.Lendloop1:
71 and3 r4, r3, #15
72 addi r2, #4
73 beqz r4, .Lendloop2
74.Lloop2:
75 stb r1, @r2 || addi r4, #-1
76 addi r2, #1
77 bnez r4, .Lloop2
78.Lendloop2:
79
80#else /* not CONFIG_ISA_DUAL_ISSUE */
81
82 LDIMM (r2, __bss_start)
83 LDIMM (r3, _end)
84 sub r3, r2 ; BSS size in bytes
85 mv r4, r3
86 srli r4, #2 ; R4 = BSS size in longwords (rounded down)
87 ldi r1, #0 ; clear R1 for longwords store
88 addi r2, #-4 ; account for pre-inc store
89 beqz r4, .Lendloop1 ; any more to go?
90.Lloop1:
91 st r1, @+r2 ; yep, zero out another longword
92 addi r4, #-1 ; decrement count
93 bnez r4, .Lloop1 ; go do some more
94.Lendloop1:
95 and3 r4, r3, #3 ; get no. of remaining BSS bytes to clear
96 addi r2, #4 ; account for pre-inc store
97 beqz r4, .Lendloop2 ; any more to go?
98.Lloop2:
99 stb r1, @r2 ; yep, zero out another byte
100 addi r2, #1 ; bump address
101 addi r4, #-1 ; decrement count
102 bnez r4, .Lloop2 ; go do some more
103.Lendloop2:
104
105#endif /* not CONFIG_ISA_DUAL_ISSUE */
106
107#if 0 /* M32R_FIXME */
108/*
109 * Copy data segment from ROM to RAM.
110 */
111 .global ROM_D, TOP_DATA, END_DATA
112
113 LDIMM (r1, ROM_D)
114 LDIMM (r2, TOP_DATA)
115 LDIMM (r3, END_DATA)
116 addi r2, #-4
117 addi r3, #-4
118loop1:
119 ld r0, @r1+
120 st r0, @+r2
121 cmp r2, r3
122 bc loop1
123#endif /* 0 */
124
125/* Jump to kernel */
126 LDIMM (r2, start_kernel)
127 jl r2
128 .fillinsn
1291:
130 bra 1b ; main should never return here, but
131 ; just in case, we know what happens.
132
133#ifdef CONFIG_SMP
134/*
135 * AP startup routine
136 */
137 .global eit_vector
138ENTRY(startup_AP)
139;; setup EVB
140 LDIMM (r4, eit_vector)
141 mvtc r4, cr5
142
143;; enable MMU
144 LDIMM (r2, init_tlb)
145 jl r2
146 seth r4, #high(MATM)
147 or3 r4, r4, #low(MATM)
148 ldi r5, #0x01
149 st r5, @r4 ; Set MATM Reg(T bit ON)
150 ld r6, @r4 ; MATM Check
151 LDIMM (r5, 1f)
152 jmp r5 ; enable MMU
153 nop
154 .fillinsn
1551:
156;; ISN check
157 ld r6, @r4 ; MATM Check
158 seth r4, #high(M32R_ICU_ISTS_ADDR)
159 or3 r4, r4, #low(M32R_ICU_ISTS_ADDR)
160 ld r5, @r4 ; Read ISTSi reg.
161 mv r6, r5
162 slli r5, #13 ; PIML check
163 srli r5, #13 ;
164 seth r4, #high(M32R_ICU_IMASK_ADDR)
165 or3 r4, r4, #low(M32R_ICU_IMASK_ADDR)
166 st r5, @r4 ; Write IMASKi reg.
167 slli r6, #4 ; ISN check
168 srli r6, #26 ;
169 seth r4, #high(M32R_IRQ_IPI5)
170 or3 r4, r4, #low(M32R_IRQ_IPI5)
171 bne r4, r6, 2f ; if (ISN != CPU_BOOT_IPI) goto sleep;
172
173;; check cpu_bootout_map and set cpu_bootin_map
174 LDIMM (r4, cpu_bootout_map)
175 ld r4, @r4
176 seth r5, #high(M32R_CPUID_PORTL)
177 or3 r5, r5, #low(M32R_CPUID_PORTL)
178 ld r5, @r5
179 ldi r6, #1
180 sll r6, r5
181 and r4, r6
182 beqz r4, 2f
183 LDIMM (r4, cpu_bootin_map)
184 ld r5, @r4
185 or r5, r6
186 st r6, @r4
187
188;; clear PSW
189 ldi r4, #0
190 mvtc r4, psw
191
192;; setup SPI
193 LDIMM (r4, stack_start)
194 ld r4, @r4
195 mvtc r4, spi
196
197;; setup BPC (start_secondary)
198 LDIMM (r4, start_secondary)
199 mvtc r4, bpc
200
201 rte ; goto startup_secondary
202 nop
203 nop
204
205 .fillinsn
2062:
207 ;; disable MMU
208 seth r4, #high(MATM)
209 or3 r4, r4, #low(MATM)
210 ldi r5, #0
211 st r5, @r4 ; Set MATM Reg(T bit OFF)
212 ld r6, @r4 ; MATM Check
213 LDIMM (r4, 3f)
214 seth r5, #high(__PAGE_OFFSET)
215 or3 r5, r5, #low(__PAGE_OFFSET)
216 not r5, r5
217 and r4, r5
218 jmp r4 ; disable MMU
219 nop
220 .fillinsn
2213:
222 ;; SLEEP and wait IPI
223 LDIMM (r4, AP_loop)
224 seth r5, #high(__PAGE_OFFSET)
225 or3 r5, r5, #low(__PAGE_OFFSET)
226 not r5, r5
227 and r4, r5
228 jmp r4
229 nop
230 nop
231#endif /* CONFIG_SMP */
232
233 .text
234ENTRY(stack_start)
235 .long init_thread_union+8192
236 .long __KERNEL_DS
237
238/*
239 * This is initialized to create a identity-mapping at 0-4M (for bootup
240 * purposes) and another mapping of the 0-4M area at virtual address
241 * PAGE_OFFSET.
242 */
243 .text
244
245#define MOUNT_ROOT_RDONLY 1
246#define RAMDISK_FLAGS 0 ; 1024KB
247#define ORIG_ROOT_DEV 0x0100 ; /dev/ram0 (major:01, minor:00)
248#define LOADER_TYPE 1 ; (??? - non-zero value seems
249 ; to be needed to boot from initrd)
250
251#define COMMAND_LINE ""
252
253 .section .empty_zero_page, "aw"
254ENTRY(empty_zero_page)
255 .long MOUNT_ROOT_RDONLY /* offset: +0x00 */
256 .long RAMDISK_FLAGS
257 .long ORIG_ROOT_DEV
258 .long LOADER_TYPE
259 .long 0 /* INITRD_START */ /* +0x10 */
260 .long 0 /* INITRD_SIZE */
261 .long 0 /* CPU_CLOCK */
262 .long 0 /* BUS_CLOCK */
263 .long 0 /* TIMER_DIVIDE */ /* +0x20 */
264 .balign 256,0
265 .asciz COMMAND_LINE
266 .byte 0
267 .balign 4096,0,4096
268
269/*------------------------------------------------------------------------
270 * Stack area
271 */
272 .section .init.data, "aw"
273 ALIGN
274 .global spi_stack_top
275 .zero 1024
276spi_stack_top:
277
278 .section .init.data, "aw"
279 ALIGN
280 .global spu_stack_top
281 .zero 1024
282spu_stack_top:
283
284 .end
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
deleted file mode 100644
index 83b5032f176c..000000000000
--- a/arch/m32r/kernel/irq.c
+++ /dev/null
@@ -1,44 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/arch/m32r/kernel/irq.c
4 *
5 * Copyright (c) 2003, 2004 Hitoshi Yamamoto
6 * Copyright (c) 2004 Hirokazu Takata <takata at linux-m32r.org>
7 */
8
9/*
10 * linux/arch/i386/kernel/irq.c
11 *
12 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
13 *
14 * This file contains the lowest level m32r-specific interrupt
15 * entry and irq statistics code. All the remaining irq logic is
16 * done by the generic kernel/irq/ code and in the
17 * m32r-specific irq controller code.
18 */
19
20#include <linux/kernel_stat.h>
21#include <linux/interrupt.h>
22#include <linux/module.h>
23#include <linux/uaccess.h>
24
25/*
26 * do_IRQ handles all normal device IRQs (the special
27 * SMP cross-CPU interrupts have their own specific
28 * handlers).
29 */
30asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
31{
32 struct pt_regs *old_regs;
33 old_regs = set_irq_regs(regs);
34 irq_enter();
35
36#ifdef CONFIG_DEBUG_STACKOVERFLOW
37 /* FIXME M32R */
38#endif
39 generic_handle_irq(irq);
40 irq_exit();
41 set_irq_regs(old_regs);
42
43 return 1;
44}
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
deleted file mode 100644
index 46ebe071e4d6..000000000000
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ /dev/null
@@ -1,89 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/module.h>
3#include <linux/smp.h>
4#include <linux/user.h>
5#include <linux/elfcore.h>
6#include <linux/sched.h>
7#include <linux/in6.h>
8#include <linux/interrupt.h>
9#include <linux/string.h>
10
11#include <asm/processor.h>
12#include <linux/uaccess.h>
13#include <asm/checksum.h>
14#include <asm/io.h>
15#include <asm/delay.h>
16#include <asm/irq.h>
17#include <asm/tlbflush.h>
18#include <asm/pgtable.h>
19
20/* platform dependent support */
21EXPORT_SYMBOL(boot_cpu_data);
22EXPORT_SYMBOL(dump_fpu);
23EXPORT_SYMBOL(__ioremap);
24EXPORT_SYMBOL(iounmap);
25
26EXPORT_SYMBOL(strncpy_from_user);
27EXPORT_SYMBOL(clear_user);
28EXPORT_SYMBOL(__clear_user);
29EXPORT_SYMBOL(strnlen_user);
30
31#ifdef CONFIG_SMP
32#ifdef CONFIG_CHIP_M32700_TS1
33extern void *dcache_dummy;
34EXPORT_SYMBOL(dcache_dummy);
35#endif
36EXPORT_SYMBOL(cpu_data);
37
38/* TLB flushing */
39EXPORT_SYMBOL(smp_flush_tlb_page);
40#endif
41
42extern int __ucmpdi2(unsigned long long a, unsigned long long b);
43EXPORT_SYMBOL(__ucmpdi2);
44
45/* compiler generated symbol */
46extern void __ashldi3(void);
47extern void __ashrdi3(void);
48extern void __lshldi3(void);
49extern void __lshrdi3(void);
50extern void __muldi3(void);
51EXPORT_SYMBOL(__ashldi3);
52EXPORT_SYMBOL(__ashrdi3);
53EXPORT_SYMBOL(__lshldi3);
54EXPORT_SYMBOL(__lshrdi3);
55EXPORT_SYMBOL(__muldi3);
56
57/* memory and string operations */
58EXPORT_SYMBOL(memcpy);
59EXPORT_SYMBOL(memset);
60EXPORT_SYMBOL(copy_page);
61EXPORT_SYMBOL(clear_page);
62EXPORT_SYMBOL(strlen);
63EXPORT_SYMBOL(empty_zero_page);
64
65EXPORT_SYMBOL(_inb);
66EXPORT_SYMBOL(_inw);
67EXPORT_SYMBOL(_inl);
68EXPORT_SYMBOL(_outb);
69EXPORT_SYMBOL(_outw);
70EXPORT_SYMBOL(_outl);
71EXPORT_SYMBOL(_inb_p);
72EXPORT_SYMBOL(_inw_p);
73EXPORT_SYMBOL(_inl_p);
74EXPORT_SYMBOL(_outb_p);
75EXPORT_SYMBOL(_outw_p);
76EXPORT_SYMBOL(_outl_p);
77EXPORT_SYMBOL(_insb);
78EXPORT_SYMBOL(_insw);
79EXPORT_SYMBOL(_insl);
80EXPORT_SYMBOL(_outsb);
81EXPORT_SYMBOL(_outsw);
82EXPORT_SYMBOL(_outsl);
83EXPORT_SYMBOL(_readb);
84EXPORT_SYMBOL(_readw);
85EXPORT_SYMBOL(_readl);
86EXPORT_SYMBOL(_writeb);
87EXPORT_SYMBOL(_writew);
88EXPORT_SYMBOL(_writel);
89
diff --git a/arch/m32r/kernel/module.c b/arch/m32r/kernel/module.c
deleted file mode 100644
index 38233b6596b6..000000000000
--- a/arch/m32r/kernel/module.c
+++ /dev/null
@@ -1,203 +0,0 @@
1/* Kernel module help for M32R.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License as published by
5 the Free Software Foundation; either version 2 of the License, or
6 (at your option) any later version.
7
8 This program is distributed in the hope that it will be useful,
9 but WITHOUT ANY WARRANTY; without even the implied warranty of
10 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 GNU General Public License for more details.
12
13 You should have received a copy of the GNU General Public License
14 along with this program; if not, write to the Free Software
15 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16*/
17
18#include <linux/moduleloader.h>
19#include <linux/elf.h>
20#include <linux/vmalloc.h>
21#include <linux/fs.h>
22#include <linux/string.h>
23#include <linux/kernel.h>
24
25#if 0
26#define DEBUGP printk
27#else
28#define DEBUGP(fmt...)
29#endif
30
31#define COPY_UNALIGNED_WORD(sw, tw, align) \
32{ \
33 void *__s = &(sw), *__t = &(tw); \
34 unsigned short *__s2 = __s, *__t2 =__t; \
35 unsigned char *__s1 = __s, *__t1 =__t; \
36 switch ((align)) \
37 { \
38 case 0: \
39 *(unsigned long *) __t = *(unsigned long *) __s; \
40 break; \
41 case 2: \
42 *__t2++ = *__s2++; \
43 *__t2 = *__s2; \
44 break; \
45 default: \
46 *__t1++ = *__s1++; \
47 *__t1++ = *__s1++; \
48 *__t1++ = *__s1++; \
49 *__t1 = *__s1; \
50 break; \
51 } \
52}
53
54#define COPY_UNALIGNED_HWORD(sw, tw, align) \
55 { \
56 void *__s = &(sw), *__t = &(tw); \
57 unsigned short *__s2 = __s, *__t2 =__t; \
58 unsigned char *__s1 = __s, *__t1 =__t; \
59 switch ((align)) \
60 { \
61 case 0: \
62 *__t2 = *__s2; \
63 break; \
64 default: \
65 *__t1++ = *__s1++; \
66 *__t1 = *__s1; \
67 break; \
68 } \
69 }
70
71int apply_relocate_add(Elf32_Shdr *sechdrs,
72 const char *strtab,
73 unsigned int symindex,
74 unsigned int relsec,
75 struct module *me)
76{
77 unsigned int i;
78 Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
79 Elf32_Sym *sym;
80 Elf32_Addr relocation;
81 uint32_t *location;
82 uint32_t value;
83 unsigned short *hlocation;
84 unsigned short hvalue;
85 int svalue;
86 int align;
87
88 DEBUGP("Applying relocate section %u to %u\n", relsec,
89 sechdrs[relsec].sh_info);
90 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
91 /* This is where to make the change */
92 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
93 + rel[i].r_offset;
94 /* This is the symbol it is referring to. Note that all
95 undefined symbols have been resolved. */
96 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
97 + ELF32_R_SYM(rel[i].r_info);
98 relocation = sym->st_value + rel[i].r_addend;
99 align = (int)location & 3;
100
101 switch (ELF32_R_TYPE(rel[i].r_info)) {
102 case R_M32R_32_RELA:
103 COPY_UNALIGNED_WORD (*location, value, align);
104 value += relocation;
105 COPY_UNALIGNED_WORD (value, *location, align);
106 break;
107 case R_M32R_HI16_ULO_RELA:
108 COPY_UNALIGNED_WORD (*location, value, align);
109 relocation = (relocation >>16) & 0xffff;
110 /* RELA must has 0 at relocation field. */
111 value += relocation;
112 COPY_UNALIGNED_WORD (value, *location, align);
113 break;
114 case R_M32R_HI16_SLO_RELA:
115 COPY_UNALIGNED_WORD (*location, value, align);
116 if (relocation & 0x8000) relocation += 0x10000;
117 relocation = (relocation >>16) & 0xffff;
118 /* RELA must has 0 at relocation field. */
119 value += relocation;
120 COPY_UNALIGNED_WORD (value, *location, align);
121 break;
122 case R_M32R_16_RELA:
123 hlocation = (unsigned short *)location;
124 relocation = relocation & 0xffff;
125 /* RELA must has 0 at relocation field. */
126 hvalue = relocation;
127 COPY_UNALIGNED_WORD (hvalue, *hlocation, align);
128 break;
129 case R_M32R_SDA16_RELA:
130 case R_M32R_LO16_RELA:
131 COPY_UNALIGNED_WORD (*location, value, align);
132 relocation = relocation & 0xffff;
133 /* RELA must has 0 at relocation field. */
134 value += relocation;
135 COPY_UNALIGNED_WORD (value, *location, align);
136 break;
137 case R_M32R_24_RELA:
138 COPY_UNALIGNED_WORD (*location, value, align);
139 relocation = relocation & 0xffffff;
140 /* RELA must has 0 at relocation field. */
141 value += relocation;
142 COPY_UNALIGNED_WORD (value, *location, align);
143 break;
144 case R_M32R_18_PCREL_RELA:
145 relocation = (relocation - (Elf32_Addr) location);
146 if (relocation < -0x20000 || 0x1fffc < relocation)
147 {
148 printk(KERN_ERR "module %s: relocation overflow: %u\n",
149 me->name, relocation);
150 return -ENOEXEC;
151 }
152 COPY_UNALIGNED_WORD (*location, value, align);
153 if (value & 0xffff)
154 {
155 /* RELA must has 0 at relocation field. */
156 printk(KERN_ERR "module %s: illegal relocation field: %u\n",
157 me->name, value);
158 return -ENOEXEC;
159 }
160 relocation = (relocation >> 2) & 0xffff;
161 value += relocation;
162 COPY_UNALIGNED_WORD (value, *location, align);
163 break;
164 case R_M32R_10_PCREL_RELA:
165 hlocation = (unsigned short *)location;
166 relocation = (relocation - (Elf32_Addr) location);
167 COPY_UNALIGNED_HWORD (*hlocation, hvalue, align);
168 svalue = (int)hvalue;
169 svalue = (signed char)svalue << 2;
170 relocation += svalue;
171 relocation = (relocation >> 2) & 0xff;
172 hvalue = hvalue & 0xff00;
173 hvalue += relocation;
174 COPY_UNALIGNED_HWORD (hvalue, *hlocation, align);
175 break;
176 case R_M32R_26_PCREL_RELA:
177 relocation = (relocation - (Elf32_Addr) location);
178 if (relocation < -0x2000000 || 0x1fffffc < relocation)
179 {
180 printk(KERN_ERR "module %s: relocation overflow: %u\n",
181 me->name, relocation);
182 return -ENOEXEC;
183 }
184 COPY_UNALIGNED_WORD (*location, value, align);
185 if (value & 0xffffff)
186 {
187 /* RELA must has 0 at relocation field. */
188 printk(KERN_ERR "module %s: illegal relocation field: %u\n",
189 me->name, value);
190 return -ENOEXEC;
191 }
192 relocation = (relocation >> 2) & 0xffffff;
193 value += relocation;
194 COPY_UNALIGNED_WORD (value, *location, align);
195 break;
196 default:
197 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
198 me->name, ELF32_R_TYPE(rel[i].r_info));
199 return -ENOEXEC;
200 }
201 }
202 return 0;
203}
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
deleted file mode 100644
index a1a4cb136e99..000000000000
--- a/arch/m32r/kernel/process.c
+++ /dev/null
@@ -1,154 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/arch/m32r/kernel/process.c
4 *
5 * Copyright (c) 2001, 2002 Hiroyuki Kondo, Hirokazu Takata,
6 * Hitoshi Yamamoto
7 * Taken from sh version.
8 * Copyright (C) 1995 Linus Torvalds
9 * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
10 */
11
12#undef DEBUG_PROCESS
13#ifdef DEBUG_PROCESS
14#define DPRINTK(fmt, args...) printk("%s:%d:%s: " fmt, __FILE__, __LINE__, \
15 __func__, ##args)
16#else
17#define DPRINTK(fmt, args...)
18#endif
19
20/*
21 * This file handles the architecture-dependent parts of process handling..
22 */
23
24#include <linux/fs.h>
25#include <linux/slab.h>
26#include <linux/sched/debug.h>
27#include <linux/sched/task.h>
28#include <linux/sched/task_stack.h>
29#include <linux/module.h>
30#include <linux/ptrace.h>
31#include <linux/unistd.h>
32#include <linux/hardirq.h>
33#include <linux/rcupdate.h>
34
35#include <asm/io.h>
36#include <linux/uaccess.h>
37#include <asm/mmu_context.h>
38#include <asm/elf.h>
39#include <asm/m32r.h>
40
41#include <linux/err.h>
42
43void (*pm_power_off)(void) = NULL;
44EXPORT_SYMBOL(pm_power_off);
45
46void machine_restart(char *__unused)
47{
48#if defined(CONFIG_PLAT_MAPPI3)
49 outw(1, (unsigned long)PLD_REBOOT);
50#endif
51
52 printk("Please push reset button!\n");
53 while (1)
54 cpu_relax();
55}
56
57void machine_halt(void)
58{
59 printk("Please push reset button!\n");
60 while (1)
61 cpu_relax();
62}
63
64void machine_power_off(void)
65{
66 /* M32R_FIXME */
67}
68
69void show_regs(struct pt_regs * regs)
70{
71 printk("\n");
72 show_regs_print_info(KERN_DEFAULT);
73
74 printk("BPC[%08lx]:PSW[%08lx]:LR [%08lx]:FP [%08lx]\n", \
75 regs->bpc, regs->psw, regs->lr, regs->fp);
76 printk("BBPC[%08lx]:BBPSW[%08lx]:SPU[%08lx]:SPI[%08lx]\n", \
77 regs->bbpc, regs->bbpsw, regs->spu, regs->spi);
78 printk("R0 [%08lx]:R1 [%08lx]:R2 [%08lx]:R3 [%08lx]\n", \
79 regs->r0, regs->r1, regs->r2, regs->r3);
80 printk("R4 [%08lx]:R5 [%08lx]:R6 [%08lx]:R7 [%08lx]\n", \
81 regs->r4, regs->r5, regs->r6, regs->r7);
82 printk("R8 [%08lx]:R9 [%08lx]:R10[%08lx]:R11[%08lx]\n", \
83 regs->r8, regs->r9, regs->r10, regs->r11);
84 printk("R12[%08lx]\n", \
85 regs->r12);
86
87#if defined(CONFIG_ISA_M32R2) && defined(CONFIG_ISA_DSP_LEVEL2)
88 printk("ACC0H[%08lx]:ACC0L[%08lx]\n", \
89 regs->acc0h, regs->acc0l);
90 printk("ACC1H[%08lx]:ACC1L[%08lx]\n", \
91 regs->acc1h, regs->acc1l);
92#elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R)
93 printk("ACCH[%08lx]:ACCL[%08lx]\n", \
94 regs->acc0h, regs->acc0l);
95#else
96#error unknown isa configuration
97#endif
98}
99
100void flush_thread(void)
101{
102 DPRINTK("pid = %d\n", current->pid);
103 memset(&current->thread.debug_trap, 0, sizeof(struct debug_trap));
104}
105
106void release_thread(struct task_struct *dead_task)
107{
108 /* do nothing */
109 DPRINTK("pid = %d\n", dead_task->pid);
110}
111
112/* Fill in the fpu structure for a core dump.. */
113int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
114{
115 return 0; /* Task didn't use the fpu at all. */
116}
117
118int copy_thread(unsigned long clone_flags, unsigned long spu,
119 unsigned long arg, struct task_struct *tsk)
120{
121 struct pt_regs *childregs = task_pt_regs(tsk);
122 extern void ret_from_fork(void);
123 extern void ret_from_kernel_thread(void);
124
125 if (unlikely(tsk->flags & PF_KTHREAD)) {
126 memset(childregs, 0, sizeof(struct pt_regs));
127 childregs->psw = M32R_PSW_BIE;
128 childregs->r1 = spu; /* fn */
129 childregs->r0 = arg;
130 tsk->thread.lr = (unsigned long)ret_from_kernel_thread;
131 } else {
132 /* Copy registers */
133 *childregs = *current_pt_regs();
134 if (spu)
135 childregs->spu = spu;
136 childregs->r0 = 0; /* Child gets zero as return value */
137 tsk->thread.lr = (unsigned long)ret_from_fork;
138 }
139 tsk->thread.sp = (unsigned long)childregs;
140
141 return 0;
142}
143
144/*
145 * These bracket the sleeping functions..
146 */
147#define first_sched ((unsigned long) scheduling_functions_start_here)
148#define last_sched ((unsigned long) scheduling_functions_end_here)
149
150unsigned long get_wchan(struct task_struct *p)
151{
152 /* M32R_FIXME */
153 return (0);
154}
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
deleted file mode 100644
index d702a5ca0f92..000000000000
--- a/arch/m32r/kernel/ptrace.c
+++ /dev/null
@@ -1,708 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/arch/m32r/kernel/ptrace.c
4 *
5 * Copyright (C) 2002 Hirokazu Takata, Takeo Takahashi
6 * Copyright (C) 2004 Hirokazu Takata, Kei Sakamoto
7 *
8 * Original x86 implementation:
9 * By Ross Biro 1/23/92
10 * edited by Linus Torvalds
11 *
12 * Some code taken from sh version:
13 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
14 * Some code taken from arm version:
15 * Copyright (C) 2000 Russell King
16 */
17
18#include <linux/kernel.h>
19#include <linux/sched.h>
20#include <linux/sched/task_stack.h>
21#include <linux/mm.h>
22#include <linux/err.h>
23#include <linux/smp.h>
24#include <linux/errno.h>
25#include <linux/ptrace.h>
26#include <linux/user.h>
27#include <linux/string.h>
28#include <linux/signal.h>
29
30#include <asm/cacheflush.h>
31#include <asm/io.h>
32#include <linux/uaccess.h>
33#include <asm/pgtable.h>
34#include <asm/processor.h>
35#include <asm/mmu_context.h>
36
37/*
38 * This routine will get a word off of the process kernel stack.
39 */
40static inline unsigned long int
41get_stack_long(struct task_struct *task, int offset)
42{
43 unsigned long *stack;
44
45 stack = (unsigned long *)task_pt_regs(task);
46
47 return stack[offset];
48}
49
50/*
51 * This routine will put a word on the process kernel stack.
52 */
53static inline int
54put_stack_long(struct task_struct *task, int offset, unsigned long data)
55{
56 unsigned long *stack;
57
58 stack = (unsigned long *)task_pt_regs(task);
59 stack[offset] = data;
60
61 return 0;
62}
63
64static int reg_offset[] = {
65 PT_R0, PT_R1, PT_R2, PT_R3, PT_R4, PT_R5, PT_R6, PT_R7,
66 PT_R8, PT_R9, PT_R10, PT_R11, PT_R12, PT_FP, PT_LR, PT_SPU,
67};
68
69/*
70 * Read the word at offset "off" into the "struct user". We
71 * actually access the pt_regs stored on the kernel stack.
72 */
73static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
74 unsigned long __user *data)
75{
76 unsigned long tmp;
77#ifndef NO_FPU
78 struct user * dummy = NULL;
79#endif
80
81 if ((off & 3) || off > sizeof(struct user) - 3)
82 return -EIO;
83
84 off >>= 2;
85 switch (off) {
86 case PT_EVB:
87 __asm__ __volatile__ (
88 "mvfc %0, cr5 \n\t"
89 : "=r" (tmp)
90 );
91 break;
92 case PT_CBR: {
93 unsigned long psw;
94 psw = get_stack_long(tsk, PT_PSW);
95 tmp = ((psw >> 8) & 1);
96 }
97 break;
98 case PT_PSW: {
99 unsigned long psw, bbpsw;
100 psw = get_stack_long(tsk, PT_PSW);
101 bbpsw = get_stack_long(tsk, PT_BBPSW);
102 tmp = ((psw >> 8) & 0xff) | ((bbpsw & 0xff) << 8);
103 }
104 break;
105 case PT_PC:
106 tmp = get_stack_long(tsk, PT_BPC);
107 break;
108 case PT_BPC:
109 off = PT_BBPC;
110 /* fall through */
111 default:
112 if (off < (sizeof(struct pt_regs) >> 2))
113 tmp = get_stack_long(tsk, off);
114#ifndef NO_FPU
115 else if (off >= (long)(&dummy->fpu >> 2) &&
116 off < (long)(&dummy->u_fpvalid >> 2)) {
117 if (!tsk_used_math(tsk)) {
118 if (off == (long)(&dummy->fpu.fpscr >> 2))
119 tmp = FPSCR_INIT;
120 else
121 tmp = 0;
122 } else
123 tmp = ((long *)(&tsk->thread.fpu >> 2))
124 [off - (long)&dummy->fpu];
125 } else if (off == (long)(&dummy->u_fpvalid >> 2))
126 tmp = !!tsk_used_math(tsk);
127#endif /* not NO_FPU */
128 else
129 tmp = 0;
130 }
131
132 return put_user(tmp, data);
133}
134
135static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
136 unsigned long data)
137{
138 int ret = -EIO;
139#ifndef NO_FPU
140 struct user * dummy = NULL;
141#endif
142
143 if ((off & 3) || off > sizeof(struct user) - 3)
144 return -EIO;
145
146 off >>= 2;
147 switch (off) {
148 case PT_EVB:
149 case PT_BPC:
150 case PT_SPI:
151 /* We don't allow to modify evb. */
152 ret = 0;
153 break;
154 case PT_PSW:
155 case PT_CBR: {
156 /* We allow to modify only cbr in psw */
157 unsigned long psw;
158 psw = get_stack_long(tsk, PT_PSW);
159 psw = (psw & ~0x100) | ((data & 1) << 8);
160 ret = put_stack_long(tsk, PT_PSW, psw);
161 }
162 break;
163 case PT_PC:
164 off = PT_BPC;
165 data &= ~1;
166 /* fall through */
167 default:
168 if (off < (sizeof(struct pt_regs) >> 2))
169 ret = put_stack_long(tsk, off, data);
170#ifndef NO_FPU
171 else if (off >= (long)(&dummy->fpu >> 2) &&
172 off < (long)(&dummy->u_fpvalid >> 2)) {
173 set_stopped_child_used_math(tsk);
174 ((long *)&tsk->thread.fpu)
175 [off - (long)&dummy->fpu] = data;
176 ret = 0;
177 } else if (off == (long)(&dummy->u_fpvalid >> 2)) {
178 conditional_stopped_child_used_math(data, tsk);
179 ret = 0;
180 }
181#endif /* not NO_FPU */
182 break;
183 }
184
185 return ret;
186}
187
188/*
189 * Get all user integer registers.
190 */
191static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
192{
193 struct pt_regs *regs = task_pt_regs(tsk);
194
195 return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
196}
197
198/*
199 * Set all user integer registers.
200 */
201static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
202{
203 struct pt_regs newregs;
204 int ret;
205
206 ret = -EFAULT;
207 if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
208 struct pt_regs *regs = task_pt_regs(tsk);
209 *regs = newregs;
210 ret = 0;
211 }
212
213 return ret;
214}
215
216
217static inline int
218check_condition_bit(struct task_struct *child)
219{
220 return (int)((get_stack_long(child, PT_PSW) >> 8) & 1);
221}
222
223static int
224check_condition_src(unsigned long op, unsigned long regno1,
225 unsigned long regno2, struct task_struct *child)
226{
227 unsigned long reg1, reg2;
228
229 reg2 = get_stack_long(child, reg_offset[regno2]);
230
231 switch (op) {
232 case 0x0: /* BEQ */
233 reg1 = get_stack_long(child, reg_offset[regno1]);
234 return reg1 == reg2;
235 case 0x1: /* BNE */
236 reg1 = get_stack_long(child, reg_offset[regno1]);
237 return reg1 != reg2;
238 case 0x8: /* BEQZ */
239 return reg2 == 0;
240 case 0x9: /* BNEZ */
241 return reg2 != 0;
242 case 0xa: /* BLTZ */
243 return (int)reg2 < 0;
244 case 0xb: /* BGEZ */
245 return (int)reg2 >= 0;
246 case 0xc: /* BLEZ */
247 return (int)reg2 <= 0;
248 case 0xd: /* BGTZ */
249 return (int)reg2 > 0;
250 default:
251 /* never reached */
252 return 0;
253 }
254}
255
256static void
257compute_next_pc_for_16bit_insn(unsigned long insn, unsigned long pc,
258 unsigned long *next_pc,
259 struct task_struct *child)
260{
261 unsigned long op, op2, op3;
262 unsigned long disp;
263 unsigned long regno;
264 int parallel = 0;
265
266 if (insn & 0x00008000)
267 parallel = 1;
268 if (pc & 3)
269 insn &= 0x7fff; /* right slot */
270 else
271 insn >>= 16; /* left slot */
272
273 op = (insn >> 12) & 0xf;
274 op2 = (insn >> 8) & 0xf;
275 op3 = (insn >> 4) & 0xf;
276
277 if (op == 0x7) {
278 switch (op2) {
279 case 0xd: /* BNC */
280 case 0x9: /* BNCL */
281 if (!check_condition_bit(child)) {
282 disp = (long)(insn << 24) >> 22;
283 *next_pc = (pc & ~0x3) + disp;
284 return;
285 }
286 break;
287 case 0x8: /* BCL */
288 case 0xc: /* BC */
289 if (check_condition_bit(child)) {
290 disp = (long)(insn << 24) >> 22;
291 *next_pc = (pc & ~0x3) + disp;
292 return;
293 }
294 break;
295 case 0xe: /* BL */
296 case 0xf: /* BRA */
297 disp = (long)(insn << 24) >> 22;
298 *next_pc = (pc & ~0x3) + disp;
299 return;
300 break;
301 }
302 } else if (op == 0x1) {
303 switch (op2) {
304 case 0x0:
305 if (op3 == 0xf) { /* TRAP */
306#if 1
307 /* pass through */
308#else
309 /* kernel space is not allowed as next_pc */
310 unsigned long evb;
311 unsigned long trapno;
312 trapno = insn & 0xf;
313 __asm__ __volatile__ (
314 "mvfc %0, cr5\n"
315 :"=r"(evb)
316 :
317 );
318 *next_pc = evb + (trapno << 2);
319 return;
320#endif
321 } else if (op3 == 0xd) { /* RTE */
322 *next_pc = get_stack_long(child, PT_BPC);
323 return;
324 }
325 break;
326 case 0xc: /* JC */
327 if (op3 == 0xc && check_condition_bit(child)) {
328 regno = insn & 0xf;
329 *next_pc = get_stack_long(child,
330 reg_offset[regno]);
331 return;
332 }
333 break;
334 case 0xd: /* JNC */
335 if (op3 == 0xc && !check_condition_bit(child)) {
336 regno = insn & 0xf;
337 *next_pc = get_stack_long(child,
338 reg_offset[regno]);
339 return;
340 }
341 break;
342 case 0xe: /* JL */
343 case 0xf: /* JMP */
344 if (op3 == 0xc) { /* JMP */
345 regno = insn & 0xf;
346 *next_pc = get_stack_long(child,
347 reg_offset[regno]);
348 return;
349 }
350 break;
351 }
352 }
353 if (parallel)
354 *next_pc = pc + 4;
355 else
356 *next_pc = pc + 2;
357}
358
359static void
360compute_next_pc_for_32bit_insn(unsigned long insn, unsigned long pc,
361 unsigned long *next_pc,
362 struct task_struct *child)
363{
364 unsigned long op;
365 unsigned long op2;
366 unsigned long disp;
367 unsigned long regno1, regno2;
368
369 op = (insn >> 28) & 0xf;
370 if (op == 0xf) { /* branch 24-bit relative */
371 op2 = (insn >> 24) & 0xf;
372 switch (op2) {
373 case 0xd: /* BNC */
374 case 0x9: /* BNCL */
375 if (!check_condition_bit(child)) {
376 disp = (long)(insn << 8) >> 6;
377 *next_pc = (pc & ~0x3) + disp;
378 return;
379 }
380 break;
381 case 0x8: /* BCL */
382 case 0xc: /* BC */
383 if (check_condition_bit(child)) {
384 disp = (long)(insn << 8) >> 6;
385 *next_pc = (pc & ~0x3) + disp;
386 return;
387 }
388 break;
389 case 0xe: /* BL */
390 case 0xf: /* BRA */
391 disp = (long)(insn << 8) >> 6;
392 *next_pc = (pc & ~0x3) + disp;
393 return;
394 }
395 } else if (op == 0xb) { /* branch 16-bit relative */
396 op2 = (insn >> 20) & 0xf;
397 switch (op2) {
398 case 0x0: /* BEQ */
399 case 0x1: /* BNE */
400 case 0x8: /* BEQZ */
401 case 0x9: /* BNEZ */
402 case 0xa: /* BLTZ */
403 case 0xb: /* BGEZ */
404 case 0xc: /* BLEZ */
405 case 0xd: /* BGTZ */
406 regno1 = ((insn >> 24) & 0xf);
407 regno2 = ((insn >> 16) & 0xf);
408 if (check_condition_src(op2, regno1, regno2, child)) {
409 disp = (long)(insn << 16) >> 14;
410 *next_pc = (pc & ~0x3) + disp;
411 return;
412 }
413 break;
414 }
415 }
416 *next_pc = pc + 4;
417}
418
419static inline void
420compute_next_pc(unsigned long insn, unsigned long pc,
421 unsigned long *next_pc, struct task_struct *child)
422{
423 if (insn & 0x80000000)
424 compute_next_pc_for_32bit_insn(insn, pc, next_pc, child);
425 else
426 compute_next_pc_for_16bit_insn(insn, pc, next_pc, child);
427}
428
429static int
430register_debug_trap(struct task_struct *child, unsigned long next_pc,
431 unsigned long next_insn, unsigned long *code)
432{
433 struct debug_trap *p = &child->thread.debug_trap;
434 unsigned long addr = next_pc & ~3;
435
436 if (p->nr_trap == MAX_TRAPS) {
437 printk("kernel BUG at %s %d: p->nr_trap = %d\n",
438 __FILE__, __LINE__, p->nr_trap);
439 return -1;
440 }
441 p->addr[p->nr_trap] = addr;
442 p->insn[p->nr_trap] = next_insn;
443 p->nr_trap++;
444 if (next_pc & 3) {
445 *code = (next_insn & 0xffff0000) | 0x10f1;
446 /* xxx --> TRAP1 */
447 } else {
448 if ((next_insn & 0x80000000) || (next_insn & 0x8000)) {
449 *code = 0x10f17000;
450 /* TRAP1 --> NOP */
451 } else {
452 *code = (next_insn & 0xffff) | 0x10f10000;
453 /* TRAP1 --> xxx */
454 }
455 }
456 return 0;
457}
458
459static int
460unregister_debug_trap(struct task_struct *child, unsigned long addr,
461 unsigned long *code)
462{
463 struct debug_trap *p = &child->thread.debug_trap;
464 int i;
465
466 /* Search debug trap entry. */
467 for (i = 0; i < p->nr_trap; i++) {
468 if (p->addr[i] == addr)
469 break;
470 }
471 if (i >= p->nr_trap) {
472 /* The trap may be requested from debugger.
473 * ptrace should do nothing in this case.
474 */
475 return 0;
476 }
477
478 /* Recover original instruction code. */
479 *code = p->insn[i];
480
481 /* Shift debug trap entries. */
482 while (i < p->nr_trap - 1) {
483 p->insn[i] = p->insn[i + 1];
484 p->addr[i] = p->addr[i + 1];
485 i++;
486 }
487 p->nr_trap--;
488 return 1;
489}
490
491static void
492unregister_all_debug_traps(struct task_struct *child)
493{
494 struct debug_trap *p = &child->thread.debug_trap;
495 int i;
496
497 for (i = 0; i < p->nr_trap; i++)
498 access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]),
499 FOLL_FORCE | FOLL_WRITE);
500 p->nr_trap = 0;
501}
502
/*
 * Make freshly patched instructions visible to the CPU.  M32700/OPSP
 * parts have a copy-back helper; other chips are handled by switching
 * the cache off, triggering an invalidate, polling until it completes,
 * and switching the cache back on via the chip's control addresses
 * (per the inline comments: -1 = cache on/off, -2 = invalidate).
 */
static inline void
invalidate_cache(void)
{
#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP)

	_flush_cache_copyback_all();

#else	/* ! CONFIG_CHIP_M32700 */

	/* Invalidate cache */
	__asm__ __volatile__ (
		"ldi	r0, #-1		\n\t"
		"ldi	r1, #0		\n\t"
		"stb	r1, @r0		; cache off	\n\t"
		";			\n\t"
		"ldi	r0, #-2		\n\t"
		"ldi	r1, #1		\n\t"
		"stb	r1, @r0		; cache invalidate	\n\t"
		".fillinsn		\n"
		"0:			\n\t"
		"ldb	r1, @r0		; invalidate check	\n\t"
		"bnez	r1, 0b		\n\t"
		";			\n\t"
		"ldi	r0, #-1		\n\t"
		"ldi	r1, #1		\n\t"
		"stb	r1, @r0		; cache on	\n\t"
		: : : "r0", "r1", "memory"
	);
	/* FIXME: copying-back d-cache and invalidating i-cache are needed.
	 */
#endif	/* CONFIG_CHIP_M32700 */
}
535
536/* Embed a debug trap (TRAP1) code */
537static int
538embed_debug_trap(struct task_struct *child, unsigned long next_pc)
539{
540 unsigned long next_insn, code;
541 unsigned long addr = next_pc & ~3;
542
543 if (access_process_vm(child, addr, &next_insn, sizeof(next_insn),
544 FOLL_FORCE)
545 != sizeof(next_insn)) {
546 return -1; /* error */
547 }
548
549 /* Set a trap code. */
550 if (register_debug_trap(child, next_pc, next_insn, &code)) {
551 return -1; /* error */
552 }
553 if (access_process_vm(child, addr, &code, sizeof(code),
554 FOLL_FORCE | FOLL_WRITE)
555 != sizeof(code)) {
556 return -1; /* error */
557 }
558 return 0; /* success */
559}
560
561void
562withdraw_debug_trap(struct pt_regs *regs)
563{
564 unsigned long addr;
565 unsigned long code;
566
567 addr = (regs->bpc - 2) & ~3;
568 regs->bpc -= 2;
569 if (unregister_debug_trap(current, addr, &code)) {
570 access_process_vm(current, addr, &code, sizeof(code),
571 FOLL_FORCE | FOLL_WRITE);
572 invalidate_cache();
573 }
574}
575
576void
577init_debug_traps(struct task_struct *child)
578{
579 struct debug_trap *p = &child->thread.debug_trap;
580 int i;
581 p->nr_trap = 0;
582 for (i = 0; i < MAX_TRAPS; i++) {
583 p->addr[i] = 0;
584 p->insn[i] = 0;
585 }
586}
587
/*
 * Begin a single step of @child: fetch the instruction at BPC, compute
 * the address of the instruction that will run after it, and plant a
 * TRAP1 there so the child stops again immediately.  Any failure along
 * the way simply leaves the child un-stepped.
 */
void user_enable_single_step(struct task_struct *child)
{
	unsigned long next_pc;
	unsigned long pc, insn;

	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	/* Compute next pc.  */
	pc = get_stack_long(child, PT_BPC);

	/* Fetch the word-aligned instruction word at pc. */
	if (access_process_vm(child, pc&~3, &insn, sizeof(insn),
			FOLL_FORCE)
	    != sizeof(insn))
		return;

	compute_next_pc(insn, pc, &next_pc, child);
	/* Top-bit addresses are never patched (presumably kernel space
	 * on m32r — TODO confirm against the memory map). */
	if (next_pc & 0x80000000)
		return;

	if (embed_debug_trap(child, next_pc))
		return;

	invalidate_cache();
}
612
/*
 * Stop single-stepping @child: remove every TRAP1 planted by
 * user_enable_single_step() and resync the caches.
 */
void user_disable_single_step(struct task_struct *child)
{
	unregister_all_debug_traps(child);
	invalidate_cache();
}
618
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 * On m32r single-step state lives in the software trap table, which
 * is torn down separately via user_disable_single_step(), so there is
 * nothing left to clear here.
 */
void ptrace_disable(struct task_struct *child)
{
	/* nothing to do.. */
}
628
/*
 * Arch-specific ptrace dispatch for m32r: memory and USER-area
 * peek/poke plus whole-register-set transfers; everything else falls
 * through to the generic ptrace_request().  A successful POKETEXT
 * additionally invalidates the caches so the child executes the newly
 * written instruction.
 */
long
arch_ptrace(struct task_struct *child, long request,
	    unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *) data;

	switch (request) {
	/*
	 * read word at location "addr" in the child process.
	 */
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/*
	 * read the word at location addr in the USER area.
	 */
	case PTRACE_PEEKUSR:
		ret = ptrace_read_user(child, addr, datap);
		break;

	/*
	 * write the word at location addr.
	 */
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		if (ret == 0 && request == PTRACE_POKETEXT)
			invalidate_cache();
		break;

	/*
	 * write the word at location addr in the USER area.
	 */
	case PTRACE_POKEUSR:
		ret = ptrace_write_user(child, addr, data);
		break;

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datap);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datap);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
684
/* notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 *
 * Stops the traced task in favour of its ptrace parent (SIGTRAP, or
 * SIGTRAP|0x80 when PT_TRACESYSGOOD is set) and re-raises any signal
 * the parent requested via exit_code before clearing it.
 */
void do_syscall_trace(void)
{
	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return;
	if (!(current->ptrace & PT_PTRACED))
		return;
	/* the 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}
diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
deleted file mode 100644
index b72d5db39f00..000000000000
--- a/arch/m32r/kernel/setup.c
+++ /dev/null
@@ -1,424 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/arch/m32r/kernel/setup.c
4 *
5 * Setup routines for Renesas M32R
6 *
7 * Copyright (c) 2001, 2002 Hiroyuki Kondo, Hirokazu Takata,
8 * Hitoshi Yamamoto
9 */
10
11#include <linux/init.h>
12#include <linux/kernel.h>
13#include <linux/stddef.h>
14#include <linux/fs.h>
15#include <linux/sched/mm.h>
16#include <linux/ioport.h>
17#include <linux/mm.h>
18#include <linux/bootmem.h>
19#include <linux/console.h>
20#include <linux/initrd.h>
21#include <linux/major.h>
22#include <linux/root_dev.h>
23#include <linux/seq_file.h>
24#include <linux/timex.h>
25#include <linux/screen_info.h>
26#include <linux/cpu.h>
27#include <linux/nodemask.h>
28#include <linux/pfn.h>
29
30#include <asm/processor.h>
31#include <asm/pgtable.h>
32#include <asm/io.h>
33#include <asm/mmu_context.h>
34#include <asm/m32r.h>
35#include <asm/setup.h>
36#include <asm/sections.h>
37
38#ifdef CONFIG_MMU
39extern void init_mmu(void);
40#endif
41
42extern char _end[];
43
44/*
45 * Machine setup..
46 */
47struct cpuinfo_m32r boot_cpu_data;
48
49#ifdef CONFIG_BLK_DEV_RAM
50extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
51extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
52extern int rd_image_start; /* starting block # of image */
53#endif
54
55#if defined(CONFIG_VGA_CONSOLE)
56struct screen_info screen_info = {
57 .orig_video_lines = 25,
58 .orig_video_cols = 80,
59 .orig_video_mode = 0,
60 .orig_video_ega_bx = 0,
61 .orig_video_isVGA = 1,
62 .orig_video_points = 8
63};
64#endif
65
66extern int root_mountflags;
67
68static char __initdata command_line[COMMAND_LINE_SIZE];
69
70static struct resource data_resource = {
71 .name = "Kernel data",
72 .start = 0,
73 .end = 0,
74 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
75};
76
77static struct resource code_resource = {
78 .name = "Kernel code",
79 .start = 0,
80 .end = 0,
81 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
82};
83
84unsigned long memory_start;
85EXPORT_SYMBOL(memory_start);
86
87unsigned long memory_end;
88EXPORT_SYMBOL(memory_end);
89
90void __init setup_arch(char **);
91int get_cpuinfo(char *);
92
/*
 * Copy the boot command line for /proc/cmdline, establish the default
 * [memory_start, memory_end) range from the Kconfig values, and scan
 * for a "mem=<size>" option.  Any "mem=" token overrides memory_end
 * and is stripped out of the command line handed back via *cmdline_p.
 */
static __inline__ void parse_mem_cmdline(char ** cmdline_p)
{
	char c = ' ';			/* previous char; ' ' marks a token start */
	char *to = command_line;	/* write cursor (filtered copy) */
	char *from = COMMAND_LINE;	/* read cursor (raw copy) */
	int len = 0;
	int usermem = 0;		/* saw a mem= override? */

	/* Save unparsed command line copy for /proc/cmdline */
	memcpy(boot_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE-1] = '\0';

	/* Defaults from Kconfig, translated to virtual addresses. */
	memory_start = (unsigned long)CONFIG_MEMORY_START+PAGE_OFFSET;
	memory_end = memory_start+(unsigned long)CONFIG_MEMORY_SIZE;

	for ( ; ; ) {
		if (c == ' ' && !memcmp(from, "mem=", 4)) {
			/* Drop the space already copied before "mem=". */
			if (to != command_line)
				to--;

			{
				unsigned long mem_size;

				usermem = 1;
				/* memparse advances 'from' past the size,
				 * skipping the option in the output copy. */
				mem_size = memparse(from+4, &from);
				memory_end = memory_start + mem_size;
			}
		}
		c = *(from++);
		if (!c)
			break;

		if (COMMAND_LINE_SIZE <= ++len)
			break;

		*(to++) = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
	if (usermem)
		printk(KERN_INFO "user-defined physical RAM map:\n");
}
135
136#ifndef CONFIG_DISCONTIGMEM
/*
 * Initialise the boot-time (bootmem) allocator for the flat-memory
 * case: register the usable RAM, then reserve the kernel image plus
 * bootmem bitmap, physical page 0, an optional memory hole, and the
 * initrd.  Returns the highest usable low-memory PFN.
 */
static unsigned long __init setup_memory(void)
{
	unsigned long start_pfn, max_low_pfn, bootmap_size;

	start_pfn = PFN_UP( __pa(_end) );	/* first page after the kernel */
	max_low_pfn = PFN_DOWN( __pa(memory_end) );

	/*
	 * Initialize the boot-time allocator (with low memory only):
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
		CONFIG_MEMORY_START>>PAGE_SHIFT, max_low_pfn);

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	{
		unsigned long curr_pfn;
		unsigned long last_pfn;
		unsigned long pages;

		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(__pa(memory_start));

		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(__pa(memory_end));

		if (last_pfn > max_low_pfn)
			last_pfn = max_low_pfn;

		pages = last_pfn - curr_pfn;
		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
	}

	/*
	 * Reserve the kernel text and
	 * Reserve the bootmem bitmap. We do this in two steps (first step
	 * was init_bootmem()), because this catches the (definitely buggy)
	 * case of us accidentally initializing the bootmem allocator with
	 * an invalid RAM area.
	 */
	reserve_bootmem(CONFIG_MEMORY_START + PAGE_SIZE,
		(PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE - 1)
		- CONFIG_MEMORY_START,
		BOOTMEM_DEFAULT);

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem(CONFIG_MEMORY_START, PAGE_SIZE, BOOTMEM_DEFAULT);

	/*
	 * reserve memory hole
	 */
#ifdef CONFIG_MEMHOLE
	reserve_bootmem(CONFIG_MEMHOLE_START, CONFIG_MEMHOLE_SIZE,
		BOOTMEM_DEFAULT);
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	/* Keep the initrd only when it fits entirely below max_low_pfn. */
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
			reserve_bootmem(INITRD_START, INITRD_SIZE,
				BOOTMEM_DEFAULT);
			initrd_start = INITRD_START + PAGE_OFFSET;
			initrd_end = initrd_start + INITRD_SIZE;
			printk("initrd:start[%08lx],size[%08lx]\n",
				initrd_start, INITRD_SIZE);
		} else {
			printk("initrd extends beyond end of memory "
				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				INITRD_START + INITRD_SIZE,
				max_low_pfn << PAGE_SHIFT);

			initrd_start = 0;
		}
	}
#endif

	return max_low_pfn;
}
223#else /* CONFIG_DISCONTIGMEM */
224extern unsigned long setup_memory(void);
225#endif /* CONFIG_DISCONTIGMEM */
226
/*
 * Top-level m32r boot-time setup: record CPU/bus clocks, honour the
 * boot loader's ramdisk and root-mount flags, select a console, set
 * up init_mm and the kernel code/data resources from the linker
 * symbols, parse the command line, and bring up bootmem + paging.
 */
void __init setup_arch(char **cmdline_p)
{
	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);

	/* Clock parameters come straight from the platform headers. */
	boot_cpu_data.cpu_clock = M32R_CPUCLK;
	boot_cpu_data.bus_clock = M32R_BUSCLK;
	boot_cpu_data.timer_divide = M32R_TIMER_DIVIDE;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

#ifdef CONFIG_DISCONTIGMEM
	/* Two fixed nodes on discontig configurations. */
	nodes_clear(node_online_map);
	node_set_online(0);
	node_set_online(1);
#endif	/* CONFIG_DISCONTIGMEM */

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(_etext)-1;
	data_resource.start = virt_to_phys(_etext);
	data_resource.end = virt_to_phys(_edata)-1;

	parse_mem_cmdline(cmdline_p);

	setup_memory();

	paging_init();
}
274
275static struct cpu cpu_devices[NR_CPUS];
276
277static int __init topology_init(void)
278{
279 int i;
280
281 for_each_present_cpu(i)
282 register_cpu(&cpu_devices[i], i);
283
284 return 0;
285}
286
287subsys_initcall(topology_init);
288
289#ifdef CONFIG_PROC_FS
/*
 * Get CPU information for use by the procfs.
 *
 * @v points into cpu_data[]; the CPU number is recovered from the
 * pointer offset.  Family, cache size, and machine strings are fixed
 * at build time by the CONFIG_CHIP_* / CONFIG_PLAT_* selections.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_m32r *c = v;
	unsigned long cpu = c - cpu_data;

#ifdef CONFIG_SMP
	if (!cpu_online(cpu))
		return 0;
#endif	/* CONFIG_SMP */

	seq_printf(m, "processor\t: %ld\n", cpu);

#if defined(CONFIG_CHIP_VDEC2)
	seq_printf(m, "cpu family\t: VDEC2\n"
		"cache size\t: Unknown\n");
#elif defined(CONFIG_CHIP_M32700)
	seq_printf(m,"cpu family\t: M32700\n"
		"cache size\t: I-8KB/D-8KB\n");
#elif defined(CONFIG_CHIP_M32102)
	seq_printf(m,"cpu family\t: M32102\n"
		"cache size\t: I-8KB\n");
#elif defined(CONFIG_CHIP_OPSP)
	seq_printf(m,"cpu family\t: OPSP\n"
		"cache size\t: I-8KB/D-8KB\n");
#elif defined(CONFIG_CHIP_MP)
	seq_printf(m, "cpu family\t: M32R-MP\n"
		"cache size\t: I-xxKB/D-xxKB\n");
#elif defined(CONFIG_CHIP_M32104)
	seq_printf(m,"cpu family\t: M32104\n"
		"cache size\t: I-8KB/D-8KB\n");
#else
	seq_printf(m, "cpu family\t: Unknown\n");
#endif
	seq_printf(m, "bogomips\t: %lu.%02lu\n",
		c->loops_per_jiffy/(500000/HZ),
		(c->loops_per_jiffy/(5000/HZ)) % 100);
#if defined(CONFIG_PLAT_MAPPI)
	seq_printf(m, "Machine\t\t: Mappi Evaluation board\n");
#elif defined(CONFIG_PLAT_MAPPI2)
	seq_printf(m, "Machine\t\t: Mappi-II Evaluation board\n");
#elif defined(CONFIG_PLAT_MAPPI3)
	seq_printf(m, "Machine\t\t: Mappi-III Evaluation board\n");
#elif defined(CONFIG_PLAT_M32700UT)
	seq_printf(m, "Machine\t\t: M32700UT Evaluation board\n");
#elif defined(CONFIG_PLAT_OPSPUT)
	seq_printf(m, "Machine\t\t: OPSPUT Evaluation board\n");
#elif defined(CONFIG_PLAT_USRV)
	seq_printf(m, "Machine\t\t: uServer\n");
#elif defined(CONFIG_PLAT_OAKS32R)
	seq_printf(m, "Machine\t\t: OAKS32R\n");
#elif defined(CONFIG_PLAT_M32104UT)
	seq_printf(m, "Machine\t\t: M3T-M32104UT uT Engine board\n");
#else
	seq_printf(m, "Machine\t\t: Unknown\n");
#endif

/* Print a Hz value as "<MHz>.<2 fractional digits>MHz". */
#define PRINT_CLOCK(name, value)				\
	seq_printf(m, name " clock\t: %d.%02dMHz\n",		\
		((value) / 1000000), ((value) % 1000000)/10000)

	PRINT_CLOCK("CPU", (int)c->cpu_clock);
	PRINT_CLOCK("Bus", (int)c->bus_clock);

	seq_printf(m, "\n");

	return 0;
}
360
361static void *c_start(struct seq_file *m, loff_t *pos)
362{
363 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
364}
365
366static void *c_next(struct seq_file *m, void *v, loff_t *pos)
367{
368 ++*pos;
369 return c_start(m, pos);
370}
371
/* seq_file stop: nothing was acquired in c_start, nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
375
/* seq_file hooks backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};
382#endif /* CONFIG_PROC_FS */
383
384unsigned long cpu_initialized __initdata = 0;
385
386/*
387 * cpu_init() initializes state that is per-CPU. Some data is already
388 * initialized (naturally) in the bootstrap process.
389 * We reload them nevertheless, this function acts as a
390 * 'CPU state barrier', nothing should get across.
391 */
392#if defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_XNUX2) \
393 || defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32102) \
394 || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
void __init cpu_init (void)
{
	int cpu_id = smp_processor_id();

	/* Double initialization of a CPU is fatal: hang here with
	 * interrupts enabled so the warning can still be seen/serviced. */
	if (test_and_set_bit(cpu_id, &cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
		for ( ; ; )
			local_irq_enable();
	}
	printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);

	/* Set up and load the per-CPU TSS and LDT */
	mmgrab(&init_mm);		/* pin init_mm as this CPU's active_mm */
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();			/* boot/idle task must be kernel-only */

	/* Force FPU initialization */
	current_thread_info()->status = 0;
	clear_used_math();

#ifdef CONFIG_MMU
	/* Set up MMU */
	init_mmu();
#endif

	/* Set up ICUIMASK */
	outl(0x00070000, M32R_ICU_IMASK_PORTL);	/* imask=111 */
}
424#endif /* defined(CONFIG_CHIP_VDEC2) ... */
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
deleted file mode 100644
index ba4d8d6330f1..000000000000
--- a/arch/m32r/kernel/signal.c
+++ /dev/null
@@ -1,336 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/arch/m32r/kernel/signal.c
4 *
5 * Copyright (c) 2003 Hitoshi Yamamoto
6 *
7 * Taken from i386 version.
8 * Copyright (C) 1991, 1992 Linus Torvalds
9 *
10 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
11 * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
12 */
13
14#include <linux/sched.h>
15#include <linux/mm.h>
16#include <linux/smp.h>
17#include <linux/kernel.h>
18#include <linux/signal.h>
19#include <linux/errno.h>
20#include <linux/wait.h>
21#include <linux/unistd.h>
22#include <linux/stddef.h>
23#include <linux/personality.h>
24#include <linux/tracehook.h>
25#include <asm/cacheflush.h>
26#include <asm/ucontext.h>
27#include <linux/uaccess.h>
28
29#define DEBUG_SIG 0
30
31/*
32 * Do a signal return; undo the signal stack.
33 */
34
/*
 * Signal frame pushed on the user stack by setup_rt_frame() and
 * unwound by sys_rt_sigreturn().  Layout is userspace ABI — do not
 * reorder.  pinfo/puc point at the embedded info/uc members.
 */
struct rt_sigframe
{
	int sig;			/* signal number (handler arg r0) */
	struct siginfo __user *pinfo;	/* -> info (handler arg r1) */
	void __user *puc;		/* -> uc (handler arg r2) */
	struct siginfo info;
	struct ucontext uc;
// struct _fpstate fpstate;
};
44
/*
 * Rebuild *regs from the user-space sigcontext @sc.  r0 is not copied
 * into regs directly; it is handed back through *r0_p so the caller can
 * return it as the syscall result.  Returns non-zero on any faulting
 * __get_user.
 */
static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
		   int *r0_p)
{
	unsigned int err = 0;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

/* Copy one field from the sigcontext; accumulate fault status in err. */
#define COPY(x)		err |= __get_user(regs->x, &sc->sc_##x)
		COPY(r4);
		COPY(r5);
		COPY(r6);
		COPY(pt_regs);
		/* COPY(r0); Skip r0 */
		COPY(r1);
		COPY(r2);
		COPY(r3);
		COPY(r7);
		COPY(r8);
		COPY(r9);
		COPY(r10);
		COPY(r11);
		COPY(r12);
		COPY(acc0h);
		COPY(acc0l);
		COPY(acc1h);		/* ISA_DSP_LEVEL2 only */
		COPY(acc1l);		/* ISA_DSP_LEVEL2 only */
		COPY(psw);
		COPY(bpc);
		COPY(bbpsw);
		COPY(bbpc);
		COPY(spu);
		COPY(fp);
		COPY(lr);
		COPY(spi);
#undef COPY

	regs->syscall_nr = -1;	/* disable syscall checks */
	err |= __get_user(*r0_p, &sc->sc_r0);

	return err;
}
88
/*
 * rt_sigreturn syscall: unwind the rt_sigframe that setup_rt_frame()
 * pushed (found at the user stack pointer, regs->spu), restore the
 * blocked-signal mask, registers, and the alternate signal stack, and
 * return the restored r0 as the syscall result.  A bad frame kills the
 * task with SIGSEGV.
 */
asmlinkage int
sys_rt_sigreturn(unsigned long r0, unsigned long r1,
		 unsigned long r2, unsigned long r3, unsigned long r4,
		 unsigned long r5, unsigned long r6, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->spu;
	sigset_t set;
	int result;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &result))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return result;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
117
118/*
119 * Set up a signal frame.
120 */
121
/*
 * Save the current register state *regs plus the old signal mask into
 * the user-space sigcontext @sc.  Mirror image of restore_sigcontext();
 * returns non-zero if any __put_user faulted.
 */
static int
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
		 unsigned long mask)
{
	int err = 0;

/* Copy one field into the sigcontext; accumulate fault status in err. */
#define COPY(x)		err |= __put_user(regs->x, &sc->sc_##x)
		COPY(r4);
		COPY(r5);
		COPY(r6);
		COPY(pt_regs);
		COPY(r0);
		COPY(r1);
		COPY(r2);
		COPY(r3);
		COPY(r7);
		COPY(r8);
		COPY(r9);
		COPY(r10);
		COPY(r11);
		COPY(r12);
		COPY(acc0h);
		COPY(acc0l);
		COPY(acc1h);		/* ISA_DSP_LEVEL2 only */
		COPY(acc1l);		/* ISA_DSP_LEVEL2 only */
		COPY(psw);
		COPY(bpc);
		COPY(bbpsw);
		COPY(bbpc);
		COPY(spu);
		COPY(fp);
		COPY(lr);
		COPY(spi);
#undef COPY

	err |= __put_user(mask, &sc->oldmask);

	return err;
}
161
162/*
163 * Determine which stack to use..
164 */
165static inline void __user *
166get_sigframe(struct ksignal *ksig, unsigned long sp, size_t frame_size)
167{
168 return (void __user *)((sigsp(sp, ksig) - frame_size) & -8ul);
169}
170
/*
 * Build an rt_sigframe on the chosen user stack and point the child's
 * registers at the handler: r0..r2 carry (sig, &info, &uc), bpc jumps
 * to the handler, and lr returns through sa_restorer.  Returns 0 on
 * success, -EFAULT on any user-copy failure.
 */
static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	int err = 0;
	int sig = ksig->sig;

	frame = get_sigframe(ksig, regs->spu, sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	err |= __put_user(sig, &frame->sig);
	if (err)
		return -EFAULT;

	/* Self-pointers into the frame, handed to the handler. */
	err |= __put_user(&frame->info, &frame->pinfo);
	err |= __put_user(&frame->uc, &frame->puc);
	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
	if (err)
		return -EFAULT;

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, &frame->uc.uc_link);
	err |= __save_altstack(&frame->uc.uc_stack, regs->spu);
	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		return -EFAULT;

	/* Set up to return from userspace.  */
	regs->lr = (unsigned long)ksig->ka.sa.sa_restorer;

	/* Set up registers for signal handler */
	regs->spu = (unsigned long)frame;
	regs->r0 = sig;	/* Arg for signal handler */
	regs->r1 = (unsigned long)&frame->info;
	regs->r2 = (unsigned long)&frame->uc;
	regs->bpc = (unsigned long)ksig->ka.sa.sa_handler;

#if DEBUG_SIG
	printk("SIG deliver (%s:%d): sp=%p pc=%p\n",
		current->comm, current->pid, frame, regs->pc);
#endif

	return 0;
}
219
220static int prev_insn(struct pt_regs *regs)
221{
222 u16 inst;
223 if (get_user(inst, (u16 __user *)(regs->bpc - 2)))
224 return -EFAULT;
225 if ((inst & 0xfff0) == 0x10f0) /* trap ? */
226 regs->bpc -= 2;
227 else
228 regs->bpc -= 4;
229 regs->syscall_nr = -1;
230 return 0;
231}
232
233/*
234 * OK, we're invoking a handler
235 */
236
/*
 * OK, we're invoking a handler
 *
 * If we interrupted a system call, translate its restart-type return
 * value first (-EINTR, or rewind the PC to re-issue the call), then
 * push the signal frame and report the outcome via signal_setup_done().
 */
static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	int ret;

	/* Are we from a system call? */
	if (regs->syscall_nr >= 0) {
		/* If so, check system call restarting.. */
		switch (regs->r0) {
		        case -ERESTART_RESTARTBLOCK:
			case -ERESTARTNOHAND:
				regs->r0 = -EINTR;
				break;

			case -ERESTARTSYS:
				if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
					regs->r0 = -EINTR;
					break;
				}
			/* fallthrough */
			case -ERESTARTNOINTR:
				regs->r0 = regs->orig_r0;
				if (prev_insn(regs) < 0)
					return;	/* can't rewind: leave frame alone */
		}
	}

	/* Set up the stack frame */
	ret = setup_rt_frame(ksig, sigmask_to_save(), regs);

	signal_setup_done(ret, ksig, 0);
}
269
270/*
271 * Note that 'init' is a special process: it doesn't get signals it doesn't
272 * want to handle. Thus you cannot kill init even with a SIGKILL even by
273 * mistake.
274 */
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Deliver one pending signal if a handler is registered; otherwise
 * arrange syscall restart (rewinding the PC, and loading r7 with
 * __NR_restart_syscall for ERESTART_RESTARTBLOCK) and restore the
 * saved signal mask.
 */
static void do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;

	/*
	 * We want the common case to go fast, which
	 * is why we may in certain cases get here from
	 * kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return;

	if (get_signal(&ksig)) {
		/* Re-enable any watchpoints before delivering the
		 * signal to user space. The processor register will
		 * have been cleared if the watchpoint triggered
		 * inside the kernel.
		 */

		/* Whee!  Actually deliver the signal.  */
		handle_signal(&ksig, regs);

		return;
	}

	/* Did we come from a system call? */
	if (regs->syscall_nr >= 0) {
		/* Restart the system call - no handlers present */
		if (regs->r0 == -ERESTARTNOHAND ||
		    regs->r0 == -ERESTARTSYS ||
		    regs->r0 == -ERESTARTNOINTR) {
			regs->r0 = regs->orig_r0;
			prev_insn(regs);
		} else if (regs->r0 == -ERESTART_RESTARTBLOCK){
			regs->r0 = regs->orig_r0;
			regs->r7 = __NR_restart_syscall;
			prev_insn(regs);
		}
	}
	restore_saved_sigmask();
}
317
318/*
319 * notification of userspace execution resumption
320 * - triggered by current->work.notify_resume
321 */
/*
 * notification of userspace execution resumption
 * - triggered by current->work.notify_resume
 *
 * Handles, in order: a leftover single-step flag, pending signal
 * delivery, and the generic notify-resume hook.
 */
void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
{
	/* Pending single-step? */
	if (thread_info_flags & _TIF_SINGLESTEP)
		clear_thread_flag(TIF_SINGLESTEP);

	/* deal with pending signal delivery */
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}
}
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
deleted file mode 100644
index 564052e3d3a0..000000000000
--- a/arch/m32r/kernel/smp.c
+++ /dev/null
@@ -1,836 +0,0 @@
1/*
2 * linux/arch/m32r/kernel/smp.c
3 *
4 * M32R SMP support routines.
5 *
6 * Copyright (c) 2001, 2002 Hitoshi Yamamoto
7 *
8 * Taken from i386 version.
9 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
10 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
11 *
12 * This code is released under the GNU General Public License version 2 or
13 * later.
14 */
15
16#undef DEBUG_SMP
17
18#include <linux/irq.h>
19#include <linux/interrupt.h>
20#include <linux/sched.h>
21#include <linux/spinlock.h>
22#include <linux/mm.h>
23#include <linux/smp.h>
24#include <linux/profile.h>
25#include <linux/cpu.h>
26
27#include <asm/cacheflush.h>
28#include <asm/pgalloc.h>
29#include <linux/atomic.h>
30#include <asm/io.h>
31#include <asm/mmu_context.h>
32#include <asm/m32r.h>
33#include <asm/tlbflush.h>
34
35/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
36/* Data structures and variables */
37/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
38
39/*
40 * For flush_cache_all()
41 */
42static DEFINE_SPINLOCK(flushcache_lock);
43static volatile unsigned long flushcache_cpumask = 0;
44
45/*
46 * For flush_tlb_others()
47 */
48static cpumask_t flush_cpumask;
49static struct mm_struct *flush_mm;
50static struct vm_area_struct *flush_vma;
51static volatile unsigned long flush_va;
52static DEFINE_SPINLOCK(tlbstate_lock);
53#define FLUSH_ALL 0xffffffff
54
55DECLARE_PER_CPU(int, prof_multiplier);
56DECLARE_PER_CPU(int, prof_old_multiplier);
57DECLARE_PER_CPU(int, prof_counter);
58
59extern spinlock_t ipi_lock[];
60
61/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
62/* Function Prototypes */
63/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
64
65void smp_reschedule_interrupt(void);
66void smp_flush_cache_all_interrupt(void);
67
68static void flush_tlb_all_ipi(void *);
69static void flush_tlb_others(cpumask_t, struct mm_struct *,
70 struct vm_area_struct *, unsigned long);
71
72void smp_invalidate_interrupt(void);
73
74static void stop_this_cpu(void *);
75
76void smp_ipi_timer_interrupt(struct pt_regs *);
77void smp_local_timer_interrupt(void);
78
79static void send_IPI_allbutself(int, int);
80static void send_IPI_mask(const struct cpumask *, int, int);
81
82/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
83/* Rescheduling request Routines */
84/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
85
86/*==========================================================================*
87 * Name: smp_send_reschedule
88 *
89 * Description: This routine requests other CPU to execute rescheduling.
90 * 1.Send 'RESCHEDULE_IPI' to other CPU.
91 * Request other CPU to execute 'smp_reschedule_interrupt()'.
92 *
93 * Born on Date: 2002.02.05
94 *
95 * Arguments: cpu_id - Target CPU ID
96 *
97 * Returns: void (cannot fail)
98 *
99 * Modification log:
100 * Date Who Description
101 * ---------- --- --------------------------------------------------------
102 *
103 *==========================================================================*/
104void smp_send_reschedule(int cpu_id)
105{
106	WARN_ON(cpu_is_offline(cpu_id));
107	send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1); /* try=1: don't resend if the previous IPI is still pending */
108}
109
110/*==========================================================================*
111 * Name: smp_reschedule_interrupt
112 *
113 * Description: This routine executes on CPU which received
114 * 'RESCHEDULE_IPI'.
115 *
116 * Born on Date: 2002.02.05
117 *
118 * Arguments: NONE
119 *
120 * Returns: void (cannot fail)
121 *
122 * Modification log:
123 * Date Who Description
124 * ---------- --- --------------------------------------------------------
125 *
126 *==========================================================================*/
127void smp_reschedule_interrupt(void)
128{
129	scheduler_ipi(); /* IPI handler: just poke the generic scheduler */
130}
131
132/*==========================================================================*
133 * Name: smp_flush_cache_all
134 *
135 * Description: This routine sends a 'INVALIDATE_CACHE_IPI' to all other
136 * CPUs in the system.
137 *
138 * Born on Date: 2003-05-28
139 *
140 * Arguments: NONE
141 *
142 * Returns: void (cannot fail)
143 *
144 * Modification log:
145 * Date Who Description
146 * ---------- --- --------------------------------------------------------
147 *
148 *==========================================================================*/
149void smp_flush_cache_all(void)
150{
151	cpumask_t cpumask;
152	unsigned long *mask;
153
154	preempt_disable();
155	cpumask_copy(&cpumask, cpu_online_mask);
156	cpumask_clear_cpu(smp_processor_id(), &cpumask); /* every online CPU but us */
157	spin_lock(&flushcache_lock); /* serializes users of the shared flushcache_cpumask */
158	mask=cpumask_bits(&cpumask);
159	atomic_or(*mask, (atomic_t *)&flushcache_cpumask); /* publish the set of CPUs we expect an ack from */
160	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
161	_flush_cache_copyback_all(); /* flush our own cache while the others work */
162	while (flushcache_cpumask)
163		mb(); /* spin: each target clears its bit in smp_flush_cache_all_interrupt() */
164	spin_unlock(&flushcache_lock);
165	preempt_enable();
166}
167EXPORT_SYMBOL(smp_flush_cache_all);
168
169void smp_flush_cache_all_interrupt(void)
170{
	/* INVALIDATE_CACHE_IPI handler: flush local cache, then ack. */
171	_flush_cache_copyback_all();
172	clear_bit(smp_processor_id(), &flushcache_cpumask); /* ack: lets smp_flush_cache_all() stop spinning */
173}
174
175/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
176/* TLB flush request Routines */
177/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
178
179/*==========================================================================*
180 * Name: smp_flush_tlb_all
181 *
182 * Description: This routine flushes all processes TLBs.
183 * 1.Request other CPU to execute 'flush_tlb_all_ipi()'.
184 * 2.Execute 'do_flush_tlb_all_local()'.
185 *
186 * Born on Date: 2002.02.05
187 *
188 * Arguments: NONE
189 *
190 * Returns: void (cannot fail)
191 *
192 * Modification log:
193 * Date Who Description
194 * ---------- --- --------------------------------------------------------
195 *
196 *==========================================================================*/
197void smp_flush_tlb_all(void)
198{
199	unsigned long flags;
200
201	preempt_disable();
202	local_irq_save(flags); /* local flush must not be interrupted */
203	__flush_tlb_all();
204	local_irq_restore(flags);
205	smp_call_function(flush_tlb_all_ipi, NULL, 1); /* wait=1: all other CPUs flush before we return */
206	preempt_enable();
207}
208
209/*==========================================================================*
210 * Name: flush_tlb_all_ipi
211 *
212 * Description: This routine flushes all local TLBs.
213 * 1.Execute 'do_flush_tlb_all_local()'.
214 *
215 * Born on Date: 2002.02.05
216 *
217 * Arguments: *info - not used
218 *
219 * Returns: void (cannot fail)
220 *
221 * Modification log:
222 * Date Who Description
223 * ---------- --- --------------------------------------------------------
224 *
225 *==========================================================================*/
226static void flush_tlb_all_ipi(void *info)
227{
	/* smp_call_function callback: flush this CPU's entire TLB; info unused. */
228	__flush_tlb_all();
229}
230
231/*==========================================================================*
232 * Name: smp_flush_tlb_mm
233 *
234 * Description: This routine flushes the specified mm context TLB's.
235 *
236 * Born on Date: 2002.02.05
237 *
238 * Arguments: *mm - a pointer to the mm struct for flush TLB
239 *
240 * Returns: void (cannot fail)
241 *
242 * Modification log:
243 * Date Who Description
244 * ---------- --- --------------------------------------------------------
245 *
246 *==========================================================================*/
247void smp_flush_tlb_mm(struct mm_struct *mm)
248{
249	int cpu_id;
250	cpumask_t cpu_mask;
251	unsigned long *mmc;	/* this CPU's context slot for mm */
252	unsigned long flags;
253
254	preempt_disable();
255	cpu_id = smp_processor_id();
256	mmc = &mm->context[cpu_id];
257	cpumask_copy(&cpu_mask, mm_cpumask(mm));
258	cpumask_clear_cpu(cpu_id, &cpu_mask); /* remote CPUs that have used this mm */
259
260	if (*mmc != NO_CONTEXT) {
261		local_irq_save(flags);
262		*mmc = NO_CONTEXT; /* drop our ASID for this mm */
263		if (mm == current->mm)
264			activate_context(mm); /* presumably re-allocates a context for the live mm — confirm in mmu_context.h */
265		else
266			cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
267		local_irq_restore(flags);
268	}
269	if (!cpumask_empty(&cpu_mask))
270		flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL); /* FLUSH_ALL: whole-mm flush on the others */
271
272	preempt_enable();
273}
274
275/*==========================================================================*
276 * Name: smp_flush_tlb_range
277 *
278 * Description: This routine flushes a range of pages.
279 *
280 * Born on Date: 2002.02.05
281 *
282 * Arguments: *mm - a pointer to the mm struct for flush TLB
283 * start - not used
284 * end - not used
285 *
286 * Returns: void (cannot fail)
287 *
288 * Modification log:
289 * Date Who Description
290 * ---------- --- --------------------------------------------------------
291 *
292 *==========================================================================*/
293void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
294	unsigned long end)
295{
296	smp_flush_tlb_mm(vma->vm_mm); /* range is ignored: whole-mm flush is the only granularity here */
297}
298
299/*==========================================================================*
300 * Name: smp_flush_tlb_page
301 *
302 * Description: This routine flushes one page.
303 *
304 * Born on Date: 2002.02.05
305 *
306 * Arguments: *vma - a pointer to the vma struct include va
307 * va - virtual address for flush TLB
308 *
309 * Returns: void (cannot fail)
310 *
311 * Modification log:
312 * Date Who Description
313 * ---------- --- --------------------------------------------------------
314 *
315 *==========================================================================*/
316void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
317{
318	struct mm_struct *mm = vma->vm_mm;
319	int cpu_id;
320	cpumask_t cpu_mask;
321	unsigned long *mmc;	/* this CPU's context slot for mm */
322	unsigned long flags;
323
324	preempt_disable();
325	cpu_id = smp_processor_id();
326	mmc = &mm->context[cpu_id];
327	cpumask_copy(&cpu_mask, mm_cpumask(mm));
328	cpumask_clear_cpu(cpu_id, &cpu_mask); /* remote CPUs that have used this mm */
329
330#ifdef DEBUG_SMP
331	if (!mm)
332		BUG();
333#endif
334
335	if (*mmc != NO_CONTEXT) {
336		local_irq_save(flags);
337		va &= PAGE_MASK;
338		va |= (*mmc & MMU_CONTEXT_ASID_MASK); /* tag the page address with our ASID */
339		__flush_tlb_page(va);
340		local_irq_restore(flags);
341	}
342	if (!cpumask_empty(&cpu_mask))
343		flush_tlb_others(cpu_mask, mm, vma, va); /* untagged va: remotes add their own ASID */
344
345	preempt_enable();
346}
347
348/*==========================================================================*
349 * Name: flush_tlb_others
350 *
351 * Description: This routine requests other CPU to execute flush TLB.
352 * 1.Setup parameters.
353 * 2.Send 'INVALIDATE_TLB_IPI' to other CPU.
354 * Request other CPU to execute 'smp_invalidate_interrupt()'.
355 * 3.Wait for other CPUs operation finished.
356 *
357 * Born on Date: 2002.02.05
358 *
359 * Arguments: cpumask - bitmap of target CPUs
360 * *mm - a pointer to the mm struct for flush TLB
361 * *vma - a pointer to the vma struct include va
362 * va - virtual address for flush TLB
363 *
364 * Returns: void (cannot fail)
365 *
366 * Modification log:
367 * Date Who Description
368 * ---------- --- --------------------------------------------------------
369 *
370 *==========================================================================*/
371static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
372	struct vm_area_struct *vma, unsigned long va)
373{
374	unsigned long *mask;
375#ifdef DEBUG_SMP
376	unsigned long flags;
377	__save_flags(flags);
378	if (!(flags & 0x0040)) /* Interrupt Disable NONONO */
379		BUG();
380#endif /* DEBUG_SMP */
381
382	/*
383	 * A couple of (to be removed) sanity checks:
384	 *
385	 * - we do not send IPIs to not-yet booted CPUs.
386	 * - current CPU must not be in mask
387	 * - mask must exist :)
388	 */
389	BUG_ON(cpumask_empty(&cpumask));
390
391	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
392	BUG_ON(!mm);
393
394	/* If a CPU which we ran on has gone down, OK. */
395	cpumask_and(&cpumask, &cpumask, cpu_online_mask);
396	if (cpumask_empty(&cpumask))
397		return;
398
399	/*
400	 * i'm not happy about this global shared spinlock in the
401	 * MM hot path, but we'll see how contended it is.
402	 * Temporarily this turns IRQs off, so that lockups are
403	 * detected by the NMI watchdog.
404	 */
405	spin_lock(&tlbstate_lock); /* protects the flush_* parameter globals below */
406
407	flush_mm = mm;
408	flush_vma = vma;
409	flush_va = va; /* parameters published for smp_invalidate_interrupt() */
410	mask=cpumask_bits(&cpumask);
411	atomic_or(*mask, (atomic_t *)&flush_cpumask); /* bits we will wait on */
412
413	/*
414	 * We have to send the IPI only to
415	 * CPUs affected.
416	 */
417	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
418
419	while (!cpumask_empty(&flush_cpumask)) {
420		/* nothing. lockup detection does not belong here */
421		mb(); /* each target clears its bit in smp_invalidate_interrupt() */
422	}
423
424	flush_mm = NULL;
425	flush_vma = NULL;
426	flush_va = 0;
427	spin_unlock(&tlbstate_lock);
428}
429
430/*==========================================================================*
431 * Name: smp_invalidate_interrupt
432 *
433 * Description: This routine executes on CPU which received
434 * 'INVALIDATE_TLB_IPI'.
435 * 1.Flush local TLB.
436 * 2.Report flush TLB process was finished.
437 *
438 * Born on Date: 2002.02.05
439 *
440 * Arguments: NONE
441 *
442 * Returns: void (cannot fail)
443 *
444 * Modification log:
445 * Date Who Description
446 * ---------- --- --------------------------------------------------------
447 *
448 *==========================================================================*/
449void smp_invalidate_interrupt(void) /* INVALIDATE_TLB_IPI handler: consume params set by flush_tlb_others() */
450{
451	int cpu_id = smp_processor_id();
452	unsigned long *mmc = &flush_mm->context[cpu_id];
453
454	if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
455		return; /* stale IPI: we're not among the waited-on CPUs */
456
457	if (flush_va == FLUSH_ALL) {
458		*mmc = NO_CONTEXT; /* drop our ASID for this mm */
459		if (flush_mm == current->active_mm)
460			activate_context(flush_mm);
461		else
462			cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
463	} else {
464		unsigned long va = flush_va;
465
466		if (*mmc != NO_CONTEXT) {
467			va &= PAGE_MASK;
468			va |= (*mmc & MMU_CONTEXT_ASID_MASK); /* tag with our local ASID */
469			__flush_tlb_page(va);
470		}
471	}
472	cpumask_clear_cpu(cpu_id, &flush_cpumask); /* ack: releases the initiator's spin-wait */
473}
474
475/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
476/* Stop CPU request Routines */
477/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
478
479/*==========================================================================*
480 * Name: smp_send_stop
481 *
482 * Description: This routine requests stop all CPUs.
483 * 1.Request other CPU to execute 'stop_this_cpu()'.
484 *
485 * Born on Date: 2002.02.05
486 *
487 * Arguments: NONE
488 *
489 * Returns: void (cannot fail)
490 *
491 * Modification log:
492 * Date Who Description
493 * ---------- --- --------------------------------------------------------
494 *
495 *==========================================================================*/
496void smp_send_stop(void)
497{
498	smp_call_function(stop_this_cpu, NULL, 0); /* wait=0: don't block — targets never return */
499}
500
501/*==========================================================================*
502 * Name: stop_this_cpu
503 *
504 * Description: This routine halts the current CPU.
505 *
506 * Born on Date: 2002.02.05
507 *
508 * Arguments: NONE
509 *
510 * Returns: void (cannot fail)
511 *
512 * Modification log:
513 * Date Who Description
514 * ---------- --- --------------------------------------------------------
515 *
516 *==========================================================================*/
517static void stop_this_cpu(void *dummy)
518{
519	int cpu_id = smp_processor_id();
520
521	/*
522	 * Remove this CPU:
523	 */
524	set_cpu_online(cpu_id, false);
525
526	/*
527	 * PSW IE = 1;
528	 * IMASK = 0;
529	 * goto SLEEP
530	 */
531	local_irq_disable();
532	outl(0, M32R_ICU_IMASK_PORTL); /* mask all interrupt levels at the ICU */
533	inl(M32R_ICU_IMASK_PORTL);	/* dummy read */
534	local_irq_enable();
535
536	for ( ; ; ); /* never returns; CPU idles here forever */
537}
538
539void arch_send_call_function_ipi_mask(const struct cpumask *mask)
540{
	/* Generic smp_call_function hook: kick all CPUs in mask. */
541	send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
542}
543
544void arch_send_call_function_single_ipi(int cpu)
545{
	/* Generic smp_call_function_single hook: kick one CPU. */
546	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
547}
548
549/*==========================================================================*
550 * Name: smp_call_function_interrupt
551 *
552 * Description: This routine executes on CPU which received
553 * 'CALL_FUNCTION_IPI'.
554 *
555 * Born on Date: 2002.02.05
556 *
557 * Arguments: NONE
558 *
559 * Returns: void (cannot fail)
560 *
561 * Modification log:
562 * Date Who Description
563 * ---------- --- --------------------------------------------------------
564 *
565 *==========================================================================*/
566void smp_call_function_interrupt(void)
567{
	/* CALL_FUNCTION_IPI handler: run queued cross-CPU calls. */
568	irq_enter();
569	generic_smp_call_function_interrupt();
570	irq_exit();
571}
572
573void smp_call_function_single_interrupt(void)
574{
	/* CALL_FUNC_SINGLE_IPI handler: run the single queued call. */
575	irq_enter();
576	generic_smp_call_function_single_interrupt();
577	irq_exit();
578}
579
580/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
581/* Timer Routines */
582/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
583
584/*==========================================================================*
585 * Name: smp_send_timer
586 *
587 * Description: This routine sends a 'LOCAL_TIMER_IPI' to all other CPUs
588 * in the system.
589 *
590 * Born on Date: 2002.02.05
591 *
592 * Arguments: NONE
593 *
594 * Returns: void (cannot fail)
595 *
596 * Modification log:
597 * Date Who Description
598 * ---------- --- --------------------------------------------------------
599 *
600 *==========================================================================*/
601void smp_send_timer(void)
602{
603	send_IPI_allbutself(LOCAL_TIMER_IPI, 1); /* try=1: skip CPUs that haven't taken the previous IPI */
604}
605
606/*==========================================================================*
607 * Name:        smp_ipi_timer_interrupt
608 *
609 * Description: This routine executes on CPU which received
610 * 'LOCAL_TIMER_IPI'.
611 *
612 * Born on Date: 2002.02.05
613 *
614 * Arguments:   *regs - a pointer to the saved register info
615 *
616 * Returns: void (cannot fail)
617 *
618 * Modification log:
619 * Date Who Description
620 * ---------- --- --------------------------------------------------------
621 *
622 *==========================================================================*/
623void smp_ipi_timer_interrupt(struct pt_regs *regs)
624{
	/* LOCAL_TIMER_IPI handler: run the local tick in irq context. */
625	struct pt_regs *old_regs;
626	old_regs = set_irq_regs(regs);
627	irq_enter();
628	smp_local_timer_interrupt();
629	irq_exit();
630	set_irq_regs(old_regs); /* restore whatever regs pointer was current before */
631}
632
633/*==========================================================================*
634 * Name: smp_local_timer_interrupt
635 *
636 * Description: Local timer interrupt handler. It does both profiling and
637 * process statistics/rescheduling.
638 * We do profiling in every local tick, statistics/rescheduling
639 * happen only every 'profiling multiplier' ticks. The default
640 * multiplier is 1 and it can be changed by writing the new
641 * multiplier value into /proc/profile.
642 *
643 * Born on Date: 2002.02.05
644 *
645 * Arguments:   *regs - a pointer to the saved register info
646 *
647 * Returns: void (cannot fail)
648 *
649 * Original: arch/i386/kernel/apic.c
650 *
651 * Modification log:
652 * Date Who Description
653 * ---------- --- --------------------------------------------------------
654 * 2003-06-24 hy use per_cpu structure.
655 *==========================================================================*/
656void smp_local_timer_interrupt(void)
657{
658	int user = user_mode(get_irq_regs()); /* was the tick taken in user mode? */
659	int cpu_id = smp_processor_id();
660
661	/*
662	 * The profiling function is SMP safe. (nothing can mess
663	 * around with "current", and the profiling counters are
664	 * updated with atomic operations). This is especially
665	 * useful with a profiling multiplier != 1
666	 */
667
668	profile_tick(CPU_PROFILING);
669
670	if (--per_cpu(prof_counter, cpu_id) <= 0) {
671		/*
672		 * The multiplier may have changed since the last time we got
673		 * to this point as a result of the user writing to
674		 * /proc/profile. In this case we need to adjust the APIC
675		 * timer accordingly.
676		 *
677		 * Interrupts are already masked off at this point.
678		 */
679		per_cpu(prof_counter, cpu_id)
680			= per_cpu(prof_multiplier, cpu_id); /* reload countdown from current multiplier */
681		if (per_cpu(prof_counter, cpu_id)
682		    != per_cpu(prof_old_multiplier, cpu_id))
683		{
684			per_cpu(prof_old_multiplier, cpu_id)
685				= per_cpu(prof_counter, cpu_id); /* remember the new multiplier */
686		}
687
688		update_process_times(user); /* statistics/rescheduling every 'multiplier' ticks */
689	}
690}
691
692/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
693/* Send IPI Routines */
694/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
695
696/*==========================================================================*
697 * Name: send_IPI_allbutself
698 *
699 * Description: This routine sends a IPI to all other CPUs in the system.
700 *
701 * Born on Date: 2002.02.05
702 *
703 * Arguments: ipi_num - Number of IPI
704 * try - 0 : Send IPI certainly.
705 * !0 : The following IPI is not sent when Target CPU
706 * has not received the before IPI.
707 *
708 * Returns: void (cannot fail)
709 *
710 * Modification log:
711 * Date Who Description
712 * ---------- --- --------------------------------------------------------
713 *
714 *==========================================================================*/
715static void send_IPI_allbutself(int ipi_num, int try)
716{
717	cpumask_t cpumask;
718
719	cpumask_copy(&cpumask, cpu_online_mask);
720	cpumask_clear_cpu(smp_processor_id(), &cpumask); /* everyone online except us */
721
722	send_IPI_mask(&cpumask, ipi_num, try);
723}
724
725/*==========================================================================*
726 * Name: send_IPI_mask
727 *
728 * Description: This routine sends a IPI to CPUs in the system.
729 *
730 * Born on Date: 2002.02.05
731 *
732 * Arguments: cpu_mask - Bitmap of target CPUs logical ID
733 * ipi_num - Number of IPI
734 * try - 0 : Send IPI certainly.
735 * !0 : The following IPI is not sent when Target CPU
736 * has not received the before IPI.
737 *
738 * Returns: void (cannot fail)
739 *
740 * Modification log:
741 * Date Who Description
742 * ---------- --- --------------------------------------------------------
743 *
744 *==========================================================================*/
745static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
746{
	/* Translate a logical-CPU mask to a physical-ID mask and send. */
747	cpumask_t physid_mask, tmp;
748	int cpu_id, phys_id;
749	int num_cpus = num_online_cpus();
750
751	if (num_cpus <= 1)	/* NO MP */
752		return;
753
754	cpumask_and(&tmp, cpumask, cpu_online_mask);
755	BUG_ON(!cpumask_equal(cpumask, &tmp)); /* caller must pass online CPUs only */
756
757	cpumask_clear(&physid_mask);
758	for_each_cpu(cpu_id, cpumask) {
759		if ((phys_id = cpu_to_physid(cpu_id)) != -1) /* -1: no physical mapping, skip */
760			cpumask_set_cpu(phys_id, &physid_mask);
761	}
762
763	send_IPI_mask_phys(&physid_mask, ipi_num, try);
764}
765
766/*==========================================================================*
767 * Name: send_IPI_mask_phys
768 *
769 * Description: This routine sends a IPI to other CPUs in the system.
770 *
771 * Born on Date: 2002.02.05
772 *
773 * Arguments: cpu_mask - Bitmap of target CPUs physical ID
774 * ipi_num - Number of IPI
775 * try - 0 : Send IPI certainly.
776 * !0 : The following IPI is not sent when Target CPU
777 * has not received the before IPI.
778 *
779 * Returns:     IPICRi register value.
780 *
781 * Modification log:
782 * Date Who Description
783 * ---------- --- --------------------------------------------------------
784 *
785 *==========================================================================*/
786unsigned long send_IPI_mask_phys(const cpumask_t *physid_mask, int ipi_num,
787	int try)
788{
789	spinlock_t *ipilock;
790	volatile unsigned long *ipicr_addr;	/* memory-mapped IPICRi register */
791	unsigned long ipicr_val;
792	unsigned long my_physid_mask;
793	unsigned long mask = cpumask_bits(physid_mask)[0]; /* fits one word: physical IDs < BITS_PER_LONG */
794
795
796	if (mask & ~physids_coerce(phys_cpu_present_map))
797		BUG(); /* refusing to IPI a CPU that doesn't exist */
798	if (ipi_num >= NR_IPIS || ipi_num < 0)
799		BUG();
800
801	mask <<= IPI_SHIFT;
802	ipilock = &ipi_lock[ipi_num]; /* one lock per IPI number */
803	ipicr_addr = (volatile unsigned long *)(M32R_ICU_IPICR_ADDR
804		+ (ipi_num << 2)); /* IPICR registers are 4 bytes apart */
805	my_physid_mask = ~(1 << smp_processor_id());
806
807	/*
808	 * lock ipi_lock[i]
809	 * check IPICRi == 0
810	 * write IPICRi (send IPIi)
811	 * unlock ipi_lock[i]
812	 */
813	spin_lock(ipilock);
814	__asm__ __volatile__ (
815		";; CHECK IPICRi == 0		\n\t"
816		".fillinsn			\n"
817		"1:				\n\t"
818		"ld	%0, @%1		\n\t"
819		"and	%0, %4		\n\t"
820		"beqz	%0, 2f		\n\t"	/* previous IPI consumed: go send */
821		"bnez	%3, 3f		\n\t"	/* try != 0: give up instead of spinning */
822		"bra	1b		\n\t"
823		";; WRITE IPICRi (send IPIi)	\n\t"
824		".fillinsn			\n"
825		"2:				\n\t"
826		"st	%2, @%1		\n\t"
827		".fillinsn			\n"
828		"3:				\n\t"
829		: "=&r"(ipicr_val)
830		: "r"(ipicr_addr), "r"(mask), "r"(try), "r"(my_physid_mask)
831		: "memory"
832	);
833	spin_unlock(ipilock);
834
835	return ipicr_val; /* nonzero: IPI was NOT sent (previous one still pending) */
836}
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
deleted file mode 100644
index a7d04684d2c7..000000000000
--- a/arch/m32r/kernel/smpboot.c
+++ /dev/null
@@ -1,627 +0,0 @@
1/*
2 * linux/arch/m32r/kernel/smpboot.c
3 * orig : i386 2.4.10
4 *
5 * M32R SMP booting functions
6 *
7 * Copyright (c) 2001, 2002, 2003 Hitoshi Yamamoto
8 *
9 * Taken from i386 version.
10 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
11 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
12 *
13 * Much of the core SMP work is based on previous work by Thomas Radke, to
14 * whom a great many thanks are extended.
15 *
16 * Thanks to Intel for making available several different Pentium,
17 * Pentium Pro and Pentium-II/Xeon MP machines.
18 * Original development of Linux SMP code supported by Caldera.
19 *
20 * This code is released under the GNU General Public License version 2 or
21 * later.
22 *
23 * Fixes
24 * Felix Koop : NR_CPUS used properly
25 * Jose Renau : Handle single CPU case.
26 * Alan Cox : By repeated request
27 * 8) - Total BogoMIP report.
28 * Greg Wright : Fix for kernel stacks panic.
29 * Erich Boleyn : MP v1.4 and additional changes.
30 * Matthias Sattler : Changes for 2.1 kernel map.
31 * Michel Lespinasse : Changes for 2.1 kernel map.
32 * Michael Chastain : Change trampoline.S to gnu as.
33 * Alan Cox : Dumb bug: 'B' step PPro's are fine
34 * Ingo Molnar : Added APIC timers, based on code
35 * from Jose Renau
36 * Ingo Molnar : various cleanups and rewrites
37 * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug.
38 * Maciej W. Rozycki : Bits for genuine 82489DX APICs
39 * Martin J. Bligh : Added support for multi-quad systems
40 */
41
42#include <linux/module.h>
43#include <linux/cpu.h>
44#include <linux/init.h>
45#include <linux/kernel.h>
46#include <linux/mm.h>
47#include <linux/sched.h>
48#include <linux/sched/task.h>
49#include <linux/err.h>
50#include <linux/irq.h>
51#include <linux/bootmem.h>
52#include <linux/delay.h>
53
54#include <asm/io.h>
55#include <asm/pgalloc.h>
56#include <asm/tlbflush.h>
57
58#define DEBUG_SMP
59#ifdef DEBUG_SMP
60#define Dprintk(x...) printk(x)
61#else
62#define Dprintk(x...)
63#endif
64
65extern cpumask_t cpu_initialized;
66
67/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
68/* Data structures and variables */
69/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
70
71/* Processor that is doing the boot up */
72static unsigned int bsp_phys_id = -1;
73
74/* Bitmask of physically existing CPUs */
75physid_mask_t phys_cpu_present_map;
76
77cpumask_t cpu_bootout_map;
78cpumask_t cpu_bootin_map;
79static cpumask_t cpu_callin_map;
80cpumask_t cpu_callout_map;
81EXPORT_SYMBOL(cpu_callout_map);
82
83/* Per CPU bogomips and other parameters */
84struct cpuinfo_m32r cpu_data[NR_CPUS] __cacheline_aligned;
85
86static int cpucount;
87static cpumask_t smp_commenced_mask;
88
89extern struct {
90 void * spi;
91 unsigned short ss;
92} stack_start;
93
94/* which physical ID maps to which logical CPU number */
95static volatile int physid_2_cpu[NR_CPUS];
96#define physid_to_cpu(physid) physid_2_cpu[physid]
97
98/* which logical CPU number maps to which physical ID */
99volatile int cpu_2_physid[NR_CPUS];
100
101DEFINE_PER_CPU(int, prof_multiplier) = 1;
102DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
103DEFINE_PER_CPU(int, prof_counter) = 1;
104
105spinlock_t ipi_lock[NR_IPIS];
106
107static unsigned int calibration_result;
108
109/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
110/* Function Prototypes */
111/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
112
113static void init_ipi_lock(void);
114static void do_boot_cpu(int);
115
116int start_secondary(void *);
117static void smp_callin(void);
118static void smp_online(void);
119
120static void show_mp_info(int);
121static void smp_store_cpu_info(int);
122static void show_cpu_info(int);
123int setup_profiling_timer(unsigned int);
124static void init_cpu_to_physid(void);
125static void map_cpu_to_physid(int, int);
126static void unmap_cpu_to_physid(int, int);
127
128/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
129/* Boot up APs Routines : BSP */
130/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
void smp_prepare_boot_cpu(void)
132{
	/* Register the boot CPU (logical CPU 0) in all the bookkeeping maps. */
133	bsp_phys_id = hard_smp_processor_id();
134	physid_set(bsp_phys_id, phys_cpu_present_map);
135	set_cpu_online(0, true);	/* BSP's cpu_id == 0 */
136	cpumask_set_cpu(0, &cpu_callout_map);
137	cpumask_set_cpu(0, &cpu_callin_map); /* BSP trivially "called in" */
138
139	/*
140	 * Initialize the logical to physical CPU number mapping
141	 */
142	init_cpu_to_physid();
143	map_cpu_to_physid(0, bsp_phys_id);
144	current_thread_info()->cpu = 0;
145}
146
147/*==========================================================================*
148 * Name: smp_prepare_cpus (old smp_boot_cpus)
149 *
150 * Description: This routine boot up APs.
151 *
152 * Born on Date: 2002.02.05
153 *
154 * Arguments: NONE
155 *
156 * Returns: void (cannot fail)
157 *
158 * Modification log:
159 * Date Who Description
160 * ---------- --- --------------------------------------------------------
161 * 2003-06-24 hy modify for linux-2.5.69
162 *
163 *==========================================================================*/
void __init smp_prepare_cpus(unsigned int max_cpus)
165{
166	int phys_id;
167	unsigned long nr_cpu;
168
169	nr_cpu = inl(M32R_FPGA_NUM_OF_CPUS_PORTL); /* CPU count from an FPGA register */
170	if (nr_cpu > NR_CPUS) {
171		printk(KERN_INFO "NUM_OF_CPUS reg. value [%ld] > NR_CPU [%d]",
172			nr_cpu, NR_CPUS);
173		goto smp_done; /* can't handle more CPUs than we were built for */
174	}
175	for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
176		physid_set(phys_id, phys_cpu_present_map);
177#ifndef CONFIG_HOTPLUG_CPU
178	init_cpu_present(cpu_possible_mask);
179#endif
180
181	show_mp_info(nr_cpu);
182
183	init_ipi_lock();
184
185	/*
186	 * Setup boot CPU information
187	 */
188	smp_store_cpu_info(0); /* Final full version of the data */
189
190	/*
191	 * If SMP should be disabled, then really disable it!
192	 */
193	if (!max_cpus) {
194		printk(KERN_INFO "SMP mode deactivated by commandline.\n");
195		goto smp_done;
196	}
197
198	/*
199	 * Now scan the CPU present map and fire up the other CPUs.
200	 */
201	Dprintk("CPU present map : %lx\n", physids_coerce(phys_cpu_present_map));
202
203	for (phys_id = 0 ; phys_id < NR_CPUS ; phys_id++) {
204		/*
205		 * Don't even attempt to start the boot CPU!
206		 */
207		if (phys_id == bsp_phys_id)
208			continue;
209
210		if (!physid_isset(phys_id, phys_cpu_present_map))
211			continue;
212
213		if (max_cpus <= cpucount + 1) /* honor maxcpus= limit */
214			continue;
215
216		do_boot_cpu(phys_id);
217
218		/*
219		 * Make sure we unmap all failed CPUs
220		 */
221		if (physid_to_cpu(phys_id) == -1) { /* boot failed: mapping was undone */
222			physid_clear(phys_id, phys_cpu_present_map);
223			printk("phys CPU#%d not responding - " \
224				"cannot use it.\n", phys_id);
225		}
226	}
227
228smp_done:
229	Dprintk("Boot done.\n");
230}
231
232/*
233 * init_ipi_lock : Initialize IPI locks.
234 */
static void __init init_ipi_lock(void)
236{
	/* One spinlock per IPI number; used by send_IPI_mask_phys(). */
237	int ipi;
238
239	for (ipi = 0 ; ipi < NR_IPIS ; ipi++)
240		spin_lock_init(&ipi_lock[ipi]);
241}
242
243/*==========================================================================*
244 * Name: do_boot_cpu
245 *
246 * Description: This routine boot up one AP.
247 *
248 * Born on Date: 2002.02.05
249 *
250 * Arguments: phys_id - Target CPU physical ID
251 *
252 * Returns: void (cannot fail)
253 *
254 * Modification log:
255 * Date Who Description
256 * ---------- --- --------------------------------------------------------
257 * 2003-06-24 hy modify for linux-2.5.69
258 *
259 *==========================================================================*/
static void __init do_boot_cpu(int phys_id)
{
	struct task_struct *idle;
	unsigned long send_status, boot_status;
	int timeout, cpu_id;

	/* Allocate the next logical CPU id (the BSP is 0, APs start at 1). */
	cpu_id = ++cpucount;

	/*
	 * We can't use kernel_thread since we must avoid to
	 * reschedule the child.
	 */
	idle = fork_idle(cpu_id);
	if (IS_ERR(idle))
		panic("failed fork for CPU#%d.", cpu_id);

	/* The AP resumes at start_secondary() (entered via head.S). */
	idle->thread.lr = (unsigned long)start_secondary;

	map_cpu_to_physid(cpu_id, phys_id);

	/* So we see what's up */
	printk("Booting processor %d/%d\n", phys_id, cpu_id);
	/* Hand the AP its initial stack pointer via the shared stack_start. */
	stack_start.spi = (void *)idle->thread.sp;
	task_thread_info(idle)->cpu = cpu_id;

	/*
	 * Send Startup IPI
	 *   1.IPI received by CPU#(phys_id).
	 *   2.CPU#(phys_id) enter startup_AP (arch/m32r/kernel/head.S)
	 *   3.CPU#(phys_id) enter start_secondary()
	 */
	send_status = 0;	/* nonzero: AP never acknowledged the startup IPI */
	boot_status = 0;	/* nonzero: AP acknowledged but never called in */

	cpumask_set_cpu(phys_id, &cpu_bootout_map);

	/* Send Startup IPI */
	send_IPI_mask_phys(cpumask_of(phys_id), CPU_BOOT_IPI, 0);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;

	/* Wait 100[ms] */
	do {
		Dprintk("+");
		udelay(1000);
		/* AP sets itself in cpu_bootin_map when it takes the IPI. */
		send_status = !cpumask_test_cpu(phys_id, &cpu_bootin_map);
	} while (send_status && (timeout++ < 100));

	Dprintk("After Startup.\n");

	if (!send_status) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu_id);
		cpumask_set_cpu(cpu_id, &cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu_id);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 5000; timeout++) {
			if (cpumask_test_cpu(cpu_id, &cpu_callin_map))
				break;	/* It has booted */
			udelay(1000);
		}

		if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
		} else {
			boot_status = 1;
			printk("Not responding.\n");
		}
	} else
		printk("IPI never delivered???\n");

	/* On any failure, undo the bookkeeping so the slot can be ignored. */
	if (send_status || boot_status) {
		unmap_cpu_to_physid(cpu_id, phys_id);
		cpumask_clear_cpu(cpu_id, &cpu_callout_map);
		cpumask_clear_cpu(cpu_id, &cpu_callin_map);
		cpumask_clear_cpu(cpu_id, &cpu_initialized);
		cpucount--;
	}
}
346
347int __cpu_up(unsigned int cpu_id, struct task_struct *tidle)
348{
349 int timeout;
350
351 cpumask_set_cpu(cpu_id, &smp_commenced_mask);
352
353 /*
354 * Wait 5s total for a response
355 */
356 for (timeout = 0; timeout < 5000; timeout++) {
357 if (cpu_online(cpu_id))
358 break;
359 udelay(1000);
360 }
361 if (!cpu_online(cpu_id))
362 BUG();
363
364 return 0;
365}
366
void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu_id, timeout;
	unsigned long bogosum = 0;

	/* Wait up to 5s (5000 x 1ms) for every online CPU to have called in. */
	for (timeout = 0; timeout < 5000; timeout++) {
		if (cpumask_equal(&cpu_callin_map, cpu_online_mask))
			break;
		udelay(1000);
	}
	if (!cpumask_equal(&cpu_callin_map, cpu_online_mask))
		BUG();

	for_each_online_cpu(cpu_id)
		show_cpu_info(cpu_id);

	/*
	 * Allow the user to impress friends.
	 */
	Dprintk("Before bogomips.\n");
	/* cpucount counts APs only; the summary includes the BSP (+1). */
	if (cpucount) {
		for_each_cpu(cpu_id,cpu_online_mask)
			bogosum += cpu_data[cpu_id].loops_per_jiffy;

		printk(KERN_INFO "Total of %d processors activated " \
			"(%lu.%02lu BogoMIPS).\n", cpucount + 1,
			bogosum / (500000 / HZ),
			(bogosum / (5000 / HZ)) % 100);
		Dprintk("Before bogocount - setting activated=1.\n");
	}
}
398
399/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
400/* Activate a secondary processor Routines */
401/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
402
403/*==========================================================================*
404 * Name: start_secondary
405 *
406 * Description: This routine activate a secondary processor.
407 *
408 * Born on Date: 2002.02.05
409 *
410 * Arguments: *unused - currently unused.
411 *
412 * Returns: void (cannot fail)
413 *
414 * Modification log:
415 * Date Who Description
416 * ---------- --- --------------------------------------------------------
417 * 2003-06-24 hy modify for linux-2.5.69
418 *
419 *==========================================================================*/
int __init start_secondary(void *unused)
{
	/* Per-CPU low-level initialisation (see cpu_init()). */
	cpu_init();
	preempt_disable();
	/* Report in to the boot CPU and complete the callout handshake. */
	smp_callin();
	/* Spin until __cpu_up() releases us via smp_commenced_mask. */
	while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
		cpu_relax();

	/* Calibrate delay, store cpuinfo and mark this CPU online. */
	smp_online();

	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb_all();

	/* Enter the idle loop; does not return in practice. */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
	return 0;
}
439
440/*==========================================================================*
441 * Name: smp_callin
442 *
443 * Description: This routine activate a secondary processor.
444 *
445 * Born on Date: 2002.02.05
446 *
447 * Arguments: NONE
448 *
449 * Returns: void (cannot fail)
450 *
451 * Modification log:
452 * Date Who Description
453 * ---------- --- --------------------------------------------------------
454 * 2003-06-24 hy modify for linux-2.5.69
455 *
456 *==========================================================================*/
static void __init smp_callin(void)
{
	int phys_id = hard_smp_processor_id();
	int cpu_id = smp_processor_id();
	unsigned long timeout;

	/* A second call-in for the same CPU means the boot maps are corrupt. */
	if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
		printk("huh, phys CPU#%d, CPU#%d already present??\n",
			phys_id, cpu_id);
		BUG();
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpu_id, phys_id);

	/* Waiting 2s total for startup (udelay is not yet working) */
	timeout = jiffies + (2 * HZ);
	while (time_before(jiffies, timeout)) {
		/* Has the boot CPU finished it's STARTUP sequence ? */
		if (cpumask_test_cpu(cpu_id, &cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		printk("BUG: CPU#%d started up but did not get a callout!\n",
			cpu_id);
		BUG();
	}

	/* Allow the master to continue. */
	cpumask_set_cpu(cpu_id, &cpu_callin_map);
}
488
/* Final bring-up steps on the AP itself; ordering here is deliberate. */
static void __init smp_online(void)
{
	int cpu_id = smp_processor_id();

	/* Tell the CPU-hotplug machinery this CPU is coming up. */
	notify_cpu_starting(cpu_id);

	local_irq_enable();

	/* Get our bogomips. */
	calibrate_delay();

	/* Save our processor parameters */
	smp_store_cpu_info(cpu_id);

	/* Visible to __cpu_up()'s cpu_online() poll on the boot CPU. */
	set_cpu_online(cpu_id, true);
}
505
506/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
507/* Boot up CPUs common Routines */
508/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
509static void __init show_mp_info(int nr_cpu)
510{
511 int i;
512 char cpu_model0[17], cpu_model1[17], cpu_ver[9];
513
514 strncpy(cpu_model0, (char *)M32R_FPGA_CPU_NAME_ADDR, 16);
515 strncpy(cpu_model1, (char *)M32R_FPGA_MODEL_ID_ADDR, 16);
516 strncpy(cpu_ver, (char *)M32R_FPGA_VERSION_ADDR, 8);
517
518 cpu_model0[16] = '\0';
519 for (i = 15 ; i >= 0 ; i--) {
520 if (cpu_model0[i] != ' ')
521 break;
522 cpu_model0[i] = '\0';
523 }
524 cpu_model1[16] = '\0';
525 for (i = 15 ; i >= 0 ; i--) {
526 if (cpu_model1[i] != ' ')
527 break;
528 cpu_model1[i] = '\0';
529 }
530 cpu_ver[8] = '\0';
531 for (i = 7 ; i >= 0 ; i--) {
532 if (cpu_ver[i] != ' ')
533 break;
534 cpu_ver[i] = '\0';
535 }
536
537 printk(KERN_INFO "M32R-mp information\n");
538 printk(KERN_INFO " On-chip CPUs : %d\n", nr_cpu);
539 printk(KERN_INFO " CPU model : %s/%s(%s)\n", cpu_model0,
540 cpu_model1, cpu_ver);
541}
542
543/*
544 * The bootstrap kernel entry code has set these up. Save them for
545 * a given CPU
546 */
547static void __init smp_store_cpu_info(int cpu_id)
548{
549 struct cpuinfo_m32r *ci = cpu_data + cpu_id;
550
551 *ci = boot_cpu_data;
552 ci->loops_per_jiffy = loops_per_jiffy;
553}
554
555static void __init show_cpu_info(int cpu_id)
556{
557 struct cpuinfo_m32r *ci = &cpu_data[cpu_id];
558
559 printk("CPU#%d : ", cpu_id);
560
561#define PRINT_CLOCK(name, value) \
562 printk(name " clock %d.%02dMHz", \
563 ((value) / 1000000), ((value) % 1000000) / 10000)
564
565 PRINT_CLOCK("CPU", (int)ci->cpu_clock);
566 PRINT_CLOCK(", Bus", (int)ci->bus_clock);
567 printk(", loops_per_jiffy[%ld]\n", ci->loops_per_jiffy);
568}
569
570/*
571 * the frequency of the profiling timer can be changed
572 * by writing a multiplier value into /proc/profile.
573 */
574int setup_profiling_timer(unsigned int multiplier)
575{
576 int i;
577
578 /*
579 * Sanity check. [at least 500 APIC cycles should be
580 * between APIC interrupts as a rule of thumb, to avoid
581 * irqs flooding us]
582 */
583 if ( (!multiplier) || (calibration_result / multiplier < 500))
584 return -EINVAL;
585
586 /*
587 * Set the new multiplier for each CPU. CPUs don't start using the
588 * new values until the next timer interrupt in which they do process
589 * accounting. At that time they also adjust their APIC timers
590 * accordingly.
591 */
592 for_each_possible_cpu(i)
593 per_cpu(prof_multiplier, i) = multiplier;
594
595 return 0;
596}
597
598/* Initialize all maps between cpu number and apicids */
599static void __init init_cpu_to_physid(void)
600{
601 int i;
602
603 for (i = 0 ; i < NR_CPUS ; i++) {
604 cpu_2_physid[i] = -1;
605 physid_2_cpu[i] = -1;
606 }
607}
608
609/*
610 * set up a mapping between cpu and apicid. Uses logical apicids for multiquad,
611 * else physical apic ids
612 */
613static void __init map_cpu_to_physid(int cpu_id, int phys_id)
614{
615 physid_2_cpu[phys_id] = cpu_id;
616 cpu_2_physid[cpu_id] = phys_id;
617}
618
619/*
620 * undo a mapping between cpu and apicid. Uses logical apicids for multiquad,
621 * else physical apic ids
622 */
623static void __init unmap_cpu_to_physid(int cpu_id, int phys_id)
624{
625 physid_2_cpu[phys_id] = -1;
626 cpu_2_physid[cpu_id] = -1;
627}
diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c
deleted file mode 100644
index 22a50fc49ab7..000000000000
--- a/arch/m32r/kernel/sys_m32r.c
+++ /dev/null
@@ -1,91 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/arch/m32r/kernel/sys_m32r.c
4 *
5 * This file contains various random system calls that
6 * have a non-standard calling sequence on the Linux/M32R platform.
7 *
8 * Taken from i386 version.
9 */
10
11#include <linux/errno.h>
12#include <linux/sched.h>
13#include <linux/mm.h>
14#include <linux/fs.h>
15#include <linux/smp.h>
16#include <linux/sem.h>
17#include <linux/msg.h>
18#include <linux/shm.h>
19#include <linux/stat.h>
20#include <linux/syscalls.h>
21#include <linux/mman.h>
22#include <linux/file.h>
23#include <linux/utsname.h>
24#include <linux/ipc.h>
25
26#include <linux/uaccess.h>
27#include <asm/cachectl.h>
28#include <asm/cacheflush.h>
29#include <asm/syscall.h>
30#include <asm/unistd.h>
31
32/*
33 * sys_tas() - test-and-set
34 */
/*
 * Atomically fetch the old value of *addr and store 1 there.
 * Returns the old value, or -EFAULT on a bad user pointer.
 * NOTE(review): -EFAULT is in-band — a stored value of -EFAULT is
 * indistinguishable from a fault to the caller (historical ABI).
 */
asmlinkage int sys_tas(int __user *addr)
{
	int oldval;

	if (!access_ok(VERIFY_WRITE, addr, sizeof (int)))
		return -EFAULT;

	/* atomic operation:
	 * oldval = *addr; *addr = 1;
	 */
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r4", "%1")
		"	.fillinsn\n"
		"1:\n"
		"	lock	%0, @%1	    ->	unlock	%2, @%1\n"
		"2:\n"
		/* NOTE:
		 *   The m32r processor can accept interrupts only
		 *   at the 32-bit instruction boundary.
		 *   So, in the above code, the "unlock" instruction
		 *   can be executed continuously after the "lock"
		 *   instruction execution without any interruptions.
		 */
		/* Fault fixup: load -EFAULT and jump back past the unlock. */
		".section .fixup,\"ax\"\n"
		"	.balign 4\n"
		"3:	ldi	%0, #%3\n"
		"	seth	r14, #high(2b)\n"
		"	or3	r14, r14, #low(2b)\n"
		"	jmp	r14\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign 4\n"
		"	.long 1b,3b\n"
		".previous\n"
		: "=&r" (oldval)
		: "r" (addr), "r" (1), "i"(-EFAULT)
		: "r14", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r4"
#endif /* CONFIG_CHIP_M32700_TS1 */
	);

	return oldval;
}
79
/*
 * Flush caches for a user range.  The addr/bytes/cache arguments are
 * currently ignored: the whole cache is flushed instead.  Always 0.
 */
asmlinkage int sys_cacheflush(void *addr, int bytes, int cache)
{
	/* This should flush more selectively ... */
	_flush_cache_all();
	return 0;
}
86
/* Cache-control syscall stub; unconditionally reports -ENOSYS. */
asmlinkage int sys_cachectl(char *addr, int nbytes, int op)
{
	/* Not implemented yet. */
	return -ENOSYS;
}
diff --git a/arch/m32r/kernel/syscall_table.S b/arch/m32r/kernel/syscall_table.S
deleted file mode 100644
index cf0bcf014b98..000000000000
--- a/arch/m32r/kernel/syscall_table.S
+++ /dev/null
@@ -1,328 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2ENTRY(sys_call_table)
3 .long sys_restart_syscall /* 0 - old "setup()" system call*/
4 .long sys_exit
5 .long sys_fork
6 .long sys_read
7 .long sys_write
8 .long sys_open /* 5 */
9 .long sys_close
10 .long sys_waitpid
11 .long sys_creat
12 .long sys_link
13 .long sys_unlink /* 10 */
14 .long sys_execve
15 .long sys_chdir
16 .long sys_time
17 .long sys_mknod
18 .long sys_chmod /* 15 */
19 .long sys_ni_syscall /* lchown16 syscall holder */
20 .long sys_ni_syscall /* old break syscall holder */
21 .long sys_ni_syscall /* old stat syscall holder */
22 .long sys_lseek
23 .long sys_getpid /* 20 */
24 .long sys_mount
25 .long sys_oldumount
26 .long sys_ni_syscall /* setuid16 syscall holder */
27 .long sys_ni_syscall /* getuid16 syscall holder */
28 .long sys_stime /* 25 */
29 .long sys_ptrace
30 .long sys_alarm
31 .long sys_ni_syscall /* old fstat syscall holder */
32 .long sys_pause
33 .long sys_utime /* 30 */
34 .long sys_ni_syscall /* old stty syscall holder */
35 .long sys_cachectl /* for M32R */ /* old gtty syscall holder */
36 .long sys_access
37 .long sys_ni_syscall /* nice syscall holder */
38 .long sys_ni_syscall /* 35 - old ftime syscall holder */
39 .long sys_sync
40 .long sys_kill
41 .long sys_rename
42 .long sys_mkdir
43 .long sys_rmdir /* 40 */
44 .long sys_dup
45 .long sys_pipe
46 .long sys_times
47 .long sys_ni_syscall /* old prof syscall holder */
48 .long sys_brk /* 45 */
49 .long sys_ni_syscall /* setgid16 syscall holder */
50 .long sys_getgid /* will be unused */
51 .long sys_ni_syscall /* signal syscall holder */
52 .long sys_ni_syscall /* geteuid16 syscall holder */
53 .long sys_ni_syscall /* 50 - getegid16 syscall holder */
54 .long sys_acct
55 .long sys_umount /* recycled never used phys() */
56 .long sys_ni_syscall /* old lock syscall holder */
57 .long sys_ioctl
58 .long sys_fcntl /* 55 - will be unused */
59 .long sys_ni_syscall /* mpx syscall holder */
60 .long sys_setpgid
61 .long sys_ni_syscall /* old ulimit syscall holder */
62 .long sys_ni_syscall /* sys_olduname */
63 .long sys_umask /* 60 */
64 .long sys_chroot
65 .long sys_ustat
66 .long sys_dup2
67 .long sys_getppid
68 .long sys_getpgrp /* 65 */
69 .long sys_setsid
70 .long sys_ni_syscall /* sigaction syscall holder */
71 .long sys_ni_syscall /* sgetmask syscall holder */
72 .long sys_ni_syscall /* ssetmask syscall holder */
73 .long sys_ni_syscall /* 70 - setreuid16 syscall holder */
74 .long sys_ni_syscall /* setregid16 syscall holder */
75 .long sys_ni_syscall /* sigsuspend syscall holder */
76 .long sys_ni_syscall /* sigpending syscall holder */
77 .long sys_sethostname
78 .long sys_setrlimit /* 75 */
79 .long sys_getrlimit/*will be unused*/
80 .long sys_getrusage
81 .long sys_gettimeofday
82 .long sys_settimeofday
83 .long sys_ni_syscall /* 80 - getgroups16 syscall holder */
84 .long sys_ni_syscall /* setgroups16 syscall holder */
85 .long sys_ni_syscall /* sys_oldselect */
86 .long sys_symlink
87 .long sys_ni_syscall /* old lstat syscall holder */
88 .long sys_readlink /* 85 */
89 .long sys_uselib
90 .long sys_swapon
91 .long sys_reboot
92 .long sys_ni_syscall /* readdir syscall holder */
93 .long sys_ni_syscall /* 90 - old_mmap syscall holder */
94 .long sys_munmap
95 .long sys_truncate
96 .long sys_ftruncate
97 .long sys_fchmod
98 .long sys_ni_syscall /* 95 - fchwon16 syscall holder */
99 .long sys_getpriority
100 .long sys_setpriority
101 .long sys_ni_syscall /* old profil syscall holder */
102 .long sys_statfs
103 .long sys_fstatfs /* 100 */
104 .long sys_ni_syscall /* ioperm syscall holder */
105 .long sys_socketcall
106 .long sys_syslog
107 .long sys_setitimer
108 .long sys_getitimer /* 105 */
109 .long sys_newstat
110 .long sys_newlstat
111 .long sys_newfstat
112 .long sys_ni_syscall /* old uname syscall holder */
113 .long sys_ni_syscall /* 110 - iopl syscall holder */
114 .long sys_vhangup
115 .long sys_ni_syscall /* idle syscall holder */
116 .long sys_ni_syscall /* vm86old syscall holder */
117 .long sys_wait4
118 .long sys_swapoff /* 115 */
119 .long sys_sysinfo
120 .long sys_ipc
121 .long sys_fsync
122 .long sys_ni_syscall /* sigreturn syscall holder */
123 .long sys_clone /* 120 */
124 .long sys_setdomainname
125 .long sys_newuname
126 .long sys_ni_syscall /* modify_ldt syscall holder */
127 .long sys_adjtimex
128 .long sys_mprotect /* 125 */
129 .long sys_ni_syscall /* sigprocmask syscall holder */
130 .long sys_ni_syscall /* create_module syscall holder */
131 .long sys_init_module
132 .long sys_delete_module
133 .long sys_ni_syscall /* 130 - get_kernel_syms */
134 .long sys_quotactl
135 .long sys_getpgid
136 .long sys_fchdir
137 .long sys_bdflush
138 .long sys_sysfs /* 135 */
139 .long sys_personality
140 .long sys_ni_syscall /* afs_syscall syscall holder */
141 .long sys_ni_syscall /* setfsuid16 syscall holder */
142 .long sys_ni_syscall /* setfsgid16 syscall holder */
143 .long sys_llseek /* 140 */
144 .long sys_getdents
145 .long sys_select
146 .long sys_flock
147 .long sys_msync
148 .long sys_readv /* 145 */
149 .long sys_writev
150 .long sys_getsid
151 .long sys_fdatasync
152 .long sys_sysctl
153 .long sys_mlock /* 150 */
154 .long sys_munlock
155 .long sys_mlockall
156 .long sys_munlockall
157 .long sys_sched_setparam
158 .long sys_sched_getparam /* 155 */
159 .long sys_sched_setscheduler
160 .long sys_sched_getscheduler
161 .long sys_sched_yield
162 .long sys_sched_get_priority_max
163 .long sys_sched_get_priority_min /* 160 */
164 .long sys_sched_rr_get_interval
165 .long sys_nanosleep
166 .long sys_mremap
167 .long sys_ni_syscall /* setresuid16 syscall holder */
168 .long sys_ni_syscall /* 165 - getresuid16 syscall holder */
169 .long sys_tas /* vm86 syscall holder */
170 .long sys_ni_syscall /* query_module syscall holder */
171 .long sys_poll
172 .long sys_ni_syscall /* was nfsservctl */
173 .long sys_setresgid /* 170 */
174 .long sys_getresgid
175 .long sys_prctl
176 .long sys_rt_sigreturn
177 .long sys_rt_sigaction
178 .long sys_rt_sigprocmask /* 175 */
179 .long sys_rt_sigpending
180 .long sys_rt_sigtimedwait
181 .long sys_rt_sigqueueinfo
182 .long sys_rt_sigsuspend
183 .long sys_pread64 /* 180 */
184 .long sys_pwrite64
185 .long sys_ni_syscall /* chown16 syscall holder */
186 .long sys_getcwd
187 .long sys_capget
188 .long sys_capset /* 185 */
189 .long sys_sigaltstack
190 .long sys_sendfile
191 .long sys_ni_syscall /* streams1 */
192 .long sys_ni_syscall /* streams2 */
193 .long sys_vfork /* 190 */
194 .long sys_getrlimit
195 .long sys_mmap_pgoff
196 .long sys_truncate64
197 .long sys_ftruncate64
198 .long sys_stat64 /* 195 */
199 .long sys_lstat64
200 .long sys_fstat64
201 .long sys_lchown
202 .long sys_getuid
203 .long sys_getgid /* 200 */
204 .long sys_geteuid
205 .long sys_getegid
206 .long sys_setreuid
207 .long sys_setregid
208 .long sys_getgroups /* 205 */
209 .long sys_setgroups
210 .long sys_fchown
211 .long sys_setresuid
212 .long sys_getresuid
213 .long sys_setresgid /* 210 */
214 .long sys_getresgid
215 .long sys_chown
216 .long sys_setuid
217 .long sys_setgid
218 .long sys_setfsuid /* 215 */
219 .long sys_setfsgid
220 .long sys_pivot_root
221 .long sys_mincore
222 .long sys_madvise
223 .long sys_getdents64 /* 220 */
224 .long sys_fcntl64
225 .long sys_ni_syscall /* reserved for TUX */
226 .long sys_ni_syscall /* Reserved for Security */
227 .long sys_gettid
228 .long sys_readahead /* 225 */
229 .long sys_setxattr
230 .long sys_lsetxattr
231 .long sys_fsetxattr
232 .long sys_getxattr
233 .long sys_lgetxattr /* 230 */
234 .long sys_fgetxattr
235 .long sys_listxattr
236 .long sys_llistxattr
237 .long sys_flistxattr
238 .long sys_removexattr /* 235 */
239 .long sys_lremovexattr
240 .long sys_fremovexattr
241 .long sys_tkill
242 .long sys_sendfile64
243 .long sys_futex /* 240 */
244 .long sys_sched_setaffinity
245 .long sys_sched_getaffinity
246 .long sys_ni_syscall /* reserved for "set_thread_area" system call */
247 .long sys_ni_syscall /* reserved for "get_thread_area" system call */
248 .long sys_io_setup /* 245 */
249 .long sys_io_destroy
250 .long sys_io_getevents
251 .long sys_io_submit
252 .long sys_io_cancel
253 .long sys_fadvise64 /* 250 */
254 .long sys_ni_syscall
255 .long sys_exit_group
256 .long sys_lookup_dcookie
257 .long sys_epoll_create
258 .long sys_epoll_ctl /* 255 */
259 .long sys_epoll_wait
260 .long sys_remap_file_pages
261 .long sys_set_tid_address
262 .long sys_timer_create
263 .long sys_timer_settime /* 260 */
264 .long sys_timer_gettime
265 .long sys_timer_getoverrun
266 .long sys_timer_delete
267 .long sys_clock_settime
268 .long sys_clock_gettime /* 265 */
269 .long sys_clock_getres
270 .long sys_clock_nanosleep
271 .long sys_statfs64
272 .long sys_fstatfs64
273 .long sys_tgkill /* 270 */
274 .long sys_utimes
275 .long sys_fadvise64_64
276 .long sys_ni_syscall /* Reserved for sys_vserver */
277 .long sys_ni_syscall /* Reserved for sys_mbind */
278 .long sys_ni_syscall /* Reserved for sys_get_mempolicy */
279 .long sys_ni_syscall /* Reserved for sys_set_mempolicy */
280 .long sys_mq_open
281 .long sys_mq_unlink
282 .long sys_mq_timedsend
283 .long sys_mq_timedreceive /* 280 */
284 .long sys_mq_notify
285 .long sys_mq_getsetattr
286 .long sys_ni_syscall /* reserved for kexec */
287 .long sys_waitid
288 .long sys_ni_syscall /* 285 */ /* available */
289 .long sys_add_key
290 .long sys_request_key
291 .long sys_keyctl
292 .long sys_ioprio_set
293 .long sys_ioprio_get /* 290 */
294 .long sys_inotify_init
295 .long sys_inotify_add_watch
296 .long sys_inotify_rm_watch
297 .long sys_migrate_pages
298 .long sys_openat /* 295 */
299 .long sys_mkdirat
300 .long sys_mknodat
301 .long sys_fchownat
302 .long sys_futimesat
303 .long sys_fstatat64 /* 300 */
304 .long sys_unlinkat
305 .long sys_renameat
306 .long sys_linkat
307 .long sys_symlinkat
308 .long sys_readlinkat /* 305 */
309 .long sys_fchmodat
310 .long sys_faccessat
311 .long sys_pselect6
312 .long sys_ppoll
313 .long sys_unshare /* 310 */
314 .long sys_set_robust_list
315 .long sys_get_robust_list
316 .long sys_splice
317 .long sys_sync_file_range
318 .long sys_tee /* 315 */
319 .long sys_vmsplice
320 .long sys_move_pages
321 .long sys_getcpu
322 .long sys_epoll_pwait
323 .long sys_utimensat /* 320 */
324 .long sys_signalfd
325 .long sys_ni_syscall
326 .long sys_eventfd
327 .long sys_fallocate
328 .long sys_setns /* 325 */
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
deleted file mode 100644
index 521749fbbb56..000000000000
--- a/arch/m32r/kernel/time.c
+++ /dev/null
@@ -1,199 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/arch/m32r/kernel/time.c
4 *
5 * Copyright (c) 2001, 2002 Hiroyuki Kondo, Hirokazu Takata,
6 * Hitoshi Yamamoto
7 * Taken from i386 version.
8 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
9 * Copyright (C) 1996, 1997, 1998 Ralf Baechle
10 *
11 * This file contains the time handling details for PC-style clocks as
12 * found in some MIPS systems.
13 *
14 * Some code taken from sh version.
15 * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
16 * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
17 */
18
19#undef DEBUG_TIMER
20
21#include <linux/errno.h>
22#include <linux/init.h>
23#include <linux/module.h>
24#include <linux/sched.h>
25#include <linux/kernel.h>
26#include <linux/param.h>
27#include <linux/string.h>
28#include <linux/mm.h>
29#include <linux/interrupt.h>
30#include <linux/profile.h>
31
32#include <asm/io.h>
33#include <asm/m32r.h>
34
35#include <asm/hw_irq.h>
36
37#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE)
38/* this needs a better home */
39DEFINE_SPINLOCK(rtc_lock);
40
41#ifdef CONFIG_RTC_DRV_CMOS_MODULE
42EXPORT_SYMBOL(rtc_lock);
43#endif
44#endif /* pc-style 'CMOS' RTC support */
45
46#ifdef CONFIG_SMP
47extern void smp_local_timer_interrupt(void);
48#endif
49
50#define TICK_SIZE (tick_nsec / 1000)
51
52/*
53 * Change this if you have some constant time drift
54 */
55
56/* This is for machines which generate the exact clock. */
57#define USECS_PER_JIFFY (1000000/HZ)
58
59static unsigned long latch;
60
61static u32 m32r_gettimeoffset(void)
62{
63 unsigned long elapsed_time = 0; /* [us] */
64
65#if defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_XNUX2) \
66 || defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_M32700) \
67 || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
68#ifndef CONFIG_SMP
69
70 unsigned long count;
71
72 /* timer count may underflow right here */
73 count = inl(M32R_MFT2CUT_PORTL);
74
75 if (inl(M32R_ICU_CR18_PORTL) & 0x00000100) /* underflow check */
76 count = 0;
77
78 count = (latch - count) * TICK_SIZE;
79 elapsed_time = DIV_ROUND_CLOSEST(count, latch);
80 /* NOTE: LATCH is equal to the "interval" value (= reload count). */
81
82#else /* CONFIG_SMP */
83 unsigned long count;
84 static unsigned long p_jiffies = -1;
85 static unsigned long p_count = 0;
86
87 /* timer count may underflow right here */
88 count = inl(M32R_MFT2CUT_PORTL);
89
90 if (jiffies == p_jiffies && count > p_count)
91 count = 0;
92
93 p_jiffies = jiffies;
94 p_count = count;
95
96 count = (latch - count) * TICK_SIZE;
97 elapsed_time = DIV_ROUND_CLOSEST(count, latch);
98 /* NOTE: LATCH is equal to the "interval" value (= reload count). */
99#endif /* CONFIG_SMP */
100#elif defined(CONFIG_CHIP_M32310)
101#warning do_gettimeoffse not implemented
102#else
103#error no chip configuration
104#endif
105
106 return elapsed_time * 1000;
107}
108
109/*
110 * timer_interrupt() needs to keep up the real-time clock,
111 * as well as call the "xtime_update()" routine every clocktick
112 */
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
#ifndef CONFIG_SMP
	profile_tick(CPU_PROFILING);
#endif
	/* Advance jiffies / wall-clock time by one tick. */
	xtime_update(1);

#ifndef CONFIG_SMP
	update_process_times(user_mode(get_irq_regs()));
#endif
	/* As we return to user mode fire off the other CPU schedulers..
	   this is basically because we don't yet share IRQ's around.
	   This message is rigged to be safe on the 386 - basically it's
	   a hack, so don't look closely for now.. */

#ifdef CONFIG_SMP
	/* On SMP, fan the tick out to the other CPUs via IPI. */
	smp_local_timer_interrupt();
	smp_send_timer();
#endif

	return IRQ_HANDLED;
}
135
/* IRQ action for the MFT2 timer channel; registered in time_init(). */
static struct irqaction irq0 = {
	.handler = timer_interrupt,
	.name = "MFT2",
};
140
void read_persistent_clock(struct timespec *ts)
{
	unsigned int epoch, year, mon, day, hour, min, sec;

	sec = min = hour = day = mon = year = 0;
	epoch = 0;

	/*
	 * NOTE(review): no RTC hardware is read here — the date is fixed
	 * in the source (resolves to 2003-04-17 after the epoch heuristic
	 * below).  Presumably no battery-backed clock was available.
	 */
	year = 23;
	mon = 4;
	day = 17;

	/* Attempt to guess the epoch.  This is the same heuristic as in rtc.c
	   so no stupid things will happen to timekeeping.  Who knows, maybe
	   Ultrix also uses 1952 as epoch ...  */
	if (year > 10 && year < 44)
		epoch = 1980;
	else if (year < 96)
		epoch = 1952;
	year += epoch;

	ts->tv_sec = mktime(year, mon, day, hour, min, sec);
	/* Sub-second part matches the initial jiffies offset. */
	ts->tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
}
164
165
void __init time_init(void)
{
	/* Legacy timekeeping: jiffies plus gettimeoffset interpolation. */
	arch_gettimeoffset = m32r_gettimeoffset;

#if defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_XNUX2) \
	|| defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_M32700) \
	|| defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)

	/* M32102 MFT setup */
	setup_irq(M32R_IRQ_MFT2, &irq0);
	{
		unsigned long bus_clock;
		unsigned short divide;

		bus_clock = boot_cpu_data.bus_clock;
		divide = boot_cpu_data.timer_divide;
		/* Reload value for one tick: timer input clock / HZ. */
		latch = DIV_ROUND_CLOSEST(bus_clock/divide, HZ);

		printk("Timer start : latch = %ld\n", latch);

		/* Program mode, reload/current count, compare, then enable. */
		outl((M32R_MFTMOD_CC_MASK | M32R_MFTMOD_TCCR \
			|M32R_MFTMOD_CSSEL011), M32R_MFT2MOD_PORTL);
		outl(latch, M32R_MFT2RLD_PORTL);
		outl(latch, M32R_MFT2CUT_PORTL);
		outl(0, M32R_MFT2CMPRLD_PORTL);
		outl((M32R_MFTCR_MFT2MSK|M32R_MFTCR_MFT2EN), M32R_MFTCR_PORTL);
	}

#elif defined(CONFIG_CHIP_M32310)
#warning time_init not implemented
#else
#error no chip configuration
#endif
}
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
deleted file mode 100644
index a6f300a208bd..000000000000
--- a/arch/m32r/kernel/traps.c
+++ /dev/null
@@ -1,324 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/arch/m32r/kernel/traps.c
4 *
5 * Copyright (C) 2001, 2002 Hirokazu Takata, Hiroyuki Kondo,
6 * Hitoshi Yamamoto
7 */
8
9/*
10 * 'traps.c' handles hardware traps and faults after we have saved some
11 * state in 'entry.S'.
12 */
13#include <linux/init.h>
14#include <linux/kernel.h>
15#include <linux/kallsyms.h>
16#include <linux/stddef.h>
17#include <linux/ptrace.h>
18#include <linux/sched/debug.h>
19#include <linux/sched/task_stack.h>
20#include <linux/mm.h>
21#include <linux/cpu.h>
22
23#include <asm/page.h>
24#include <asm/processor.h>
25
26#include <linux/uaccess.h>
27#include <asm/io.h>
28#include <linux/atomic.h>
29
30#include <asm/smp.h>
31
32#include <linux/module.h>
33
34asmlinkage void alignment_check(void);
35asmlinkage void ei_handler(void);
36asmlinkage void rie_handler(void);
37asmlinkage void debug_trap(void);
38asmlinkage void cache_flushing_handler(void);
39asmlinkage void ill_trap(void);
40
/*
 * SMP-only pieces: IPI handler prototypes (implemented in smp.c) and the
 * boot vector stub for secondary CPUs.
 */
41#ifdef CONFIG_SMP
42extern void smp_reschedule_interrupt(void);
43extern void smp_invalidate_interrupt(void);
44extern void smp_call_function_interrupt(void);
45extern void smp_ipi_timer_interrupt(void);
46extern void smp_flush_cache_all_interrupt(void);
47extern void smp_call_function_single_interrupt(void);
48
49/*
50 * for Boot AP function
51 */
 /* Placed in .eit_vector4: a 32-word zero-filled reset/EIT area (_AP_RE)
 followed at _AP_EI by a branch into startup_AP, so an application
 processor taking its first exception jumps straight to the AP boot
 path.  See the .eit_vector4 output section in vmlinux.lds.S. */
52asm (
53 " .section .eit_vector4,\"ax\" \n"
54 " .global _AP_RE \n"
55 " .global startup_AP \n"
56 "_AP_RE: \n"
57 " .fill 32, 4, 0 \n"
58 "_AP_EI: bra startup_AP \n"
59 " .previous \n"
60);
61#endif /* CONFIG_SMP */
62
 /* eit_vector[] is defined by the linker script at the base of the EIT
 (exception/interrupt/trap) vector area. */
63extern unsigned long eit_vector[];
 /* Build an m32r "bra" instruction word that branches from vector slot
 `entry` to `func`: the word displacement (byte offset / 4) is OR'd
 with the 0xff000000 branch opcode.  Assumes func is close enough for
 the 24-bit displacement — presumably guaranteed by the kernel layout;
 TODO(review): confirm against the m32r ISA manual. */
64#define BRA_INSN(func, entry) \
65 ((unsigned long)func - (unsigned long)eit_vector - entry*4)/4 \
66 + 0xff000000UL
67
/*
 * Populate the EIT (exception/interrupt/trap) vector table with branch
 * instructions to the C/asm handlers, then flush the D-cache so the CPU
 * fetches the freshly written instruction words.
 */
68static void set_eit_vector_entries(void)
69{
70 extern void default_eit_handler(void);
71 extern void system_call(void);
72 extern void pie_handler(void);
73 extern void ace_handler(void);
74 extern void tme_handler(void);
75 extern void _flush_cache_copyback_all(void);
76
 /* Slots 0/4 store a literal "seth r0, imm" so default_eit_handler can
 tell which entry fired; most other slots hold a direct branch. */
77 eit_vector[0] = 0xd0c00001; /* seth r0, 0x01 */
78 eit_vector[1] = BRA_INSN(default_eit_handler, 1);
79 eit_vector[4] = 0xd0c00010; /* seth r0, 0x10 */
80 eit_vector[5] = BRA_INSN(default_eit_handler, 5);
81 eit_vector[8] = BRA_INSN(rie_handler, 8);
82 eit_vector[12] = BRA_INSN(alignment_check, 12);
83 eit_vector[16] = BRA_INSN(ill_trap, 16);
84 eit_vector[17] = BRA_INSN(debug_trap, 17);
85 eit_vector[18] = BRA_INSN(system_call, 18);
86 eit_vector[19] = BRA_INSN(ill_trap, 19);
87 eit_vector[20] = BRA_INSN(ill_trap, 20);
88 eit_vector[21] = BRA_INSN(ill_trap, 21);
89 eit_vector[22] = BRA_INSN(ill_trap, 22);
90 eit_vector[23] = BRA_INSN(ill_trap, 23);
91 eit_vector[24] = BRA_INSN(ill_trap, 24);
92 eit_vector[25] = BRA_INSN(ill_trap, 25);
93 eit_vector[26] = BRA_INSN(ill_trap, 26);
94 eit_vector[27] = BRA_INSN(ill_trap, 27);
95 eit_vector[28] = BRA_INSN(cache_flushing_handler, 28);
96 eit_vector[29] = BRA_INSN(ill_trap, 29);
97 eit_vector[30] = BRA_INSN(ill_trap, 30);
98 eit_vector[31] = BRA_INSN(ill_trap, 31);
99 eit_vector[32] = BRA_INSN(ei_handler, 32);
100 eit_vector[64] = BRA_INSN(pie_handler, 64);
101#ifdef CONFIG_MMU
102 eit_vector[68] = BRA_INSN(ace_handler, 68);
103 eit_vector[72] = BRA_INSN(tme_handler, 72);
104#endif /* CONFIG_MMU */
 /* SMP IPI slots hold raw handler addresses (looked up by the IPI
 dispatch path), not branch instructions like the slots above. */
105#ifdef CONFIG_SMP
106 eit_vector[184] = (unsigned long)smp_reschedule_interrupt;
107 eit_vector[185] = (unsigned long)smp_invalidate_interrupt;
108 eit_vector[186] = (unsigned long)smp_call_function_interrupt;
109 eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
110 eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
111 eit_vector[189] = 0; /* CPU_BOOT_IPI */
112 eit_vector[190] = (unsigned long)smp_call_function_single_interrupt;
113 eit_vector[191] = 0;
114#endif
 /* Write back the cache so the new vector words are visible to fetch. */
115 _flush_cache_copyback_all();
116}
117
/*
 * Early trap setup, called once at boot: install the EIT vector table,
 * then run per-CPU init.
 */
118void __init trap_init(void)
119{
120 set_eit_vector_entries();
121
122 /*
123 * Should be a barrier for any external CPU state.
124 */
125 cpu_init();
126}
127
128static int kstack_depth_to_print = 24;
129
130static void show_trace(struct task_struct *task, unsigned long *stack)
131{
132 unsigned long addr;
133
134 if (!stack)
135 stack = (unsigned long*)&stack;
136
137 printk("Call Trace: ");
138 while (!kstack_end(stack)) {
139 addr = *stack++;
140 if (__kernel_text_address(addr))
141 printk("[<%08lx>] %pSR\n", addr, (void *)addr);
142 }
143 printk("\n");
144}
145
146void show_stack(struct task_struct *task, unsigned long *sp)
147{
148 unsigned long *stack;
149 int i;
150
151 /*
152 * debugging aid: "show_stack(NULL);" prints the
153 * back trace for this cpu.
154 */
155
156 if(sp==NULL) {
157 if (task)
158 sp = (unsigned long *)task->thread.sp;
159 else
160 sp=(unsigned long*)&sp;
161 }
162
163 stack = sp;
164 for(i=0; i < kstack_depth_to_print; i++) {
165 if (kstack_end(stack))
166 break;
167 if (i && ((i % 4) == 0))
168 printk("\n ");
169 printk("%08lx ", *stack++);
170 }
171 printk("\n");
172 show_trace(task, sp);
173}
174
/*
 * Print a full crash report for @regs: CPU id, register file, the
 * relevant stack pointer (user SPU or kernel SPI), the process, and —
 * for kernel-mode faults — the stack contents and the code bytes at the
 * faulting PC.
 */
175static void show_registers(struct pt_regs *regs)
176{
177 int i = 0;
178 int in_kernel = 1;
179 unsigned long sp;
180
181 printk("CPU: %d\n", smp_processor_id());
182 show_regs(regs);
183
 /* Kernel stack pointer at trap time: the word just past pt_regs. */
184 sp = (unsigned long) (1+regs);
185 if (user_mode(regs)) {
186 in_kernel = 0;
187 sp = regs->spu;
188 printk("SPU: %08lx\n", sp);
189 } else {
190 printk("SPI: %08lx\n", sp);
191 }
192 printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)",
193 current->comm, task_pid_nr(current), 0xffff & i, 4096+(unsigned long)current);
194
195 /*
196 * When in-kernel, we also print out the stack and code at the
197 * time of the fault..
198 */
199 if (in_kernel) {
200 printk("\nStack: ");
201 show_stack(current, (unsigned long*) sp);
202
203 printk("\nCode: ");
 /* A PC below PAGE_OFFSET is not kernel text; jump to the "bad"
 label inside the loop, which prints the error and breaks out. */
204 if (regs->bpc < PAGE_OFFSET)
205 goto bad;
206
207 for(i=0;i<20;i++) {
208 unsigned char c;
209 if (__get_user(c, &((unsigned char*)regs->bpc)[i])) {
210bad:
211 printk(" Bad PC value.");
212 break;
213 }
214 printk("%02x ", c);
215 }
216 }
217 printk("\n");
218}
219
 /* Serializes concurrent die() reports from multiple CPUs. */
220static DEFINE_SPINLOCK(die_lock);
221
/*
 * Terminal error path for kernel faults: print a full register/stack
 * report under die_lock (with console unblanked via bust_spinlocks),
 * then kill the current task with SIGSEGV.  Does not return.
 */
222void die(const char * str, struct pt_regs * regs, long err)
223{
224 console_verbose();
225 spin_lock_irq(&die_lock);
226 bust_spinlocks(1);
227 printk("%s: %04lx\n", str, err & 0xffff);
228 show_registers(regs);
229 bust_spinlocks(0);
230 spin_unlock_irq(&die_lock);
231 do_exit(SIGSEGV);
232}
233
/*
 * Die only when the fault was raised in kernel mode; user-mode faults
 * are handled elsewhere by signal delivery.
 */
static __inline__ void die_if_kernel(const char * str,
	struct pt_regs * regs, long err)
{
	if (user_mode(regs))
		return;
	die(str, regs, err);
}
240
241static __inline__ void do_trap(int trapnr, int signr, const char * str,
242 struct pt_regs * regs, long error_code, siginfo_t *info)
243{
244 if (user_mode(regs)) {
245 /* trap_signal */
246 struct task_struct *tsk = current;
247 tsk->thread.error_code = error_code;
248 tsk->thread.trap_no = trapnr;
249 if (info)
250 force_sig_info(signr, info, tsk);
251 else
252 force_sig(signr, tsk);
253 return;
254 } else {
255 /* kernel_trap */
256 if (!fixup_exception(regs))
257 die(str, regs, error_code);
258 return;
259 }
260}
261
/*
 * Handler generators: DO_ERROR emits a do_<name>() that funnels into
 * do_trap() with no siginfo; DO_ERROR_INFO additionally builds a
 * siginfo_t with the given si_code and fault address (typically the
 * trapping PC, regs->bpc).
 */
262#define DO_ERROR(trapnr, signr, str, name) \
263asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
264{ \
265 do_trap(trapnr, signr, NULL, regs, error_code, NULL); \
266}
267
268#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
269asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
270{ \
271 siginfo_t info; \
272 info.si_signo = signr; \
273 info.si_errno = 0; \
274 info.si_code = sicode; \
275 info.si_addr = (void __user *)siaddr; \
276 do_trap(trapnr, signr, str, regs, error_code, &info); \
277}
278
 /* Concrete handlers referenced from the EIT vector table above. */
279DO_ERROR( 1, SIGTRAP, "debug trap", debug_trap)
280DO_ERROR_INFO(0x20, SIGILL, "reserved instruction ", rie_handler, ILL_ILLOPC, regs->bpc)
281DO_ERROR_INFO(0x100, SIGILL, "privileged instruction", pie_handler, ILL_PRVOPC, regs->bpc)
282DO_ERROR_INFO(-1, SIGILL, "illegal trap", ill_trap, ILL_ILLTRP, regs->bpc)
283
 /* Implemented in align.c: emulates the unaligned load/store encoded in
 the given instruction word; returns 0 on success. */
284extern int handle_unaligned_access(unsigned long, struct pt_regs *);
285
286/* This code taken from arch/sh/kernel/traps.c */
/*
 * Alignment-fault handler: fetch the faulting instruction word from
 * regs->bpc (switching the address-limit with set_fs so the fetch works
 * from either user or kernel space) and hand it to the emulator.  A
 * user task whose instruction cannot be fetched or emulated gets
 * SIGSEGV; an unreadable kernel PC is fatal via die().
 */
287asmlinkage void do_alignment_check(struct pt_regs *regs, long error_code)
288{
289 mm_segment_t oldfs;
290 unsigned long insn;
291 int tmp;
292
 /* Save the current address limit; every exit path restores it. */
293 oldfs = get_fs();
294
295 if (user_mode(regs)) {
296 local_irq_enable();
297 current->thread.error_code = error_code;
298 current->thread.trap_no = 0x17;
299
300 set_fs(USER_DS);
301 if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
302 set_fs(oldfs);
303 goto uspace_segv;
304 }
305 tmp = handle_unaligned_access(insn, regs);
306 set_fs(oldfs);
307
308 if (!tmp)
309 return;
310
311 uspace_segv:
312 printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
313 "access\n", current->comm);
314 force_sig(SIGSEGV, current);
315 } else {
 /* KERNEL_DS lets copy_from_user read the kernel-space PC.  The
 die() message mentions "do_address_error" — inherited from the
 sh original this was copied from. */
316 set_fs(KERNEL_DS);
317 if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
318 set_fs(oldfs);
319 die("insn faulting in do_address_error", regs, 0);
320 }
321 handle_unaligned_access(insn, regs);
322 set_fs(oldfs);
323 }
324}
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
deleted file mode 100644
index 7e4d957f7f7f..000000000000
--- a/arch/m32r/kernel/vmlinux.lds.S
+++ /dev/null
@@ -1,79 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* ld script to make M32R Linux kernel
3 */
4
5#include <asm-generic/vmlinux.lds.h>
6#include <asm/addrspace.h>
7#include <asm/page.h>
8#include <asm/thread_info.h>
9
10OUTPUT_ARCH(m32r)
/* jiffies aliases the low 32 bits of jiffies_64; on big-endian those
 * live 4 bytes in. */
11#if defined(__LITTLE_ENDIAN__)
12 jiffies = jiffies_64;
13#else
14 jiffies = jiffies_64 + 4;
15#endif
16
/* Entry point: the virtual "boot" symbol translated to its physical
 * address — presumably KSEG mapping offset 0x80000000; TODO(review):
 * confirm against asm/addrspace.h. */
17kernel_entry = boot - 0x80000000;
18ENTRY(kernel_entry)
19
20SECTIONS
21{
22 . = CONFIG_MEMORY_START + __PAGE_OFFSET;
 /* EIT vector table sits at the very start of the kernel image; the
 first 0x1000 bytes are reserved for it before empty_zero_page. */
23 eit_vector = .;
24
25 . = . + 0x1000;
26 .empty_zero_page : { *(.empty_zero_page) } = 0
27
28 /* read-only */
29 _text = .; /* Text and read-only data */
30 .boot : { *(.boot) } = 0
31 .text : {
32 HEAD_TEXT
33 TEXT_TEXT
34 SCHED_TEXT
35 CPUIDLE_TEXT
36 LOCK_TEXT
37 *(.fixup)
38 *(.gnu.warning)
39 } = 0x9090
 /* AP boot vector (see the .eit_vector4 asm in traps.c) must sit on a
 64 KiB boundary. */
40#ifdef CONFIG_SMP
41 . = ALIGN(65536);
42 .eit_vector4 : { *(.eit_vector4) }
43#endif
44 _etext = .; /* End of text section */
45
46 EXCEPTION_TABLE(16)
47 NOTES
48
49 _sdata = .; /* Start of data section */
50 RODATA
51 RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
52 _edata = .; /* End of data section */
53
54 /* will be freed after init */
55 . = ALIGN(PAGE_SIZE); /* Init code and data */
56 __init_begin = .;
57 INIT_TEXT_SECTION(PAGE_SIZE)
58 INIT_DATA_SECTION(16)
59 PERCPU_SECTION(32)
60 . = ALIGN(PAGE_SIZE);
61 __init_end = .;
62 /* freed after init ends here */
63
64 BSS_SECTION(0, 0, 4)
65
66 _end = . ;
67
68 /* Stabs debugging sections. */
69 .stab 0 : { *(.stab) }
70 .stabstr 0 : { *(.stabstr) }
71 .stab.excl 0 : { *(.stab.excl) }
72 .stab.exclstr 0 : { *(.stab.exclstr) }
73 .stab.index 0 : { *(.stab.index) }
74 .stab.indexstr 0 : { *(.stab.indexstr) }
75 .comment 0 : { *(.comment) }
76
77 /* Sections to be discarded */
78 DISCARDS
79}