aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorArnd Bergmann <arnd@arndb.de>2018-03-07 16:18:39 -0500
committerArnd Bergmann <arnd@arndb.de>2018-03-07 16:18:39 -0500
commitb67aea2bbab780e412b8af3386cc9f78f61a4cac (patch)
tree93fb3f88d71a431d5a1d2203635546986dacf3f4
parent661e50bc853209e41a5c14a290ca4decc43cbfd1 (diff)
parent8d06c3302635f0ab426937f2bb10e9b9c34087e4 (diff)
Merge tag 'metag_remove_2' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/jhogan/metag into asm-generic
Remove metag architecture These patches remove the metag architecture and tightly dependent drivers from the kernel. With the 4.16 kernel the ancient gcc 4.2.4 based metag toolchain we have been using is hitting compiler bugs, so now seems a good time to drop it altogether. * tag 'metag_remove_2' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/jhogan/metag: i2c: img-scb: Drop METAG dependency media: img-ir: Drop METAG dependency watchdog: imgpdc: Drop METAG dependency MAINTAINERS/CREDITS: Drop METAG ARCHITECTURE tty: Remove metag DA TTY and console driver clocksource: Remove metag generic timer driver irqchip: Remove metag irqchip drivers Drop a bunch of metag references docs: Remove remaining references to metag docs: Remove metag docs metag: Remove arch/metag/ Signed-off-by: Arnd Bergmann <arnd@arndb.de>
-rw-r--r--CREDITS5
-rw-r--r--Documentation/00-INDEX2
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt4
-rw-r--r--Documentation/dev-tools/kmemleak.rst2
-rw-r--r--Documentation/devicetree/bindings/metag/meta.txt30
-rw-r--r--Documentation/features/core/BPF-JIT/arch-support.txt1
-rw-r--r--Documentation/features/core/generic-idle-thread/arch-support.txt1
-rw-r--r--Documentation/features/core/jump-labels/arch-support.txt1
-rw-r--r--Documentation/features/core/tracehook/arch-support.txt1
-rw-r--r--Documentation/features/debug/KASAN/arch-support.txt1
-rw-r--r--Documentation/features/debug/gcov-profile-all/arch-support.txt1
-rw-r--r--Documentation/features/debug/kgdb/arch-support.txt1
-rw-r--r--Documentation/features/debug/kprobes-on-ftrace/arch-support.txt1
-rw-r--r--Documentation/features/debug/kprobes/arch-support.txt1
-rw-r--r--Documentation/features/debug/kretprobes/arch-support.txt1
-rw-r--r--Documentation/features/debug/optprobes/arch-support.txt1
-rw-r--r--Documentation/features/debug/stackprotector/arch-support.txt1
-rw-r--r--Documentation/features/debug/uprobes/arch-support.txt1
-rw-r--r--Documentation/features/debug/user-ret-profiler/arch-support.txt1
-rw-r--r--Documentation/features/io/dma-api-debug/arch-support.txt1
-rw-r--r--Documentation/features/io/dma-contiguous/arch-support.txt1
-rw-r--r--Documentation/features/io/sg-chain/arch-support.txt1
-rw-r--r--Documentation/features/lib/strncasecmp/arch-support.txt1
-rw-r--r--Documentation/features/locking/cmpxchg-local/arch-support.txt1
-rw-r--r--Documentation/features/locking/lockdep/arch-support.txt1
-rw-r--r--Documentation/features/locking/queued-rwlocks/arch-support.txt1
-rw-r--r--Documentation/features/locking/queued-spinlocks/arch-support.txt1
-rw-r--r--Documentation/features/locking/rwsem-optimized/arch-support.txt1
-rw-r--r--Documentation/features/perf/kprobes-event/arch-support.txt1
-rw-r--r--Documentation/features/perf/perf-regs/arch-support.txt1
-rw-r--r--Documentation/features/perf/perf-stackdump/arch-support.txt1
-rw-r--r--Documentation/features/sched/membarrier-sync-core/arch-support.txt1
-rw-r--r--Documentation/features/sched/numa-balancing/arch-support.txt1
-rw-r--r--Documentation/features/seccomp/seccomp-filter/arch-support.txt1
-rw-r--r--Documentation/features/time/arch-tick-broadcast/arch-support.txt1
-rw-r--r--Documentation/features/time/clockevents/arch-support.txt1
-rw-r--r--Documentation/features/time/context-tracking/arch-support.txt1
-rw-r--r--Documentation/features/time/irq-time-acct/arch-support.txt1
-rw-r--r--Documentation/features/time/modern-timekeeping/arch-support.txt1
-rw-r--r--Documentation/features/time/virt-cpuacct/arch-support.txt1
-rw-r--r--Documentation/features/vm/ELF-ASLR/arch-support.txt1
-rw-r--r--Documentation/features/vm/PG_uncached/arch-support.txt1
-rw-r--r--Documentation/features/vm/THP/arch-support.txt1
-rw-r--r--Documentation/features/vm/TLB/arch-support.txt1
-rw-r--r--Documentation/features/vm/huge-vmap/arch-support.txt1
-rw-r--r--Documentation/features/vm/ioremap_prot/arch-support.txt1
-rw-r--r--Documentation/features/vm/numa-memblock/arch-support.txt1
-rw-r--r--Documentation/features/vm/pte_special/arch-support.txt1
-rw-r--r--Documentation/metag/00-INDEX4
-rw-r--r--Documentation/metag/kernel-ABI.txt256
-rw-r--r--MAINTAINERS14
-rw-r--r--arch/metag/Kconfig287
-rw-r--r--arch/metag/Kconfig.debug34
-rw-r--r--arch/metag/Kconfig.soc69
-rw-r--r--arch/metag/Makefile89
-rw-r--r--arch/metag/boot/.gitignore3
-rw-r--r--arch/metag/boot/Makefile68
-rw-r--r--arch/metag/boot/dts/Makefile16
-rw-r--r--arch/metag/boot/dts/skeleton.dts10
-rw-r--r--arch/metag/boot/dts/skeleton.dtsi15
-rw-r--r--arch/metag/boot/dts/tz1090.dtsi108
-rw-r--r--arch/metag/boot/dts/tz1090_generic.dts10
-rw-r--r--arch/metag/configs/meta1_defconfig39
-rw-r--r--arch/metag/configs/meta2_defconfig40
-rw-r--r--arch/metag/configs/meta2_smp_defconfig41
-rw-r--r--arch/metag/configs/tz1090_defconfig42
-rw-r--r--arch/metag/include/asm/Kbuild33
-rw-r--r--arch/metag/include/asm/atomic.h49
-rw-r--r--arch/metag/include/asm/atomic_lnkget.h204
-rw-r--r--arch/metag/include/asm/atomic_lock1.h157
-rw-r--r--arch/metag/include/asm/barrier.h85
-rw-r--r--arch/metag/include/asm/bitops.h127
-rw-r--r--arch/metag/include/asm/bug.h13
-rw-r--r--arch/metag/include/asm/cache.h24
-rw-r--r--arch/metag/include/asm/cacheflush.h251
-rw-r--r--arch/metag/include/asm/cachepart.h43
-rw-r--r--arch/metag/include/asm/checksum.h93
-rw-r--r--arch/metag/include/asm/clock.h59
-rw-r--r--arch/metag/include/asm/cmpxchg.h64
-rw-r--r--arch/metag/include/asm/cmpxchg_irq.h43
-rw-r--r--arch/metag/include/asm/cmpxchg_lnkget.h87
-rw-r--r--arch/metag/include/asm/cmpxchg_lock1.h49
-rw-r--r--arch/metag/include/asm/core_reg.h36
-rw-r--r--arch/metag/include/asm/cpu.h15
-rw-r--r--arch/metag/include/asm/da.h44
-rw-r--r--arch/metag/include/asm/delay.h30
-rw-r--r--arch/metag/include/asm/div64.h13
-rw-r--r--arch/metag/include/asm/dma-mapping.h12
-rw-r--r--arch/metag/include/asm/elf.h126
-rw-r--r--arch/metag/include/asm/fixmap.h69
-rw-r--r--arch/metag/include/asm/ftrace.h24
-rw-r--r--arch/metag/include/asm/global_lock.h101
-rw-r--r--arch/metag/include/asm/highmem.h62
-rw-r--r--arch/metag/include/asm/hugetlb.h75
-rw-r--r--arch/metag/include/asm/hwthread.h41
-rw-r--r--arch/metag/include/asm/io.h170
-rw-r--r--arch/metag/include/asm/irq.h38
-rw-r--r--arch/metag/include/asm/irqflags.h94
-rw-r--r--arch/metag/include/asm/l2cache.h259
-rw-r--r--arch/metag/include/asm/linkage.h8
-rw-r--r--arch/metag/include/asm/mach/arch.h86
-rw-r--r--arch/metag/include/asm/metag_isa.h81
-rw-r--r--arch/metag/include/asm/metag_mem.h1109
-rw-r--r--arch/metag/include/asm/metag_regs.h1184
-rw-r--r--arch/metag/include/asm/mman.h12
-rw-r--r--arch/metag/include/asm/mmu.h78
-rw-r--r--arch/metag/include/asm/mmu_context.h115
-rw-r--r--arch/metag/include/asm/mmzone.h43
-rw-r--r--arch/metag/include/asm/module.h38
-rw-r--r--arch/metag/include/asm/page.h129
-rw-r--r--arch/metag/include/asm/perf_event.h4
-rw-r--r--arch/metag/include/asm/pgalloc.h83
-rw-r--r--arch/metag/include/asm/pgtable-bits.h105
-rw-r--r--arch/metag/include/asm/pgtable.h270
-rw-r--r--arch/metag/include/asm/processor.h201
-rw-r--r--arch/metag/include/asm/ptrace.h61
-rw-r--r--arch/metag/include/asm/setup.h10
-rw-r--r--arch/metag/include/asm/smp.h28
-rw-r--r--arch/metag/include/asm/sparsemem.h14
-rw-r--r--arch/metag/include/asm/spinlock.h19
-rw-r--r--arch/metag/include/asm/spinlock_lnkget.h213
-rw-r--r--arch/metag/include/asm/spinlock_lock1.h165
-rw-r--r--arch/metag/include/asm/spinlock_types.h21
-rw-r--r--arch/metag/include/asm/stacktrace.h21
-rw-r--r--arch/metag/include/asm/string.h14
-rw-r--r--arch/metag/include/asm/switch.h21
-rw-r--r--arch/metag/include/asm/syscall.h104
-rw-r--r--arch/metag/include/asm/syscalls.h40
-rw-r--r--arch/metag/include/asm/tbx.h1420
-rw-r--r--arch/metag/include/asm/tcm.h31
-rw-r--r--arch/metag/include/asm/thread_info.h141
-rw-r--r--arch/metag/include/asm/tlb.h37
-rw-r--r--arch/metag/include/asm/tlbflush.h78
-rw-r--r--arch/metag/include/asm/topology.h28
-rw-r--r--arch/metag/include/asm/traps.h48
-rw-r--r--arch/metag/include/asm/uaccess.h213
-rw-r--r--arch/metag/include/asm/unistd.h12
-rw-r--r--arch/metag/include/asm/user_gateway.h45
-rw-r--r--arch/metag/include/uapi/asm/Kbuild31
-rw-r--r--arch/metag/include/uapi/asm/byteorder.h2
-rw-r--r--arch/metag/include/uapi/asm/ech.h16
-rw-r--r--arch/metag/include/uapi/asm/ptrace.h114
-rw-r--r--arch/metag/include/uapi/asm/sigcontext.h32
-rw-r--r--arch/metag/include/uapi/asm/siginfo.h16
-rw-r--r--arch/metag/include/uapi/asm/swab.h27
-rw-r--r--arch/metag/include/uapi/asm/unistd.h24
-rw-r--r--arch/metag/kernel/.gitignore1
-rw-r--r--arch/metag/kernel/Makefile40
-rw-r--r--arch/metag/kernel/asm-offsets.c15
-rw-r--r--arch/metag/kernel/cachepart.c132
-rw-r--r--arch/metag/kernel/clock.c110
-rw-r--r--arch/metag/kernel/core_reg.c118
-rw-r--r--arch/metag/kernel/da.c25
-rw-r--r--arch/metag/kernel/devtree.c57
-rw-r--r--arch/metag/kernel/dma.c588
-rw-r--r--arch/metag/kernel/ftrace.c121
-rw-r--r--arch/metag/kernel/ftrace_stub.S62
-rw-r--r--arch/metag/kernel/head.S66
-rw-r--r--arch/metag/kernel/irq.c293
-rw-r--r--arch/metag/kernel/kick.c110
-rw-r--r--arch/metag/kernel/machines.c21
-rw-r--r--arch/metag/kernel/metag_ksyms.c55
-rw-r--r--arch/metag/kernel/module.c284
-rw-r--r--arch/metag/kernel/perf/Makefile3
-rw-r--r--arch/metag/kernel/perf/perf_event.c879
-rw-r--r--arch/metag/kernel/perf/perf_event.h106
-rw-r--r--arch/metag/kernel/perf_callchain.c97
-rw-r--r--arch/metag/kernel/process.c448
-rw-r--r--arch/metag/kernel/ptrace.c427
-rw-r--r--arch/metag/kernel/setup.c622
-rw-r--r--arch/metag/kernel/signal.c336
-rw-r--r--arch/metag/kernel/smp.c668
-rw-r--r--arch/metag/kernel/stacktrace.c187
-rw-r--r--arch/metag/kernel/sys_metag.c181
-rw-r--r--arch/metag/kernel/tbiunexp.S23
-rw-r--r--arch/metag/kernel/tcm.c152
-rw-r--r--arch/metag/kernel/time.c26
-rw-r--r--arch/metag/kernel/topology.c78
-rw-r--r--arch/metag/kernel/traps.c992
-rw-r--r--arch/metag/kernel/user_gateway.S98
-rw-r--r--arch/metag/kernel/vmlinux.lds.S74
-rw-r--r--arch/metag/lib/Makefile23
-rw-r--r--arch/metag/lib/ashldi3.S34
-rw-r--r--arch/metag/lib/ashrdi3.S34
-rw-r--r--arch/metag/lib/checksum.c167
-rw-r--r--arch/metag/lib/clear_page.S18
-rw-r--r--arch/metag/lib/cmpdi2.S33
-rw-r--r--arch/metag/lib/copy_page.S21
-rw-r--r--arch/metag/lib/delay.c57
-rw-r--r--arch/metag/lib/div64.S109
-rw-r--r--arch/metag/lib/divsi3.S101
-rw-r--r--arch/metag/lib/ip_fast_csum.S33
-rw-r--r--arch/metag/lib/lshrdi3.S34
-rw-r--r--arch/metag/lib/memcpy.S186
-rw-r--r--arch/metag/lib/memmove.S346
-rw-r--r--arch/metag/lib/memset.S87
-rw-r--r--arch/metag/lib/modsi3.S39
-rw-r--r--arch/metag/lib/muldi3.S45
-rw-r--r--arch/metag/lib/ucmpdi2.S28
-rw-r--r--arch/metag/lib/usercopy.c1257
-rw-r--r--arch/metag/mm/Kconfig147
-rw-r--r--arch/metag/mm/Makefile20
-rw-r--r--arch/metag/mm/cache.c521
-rw-r--r--arch/metag/mm/extable.c15
-rw-r--r--arch/metag/mm/fault.c247
-rw-r--r--arch/metag/mm/highmem.c122
-rw-r--r--arch/metag/mm/hugetlbpage.c251
-rw-r--r--arch/metag/mm/init.c408
-rw-r--r--arch/metag/mm/ioremap.c90
-rw-r--r--arch/metag/mm/l2cache.c193
-rw-r--r--arch/metag/mm/maccess.c69
-rw-r--r--arch/metag/mm/mmu-meta1.c157
-rw-r--r--arch/metag/mm/mmu-meta2.c208
-rw-r--r--arch/metag/mm/numa.c82
-rw-r--r--arch/metag/oprofile/Makefile18
-rw-r--r--arch/metag/oprofile/backtrace.c63
-rw-r--r--arch/metag/oprofile/backtrace.h7
-rw-r--r--arch/metag/oprofile/common.c66
-rw-r--r--arch/metag/tbx/Makefile22
-rw-r--r--arch/metag/tbx/tbicore.S136
-rw-r--r--arch/metag/tbx/tbictx.S366
-rw-r--r--arch/metag/tbx/tbictxfpu.S190
-rw-r--r--arch/metag/tbx/tbidefr.S175
-rw-r--r--arch/metag/tbx/tbidspram.S161
-rw-r--r--arch/metag/tbx/tbilogf.S48
-rw-r--r--arch/metag/tbx/tbipcx.S451
-rw-r--r--arch/metag/tbx/tbiroot.S87
-rw-r--r--arch/metag/tbx/tbisoft.S237
-rw-r--r--arch/metag/tbx/tbistring.c114
-rw-r--r--arch/metag/tbx/tbitimer.S207
-rw-r--r--drivers/clocksource/Kconfig5
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/metag_generic.c161
-rw-r--r--drivers/i2c/busses/Kconfig2
-rw-r--r--drivers/irqchip/Makefile2
-rw-r--r--drivers/irqchip/irq-metag-ext.c871
-rw-r--r--drivers/irqchip/irq-metag.c343
-rw-r--r--drivers/media/rc/img-ir/Kconfig2
-rw-r--r--drivers/tty/Kconfig13
-rw-r--r--drivers/tty/Makefile1
-rw-r--r--drivers/tty/metag_da.c665
-rw-r--r--drivers/watchdog/Kconfig2
-rw-r--r--include/clocksource/metag_generic.h21
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/irqchip/metag-ext.h34
-rw-r--r--include/linux/irqchip/metag.h25
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/trace/events/mmflags.h2
-rw-r--r--include/uapi/linux/elf.h3
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--mm/Kconfig7
-rwxr-xr-xscripts/checkstack.pl4
-rw-r--r--scripts/recordmcount.c20
-rw-r--r--tools/perf/perf-sys.h4
254 files changed, 14 insertions, 27618 deletions
diff --git a/CREDITS b/CREDITS
index a3ec0c744172..989cda91c427 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1564,6 +1564,11 @@ W: http://www.carumba.com/
1564D: bug toaster (A1 sauce makes all the difference) 1564D: bug toaster (A1 sauce makes all the difference)
1565D: Random linux hacker 1565D: Random linux hacker
1566 1566
1567N: James Hogan
1568E: jhogan@kernel.org
1569D: Metag architecture maintainer
1570D: TZ1090 SoC maintainer
1571
1567N: Tim Hockin 1572N: Tim Hockin
1568E: thockin@hockin.org 1573E: thockin@hockin.org
1569W: http://www.hockin.org/~thockin 1574W: http://www.hockin.org/~thockin
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 7f3a0728ccf2..eae1e7193f50 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -276,8 +276,6 @@ memory-hotplug.txt
276 - Hotpluggable memory support, how to use and current status. 276 - Hotpluggable memory support, how to use and current status.
277men-chameleon-bus.txt 277men-chameleon-bus.txt
278 - info on MEN chameleon bus. 278 - info on MEN chameleon bus.
279metag/
280 - directory with info about Linux on Meta architecture.
281mic/ 279mic/
282 - Intel Many Integrated Core (MIC) architecture device driver. 280 - Intel Many Integrated Core (MIC) architecture device driver.
283mips/ 281mips/
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 1d1d53f85ddd..30a8d0635898 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1347,10 +1347,6 @@
1347 If specified, z/VM IUCV HVC accepts connections 1347 If specified, z/VM IUCV HVC accepts connections
1348 from listed z/VM user IDs only. 1348 from listed z/VM user IDs only.
1349 1349
1350 hwthread_map= [METAG] Comma-separated list of Linux cpu id to
1351 hardware thread id mappings.
1352 Format: <cpu>:<hwthread>
1353
1354 keep_bootcon [KNL] 1350 keep_bootcon [KNL]
1355 Do not unregister boot console at start. This is only 1351 Do not unregister boot console at start. This is only
1356 useful for debugging when something happens in the window 1352 useful for debugging when something happens in the window
diff --git a/Documentation/dev-tools/kmemleak.rst b/Documentation/dev-tools/kmemleak.rst
index cb8862659178..e6f51260ff32 100644
--- a/Documentation/dev-tools/kmemleak.rst
+++ b/Documentation/dev-tools/kmemleak.rst
@@ -8,7 +8,7 @@ with the difference that the orphan objects are not freed but only
8reported via /sys/kernel/debug/kmemleak. A similar method is used by the 8reported via /sys/kernel/debug/kmemleak. A similar method is used by the
9Valgrind tool (``memcheck --leak-check``) to detect the memory leaks in 9Valgrind tool (``memcheck --leak-check``) to detect the memory leaks in
10user-space applications. 10user-space applications.
11Kmemleak is supported on x86, arm, powerpc, sparc, sh, microblaze, ppc, mips, s390, metag and tile. 11Kmemleak is supported on x86, arm, powerpc, sparc, sh, microblaze, ppc, mips, s390 and tile.
12 12
13Usage 13Usage
14----- 14-----
diff --git a/Documentation/devicetree/bindings/metag/meta.txt b/Documentation/devicetree/bindings/metag/meta.txt
deleted file mode 100644
index f4457f57ab08..000000000000
--- a/Documentation/devicetree/bindings/metag/meta.txt
+++ /dev/null
@@ -1,30 +0,0 @@
1* Meta Processor Binding
2
3This binding specifies what properties must be available in the device tree
4representation of a Meta Processor Core, which is the root node in the tree.
5
6Required properties:
7
8 - compatible: Specifies the compatibility list for the Meta processor.
9 The type shall be <string> and the value shall include "img,meta".
10
11Optional properties:
12
13 - clocks: Clock consumer specifiers as described in
14 Documentation/devicetree/bindings/clock/clock-bindings.txt
15
16 - clock-names: Clock consumer names as described in
17 Documentation/devicetree/bindings/clock/clock-bindings.txt.
18
19Clocks are identified by name. Valid clocks are:
20
21 - "core": The Meta core clock from which the Meta timers are derived.
22
23* Examples
24
25/ {
26 compatible = "toumaz,tz1090", "img,meta";
27
28 clocks = <&meta_core_clk>;
29 clock-names = "core";
30};
diff --git a/Documentation/features/core/BPF-JIT/arch-support.txt b/Documentation/features/core/BPF-JIT/arch-support.txt
index 5575d2d09625..b0634ec01881 100644
--- a/Documentation/features/core/BPF-JIT/arch-support.txt
+++ b/Documentation/features/core/BPF-JIT/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/core/generic-idle-thread/arch-support.txt b/Documentation/features/core/generic-idle-thread/arch-support.txt
index abb5f271a792..e2a1a385efd3 100644
--- a/Documentation/features/core/generic-idle-thread/arch-support.txt
+++ b/Documentation/features/core/generic-idle-thread/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | ok | 19 | ia64: | ok |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | ok |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/core/jump-labels/arch-support.txt b/Documentation/features/core/jump-labels/arch-support.txt
index dbdaffcc5110..dafcea38fe5e 100644
--- a/Documentation/features/core/jump-labels/arch-support.txt
+++ b/Documentation/features/core/jump-labels/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/core/tracehook/arch-support.txt b/Documentation/features/core/tracehook/arch-support.txt
index dfb638c2f842..3d7886fcb6a9 100644
--- a/Documentation/features/core/tracehook/arch-support.txt
+++ b/Documentation/features/core/tracehook/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | ok | 19 | ia64: | ok |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | ok |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | ok | 24 | mn10300: | ok |
diff --git a/Documentation/features/debug/KASAN/arch-support.txt b/Documentation/features/debug/KASAN/arch-support.txt
index 3406fae833c3..63598b0e8ea6 100644
--- a/Documentation/features/debug/KASAN/arch-support.txt
+++ b/Documentation/features/debug/KASAN/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | TODO | 23 | mips: | TODO |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/debug/gcov-profile-all/arch-support.txt b/Documentation/features/debug/gcov-profile-all/arch-support.txt
index 830dbe801aaf..13b3b3dfe7f2 100644
--- a/Documentation/features/debug/gcov-profile-all/arch-support.txt
+++ b/Documentation/features/debug/gcov-profile-all/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | ok | 22 | microblaze: | ok |
24 | mips: | TODO | 23 | mips: | TODO |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/debug/kgdb/arch-support.txt b/Documentation/features/debug/kgdb/arch-support.txt
index 0217bf6e942d..cb4792cf0f98 100644
--- a/Documentation/features/debug/kgdb/arch-support.txt
+++ b/Documentation/features/debug/kgdb/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | ok | 22 | microblaze: | ok |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | ok | 24 | mn10300: | ok |
diff --git a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
index 1e84be3c142e..2046539489fe 100644
--- a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
+++ b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | TODO | 23 | mips: | TODO |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/debug/kprobes/arch-support.txt b/Documentation/features/debug/kprobes/arch-support.txt
index 529f66eda679..bfb3546a70d0 100644
--- a/Documentation/features/debug/kprobes/arch-support.txt
+++ b/Documentation/features/debug/kprobes/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | ok | 19 | ia64: | ok |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/debug/kretprobes/arch-support.txt b/Documentation/features/debug/kretprobes/arch-support.txt
index 43353242e439..cb2213bfadc5 100644
--- a/Documentation/features/debug/kretprobes/arch-support.txt
+++ b/Documentation/features/debug/kretprobes/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | ok | 19 | ia64: | ok |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/debug/optprobes/arch-support.txt b/Documentation/features/debug/optprobes/arch-support.txt
index f559f1ba5416..219aa64ca3f5 100644
--- a/Documentation/features/debug/optprobes/arch-support.txt
+++ b/Documentation/features/debug/optprobes/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | TODO | 23 | mips: | TODO |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/debug/stackprotector/arch-support.txt b/Documentation/features/debug/stackprotector/arch-support.txt
index 59a4c9ffb7f3..904864c3f18c 100644
--- a/Documentation/features/debug/stackprotector/arch-support.txt
+++ b/Documentation/features/debug/stackprotector/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/debug/uprobes/arch-support.txt b/Documentation/features/debug/uprobes/arch-support.txt
index 53ed42b0e7e5..d092f000e6bb 100644
--- a/Documentation/features/debug/uprobes/arch-support.txt
+++ b/Documentation/features/debug/uprobes/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/debug/user-ret-profiler/arch-support.txt b/Documentation/features/debug/user-ret-profiler/arch-support.txt
index 149443936de9..9e9e195b6d30 100644
--- a/Documentation/features/debug/user-ret-profiler/arch-support.txt
+++ b/Documentation/features/debug/user-ret-profiler/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | TODO | 23 | mips: | TODO |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/io/dma-api-debug/arch-support.txt b/Documentation/features/io/dma-api-debug/arch-support.txt
index 6be920643be6..ba9e169859c4 100644
--- a/Documentation/features/io/dma-api-debug/arch-support.txt
+++ b/Documentation/features/io/dma-api-debug/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | ok | 19 | ia64: | ok |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | ok | 22 | microblaze: | ok |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/io/dma-contiguous/arch-support.txt b/Documentation/features/io/dma-contiguous/arch-support.txt
index 0eb08e1e32b8..35b501f2c117 100644
--- a/Documentation/features/io/dma-contiguous/arch-support.txt
+++ b/Documentation/features/io/dma-contiguous/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/io/sg-chain/arch-support.txt b/Documentation/features/io/sg-chain/arch-support.txt
index 514ad3468aa5..42c078dff18b 100644
--- a/Documentation/features/io/sg-chain/arch-support.txt
+++ b/Documentation/features/io/sg-chain/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | ok | 19 | ia64: | ok |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | TODO | 23 | mips: | TODO |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/lib/strncasecmp/arch-support.txt b/Documentation/features/lib/strncasecmp/arch-support.txt
index 532c6f0fc15c..b10c21f14739 100644
--- a/Documentation/features/lib/strncasecmp/arch-support.txt
+++ b/Documentation/features/lib/strncasecmp/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | TODO | 23 | mips: | TODO |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/locking/cmpxchg-local/arch-support.txt b/Documentation/features/locking/cmpxchg-local/arch-support.txt
index f3eec26c8cf8..3b87fd37bae8 100644
--- a/Documentation/features/locking/cmpxchg-local/arch-support.txt
+++ b/Documentation/features/locking/cmpxchg-local/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | TODO | 23 | mips: | TODO |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/locking/lockdep/arch-support.txt b/Documentation/features/locking/lockdep/arch-support.txt
index 9756abc680a7..cefcd720f04e 100644
--- a/Documentation/features/locking/lockdep/arch-support.txt
+++ b/Documentation/features/locking/lockdep/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | ok |
23 | microblaze: | ok | 22 | microblaze: | ok |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/locking/queued-rwlocks/arch-support.txt b/Documentation/features/locking/queued-rwlocks/arch-support.txt
index 62f4ee5c156c..da6c7e37141c 100644
--- a/Documentation/features/locking/queued-rwlocks/arch-support.txt
+++ b/Documentation/features/locking/queued-rwlocks/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | TODO | 23 | mips: | TODO |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/locking/queued-spinlocks/arch-support.txt b/Documentation/features/locking/queued-spinlocks/arch-support.txt
index 321b32f6e63c..1e5dbcdd1c76 100644
--- a/Documentation/features/locking/queued-spinlocks/arch-support.txt
+++ b/Documentation/features/locking/queued-spinlocks/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | TODO | 23 | mips: | TODO |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/locking/rwsem-optimized/arch-support.txt b/Documentation/features/locking/rwsem-optimized/arch-support.txt
index 79bfa4d6e41f..b79e92288112 100644
--- a/Documentation/features/locking/rwsem-optimized/arch-support.txt
+++ b/Documentation/features/locking/rwsem-optimized/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | ok | 19 | ia64: | ok |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | TODO | 23 | mips: | TODO |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/perf/kprobes-event/arch-support.txt b/Documentation/features/perf/kprobes-event/arch-support.txt
index 00f1606bbf45..6418ccc6fc34 100644
--- a/Documentation/features/perf/kprobes-event/arch-support.txt
+++ b/Documentation/features/perf/kprobes-event/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/perf/perf-regs/arch-support.txt b/Documentation/features/perf/perf-regs/arch-support.txt
index 7d516eacf7b9..3b3392ac6466 100644
--- a/Documentation/features/perf/perf-regs/arch-support.txt
+++ b/Documentation/features/perf/perf-regs/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | TODO | 23 | mips: | TODO |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/perf/perf-stackdump/arch-support.txt b/Documentation/features/perf/perf-stackdump/arch-support.txt
index f974b8df5d82..4594cb28fbc8 100644
--- a/Documentation/features/perf/perf-stackdump/arch-support.txt
+++ b/Documentation/features/perf/perf-stackdump/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | TODO | 23 | mips: | TODO |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/sched/membarrier-sync-core/arch-support.txt b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
index 2c815a7f1ba7..42eaab4d439d 100644
--- a/Documentation/features/sched/membarrier-sync-core/arch-support.txt
+++ b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
@@ -42,7 +42,6 @@
42 | ia64: | TODO | 42 | ia64: | TODO |
43 | m32r: | TODO | 43 | m32r: | TODO |
44 | m68k: | TODO | 44 | m68k: | TODO |
45 | metag: | TODO |
46 | microblaze: | TODO | 45 | microblaze: | TODO |
47 | mips: | TODO | 46 | mips: | TODO |
48 | mn10300: | TODO | 47 | mn10300: | TODO |
diff --git a/Documentation/features/sched/numa-balancing/arch-support.txt b/Documentation/features/sched/numa-balancing/arch-support.txt
index 1d3c0f669152..4e67833aae66 100644
--- a/Documentation/features/sched/numa-balancing/arch-support.txt
+++ b/Documentation/features/sched/numa-balancing/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | .. | 20 | m32r: | .. |
21 | m68k: | .. | 21 | m68k: | .. |
22 | metag: | .. |
23 | microblaze: | .. | 22 | microblaze: | .. |
24 | mips: | TODO | 23 | mips: | TODO |
25 | mn10300: | .. | 24 | mn10300: | .. |
diff --git a/Documentation/features/seccomp/seccomp-filter/arch-support.txt b/Documentation/features/seccomp/seccomp-filter/arch-support.txt
index a32d5b207679..c5d8b397a693 100644
--- a/Documentation/features/seccomp/seccomp-filter/arch-support.txt
+++ b/Documentation/features/seccomp/seccomp-filter/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/time/arch-tick-broadcast/arch-support.txt b/Documentation/features/time/arch-tick-broadcast/arch-support.txt
index caee8f64d1bc..9e4999136881 100644
--- a/Documentation/features/time/arch-tick-broadcast/arch-support.txt
+++ b/Documentation/features/time/arch-tick-broadcast/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/time/clockevents/arch-support.txt b/Documentation/features/time/clockevents/arch-support.txt
index 1cd87f6cd07d..f90cb64c640b 100644
--- a/Documentation/features/time/clockevents/arch-support.txt
+++ b/Documentation/features/time/clockevents/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | ok | 21 | m68k: | ok |
22 | metag: | ok |
23 | microblaze: | ok | 22 | microblaze: | ok |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | ok | 24 | mn10300: | ok |
diff --git a/Documentation/features/time/context-tracking/arch-support.txt b/Documentation/features/time/context-tracking/arch-support.txt
index e6d7c7b2253c..eb4e5d32a2e9 100644
--- a/Documentation/features/time/context-tracking/arch-support.txt
+++ b/Documentation/features/time/context-tracking/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/time/irq-time-acct/arch-support.txt b/Documentation/features/time/irq-time-acct/arch-support.txt
index 15c6071788ae..02b7441f360f 100644
--- a/Documentation/features/time/irq-time-acct/arch-support.txt
+++ b/Documentation/features/time/irq-time-acct/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | .. | 19 | ia64: | .. |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/time/modern-timekeeping/arch-support.txt b/Documentation/features/time/modern-timekeeping/arch-support.txt
index baee7611ba3d..b3eb6fe6bc27 100644
--- a/Documentation/features/time/modern-timekeeping/arch-support.txt
+++ b/Documentation/features/time/modern-timekeeping/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | ok | 19 | ia64: | ok |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | ok |
23 | microblaze: | ok | 22 | microblaze: | ok |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | ok | 24 | mn10300: | ok |
diff --git a/Documentation/features/time/virt-cpuacct/arch-support.txt b/Documentation/features/time/virt-cpuacct/arch-support.txt
index 9129530cb73c..a1bd77fd723a 100644
--- a/Documentation/features/time/virt-cpuacct/arch-support.txt
+++ b/Documentation/features/time/virt-cpuacct/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | ok | 19 | ia64: | ok |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/vm/ELF-ASLR/arch-support.txt b/Documentation/features/vm/ELF-ASLR/arch-support.txt
index f6829af3255f..3f926177833c 100644
--- a/Documentation/features/vm/ELF-ASLR/arch-support.txt
+++ b/Documentation/features/vm/ELF-ASLR/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/vm/PG_uncached/arch-support.txt b/Documentation/features/vm/PG_uncached/arch-support.txt
index 1a09ea99d486..4c8f65d525d7 100644
--- a/Documentation/features/vm/PG_uncached/arch-support.txt
+++ b/Documentation/features/vm/PG_uncached/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | ok | 19 | ia64: | ok |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | TODO | 23 | mips: | TODO |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/vm/THP/arch-support.txt b/Documentation/features/vm/THP/arch-support.txt
index d170e6236503..d121dc2e3e5e 100644
--- a/Documentation/features/vm/THP/arch-support.txt
+++ b/Documentation/features/vm/THP/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | .. | 20 | m32r: | .. |
21 | m68k: | .. | 21 | m68k: | .. |
22 | metag: | TODO |
23 | microblaze: | .. | 22 | microblaze: | .. |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | .. | 24 | mn10300: | .. |
diff --git a/Documentation/features/vm/TLB/arch-support.txt b/Documentation/features/vm/TLB/arch-support.txt
index abfab4080a91..af233d2d82cf 100644
--- a/Documentation/features/vm/TLB/arch-support.txt
+++ b/Documentation/features/vm/TLB/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | .. | 21 | m68k: | .. |
22 | metag: | TODO |
23 | microblaze: | .. | 22 | microblaze: | .. |
24 | mips: | TODO | 23 | mips: | TODO |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/vm/huge-vmap/arch-support.txt b/Documentation/features/vm/huge-vmap/arch-support.txt
index f81f09b22b08..45c74fbe6805 100644
--- a/Documentation/features/vm/huge-vmap/arch-support.txt
+++ b/Documentation/features/vm/huge-vmap/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | TODO | 23 | mips: | TODO |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/vm/ioremap_prot/arch-support.txt b/Documentation/features/vm/ioremap_prot/arch-support.txt
index 0cc3e11c42e2..6cd436af0cc8 100644
--- a/Documentation/features/vm/ioremap_prot/arch-support.txt
+++ b/Documentation/features/vm/ioremap_prot/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | TODO | 23 | mips: | TODO |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/vm/numa-memblock/arch-support.txt b/Documentation/features/vm/numa-memblock/arch-support.txt
index 9a3fdac42ce1..2db895856da6 100644
--- a/Documentation/features/vm/numa-memblock/arch-support.txt
+++ b/Documentation/features/vm/numa-memblock/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | ok | 19 | ia64: | ok |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | .. | 21 | m68k: | .. |
22 | metag: | ok |
23 | microblaze: | ok | 22 | microblaze: | ok |
24 | mips: | ok | 23 | mips: | ok |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/features/vm/pte_special/arch-support.txt b/Documentation/features/vm/pte_special/arch-support.txt
index dfaa39e664ff..ccb15b6da42f 100644
--- a/Documentation/features/vm/pte_special/arch-support.txt
+++ b/Documentation/features/vm/pte_special/arch-support.txt
@@ -19,7 +19,6 @@
19 | ia64: | TODO | 19 | ia64: | TODO |
20 | m32r: | TODO | 20 | m32r: | TODO |
21 | m68k: | TODO | 21 | m68k: | TODO |
22 | metag: | TODO |
23 | microblaze: | TODO | 22 | microblaze: | TODO |
24 | mips: | TODO | 23 | mips: | TODO |
25 | mn10300: | TODO | 24 | mn10300: | TODO |
diff --git a/Documentation/metag/00-INDEX b/Documentation/metag/00-INDEX
deleted file mode 100644
index db11c513bd5c..000000000000
--- a/Documentation/metag/00-INDEX
+++ /dev/null
@@ -1,4 +0,0 @@
100-INDEX
2 - this file
3kernel-ABI.txt
4 - Documents metag ABI details
diff --git a/Documentation/metag/kernel-ABI.txt b/Documentation/metag/kernel-ABI.txt
deleted file mode 100644
index 628216603198..000000000000
--- a/Documentation/metag/kernel-ABI.txt
+++ /dev/null
@@ -1,256 +0,0 @@
1 ==========================
2 KERNEL ABIS FOR METAG ARCH
3 ==========================
4
5This document describes the Linux ABIs for the metag architecture, and has the
6following sections:
7
8 (*) Outline of registers
9 (*) Userland registers
10 (*) Kernel registers
11 (*) System call ABI
12 (*) Calling conventions
13
14
15====================
16OUTLINE OF REGISTERS
17====================
18
19The main Meta core registers are arranged in units:
20
21 UNIT Type DESCRIPTION GP EXT PRIV GLOBAL
22 ======= ======= =============== ======= ======= ======= =======
23 CT Special Control unit
24 D0 General Data unit 0 0-7 8-15 16-31 16-31
25 D1 General Data unit 1 0-7 8-15 16-31 16-31
26 A0 General Address unit 0 0-3 4-7 8-15 8-15
27 A1 General Address unit 1 0-3 4-7 8-15 8-15
28 PC Special PC unit 0 1
29 PORT Special Ports
30 TR Special Trigger unit 0-7
31 TT Special Trace unit 0-5
32 FX General FP unit 0-15
33
34GP registers form part of the main context.
35
36Extended context registers (EXT) may not be present on all hardware threads and
37can be context switched if support is enabled and the appropriate bits are set
38in e.g. the D0.8 register to indicate what extended state to preserve.
39
40Global registers are shared between threads and are privilege protected.
41
42See arch/metag/include/asm/metag_regs.h for definitions relating to core
43registers and the fields and bits they contain. See the TRMs for further details
44about special registers.
45
46Several special registers are preserved in the main context, these are the
47interesting ones:
48
49 REG (ALIAS) PURPOSE
50 ======================= ===============================================
51 CT.1 (TXMODE) Processor mode bits (particularly for DSP)
52 CT.2 (TXSTATUS) Condition flags and LSM_STEP (MGET/MSET step)
53 CT.3 (TXRPT) Branch repeat counter
54 PC.0 (PC) Program counter
55
56Some of the general registers have special purposes in the ABI and therefore
57have aliases:
58
59 D0 REG (ALIAS) PURPOSE D1 REG (ALIAS) PURPOSE
60 =============== =============== =============== =======================
61 D0.0 (D0Re0) 32bit result D1.0 (D1Re0) Top half of 64bit result
62 D0.1 (D0Ar6) Argument 6 D1.1 (D1Ar5) Argument 5
63 D0.2 (D0Ar4) Argument 4 D1.2 (D1Ar3) Argument 3
64 D0.3 (D0Ar2) Argument 2 D1.3 (D1Ar1) Argument 1
65 D0.4 (D0FrT) Frame temp D1.4 (D1RtP) Return pointer
66 D0.5 Call preserved D1.5 Call preserved
67 D0.6 Call preserved D1.6 Call preserved
68 D0.7 Call preserved D1.7 Call preserved
69
70 A0 REG (ALIAS) PURPOSE A1 REG (ALIAS) PURPOSE
71 =============== =============== =============== =======================
72 A0.0 (A0StP) Stack pointer A1.0 (A1GbP) Global base pointer
73 A0.1 (A0FrP) Frame pointer A1.1 (A1LbP) Local base pointer
74 A0.2 A1.2
75 A0.3 A1.3
76
77
78==================
79USERLAND REGISTERS
80==================
81
82All the general purpose D0, D1, A0, A1 registers are preserved when entering the
83kernel (including asynchronous events such as interrupts and timer ticks) except
84the following which have special purposes in the ABI:
85
86 REGISTERS WHEN STATUS PURPOSE
87 =============== ======= =============== ===============================
88 D0.8 DSP Preserved ECH, determines what extended
89 DSP state to preserve.
90 A0.0 (A0StP) ALWAYS Preserved Stack >= A0StP may be clobbered
91 at any time by the creation of a
92 signal frame.
93 A1.0 (A1GbP) SMP Clobbered Used as temporary for loading
94 kernel stack pointer and saving
95 core context.
96 A0.15 !SMP Protected Stores kernel stack pointer.
97 A1.15 ALWAYS Protected Stores kernel base pointer.
98
99On UP A0.15 is used to store the kernel stack pointer for storing the userland
100context. A0.15 is global between hardware threads though which means it cannot
101be used on SMP for this purpose. Since no protected local registers are
102available A1GbP is reserved for use as a temporary to allow a percpu stack
103pointer to be loaded for storing the rest of the context.
104
105
106================
107KERNEL REGISTERS
108================
109
110When in the kernel the following registers have special purposes in the ABI:
111
112 REGISTERS WHEN STATUS PURPOSE
113 =============== ======= =============== ===============================
114 A0.0 (A0StP) ALWAYS Preserved Stack >= A0StP may be clobbered
115 at any time by the creation of
116 an irq signal frame.
117 A1.0 (A1GbP) ALWAYS Preserved Reserved (kernel base pointer).
118
119
120===============
121SYSTEM CALL ABI
122===============
123
124When a system call is made, the following registers are effective:
125
126 REGISTERS CALL RETURN
127 =============== ======================= ===============================
128 D0.0 (D0Re0) Return value (or -errno)
129 D1.0 (D1Re0) System call number Clobbered
130 D0.1 (D0Ar6) Syscall arg #6 Preserved
131 D1.1 (D1Ar5) Syscall arg #5 Preserved
132 D0.2 (D0Ar4) Syscall arg #4 Preserved
133 D1.2 (D1Ar3) Syscall arg #3 Preserved
134 D0.3 (D0Ar2) Syscall arg #2 Preserved
135 D1.3 (D1Ar1) Syscall arg #1 Preserved
136
137Due to the limited number of argument registers and some system calls with badly
138aligned 64-bit arguments, 64-bit values are always packed in consecutive
139arguments, even if this is contrary to the normal calling conventions (where the
140two halves would go in a matching pair of data registers).
141
142For example fadvise64_64 usually has the signature:
143
144 long sys_fadvise64_64(i32 fd, i64 offs, i64 len, i32 advice);
145
146But for metag fadvise64_64 is wrapped so that the 64-bit arguments are packed:
147
148 long sys_fadvise64_64_metag(i32 fd, i32 offs_lo,
149 i32 offs_hi, i32 len_lo,
150 i32 len_hi, i32 advice)
151
152So the arguments are packed in the registers like this:
153
154 D0 REG (ALIAS) VALUE D1 REG (ALIAS) VALUE
155 =============== =============== =============== =======================
156 D0.1 (D0Ar6) advice D1.1 (D1Ar5) hi(len)
157 D0.2 (D0Ar4) lo(len) D1.2 (D1Ar3) hi(offs)
158 D0.3 (D0Ar2) lo(offs) D1.3 (D1Ar1) fd
159
160
161===================
162CALLING CONVENTIONS
163===================
164
165These calling conventions apply to both user and kernel code. The stack grows
166from low addresses to high addresses in the metag ABI. The stack pointer (A0StP)
167should always point to the next free address on the stack and should at all
168times be 64-bit aligned. The following registers are effective at the point of a
169call:
170
171 REGISTERS CALL RETURN
172 =============== ======================= ===============================
173 D0.0 (D0Re0) 32bit return value
174 D1.0 (D1Re0) Upper half of 64bit return value
175 D0.1 (D0Ar6) 32bit argument #6 Clobbered
176 D1.1 (D1Ar5) 32bit argument #5 Clobbered
177 D0.2 (D0Ar4) 32bit argument #4 Clobbered
178 D1.2 (D1Ar3) 32bit argument #3 Clobbered
179 D0.3 (D0Ar2) 32bit argument #2 Clobbered
180 D1.3 (D1Ar1) 32bit argument #1 Clobbered
181 D0.4 (D0FrT) Clobbered
182 D1.4 (D1RtP) Return pointer Clobbered
183 D{0-1}.{5-7} Preserved
184 A0.0 (A0StP) Stack pointer Preserved
185 A1.0 (A0GbP) Preserved
186 A0.1 (A0FrP) Frame pointer Preserved
187 A1.1 (A0LbP) Preserved
188 A{0-1},{2-3} Clobbered
189
19064-bit arguments are placed in matching pairs of registers (i.e. the same
191register number in both D0 and D1 units), with the least significant half in D0
192and the most significant half in D1, leaving a gap where necessary. Further
193arguments are stored on the stack in reverse order (earlier arguments at higher
194addresses):
195
196 ADDRESS 0 1 2 3 4 5 6 7
197 =============== ===== ===== ===== ===== ===== ===== ===== =====
198 A0StP -->
199 A0StP-0x08 32bit argument #8 32bit argument #7
200 A0StP-0x10 32bit argument #10 32bit argument #9
201
202Function prologues tend to look a bit like this:
203
204 /* If frame pointer in use, move it to frame temp register so it can be
205 easily pushed onto stack */
206 MOV D0FrT,A0FrP
207
208 /* If frame pointer in use, set it to stack pointer */
209 ADD A0FrP,A0StP,#0
210
211 /* Preserve D0FrT, D1RtP, D{0-1}.{5-7} on stack, incrementing A0StP */
212 MSETL [A0StP++],D0FrT,D0.5,D0.6,D0.7
213
214 /* Allocate some stack space for local variables */
215 ADD A0StP,A0StP,#0x10
216
217At this point the stack would look like this:
218
219 ADDRESS 0 1 2 3 4 5 6 7
220 =============== ===== ===== ===== ===== ===== ===== ===== =====
221 A0StP -->
222 A0StP-0x08
223 A0StP-0x10
224 A0StP-0x18 Old D0.7 Old D1.7
225 A0StP-0x20 Old D0.6 Old D1.6
226 A0StP-0x28 Old D0.5 Old D1.5
227 A0FrP --> Old A0FrP (frame ptr) Old D1RtP (return ptr)
228 A0FrP-0x08 32bit argument #8 32bit argument #7
229 A0FrP-0x10 32bit argument #10 32bit argument #9
230
231Function epilogues tend to differ depending on the use of a frame pointer. An
232example of a frame pointer epilogue:
233
234 /* Restore D0FrT, D1RtP, D{0-1}.{5-7} from stack, incrementing A0FrP */
235 MGETL D0FrT,D0.5,D0.6,D0.7,[A0FrP++]
236 /* Restore stack pointer to where frame pointer was before increment */
237 SUB A0StP,A0FrP,#0x20
238 /* Restore frame pointer from frame temp */
239 MOV A0FrP,D0FrT
240 /* Return to caller via restored return pointer */
241 MOV PC,D1RtP
242
243If the function hasn't touched the frame pointer, MGETL cannot be safely used
244with A0StP as it always increments and that would expose the stack to clobbering
245by interrupts (kernel) or signals (user). Therefore it's common to see the MGETL
246split into separate GETL instructions:
247
248 /* Restore D0FrT, D1RtP, D{0-1}.{5-7} from stack */
249 GETL D0FrT,D1RtP,[A0StP+#-0x30]
250 GETL D0.5,D1.5,[A0StP+#-0x28]
251 GETL D0.6,D1.6,[A0StP+#-0x20]
252 GETL D0.7,D1.7,[A0StP+#-0x18]
253 /* Restore stack pointer */
254 SUB A0StP,A0StP,#0x30
255 /* Return to caller via restored return pointer */
256 MOV PC,D1RtP
diff --git a/MAINTAINERS b/MAINTAINERS
index 4623caf8d72d..313754bf39e1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9084,20 +9084,6 @@ F: drivers/media/platform/meson/ao-cec.c
9084F: Documentation/devicetree/bindings/media/meson-ao-cec.txt 9084F: Documentation/devicetree/bindings/media/meson-ao-cec.txt
9085T: git git://linuxtv.org/media_tree.git 9085T: git git://linuxtv.org/media_tree.git
9086 9086
9087METAG ARCHITECTURE
9088M: James Hogan <jhogan@kernel.org>
9089L: linux-metag@vger.kernel.org
9090T: git git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/metag.git
9091S: Odd Fixes
9092F: arch/metag/
9093F: Documentation/metag/
9094F: Documentation/devicetree/bindings/metag/
9095F: Documentation/devicetree/bindings/interrupt-controller/img,*
9096F: drivers/clocksource/metag_generic.c
9097F: drivers/irqchip/irq-metag.c
9098F: drivers/irqchip/irq-metag-ext.c
9099F: drivers/tty/metag_da.c
9100
9101MICROBLAZE ARCHITECTURE 9087MICROBLAZE ARCHITECTURE
9102M: Michal Simek <monstr@monstr.eu> 9088M: Michal Simek <monstr@monstr.eu>
9103W: http://www.monstr.eu/fdt/ 9089W: http://www.monstr.eu/fdt/
diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig
deleted file mode 100644
index c7b62a339539..000000000000
--- a/arch/metag/Kconfig
+++ /dev/null
@@ -1,287 +0,0 @@
1# SPDX-License-Identifier: GPL-2.0
2config METAG
3 def_bool y
4 select EMBEDDED
5 select GENERIC_ATOMIC64
6 select GENERIC_CLOCKEVENTS
7 select GENERIC_IRQ_SHOW
8 select GENERIC_SMP_IDLE_THREAD
9 select HAVE_64BIT_ALIGNED_ACCESS
10 select HAVE_ARCH_TRACEHOOK
11 select HAVE_C_RECORDMCOUNT
12 select HAVE_DEBUG_KMEMLEAK
13 select HAVE_DEBUG_STACKOVERFLOW
14 select HAVE_DYNAMIC_FTRACE
15 select HAVE_EXIT_THREAD
16 select HAVE_FTRACE_MCOUNT_RECORD
17 select HAVE_FUNCTION_TRACER
18 select HAVE_KERNEL_BZIP2
19 select HAVE_KERNEL_GZIP
20 select HAVE_KERNEL_LZO
21 select HAVE_KERNEL_XZ
22 select HAVE_MEMBLOCK
23 select HAVE_MEMBLOCK_NODE_MAP
24 select HAVE_MOD_ARCH_SPECIFIC
25 select HAVE_OPROFILE
26 select HAVE_PERF_EVENTS
27 select HAVE_SYSCALL_TRACEPOINTS
28 select HAVE_UNDERSCORE_SYMBOL_PREFIX
29 select IRQ_DOMAIN
30 select GENERIC_IRQ_EFFECTIVE_AFF_MASK
31 select MODULES_USE_ELF_RELA
32 select OF
33 select OF_EARLY_FLATTREE
34 select SPARSE_IRQ
35 select CPU_NO_EFFICIENT_FFS
36
37config STACKTRACE_SUPPORT
38 def_bool y
39
40config LOCKDEP_SUPPORT
41 def_bool y
42
43config RWSEM_GENERIC_SPINLOCK
44 def_bool y
45
46config RWSEM_XCHGADD_ALGORITHM
47 bool
48
49config GENERIC_HWEIGHT
50 def_bool y
51
52config GENERIC_CALIBRATE_DELAY
53 def_bool y
54
55config NO_IOPORT_MAP
56 def_bool y
57
58source "init/Kconfig"
59
60source "kernel/Kconfig.freezer"
61
62menu "Processor type and features"
63
64config MMU
65 def_bool y
66
67config STACK_GROWSUP
68 def_bool y
69
70config HOTPLUG_CPU
71 bool "Enable CPU hotplug support"
72 depends on SMP
73 help
74 Say Y here to allow turning CPUs off and on. CPUs can be
75 controlled through /sys/devices/system/cpu.
76
77 Say N if you want to disable CPU hotplug.
78
79config HIGHMEM
80 bool "High Memory Support"
81 help
82 The address space of Meta processors is only 4 Gigabytes large
83 and it has to accommodate user address space, kernel address
84 space as well as some memory mapped IO. That means that, if you
85 have a large amount of physical memory and/or IO, not all of the
86 memory can be "permanently mapped" by the kernel. The physical
87 memory that is not permanently mapped is called "high memory".
88
89 Depending on the selected kernel/user memory split, minimum
90 vmalloc space and actual amount of RAM, you may not need this
91 option which should result in a slightly faster kernel.
92
93 If unsure, say n.
94
95source "arch/metag/mm/Kconfig"
96
97source "arch/metag/Kconfig.soc"
98
99config METAG_META12
100 bool
101 help
102 Select this from the SoC config symbol to indicate that it contains a
103 Meta 1.2 core.
104
105config METAG_META21
106 bool
107 help
108 Select this from the SoC config symbol to indicate that it contains a
109 Meta 2.1 core.
110
111config SMP
112 bool "Symmetric multi-processing support"
113 depends on METAG_META21 && METAG_META21_MMU
114 help
115 This enables support for systems with more than one thread running
116 Linux. If you have a system with only one thread running Linux,
117 say N. Otherwise, say Y.
118
119config NR_CPUS
120 int "Maximum number of CPUs (2-4)" if SMP
121 range 2 4 if SMP
122 default "1" if !SMP
123 default "4" if SMP
124
125config METAG_SMP_WRITE_REORDERING
126 bool
127 help
128 This attempts to prevent cache-memory incoherence due to external
129 reordering of writes from different hardware threads when SMP is
130 enabled. It adds fences (system event 0) to smp_mb and smp_rmb in an
131 attempt to catch some of the cases, and also before writes to shared
132 memory in LOCK1 protected atomics and spinlocks.
133 This will not completely prevent cache incoherency on affected cores.
134
135config METAG_LNKGET_AROUND_CACHE
136 bool
137 depends on METAG_META21
138 help
139 This indicates that the LNKGET/LNKSET instructions go around the
140 cache, which requires some extra cache flushes when the memory needs
141 to be accessed by normal GET/SET instructions too.
142
143choice
144 prompt "Atomicity primitive"
145 default METAG_ATOMICITY_LNKGET
146 help
147 This option selects the mechanism for performing atomic operations.
148
149config METAG_ATOMICITY_IRQSOFF
150 depends on !SMP
151 bool "irqsoff"
152 help
153 This option disables interrupts to achieve atomicity. This mechanism
154 is not SMP-safe.
155
156config METAG_ATOMICITY_LNKGET
157 depends on METAG_META21
158 bool "lnkget/lnkset"
159 help
160 This option uses the LNKGET and LNKSET instructions to achieve
161 atomicity. LNKGET/LNKSET are load-link/store-conditional instructions.
162 Choose this option if your system requires low latency.
163
164config METAG_ATOMICITY_LOCK1
165 depends on SMP
166 bool "lock1"
167 help
168 This option uses the LOCK1 instruction for atomicity. This is mainly
169 provided as a debugging aid if the lnkget/lnkset atomicity primitive
170 isn't working properly.
171
172endchoice
173
174config METAG_FPU
175 bool "FPU Support"
176 depends on METAG_META21
177 default y
178 help
179 This option allows processes to use FPU hardware available with this
180 CPU. If this option is not enabled FPU registers will not be saved
181 and restored on context-switch.
182
183 If you plan on running programs which are compiled to use hard floats
184 say Y here.
185
186config METAG_DSP
187 bool "DSP Support"
188 help
189 This option allows processes to use DSP hardware available
190 with this CPU. If this option is not enabled DSP registers
191 will not be saved and restored on context-switch.
192
193 If you plan on running DSP programs say Y here.
194
195config METAG_PERFCOUNTER_IRQS
196 bool "PerfCounters interrupt support"
197 depends on METAG_META21
198 help
199 This option enables using interrupts to collect information from
200 Performance Counters. This option is supported in new META21
201 (starting from HTP265).
202
203 When disabled, Performance Counters information will be collected
204 based on Timer Interrupt.
205
206config HW_PERF_EVENTS
207 def_bool METAG_PERFCOUNTER_IRQS && PERF_EVENTS
208
209config METAG_DA
210 bool "DA support"
211 help
212 Say Y if you plan to use a DA debug adapter with Linux. The presence
213 of the DA will be detected automatically at boot, so it is safe to say
214 Y to this option even when booting without a DA.
215
216 This enables support for services provided by DA JTAG debug adapters,
217 such as:
218 - communication over DA channels (such as the console driver).
219 - use of the DA filesystem.
220
221menu "Boot options"
222
223config METAG_BUILTIN_DTB
224 bool "Embed DTB in kernel image"
225 default y
226 help
227 Embeds a device tree binary in the kernel image.
228
229config METAG_BUILTIN_DTB_NAME
230 string "Built in DTB"
231 depends on METAG_BUILTIN_DTB
232 help
233 Set the name of the DTB to embed (leave blank to pick one
234 automatically based on kernel configuration).
235
236config CMDLINE_BOOL
237 bool "Default bootloader kernel arguments"
238
239config CMDLINE
240 string "Kernel command line"
241 depends on CMDLINE_BOOL
242 help
243 On some architectures there is currently no way for the boot loader
244 to pass arguments to the kernel. For these architectures, you should
245 supply some command-line options at build time by entering them
246 here.
247
248config CMDLINE_FORCE
249 bool "Force default kernel command string"
250 depends on CMDLINE_BOOL
251 help
252 Set this to have arguments from the default kernel command string
253 override those passed by the boot loader.
254
255endmenu
256
257source "kernel/Kconfig.preempt"
258
259source kernel/Kconfig.hz
260
261endmenu
262
263menu "Power management options"
264
265source kernel/power/Kconfig
266
267endmenu
268
269menu "Executable file formats"
270
271source "fs/Kconfig.binfmt"
272
273endmenu
274
275source "net/Kconfig"
276
277source "drivers/Kconfig"
278
279source "fs/Kconfig"
280
281source "arch/metag/Kconfig.debug"
282
283source "security/Kconfig"
284
285source "crypto/Kconfig"
286
287source "lib/Kconfig"
diff --git a/arch/metag/Kconfig.debug b/arch/metag/Kconfig.debug
deleted file mode 100644
index ac4516c605db..000000000000
--- a/arch/metag/Kconfig.debug
+++ /dev/null
@@ -1,34 +0,0 @@
1# SPDX-License-Identifier: GPL-2.0
2menu "Kernel hacking"
3
4config TRACE_IRQFLAGS_SUPPORT
5 bool
6 default y
7
8source "lib/Kconfig.debug"
9
10config 4KSTACKS
11 bool "Use 4Kb for kernel stacks instead of 8Kb"
12 depends on DEBUG_KERNEL
13 help
14 If you say Y here the kernel will use a 4Kb stacksize for the
15 kernel stack attached to each process/thread. This facilitates
16 running more threads on a system and also reduces the pressure
17 on the VM subsystem for higher order allocations. This option
18 will also use IRQ stacks to compensate for the reduced stackspace.
19
20config METAG_FUNCTION_TRACE
21 bool "Output Meta real-time trace data for function entry/exit"
22 help
23 If you say Y here the kernel will use the Meta hardware trace
24 unit to output information about function entry and exit that
25 can be used by a debugger for profiling and call-graphs.
26
27config METAG_POISON_CATCH_BUFFERS
28 bool "Poison catch buffer contents on kernel entry"
29 help
30 If you say Y here the kernel will write poison data to the
31 catch buffer registers on kernel entry. This will make any
32 problem with catch buffer handling much more apparent.
33
34endmenu
diff --git a/arch/metag/Kconfig.soc b/arch/metag/Kconfig.soc
deleted file mode 100644
index c521f0e00d8e..000000000000
--- a/arch/metag/Kconfig.soc
+++ /dev/null
@@ -1,69 +0,0 @@
1# SPDX-License-Identifier: GPL-2.0
2choice
3 prompt "SoC Type"
4 default META21_FPGA
5
6config META12_FPGA
7 bool "Meta 1.2 FPGA"
8 select METAG_META12
9 help
10 This is a Meta 1.2 FPGA bitstream, just a bare CPU.
11
12config META21_FPGA
13 bool "Meta 2.1 FPGA"
14 select METAG_META21
15 help
16 This is a Meta 2.1 FPGA bitstream, just a bare CPU.
17
18config SOC_TZ1090
19 bool "Toumaz Xenif TZ1090 SoC (Comet)"
20 select IMGPDC_IRQ
21 select METAG_LNKGET_AROUND_CACHE
22 select METAG_META21
23 select METAG_SMP_WRITE_REORDERING
24 select PINCTRL
25 select PINCTRL_TZ1090
26 select PINCTRL_TZ1090_PDC
27 help
28 This is a Toumaz Technology Xenif TZ1090 (A.K.A. Comet) SoC containing
29 a 2-threaded HTP.
30
31endchoice
32
33menu "SoC configuration"
34
35if METAG_META21
36
37# Meta 2.x specific options
38
39config METAG_META21_MMU
40 bool "Meta 2.x MMU mode"
41 default y
42 help
43 Use the Meta 2.x MMU in extended mode.
44
45config METAG_UNALIGNED
46 bool "Meta 2.x unaligned access checking"
47 default y
48 help
49 All memory accesses will be checked for alignment and an exception
50 raised on unaligned accesses. This feature does cost performance
51 but without it there will be no notification of this type of error.
52
53config METAG_USER_TCM
54 bool "Meta on-chip memory support for userland"
55 select GENERIC_ALLOCATOR
56 default y
57 help
58 Allow the on-chip memories of Meta SoCs to be used by user
59 applications.
60
61endif
62
63config METAG_HALT_ON_PANIC
64 bool "Halt the core on panic"
65 help
66 Halt the core when a panic occurs. This is useful when running
67 pre-production silicon or in an FPGA environment.
68
69endmenu
diff --git a/arch/metag/Makefile b/arch/metag/Makefile
deleted file mode 100644
index 033a58214119..000000000000
--- a/arch/metag/Makefile
+++ /dev/null
@@ -1,89 +0,0 @@
1#
2# metag/Makefile
3#
4# This file is included by the global makefile so that you can add your own
5# architecture-specific flags and dependencies. Remember to do have actions
6# for "archclean" cleaning up for this architecture.
7#
8# This file is subject to the terms and conditions of the GNU General Public
9# License. See the file "COPYING" in the main directory of this archive
10# for more details.
11#
12# Copyright (C) 1994 by Linus Torvalds
13# 2007,2008,2012 by Imagination Technologies Ltd.
14#
15
16LDFLAGS :=
17OBJCOPYFLAGS := -O binary -R .note -R .comment -S
18
19checkflags-$(CONFIG_METAG_META12) += -DMETAC_1_2
20checkflags-$(CONFIG_METAG_META21) += -DMETAC_2_1
21CHECKFLAGS += -D__metag__ $(checkflags-y)
22
23KBUILD_DEFCONFIG := tz1090_defconfig
24
25sflags-$(CONFIG_METAG_META12) += -mmetac=1.2
26ifeq ($(CONFIG_METAG_META12),y)
27# Only use TBI API 1.4 if DSP is enabled for META12 cores
28sflags-$(CONFIG_METAG_DSP) += -DTBI_1_4
29endif
30sflags-$(CONFIG_METAG_META21) += -mmetac=2.1 -DTBI_1_4
31
32cflags-$(CONFIG_METAG_FUNCTION_TRACE) += -mhwtrace-leaf -mhwtrace-retpc
33cflags-$(CONFIG_METAG_META21) += -mextensions=bex
34
35KBUILD_CFLAGS += -pipe
36KBUILD_CFLAGS += -ffunction-sections
37
38KBUILD_CFLAGS += $(sflags-y) $(cflags-y)
39KBUILD_AFLAGS += $(sflags-y)
40
41LDFLAGS_vmlinux := $(ldflags-y)
42
43head-y := arch/metag/kernel/head.o
44
45core-y += arch/metag/boot/dts/
46core-y += arch/metag/kernel/
47core-y += arch/metag/mm/
48
49libs-y += arch/metag/lib/
50libs-y += arch/metag/tbx/
51
52drivers-$(CONFIG_OPROFILE) += arch/metag/oprofile/
53
54boot := arch/metag/boot
55
56boot_targets += uImage
57boot_targets += uImage.gz
58boot_targets += uImage.bz2
59boot_targets += uImage.xz
60boot_targets += uImage.lzo
61boot_targets += uImage.bin
62boot_targets += vmlinux.bin
63
64PHONY += $(boot_targets)
65
66all: vmlinux.bin
67
68$(boot_targets): vmlinux
69 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
70
71%.dtb %.dtb.S %.dtb.o: scripts
72 $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
73
74dtbs: scripts
75 $(Q)$(MAKE) $(build)=$(boot)/dts
76
77archclean:
78 $(Q)$(MAKE) $(clean)=$(boot)
79
80define archhelp
81 echo '* vmlinux.bin - Binary kernel image (arch/$(ARCH)/boot/vmlinux.bin)'
82 @echo ' uImage - Alias to bootable U-Boot image'
83 @echo ' uImage.bin - Kernel-only image for U-Boot (bin)'
84 @echo ' uImage.gz - Kernel-only image for U-Boot (gzip)'
85 @echo ' uImage.bz2 - Kernel-only image for U-Boot (bzip2)'
86 @echo ' uImage.xz - Kernel-only image for U-Boot (xz)'
87 @echo ' uImage.lzo - Kernel-only image for U-Boot (lzo)'
88 @echo ' dtbs - Build device tree blobs for enabled boards'
89endef
diff --git a/arch/metag/boot/.gitignore b/arch/metag/boot/.gitignore
deleted file mode 100644
index 6c662ddb909a..000000000000
--- a/arch/metag/boot/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
1vmlinux*
2uImage*
3ramdisk.*
diff --git a/arch/metag/boot/Makefile b/arch/metag/boot/Makefile
deleted file mode 100644
index 5a1f88cf91e3..000000000000
--- a/arch/metag/boot/Makefile
+++ /dev/null
@@ -1,68 +0,0 @@
1#
2# This file is subject to the terms and conditions of the GNU General Public
3# License. See the file "COPYING" in the main directory of this archive
4# for more details.
5#
6# Copyright (C) 2007,2012 Imagination Technologies Ltd.
7#
8
9suffix-y := bin
10suffix-$(CONFIG_KERNEL_GZIP) := gz
11suffix-$(CONFIG_KERNEL_BZIP2) := bz2
12suffix-$(CONFIG_KERNEL_XZ) := xz
13suffix-$(CONFIG_KERNEL_LZO) := lzo
14
15targets += vmlinux.bin
16targets += uImage
17targets += uImage.gz
18targets += uImage.bz2
19targets += uImage.xz
20targets += uImage.lzo
21targets += uImage.bin
22
23extra-y += vmlinux.bin
24extra-y += vmlinux.bin.gz
25extra-y += vmlinux.bin.bz2
26extra-y += vmlinux.bin.xz
27extra-y += vmlinux.bin.lzo
28
29UIMAGE_LOADADDR = $(CONFIG_PAGE_OFFSET)
30
31ifeq ($(CONFIG_FUNCTION_TRACER),y)
32orig_cflags := $(KBUILD_CFLAGS)
33KBUILD_CFLAGS = $(subst -pg, , $(orig_cflags))
34endif
35
36$(obj)/vmlinux.bin: vmlinux FORCE
37 $(call if_changed,objcopy)
38
39$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
40 $(call if_changed,gzip)
41
42$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
43 $(call if_changed,bzip2)
44
45$(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
46 $(call if_changed,xzkern)
47
48$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
49 $(call if_changed,lzo)
50
51$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
52 $(call if_changed,uimage,gzip)
53
54$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
55 $(call if_changed,uimage,bzip2)
56
57$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz FORCE
58 $(call if_changed,uimage,xz)
59
60$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
61 $(call if_changed,uimage,lzo)
62
63$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
64 $(call if_changed,uimage,none)
65
66$(obj)/uImage: $(obj)/uImage.$(suffix-y)
67 @ln -sf $(notdir $<) $@
68 @echo ' Image $@ is ready'
diff --git a/arch/metag/boot/dts/Makefile b/arch/metag/boot/dts/Makefile
deleted file mode 100644
index f0a180f62766..000000000000
--- a/arch/metag/boot/dts/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
1# SPDX-License-Identifier: GPL-2.0
2dtb-y += skeleton.dtb
3dtb-y += tz1090_generic.dtb
4
5# Built-in dtb
6builtindtb-y := skeleton
7builtindtb-$(CONFIG_SOC_TZ1090) := tz1090_generic
8
9ifneq ($(CONFIG_METAG_BUILTIN_DTB_NAME),"")
10 builtindtb-y := $(patsubst "%",%,$(CONFIG_METAG_BUILTIN_DTB_NAME))
11endif
12
13dtb-$(CONFIG_METAG_BUILTIN_DTB) += $(builtindtb-y).dtb
14obj-$(CONFIG_METAG_BUILTIN_DTB) += $(builtindtb-y).dtb.o
15
16.SECONDARY: $(obj)/$(builtindtb-y).dtb.S
diff --git a/arch/metag/boot/dts/skeleton.dts b/arch/metag/boot/dts/skeleton.dts
deleted file mode 100644
index 7a49aeb365d0..000000000000
--- a/arch/metag/boot/dts/skeleton.dts
+++ /dev/null
@@ -1,10 +0,0 @@
1/*
2 * Copyright (C) 2012 Imagination Technologies Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8/dts-v1/;
9
10#include "skeleton.dtsi"
diff --git a/arch/metag/boot/dts/skeleton.dtsi b/arch/metag/boot/dts/skeleton.dtsi
deleted file mode 100644
index 43e2ffe73c27..000000000000
--- a/arch/metag/boot/dts/skeleton.dtsi
+++ /dev/null
@@ -1,15 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Skeleton device tree; the bare minimum needed to boot; just include and
4 * add a compatible value. The bootloader will typically populate the memory
5 * node.
6 */
7
8/ {
9 compatible = "img,meta";
10 #address-cells = <1>;
11 #size-cells = <1>;
12 chosen { };
13 aliases { };
14 memory { device_type = "memory"; reg = <0 0>; };
15};
diff --git a/arch/metag/boot/dts/tz1090.dtsi b/arch/metag/boot/dts/tz1090.dtsi
deleted file mode 100644
index 24ea7d2e9138..000000000000
--- a/arch/metag/boot/dts/tz1090.dtsi
+++ /dev/null
@@ -1,108 +0,0 @@
1/*
2 * Copyright (C) 2012 Imagination Technologies Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include "skeleton.dtsi"
10
11#include <dt-bindings/interrupt-controller/irq.h>
12
13/ {
14 compatible = "toumaz,tz1090", "img,meta";
15
16 interrupt-parent = <&intc>;
17
18 intc: interrupt-controller {
19 compatible = "img,meta-intc";
20 interrupt-controller;
21 #interrupt-cells = <2>;
22 num-banks = <2>;
23 };
24
25 soc {
26 compatible = "simple-bus";
27 #address-cells = <1>;
28 #size-cells = <1>;
29 ranges;
30
31 pdc: pdc@0x02006000 {
32 interrupt-controller;
33 #interrupt-cells = <2>;
34
35 reg = <0x02006000 0x1000>;
36 compatible = "img,pdc-intc";
37
38 num-perips = <3>;
39 num-syswakes = <3>;
40
41 interrupts = <18 IRQ_TYPE_LEVEL_HIGH>, /* Syswakes */
42 <30 IRQ_TYPE_LEVEL_HIGH>, /* Perip 0 (RTC) */
43 <29 IRQ_TYPE_LEVEL_HIGH>, /* Perip 1 (IR) */
44 <31 IRQ_TYPE_LEVEL_HIGH>; /* Perip 2 (WDT) */
45 };
46
47 pinctrl: pinctrl@02005800 {
48 #gpio-range-cells = <3>;
49 compatible = "img,tz1090-pinctrl";
50 reg = <0x02005800 0xe4>;
51 };
52
53 pdc_pinctrl: pinctrl@02006500 {
54 #gpio-range-cells = <3>;
55 compatible = "img,tz1090-pdc-pinctrl";
56 reg = <0x02006500 0x100>;
57 };
58
59 gpios: gpios@02005800 {
60 #address-cells = <1>;
61 #size-cells = <0>;
62 compatible = "img,tz1090-gpio";
63 reg = <0x02005800 0x90>;
64
65 gpios0: bank@0 {
66 gpio-controller;
67 interrupt-controller;
68 #gpio-cells = <2>;
69 #interrupt-cells = <2>;
70 reg = <0>;
71 interrupts = <13 IRQ_TYPE_LEVEL_HIGH>;
72 gpio-ranges = <&pinctrl 0 0 30>;
73 };
74 gpios1: bank@1 {
75 gpio-controller;
76 interrupt-controller;
77 #gpio-cells = <2>;
78 #interrupt-cells = <2>;
79 reg = <1>;
80 interrupts = <14 IRQ_TYPE_LEVEL_HIGH>;
81 gpio-ranges = <&pinctrl 0 30 30>;
82 };
83 gpios2: bank@2 {
84 gpio-controller;
85 interrupt-controller;
86 #gpio-cells = <2>;
87 #interrupt-cells = <2>;
88 reg = <2>;
89 interrupts = <15 IRQ_TYPE_LEVEL_HIGH>;
90 gpio-ranges = <&pinctrl 0 60 30>;
91 };
92 };
93
94 pdc_gpios: gpios@02006500 {
95 gpio-controller;
96 #gpio-cells = <2>;
97
98 compatible = "img,tz1090-pdc-gpio";
99 reg = <0x02006500 0x100>;
100
101 interrupt-parent = <&pdc>;
102 interrupts = <8 IRQ_TYPE_NONE>,
103 <9 IRQ_TYPE_NONE>,
104 <10 IRQ_TYPE_NONE>;
105 gpio-ranges = <&pdc_pinctrl 0 0 7>;
106 };
107 };
108};
diff --git a/arch/metag/boot/dts/tz1090_generic.dts b/arch/metag/boot/dts/tz1090_generic.dts
deleted file mode 100644
index f96090955964..000000000000
--- a/arch/metag/boot/dts/tz1090_generic.dts
+++ /dev/null
@@ -1,10 +0,0 @@
1/*
2 * Copyright (C) 2012 Imagination Technologies Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8/dts-v1/;
9
10#include "tz1090.dtsi"
diff --git a/arch/metag/configs/meta1_defconfig b/arch/metag/configs/meta1_defconfig
deleted file mode 100644
index 01cd67e4403d..000000000000
--- a/arch/metag/configs/meta1_defconfig
+++ /dev/null
@@ -1,39 +0,0 @@
1# CONFIG_LOCALVERSION_AUTO is not set
2# CONFIG_SWAP is not set
3CONFIG_SYSFS_DEPRECATED=y
4CONFIG_SYSFS_DEPRECATED_V2=y
5CONFIG_KALLSYMS_ALL=y
6# CONFIG_ELF_CORE is not set
7CONFIG_SLAB=y
8# CONFIG_BLK_DEV_BSG is not set
9CONFIG_PARTITION_ADVANCED=y
10# CONFIG_MSDOS_PARTITION is not set
11# CONFIG_IOSCHED_DEADLINE is not set
12# CONFIG_IOSCHED_CFQ is not set
13CONFIG_FLATMEM_MANUAL=y
14CONFIG_META12_FPGA=y
15CONFIG_METAG_DA=y
16CONFIG_HZ_100=y
17CONFIG_DEVTMPFS=y
18CONFIG_DEVTMPFS_MOUNT=y
19# CONFIG_STANDALONE is not set
20# CONFIG_PREVENT_FIRMWARE_BUILD is not set
21# CONFIG_FW_LOADER is not set
22CONFIG_BLK_DEV_RAM=y
23CONFIG_BLK_DEV_RAM_COUNT=1
24CONFIG_BLK_DEV_RAM_SIZE=16384
25# CONFIG_INPUT is not set
26# CONFIG_SERIO is not set
27# CONFIG_VT is not set
28# CONFIG_LEGACY_PTYS is not set
29CONFIG_DA_TTY=y
30CONFIG_DA_CONSOLE=y
31# CONFIG_DEVKMEM is not set
32# CONFIG_HW_RANDOM is not set
33# CONFIG_HWMON is not set
34# CONFIG_USB_SUPPORT is not set
35# CONFIG_DNOTIFY is not set
36CONFIG_TMPFS=y
37# CONFIG_MISC_FILESYSTEMS is not set
38# CONFIG_SCHED_DEBUG is not set
39CONFIG_DEBUG_INFO=y
diff --git a/arch/metag/configs/meta2_defconfig b/arch/metag/configs/meta2_defconfig
deleted file mode 100644
index 643392ba7ed5..000000000000
--- a/arch/metag/configs/meta2_defconfig
+++ /dev/null
@@ -1,40 +0,0 @@
1# CONFIG_LOCALVERSION_AUTO is not set
2# CONFIG_SWAP is not set
3CONFIG_SYSVIPC=y
4CONFIG_SYSFS_DEPRECATED=y
5CONFIG_SYSFS_DEPRECATED_V2=y
6CONFIG_KALLSYMS_ALL=y
7# CONFIG_ELF_CORE is not set
8CONFIG_SLAB=y
9# CONFIG_BLK_DEV_BSG is not set
10CONFIG_PARTITION_ADVANCED=y
11# CONFIG_MSDOS_PARTITION is not set
12# CONFIG_IOSCHED_DEADLINE is not set
13# CONFIG_IOSCHED_CFQ is not set
14CONFIG_METAG_L2C=y
15CONFIG_FLATMEM_MANUAL=y
16CONFIG_METAG_HALT_ON_PANIC=y
17CONFIG_METAG_DA=y
18CONFIG_HZ_100=y
19CONFIG_DEVTMPFS=y
20# CONFIG_STANDALONE is not set
21# CONFIG_PREVENT_FIRMWARE_BUILD is not set
22# CONFIG_FW_LOADER is not set
23CONFIG_BLK_DEV_RAM=y
24CONFIG_BLK_DEV_RAM_COUNT=1
25CONFIG_BLK_DEV_RAM_SIZE=16384
26# CONFIG_INPUT is not set
27# CONFIG_SERIO is not set
28# CONFIG_VT is not set
29# CONFIG_LEGACY_PTYS is not set
30CONFIG_DA_TTY=y
31CONFIG_DA_CONSOLE=y
32# CONFIG_DEVKMEM is not set
33# CONFIG_HW_RANDOM is not set
34# CONFIG_HWMON is not set
35# CONFIG_USB_SUPPORT is not set
36# CONFIG_DNOTIFY is not set
37CONFIG_TMPFS=y
38# CONFIG_MISC_FILESYSTEMS is not set
39# CONFIG_SCHED_DEBUG is not set
40CONFIG_DEBUG_INFO=y
diff --git a/arch/metag/configs/meta2_smp_defconfig b/arch/metag/configs/meta2_smp_defconfig
deleted file mode 100644
index f3306737da20..000000000000
--- a/arch/metag/configs/meta2_smp_defconfig
+++ /dev/null
@@ -1,41 +0,0 @@
1# CONFIG_LOCALVERSION_AUTO is not set
2# CONFIG_SWAP is not set
3CONFIG_SYSVIPC=y
4CONFIG_SYSFS_DEPRECATED=y
5CONFIG_SYSFS_DEPRECATED_V2=y
6CONFIG_KALLSYMS_ALL=y
7# CONFIG_ELF_CORE is not set
8CONFIG_SLAB=y
9# CONFIG_BLK_DEV_BSG is not set
10CONFIG_PARTITION_ADVANCED=y
11# CONFIG_MSDOS_PARTITION is not set
12# CONFIG_IOSCHED_DEADLINE is not set
13# CONFIG_IOSCHED_CFQ is not set
14CONFIG_METAG_L2C=y
15CONFIG_FLATMEM_MANUAL=y
16CONFIG_METAG_HALT_ON_PANIC=y
17CONFIG_SMP=y
18CONFIG_METAG_DA=y
19CONFIG_HZ_100=y
20CONFIG_DEVTMPFS=y
21# CONFIG_STANDALONE is not set
22# CONFIG_PREVENT_FIRMWARE_BUILD is not set
23# CONFIG_FW_LOADER is not set
24CONFIG_BLK_DEV_RAM=y
25CONFIG_BLK_DEV_RAM_COUNT=1
26CONFIG_BLK_DEV_RAM_SIZE=16384
27# CONFIG_INPUT is not set
28# CONFIG_SERIO is not set
29# CONFIG_VT is not set
30# CONFIG_LEGACY_PTYS is not set
31CONFIG_DA_TTY=y
32CONFIG_DA_CONSOLE=y
33# CONFIG_DEVKMEM is not set
34# CONFIG_HW_RANDOM is not set
35# CONFIG_HWMON is not set
36# CONFIG_USB_SUPPORT is not set
37# CONFIG_DNOTIFY is not set
38CONFIG_TMPFS=y
39# CONFIG_MISC_FILESYSTEMS is not set
40# CONFIG_SCHED_DEBUG is not set
41CONFIG_DEBUG_INFO=y
diff --git a/arch/metag/configs/tz1090_defconfig b/arch/metag/configs/tz1090_defconfig
deleted file mode 100644
index 9f9316a6df27..000000000000
--- a/arch/metag/configs/tz1090_defconfig
+++ /dev/null
@@ -1,42 +0,0 @@
1# CONFIG_LOCALVERSION_AUTO is not set
2# CONFIG_SWAP is not set
3CONFIG_SYSVIPC=y
4CONFIG_SYSFS_DEPRECATED=y
5CONFIG_SYSFS_DEPRECATED_V2=y
6CONFIG_KALLSYMS_ALL=y
7# CONFIG_ELF_CORE is not set
8CONFIG_SLAB=y
9# CONFIG_BLK_DEV_BSG is not set
10CONFIG_PARTITION_ADVANCED=y
11# CONFIG_MSDOS_PARTITION is not set
12# CONFIG_IOSCHED_DEADLINE is not set
13# CONFIG_IOSCHED_CFQ is not set
14CONFIG_FLATMEM_MANUAL=y
15CONFIG_SOC_TZ1090=y
16CONFIG_METAG_HALT_ON_PANIC=y
17# CONFIG_METAG_FPU is not set
18CONFIG_METAG_DA=y
19CONFIG_HZ_100=y
20CONFIG_DEVTMPFS=y
21# CONFIG_STANDALONE is not set
22# CONFIG_PREVENT_FIRMWARE_BUILD is not set
23# CONFIG_FW_LOADER is not set
24CONFIG_BLK_DEV_RAM=y
25CONFIG_BLK_DEV_RAM_COUNT=1
26CONFIG_BLK_DEV_RAM_SIZE=16384
27# CONFIG_INPUT is not set
28# CONFIG_SERIO is not set
29# CONFIG_VT is not set
30# CONFIG_LEGACY_PTYS is not set
31CONFIG_DA_TTY=y
32CONFIG_DA_CONSOLE=y
33# CONFIG_DEVKMEM is not set
34# CONFIG_HW_RANDOM is not set
35CONFIG_GPIOLIB=y
36# CONFIG_HWMON is not set
37# CONFIG_USB_SUPPORT is not set
38# CONFIG_DNOTIFY is not set
39CONFIG_TMPFS=y
40# CONFIG_MISC_FILESYSTEMS is not set
41# CONFIG_SCHED_DEBUG is not set
42CONFIG_DEBUG_INFO=y
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild
deleted file mode 100644
index 913c779979df..000000000000
--- a/arch/metag/include/asm/Kbuild
+++ /dev/null
@@ -1,33 +0,0 @@
1generic-y += bugs.h
2generic-y += current.h
3generic-y += device.h
4generic-y += dma.h
5generic-y += emergency-restart.h
6generic-y += exec.h
7generic-y += extable.h
8generic-y += fb.h
9generic-y += futex.h
10generic-y += hardirq.h
11generic-y += hw_irq.h
12generic-y += irq_regs.h
13generic-y += irq_work.h
14generic-y += kdebug.h
15generic-y += kmap_types.h
16generic-y += kprobes.h
17generic-y += local.h
18generic-y += local64.h
19generic-y += mcs_spinlock.h
20generic-y += mm-arch-hooks.h
21generic-y += pci.h
22generic-y += percpu.h
23generic-y += preempt.h
24generic-y += sections.h
25generic-y += serial.h
26generic-y += switch_to.h
27generic-y += timex.h
28generic-y += trace_clock.h
29generic-y += unaligned.h
30generic-y += user.h
31generic-y += vga.h
32generic-y += word-at-a-time.h
33generic-y += xor.h
diff --git a/arch/metag/include/asm/atomic.h b/arch/metag/include/asm/atomic.h
deleted file mode 100644
index 97ae189c2dd8..000000000000
--- a/arch/metag/include/asm/atomic.h
+++ /dev/null
@@ -1,49 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_METAG_ATOMIC_H
3#define __ASM_METAG_ATOMIC_H
4
5#include <linux/compiler.h>
6#include <linux/types.h>
7#include <asm/cmpxchg.h>
8#include <asm/barrier.h>
9
10#if defined(CONFIG_METAG_ATOMICITY_IRQSOFF)
11/* The simple UP case. */
12#include <asm-generic/atomic.h>
13#else
14
15#if defined(CONFIG_METAG_ATOMICITY_LOCK1)
16#include <asm/atomic_lock1.h>
17#else
18#include <asm/atomic_lnkget.h>
19#endif
20
21#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
22
23#define atomic_dec_return(v) atomic_sub_return(1, (v))
24#define atomic_inc_return(v) atomic_add_return(1, (v))
25
26/*
27 * atomic_inc_and_test - increment and test
28 * @v: pointer of type atomic_t
29 *
30 * Atomically increments @v by 1
31 * and returns true if the result is zero, or false for all
32 * other cases.
33 */
34#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
35
36#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
37#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
38
39#define atomic_inc(v) atomic_add(1, (v))
40#define atomic_dec(v) atomic_sub(1, (v))
41
42#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
43#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
44
45#endif
46
47#include <asm-generic/atomic64.h>
48
49#endif /* __ASM_METAG_ATOMIC_H */
diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h
deleted file mode 100644
index 17e8c61c946d..000000000000
--- a/arch/metag/include/asm/atomic_lnkget.h
+++ /dev/null
@@ -1,204 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_METAG_ATOMIC_LNKGET_H
3#define __ASM_METAG_ATOMIC_LNKGET_H
4
5#define ATOMIC_INIT(i) { (i) }
6
7#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
8
9#include <linux/compiler.h>
10
11#include <asm/barrier.h>
12
13/*
14 * None of these asm statements clobber memory as LNKSET writes around
15 * the cache so the memory it modifies cannot safely be read by any means
16 * other than these accessors.
17 */
18
19static inline int atomic_read(const atomic_t *v)
20{
21 int temp;
22
23 asm volatile (
24 "LNKGETD %0, [%1]\n"
25 : "=da" (temp)
26 : "da" (&v->counter));
27
28 return temp;
29}
30
31#define ATOMIC_OP(op) \
32static inline void atomic_##op(int i, atomic_t *v) \
33{ \
34 int temp; \
35 \
36 asm volatile ( \
37 "1: LNKGETD %0, [%1]\n" \
38 " " #op " %0, %0, %2\n" \
39 " LNKSETD [%1], %0\n" \
40 " DEFR %0, TXSTAT\n" \
41 " ANDT %0, %0, #HI(0x3f000000)\n" \
42 " CMPT %0, #HI(0x02000000)\n" \
43 " BNZ 1b\n" \
44 : "=&d" (temp) \
45 : "da" (&v->counter), "bd" (i) \
46 : "cc"); \
47} \
48
49#define ATOMIC_OP_RETURN(op) \
50static inline int atomic_##op##_return(int i, atomic_t *v) \
51{ \
52 int result, temp; \
53 \
54 smp_mb(); \
55 \
56 asm volatile ( \
57 "1: LNKGETD %1, [%2]\n" \
58 " " #op " %1, %1, %3\n" \
59 " LNKSETD [%2], %1\n" \
60 " DEFR %0, TXSTAT\n" \
61 " ANDT %0, %0, #HI(0x3f000000)\n" \
62 " CMPT %0, #HI(0x02000000)\n" \
63 " BNZ 1b\n" \
64 : "=&d" (temp), "=&da" (result) \
65 : "da" (&v->counter), "br" (i) \
66 : "cc"); \
67 \
68 smp_mb(); \
69 \
70 return result; \
71}
72
73#define ATOMIC_FETCH_OP(op) \
74static inline int atomic_fetch_##op(int i, atomic_t *v) \
75{ \
76 int result, temp; \
77 \
78 smp_mb(); \
79 \
80 asm volatile ( \
81 "1: LNKGETD %1, [%2]\n" \
82 " " #op " %0, %1, %3\n" \
83 " LNKSETD [%2], %0\n" \
84 " DEFR %0, TXSTAT\n" \
85 " ANDT %0, %0, #HI(0x3f000000)\n" \
86 " CMPT %0, #HI(0x02000000)\n" \
87 " BNZ 1b\n" \
88 : "=&d" (temp), "=&d" (result) \
89 : "da" (&v->counter), "bd" (i) \
90 : "cc"); \
91 \
92 smp_mb(); \
93 \
94 return result; \
95}
96
97#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
98
99ATOMIC_OPS(add)
100ATOMIC_OPS(sub)
101
102#undef ATOMIC_OPS
103#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
104
105ATOMIC_OPS(and)
106ATOMIC_OPS(or)
107ATOMIC_OPS(xor)
108
109#undef ATOMIC_OPS
110#undef ATOMIC_FETCH_OP
111#undef ATOMIC_OP_RETURN
112#undef ATOMIC_OP
113
114static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
115{
116 int result, temp;
117
118 smp_mb();
119
120 asm volatile (
121 "1: LNKGETD %1, [%2]\n"
122 " CMP %1, %3\n"
123 " LNKSETDEQ [%2], %4\n"
124 " BNE 2f\n"
125 " DEFR %0, TXSTAT\n"
126 " ANDT %0, %0, #HI(0x3f000000)\n"
127 " CMPT %0, #HI(0x02000000)\n"
128 " BNZ 1b\n"
129 "2:\n"
130 : "=&d" (temp), "=&d" (result)
131 : "da" (&v->counter), "bd" (old), "da" (new)
132 : "cc");
133
134 smp_mb();
135
136 return result;
137}
138
139static inline int atomic_xchg(atomic_t *v, int new)
140{
141 int temp, old;
142
143 asm volatile (
144 "1: LNKGETD %1, [%2]\n"
145 " LNKSETD [%2], %3\n"
146 " DEFR %0, TXSTAT\n"
147 " ANDT %0, %0, #HI(0x3f000000)\n"
148 " CMPT %0, #HI(0x02000000)\n"
149 " BNZ 1b\n"
150 : "=&d" (temp), "=&d" (old)
151 : "da" (&v->counter), "da" (new)
152 : "cc");
153
154 return old;
155}
156
157static inline int __atomic_add_unless(atomic_t *v, int a, int u)
158{
159 int result, temp;
160
161 smp_mb();
162
163 asm volatile (
164 "1: LNKGETD %1, [%2]\n"
165 " CMP %1, %3\n"
166 " ADD %0, %1, %4\n"
167 " LNKSETDNE [%2], %0\n"
168 " BEQ 2f\n"
169 " DEFR %0, TXSTAT\n"
170 " ANDT %0, %0, #HI(0x3f000000)\n"
171 " CMPT %0, #HI(0x02000000)\n"
172 " BNZ 1b\n"
173 "2:\n"
174 : "=&d" (temp), "=&d" (result)
175 : "da" (&v->counter), "bd" (u), "bd" (a)
176 : "cc");
177
178 smp_mb();
179
180 return result;
181}
182
183static inline int atomic_sub_if_positive(int i, atomic_t *v)
184{
185 int result, temp;
186
187 asm volatile (
188 "1: LNKGETD %1, [%2]\n"
189 " SUBS %1, %1, %3\n"
190 " LNKSETDGE [%2], %1\n"
191 " BLT 2f\n"
192 " DEFR %0, TXSTAT\n"
193 " ANDT %0, %0, #HI(0x3f000000)\n"
194 " CMPT %0, #HI(0x02000000)\n"
195 " BNZ 1b\n"
196 "2:\n"
197 : "=&d" (temp), "=&da" (result)
198 : "da" (&v->counter), "bd" (i)
199 : "cc");
200
201 return result;
202}
203
204#endif /* __ASM_METAG_ATOMIC_LNKGET_H */
diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h
deleted file mode 100644
index 2ce8fa3a79c2..000000000000
--- a/arch/metag/include/asm/atomic_lock1.h
+++ /dev/null
@@ -1,157 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_METAG_ATOMIC_LOCK1_H
3#define __ASM_METAG_ATOMIC_LOCK1_H
4
5#define ATOMIC_INIT(i) { (i) }
6
7#include <linux/compiler.h>
8
9#include <asm/barrier.h>
10#include <asm/global_lock.h>
11
12static inline int atomic_read(const atomic_t *v)
13{
14 return READ_ONCE((v)->counter);
15}
16
17/*
18 * atomic_set needs to be take the lock to protect atomic_add_unless from a
19 * possible race, as it reads the counter twice:
20 *
21 * CPU0 CPU1
22 * atomic_add_unless(1, 0)
23 * ret = v->counter (non-zero)
24 * if (ret != u) v->counter = 0
25 * v->counter += 1 (counter set to 1)
26 *
27 * Making atomic_set take the lock ensures that ordering and logical
28 * consistency is preserved.
29 */
30static inline int atomic_set(atomic_t *v, int i)
31{
32 unsigned long flags;
33
34 __global_lock1(flags);
35 fence();
36 v->counter = i;
37 __global_unlock1(flags);
38 return i;
39}
40
41#define atomic_set_release(v, i) atomic_set((v), (i))
42
43#define ATOMIC_OP(op, c_op) \
44static inline void atomic_##op(int i, atomic_t *v) \
45{ \
46 unsigned long flags; \
47 \
48 __global_lock1(flags); \
49 fence(); \
50 v->counter c_op i; \
51 __global_unlock1(flags); \
52} \
53
54#define ATOMIC_OP_RETURN(op, c_op) \
55static inline int atomic_##op##_return(int i, atomic_t *v) \
56{ \
57 unsigned long result; \
58 unsigned long flags; \
59 \
60 __global_lock1(flags); \
61 result = v->counter; \
62 result c_op i; \
63 fence(); \
64 v->counter = result; \
65 __global_unlock1(flags); \
66 \
67 return result; \
68}
69
70#define ATOMIC_FETCH_OP(op, c_op) \
71static inline int atomic_fetch_##op(int i, atomic_t *v) \
72{ \
73 unsigned long result; \
74 unsigned long flags; \
75 \
76 __global_lock1(flags); \
77 result = v->counter; \
78 fence(); \
79 v->counter c_op i; \
80 __global_unlock1(flags); \
81 \
82 return result; \
83}
84
85#define ATOMIC_OPS(op, c_op) \
86 ATOMIC_OP(op, c_op) \
87 ATOMIC_OP_RETURN(op, c_op) \
88 ATOMIC_FETCH_OP(op, c_op)
89
90ATOMIC_OPS(add, +=)
91ATOMIC_OPS(sub, -=)
92
93#undef ATOMIC_OPS
94#define ATOMIC_OPS(op, c_op) \
95 ATOMIC_OP(op, c_op) \
96 ATOMIC_FETCH_OP(op, c_op)
97
98ATOMIC_OPS(and, &=)
99ATOMIC_OPS(or, |=)
100ATOMIC_OPS(xor, ^=)
101
102#undef ATOMIC_OPS
103#undef ATOMIC_FETCH_OP
104#undef ATOMIC_OP_RETURN
105#undef ATOMIC_OP
106
107static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
108{
109 int ret;
110 unsigned long flags;
111
112 __global_lock1(flags);
113 ret = v->counter;
114 if (ret == old) {
115 fence();
116 v->counter = new;
117 }
118 __global_unlock1(flags);
119
120 return ret;
121}
122
123#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
124
125static inline int __atomic_add_unless(atomic_t *v, int a, int u)
126{
127 int ret;
128 unsigned long flags;
129
130 __global_lock1(flags);
131 ret = v->counter;
132 if (ret != u) {
133 fence();
134 v->counter += a;
135 }
136 __global_unlock1(flags);
137
138 return ret;
139}
140
141static inline int atomic_sub_if_positive(int i, atomic_t *v)
142{
143 int ret;
144 unsigned long flags;
145
146 __global_lock1(flags);
147 ret = v->counter - 1;
148 if (ret >= 0) {
149 fence();
150 v->counter = ret;
151 }
152 __global_unlock1(flags);
153
154 return ret;
155}
156
157#endif /* __ASM_METAG_ATOMIC_LOCK1_H */
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
deleted file mode 100644
index 2661fec5696a..000000000000
--- a/arch/metag/include/asm/barrier.h
+++ /dev/null
@@ -1,85 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_METAG_BARRIER_H
3#define _ASM_METAG_BARRIER_H
4
5#include <asm/metag_mem.h>
6
7#define nop() asm volatile ("NOP")
8
9#ifdef CONFIG_METAG_META21
10
11/* HTP and above have a system event to fence writes */
12static inline void wr_fence(void)
13{
14 volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_FENCE;
15 barrier();
16 *flushptr = 0;
17 barrier();
18}
19
20#else /* CONFIG_METAG_META21 */
21
22/*
23 * ATP doesn't have system event to fence writes, so it is necessary to flush
24 * the processor write queues as well as possibly the write combiner (depending
25 * on the page being written).
26 * To ensure the write queues are flushed we do 4 writes to a system event
27 * register (in this case write combiner flush) which will also flush the write
28 * combiner.
29 */
30static inline void wr_fence(void)
31{
32 volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_COMBINE_FLUSH;
33 barrier();
34 *flushptr = 0;
35 *flushptr = 0;
36 *flushptr = 0;
37 *flushptr = 0;
38 barrier();
39}
40
41#endif /* !CONFIG_METAG_META21 */
42
43/* flush writes through the write combiner */
44#define mb() wr_fence()
45#define rmb() barrier()
46#define wmb() mb()
47
48#ifdef CONFIG_METAG_SMP_WRITE_REORDERING
49/*
50 * Write to the atomic memory unlock system event register (command 0). This is
51 * needed before a write to shared memory in a critical section, to prevent
52 * external reordering of writes before the fence on other threads with writes
53 * after the fence on this thread (and to prevent the ensuing cache-memory
54 * incoherence). It is therefore ineffective if used after and on the same
55 * thread as a write.
56 */
57static inline void metag_fence(void)
58{
59 volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK;
60 barrier();
61 *flushptr = 0;
62 barrier();
63}
64#define __smp_mb() metag_fence()
65#define __smp_rmb() metag_fence()
66#define __smp_wmb() barrier()
67#else
68#define metag_fence() do { } while (0)
69#define __smp_mb() barrier()
70#define __smp_rmb() barrier()
71#define __smp_wmb() barrier()
72#endif
73
74#ifdef CONFIG_SMP
75#define fence() metag_fence()
76#else
77#define fence() do { } while (0)
78#endif
79
80#define __smp_mb__before_atomic() barrier()
81#define __smp_mb__after_atomic() barrier()
82
83#include <asm-generic/barrier.h>
84
85#endif /* _ASM_METAG_BARRIER_H */
diff --git a/arch/metag/include/asm/bitops.h b/arch/metag/include/asm/bitops.h
deleted file mode 100644
index 766ad43010ad..000000000000
--- a/arch/metag/include/asm/bitops.h
+++ /dev/null
@@ -1,127 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_METAG_BITOPS_H
3#define __ASM_METAG_BITOPS_H
4
5#include <linux/compiler.h>
6#include <asm/barrier.h>
7#include <asm/global_lock.h>
8
9#ifdef CONFIG_SMP
10/*
11 * These functions are the basis of our bit ops.
12 */
13static inline void set_bit(unsigned int bit, volatile unsigned long *p)
14{
15 unsigned long flags;
16 unsigned long mask = 1UL << (bit & 31);
17
18 p += bit >> 5;
19
20 __global_lock1(flags);
21 fence();
22 *p |= mask;
23 __global_unlock1(flags);
24}
25
26static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
27{
28 unsigned long flags;
29 unsigned long mask = 1UL << (bit & 31);
30
31 p += bit >> 5;
32
33 __global_lock1(flags);
34 fence();
35 *p &= ~mask;
36 __global_unlock1(flags);
37}
38
39static inline void change_bit(unsigned int bit, volatile unsigned long *p)
40{
41 unsigned long flags;
42 unsigned long mask = 1UL << (bit & 31);
43
44 p += bit >> 5;
45
46 __global_lock1(flags);
47 fence();
48 *p ^= mask;
49 __global_unlock1(flags);
50}
51
52static inline int test_and_set_bit(unsigned int bit, volatile unsigned long *p)
53{
54 unsigned long flags;
55 unsigned long old;
56 unsigned long mask = 1UL << (bit & 31);
57
58 p += bit >> 5;
59
60 __global_lock1(flags);
61 old = *p;
62 if (!(old & mask)) {
63 fence();
64 *p = old | mask;
65 }
66 __global_unlock1(flags);
67
68 return (old & mask) != 0;
69}
70
71static inline int test_and_clear_bit(unsigned int bit,
72 volatile unsigned long *p)
73{
74 unsigned long flags;
75 unsigned long old;
76 unsigned long mask = 1UL << (bit & 31);
77
78 p += bit >> 5;
79
80 __global_lock1(flags);
81 old = *p;
82 if (old & mask) {
83 fence();
84 *p = old & ~mask;
85 }
86 __global_unlock1(flags);
87
88 return (old & mask) != 0;
89}
90
91static inline int test_and_change_bit(unsigned int bit,
92 volatile unsigned long *p)
93{
94 unsigned long flags;
95 unsigned long old;
96 unsigned long mask = 1UL << (bit & 31);
97
98 p += bit >> 5;
99
100 __global_lock1(flags);
101 fence();
102 old = *p;
103 *p = old ^ mask;
104 __global_unlock1(flags);
105
106 return (old & mask) != 0;
107}
108
109#else
110#include <asm-generic/bitops/atomic.h>
111#endif /* CONFIG_SMP */
112
113#include <asm-generic/bitops/non-atomic.h>
114#include <asm-generic/bitops/find.h>
115#include <asm-generic/bitops/ffs.h>
116#include <asm-generic/bitops/__ffs.h>
117#include <asm-generic/bitops/ffz.h>
118#include <asm-generic/bitops/fls.h>
119#include <asm-generic/bitops/__fls.h>
120#include <asm-generic/bitops/fls64.h>
121#include <asm-generic/bitops/hweight.h>
122#include <asm-generic/bitops/lock.h>
123#include <asm-generic/bitops/sched.h>
124#include <asm-generic/bitops/le.h>
125#include <asm-generic/bitops/ext2-atomic.h>
126
127#endif /* __ASM_METAG_BITOPS_H */
diff --git a/arch/metag/include/asm/bug.h b/arch/metag/include/asm/bug.h
deleted file mode 100644
index ee07a943f931..000000000000
--- a/arch/metag/include/asm/bug.h
+++ /dev/null
@@ -1,13 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_METAG_BUG_H
3#define _ASM_METAG_BUG_H
4
5#include <asm-generic/bug.h>
6
7struct pt_regs;
8
9extern const char *trap_name(int trapno);
10extern void __noreturn die(const char *str, struct pt_regs *regs, long err,
11 unsigned long addr);
12
13#endif
diff --git a/arch/metag/include/asm/cache.h b/arch/metag/include/asm/cache.h
deleted file mode 100644
index b5df02239c8d..000000000000
--- a/arch/metag/include/asm/cache.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_METAG_CACHE_H
3#define __ASM_METAG_CACHE_H
4
5/* L1 cache line size (64 bytes) */
6#define L1_CACHE_SHIFT 6
7#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8
9/* Meta requires large data items to be 8 byte aligned. */
10#define ARCH_SLAB_MINALIGN 8
11
12/*
13 * With an L2 cache, we may invalidate dirty lines, so we need to ensure DMA
14 * buffers have cache line alignment.
15 */
16#ifdef CONFIG_METAG_L2C
17#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
18#else
19#define ARCH_DMA_MINALIGN 8
20#endif
21
22#define __read_mostly __attribute__((__section__(".data..read_mostly")))
23
24#endif
diff --git a/arch/metag/include/asm/cacheflush.h b/arch/metag/include/asm/cacheflush.h
deleted file mode 100644
index 2584a51eca1a..000000000000
--- a/arch/metag/include/asm/cacheflush.h
+++ /dev/null
@@ -1,251 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _METAG_CACHEFLUSH_H
3#define _METAG_CACHEFLUSH_H
4
5#include <linux/mm.h>
6#include <linux/sched.h>
7#include <linux/io.h>
8
9#include <asm/l2cache.h>
10#include <asm/metag_isa.h>
11#include <asm/metag_mem.h>
12
13void metag_cache_probe(void);
14
15void metag_data_cache_flush_all(const void *start);
16void metag_code_cache_flush_all(const void *start);
17
18/*
19 * Routines to flush physical cache lines that may be used to cache data or code
20 * normally accessed via the linear address range supplied. The region flushed
21 * must either lie in local or global address space determined by the top bit of
22 * the pStart address. If Bytes is >= 4K then the whole of the related cache
23 * state will be flushed rather than a limited range.
24 */
25void metag_data_cache_flush(const void *start, int bytes);
26void metag_code_cache_flush(const void *start, int bytes);
27
28#ifdef CONFIG_METAG_META12
29
30/* Write through, virtually tagged, split I/D cache. */
31
32static inline void __flush_cache_all(void)
33{
34 metag_code_cache_flush_all((void *) PAGE_OFFSET);
35 metag_data_cache_flush_all((void *) PAGE_OFFSET);
36}
37
38#define flush_cache_all() __flush_cache_all()
39
40/* flush the entire user address space referenced in this mm structure */
41static inline void flush_cache_mm(struct mm_struct *mm)
42{
43 if (mm == current->mm)
44 __flush_cache_all();
45}
46
47#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
48
49/* flush a range of addresses from this mm */
50static inline void flush_cache_range(struct vm_area_struct *vma,
51 unsigned long start, unsigned long end)
52{
53 flush_cache_mm(vma->vm_mm);
54}
55
56static inline void flush_cache_page(struct vm_area_struct *vma,
57 unsigned long vmaddr, unsigned long pfn)
58{
59 flush_cache_mm(vma->vm_mm);
60}
61
62#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
63static inline void flush_dcache_page(struct page *page)
64{
65 metag_data_cache_flush_all((void *) PAGE_OFFSET);
66}
67
68#define flush_dcache_mmap_lock(mapping) do { } while (0)
69#define flush_dcache_mmap_unlock(mapping) do { } while (0)
70
71static inline void flush_icache_page(struct vm_area_struct *vma,
72 struct page *page)
73{
74 metag_code_cache_flush(page_to_virt(page), PAGE_SIZE);
75}
76
77static inline void flush_cache_vmap(unsigned long start, unsigned long end)
78{
79 metag_data_cache_flush_all((void *) PAGE_OFFSET);
80}
81
82static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
83{
84 metag_data_cache_flush_all((void *) PAGE_OFFSET);
85}
86
87#else
88
89/* Write through, physically tagged, split I/D cache. */
90
91#define flush_cache_all() do { } while (0)
92#define flush_cache_mm(mm) do { } while (0)
93#define flush_cache_dup_mm(mm) do { } while (0)
94#define flush_cache_range(vma, start, end) do { } while (0)
95#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
96#define flush_dcache_mmap_lock(mapping) do { } while (0)
97#define flush_dcache_mmap_unlock(mapping) do { } while (0)
98#define flush_icache_page(vma, pg) do { } while (0)
99#define flush_cache_vmap(start, end) do { } while (0)
100#define flush_cache_vunmap(start, end) do { } while (0)
101
102#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
103static inline void flush_dcache_page(struct page *page)
104{
105 /* FIXME: We can do better than this. All we are trying to do is
106 * make the i-cache coherent, we should use the PG_arch_1 bit like
107 * e.g. powerpc.
108 */
109#ifdef CONFIG_SMP
110 metag_out32(1, SYSC_ICACHE_FLUSH);
111#else
112 metag_code_cache_flush_all((void *) PAGE_OFFSET);
113#endif
114}
115
116#endif
117
118/* Push n pages at kernel virtual address and clear the icache */
119static inline void flush_icache_range(unsigned long address,
120 unsigned long endaddr)
121{
122#ifdef CONFIG_SMP
123 metag_out32(1, SYSC_ICACHE_FLUSH);
124#else
125 metag_code_cache_flush((void *) address, endaddr - address);
126#endif
127}
128
129static inline void flush_cache_sigtramp(unsigned long addr, int size)
130{
131 /*
132 * Flush the icache in case there was previously some code
133 * fetched from this address, perhaps a previous sigtramp.
134 *
135 * We don't need to flush the dcache, it's write through and
136 * we just wrote the sigtramp code through it.
137 */
138#ifdef CONFIG_SMP
139 metag_out32(1, SYSC_ICACHE_FLUSH);
140#else
141 metag_code_cache_flush((void *) addr, size);
142#endif
143}
144
145#ifdef CONFIG_METAG_L2C
146
147/*
148 * Perform a single specific CACHEWD operation on an address, masking lower bits
149 * of address first.
150 */
151static inline void cachewd_line(void *addr, unsigned int data)
152{
153 unsigned long masked = (unsigned long)addr & -0x40;
154 __builtin_meta2_cachewd((void *)masked, data);
155}
156
157/* Perform a certain CACHEW op on each cache line in a range */
158static inline void cachew_region_op(void *start, unsigned long size,
159 unsigned int op)
160{
161 unsigned long offset = (unsigned long)start & 0x3f;
162 int i;
163 if (offset) {
164 size += offset;
165 start -= offset;
166 }
167 i = (size - 1) >> 6;
168 do {
169 __builtin_meta2_cachewd(start, op);
170 start += 0x40;
171 } while (i--);
172}
173
174/* prevent write fence and flushbacks being reordered in L2 */
175static inline void l2c_fence_flush(void *addr)
176{
177 /*
178 * Synchronise by reading back and re-flushing.
179 * It is assumed this access will miss, as the caller should have just
180 * flushed the cache line.
181 */
182 (void)(volatile u8 *)addr;
183 cachewd_line(addr, CACHEW_FLUSH_L1D_L2);
184}
185
186/* prevent write fence and writebacks being reordered in L2 */
187static inline void l2c_fence(void *addr)
188{
189 /*
190 * A write back has occurred, but not necessarily an invalidate, so the
191 * readback in l2c_fence_flush() would hit in the cache and have no
192 * effect. Therefore fully flush the line first.
193 */
194 cachewd_line(addr, CACHEW_FLUSH_L1D_L2);
195 l2c_fence_flush(addr);
196}
197
198/* Used to keep memory consistent when doing DMA. */
199static inline void flush_dcache_region(void *start, unsigned long size)
200{
201 /* metag_data_cache_flush won't flush L2 cache lines if size >= 4096 */
202 if (meta_l2c_is_enabled()) {
203 cachew_region_op(start, size, CACHEW_FLUSH_L1D_L2);
204 if (meta_l2c_is_writeback())
205 l2c_fence_flush(start + size - 1);
206 } else {
207 metag_data_cache_flush(start, size);
208 }
209}
210
211/* Write back dirty lines to memory (or do nothing if no writeback caches) */
212static inline void writeback_dcache_region(void *start, unsigned long size)
213{
214 if (meta_l2c_is_enabled() && meta_l2c_is_writeback()) {
215 cachew_region_op(start, size, CACHEW_WRITEBACK_L1D_L2);
216 l2c_fence(start + size - 1);
217 }
218}
219
220/* Invalidate (may also write back if necessary) */
221static inline void invalidate_dcache_region(void *start, unsigned long size)
222{
223 if (meta_l2c_is_enabled())
224 cachew_region_op(start, size, CACHEW_INVALIDATE_L1D_L2);
225 else
226 metag_data_cache_flush(start, size);
227}
228#else
229#define flush_dcache_region(s, l) metag_data_cache_flush((s), (l))
230#define writeback_dcache_region(s, l) do {} while (0)
231#define invalidate_dcache_region(s, l) flush_dcache_region((s), (l))
232#endif
233
234static inline void copy_to_user_page(struct vm_area_struct *vma,
235 struct page *page, unsigned long vaddr,
236 void *dst, const void *src,
237 unsigned long len)
238{
239 memcpy(dst, src, len);
240 flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
241}
242
243static inline void copy_from_user_page(struct vm_area_struct *vma,
244 struct page *page, unsigned long vaddr,
245 void *dst, const void *src,
246 unsigned long len)
247{
248 memcpy(dst, src, len);
249}
250
251#endif /* _METAG_CACHEFLUSH_H */
diff --git a/arch/metag/include/asm/cachepart.h b/arch/metag/include/asm/cachepart.h
deleted file mode 100644
index 79411e977586..000000000000
--- a/arch/metag/include/asm/cachepart.h
+++ /dev/null
@@ -1,43 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Meta cache partition manipulation.
4 *
5 * Copyright 2010 Imagination Technologies Ltd.
6 */
7
8#ifndef _METAG_CACHEPART_H_
9#define _METAG_CACHEPART_H_
10
11/**
12 * get_dcache_size() - Get size of data cache.
13 */
14unsigned int get_dcache_size(void);
15
16/**
17 * get_icache_size() - Get size of code cache.
18 */
19unsigned int get_icache_size(void);
20
21/**
22 * get_global_dcache_size() - Get the thread's global dcache.
23 *
24 * Returns the size of the current thread's global dcache partition.
25 */
26unsigned int get_global_dcache_size(void);
27
28/**
29 * get_global_icache_size() - Get the thread's global icache.
30 *
31 * Returns the size of the current thread's global icache partition.
32 */
33unsigned int get_global_icache_size(void);
34
35/**
36 * check_for_dache_aliasing() - Ensure that the bootloader has configured the
37 * dache and icache properly to avoid aliasing
38 * @thread_id: Hardware thread ID
39 *
40 */
41void check_for_cache_aliasing(int thread_id);
42
43#endif
diff --git a/arch/metag/include/asm/checksum.h b/arch/metag/include/asm/checksum.h
deleted file mode 100644
index 6533d14e9789..000000000000
--- a/arch/metag/include/asm/checksum.h
+++ /dev/null
@@ -1,93 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _METAG_CHECKSUM_H
3#define _METAG_CHECKSUM_H
4
5/*
6 * computes the checksum of a memory block at buff, length len,
7 * and adds in "sum" (32-bit)
8 *
9 * returns a 32-bit number suitable for feeding into itself
10 * or csum_tcpudp_magic
11 *
12 * this function must be called with even lengths, except
13 * for the last fragment, which may be odd
14 *
15 * it's best to have buff aligned on a 32-bit boundary
16 */
17extern __wsum csum_partial(const void *buff, int len, __wsum sum);
18
19/*
20 * the same as csum_partial, but copies from src while it
21 * checksums
22 *
23 * here even more important to align src and dst on a 32-bit (or even
24 * better 64-bit) boundary
25 */
26extern __wsum csum_partial_copy(const void *src, void *dst, int len,
27 __wsum sum);
28
29/*
30 * the same as csum_partial_copy, but copies from user space.
31 *
32 * here even more important to align src and dst on a 32-bit (or even
33 * better 64-bit) boundary
34 */
35extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
36 int len, __wsum sum, int *csum_err);
37
38#define csum_partial_copy_nocheck(src, dst, len, sum) \
39 csum_partial_copy((src), (dst), (len), (sum))
40
41/*
42 * Fold a partial checksum
43 */
44static inline __sum16 csum_fold(__wsum csum)
45{
46 u32 sum = (__force u32)csum;
47 sum = (sum & 0xffff) + (sum >> 16);
48 sum = (sum & 0xffff) + (sum >> 16);
49 return (__force __sum16)~sum;
50}
51
52/*
53 * This is a version of ip_compute_csum() optimized for IP headers,
54 * which always checksum on 4 octet boundaries.
55 */
56extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
57
58/*
59 * computes the checksum of the TCP/UDP pseudo-header
60 * returns a 16-bit checksum, already complemented
61 */
62static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
63 __u32 len, __u8 proto,
64 __wsum sum)
65{
66 unsigned long len_proto = (proto + len) << 8;
67 asm ("ADDS %0, %0, %1\n"
68 "ADDCS %0, %0, #1\n"
69 "ADDS %0, %0, %2\n"
70 "ADDCS %0, %0, #1\n"
71 "ADDS %0, %0, %3\n"
72 "ADDCS %0, %0, #1\n"
73 : "=d" (sum)
74 : "d" (daddr), "d" (saddr), "d" (len_proto),
75 "0" (sum)
76 : "cc");
77 return sum;
78}
79
80static inline __sum16
81csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
82 __u8 proto, __wsum sum)
83{
84 return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
85}
86
87/*
88 * this routine is used for miscellaneous IP-like checksums, mainly
89 * in icmp.c
90 */
91extern __sum16 ip_compute_csum(const void *buff, int len);
92
93#endif /* _METAG_CHECKSUM_H */
diff --git a/arch/metag/include/asm/clock.h b/arch/metag/include/asm/clock.h
deleted file mode 100644
index ded4ab2e1fd0..000000000000
--- a/arch/metag/include/asm/clock.h
+++ /dev/null
@@ -1,59 +0,0 @@
1/*
2 * arch/metag/include/asm/clock.h
3 *
4 * Copyright (C) 2012 Imagination Technologies Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef _METAG_CLOCK_H_
12#define _METAG_CLOCK_H_
13
14#include <asm/mach/arch.h>
15
16/**
17 * struct meta_clock_desc - Meta Core clock callbacks.
18 * @get_core_freq: Get the frequency of the Meta core. If this is NULL, the
19 * core frequency will be determined like this:
20 * Meta 1: based on loops_per_jiffy.
21 * Meta 2: (EXPAND_TIMER_DIV + 1) MHz.
22 * If a "core" clock is provided by the device tree, it
23 * will override this function.
24 */
25struct meta_clock_desc {
26 unsigned long (*get_core_freq)(void);
27};
28
29extern struct meta_clock_desc _meta_clock;
30
31/*
32 * Perform platform clock initialisation, reading clocks from device tree etc.
33 * Only accessible during boot.
34 */
35void init_metag_clocks(void);
36
37/*
38 * Set up the default clock, ensuring all callbacks are valid - only accessible
39 * during boot.
40 */
41void setup_meta_clocks(struct meta_clock_desc *desc);
42
43/**
44 * get_coreclock() - Get the frequency of the Meta core clock.
45 *
46 * Returns: The Meta core clock frequency in Hz.
47 */
48static inline unsigned long get_coreclock(void)
49{
50 /*
51 * Use the current clock callback. If set correctly this will provide
52 * the most accurate frequency as it can be calculated directly from the
53 * PLL configuration. otherwise a default callback will have been set
54 * instead.
55 */
56 return _meta_clock.get_core_freq();
57}
58
59#endif /* _METAG_CLOCK_H_ */
diff --git a/arch/metag/include/asm/cmpxchg.h b/arch/metag/include/asm/cmpxchg.h
deleted file mode 100644
index 68c4ab1466fd..000000000000
--- a/arch/metag/include/asm/cmpxchg.h
+++ /dev/null
@@ -1,64 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_METAG_CMPXCHG_H
3#define __ASM_METAG_CMPXCHG_H
4
5#include <asm/barrier.h>
6
7#if defined(CONFIG_METAG_ATOMICITY_IRQSOFF)
8#include <asm/cmpxchg_irq.h>
9#elif defined(CONFIG_METAG_ATOMICITY_LOCK1)
10#include <asm/cmpxchg_lock1.h>
11#elif defined(CONFIG_METAG_ATOMICITY_LNKGET)
12#include <asm/cmpxchg_lnkget.h>
13#endif
14
15extern void __xchg_called_with_bad_pointer(void);
16
17#define __xchg(ptr, x, size) \
18({ \
19 unsigned long __xchg__res; \
20 volatile void *__xchg_ptr = (ptr); \
21 switch (size) { \
22 case 4: \
23 __xchg__res = xchg_u32(__xchg_ptr, x); \
24 break; \
25 case 1: \
26 __xchg__res = xchg_u8(__xchg_ptr, x); \
27 break; \
28 default: \
29 __xchg_called_with_bad_pointer(); \
30 __xchg__res = x; \
31 break; \
32 } \
33 \
34 __xchg__res; \
35})
36
37#define xchg(ptr, x) \
38 ((__typeof__(*(ptr)))__xchg((ptr), (unsigned long)(x), sizeof(*(ptr))))
39
40/* This function doesn't exist, so you'll get a linker error
41 * if something tries to do an invalid cmpxchg(). */
42extern void __cmpxchg_called_with_bad_pointer(void);
43
44static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
45 unsigned long new, int size)
46{
47 switch (size) {
48 case 4:
49 return __cmpxchg_u32(ptr, old, new);
50 }
51 __cmpxchg_called_with_bad_pointer();
52 return old;
53}
54
55#define cmpxchg(ptr, o, n) \
56 ({ \
57 __typeof__(*(ptr)) _o_ = (o); \
58 __typeof__(*(ptr)) _n_ = (n); \
59 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
60 (unsigned long)_n_, \
61 sizeof(*(ptr))); \
62 })
63
64#endif /* __ASM_METAG_CMPXCHG_H */
diff --git a/arch/metag/include/asm/cmpxchg_irq.h b/arch/metag/include/asm/cmpxchg_irq.h
deleted file mode 100644
index 5255e37f8496..000000000000
--- a/arch/metag/include/asm/cmpxchg_irq.h
+++ /dev/null
@@ -1,43 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_METAG_CMPXCHG_IRQ_H
3#define __ASM_METAG_CMPXCHG_IRQ_H
4
5#include <linux/irqflags.h>
6
7static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
8{
9 unsigned long flags, retval;
10
11 local_irq_save(flags);
12 retval = *m;
13 *m = val;
14 local_irq_restore(flags);
15 return retval;
16}
17
18static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
19{
20 unsigned long flags, retval;
21
22 local_irq_save(flags);
23 retval = *m;
24 *m = val & 0xff;
25 local_irq_restore(flags);
26 return retval;
27}
28
29static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
30 unsigned long new)
31{
32 __u32 retval;
33 unsigned long flags;
34
35 local_irq_save(flags);
36 retval = *m;
37 if (retval == old)
38 *m = new;
39 local_irq_restore(flags); /* implies memory barrier */
40 return retval;
41}
42
43#endif /* __ASM_METAG_CMPXCHG_IRQ_H */
diff --git a/arch/metag/include/asm/cmpxchg_lnkget.h b/arch/metag/include/asm/cmpxchg_lnkget.h
deleted file mode 100644
index c69be00a4739..000000000000
--- a/arch/metag/include/asm/cmpxchg_lnkget.h
+++ /dev/null
@@ -1,87 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_METAG_CMPXCHG_LNKGET_H
3#define __ASM_METAG_CMPXCHG_LNKGET_H
4
5static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
6{
7 int temp, old;
8
9 smp_mb();
10
11 asm volatile (
12 "1: LNKGETD %1, [%2]\n"
13 " LNKSETD [%2], %3\n"
14 " DEFR %0, TXSTAT\n"
15 " ANDT %0, %0, #HI(0x3f000000)\n"
16 " CMPT %0, #HI(0x02000000)\n"
17 " BNZ 1b\n"
18#ifdef CONFIG_METAG_LNKGET_AROUND_CACHE
19 " DCACHE [%2], %0\n"
20#endif
21 : "=&d" (temp), "=&d" (old)
22 : "da" (m), "da" (val)
23 : "cc"
24 );
25
26 smp_mb();
27
28 return old;
29}
30
31static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
32{
33 int temp, old;
34
35 smp_mb();
36
37 asm volatile (
38 "1: LNKGETD %1, [%2]\n"
39 " LNKSETD [%2], %3\n"
40 " DEFR %0, TXSTAT\n"
41 " ANDT %0, %0, #HI(0x3f000000)\n"
42 " CMPT %0, #HI(0x02000000)\n"
43 " BNZ 1b\n"
44#ifdef CONFIG_METAG_LNKGET_AROUND_CACHE
45 " DCACHE [%2], %0\n"
46#endif
47 : "=&d" (temp), "=&d" (old)
48 : "da" (m), "da" (val & 0xff)
49 : "cc"
50 );
51
52 smp_mb();
53
54 return old;
55}
56
57static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
58 unsigned long new)
59{
60 __u32 retval, temp;
61
62 smp_mb();
63
64 asm volatile (
65 "1: LNKGETD %1, [%2]\n"
66 " CMP %1, %3\n"
67 " LNKSETDEQ [%2], %4\n"
68 " BNE 2f\n"
69 " DEFR %0, TXSTAT\n"
70 " ANDT %0, %0, #HI(0x3f000000)\n"
71 " CMPT %0, #HI(0x02000000)\n"
72 " BNZ 1b\n"
73#ifdef CONFIG_METAG_LNKGET_AROUND_CACHE
74 " DCACHE [%2], %0\n"
75#endif
76 "2:\n"
77 : "=&d" (temp), "=&d" (retval)
78 : "da" (m), "bd" (old), "da" (new)
79 : "cc"
80 );
81
82 smp_mb();
83
84 return retval;
85}
86
87#endif /* __ASM_METAG_CMPXCHG_LNKGET_H */
diff --git a/arch/metag/include/asm/cmpxchg_lock1.h b/arch/metag/include/asm/cmpxchg_lock1.h
deleted file mode 100644
index 5976e39db2b4..000000000000
--- a/arch/metag/include/asm/cmpxchg_lock1.h
+++ /dev/null
@@ -1,49 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_METAG_CMPXCHG_LOCK1_H
3#define __ASM_METAG_CMPXCHG_LOCK1_H
4
5#include <asm/global_lock.h>
6
7/* Use LOCK2 as these have to be atomic w.r.t. ordinary accesses. */
8
9static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
10{
11 unsigned long flags, retval;
12
13 __global_lock2(flags);
14 fence();
15 retval = *m;
16 *m = val;
17 __global_unlock2(flags);
18 return retval;
19}
20
21static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
22{
23 unsigned long flags, retval;
24
25 __global_lock2(flags);
26 fence();
27 retval = *m;
28 *m = val & 0xff;
29 __global_unlock2(flags);
30 return retval;
31}
32
33static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
34 unsigned long new)
35{
36 __u32 retval;
37 unsigned long flags;
38
39 __global_lock2(flags);
40 retval = *m;
41 if (retval == old) {
42 fence();
43 *m = new;
44 }
45 __global_unlock2(flags);
46 return retval;
47}
48
49#endif /* __ASM_METAG_CMPXCHG_LOCK1_H */
diff --git a/arch/metag/include/asm/core_reg.h b/arch/metag/include/asm/core_reg.h
deleted file mode 100644
index ca70a0a29b61..000000000000
--- a/arch/metag/include/asm/core_reg.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_METAG_CORE_REG_H_
3#define __ASM_METAG_CORE_REG_H_
4
5#include <asm/metag_regs.h>
6
7extern void core_reg_write(int unit, int reg, int thread, unsigned int val);
8extern unsigned int core_reg_read(int unit, int reg, int thread);
9
10/*
11 * These macros allow direct access from C to any register known to the
12 * assembler. Example candidates are TXTACTCYC, TXIDLECYC, and TXPRIVEXT.
13 */
14
15#define __core_reg_get(reg) ({ \
16 unsigned int __grvalue; \
17 asm volatile("MOV %0," #reg \
18 : "=r" (__grvalue)); \
19 __grvalue; \
20})
21
22#define __core_reg_set(reg, value) do { \
23 unsigned int __srvalue = (value); \
24 asm volatile("MOV " #reg ",%0" \
25 : \
26 : "r" (__srvalue)); \
27} while (0)
28
29#define __core_reg_swap(reg, value) do { \
30 unsigned int __srvalue = (value); \
31 asm volatile("SWAP " #reg ",%0" \
32 : "+r" (__srvalue)); \
33 (value) = __srvalue; \
34} while (0)
35
36#endif
diff --git a/arch/metag/include/asm/cpu.h b/arch/metag/include/asm/cpu.h
deleted file mode 100644
index 9dac67de4748..000000000000
--- a/arch/metag/include/asm/cpu.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_METAG_CPU_H
3#define _ASM_METAG_CPU_H
4
5#include <linux/percpu.h>
6
7struct cpuinfo_metag {
8 struct cpu cpu;
9#ifdef CONFIG_SMP
10 unsigned long loops_per_jiffy;
11#endif
12};
13
14DECLARE_PER_CPU(struct cpuinfo_metag, cpu_data);
15#endif /* _ASM_METAG_CPU_H */
diff --git a/arch/metag/include/asm/da.h b/arch/metag/include/asm/da.h
deleted file mode 100644
index 901daa540e6e..000000000000
--- a/arch/metag/include/asm/da.h
+++ /dev/null
@@ -1,44 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Meta DA JTAG debugger control.
4 *
5 * Copyright 2012 Imagination Technologies Ltd.
6 */
7
8#ifndef _METAG_DA_H_
9#define _METAG_DA_H_
10
11#ifdef CONFIG_METAG_DA
12
13#include <linux/init.h>
14#include <linux/types.h>
15
16extern bool _metag_da_present;
17
18/**
19 * metag_da_enabled() - Find whether a DA is currently enabled.
20 *
21 * Returns: true if a DA was detected, false if not.
22 */
23static inline bool metag_da_enabled(void)
24{
25 return _metag_da_present;
26}
27
28/**
29 * metag_da_probe() - Try and detect a connected DA.
30 *
31 * This is used at start up to detect whether a DA is active.
32 *
33 * Returns: 0 on detection, -err otherwise.
34 */
35int __init metag_da_probe(void);
36
37#else /* !CONFIG_METAG_DA */
38
39#define metag_da_enabled() false
40#define metag_da_probe() do {} while (0)
41
42#endif
43
44#endif /* _METAG_DA_H_ */
diff --git a/arch/metag/include/asm/delay.h b/arch/metag/include/asm/delay.h
deleted file mode 100644
index fd73d3d5d294..000000000000
--- a/arch/metag/include/asm/delay.h
+++ /dev/null
@@ -1,30 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _METAG_DELAY_H
3#define _METAG_DELAY_H
4
5/*
6 * Copyright (C) 1993 Linus Torvalds
7 *
8 * Delay routines calling functions in arch/metag/lib/delay.c
9 */
10
11/* Undefined functions to get compile-time errors */
12extern void __bad_udelay(void);
13extern void __bad_ndelay(void);
14
15extern void __udelay(unsigned long usecs);
16extern void __ndelay(unsigned long nsecs);
17extern void __const_udelay(unsigned long xloops);
18extern void __delay(unsigned long loops);
19
20/* 0x10c7 is 2**32 / 1000000 (rounded up) */
21#define udelay(n) (__builtin_constant_p(n) ? \
22 ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
23 __udelay(n))
24
25/* 0x5 is 2**32 / 1000000000 (rounded up) */
26#define ndelay(n) (__builtin_constant_p(n) ? \
27 ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
28 __ndelay(n))
29
30#endif /* _METAG_DELAY_H */
diff --git a/arch/metag/include/asm/div64.h b/arch/metag/include/asm/div64.h
deleted file mode 100644
index e3686d2ae20e..000000000000
--- a/arch/metag/include/asm/div64.h
+++ /dev/null
@@ -1,13 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_DIV64_H__
3#define __ASM_DIV64_H__
4
5#include <asm-generic/div64.h>
6
7extern u64 div_u64(u64 dividend, u64 divisor);
8extern s64 div_s64(s64 dividend, s64 divisor);
9
10#define div_u64 div_u64
11#define div_s64 div_s64
12
13#endif
diff --git a/arch/metag/include/asm/dma-mapping.h b/arch/metag/include/asm/dma-mapping.h
deleted file mode 100644
index cfd6a0505b56..000000000000
--- a/arch/metag/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_METAG_DMA_MAPPING_H
3#define _ASM_METAG_DMA_MAPPING_H
4
5extern const struct dma_map_ops metag_dma_ops;
6
7static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
8{
9 return &metag_dma_ops;
10}
11
12#endif
diff --git a/arch/metag/include/asm/elf.h b/arch/metag/include/asm/elf.h
deleted file mode 100644
index a6c33800ba66..000000000000
--- a/arch/metag/include/asm/elf.h
+++ /dev/null
@@ -1,126 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_METAG_ELF_H
3#define __ASM_METAG_ELF_H
4
5#define EM_METAG 174
6
7/* Meta relocations */
8#define R_METAG_HIADDR16 0
9#define R_METAG_LOADDR16 1
10#define R_METAG_ADDR32 2
11#define R_METAG_NONE 3
12#define R_METAG_RELBRANCH 4
13#define R_METAG_GETSETOFF 5
14
15/* Backward compatibility */
16#define R_METAG_REG32OP1 6
17#define R_METAG_REG32OP2 7
18#define R_METAG_REG32OP3 8
19#define R_METAG_REG16OP1 9
20#define R_METAG_REG16OP2 10
21#define R_METAG_REG16OP3 11
22#define R_METAG_REG32OP4 12
23
24#define R_METAG_HIOG 13
25#define R_METAG_LOOG 14
26
27/* GNU */
28#define R_METAG_GNU_VTINHERIT 30
29#define R_METAG_GNU_VTENTRY 31
30
31/* PIC relocations */
32#define R_METAG_HI16_GOTOFF 32
33#define R_METAG_LO16_GOTOFF 33
34#define R_METAG_GETSET_GOTOFF 34
35#define R_METAG_GETSET_GOT 35
36#define R_METAG_HI16_GOTPC 36
37#define R_METAG_LO16_GOTPC 37
38#define R_METAG_HI16_PLT 38
39#define R_METAG_LO16_PLT 39
40#define R_METAG_RELBRANCH_PLT 40
41#define R_METAG_GOTOFF 41
42#define R_METAG_PLT 42
43#define R_METAG_COPY 43
44#define R_METAG_JMP_SLOT 44
45#define R_METAG_RELATIVE 45
46#define R_METAG_GLOB_DAT 46
47
48/*
49 * ELF register definitions.
50 */
51
52#include <asm/page.h>
53#include <asm/processor.h>
54#include <asm/ptrace.h>
55#include <asm/user.h>
56
57typedef unsigned long elf_greg_t;
58
59#define ELF_NGREG (sizeof(struct user_gp_regs) / sizeof(elf_greg_t))
60typedef elf_greg_t elf_gregset_t[ELF_NGREG];
61
62typedef unsigned long elf_fpregset_t;
63
64/*
65 * This is used to ensure we don't load something for the wrong architecture.
66 */
67#define elf_check_arch(x) ((x)->e_machine == EM_METAG)
68
69/*
70 * These are used to set parameters in the core dumps.
71 */
72#define ELF_CLASS ELFCLASS32
73#define ELF_DATA ELFDATA2LSB
74#define ELF_ARCH EM_METAG
75
76#define ELF_PLAT_INIT(_r, load_addr) \
77 do { _r->ctx.AX[0].U0 = 0; } while (0)
78
79#define USE_ELF_CORE_DUMP
80#define CORE_DUMP_USE_REGSET
81#define ELF_EXEC_PAGESIZE PAGE_SIZE
82
83/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
84 use of this is to invoke "./ld.so someprog" to test out a new version of
85 the loader. We need to make sure that it is out of the way of the program
86 that it will "exec", and that there is sufficient room for the brk. */
87
88#define ELF_ET_DYN_BASE 0x08000000UL
89
90#define ELF_CORE_COPY_REGS(_dest, _regs) \
91 memcpy((char *)&_dest, (char *)_regs, sizeof(struct pt_regs));
92
93/* This yields a mask that user programs can use to figure out what
94 instruction set this cpu supports. */
95
96#define ELF_HWCAP (0)
97
98/* This yields a string that ld.so will use to load implementation
99 specific libraries for optimization. This is more specific in
100 intent than poking at uname or /proc/cpuinfo. */
101
102#define ELF_PLATFORM (NULL)
103
104#define STACK_RND_MASK (0)
105
106#ifdef CONFIG_METAG_USER_TCM
107
108struct elf32_phdr;
109struct file;
110
111unsigned long __metag_elf_map(struct file *filep, unsigned long addr,
112 struct elf32_phdr *eppnt, int prot, int type,
113 unsigned long total_size);
114
115static inline unsigned long metag_elf_map(struct file *filep,
116 unsigned long addr,
117 struct elf32_phdr *eppnt, int prot,
118 int type, unsigned long total_size)
119{
120 return __metag_elf_map(filep, addr, eppnt, prot, type, total_size);
121}
122#define elf_map metag_elf_map
123
124#endif
125
126#endif
diff --git a/arch/metag/include/asm/fixmap.h b/arch/metag/include/asm/fixmap.h
deleted file mode 100644
index af621b041739..000000000000
--- a/arch/metag/include/asm/fixmap.h
+++ /dev/null
@@ -1,69 +0,0 @@
1/*
2 * fixmap.h: compile-time virtual memory allocation
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1998 Ingo Molnar
9 *
10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11 */
12
13#ifndef _ASM_FIXMAP_H
14#define _ASM_FIXMAP_H
15
16#include <asm/pgtable.h>
17#ifdef CONFIG_HIGHMEM
18#include <linux/threads.h>
19#include <asm/kmap_types.h>
20#endif
21
22/*
23 * Here we define all the compile-time 'special' virtual
24 * addresses. The point is to have a constant address at
25 * compile time, but to set the physical address only
26 * in the boot process. We allocate these special addresses
27 * from the end of the consistent memory region backwards.
28 * Also this lets us do fail-safe vmalloc(), we
29 * can guarantee that these special addresses and
30 * vmalloc()-ed addresses never overlap.
31 *
32 * these 'compile-time allocated' memory buffers are
33 * fixed-size 4k pages. (or larger if used with an increment
34 * higher than 1) use fixmap_set(idx,phys) to associate
35 * physical memory with fixmap indices.
36 *
37 * TLB entries of such buffers will not be flushed across
38 * task switches.
39 */
40enum fixed_addresses {
41#define FIX_N_COLOURS 8
42#ifdef CONFIG_HIGHMEM
43 /* reserved pte's for temporary kernel mappings */
44 FIX_KMAP_BEGIN,
45 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
46#endif
47 __end_of_fixed_addresses
48};
49
50#define FIXADDR_TOP (CONSISTENT_START - PAGE_SIZE)
51#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
52#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
53
54#include <asm-generic/fixmap.h>
55
56#define kmap_get_fixmap_pte(vaddr) \
57 pte_offset_kernel( \
58 pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
59 (vaddr) \
60 )
61
62/*
63 * Called from pgtable_init()
64 */
65extern void fixrange_init(unsigned long start, unsigned long end,
66 pgd_t *pgd_base);
67
68
69#endif
diff --git a/arch/metag/include/asm/ftrace.h b/arch/metag/include/asm/ftrace.h
deleted file mode 100644
index b1c8c76fb772..000000000000
--- a/arch/metag/include/asm/ftrace.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_METAG_FTRACE
3#define _ASM_METAG_FTRACE
4
5#ifdef CONFIG_FUNCTION_TRACER
6#define MCOUNT_INSN_SIZE 8 /* sizeof mcount call */
7
8#ifndef __ASSEMBLY__
9extern void mcount_wrapper(void);
10#define MCOUNT_ADDR ((unsigned long)(mcount_wrapper))
11
12static inline unsigned long ftrace_call_adjust(unsigned long addr)
13{
14 return addr;
15}
16
17struct dyn_arch_ftrace {
18 /* No extra data needed on metag */
19};
20#endif /* __ASSEMBLY__ */
21
22#endif /* CONFIG_FUNCTION_TRACER */
23
24#endif /* _ASM_METAG_FTRACE */
diff --git a/arch/metag/include/asm/global_lock.h b/arch/metag/include/asm/global_lock.h
deleted file mode 100644
index 4d3da9682233..000000000000
--- a/arch/metag/include/asm/global_lock.h
+++ /dev/null
@@ -1,101 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_METAG_GLOBAL_LOCK_H
3#define __ASM_METAG_GLOBAL_LOCK_H
4
5#include <asm/metag_mem.h>
6
7/**
8 * __global_lock1() - Acquire global voluntary lock (LOCK1).
9 * @flags: Variable to store flags into.
10 *
11 * Acquires the Meta global voluntary lock (LOCK1), also taking care to disable
12 * all triggers so we cannot be interrupted, and to enforce a compiler barrier
13 * so that the compiler cannot reorder memory accesses across the lock.
14 *
15 * No other hardware thread will be able to acquire the voluntary or exclusive
16 * locks until the voluntary lock is released with @__global_unlock1, but they
17 * may continue to execute as long as they aren't trying to acquire either of
18 * the locks.
19 */
20#define __global_lock1(flags) do { \
21 unsigned int __trval; \
22 asm volatile("MOV %0,#0\n\t" \
23 "SWAP %0,TXMASKI\n\t" \
24 "LOCK1" \
25 : "=r" (__trval) \
26 : \
27 : "memory"); \
28 (flags) = __trval; \
29} while (0)
30
31/**
32 * __global_unlock1() - Release global voluntary lock (LOCK1).
33 * @flags: Variable to restore flags from.
34 *
35 * Releases the Meta global voluntary lock (LOCK1) acquired with
36 * @__global_lock1, also taking care to re-enable triggers, and to enforce a
37 * compiler barrier so that the compiler cannot reorder memory accesses across
38 * the unlock.
39 *
40 * This immediately allows another hardware thread to acquire the voluntary or
41 * exclusive locks.
42 */
43#define __global_unlock1(flags) do { \
44 unsigned int __trval = (flags); \
45 asm volatile("LOCK0\n\t" \
46 "MOV TXMASKI,%0" \
47 : \
48 : "r" (__trval) \
49 : "memory"); \
50} while (0)
51
52/**
53 * __global_lock2() - Acquire global exclusive lock (LOCK2).
54 * @flags: Variable to store flags into.
55 *
56 * Acquires the Meta global voluntary lock and global exclusive lock (LOCK2),
57 * also taking care to disable all triggers so we cannot be interrupted, to take
58 * the atomic lock (system event) and to enforce a compiler barrier so that the
59 * compiler cannot reorder memory accesses across the lock.
60 *
61 * No other hardware thread will be able to execute code until the locks are
62 * released with @__global_unlock2.
63 */
64#define __global_lock2(flags) do { \
65 unsigned int __trval; \
66 unsigned int __aloc_hi = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000; \
67 asm volatile("MOV %0,#0\n\t" \
68 "SWAP %0,TXMASKI\n\t" \
69 "LOCK2\n\t" \
70 "SETD [%1+#0x40],D1RtP" \
71 : "=r&" (__trval) \
72 : "u" (__aloc_hi) \
73 : "memory"); \
74 (flags) = __trval; \
75} while (0)
76
77/**
78 * __global_unlock2() - Release global exclusive lock (LOCK2).
79 * @flags: Variable to restore flags from.
80 *
81 * Releases the Meta global exclusive lock (LOCK2) and global voluntary lock
82 * acquired with @__global_lock2, also taking care to release the atomic lock
83 * (system event), re-enable triggers, and to enforce a compiler barrier so that
84 * the compiler cannot reorder memory accesses across the unlock.
85 *
86 * This immediately allows other hardware threads to continue executing and one
87 * of them to acquire locks.
88 */
89#define __global_unlock2(flags) do { \
90 unsigned int __trval = (flags); \
91 unsigned int __alock_hi = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000; \
92 asm volatile("SETD [%1+#0x00],D1RtP\n\t" \
93 "LOCK0\n\t" \
94 "MOV TXMASKI,%0" \
95 : \
96 : "r" (__trval), \
97 "u" (__alock_hi) \
98 : "memory"); \
99} while (0)
100
101#endif /* __ASM_METAG_GLOBAL_LOCK_H */
diff --git a/arch/metag/include/asm/highmem.h b/arch/metag/include/asm/highmem.h
deleted file mode 100644
index 8b0dfd684e15..000000000000
--- a/arch/metag/include/asm/highmem.h
+++ /dev/null
@@ -1,62 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_HIGHMEM_H
3#define _ASM_HIGHMEM_H
4
5#include <asm/cacheflush.h>
6#include <asm/kmap_types.h>
7#include <asm/fixmap.h>
8
9/*
10 * Right now we initialize only a single pte table. It can be extended
11 * easily, subsequent pte tables have to be allocated in one physical
12 * chunk of RAM.
13 */
14/*
15 * Ordering is (from lower to higher memory addresses):
16 *
17 * high_memory
18 * Persistent kmap area
19 * PKMAP_BASE
20 * fixed_addresses
21 * FIXADDR_START
22 * FIXADDR_TOP
23 * Vmalloc area
24 * VMALLOC_START
25 * VMALLOC_END
26 */
27#define PKMAP_BASE (FIXADDR_START - PMD_SIZE)
28#define LAST_PKMAP PTRS_PER_PTE
29#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
30#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
31#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
32
33#define kmap_prot PAGE_KERNEL
34
35static inline void flush_cache_kmaps(void)
36{
37 flush_cache_all();
38}
39
40/* declarations for highmem.c */
41extern unsigned long highstart_pfn, highend_pfn;
42
43extern pte_t *pkmap_page_table;
44
45extern void *kmap_high(struct page *page);
46extern void kunmap_high(struct page *page);
47
48extern void kmap_init(void);
49
50/*
51 * The following functions are already defined by <linux/highmem.h>
52 * when CONFIG_HIGHMEM is not set.
53 */
54#ifdef CONFIG_HIGHMEM
55extern void *kmap(struct page *page);
56extern void kunmap(struct page *page);
57extern void *kmap_atomic(struct page *page);
58extern void __kunmap_atomic(void *kvaddr);
59extern void *kmap_atomic_pfn(unsigned long pfn);
60#endif
61
62#endif
diff --git a/arch/metag/include/asm/hugetlb.h b/arch/metag/include/asm/hugetlb.h
deleted file mode 100644
index 1607363d2639..000000000000
--- a/arch/metag/include/asm/hugetlb.h
+++ /dev/null
@@ -1,75 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_METAG_HUGETLB_H
3#define _ASM_METAG_HUGETLB_H
4
5#include <asm/page.h>
6#include <asm-generic/hugetlb.h>
7
8
9static inline int is_hugepage_only_range(struct mm_struct *mm,
10 unsigned long addr,
11 unsigned long len) {
12 return 0;
13}
14
15int prepare_hugepage_range(struct file *file, unsigned long addr,
16 unsigned long len);
17
18static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
19 unsigned long addr, unsigned long end,
20 unsigned long floor,
21 unsigned long ceiling)
22{
23 free_pgd_range(tlb, addr, end, floor, ceiling);
24}
25
26static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
27 pte_t *ptep, pte_t pte)
28{
29 set_pte_at(mm, addr, ptep, pte);
30}
31
32static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
33 unsigned long addr, pte_t *ptep)
34{
35 return ptep_get_and_clear(mm, addr, ptep);
36}
37
38static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
39 unsigned long addr, pte_t *ptep)
40{
41}
42
43static inline int huge_pte_none(pte_t pte)
44{
45 return pte_none(pte);
46}
47
48static inline pte_t huge_pte_wrprotect(pte_t pte)
49{
50 return pte_wrprotect(pte);
51}
52
53static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
54 unsigned long addr, pte_t *ptep)
55{
56 ptep_set_wrprotect(mm, addr, ptep);
57}
58
59static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
60 unsigned long addr, pte_t *ptep,
61 pte_t pte, int dirty)
62{
63 return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
64}
65
66static inline pte_t huge_ptep_get(pte_t *ptep)
67{
68 return *ptep;
69}
70
71static inline void arch_clear_hugepage_flags(struct page *page)
72{
73}
74
75#endif /* _ASM_METAG_HUGETLB_H */
diff --git a/arch/metag/include/asm/hwthread.h b/arch/metag/include/asm/hwthread.h
deleted file mode 100644
index 8d2171da5414..000000000000
--- a/arch/metag/include/asm/hwthread.h
+++ /dev/null
@@ -1,41 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C) 2008 Imagination Technologies
4 */
5#ifndef __METAG_HWTHREAD_H
6#define __METAG_HWTHREAD_H
7
8#include <linux/bug.h>
9#include <linux/io.h>
10
11#include <asm/metag_mem.h>
12
13#define BAD_HWTHREAD_ID (0xFFU)
14#define BAD_CPU_ID (0xFFU)
15
16extern u8 cpu_2_hwthread_id[];
17extern u8 hwthread_id_2_cpu[];
18
19/*
20 * Each hardware thread's Control Unit registers are memory-mapped
21 * and can therefore be accessed by any other hardware thread.
22 *
23 * This helper function returns the memory address where "thread"'s
24 * register "regnum" is mapped.
25 */
26static inline
27void __iomem *__CU_addr(unsigned int thread, unsigned int regnum)
28{
29 unsigned int base, thread_offset, thread_regnum;
30
31 WARN_ON(thread == BAD_HWTHREAD_ID);
32
33 base = T0UCTREG0; /* Control unit base */
34
35 thread_offset = TnUCTRX_STRIDE * thread;
36 thread_regnum = TXUCTREGn_STRIDE * regnum;
37
38 return (void __iomem *)(base + thread_offset + thread_regnum);
39}
40
41#endif /* __METAG_HWTHREAD_H */
diff --git a/arch/metag/include/asm/io.h b/arch/metag/include/asm/io.h
deleted file mode 100644
index 71cd2bc54718..000000000000
--- a/arch/metag/include/asm/io.h
+++ /dev/null
@@ -1,170 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_METAG_IO_H
3#define _ASM_METAG_IO_H
4
5#include <linux/types.h>
6#include <asm/pgtable-bits.h>
7
8#define IO_SPACE_LIMIT 0
9
10#define page_to_bus page_to_phys
11#define bus_to_page phys_to_page
12
13/*
14 * Generic I/O
15 */
16
17#define __raw_readb __raw_readb
18static inline u8 __raw_readb(const volatile void __iomem *addr)
19{
20 u8 ret;
21 asm volatile("GETB %0,[%1]"
22 : "=da" (ret)
23 : "da" (addr)
24 : "memory");
25 return ret;
26}
27
28#define __raw_readw __raw_readw
29static inline u16 __raw_readw(const volatile void __iomem *addr)
30{
31 u16 ret;
32 asm volatile("GETW %0,[%1]"
33 : "=da" (ret)
34 : "da" (addr)
35 : "memory");
36 return ret;
37}
38
39#define __raw_readl __raw_readl
40static inline u32 __raw_readl(const volatile void __iomem *addr)
41{
42 u32 ret;
43 asm volatile("GETD %0,[%1]"
44 : "=da" (ret)
45 : "da" (addr)
46 : "memory");
47 return ret;
48}
49
50#define __raw_readq __raw_readq
51static inline u64 __raw_readq(const volatile void __iomem *addr)
52{
53 u64 ret;
54 asm volatile("GETL %0,%t0,[%1]"
55 : "=da" (ret)
56 : "da" (addr)
57 : "memory");
58 return ret;
59}
60
61#define __raw_writeb __raw_writeb
62static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
63{
64 asm volatile("SETB [%0],%1"
65 :
66 : "da" (addr),
67 "da" (b)
68 : "memory");
69}
70
71#define __raw_writew __raw_writew
72static inline void __raw_writew(u16 b, volatile void __iomem *addr)
73{
74 asm volatile("SETW [%0],%1"
75 :
76 : "da" (addr),
77 "da" (b)
78 : "memory");
79}
80
81#define __raw_writel __raw_writel
82static inline void __raw_writel(u32 b, volatile void __iomem *addr)
83{
84 asm volatile("SETD [%0],%1"
85 :
86 : "da" (addr),
87 "da" (b)
88 : "memory");
89}
90
91#define __raw_writeq __raw_writeq
92static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
93{
94 asm volatile("SETL [%0],%1,%t1"
95 :
96 : "da" (addr),
97 "da" (b)
98 : "memory");
99}
100
101/*
102 * The generic io.h can define all the other generic accessors
103 */
104
105#include <asm-generic/io.h>
106
107/*
108 * Despite being a 32bit architecture, Meta can do 64bit memory accesses
109 * (assuming the bus supports it).
110 */
111
112#define readq __raw_readq
113#define writeq __raw_writeq
114
115/*
116 * Meta specific I/O for accessing non-MMU areas.
117 *
118 * These can be provided with a physical address rather than an __iomem pointer
119 * and should only be used by core architecture code for accessing fixed core
120 * registers. Generic drivers should use ioremap and the generic I/O accessors.
121 */
122
123#define metag_in8(addr) __raw_readb((volatile void __iomem *)(addr))
124#define metag_in16(addr) __raw_readw((volatile void __iomem *)(addr))
125#define metag_in32(addr) __raw_readl((volatile void __iomem *)(addr))
126#define metag_in64(addr) __raw_readq((volatile void __iomem *)(addr))
127
128#define metag_out8(b, addr) __raw_writeb(b, (volatile void __iomem *)(addr))
129#define metag_out16(b, addr) __raw_writew(b, (volatile void __iomem *)(addr))
130#define metag_out32(b, addr) __raw_writel(b, (volatile void __iomem *)(addr))
131#define metag_out64(b, addr) __raw_writeq(b, (volatile void __iomem *)(addr))
132
133/*
134 * io remapping functions
135 */
136
137extern void __iomem *__ioremap(unsigned long offset,
138 size_t size, unsigned long flags);
139extern void __iounmap(void __iomem *addr);
140
141/**
142 * ioremap - map bus memory into CPU space
143 * @offset: bus address of the memory
144 * @size: size of the resource to map
145 *
146 * ioremap performs a platform specific sequence of operations to
147 * make bus memory CPU accessible via the readb/readw/readl/writeb/
148 * writew/writel functions and the other mmio helpers. The returned
149 * address is not guaranteed to be usable directly as a virtual
150 * address.
151 */
152#define ioremap(offset, size) \
153 __ioremap((offset), (size), 0)
154
155#define ioremap_nocache(offset, size) \
156 __ioremap((offset), (size), 0)
157
158#define ioremap_cached(offset, size) \
159 __ioremap((offset), (size), _PAGE_CACHEABLE)
160
161#define ioremap_wc(offset, size) \
162 __ioremap((offset), (size), _PAGE_WR_COMBINE)
163
164#define ioremap_wt(offset, size) \
165 __ioremap((offset), (size), 0)
166
167#define iounmap(addr) \
168 __iounmap(addr)
169
170#endif /* _ASM_METAG_IO_H */
diff --git a/arch/metag/include/asm/irq.h b/arch/metag/include/asm/irq.h
deleted file mode 100644
index cb02c29935a4..000000000000
--- a/arch/metag/include/asm/irq.h
+++ /dev/null
@@ -1,38 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_METAG_IRQ_H
3#define __ASM_METAG_IRQ_H
4
5#ifdef CONFIG_4KSTACKS
6extern void irq_ctx_init(int cpu);
7extern void irq_ctx_exit(int cpu);
8# define __ARCH_HAS_DO_SOFTIRQ
9#else
10static inline void irq_ctx_init(int cpu)
11{
12}
13static inline void irq_ctx_exit(int cpu)
14{
15}
16#endif
17
18void tbi_startup_interrupt(int);
19void tbi_shutdown_interrupt(int);
20
21struct pt_regs;
22
23int tbisig_map(unsigned int hw);
24extern void do_IRQ(int irq, struct pt_regs *regs);
25extern void init_IRQ(void);
26
27#ifdef CONFIG_METAG_SUSPEND_MEM
28int traps_save_context(void);
29int traps_restore_context(void);
30#endif
31
32#include <asm-generic/irq.h>
33
34#ifdef CONFIG_HOTPLUG_CPU
35extern void migrate_irqs(void);
36#endif
37
38#endif /* __ASM_METAG_IRQ_H */
diff --git a/arch/metag/include/asm/irqflags.h b/arch/metag/include/asm/irqflags.h
deleted file mode 100644
index e2fe34acb93b..000000000000
--- a/arch/metag/include/asm/irqflags.h
+++ /dev/null
@@ -1,94 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * IRQ flags handling
4 *
5 * This file gets included from lowlevel asm headers too, to provide
6 * wrapped versions of the local_irq_*() APIs, based on the
7 * raw_local_irq_*() functions from the lowlevel headers.
8 */
9#ifndef _ASM_IRQFLAGS_H
10#define _ASM_IRQFLAGS_H
11
12#ifndef __ASSEMBLY__
13
14#include <asm/core_reg.h>
15#include <asm/metag_regs.h>
16
17#define INTS_OFF_MASK TXSTATI_BGNDHALT_BIT
18
19#ifdef CONFIG_SMP
20extern unsigned int get_trigger_mask(void);
21#else
22
23extern unsigned int global_trigger_mask;
24
25static inline unsigned int get_trigger_mask(void)
26{
27 return global_trigger_mask;
28}
29#endif
30
31static inline unsigned long arch_local_save_flags(void)
32{
33 return __core_reg_get(TXMASKI);
34}
35
36static inline int arch_irqs_disabled_flags(unsigned long flags)
37{
38 return (flags & ~INTS_OFF_MASK) == 0;
39}
40
41static inline int arch_irqs_disabled(void)
42{
43 unsigned long flags = arch_local_save_flags();
44
45 return arch_irqs_disabled_flags(flags);
46}
47
48static inline unsigned long __irqs_disabled(void)
49{
50 /*
51 * We shouldn't enable exceptions if they are not already
52 * enabled. This is required for chancalls to work correctly.
53 */
54 return arch_local_save_flags() & INTS_OFF_MASK;
55}
56
57/*
58 * For spinlocks, etc:
59 */
60static inline unsigned long arch_local_irq_save(void)
61{
62 unsigned long flags = __irqs_disabled();
63
64 asm volatile("SWAP %0,TXMASKI\n" : "=r" (flags) : "0" (flags)
65 : "memory");
66
67 return flags;
68}
69
70static inline void arch_local_irq_restore(unsigned long flags)
71{
72 asm volatile("MOV TXMASKI,%0\n" : : "r" (flags) : "memory");
73}
74
75static inline void arch_local_irq_disable(void)
76{
77 unsigned long flags = __irqs_disabled();
78
79 asm volatile("MOV TXMASKI,%0\n" : : "r" (flags) : "memory");
80}
81
82#ifdef CONFIG_SMP
83/* Avoid circular include dependencies through <linux/preempt.h> */
84void arch_local_irq_enable(void);
85#else
86static inline void arch_local_irq_enable(void)
87{
88 arch_local_irq_restore(get_trigger_mask());
89}
90#endif
91
92#endif /* (__ASSEMBLY__) */
93
94#endif /* !(_ASM_IRQFLAGS_H) */
diff --git a/arch/metag/include/asm/l2cache.h b/arch/metag/include/asm/l2cache.h
deleted file mode 100644
index f260b158b8fe..000000000000
--- a/arch/metag/include/asm/l2cache.h
+++ /dev/null
@@ -1,259 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _METAG_L2CACHE_H
3#define _METAG_L2CACHE_H
4
5#ifdef CONFIG_METAG_L2C
6
7#include <asm/global_lock.h>
8#include <asm/io.h>
9
10/*
11 * Store the last known value of pfenable (we don't want prefetch enabled while
12 * L2 is off).
13 */
14extern int l2c_pfenable;
15
16/* defined in arch/metag/drivers/core-sysfs.c */
17extern struct sysdev_class cache_sysclass;
18
19static inline void wr_fence(void);
20
21/*
22 * Functions for reading of L2 cache configuration.
23 */
24
25/* Get raw L2 config register (CORE_CONFIG3) */
26static inline unsigned int meta_l2c_config(void)
27{
28 const unsigned int *corecfg3 = (const unsigned int *)METAC_CORE_CONFIG3;
29 return *corecfg3;
30}
31
32/* Get whether the L2 is present */
33static inline int meta_l2c_is_present(void)
34{
35 return meta_l2c_config() & METAC_CORECFG3_L2C_HAVE_L2C_BIT;
36}
37
38/* Get whether the L2 is configured for write-back instead of write-through */
39static inline int meta_l2c_is_writeback(void)
40{
41 return meta_l2c_config() & METAC_CORECFG3_L2C_MODE_BIT;
42}
43
44/* Get whether the L2 is unified instead of separated code/data */
45static inline int meta_l2c_is_unified(void)
46{
47 return meta_l2c_config() & METAC_CORECFG3_L2C_UNIFIED_BIT;
48}
49
50/* Get the L2 cache size in bytes */
51static inline unsigned int meta_l2c_size(void)
52{
53 unsigned int size_s;
54 if (!meta_l2c_is_present())
55 return 0;
56 size_s = (meta_l2c_config() & METAC_CORECFG3_L2C_SIZE_BITS)
57 >> METAC_CORECFG3_L2C_SIZE_S;
58 /* L2CSIZE is in KiB */
59 return 1024 << size_s;
60}
61
62/* Get the number of ways in the L2 cache */
63static inline unsigned int meta_l2c_ways(void)
64{
65 unsigned int ways_s;
66 if (!meta_l2c_is_present())
67 return 0;
68 ways_s = (meta_l2c_config() & METAC_CORECFG3_L2C_NUM_WAYS_BITS)
69 >> METAC_CORECFG3_L2C_NUM_WAYS_S;
70 return 0x1 << ways_s;
71}
72
73/* Get the line size of the L2 cache */
74static inline unsigned int meta_l2c_linesize(void)
75{
76 unsigned int line_size;
77 if (!meta_l2c_is_present())
78 return 0;
79 line_size = (meta_l2c_config() & METAC_CORECFG3_L2C_LINE_SIZE_BITS)
80 >> METAC_CORECFG3_L2C_LINE_SIZE_S;
81 switch (line_size) {
82 case METAC_CORECFG3_L2C_LINE_SIZE_64B:
83 return 64;
84 default:
85 return 0;
86 }
87}
88
89/* Get the revision ID of the L2 cache */
90static inline unsigned int meta_l2c_revision(void)
91{
92 return (meta_l2c_config() & METAC_CORECFG3_L2C_REV_ID_BITS)
93 >> METAC_CORECFG3_L2C_REV_ID_S;
94}
95
96
97/*
98 * Start an initialisation of the L2 cachelines and wait for completion.
99 * This should only be done in a LOCK1 or LOCK2 critical section while the L2
100 * is disabled.
101 */
102static inline void _meta_l2c_init(void)
103{
104 metag_out32(SYSC_L2C_INIT_INIT, SYSC_L2C_INIT);
105 while (metag_in32(SYSC_L2C_INIT) == SYSC_L2C_INIT_IN_PROGRESS)
106 /* do nothing */;
107}
108
109/*
110 * Start a writeback of dirty L2 cachelines and wait for completion.
111 * This should only be done in a LOCK1 or LOCK2 critical section.
112 */
113static inline void _meta_l2c_purge(void)
114{
115 metag_out32(SYSC_L2C_PURGE_PURGE, SYSC_L2C_PURGE);
116 while (metag_in32(SYSC_L2C_PURGE) == SYSC_L2C_PURGE_IN_PROGRESS)
117 /* do nothing */;
118}
119
120/* Set whether the L2 cache is enabled. */
121static inline void _meta_l2c_enable(int enabled)
122{
123 unsigned int enable;
124
125 enable = metag_in32(SYSC_L2C_ENABLE);
126 if (enabled)
127 enable |= SYSC_L2C_ENABLE_ENABLE_BIT;
128 else
129 enable &= ~SYSC_L2C_ENABLE_ENABLE_BIT;
130 metag_out32(enable, SYSC_L2C_ENABLE);
131}
132
133/* Set whether the L2 cache prefetch is enabled. */
134static inline void _meta_l2c_pf_enable(int pfenabled)
135{
136 unsigned int enable;
137
138 enable = metag_in32(SYSC_L2C_ENABLE);
139 if (pfenabled)
140 enable |= SYSC_L2C_ENABLE_PFENABLE_BIT;
141 else
142 enable &= ~SYSC_L2C_ENABLE_PFENABLE_BIT;
143 metag_out32(enable, SYSC_L2C_ENABLE);
144}
145
146/* Return whether the L2 cache is enabled */
147static inline int _meta_l2c_is_enabled(void)
148{
149 return metag_in32(SYSC_L2C_ENABLE) & SYSC_L2C_ENABLE_ENABLE_BIT;
150}
151
152/* Return whether the L2 cache prefetch is enabled */
153static inline int _meta_l2c_pf_is_enabled(void)
154{
155 return metag_in32(SYSC_L2C_ENABLE) & SYSC_L2C_ENABLE_PFENABLE_BIT;
156}
157
158
159/* Return whether the L2 cache is enabled */
160static inline int meta_l2c_is_enabled(void)
161{
162 int en;
163
164 /*
165 * There is no need to lock at the moment, as the enable bit is never
166 * intermediately changed, so we will never see an intermediate result.
167 */
168 en = _meta_l2c_is_enabled();
169
170 return en;
171}
172
173/*
174 * Ensure the L2 cache is disabled.
175 * Return whether the L2 was previously disabled.
176 */
177int meta_l2c_disable(void);
178
179/*
180 * Ensure the L2 cache is enabled.
181 * Return whether the L2 was previously enabled.
182 */
183int meta_l2c_enable(void);
184
185/* Return whether the L2 cache prefetch is enabled */
186static inline int meta_l2c_pf_is_enabled(void)
187{
188 return l2c_pfenable;
189}
190
191/*
192 * Set whether the L2 cache prefetch is enabled.
193 * Return whether the L2 prefetch was previously enabled.
194 */
195int meta_l2c_pf_enable(int pfenable);
196
197/*
198 * Flush the L2 cache.
199 * Return 1 if the L2 is disabled.
200 */
201int meta_l2c_flush(void);
202
203/*
204 * Write back all dirty cache lines in the L2 cache.
205 * Return 1 if the L2 is disabled or there isn't any writeback.
206 */
207static inline int meta_l2c_writeback(void)
208{
209 unsigned long flags;
210 int en;
211
212 /* no need to purge if it's not a writeback cache */
213 if (!meta_l2c_is_writeback())
214 return 1;
215
216 /*
217 * Purge only works if the L2 is enabled, and involves reading back to
218 * detect completion, so keep this operation atomic with other threads.
219 */
220 __global_lock1(flags);
221 en = meta_l2c_is_enabled();
222 if (likely(en)) {
223 wr_fence();
224 _meta_l2c_purge();
225 }
226 __global_unlock1(flags);
227
228 return !en;
229}
230
231#else /* CONFIG_METAG_L2C */
232
233#define meta_l2c_config() 0
234#define meta_l2c_is_present() 0
235#define meta_l2c_is_writeback() 0
236#define meta_l2c_is_unified() 0
237#define meta_l2c_size() 0
238#define meta_l2c_ways() 0
239#define meta_l2c_linesize() 0
240#define meta_l2c_revision() 0
241
242#define meta_l2c_is_enabled() 0
243#define _meta_l2c_pf_is_enabled() 0
244#define meta_l2c_pf_is_enabled() 0
245#define meta_l2c_disable() 1
246#define meta_l2c_enable() 0
247#define meta_l2c_pf_enable(X) 0
248static inline int meta_l2c_flush(void)
249{
250 return 1;
251}
252static inline int meta_l2c_writeback(void)
253{
254 return 1;
255}
256
257#endif /* CONFIG_METAG_L2C */
258
259#endif /* _METAG_L2CACHE_H */
diff --git a/arch/metag/include/asm/linkage.h b/arch/metag/include/asm/linkage.h
deleted file mode 100644
index 3a9024ecb827..000000000000
--- a/arch/metag/include/asm/linkage.h
+++ /dev/null
@@ -1,8 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_LINKAGE_H
3#define __ASM_LINKAGE_H
4
5#define __ALIGN .p2align 2
6#define __ALIGN_STR ".p2align 2"
7
8#endif
diff --git a/arch/metag/include/asm/mach/arch.h b/arch/metag/include/asm/mach/arch.h
deleted file mode 100644
index 433f94624fa2..000000000000
--- a/arch/metag/include/asm/mach/arch.h
+++ /dev/null
@@ -1,86 +0,0 @@
1/*
2 * arch/metag/include/asm/mach/arch.h
3 *
4 * Copyright (C) 2012 Imagination Technologies Ltd.
5 *
6 * based on the ARM version:
7 * Copyright (C) 2000 Russell King
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#ifndef _METAG_MACH_ARCH_H_
15#define _METAG_MACH_ARCH_H_
16
17#include <linux/stddef.h>
18
19#include <asm/clock.h>
20
21/**
22 * struct machine_desc - Describes a board controlled by a Meta.
23 * @name: Board/SoC name.
24 * @dt_compat: Array of device tree 'compatible' strings.
25 * @clocks: Clock callbacks.
26 *
27 * @nr_irqs: Maximum number of IRQs.
28 * If 0, defaults to NR_IRQS in asm-generic/irq.h.
29 *
30 * @init_early: Early init callback.
31 * @init_irq: IRQ init callback for setting up IRQ controllers.
32 * @init_machine: Arch init callback for setting up devices.
33 * @init_late: Late init callback.
34 *
35 * This structure is provided by each board which can be controlled by a Meta.
36 * It is chosen by matching the compatible strings in the device tree provided
37 * by the bootloader with the strings in @dt_compat, and sets up any aspects of
38 * the machine that aren't configured with device tree (yet).
39 */
40struct machine_desc {
41 const char *name;
42 const char **dt_compat;
43 struct meta_clock_desc *clocks;
44
45 unsigned int nr_irqs;
46
47 void (*init_early)(void);
48 void (*init_irq)(void);
49 void (*init_machine)(void);
50 void (*init_late)(void);
51};
52
53/*
54 * Current machine - only accessible during boot.
55 */
56extern const struct machine_desc *machine_desc;
57
58/*
59 * Machine type table - also only accessible during boot
60 */
61extern struct machine_desc __arch_info_begin[], __arch_info_end[];
62#define for_each_machine_desc(p) \
63 for (p = __arch_info_begin; p < __arch_info_end; p++)
64
65static inline struct machine_desc *default_machine_desc(void)
66{
67 /* the default machine is the last one linked in */
68 if (__arch_info_end - 1 < __arch_info_begin)
69 return NULL;
70 return __arch_info_end - 1;
71}
72
73/*
74 * Set of macros to define architecture features. This is built into
75 * a table by the linker.
76 */
77#define MACHINE_START(_type, _name) \
78static const struct machine_desc __mach_desc_##_type \
79__used \
80__attribute__((__section__(".arch.info.init"))) = { \
81 .name = _name,
82
83#define MACHINE_END \
84};
85
86#endif /* _METAG_MACH_ARCH_H_ */
diff --git a/arch/metag/include/asm/metag_isa.h b/arch/metag/include/asm/metag_isa.h
deleted file mode 100644
index c8aa2ae3899f..000000000000
--- a/arch/metag/include/asm/metag_isa.h
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 * asm/metag_isa.h
3 *
4 * Copyright (C) 2000-2007, 2012 Imagination Technologies.
5 *
6 * This program is free software; you can redistribute it and/or modify it under
7 * the terms of the GNU General Public License version 2 as published by the
8 * Free Software Foundation.
9 *
10 * Various defines for Meta instruction set.
11 */
12
13#ifndef _ASM_METAG_ISA_H_
14#define _ASM_METAG_ISA_H_
15
16
17/* L1 cache layout */
18
19/* Data cache line size as bytes and shift */
20#define DCACHE_LINE_BYTES 64
21#define DCACHE_LINE_S 6
22
23/* Number of ways in the data cache */
24#define DCACHE_WAYS 4
25
26/* Instruction cache line size as bytes and shift */
27#define ICACHE_LINE_BYTES 64
28#define ICACHE_LINE_S 6
29
30/* Number of ways in the instruction cache */
31#define ICACHE_WAYS 4
32
33
34/*
35 * CACHEWD/CACHEWL instructions use the bottom 8 bits of the data presented to
36 * control the operation actually achieved.
37 */
38/* Use of these two bits should be discouraged since the bits dont have
39 * consistent meanings
40 */
41#define CACHEW_ICACHE_BIT 0x01
42#define CACHEW_TLBFLUSH_BIT 0x02
43
44#define CACHEW_FLUSH_L1D_L2 0x0
45#define CACHEW_INVALIDATE_L1I 0x1
46#define CACHEW_INVALIDATE_L1DTLB 0x2
47#define CACHEW_INVALIDATE_L1ITLB 0x3
48#define CACHEW_WRITEBACK_L1D_L2 0x4
49#define CACHEW_INVALIDATE_L1D 0x8
50#define CACHEW_INVALIDATE_L1D_L2 0xC
51
52/*
53 * CACHERD/CACHERL instructions use bits 3:5 of the address presented to
54 * control the operation achieved and hence the specific result.
55 */
56#define CACHER_ADDR_BITS 0xFFFFFFC0
57#define CACHER_OPER_BITS 0x00000030
58#define CACHER_OPER_S 4
59#define CACHER_OPER_LINPHY 0
60#define CACHER_ICACHE_BIT 0x00000008
61#define CACHER_ICACHE_S 3
62
63/*
64 * CACHERD/CACHERL LINPHY Oper result is one/two 32-bit words
65 *
66 * If CRLINPHY0_VAL_BIT (Bit 0) set then,
67 * Lower 32-bits corresponds to MMCU_ENTRY_* above.
68 * Upper 32-bits corresponds to CRLINPHY1_* values below (if requested).
69 * else
70 * Lower 32-bits corresponds to CRLINPHY0_* values below.
71 * Upper 32-bits undefined.
72 */
73#define CRLINPHY0_VAL_BIT 0x00000001
74#define CRLINPHY0_FIRST_BIT 0x00000004 /* Set if VAL=0 due to first level */
75
76#define CRLINPHY1_READ_BIT 0x00000001 /* Set if reads permitted */
77#define CRLINPHY1_SINGLE_BIT 0x00000004 /* Set if TLB does not cache entry */
78#define CRLINPHY1_PAGEMSK_BITS 0x0000FFF0 /* Set to ((2^n-1)>>12) value */
79#define CRLINPHY1_PAGEMSK_S 4
80
81#endif /* _ASM_METAG_ISA_H_ */
diff --git a/arch/metag/include/asm/metag_mem.h b/arch/metag/include/asm/metag_mem.h
deleted file mode 100644
index 7848bc6d3b61..000000000000
--- a/arch/metag/include/asm/metag_mem.h
+++ /dev/null
@@ -1,1109 +0,0 @@
1/*
2 * asm/metag_mem.h
3 *
4 * Copyright (C) 2000-2007, 2012 Imagination Technologies.
5 *
6 * This program is free software; you can redistribute it and/or modify it under
7 * the terms of the GNU General Public License version 2 as published by the
8 * Free Software Foundation.
9 *
10 * Various defines for Meta (memory-mapped) registers.
11 */
12
13#ifndef _ASM_METAG_MEM_H_
14#define _ASM_METAG_MEM_H_
15
16/*****************************************************************************
17 * META MEMORY MAP LINEAR ADDRESS VALUES
18 ****************************************************************************/
19/*
20 * COMMON MEMORY MAP
21 * -----------------
22 */
23
24#define LINSYSTEM_BASE 0x00200000
25#define LINSYSTEM_LIMIT 0x07FFFFFF
26
27/* Linear cache flush now implemented via DCACHE instruction. These defines
28 relate to a special region that used to exist for achieving cache flushes.
29 */
30#define LINSYSLFLUSH_S 0
31
32#define LINSYSRES0_BASE 0x00200000
33#define LINSYSRES0_LIMIT 0x01FFFFFF
34
35#define LINSYSCUSTOM_BASE 0x02000000
36#define LINSYSCUSTOM_LIMIT 0x02FFFFFF
37
38#define LINSYSEXPAND_BASE 0x03000000
39#define LINSYSEXPAND_LIMIT 0x03FFFFFF
40
41#define LINSYSEVENT_BASE 0x04000000
42#define LINSYSEVENT_WR_ATOMIC_UNLOCK 0x04000000
43#define LINSYSEVENT_WR_ATOMIC_LOCK 0x04000040
44#define LINSYSEVENT_WR_CACHE_DISABLE 0x04000080
45#define LINSYSEVENT_WR_CACHE_ENABLE 0x040000C0
46#define LINSYSEVENT_WR_COMBINE_FLUSH 0x04000100
47#define LINSYSEVENT_WR_FENCE 0x04000140
48#define LINSYSEVENT_LIMIT 0x04000FFF
49
50#define LINSYSCFLUSH_BASE 0x04400000
51#define LINSYSCFLUSH_DCACHE_LINE 0x04400000
52#define LINSYSCFLUSH_ICACHE_LINE 0x04500000
53#define LINSYSCFLUSH_MMCU 0x04700000
54#ifndef METAC_1_2
55#define LINSYSCFLUSH_TxMMCU_BASE 0x04700020
56#define LINSYSCFLUSH_TxMMCU_STRIDE 0x00000008
57#endif
58#define LINSYSCFLUSH_ADDR_BITS 0x000FFFFF
59#define LINSYSCFLUSH_ADDR_S 0
60#define LINSYSCFLUSH_LIMIT 0x047FFFFF
61
62#define LINSYSCTRL_BASE 0x04800000
63#define LINSYSCTRL_LIMIT 0x04FFFFFF
64
65#define LINSYSMTABLE_BASE 0x05000000
66#define LINSYSMTABLE_LIMIT 0x05FFFFFF
67
68#define LINSYSDIRECT_BASE 0x06000000
69#define LINSYSDIRECT_LIMIT 0x07FFFFFF
70
71#define LINLOCAL_BASE 0x08000000
72#define LINLOCAL_LIMIT 0x7FFFFFFF
73
74#define LINCORE_BASE 0x80000000
75#define LINCORE_LIMIT 0x87FFFFFF
76
77#define LINCORE_CODE_BASE 0x80000000
78#define LINCORE_CODE_LIMIT 0x81FFFFFF
79
80#define LINCORE_DATA_BASE 0x82000000
81#define LINCORE_DATA_LIMIT 0x83FFFFFF
82
83
84/* The core can support locked icache lines in this region */
85#define LINCORE_ICACHE_BASE 0x84000000
86#define LINCORE_ICACHE_LIMIT 0x85FFFFFF
87
88/* The core can support locked dcache lines in this region */
89#define LINCORE_DCACHE_BASE 0x86000000
90#define LINCORE_DCACHE_LIMIT 0x87FFFFFF
91
92#define LINGLOBAL_BASE 0x88000000
93#define LINGLOBAL_LIMIT 0xFFFDFFFF
94
95/*
96 * CHIP Core Register Map
97 * ----------------------
98 */
99#define CORE_HWBASE 0x04800000
100#define PRIV_HWBASE 0x04810000
101#define TRIG_HWBASE 0x04820000
102#define SYSC_HWBASE 0x04830000
103
104/*****************************************************************************
105 * INTER-THREAD KICK REGISTERS FOR SOFTWARE EVENT GENERATION
106 ****************************************************************************/
107/*
108 * These values define memory mapped registers that can be used to supply
109 * kicks to threads that service arbitrary software events.
110 */
111
112#define T0KICK 0x04800800 /* Background kick 0 */
113#define TXXKICK_MAX 0xFFFF /* Maximum kicks */
114#define TnXKICK_STRIDE 0x00001000 /* Thread scale value */
115#define TnXKICK_STRIDE_S 12
116#define T0KICKI 0x04800808 /* Interrupt kick 0 */
117#define TXIKICK_OFFSET 0x00000008 /* Int level offset value */
118#define T1KICK 0x04801800 /* Background kick 1 */
119#define T1KICKI 0x04801808 /* Interrupt kick 1 */
120#define T2KICK 0x04802800 /* Background kick 2 */
121#define T2KICKI 0x04802808 /* Interrupt kick 2 */
122#define T3KICK 0x04803800 /* Background kick 3 */
123#define T3KICKI 0x04803808 /* Interrupt kick 3 */
124
125/*****************************************************************************
126 * GLOBAL REGISTER ACCESS RESOURCES
127 ****************************************************************************/
128/*
129 * These values define memory mapped registers that allow access to the
130 * internal state of all threads in order to allow global set-up of thread
131 * state and external handling of thread events, errors, or debugging.
132 *
133 * The actual unit and register index values needed to access individual
134 * registers are chip specific - see METAC_TXUXX_VALUES in metac_x_y.h.
135 * However two C array initialisers TXUXX_MASKS and TGUXX_MASKS will always be
136 * defined to allow arbitrary loading, display, and saving of all valid
137 * register states without detailed knowledge of their purpose - TXUXX sets
138 * bits for all valid registers and TGUXX sets bits for the sub-set which are
139 * global.
140 */
141
142#define T0UCTREG0 0x04800000 /* Access to all CT regs */
143#define TnUCTRX_STRIDE 0x00001000 /* Thread scale value */
144#define TXUCTREGn_STRIDE 0x00000008 /* Register scale value */
145
146#define TXUXXRXDT 0x0480FFF0 /* Data to/from any threads reg */
147#define TXUXXRXRQ 0x0480FFF8
148#define TXUXXRXRQ_DREADY_BIT 0x80000000 /* Poll for done */
149#define TXUXXRXRQ_DSPEXT_BIT 0x00020000 /* Addr DSP Regs */
150#define TXUXXRXRQ_RDnWR_BIT 0x00010000 /* Set for read */
151#define TXUXXRXRQ_TX_BITS 0x00003000 /* Thread number */
152#define TXUXXRXRQ_TX_S 12
153#define TXUXXRXRQ_RX_BITS 0x000001F0 /* Register num */
154#define TXUXXRXRQ_RX_S 4
155#define TXUXXRXRQ_DSPRARD0 0 /* DSP RAM A Read Pointer 0 */
156#define TXUXXRXRQ_DSPRARD1 1 /* DSP RAM A Read Pointer 1 */
157#define TXUXXRXRQ_DSPRAWR0 2 /* DSP RAM A Write Pointer 0 */
158#define TXUXXRXRQ_DSPRAWR2 3 /* DSP RAM A Write Pointer 1 */
159#define TXUXXRXRQ_DSPRBRD0 4 /* DSP RAM B Read Pointer 0 */
160#define TXUXXRXRQ_DSPRBRD1 5 /* DSP RAM B Read Pointer 1 */
161#define TXUXXRXRQ_DSPRBWR0 6 /* DSP RAM B Write Pointer 0 */
162#define TXUXXRXRQ_DSPRBWR1 7 /* DSP RAM B Write Pointer 1 */
163#define TXUXXRXRQ_DSPRARINC0 8 /* DSP RAM A Read Increment 0 */
164#define TXUXXRXRQ_DSPRARINC1 9 /* DSP RAM A Read Increment 1 */
165#define TXUXXRXRQ_DSPRAWINC0 10 /* DSP RAM A Write Increment 0 */
166#define TXUXXRXRQ_DSPRAWINC1 11 /* DSP RAM A Write Increment 1 */
167#define TXUXXRXRQ_DSPRBRINC0 12 /* DSP RAM B Read Increment 0 */
168#define TXUXXRXRQ_DSPRBRINC1 13 /* DSP RAM B Read Increment 1 */
169#define TXUXXRXRQ_DSPRBWINC0 14 /* DSP RAM B Write Increment 0 */
170#define TXUXXRXRQ_DSPRBWINC1 15 /* DSP RAM B Write Increment 1 */
171
172#define TXUXXRXRQ_ACC0L0 16 /* Accumulator 0 bottom 32-bits */
173#define TXUXXRXRQ_ACC1L0 17 /* Accumulator 1 bottom 32-bits */
174#define TXUXXRXRQ_ACC2L0 18 /* Accumulator 2 bottom 32-bits */
175#define TXUXXRXRQ_ACC3L0 19 /* Accumulator 3 bottom 32-bits */
176#define TXUXXRXRQ_ACC0HI 20 /* Accumulator 0 top 8-bits */
177#define TXUXXRXRQ_ACC1HI 21 /* Accumulator 1 top 8-bits */
178#define TXUXXRXRQ_ACC2HI 22 /* Accumulator 2 top 8-bits */
179#define TXUXXRXRQ_ACC3HI 23 /* Accumulator 3 top 8-bits */
180#define TXUXXRXRQ_UXX_BITS 0x0000000F /* Unit number */
181#define TXUXXRXRQ_UXX_S 0
182
183/*****************************************************************************
184 * PRIVILEGE CONTROL VALUES FOR MEMORY MAPPED RESOURCES
185 ****************************************************************************/
186/*
187 * These values define memory mapped registers that give control over and
188 * the privilege required to access other memory mapped resources. These
189 * registers themselves always require privilege to update them.
190 */
191
192#define TXPRIVREG_STRIDE 0x8 /* Delta between per-thread regs */
193#define TXPRIVREG_STRIDE_S 3
194
195/*
196 * Each bit 0 to 15 defines privilege required to access internal register
197 * regions 0x04800000 to 0x048FFFFF in 64k chunks
198 */
199#define T0PIOREG 0x04810100
200#define T1PIOREG 0x04810108
201#define T2PIOREG 0x04810110
202#define T3PIOREG 0x04810118
203
204/*
205 * Each bit 0 to 31 defines privilege required to use the pair of
206 * system events implemented as writes in the regions 0x04000000 to
207 * 0x04000FFF in 2*64 byte chunks.
208 */
209#define T0PSYREG 0x04810180
210#define T1PSYREG 0x04810188
211#define T2PSYREG 0x04810190
212#define T3PSYREG 0x04810198
213
214/*
215 * CHIP PRIV CONTROLS
216 * ------------------
217 */
218
219/* The TXPIOREG register holds a bit mask directly mappable to
220 corresponding addresses in the range 0x04800000 to 0x049FFFFF */
221#define TXPIOREG_ADDR_BITS 0x1F0000 /* Up to 32x64K bytes */
222#define TXPIOREG_ADDR_S 16
223
224/* Hence based on the _HWBASE values ... */
225#define TXPIOREG_CORE_BIT (1<<((0x04800000>>16)&0x1F))
226#define TXPIOREG_PRIV_BIT (1<<((0x04810000>>16)&0x1F))
227#define TXPIOREG_TRIG_BIT (1<<((0x04820000>>16)&0x1F))
228#define TXPIOREG_SYSC_BIT (1<<((0x04830000>>16)&0x1F))
229
230#define TXPIOREG_WRC_BIT 0x00080000 /* Wr combiner reg priv */
231#define TXPIOREG_LOCALBUS_RW_BIT 0x00040000 /* Local bus rd/wr priv */
232#define TXPIOREG_SYSREGBUS_RD_BIT 0x00020000 /* Sys reg bus read priv */
233#define TXPIOREG_SYSREGBUS_WR_BIT 0x00010000 /* Sys reg bus write priv */
234
235/* CORE region privilege controls */
236#define T0PRIVCORE 0x04800828
237#define TXPRIVCORE_TXBKICK_BIT 0x001 /* Background kick priv */
238#define TXPRIVCORE_TXIKICK_BIT 0x002 /* Interrupt kick priv */
239#define TXPRIVCORE_TXAMAREGX_BIT 0x004 /* TXAMAREG4|5|6 priv */
240#define TnPRIVCORE_STRIDE 0x00001000
241
242#define T0PRIVSYSR 0x04810000
243#define TnPRIVSYSR_STRIDE 0x00000008
244#define TnPRIVSYSR_STRIDE_S 3
245#define TXPRIVSYSR_CFLUSH_BIT 0x01
246#define TXPRIVSYSR_MTABLE_BIT 0x02
247#define TXPRIVSYSR_DIRECT_BIT 0x04
248#ifdef METAC_1_2
249#define TXPRIVSYSR_ALL_BITS 0x07
250#else
251#define TXPRIVSYSR_CORE_BIT 0x08
252#define TXPRIVSYSR_CORECODE_BIT 0x10
253#define TXPRIVSYSR_ALL_BITS 0x1F
254#endif
255#define T1PRIVSYSR 0x04810008
256#define T2PRIVSYSR 0x04810010
257#define T3PRIVSYSR 0x04810018
258
259/*****************************************************************************
260 * H/W TRIGGER STATE/LEVEL REGISTERS AND H/W TRIGGER VECTORS
261 ****************************************************************************/
262/*
263 * These values define memory mapped registers that give control over and
264 * the state of hardware trigger sources both external to the META processor
265 * and internal to it.
266 */
267
268#define HWSTATMETA 0x04820000 /* Hardware status/clear META trig */
269#define HWSTATMETA_T0HALT_BITS 0xF
270#define HWSTATMETA_T0HALT_S 0
271#define HWSTATMETA_T0BHALT_BIT 0x1 /* Background HALT */
272#define HWSTATMETA_T0IHALT_BIT 0x2 /* Interrupt HALT */
273#define HWSTATMETA_T0PHALT_BIT 0x4 /* PF/RO Memory HALT */
274#define HWSTATMETA_T0AMATR_BIT 0x8 /* AMA trigger */
275#define HWSTATMETA_TnINT_S 4 /* Shift by (thread*4) */
276#define HWSTATEXT 0x04820010 /* H/W status/clear external trigs 0-31 */
277#define HWSTATEXT2 0x04820018 /* H/W status/clear external trigs 32-63 */
278#define HWSTATEXT4 0x04820020 /* H/W status/clear external trigs 64-95 */
279#define HWSTATEXT6 0x04820028 /* H/W status/clear external trigs 96-127 */
280#define HWLEVELEXT 0x04820030 /* Edge/Level type of external trigs 0-31 */
281#define HWLEVELEXT2 0x04820038 /* Edge/Level type of external trigs 32-63 */
282#define HWLEVELEXT4 0x04820040 /* Edge/Level type of external trigs 64-95 */
283#define HWLEVELEXT6 0x04820048 /* Edge/Level type of external trigs 96-127 */
284#define HWLEVELEXT_XXX_LEVEL 1 /* Level sense logic in HWSTATEXTn */
285#define HWLEVELEXT_XXX_EDGE 0
286#define HWMASKEXT 0x04820050 /* Enable/disable of external trigs 0-31 */
287#define HWMASKEXT2 0x04820058 /* Enable/disable of external trigs 32-63 */
288#define HWMASKEXT4 0x04820060 /* Enable/disable of external trigs 64-95 */
289#define HWMASKEXT6 0x04820068 /* Enable/disable of external trigs 96-127 */
290#define T0VECINT_BHALT 0x04820500 /* Background HALT trigger vector */
291#define TXVECXXX_BITS 0xF /* Per-trigger vector vals 0,1,4-15 */
292#define TXVECXXX_S 0
293#define T0VECINT_IHALT 0x04820508 /* Interrupt HALT */
294#define T0VECINT_PHALT 0x04820510 /* PF/RO memory fault */
295#define T0VECINT_AMATR 0x04820518 /* AMA trigger */
296#define TnVECINT_STRIDE 0x00000020 /* Per thread stride */
297#define HWVEC0EXT 0x04820700 /* Vectors for external triggers 0-31 */
298#define HWVEC20EXT 0x04821700 /* Vectors for external triggers 32-63 */
299#define HWVEC40EXT 0x04822700 /* Vectors for external triggers 64-95 */
300#define HWVEC60EXT 0x04823700 /* Vectors for external triggers 96-127 */
301#define HWVECnEXT_STRIDE 0x00000008 /* Per trigger stride */
302#define HWVECnEXT_DEBUG 0x1 /* Redirect trigger to debug i/f */
303
304/*
305 * CORE HWCODE-BREAKPOINT REGISTERS/VALUES
306 * ---------------------------------------
307 */
308#define CODEB0ADDR 0x0480FF00 /* Address specifier */
309#define CODEBXADDR_MATCHX_BITS 0xFFFFFFFC
310#define CODEBXADDR_MATCHX_S 2
311#define CODEB0CTRL 0x0480FF08 /* Control */
312#define CODEBXCTRL_MATEN_BIT 0x80000000 /* Match 'Enable' */
313#define CODEBXCTRL_MATTXEN_BIT 0x10000000 /* Match threadn enable */
314#define CODEBXCTRL_HITC_BITS 0x00FF0000 /* Hit counter */
315#define CODEBXCTRL_HITC_S 16
316#define CODEBXHITC_NEXT 0xFF /* Next 'hit' will trigger */
317#define CODEBXHITC_HIT1 0x00 /* No 'hits' after trigger */
318#define CODEBXCTRL_MMASK_BITS 0x0000FFFC /* Mask ADDR_MATCH bits */
319#define CODEBXCTRL_MMASK_S 2
320#define CODEBXCTRL_MATLTX_BITS 0x00000003 /* Match threadn LOCAL addr */
321#define CODEBXCTRL_MATLTX_S 0 /* Match threadn LOCAL addr */
322#define CODEBnXXXX_STRIDE 0x00000010 /* Stride between CODEB reg sets */
323#define CODEBnXXXX_STRIDE_S 4
324#define CODEBnXXXX_LIMIT 3 /* Sets 0-3 */
325
326/*
327 * CORE DATA-WATCHPOINT REGISTERS/VALUES
328 * -------------------------------------
329 */
330#define DATAW0ADDR 0x0480FF40 /* Address specifier */
331#define DATAWXADDR_MATCHR_BITS 0xFFFFFFF8
332#define DATAWXADDR_MATCHR_S 3
333#define DATAWXADDR_MATCHW_BITS 0xFFFFFFFF
334#define DATAWXADDR_MATCHW_S 0
335#define DATAW0CTRL 0x0480FF48 /* Control */
336#define DATAWXCTRL_MATRD_BIT 0x80000000 /* Match 'Read' */
337#ifndef METAC_1_2
338#define DATAWXCTRL_MATNOTTX_BIT 0x20000000 /* Invert threadn enable */
339#endif
340#define DATAWXCTRL_MATWR_BIT 0x40000000 /* Match 'Write' */
341#define DATAWXCTRL_MATTXEN_BIT 0x10000000 /* Match threadn enable */
342#define DATAWXCTRL_WRSIZE_BITS 0x0F000000 /* Write Match Size */
343#define DATAWXCTRL_WRSIZE_S 24
344#define DATAWWRSIZE_ANY 0 /* Any size transaction matches */
345#define DATAWWRSIZE_8BIT 1 /* Specific sizes ... */
346#define DATAWWRSIZE_16BIT 2
347#define DATAWWRSIZE_32BIT 3
348#define DATAWWRSIZE_64BIT 4
349#define DATAWXCTRL_HITC_BITS 0x00FF0000 /* Hit counter */
350#define DATAWXCTRL_HITC_S 16
351#define DATAWXHITC_NEXT 0xFF /* Next 'hit' will trigger */
352#define DATAWXHITC_HIT1 0x00 /* No 'hits' after trigger */
353#define DATAWXCTRL_MMASK_BITS 0x0000FFF8 /* Mask ADDR_MATCH bits */
354#define DATAWXCTRL_MMASK_S 3
355#define DATAWXCTRL_MATLTX_BITS 0x00000003 /* Match threadn LOCAL addr */
356#define DATAWXCTRL_MATLTX_S 0 /* Match threadn LOCAL addr */
357#define DATAW0DMATCH0 0x0480FF50 /* Write match data */
358#define DATAW0DMATCH1 0x0480FF58
359#define DATAW0DMASK0 0x0480FF60 /* Write match data mask */
360#define DATAW0DMASK1 0x0480FF68
361#define DATAWnXXXX_STRIDE 0x00000040 /* Stride between DATAW reg sets */
362#define DATAWnXXXX_STRIDE_S 6
363#define DATAWnXXXX_LIMIT 1 /* Sets 0,1 */
364
365/*
366 * CHIP Automatic Mips Allocation control registers
367 * ------------------------------------------------
368 */
369
370/* CORE memory mapped AMA registers */
371#define T0AMAREG4 0x04800810
372#define TXAMAREG4_POOLSIZE_BITS 0x3FFFFF00
373#define TXAMAREG4_POOLSIZE_S 8
374#define TXAMAREG4_AVALUE_BITS 0x000000FF
375#define TXAMAREG4_AVALUE_S 0
376#define T0AMAREG5 0x04800818
377#define TXAMAREG5_POOLC_BITS 0x07FFFFFF
378#define TXAMAREG5_POOLC_S 0
379#define T0AMAREG6 0x04800820
380#define TXAMAREG6_DLINEDEF_BITS 0x00FFFFF0
381#define TXAMAREG6_DLINEDEF_S 0
382#define TnAMAREGX_STRIDE 0x00001000
383
384/*
385 * Memory Management Control Unit Table Entries
386 * --------------------------------------------
387 */
388#define MMCU_ENTRY_S 4 /* -> Entry size */
389#define MMCU_ENTRY_ADDR_BITS 0xFFFFF000 /* Physical address */
390#define MMCU_ENTRY_ADDR_S 12 /* -> Page size */
391#define MMCU_ENTRY_CWIN_BITS 0x000000C0 /* Caching 'window' selection */
392#define MMCU_ENTRY_CWIN_S 6
393#define MMCU_CWIN_UNCACHED 0 /* May not be memory etc. */
394#define MMCU_CWIN_BURST 1 /* Cached but LRU unset */
395#define MMCU_CWIN_C1SET 2 /* Cached in 1 set only */
396#define MMCU_CWIN_CACHED 3 /* Fully cached */
397#define MMCU_ENTRY_CACHE_BIT 0x00000080 /* Set for cached region */
398#define MMCU_ECACHE1_FULL_BIT 0x00000040 /* Use all the sets */
399#define MMCU_ECACHE0_BURST_BIT 0x00000040 /* Match bursts */
400#define MMCU_ENTRY_SYS_BIT 0x00000010 /* Sys-coherent access required */
401#define MMCU_ENTRY_WRC_BIT 0x00000008 /* Write combining allowed */
402#define MMCU_ENTRY_PRIV_BIT 0x00000004 /* Privilege required */
403#define MMCU_ENTRY_WR_BIT 0x00000002 /* Writes allowed */
404#define MMCU_ENTRY_VAL_BIT 0x00000001 /* Entry is valid */
405
406#ifdef METAC_2_1
407/*
408 * Extended first-level/top table entries have extra/larger fields in later
409 * cores as bits 11:0 previously had no effect in such table entries.
410 */
411#define MMCU_E1ENT_ADDR_BITS 0xFFFFFFC0 /* Physical address */
412#define MMCU_E1ENT_ADDR_S 6 /* -> resolution < page size */
413#define MMCU_E1ENT_PGSZ_BITS 0x0000001E /* Page size for 2nd level */
414#define MMCU_E1ENT_PGSZ_S 1
415#define MMCU_E1ENT_PGSZ0_POWER 12 /* PgSz 0 -> 4K */
416#define MMCU_E1ENT_PGSZ_MAX 10 /* PgSz 10 -> 4M maximum */
417#define MMCU_E1ENT_MINIM_BIT 0x00000020
418#endif /* METAC_2_1 */
419
420/* MMCU control register in SYSC region */
421#define MMCU_TABLE_PHYS_ADDR 0x04830010
422#define MMCU_TABLE_PHYS_ADDR_BITS 0xFFFFFFFC
423#ifdef METAC_2_1
424#define MMCU_TABLE_PHYS_EXTEND 0x00000001 /* See below */
425#endif
426#define MMCU_DCACHE_CTRL_ADDR 0x04830018
427#define MMCU_xCACHE_CTRL_ENABLE_BIT 0x00000001
428#define MMCU_xCACHE_CTRL_PARTITION_BIT 0x00000000 /* See xCPART below */
429#define MMCU_ICACHE_CTRL_ADDR 0x04830020
430
431#ifdef METAC_2_1
432
433/*
434 * Allow direct access to physical memory used to implement MMU table.
435 *
436 * Each is based on a corresponding MMCU_TnLOCAL_TABLE_PHYSn or similar
437 * MMCU_TnGLOBAL_TABLE_PHYSn register pair (see next).
438 */
439#define LINSYSMEMT0L_BASE 0x05000000
440#define LINSYSMEMT0L_LIMIT 0x051FFFFF
441#define LINSYSMEMTnX_STRIDE 0x00200000 /* 2MB Local per thread */
442#define LINSYSMEMTnX_STRIDE_S 21
443#define LINSYSMEMTXG_OFFSET 0x00800000 /* +2MB Global per thread */
444#define LINSYSMEMTXG_OFFSET_S 23
445#define LINSYSMEMT1L_BASE 0x05200000
446#define LINSYSMEMT1L_LIMIT 0x053FFFFF
447#define LINSYSMEMT2L_BASE 0x05400000
448#define LINSYSMEMT2L_LIMIT 0x055FFFFF
449#define LINSYSMEMT3L_BASE 0x05600000
450#define LINSYSMEMT3L_LIMIT 0x057FFFFF
451#define LINSYSMEMT0G_BASE 0x05800000
452#define LINSYSMEMT0G_LIMIT 0x059FFFFF
453#define LINSYSMEMT1G_BASE 0x05A00000
454#define LINSYSMEMT1G_LIMIT 0x05BFFFFF
455#define LINSYSMEMT2G_BASE 0x05C00000
456#define LINSYSMEMT2G_LIMIT 0x05DFFFFF
457#define LINSYSMEMT3G_BASE 0x05E00000
458#define LINSYSMEMT3G_LIMIT 0x05FFFFFF
459
460/*
461 * Extended MMU table functionality allows a sparse or flat table to be
462 * described much more efficiently than before.
463 */
464#define MMCU_T0LOCAL_TABLE_PHYS0 0x04830700
465#define MMCU_TnX_TABLE_PHYSX_STRIDE 0x20 /* Offset per thread */
466#define MMCU_TnX_TABLE_PHYSX_STRIDE_S 5
467#define MMCU_TXG_TABLE_PHYSX_OFFSET 0x10 /* Global versus local */
468#define MMCU_TXG_TABLE_PHYSX_OFFSET_S 4
469#define MMCU_TBLPHYS0_DCCTRL_BITS 0x000000DF /* DC controls */
470#define MMCU_TBLPHYS0_ENTLB_BIT 0x00000020 /* Cache in TLB */
471#define MMCU_TBLPHYS0_TBLSZ_BITS 0x00000F00 /* Area supported */
472#define MMCU_TBLPHYS0_TBLSZ_S 8
473#define MMCU_TBLPHYS0_TBLSZ0_POWER 22 /* 0 -> 4M */
474#define MMCU_TBLPHYS0_TBLSZ_MAX 9 /* 9 -> 2G */
475#define MMCU_TBLPHYS0_LINBASE_BITS 0xFFC00000 /* Linear base */
476#define MMCU_TBLPHYS0_LINBASE_S 22
477
478#define MMCU_T0LOCAL_TABLE_PHYS1 0x04830708
479#define MMCU_TBLPHYS1_ADDR_BITS 0xFFFFFFFC /* Physical base */
480#define MMCU_TBLPHYS1_ADDR_S 2
481
482#define MMCU_T0GLOBAL_TABLE_PHYS0 0x04830710
483#define MMCU_T0GLOBAL_TABLE_PHYS1 0x04830718
484#define MMCU_T1LOCAL_TABLE_PHYS0 0x04830720
485#define MMCU_T1LOCAL_TABLE_PHYS1 0x04830728
486#define MMCU_T1GLOBAL_TABLE_PHYS0 0x04830730
487#define MMCU_T1GLOBAL_TABLE_PHYS1 0x04830738
488#define MMCU_T2LOCAL_TABLE_PHYS0 0x04830740
489#define MMCU_T2LOCAL_TABLE_PHYS1 0x04830748
490#define MMCU_T2GLOBAL_TABLE_PHYS0 0x04830750
491#define MMCU_T2GLOBAL_TABLE_PHYS1 0x04830758
492#define MMCU_T3LOCAL_TABLE_PHYS0 0x04830760
493#define MMCU_T3LOCAL_TABLE_PHYS1 0x04830768
494#define MMCU_T3GLOBAL_TABLE_PHYS0 0x04830770
495#define MMCU_T3GLOBAL_TABLE_PHYS1 0x04830778
496
497#define MMCU_T0EBWCCTRL 0x04830640
498#define MMCU_TnEBWCCTRL_BITS 0x00000007
499#define MMCU_TnEBWCCTRL_S 0
500#define MMCU_TnEBWCCCTRL_DISABLE_ALL 0
501#define MMCU_TnEBWCCCTRL_ABIT25 1
502#define MMCU_TnEBWCCCTRL_ABIT26 2
503#define MMCU_TnEBWCCCTRL_ABIT27 3
504#define MMCU_TnEBWCCCTRL_ABIT28 4
505#define MMCU_TnEBWCCCTRL_ABIT29 5
506#define MMCU_TnEBWCCCTRL_ABIT30 6
507#define MMCU_TnEBWCCCTRL_ENABLE_ALL 7
508#define MMCU_TnEBWCCTRL_STRIDE 8
509
510#endif /* METAC_2_1 */
511
512
513/* Registers within the SYSC register region */
514#define METAC_ID 0x04830000
515#define METAC_ID_MAJOR_BITS 0xFF000000
516#define METAC_ID_MAJOR_S 24
517#define METAC_ID_MINOR_BITS 0x00FF0000
518#define METAC_ID_MINOR_S 16
519#define METAC_ID_REV_BITS 0x0000FF00
520#define METAC_ID_REV_S 8
521#define METAC_ID_MAINT_BITS 0x000000FF
522#define METAC_ID_MAINT_S 0
523
524#ifdef METAC_2_1
525/* Use of this section is strongly deprecated */
526#define METAC_ID2 0x04830008
527#define METAC_ID2_DESIGNER_BITS 0xFFFF0000 /* Modified by customer */
528#define METAC_ID2_DESIGNER_S 16
529#define METAC_ID2_MINOR2_BITS 0x00000F00 /* 3rd digit of prod rev */
530#define METAC_ID2_MINOR2_S 8
531#define METAC_ID2_CONFIG_BITS 0x000000FF /* Wrapper configuration */
532#define METAC_ID2_CONFIG_S 0
533
534/* Primary core identification and configuration information */
535#define METAC_CORE_ID 0x04831000
536#define METAC_COREID_GROUP_BITS 0xFF000000
537#define METAC_COREID_GROUP_S 24
538#define METAC_COREID_GROUP_METAG 0x14
539#define METAC_COREID_ID_BITS 0x00FF0000
540#define METAC_COREID_ID_S 16
541#define METAC_COREID_ID_W32 0x10 /* >= for 32-bit pipeline */
542#define METAC_COREID_CONFIG_BITS 0x0000FFFF
543#define METAC_COREID_CONFIG_S 0
544#define METAC_COREID_CFGCACHE_BITS 0x0007
545#define METAC_COREID_CFGCACHE_S 0
546#define METAC_COREID_CFGCACHE_NOM 0
547#define METAC_COREID_CFGCACHE_TYPE0 1
548#define METAC_COREID_CFGCACHE_NOMMU 1 /* Alias for TYPE0 */
549#define METAC_COREID_CFGCACHE_NOCACHE 2
550#define METAC_COREID_CFGCACHE_PRIVNOMMU 3
551#define METAC_COREID_CFGDSP_BITS 0x0038
552#define METAC_COREID_CFGDSP_S 3
553#define METAC_COREID_CFGDSP_NOM 0
554#define METAC_COREID_CFGDSP_MIN 1
555#define METAC_COREID_NOFPACC_BIT 0x0040 /* Set if no FPU accum */
556#define METAC_COREID_CFGFPU_BITS 0x0180
557#define METAC_COREID_CFGFPU_S 7
558#define METAC_COREID_CFGFPU_NOM 0
559#define METAC_COREID_CFGFPU_SNGL 1
560#define METAC_COREID_CFGFPU_DBL 2
561#define METAC_COREID_NOAMA_BIT 0x0200 /* Set if no AMA present */
562#define METAC_COREID_NOCOH_BIT 0x0400 /* Set if no Gbl coherency */
563
564/* Core revision information */
565#define METAC_CORE_REV 0x04831008
566#define METAC_COREREV_DESIGN_BITS 0xFF000000
567#define METAC_COREREV_DESIGN_S 24
568#define METAC_COREREV_MAJOR_BITS 0x00FF0000
569#define METAC_COREREV_MAJOR_S 16
570#define METAC_COREREV_MINOR_BITS 0x0000FF00
571#define METAC_COREREV_MINOR_S 8
572#define METAC_COREREV_MAINT_BITS 0x000000FF
573#define METAC_COREREV_MAINT_S 0
574
575/* Configuration information control outside the core */
576#define METAC_CORE_DESIGNER1 0x04831010 /* Arbitrary value */
577#define METAC_CORE_DESIGNER2 0x04831018 /* Arbitrary value */
578
579/* Configuration information covering presence/number of various features */
580#define METAC_CORE_CONFIG2 0x04831020
581#define METAC_CORECFG2_COREDBGTYPE_BITS 0x60000000 /* Core debug type */
582#define METAC_CORECFG2_COREDBGTYPE_S 29
583#define METAC_CORECFG2_DCSMALL_BIT 0x04000000 /* Data cache small */
584#define METAC_CORECFG2_ICSMALL_BIT 0x02000000 /* Inst cache small */
585#define METAC_CORECFG2_DCSZNP_BITS 0x01C00000 /* Data cache size np */
586#define METAC_CORECFG2_DCSZNP_S 22
587#define METAC_CORECFG2_ICSZNP_BITS 0x00380000 /* Inst cache size np */
588#define METAC_CORECFG2_ICSZNP_S 19
589#define METAC_CORECFG2_DCSZ_BITS 0x00070000 /* Data cache size */
590#define METAC_CORECFG2_DCSZ_S 16
591#define METAC_CORECFG2_xCSZ_4K 0 /* Allocated values */
592#define METAC_CORECFG2_xCSZ_8K 1
593#define METAC_CORECFG2_xCSZ_16K 2
594#define METAC_CORECFG2_xCSZ_32K 3
595#define METAC_CORECFG2_xCSZ_64K 4
596#define METAC_CORE_C2ICSZ_BITS 0x0000E000 /* Inst cache size */
597#define METAC_CORE_C2ICSZ_S 13
598#define METAC_CORE_GBLACC_BITS 0x00001800 /* Number of Global Acc */
599#define METAC_CORE_GBLACC_S 11
600#define METAC_CORE_GBLDXR_BITS 0x00000700 /* 0 -> 0, R -> 2^(R-1) */
601#define METAC_CORE_GBLDXR_S 8
602#define METAC_CORE_GBLAXR_BITS 0x000000E0 /* 0 -> 0, R -> 2^(R-1) */
603#define METAC_CORE_GBLAXR_S 5
604#define METAC_CORE_RTTRACE_BIT 0x00000010
605#define METAC_CORE_WATCHN_BITS 0x0000000C /* 0 -> 0, N -> 2^N */
606#define METAC_CORE_WATCHN_S 2
607#define METAC_CORE_BREAKN_BITS 0x00000003 /* 0 -> 0, N -> 2^N */
608#define METAC_CORE_BREAKN_S 0
609
610/* Configuration information covering presence/number of various features */
611#define METAC_CORE_CONFIG3 0x04831028
612#define METAC_CORECFG3_L2C_REV_ID_BITS 0x000F0000 /* Revision of L2 cache */
613#define METAC_CORECFG3_L2C_REV_ID_S 16
614#define METAC_CORECFG3_L2C_LINE_SIZE_BITS 0x00003000 /* L2 line size */
615#define METAC_CORECFG3_L2C_LINE_SIZE_S 12
616#define METAC_CORECFG3_L2C_LINE_SIZE_64B 0x0 /* 64 bytes */
617#define METAC_CORECFG3_L2C_NUM_WAYS_BITS 0x00000F00 /* L2 number of ways (2^n) */
618#define METAC_CORECFG3_L2C_NUM_WAYS_S 8
619#define METAC_CORECFG3_L2C_SIZE_BITS 0x000000F0 /* L2 size (2^n) */
620#define METAC_CORECFG3_L2C_SIZE_S 4
621#define METAC_CORECFG3_L2C_UNIFIED_BIT 0x00000004 /* Unified cache: */
622#define METAC_CORECFG3_L2C_UNIFIED_S 2
623#define METAC_CORECFG3_L2C_UNIFIED_UNIFIED 1 /* - Unified D/I cache */
624#define METAC_CORECFG3_L2C_UNIFIED_SEPARATE 0 /* - Separate D/I cache */
625#define METAC_CORECFG3_L2C_MODE_BIT 0x00000002 /* Cache Mode: */
626#define METAC_CORECFG3_L2C_MODE_S 1
627#define METAC_CORECFG3_L2C_MODE_WRITE_BACK 1 /* - Write back */
628#define METAC_CORECFG3_L2C_MODE_WRITE_THROUGH 0 /* - Write through */
629#define METAC_CORECFG3_L2C_HAVE_L2C_BIT 0x00000001 /* Have L2C */
630#define METAC_CORECFG3_L2C_HAVE_L2C_S 0
631
632#endif /* METAC_2_1 */
633
634#define SYSC_CACHE_MMU_CONFIG 0x04830028
635#ifdef METAC_2_1
636#define SYSC_CMMUCFG_DCSKEWABLE_BIT 0x00000040
637#define SYSC_CMMUCFG_ICSKEWABLE_BIT 0x00000020
638#define SYSC_CMMUCFG_DCSKEWOFF_BIT 0x00000010 /* Skew association override */
639#define SYSC_CMMUCFG_ICSKEWOFF_BIT 0x00000008 /* -> default 0 on if present */
640#define SYSC_CMMUCFG_MODE_BITS 0x00000007 /* Access to old state */
641#define SYSC_CMMUCFG_MODE_S 0
642#define SYSC_CMMUCFG_ON 0x7
643#define SYSC_CMMUCFG_EBYPASS 0x6 /* Enhanced by-pass mode */
644#define SYSC_CMMUCFG_EBYPASSIC 0x4 /* EB just inst cache */
645#define SYSC_CMMUCFG_EBYPASSDC 0x2 /* EB just data cache */
646#endif /* METAC_2_1 */
647/* Old definitions, Keep them for now */
648#define SYSC_CMMUCFG_MMU_ON_BIT 0x1
649#define SYSC_CMMUCFG_DC_ON_BIT 0x2
650#define SYSC_CMMUCFG_IC_ON_BIT 0x4
651
652#define SYSC_JTAG_THREAD 0x04830030
653#define SYSC_JTAG_TX_BITS 0x00000003 /* Read only bits! */
654#define SYSC_JTAG_TX_S 0
655#define SYSC_JTAG_PRIV_BIT 0x00000004
656#ifdef METAC_2_1
657#define SYSC_JTAG_SLAVETX_BITS 0x00000018
658#define SYSC_JTAG_SLAVETX_S 3
659#endif /* METAC_2_1 */
660
661#define SYSC_DCACHE_FLUSH 0x04830038
662#define SYSC_ICACHE_FLUSH 0x04830040
663#define SYSC_xCACHE_FLUSH_INIT 0x1
664#define MMCU_DIRECTMAP0_ADDR 0x04830080 /* LINSYSDIRECT_BASE -> */
665#define MMCU_DIRECTMAPn_STRIDE 0x00000010 /* 4 Region settings */
666#define MMCU_DIRECTMAPn_S 4
667#define MMCU_DIRECTMAPn_ADDR_BITS 0xFF800000
668#define MMCU_DIRECTMAPn_ADDR_S 23
669#define MMCU_DIRECTMAPn_ADDR_SCALE 0x00800000 /* 8M Regions */
670#ifdef METAC_2_1
671/*
672 * These fields in the above registers provide MMCU_ENTRY_* values
673 * for each direct mapped region to enable optimisation of these areas.
674 * (LSB similar to VALID must be set for enhancements to be active)
675 */
676#define MMCU_DIRECTMAPn_ENHANCE_BIT 0x00000001 /* 0 = no optim */
677#define MMCU_DIRECTMAPn_DCCTRL_BITS 0x000000DF /* Get DC Ctrl */
678#define MMCU_DIRECTMAPn_DCCTRL_S 0
679#define MMCU_DIRECTMAPn_ICCTRL_BITS 0x0000C000 /* Get IC Ctrl */
680#define MMCU_DIRECTMAPn_ICCTRL_S 8
681#define MMCU_DIRECTMAPn_ENTLB_BIT 0x00000020 /* Cache in TLB */
682#define MMCU_DIRECTMAPn_ICCWIN_BITS 0x0000C000 /* Get IC Win Bits */
683#define MMCU_DIRECTMAPn_ICCWIN_S 14
684#endif /* METAC_2_1 */
685
686#define MMCU_DIRECTMAP1_ADDR 0x04830090
687#define MMCU_DIRECTMAP2_ADDR 0x048300a0
688#define MMCU_DIRECTMAP3_ADDR 0x048300b0
689
690/*
691 * These bits partition each thread's use of data cache or instruction cache
692 * resource by modifying the top 4 bits of the address within the cache
693 * storage area.
694 */
695#define SYSC_DCPART0 0x04830200
696#define SYSC_xCPARTn_STRIDE 0x00000008
697#define SYSC_xCPARTL_AND_BITS 0x0000000F /* Masks top 4 bits */
698#define SYSC_xCPARTL_AND_S 0
699#define SYSC_xCPARTG_AND_BITS 0x00000F00 /* Masks top 4 bits */
700#define SYSC_xCPARTG_AND_S 8
701#define SYSC_xCPARTL_OR_BITS 0x000F0000 /* Ors into top 4 bits */
702#define SYSC_xCPARTL_OR_S 16
703#ifdef METAC_2_1
704#define SYSC_DCPART_GCON_BIT 0x00100000 /* Coherent shared local */
705#endif /* METAC_2_1 */
706#define SYSC_xCPARTG_OR_BITS 0x0F000000 /* Ors into top 4 bits */
707#define SYSC_xCPARTG_OR_S 24
708#define SYSC_CWRMODE_BIT 0x80000000 /* Write cache mode bit */
709
710#define SYSC_DCPART1 0x04830208
711#define SYSC_DCPART2 0x04830210
712#define SYSC_DCPART3 0x04830218
713#define SYSC_ICPART0 0x04830220
714#define SYSC_ICPART1 0x04830228
715#define SYSC_ICPART2 0x04830230
716#define SYSC_ICPART3 0x04830238
717
718/*
719 * META Core Memory and Cache Update registers
720 */
721#define SYSC_MCMDATAX 0x04830300 /* 32-bit read/write data register */
722#define SYSC_MCMDATAT 0x04830308 /* Read or write data triggers oper */
723#define SYSC_MCMGCTRL 0x04830310 /* Control register */
724#define SYSC_MCMGCTRL_READ_BIT 0x00000001 /* Set to issue 1st read */
725#define SYSC_MCMGCTRL_AINC_BIT 0x00000002 /* Set for auto-increment */
726#define SYSC_MCMGCTRL_ADDR_BITS 0x000FFFFC /* Address or index */
727#define SYSC_MCMGCTRL_ADDR_S 2
728#define SYSC_MCMGCTRL_ID_BITS 0x0FF00000 /* Internal memory block Id */
729#define SYSC_MCMGCTRL_ID_S 20
730#define SYSC_MCMGID_NODEV 0xFF /* No Device Selected */
731#define SYSC_MCMGID_DSPRAM0A 0x04 /* DSP RAM D0 block A access */
732#define SYSC_MCMGID_DSPRAM0B 0x05 /* DSP RAM D0 block B access */
733#define SYSC_MCMGID_DSPRAM1A 0x06 /* DSP RAM D1 block A access */
734#define SYSC_MCMGID_DSPRAM1B 0x07 /* DSP RAM D1 block B access */
735#define SYSC_MCMGID_DCACHEL 0x08 /* DCACHE lines (64-bytes/line) */
736#ifdef METAC_2_1
737#define SYSC_MCMGID_DCACHETLB 0x09 /* DCACHE TLB ( Read Only ) */
738#endif /* METAC_2_1 */
739#define SYSC_MCMGID_DCACHET 0x0A /* DCACHE tags (32-bits/line) */
740#define SYSC_MCMGID_DCACHELRU 0x0B /* DCACHE LRU (8-bits/line) */
741#define SYSC_MCMGID_ICACHEL 0x0C /* ICACHE lines (64-bytes/line */
742#ifdef METAC_2_1
743#define SYSC_MCMGID_ICACHETLB 0x0D /* ICACHE TLB (Read Only ) */
744#endif /* METAC_2_1 */
745#define SYSC_MCMGID_ICACHET 0x0E /* ICACHE Tags (32-bits/line) */
746#define SYSC_MCMGID_ICACHELRU 0x0F /* ICACHE LRU (8-bits/line ) */
747#define SYSC_MCMGID_COREIRAM0 0x10 /* Core code mem id 0 */
748#define SYSC_MCMGID_COREIRAMn 0x17
749#define SYSC_MCMGID_COREDRAM0 0x18 /* Core data mem id 0 */
750#define SYSC_MCMGID_COREDRAMn 0x1F
751#ifdef METAC_2_1
752#define SYSC_MCMGID_DCACHEST 0x20 /* DCACHE ST ( Read Only ) */
753#define SYSC_MCMGID_ICACHEST 0x21 /* ICACHE ST ( Read Only ) */
754#define SYSC_MCMGID_DCACHETLBLRU 0x22 /* DCACHE TLB LRU ( Read Only )*/
755#define SYSC_MCMGID_ICACHETLBLRU 0x23 /* ICACHE TLB LRU( Read Only ) */
756#define SYSC_MCMGID_DCACHESTLRU 0x24 /* DCACHE ST LRU ( Read Only ) */
757#define SYSC_MCMGID_ICACHESTLRU 0x25 /* ICACHE ST LRU ( Read Only ) */
758#define SYSC_MCMGID_DEBUGTLB 0x26 /* DEBUG TLB ( Read Only ) */
759#define SYSC_MCMGID_DEBUGST 0x27 /* DEBUG ST ( Read Only ) */
760#define SYSC_MCMGID_L2CACHEL 0x30 /* L2 Cache Lines (64-bytes/line) */
761#define SYSC_MCMGID_L2CACHET 0x31 /* L2 Cache Tags (32-bits/line) */
762#define SYSC_MCMGID_COPROX0 0x70 /* Coprocessor port id 0 */
763#define SYSC_MCMGID_COPROXn 0x77
764#endif /* METAC_2_1 */
765#define SYSC_MCMGCTRL_TR31_BIT 0x80000000 /* Trigger 31 on completion */
766#define SYSC_MCMSTATUS 0x04830318 /* Status read only */
767#define SYSC_MCMSTATUS_IDLE_BIT 0x00000001
768
769/* META System Events */
770#define SYSC_SYS_EVENT 0x04830400
771#define SYSC_SYSEVT_ATOMIC_BIT 0x00000001
772#define SYSC_SYSEVT_CACHEX_BIT 0x00000002
773#define SYSC_ATOMIC_LOCK 0x04830408
774#define SYSC_ATOMIC_STATE_TX_BITS 0x0000000F
775#define SYSC_ATOMIC_STATE_TX_S 0
776#ifdef METAC_1_2
777#define SYSC_ATOMIC_STATE_DX_BITS 0x000000F0
778#define SYSC_ATOMIC_STATE_DX_S 4
779#else /* METAC_1_2 */
780#define SYSC_ATOMIC_SOURCE_BIT 0x00000010
781#endif /* !METAC_1_2 */
782
783
784#ifdef METAC_2_1
785
786/* These definitions replace the EXPAND_TIMER_DIV register defines which are to
787 * be deprecated.
788 */
789#define SYSC_TIMER_DIV 0x04830140
790#define SYSC_TIMDIV_BITS 0x000000FF
791#define SYSC_TIMDIV_S 0
792
793/* META Enhanced by-pass control for local and global region */
794#define MMCU_LOCAL_EBCTRL 0x04830600
795#define MMCU_GLOBAL_EBCTRL 0x04830608
796#define MMCU_EBCTRL_SINGLE_BIT 0x00000020 /* TLB Uncached */
797/*
798 * These fields in the above registers provide MMCU_ENTRY_* values
799 * for each direct mapped region to enable optimisation of these areas.
800 */
801#define MMCU_EBCTRL_DCCTRL_BITS 0x000000C0 /* Get DC Ctrl */
802#define MMCU_EBCTRL_DCCTRL_S 0
803#define MMCU_EBCTRL_ICCTRL_BITS 0x0000C000 /* Get DC Ctrl */
804#define MMCU_EBCTRL_ICCTRL_S 8
805
806/* META Cached Core Mode Registers */
807#define MMCU_T0CCM_ICCTRL 0x04830680 /* Core cached code control */
808#define MMCU_TnCCM_xxCTRL_STRIDE 8
809#define MMCU_TnCCM_xxCTRL_STRIDE_S 3
810#define MMCU_T1CCM_ICCTRL 0x04830688
811#define MMCU_T2CCM_ICCTRL 0x04830690
812#define MMCU_T3CCM_ICCTRL 0x04830698
813#define MMCU_T0CCM_DCCTRL 0x048306C0 /* Core cached data control */
814#define MMCU_T1CCM_DCCTRL 0x048306C8
815#define MMCU_T2CCM_DCCTRL 0x048306D0
816#define MMCU_T3CCM_DCCTRL 0x048306D8
817#define MMCU_TnCCM_ENABLE_BIT 0x00000001
818#define MMCU_TnCCM_WIN3_BIT 0x00000002
819#define MMCU_TnCCM_DCWRITE_BIT 0x00000004 /* In DCCTRL only */
820#define MMCU_TnCCM_REGSZ_BITS 0x00000F00
821#define MMCU_TnCCM_REGSZ_S 8
822#define MMCU_TnCCM_REGSZ0_POWER 12 /* RegSz 0 -> 4K */
823#define MMCU_TnCCM_REGSZ_MAXBYTES 0x00080000 /* 512K max */
824#define MMCU_TnCCM_ADDR_BITS 0xFFFFF000
825#define MMCU_TnCCM_ADDR_S 12
826
827#endif /* METAC_2_1 */
828
829/*
830 * Hardware performance counter registers
831 * --------------------------------------
832 */
833#ifdef METAC_2_1
834/* Two Performance Counter Internal Core Events Control registers */
835#define PERF_ICORE0 0x0480FFD0
836#define PERF_ICORE1 0x0480FFD8
837#define PERFI_CTRL_BITS 0x0000000F
838#define PERFI_CTRL_S 0
839#define PERFI_CAH_DMISS 0x0 /* Dcache Misses in cache (TLB Hit) */
840#define PERFI_CAH_IMISS 0x1 /* Icache Misses in cache (TLB Hit) */
841#define PERFI_TLB_DMISS 0x2 /* Dcache Misses in per-thread TLB */
842#define PERFI_TLB_IMISS 0x3 /* Icache Misses in per-thread TLB */
843#define PERFI_TLB_DWRHITS 0x4 /* DC Write-Hits in per-thread TLB */
844#define PERFI_TLB_DWRMISS 0x5 /* DC Write-Miss in per-thread TLB */
845#define PERFI_CAH_DLFETCH 0x8 /* DC Read cache line fetch */
846#define PERFI_CAH_ILFETCH 0x9 /* DC Read cache line fetch */
847#define PERFI_CAH_DWFETCH 0xA /* DC Read cache word fetch */
848#define PERFI_CAH_IWFETCH 0xB /* DC Read cache word fetch */
849#endif /* METAC_2_1 */
850
851/* Two memory-mapped hardware performance counter registers */
852#define PERF_COUNT0 0x0480FFE0
853#define PERF_COUNT1 0x0480FFE8
854
855/* Fields in PERF_COUNTn registers */
856#define PERF_COUNT_BITS 0x00ffffff /* Event count value */
857
858#define PERF_THREAD_BITS 0x0f000000 /* Thread mask selects threads */
859#define PERF_THREAD_S 24
860
861#define PERF_CTRL_BITS 0xf0000000 /* Event filter control */
862#define PERF_CTRL_S 28
863
864#define PERFCTRL_SUPER 0 /* Superthread cycles */
865#define PERFCTRL_REWIND 1 /* Rewinds due to Dcache Misses */
866#ifdef METAC_2_1
867#define PERFCTRL_SUPREW 2 /* Rewinds of superthreaded cycles (no mask) */
868
869#define PERFCTRL_CYCLES 3 /* Counts all cycles (no mask) */
870
871#define PERFCTRL_PREDBC 4 /* Conditional branch predictions */
872#define PERFCTRL_MISPBC 5 /* Conditional branch mispredictions */
873#define PERFCTRL_PREDRT 6 /* Return predictions */
874#define PERFCTRL_MISPRT 7 /* Return mispredictions */
875#endif /* METAC_2_1 */
876
877#define PERFCTRL_DHITS 8 /* Dcache Hits */
878#define PERFCTRL_IHITS 9 /* Icache Hits */
879#define PERFCTRL_IMISS 10 /* Icache Misses in cache or TLB */
880#ifdef METAC_2_1
881#define PERFCTRL_DCSTALL 11 /* Dcache+TLB o/p delayed (per-thread) */
882#define PERFCTRL_ICSTALL 12 /* Icache+TLB o/p delayed (per-thread) */
883
884#define PERFCTRL_INT 13 /* Internal core detailed events (see next) */
885#define PERFCTRL_EXT 15 /* External source in core periphery */
886#endif /* METAC_2_1 */
887
888#ifdef METAC_2_1
889/* These definitions replace the EXPAND_PERFCHANx register defines which are to
890 * be deprecated.
891 */
892#define PERF_CHAN0 0x04830150
893#define PERF_CHAN1 0x04830158
894#define PERF_CHAN_BITS 0x0000000F
895#define PERF_CHAN_S 0
896#define PERFCHAN_WRC_WRBURST 0x0 /* Write combiner write burst */
897#define PERFCHAN_WRC_WRITE 0x1 /* Write combiner write */
898#define PERFCHAN_WRC_RDBURST 0x2 /* Write combiner read burst */
899#define PERFCHAN_WRC_READ 0x3 /* Write combiner read */
900#define PERFCHAN_PREARB_DELAY 0x4 /* Pre-arbiter delay cycle */
901 /* Cross-bar hold-off cycle: */
902#define PERFCHAN_XBAR_HOLDWRAP 0x5 /* wrapper register */
903#define PERFCHAN_XBAR_HOLDSBUS 0x6 /* system bus (ATP only) */
904#define PERFCHAN_XBAR_HOLDCREG 0x9 /* core registers */
905#define PERFCHAN_L2C_MISS 0x6 /* L2 Cache miss */
906#define PERFCHAN_L2C_HIT 0x7 /* L2 Cache hit */
907#define PERFCHAN_L2C_WRITEBACK 0x8 /* L2 Cache writeback */
908 /* Admission delay cycle: */
909#define PERFCHAN_INPUT_CREG 0xB /* core registers */
910#define PERFCHAN_INPUT_INTR 0xC /* internal ram */
911#define PERFCHAN_INPUT_WRC 0xD /* write combiners(memory) */
912
913/* Should following be removed as not in TRM anywhere? */
914#define PERFCHAN_XBAR_HOLDINTR 0x8 /* internal ram */
915#define PERFCHAN_INPUT_SBUS 0xA /* register port */
916/* End of remove section. */
917
918#define PERFCHAN_MAINARB_DELAY 0xF /* Main arbiter delay cycle */
919
920#endif /* METAC_2_1 */
921
922#ifdef METAC_2_1
923/*
924 * Write combiner registers
925 * ------------------------
926 *
927 * These replace the EXPAND_T0WRCOMBINE register defines, which will be
928 * deprecated.
929 */
930#define WRCOMB_CONFIG0 0x04830100
931#define WRCOMB_LFFEn_BIT 0x00004000 /* Enable auto line full flush */
932#define WRCOMB_ENABLE_BIT 0x00002000 /* Enable write combiner */
933#define WRCOMB_TIMEOUT_ENABLE_BIT 0x00001000 /* Timeout flush enable */
934#define WRCOMB_TIMEOUT_COUNT_BITS 0x000003FF
935#define WRCOMB_TIMEOUT_COUNT_S 0
936#define WRCOMB_CONFIG4 0x04830180
937#define WRCOMB_PARTALLOC_BITS 0x000000C0
938#define WRCOMB_PARTALLOC_S 64
939#define WRCOMB_PARTSIZE_BITS 0x00000030
940#define WRCOMB_PARTSIZE_S 4
941#define WRCOMB_PARTOFFSET_BITS 0x0000000F
942#define WRCOMB_PARTOFFSET_S 0
943#define WRCOMB_CONFIG_STRIDE 8
944#endif /* METAC_2_1 */
945
946#ifdef METAC_2_1
947/*
948 * Thread arbiter registers
949 * ------------------------
950 *
951 * These replace the EXPAND_T0ARBITER register defines, which will be
952 * deprecated.
953 */
954#define ARBITER_ARBCONFIG0 0x04830120
955#define ARBCFG_BPRIORITY_BIT 0x02000000
956#define ARBCFG_IPRIORITY_BIT 0x01000000
957#define ARBCFG_PAGE_BITS 0x00FF0000
958#define ARBCFG_PAGE_S 16
959#define ARBCFG_BBASE_BITS 0x0000FF00
960#define ARGCFG_BBASE_S 8
961#define ARBCFG_IBASE_BITS 0x000000FF
962#define ARBCFG_IBASE_S 0
963#define ARBITER_TTECONFIG0 0x04820160
964#define ARBTTE_IUPPER_BITS 0xFF000000
965#define ARBTTE_IUPPER_S 24
966#define ARBTTE_ILOWER_BITS 0x00FF0000
967#define ARBTTE_ILOWER_S 16
968#define ARBTTE_BUPPER_BITS 0x0000FF00
969#define ARBTTE_BUPPER_S 8
970#define ARBTTE_BLOWER_BITS 0x000000FF
971#define ARBTTE_BLOWER_S 0
972#define ARBITER_STRIDE 8
973#endif /* METAC_2_1 */
974
975/*
976 * Expansion area registers
977 * --------------------------------------
978 */
979
980/* These defines are to be deprecated. See above instead. */
981#define EXPAND_T0WRCOMBINE 0x03000000
982#ifdef METAC_2_1
983#define EXPWRC_LFFEn_BIT 0x00004000 /* Enable auto line full flush */
984#endif /* METAC_2_1 */
985#define EXPWRC_ENABLE_BIT 0x00002000 /* Enable write combiner */
986#define EXPWRC_TIMEOUT_ENABLE_BIT 0x00001000 /* Timeout flush enable */
987#define EXPWRC_TIMEOUT_COUNT_BITS 0x000003FF
988#define EXPWRC_TIMEOUT_COUNT_S 0
989#define EXPAND_TnWRCOMBINE_STRIDE 0x00000008
990
991/* These defines are to be deprecated. See above instead. */
992#define EXPAND_T0ARBITER 0x03000020
993#define EXPARB_BPRIORITY_BIT 0x02000000
994#define EXPARB_IPRIORITY_BIT 0x01000000
995#define EXPARB_PAGE_BITS 0x00FF0000
996#define EXPARB_PAGE_S 16
997#define EXPARB_BBASE_BITS 0x0000FF00
998#define EXPARB_BBASE_S 8
999#define EXPARB_IBASE_BITS 0x000000FF
1000#define EXPARB_IBASE_S 0
1001#define EXPAND_TnARBITER_STRIDE 0x00000008
1002
1003/* These definitions are to be deprecated. See above instead. */
1004#define EXPAND_TIMER_DIV 0x03000040
1005#define EXPTIM_DIV_BITS 0x000000FF
1006#define EXPTIM_DIV_S 0
1007
1008/* These definitions are to be deprecated. See above instead. */
1009#define EXPAND_PERFCHAN0 0x03000050
1010#define EXPAND_PERFCHAN1 0x03000058
1011#define EXPPERF_CTRL_BITS 0x0000000F
1012#define EXPPERF_CTRL_S 0
1013#define EXPPERF_WRC_WRBURST 0x0 /* Write combiner write burst */
1014#define EXPPERF_WRC_WRITE 0x1 /* Write combiner write */
1015#define EXPPERF_WRC_RDBURST 0x2 /* Write combiner read burst */
1016#define EXPPERF_WRC_READ 0x3 /* Write combiner read */
1017#define EXPPERF_PREARB_DELAY 0x4 /* Pre-arbiter delay cycle */
1018 /* Cross-bar hold-off cycle: */
1019#define EXPPERF_XBAR_HOLDWRAP 0x5 /* wrapper register */
1020#define EXPPERF_XBAR_HOLDSBUS 0x6 /* system bus */
1021#ifdef METAC_1_2
1022#define EXPPERF_XBAR_HOLDLBUS 0x7 /* local bus */
1023#else /* METAC_1_2 */
1024#define EXPPERF_XBAR_HOLDINTR 0x8 /* internal ram */
1025#define EXPPERF_XBAR_HOLDCREG 0x9 /* core registers */
1026 /* Admission delay cycle: */
1027#define EXPPERF_INPUT_SBUS 0xA /* register port */
1028#define EXPPERF_INPUT_CREG 0xB /* core registers */
1029#define EXPPERF_INPUT_INTR 0xC /* internal ram */
1030#define EXPPERF_INPUT_WRC 0xD /* write combiners(memory) */
1031#endif /* !METAC_1_2 */
1032#define EXPPERF_MAINARB_DELAY 0xF /* Main arbiter delay cycle */
1033
1034/*
1035 * Debug port registers
1036 * --------------------------------------
1037 */
1038
1039/* Data Exchange Register */
1040#define DBGPORT_MDBGDATAX 0x0
1041
1042/* Data Transfer register */
1043#define DBGPORT_MDBGDATAT 0x4
1044
1045/* Control Register 0 */
1046#define DBGPORT_MDBGCTRL0 0x8
1047#define DBGPORT_MDBGCTRL0_ADDR_BITS 0xFFFFFFFC
1048#define DBGPORT_MDBGCTRL0_ADDR_S 2
1049#define DBGPORT_MDBGCTRL0_AUTOINCR_BIT 0x00000002
1050#define DBGPORT_MDBGCTRL0_RD_BIT 0x00000001
1051
1052/* Control Register 1 */
1053#define DBGPORT_MDBGCTRL1 0xC
1054#ifdef METAC_2_1
1055#define DBGPORT_MDBGCTRL1_DEFERRTHREAD_BITS 0xC0000000
1056#define DBGPORT_MDBGCTRL1_DEFERRTHREAD_S 30
1057#endif /* METAC_2_1 */
1058#define DBGPORT_MDBGCTRL1_LOCK2_INTERLOCK_BIT 0x20000000
1059#define DBGPORT_MDBGCTRL1_ATOMIC_INTERLOCK_BIT 0x10000000
1060#define DBGPORT_MDBGCTRL1_TRIGSTATUS_BIT 0x08000000
1061#define DBGPORT_MDBGCTRL1_GBLPORT_IDLE_BIT 0x04000000
1062#define DBGPORT_MDBGCTRL1_COREMEM_IDLE_BIT 0x02000000
1063#define DBGPORT_MDBGCTRL1_READY_BIT 0x01000000
1064#ifdef METAC_2_1
1065#define DBGPORT_MDBGCTRL1_DEFERRID_BITS 0x00E00000
1066#define DBGPORT_MDBGCTRL1_DEFERRID_S 21
1067#define DBGPORT_MDBGCTRL1_DEFERR_BIT 0x00100000
1068#endif /* METAC_2_1 */
1069#define DBGPORT_MDBGCTRL1_WR_ACTIVE_BIT 0x00040000
1070#define DBGPORT_MDBGCTRL1_COND_LOCK2_BIT 0x00020000
1071#define DBGPORT_MDBGCTRL1_LOCK2_BIT 0x00010000
1072#define DBGPORT_MDBGCTRL1_DIAGNOSE_BIT 0x00008000
1073#define DBGPORT_MDBGCTRL1_FORCEDIAG_BIT 0x00004000
1074#define DBGPORT_MDBGCTRL1_MEMFAULT_BITS 0x00003000
1075#define DBGPORT_MDBGCTRL1_MEMFAULT_S 12
1076#define DBGPORT_MDBGCTRL1_TRIGGER_BIT 0x00000100
1077#ifdef METAC_2_1
1078#define DBGPORT_MDBGCTRL1_INTSPECIAL_BIT 0x00000080
1079#define DBGPORT_MDBGCTRL1_INTRUSIVE_BIT 0x00000040
1080#endif /* METAC_2_1 */
1081#define DBGPORT_MDBGCTRL1_THREAD_BITS 0x00000030 /* Thread mask selects threads */
1082#define DBGPORT_MDBGCTRL1_THREAD_S 4
1083#define DBGPORT_MDBGCTRL1_TRANS_SIZE_BITS 0x0000000C
1084#define DBGPORT_MDBGCTRL1_TRANS_SIZE_S 2
1085#define DBGPORT_MDBGCTRL1_TRANS_SIZE_32_BIT 0x00000000
1086#define DBGPORT_MDBGCTRL1_TRANS_SIZE_16_BIT 0x00000004
1087#define DBGPORT_MDBGCTRL1_TRANS_SIZE_8_BIT 0x00000008
1088#define DBGPORT_MDBGCTRL1_BYTE_ROUND_BITS 0x00000003
1089#define DBGPORT_MDBGCTRL1_BYTE_ROUND_S 0
1090#define DBGPORT_MDBGCTRL1_BYTE_ROUND_8_BIT 0x00000001
1091#define DBGPORT_MDBGCTRL1_BYTE_ROUND_16_BIT 0x00000002
1092
1093
1094/* L2 Cache registers */
1095#define SYSC_L2C_INIT 0x048300C0
1096#define SYSC_L2C_INIT_INIT 1
1097#define SYSC_L2C_INIT_IN_PROGRESS 0
1098#define SYSC_L2C_INIT_COMPLETE 1
1099
1100#define SYSC_L2C_ENABLE 0x048300D0
1101#define SYSC_L2C_ENABLE_ENABLE_BIT 0x00000001
1102#define SYSC_L2C_ENABLE_PFENABLE_BIT 0x00000002
1103
1104#define SYSC_L2C_PURGE 0x048300C8
1105#define SYSC_L2C_PURGE_PURGE 1
1106#define SYSC_L2C_PURGE_IN_PROGRESS 0
1107#define SYSC_L2C_PURGE_COMPLETE 1
1108
1109#endif /* _ASM_METAG_MEM_H_ */
diff --git a/arch/metag/include/asm/metag_regs.h b/arch/metag/include/asm/metag_regs.h
deleted file mode 100644
index 60b750971d8a..000000000000
--- a/arch/metag/include/asm/metag_regs.h
+++ /dev/null
@@ -1,1184 +0,0 @@
1/*
2 * asm/metag_regs.h
3 *
4 * Copyright (C) 2000-2007, 2012 Imagination Technologies.
5 *
6 * This program is free software; you can redistribute it and/or modify it under
7 * the terms of the GNU General Public License version 2 as published by the
8 * Free Software Foundation.
9 *
10 * Various defines for Meta core (non memory-mapped) registers.
11 */
12
13#ifndef _ASM_METAG_REGS_H_
14#define _ASM_METAG_REGS_H_
15
16/*
17 * CHIP Unit Identifiers and Valid/Global register number masks
18 * ------------------------------------------------------------
19 */
20#define TXUCT_ID 0x0 /* Control unit regs */
21#ifdef METAC_1_2
22#define TXUCT_MASK 0xFF0FFFFF /* Valid regs 0..31 */
23#else
24#define TXUCT_MASK 0xFF1FFFFF /* Valid regs 0..31 */
25#endif
26#define TGUCT_MASK 0x00000000 /* No global regs */
27#define TXUD0_ID 0x1 /* Data unit regs */
28#define TXUD1_ID 0x2
29#define TXUDX_MASK 0xFFFFFFFF /* Valid regs 0..31 */
30#define TGUDX_MASK 0xFFFF0000 /* Global regs for base inst */
31#define TXUDXDSP_MASK 0x0F0FFFFF /* Valid DSP regs */
32#define TGUDXDSP_MASK 0x0E0E0000 /* Global DSP ACC regs */
33#define TXUA0_ID 0x3 /* Address unit regs */
34#define TXUA1_ID 0x4
35#define TXUAX_MASK 0x0000FFFF /* Valid regs 0-15 */
36#define TGUAX_MASK 0x0000FF00 /* Global regs 8-15 */
37#define TXUPC_ID 0x5 /* PC registers */
38#define TXUPC_MASK 0x00000003 /* Valid regs 0- 1 */
39#define TGUPC_MASK 0x00000000 /* No global regs */
40#define TXUPORT_ID 0x6 /* Ports are not registers */
41#define TXUTR_ID 0x7
42#define TXUTR_MASK 0x0000005F /* Valid regs 0-3,4,6 */
43#define TGUTR_MASK 0x00000000 /* No global regs */
44#ifdef METAC_2_1
45#define TXUTT_ID 0x8
46#define TXUTT_MASK 0x0000000F /* Valid regs 0-3 */
47#define TGUTT_MASK 0x00000010 /* Global reg 4 */
48#define TXUFP_ID 0x9 /* FPU regs */
49#define TXUFP_MASK 0x0000FFFF /* Valid regs 0-15 */
50#define TGUFP_MASK 0x00000000 /* No global regs */
51#endif /* METAC_2_1 */
52
53#ifdef METAC_1_2
54#define TXUXX_MASKS { TXUCT_MASK, TXUDX_MASK, TXUDX_MASK, TXUAX_MASK, \
55 TXUAX_MASK, TXUPC_MASK, 0, TXUTR_MASK, \
56 0, 0, 0, 0, 0, 0, 0, 0 }
57#define TGUXX_MASKS { TGUCT_MASK, TGUDX_MASK, TGUDX_MASK, TGUAX_MASK, \
58 TGUAX_MASK, TGUPC_MASK, 0, TGUTR_MASK, \
59 0, 0, 0, 0, 0, 0, 0, 0 }
60#else /* METAC_1_2 */
61#define TXUXX_MASKS { TXUCT_MASK, TXUDX_MASK, TXUDX_MASK, TXUAX_MASK, \
62 TXUAX_MASK, TXUPC_MASK, 0, TXUTR_MASK, \
63 TXUTT_MASK, TXUFP_MASK, 0, 0, \
64 0, 0, 0, 0 }
65#define TGUXX_MASKS { TGUCT_MASK, TGUDX_MASK, TGUDX_MASK, TGUAX_MASK, \
66 TGUAX_MASK, TGUPC_MASK, 0, TGUTR_MASK, \
67 TGUTT_MASK, TGUFP_MASK, 0, 0, \
68 0, 0, 0, 0 }
69#endif /* !METAC_1_2 */
70
71#define TXUXXDSP_MASKS { 0, TXUDXDSP_MASK, TXUDXDSP_MASK, 0, 0, 0, 0, 0, \
72 0, 0, 0, 0, 0, 0, 0, 0 }
73#define TGUXXDSP_MASKS { 0, TGUDXDSP_MASK, TGUDXDSP_MASK, 0, 0, 0, 0, 0, \
74 0, 0, 0, 0, 0, 0, 0, 0 }
75
76/* -------------------------------------------------------------------------
77; DATA AND ADDRESS UNIT REGISTERS
78; -----------------------------------------------------------------------*/
79/*
80 Thread local D0 registers
81 */
82/* D0.0 ; Holds 32-bit result, can be used as scratch */
83#define D0Re0 D0.0
84/* D0.1 ; Used to pass Arg6_32 */
85#define D0Ar6 D0.1
86/* D0.2 ; Used to pass Arg4_32 */
87#define D0Ar4 D0.2
88/* D0.3 ; Used to pass Arg2_32 to a called routine (see D1.3 below) */
89#define D0Ar2 D0.3
90/* D0.4 ; Can be used as scratch; used to save A0FrP in entry sequences */
91#define D0FrT D0.4
92/* D0.5 ; C compiler assumes preservation, save with D1.5 if used */
93/* D0.6 ; C compiler assumes preservation, save with D1.6 if used */
94/* D0.7 ; C compiler assumes preservation, save with D1.7 if used */
95/* D0.8 ; Use of D0.8 and above is not encouraged */
96/* D0.9 */
97/* D0.10 */
98/* D0.11 */
99/* D0.12 */
100/* D0.13 */
101/* D0.14 */
102/* D0.15 */
103/*
104 Thread local D1 registers
105 */
106/* D1.0 ; Holds top 32-bits of 64-bit result, can be used as scratch */
107#define D1Re0 D1.0
108/* D1.1 ; Used to pass Arg5_32 */
109#define D1Ar5 D1.1
110/* D1.2 ; Used to pass Arg3_32 */
111#define D1Ar3 D1.2
112/* D1.3 ; Used to pass Arg1_32 (first 32-bit argument) to a called routine */
113#define D1Ar1 D1.3
114/* D1.4 ; Used for Return Pointer, save during entry with A0FrP (via D0.4) */
115#define D1RtP D1.4
116/* D1.5 ; C compiler assumes preservation, save if used */
117/* D1.6 ; C compiler assumes preservation, save if used */
118/* D1.7 ; C compiler assumes preservation, save if used */
119/* D1.8 ; Use of D1.8 and above is not encouraged */
120/* D1.9 */
121/* D1.10 */
122/* D1.11 */
123/* D1.12 */
124/* D1.13 */
125/* D1.14 */
126/* D1.15 */
127/*
128 Thread local A0 registers
129 */
130/* A0.0 ; Primary stack pointer */
131#define A0StP A0.0
132/* A0.1 ; Used as local frame pointer in C, save if used (via D0.4) */
133#define A0FrP A0.1
134/* A0.2 */
135/* A0.3 */
136/* A0.4 ; Use of A0.4 and above is not encouraged */
137/* A0.5 */
138/* A0.6 */
139/* A0.7 */
140/*
141 Thread local A1 registers
142 */
143/* A1.0 ; Global static chain pointer - do not modify */
144#define A1GbP A1.0
145/* A1.1 ; Local static chain pointer in C, can be used as scratch */
146#define A1LbP A1.1
147/* A1.2 */
148/* A1.3 */
149/* A1.4 ; Use of A1.4 and above is not encouraged */
150/* A1.5 */
151/* A1.6 */
152/* A1.7 */
153#ifdef METAC_2_1
154/* Renameable registers for use with Fast Interrupts */
155/* The interrupt stack pointer (usually a global register) */
156#define A0IStP A0IReg
157/* The interrupt global pointer (usually a global register) */
158#define A1IGbP A1IReg
159#endif
160/*
161 Further registers may be globally allocated via linkage/loading tools,
162 normally they are not used.
163 */
164/*-------------------------------------------------------------------------
165; STACK STRUCTURE and CALLING CONVENTION
166; -----------------------------------------------------------------------*/
167/*
168; Calling convention indicates that the following is the state of the
169; stack frame at the start of a routine-
170;
171; Arg9_32 [A0StP+#-12]
172; Arg8_32 [A0StP+#- 8]
173; Arg7_32 [A0StP+#- 4]
174; A0StP->
175;
176; Registers D1.3, D0.3, ..., to D0.1 are used to pass Arg1_32 to Arg6_32
177; respectively. If a routine needs to store them on the stack in order
178; to make sub-calls or because of the general complexity of the routine it
179; is best to dump these registers immediately at the start of a routine
180; using a MSETL or SETL instruction-
181;
182; MSETL [A0StP],D0Ar6,D0Ar4,D0Ar2; Only dump arguments expected
183;or SETL [A0StP+#8++],D0Ar2 ; Up to two 32-bit args expected
184;
185; For non-leaf routines it is always necessary to save and restore at least
186; the return address value D1RtP on the stack. Also by convention if the
187; frame is saved then a new A0FrP value must be set-up. So for non-leaf
188; routines at this point both these registers must be saved onto the stack
189; using a SETL instruction and the new A0FrP value is then set-up-
190;
191; MOV D0FrT,A0FrP
192; ADD A0FrP,A0StP,#0
193; SETL [A0StP+#8++],D0FrT,D1RtP
194;
195; Registers D0.5, D1.5, to D1.7 are assumed to be preserved across calls so
196; a SETL or MSETL instruction can be used to save the current state
197; of these registers if they are modified by the current routine-
198;
199; MSETL [A0StP],D0.5,D0.6,D0.7 ; Only save registers modified
200;or SETL [A0StP+#8++],D0.5 ; Only D0.5 and/or D1.5 modified
201;
202; All of the above sequences can be combined into one maximal case-
203;
204; MOV D0FrT,A0FrP ; Save and calculate new frame pointer
205; ADD A0FrP,A0StP,#(ARS)
206; MSETL [A0StP],D0Ar6,D0Ar4,D0Ar2,D0FrT,D0.5,D0.6,D0.7
207;
208; Having completed the above sequence the only remaining task on routine
209; entry is to reserve any local and outgoing argment storage space on the
210; stack. This instruction may be omitted if the size of this region is zero-
211;
212; ADD A0StP,A0StP,#(LCS)
213;
214; LCS is the first example use of one of a number of standard local defined
215; values that can be created to make assembler code more readable and
216; potentially more robust-
217;
218; #define ARS 0x18 ; Register arg bytes saved on stack
219; #define FRS 0x20 ; Frame save area size in bytes
220; #define LCS 0x00 ; Locals and Outgoing arg size
221; #define ARO (LCS+FRS) ; Stack offset to access args
222;
223; All of the above defines should be undefined (#undef) at the end of each
224; routine to avoid accidental use in the next routine.
225;
226; Given all of the above the following stack structure is expected during
227; the body of a routine if all args passed in registers are saved during
228; entry-
229;
230; ; 'Incoming args area'
231; Arg10_32 [A0StP+#-((10*4)+ARO)] Arg9_32 [A0StP+#-(( 9*4)+ARO)]
232; Arg8_32 [A0StP+#-(( 8*4)+ARO)] Arg7_32 [A0StP+#-(( 7*4)+ARO)]
233;--- Call point
234; D0Ar6= Arg6_32 [A0StP+#-(( 6*4)+ARO)] D1Ar5=Arg5_32 [A0StP+#-(( 5*4)+ARO)]
235; D0Ar4= Arg4_32 [A0StP+#-(( 4*4)+ARO)] D1Ar3=Arg3_32 [A0StP+#-(( 3*4)+ARO)]
236; D0Ar2= Arg2_32 [A0StP+#-(( 2*4)+ARO)] D1Ar2=Arg1_32 [A0StP+#-(( 1*4)+ARO)]
237; ; 'Frame area'
238; A0FrP-> D0FrT, D1RtP,
239; D0.5, D1.5,
240; D0.6, D1.6,
241; D0.7, D1.7,
242; ; 'Locals area'
243; Loc0_32 [A0StP+# (( 0*4)-LCS)], Loc1_32 [A0StP+# (( 1*4)-LCS)]
244; .... other locals
245; Locn_32 [A0StP+# (( n*4)-LCS)]
246; ; 'Outgoing args area'
247; Outm_32 [A0StP+#- ( m*4)] .... other outgoing args
248; Out8_32 [A0StP+#- ( 1*4)] Out7_32 [A0StP+#- ( 1*4)]
249; A0StP-> (Out1_32-Out6_32 in regs D1Ar1-D0Ar6)
250;
251; The exit sequence for a non-leaf routine can use the frame pointer created
252; in the entry sequence to optimise the recovery of the full state-
253;
254; MGETL D0FrT,D0.5,D0.6,D0.7,[A0FrP]
255; SUB A0StP,A0FrP,#(ARS+FRS)
256; MOV A0FrP,D0FrT
257; MOV PC,D1RtP
258;
259; Having described the most complex non-leaf case above, it is worth noting
260; that if a routine is a leaf and does not use any of the caller-preserved
261; state. The routine can be implemented as-
262;
263; ADD A0StP,A0StP,#LCS
264; .... body of routine
265; SUB A0StP,A0StP,#LCS
266; MOV PC,D1RtP
267;
268; The stack adjustments can also be omitted if no local storage is required.
269;
270; Another exit sequence structure is more applicable if for a leaf routine
271; with no local frame pointer saved/generated in which the call saved
272; registers need to be saved and restored-
273;
274; MSETL [A0StP],D0.5,D0.6,D0.7 ; Hence FRS is 0x18, ARS is 0x00
275; ADD A0StP,A0StP,#LCS
276; .... body of routine
277; GETL D0.5,D1.5,[A0StP+#((0*8)-(FRS+LCS))]
278; GETL D0.6,D1.6,[A0StP+#((1*8)-(FRS+LCS))]
279; GETL D0.7,D1.7,[A0StP+#((2*8)-(FRS+LCS))]
280; SUB A0StP,A0StP,#(ARS+FRS+LCS)
281; MOV PC,D1RtP
282;
283; Lastly, to support profiling assembler code should use a fixed entry/exit
284; sequence if the trigger define _GMON_ASM is defined-
285;
286; #ifndef _GMON_ASM
287; ... optimised entry code
288; #else
289; ; Profiling entry case
290; MOV D0FrT,A0FrP ; Save and calculate new frame pointer
291; ADD A0FrP,A0StP,#(ARS)
292; MSETL [A0StP],...,D0FrT,... or SETL [A0FrP],D0FrT,D1RtP
293; CALLR D0FrT,_mcount_wrapper
294; #endif
295; ... body of routine
296; #ifndef _GMON_ASM
297; ... optimised exit code
298; #else
299; ; Profiling exit case
300; MGETL D0FrT,...,[A0FrP] or GETL D0FrT,D1RtP,[A0FrP++]
301; SUB A0StP,A0FrP,#(ARS+FRS)
302; MOV A0FrP,D0FrT
303; MOV PC,D1RtP
304; #endif
305
306
307; -------------------------------------------------------------------------
308; CONTROL UNIT REGISTERS
309; -------------------------------------------------------------------------
310;
311; See the assembler guide, hardware documentation, or the field values
312; defined below for some details of the use of these registers.
313*/
314#define TXENABLE CT.0 /* Need to define bit-field values in these */
315#define TXMODE CT.1
316#define TXSTATUS CT.2 /* DEFAULT 0x00020000 */
317#define TXRPT CT.3
318#define TXTIMER CT.4
319#define TXL1START CT.5
320#define TXL1END CT.6
321#define TXL1COUNT CT.7
322#define TXL2START CT.8
323#define TXL2END CT.9
324#define TXL2COUNT CT.10
325#define TXBPOBITS CT.11
326#define TXMRSIZE CT.12
327#define TXTIMERI CT.13
328#define TXDRCTRL CT.14 /* DEFAULT 0x0XXXF0F0 */
329#define TXDRSIZE CT.15
330#define TXCATCH0 CT.16
331#define TXCATCH1 CT.17
332#define TXCATCH2 CT.18
333#define TXCATCH3 CT.19
334
335#ifdef METAC_2_1
336#define TXDEFR CT.20
337#define TXCPRS CT.21
338#endif
339
340#define TXINTERN0 CT.23
341#define TXAMAREG0 CT.24
342#define TXAMAREG1 CT.25
343#define TXAMAREG2 CT.26
344#define TXAMAREG3 CT.27
345#define TXDIVTIME CT.28 /* DEFAULT 0x00000001 */
346#define TXPRIVEXT CT.29 /* DEFAULT 0x003B0000 */
347#define TXTACTCYC CT.30
348#define TXIDLECYC CT.31
349
350/*****************************************************************************
351 * CONTROL UNIT REGISTER BITS
352 ****************************************************************************/
353/*
354 * The following registers and where appropriate the sub-fields of those
355 * registers are defined for pervasive use in controlling program flow.
356 */
357
358/*
359 * TXENABLE register fields - only the thread id is routinely useful
360 */
361#define TXENABLE_REGNUM 0
362#define TXENABLE_THREAD_BITS 0x00000700
363#define TXENABLE_THREAD_S 8
364#define TXENABLE_REV_STEP_BITS 0x000000F0
365#define TXENABLE_REV_STEP_S 4
366
367/*
368 * TXMODE register - controls extensions of the instruction set
369 */
370#define TXMODE_REGNUM 1
371#define TXMODE_DEFAULT 0 /* All fields default to zero */
372
373/*
374 * TXSTATUS register - contains a couple of stable bits that can be used
375 * to determine the privilege processing level and interrupt
376 * processing level of the current thread.
377 */
378#define TXSTATUS_REGNUM 2
379#define TXSTATUS_PSTAT_BIT 0x00020000 /* -> Privilege active */
380#define TXSTATUS_PSTAT_S 17
381#define TXSTATUS_ISTAT_BIT 0x00010000 /* -> In interrupt state */
382#define TXSTATUS_ISTAT_S 16
383
384/*
385 * These are all relatively boring registers, mostly full 32-bit
386 */
387#define TXRPT_REGNUM 3 /* Repeat counter for XFR... instructions */
388#define TXTIMER_REGNUM 4 /* Timer-- causes timer trigger on overflow */
389#define TXL1START_REGNUM 5 /* Hardware Loop 1 Start-PC/End-PC/Count */
390#define TXL1END_REGNUM 6
391#define TXL1COUNT_REGNUM 7
392#define TXL2START_REGNUM 8 /* Hardware Loop 2 Start-PC/End-PC/Count */
393#define TXL2END_REGNUM 9
394#define TXL2COUNT_REGNUM 10
395#define TXBPOBITS_REGNUM 11 /* Branch predict override bits - tune perf */
396#define TXTIMERI_REGNUM 13 /* Timer-- time based interrupt trigger */
397
398/*
399 * TXDIVTIME register is routinely read to calculate the time-base for
400 * the TXTIMER register.
401 */
402#define TXDIVTIME_REGNUM 28
403#define TXDIVTIME_DIV_BITS 0x000000FF
404#define TXDIVTIME_DIV_S 0
405#define TXDIVTIME_DIV_MIN 0x00000001 /* Maximum resolution */
406#define TXDIVTIME_DIV_MAX 0x00000100 /* 1/1 -> 1/256 resolution */
407#define TXDIVTIME_BASE_HZ 1000000 /* Timers run at 1Mhz @1/1 */
408
409/*
410 * TXPRIVEXT register can be consulted to decide if write access to a
411 * part of the threads register set is not permitted when in
412 * unprivileged mode (PSTAT == 0).
413 */
414#define TXPRIVEXT_REGNUM 29
415#define TXPRIVEXT_COPRO_BITS 0xFF000000 /* Co-processor 0-7 */
416#define TXPRIVEXT_COPRO_S 24
417#ifndef METAC_1_2
418#define TXPRIVEXT_TXTIMER_BIT 0x00080000 /* TXTIMER priv */
419#define TXPRIVEXT_TRACE_BIT 0x00040000 /* TTEXEC|TTCTRL|GTEXEC */
420#endif
421#define TXPRIVEXT_TXTRIGGER_BIT 0x00020000 /* TXSTAT|TXMASK|TXPOLL */
422#define TXPRIVEXT_TXGBLCREG_BIT 0x00010000 /* Global common regs */
423#define TXPRIVEXT_CBPRIV_BIT 0x00008000 /* Mem i/f dump priv */
424#define TXPRIVEXT_ILOCK_BIT 0x00004000 /* LOCK inst priv */
425#define TXPRIVEXT_TXITACCYC_BIT 0x00002000 /* TXIDLECYC|TXTACTCYC */
426#define TXPRIVEXT_TXDIVTIME_BIT 0x00001000 /* TXDIVTIME priv */
427#define TXPRIVEXT_TXAMAREGX_BIT 0x00000800 /* TXAMAREGX priv */
428#define TXPRIVEXT_TXTIMERI_BIT 0x00000400 /* TXTIMERI priv */
429#define TXPRIVEXT_TXSTATUS_BIT 0x00000200 /* TXSTATUS priv */
430#define TXPRIVEXT_TXDISABLE_BIT 0x00000100 /* TXENABLE priv */
431#ifndef METAC_1_2
432#define TXPRIVEXT_MINIMON_BIT 0x00000080 /* Enable Minim features */
433#define TXPRIVEXT_OLDBCCON_BIT 0x00000020 /* Restore Static predictions */
434#define TXPRIVEXT_ALIGNREW_BIT 0x00000010 /* Align & precise checks */
435#endif
436#define TXPRIVEXT_KEEPPRI_BIT 0x00000008 /* Use AMA_Priority if ISTAT=1*/
437#define TXPRIVEXT_TXTOGGLEI_BIT 0x00000001 /* TX.....I priv */
438
439/*
440 * TXTACTCYC register - counts instructions issued for this thread
441 */
442#define TXTACTCYC_REGNUM 30
443#define TXTACTCYC_COUNT_MASK 0x00FFFFFF
444
445/*
446 * TXIDLECYC register - counts idle cycles
447 */
448#define TXIDLECYC_REGNUM 31
449#define TXIDLECYC_COUNT_MASK 0x00FFFFFF
450
451/*****************************************************************************
452 * DSP EXTENSIONS
453 ****************************************************************************/
454/*
455 * The following values relate to fields and controls that only a program
456 * using the DSP extensions of the META instruction set need to know.
457 */
458
459
460#ifndef METAC_1_2
461/*
462 * Allow co-processor hardware to replace the read pipeline data source in
463 * appropriate cases.
464 */
465#define TXMODE_RDCPEN_BIT 0x00800000
466#endif
467
468/*
469 * Address unit addressing modes
470 */
471#define TXMODE_A1ADDR_BITS 0x00007000
472#define TXMODE_A1ADDR_S 12
473#define TXMODE_A0ADDR_BITS 0x00000700
474#define TXMODE_A0ADDR_S 8
475#define TXMODE_AXADDR_MODULO 3
476#define TXMODE_AXADDR_REVB 4
477#define TXMODE_AXADDR_REVW 5
478#define TXMODE_AXADDR_REVD 6
479#define TXMODE_AXADDR_REVL 7
480
481/*
482 * Data unit OverScale select (default 0 -> normal, 1 -> top 16 bits)
483 */
484#define TXMODE_DXOVERSCALE_BIT 0x00000080
485
486/*
487 * Data unit MX mode select (default 0 -> MX16, 1 -> MX8)
488 */
489#define TXMODE_M8_BIT 0x00000040
490
491/*
492 * Data unit accumulator saturation point (default -> 40 bit accumulator)
493 */
494#define TXMODE_DXACCSAT_BIT 0x00000020 /* Set for 32-bit accumulator */
495
496/*
497 * Data unit accumulator saturation enable (default 0 -> no saturation)
498 */
499#define TXMODE_DXSAT_BIT 0x00000010
500
501/*
502 * Data unit master rounding control (default 0 -> normal, 1 -> convergent)
503 */
504#define TXMODE_DXROUNDING_BIT 0x00000008
505
506/*
507 * Data unit product shift for fractional arithmetic (default off)
508 */
509#define TXMODE_DXPRODSHIFT_BIT 0x00000004
510
511/*
512 * Select the arithmetic mode (multiply mostly) for both data units
513 */
514#define TXMODE_DXARITH_BITS 0x00000003
515#define TXMODE_DXARITH_32 3
516#define TXMODE_DXARITH_32H 2
517#define TXMODE_DXARITH_S16 1
518#define TXMODE_DXARITH_16 0
519
520/*
521 * TXMRSIZE register value only relevant when DSP modulo addressing active
522 */
523#define TXMRSIZE_REGNUM 12
524#define TXMRSIZE_MIN 0x0002 /* 0, 1 -> normal addressing logic */
525#define TXMRSIZE_MAX 0xFFFF
526
527/*
528 * TXDRCTRL register can be used to detect the actaul size of the DSP RAM
529 * partitions allocated to this thread.
530 */
531#define TXDRCTRL_REGNUM 14
532#define TXDRCTRL_SINESIZE_BITS 0x0F000000
533#define TXDRCTRL_SINESIZE_S 24
534#define TXDRCTRL_RAMSZPOW_BITS 0x001F0000 /* Limit = (1<<RAMSZPOW)-1 */
535#define TXDRCTRL_RAMSZPOW_S 16
536#define TXDRCTRL_D1RSZAND_BITS 0x0000F000 /* Mask top 4 bits - D1 */
537#define TXDRCTRL_D1RSZAND_S 12
538#define TXDRCTRL_D0RSZAND_BITS 0x000000F0 /* Mask top 4 bits - D0 */
539#define TXDRCTRL_D0RSZAND_S 4
540/* Given extracted RAMSZPOW and DnRSZAND fields this returns the size */
541#define TXDRCTRL_DXSIZE(Pow, AndBits) \
542 ((((~(AndBits)) & 0x0f) + 1) << ((Pow)-4))
543
544/*
545 * TXDRSIZE register provides modulo addressing options for each DSP RAM
546 */
547#define TXDRSIZE_REGNUM 15
548#define TXDRSIZE_R1MOD_BITS 0xFFFF0000
549#define TXDRSIZE_R1MOD_S 16
550#define TXDRSIZE_R0MOD_BITS 0x0000FFFF
551#define TXDRSIZE_R0MOD_S 0
552
553#define TXDRSIZE_RBRAD_SCALE_BITS 0x70000000
554#define TXDRSIZE_RBRAD_SCALE_S 28
555#define TXDRSIZE_RBMODSIZE_BITS 0x0FFF0000
556#define TXDRSIZE_RBMODSIZE_S 16
557#define TXDRSIZE_RARAD_SCALE_BITS 0x00007000
558#define TXDRSIZE_RARAD_SCALE_S 12
559#define TXDRSIZE_RAMODSIZE_BITS 0x00000FFF
560#define TXDRSIZE_RAMODSIZE_S 0
561
562/*****************************************************************************
563 * DEFERRED and BUS ERROR EXTENSION
564 ****************************************************************************/
565
566/*
567 * TXDEFR register - Deferred exception control
568 */
569#define TXDEFR_REGNUM 20
570#define TXDEFR_DEFAULT 0 /* All fields default to zero */
571
572/*
573 * Bus error state is a multi-bit positive/negative event notification from
574 * the bus infrastructure.
575 */
576#define TXDEFR_BUS_ERR_BIT 0x80000000 /* Set if error (LSB STATE) */
577#define TXDEFR_BUS_ERRI_BIT 0x40000000 /* Fetch returned error */
578#define TXDEFR_BUS_STATE_BITS 0x3F000000 /* Bus event/state data */
579#define TXDEFR_BUS_STATE_S 24
580#define TXDEFR_BUS_TRIG_BIT 0x00800000 /* Set when bus error seen */
581
582/*
583 * Bus events are collected by background code in a deferred manner unless
584 * selected to trigger an extended interrupt HALT trigger when they occur.
585 */
586#define TXDEFR_BUS_ICTRL_BIT 0x00000080 /* Enable interrupt trigger */
587
588/*
589 * CHIP Automatic Mips Allocation control registers
590 * ------------------------------------------------
591 */
592
593/* CT Bank AMA Registers */
594#define TXAMAREG0_REGNUM 24
595#ifdef METAC_1_2
596#define TXAMAREG0_CTRL_BITS 0x07000000
597#else /* METAC_1_2 */
598#define TXAMAREG0_RCOFF_BIT 0x08000000
599#define TXAMAREG0_DLINEHLT_BIT 0x04000000
600#define TXAMAREG0_DLINEDIS_BIT 0x02000000
601#define TXAMAREG0_CYCSTRICT_BIT 0x01000000
602#define TXAMAREG0_CTRL_BITS (TXAMAREG0_RCOFF_BIT | \
603 TXAMAREG0_DLINEHLT_BIT | \
604 TXAMAREG0_DLINEDIS_BIT | \
605 TXAMAREG0_CYCSTRICT_BIT)
606#endif /* !METAC_1_2 */
607#define TXAMAREG0_CTRL_S 24
608#define TXAMAREG0_MDM_BIT 0x00400000
609#define TXAMAREG0_MPF_BIT 0x00200000
610#define TXAMAREG0_MPE_BIT 0x00100000
611#define TXAMAREG0_MASK_BITS (TXAMAREG0_MDM_BIT | \
612 TXAMAREG0_MPF_BIT | \
613 TXAMAREG0_MPE_BIT)
614#define TXAMAREG0_MASK_S 20
615#define TXAMAREG0_SDM_BIT 0x00040000
616#define TXAMAREG0_SPF_BIT 0x00020000
617#define TXAMAREG0_SPE_BIT 0x00010000
618#define TXAMAREG0_STATUS_BITS (TXAMAREG0_SDM_BIT | \
619 TXAMAREG0_SPF_BIT | \
620 TXAMAREG0_SPE_BIT)
621#define TXAMAREG0_STATUS_S 16
622#define TXAMAREG0_PRIORITY_BITS 0x0000FF00
623#define TXAMAREG0_PRIORITY_S 8
624#define TXAMAREG0_BVALUE_BITS 0x000000FF
625#define TXAMAREG0_BVALUE_S 0
626
627#define TXAMAREG1_REGNUM 25
628#define TXAMAREG1_DELAYC_BITS 0x07FFFFFF
629#define TXAMAREG1_DELAYC_S 0
630
631#define TXAMAREG2_REGNUM 26
632#ifdef METAC_1_2
633#define TXAMAREG2_DLINEC_BITS 0x00FFFFFF
634#define TXAMAREG2_DLINEC_S 0
635#else /* METAC_1_2 */
636#define TXAMAREG2_IRQPRIORITY_BIT 0xFF000000
637#define TXAMAREG2_IRQPRIORITY_S 24
638#define TXAMAREG2_DLINEC_BITS 0x00FFFFF0
639#define TXAMAREG2_DLINEC_S 4
640#endif /* !METAC_1_2 */
641
642#define TXAMAREG3_REGNUM 27
643#define TXAMAREG2_AMABLOCK_BIT 0x00080000
644#define TXAMAREG2_AMAC_BITS 0x0000FFFF
645#define TXAMAREG2_AMAC_S 0
646
647/*****************************************************************************
648 * FPU EXTENSIONS
649 ****************************************************************************/
650/*
651 * The following registers only exist in FPU enabled cores.
652 */
653
654/*
655 * TXMODE register - FPU rounding mode control/status fields
656 */
657#define TXMODE_FPURMODE_BITS 0x00030000
658#define TXMODE_FPURMODE_S 16
659#define TXMODE_FPURMODEWRITE_BIT 0x00040000 /* Set to change FPURMODE */
660
661/*
662 * TXDEFR register - FPU exception handling/state is a significant source
663 * of deferrable errors. Run-time S/W can move handling to interrupt level
664 * using DEFR instruction to collect state.
665 */
666#define TXDEFR_FPE_FE_BITS 0x003F0000 /* Set by FPU_FE events */
667#define TXDEFR_FPE_FE_S 16
668
669#define TXDEFR_FPE_INEXACT_FE_BIT 0x010000
670#define TXDEFR_FPE_UNDERFLOW_FE_BIT 0x020000
671#define TXDEFR_FPE_OVERFLOW_FE_BIT 0x040000
672#define TXDEFR_FPE_DIVBYZERO_FE_BIT 0x080000
673#define TXDEFR_FPE_INVALID_FE_BIT 0x100000
674#define TXDEFR_FPE_DENORMAL_FE_BIT 0x200000
675
676#define TXDEFR_FPE_ICTRL_BITS 0x000003F /* Route to interrupts */
677#define TXDEFR_FPE_ICTRL_S 0
678
679#define TXDEFR_FPE_INEXACT_ICTRL_BIT 0x01
680#define TXDEFR_FPE_UNDERFLOW_ICTRL_BIT 0x02
681#define TXDEFR_FPE_OVERFLOW_ICTRL_BIT 0x04
682#define TXDEFR_FPE_DIVBYZERO_ICTRL_BIT 0x08
683#define TXDEFR_FPE_INVALID_ICTRL_BIT 0x10
684#define TXDEFR_FPE_DENORMAL_ICTRL_BIT 0x20
685
686/*
687 * DETAILED FPU RELATED VALUES
688 * ---------------------------
689 */
690
691/*
692 * Rounding mode field in TXMODE can hold a number of logical values
693 */
694#define METAG_FPURMODE_TONEAREST 0x0 /* Default */
695#define METAG_FPURMODE_TOWARDZERO 0x1
696#define METAG_FPURMODE_UPWARD 0x2
697#define METAG_FPURMODE_DOWNWARD 0x3
698
699/*
700 * In order to set the TXMODE register field that controls the rounding mode
701 * an extra bit must be set in the value written versus that read in order
702 * to gate writes to the rounding mode field. This allows other non-FPU code
703 * to modify TXMODE without knowledge of the FPU units presence and not
704 * influence the FPU rounding mode. This macro adds the required bit so new
705 * rounding modes are accepted.
706 */
707#define TXMODE_FPURMODE_SET(FPURMode) \
708 (TXMODE_FPURMODEWRITE_BIT + ((FPURMode)<<TXMODE_FPURMODE_S))
709
710/*
711 * To successfully restore TXMODE to zero at the end of the function the
712 * following value (rather than zero) must be used.
713 */
714#define TXMODE_FPURMODE_RESET (TXMODE_FPURMODEWRITE_BIT)
715
716/*
717 * In TXSTATUS a special bit exists to indicate if FPU H/W has been accessed
718 * since it was last reset.
719 */
720#define TXSTATUS_FPACTIVE_BIT 0x01000000
721
722/*
723 * Exception state (see TXDEFR_FPU_FE_*) and enabling (for interrupt
724 * level processing (see TXDEFR_FPU_ICTRL_*) are controlled by similar
725 * bit mask locations within each field.
726 */
727#define METAG_FPU_FE_INEXACT 0x01
728#define METAG_FPU_FE_UNDERFLOW 0x02
729#define METAG_FPU_FE_OVERFLOW 0x04
730#define METAG_FPU_FE_DIVBYZERO 0x08
731#define METAG_FPU_FE_INVALID 0x10
732#define METAG_FPU_FE_DENORMAL 0x20
733#define METAG_FPU_FE_ALL_EXCEPT (METAG_FPU_FE_INEXACT | \
734 METAG_FPU_FE_UNDERFLOW | \
735 METAG_FPU_FE_OVERFLOW | \
736 METAG_FPU_FE_DIVBYZERO | \
737 METAG_FPU_FE_INVALID | \
738 METAG_FPU_FE_DENORMAL)
739
740/*****************************************************************************
741 * THREAD CONTROL, ERROR, OR INTERRUPT STATE EXTENSIONS
742 ****************************************************************************/
743/*
744 * The following values are only relevant to code that externally controls
745 * threads, handles errors/interrupts, and/or set-up interrupt/error handlers
746 * for subsequent use.
747 */
748
749/*
750 * TXENABLE register fields - only ENABLE_BIT is potentially read/write
751 */
752#define TXENABLE_MAJOR_REV_BITS 0xFF000000
753#define TXENABLE_MAJOR_REV_S 24
754#define TXENABLE_MINOR_REV_BITS 0x00FF0000
755#define TXENABLE_MINOR_REV_S 16
756#define TXENABLE_CLASS_BITS 0x0000F000
757#define TXENABLE_CLASS_S 12
758#define TXENABLE_CLASS_DSP 0x0 /* -> DSP Thread */
759#define TXENABLE_CLASS_LDSP 0x8 /* -> DSP LITE Thread */
760#define TXENABLE_CLASS_GP 0xC /* -> General Purpose Thread */
761#define TXENABLE_CLASSALT_LFPU 0x2 /* Set to indicate LITE FPU */
762#define TXENABLE_CLASSALT_FPUR8 0x1 /* Set to indicate 8xFPU regs */
763#define TXENABLE_MTXARCH_BIT 0x00000800
764#define TXENABLE_STEP_REV_BITS 0x000000F0
765#define TXENABLE_STEP_REV_S 4
766#define TXENABLE_STOPPED_BIT 0x00000004 /* TXOFF due to ENABLE->0 */
767#define TXENABLE_OFF_BIT 0x00000002 /* Thread is in off state */
768#define TXENABLE_ENABLE_BIT 0x00000001 /* Set if running */
769
770/*
771 * TXSTATUS register - used by external/internal interrupt/error handler
772 */
773#define TXSTATUS_CB1MARKER_BIT 0x00800000 /* -> int level mem state */
774#define TXSTATUS_CBMARKER_BIT 0x00400000 /* -> mem i/f state dumped */
775#define TXSTATUS_MEM_FAULT_BITS 0x00300000
776#define TXSTATUS_MEM_FAULT_S 20
777#define TXSTATUS_MEMFAULT_NONE 0x0 /* -> No memory fault */
778#define TXSTATUS_MEMFAULT_GEN 0x1 /* -> General fault */
779#define TXSTATUS_MEMFAULT_PF 0x2 /* -> Page fault */
780#define TXSTATUS_MEMFAULT_RO 0x3 /* -> Read only fault */
781#define TXSTATUS_MAJOR_HALT_BITS 0x000C0000
782#define TXSTATUS_MAJOR_HALT_S 18
783#define TXSTATUS_MAJHALT_TRAP 0x0 /* -> SWITCH inst used */
784#define TXSTATUS_MAJHALT_INST 0x1 /* -> Unknown inst or fetch */
785#define TXSTATUS_MAJHALT_PRIV 0x2 /* -> Internal privilege */
786#define TXSTATUS_MAJHALT_MEM 0x3 /* -> Memory i/f fault */
787#define TXSTATUS_L_STEP_BITS 0x00000800 /* -> Progress of L oper */
788#define TXSTATUS_LSM_STEP_BITS 0x00000700 /* -> Progress of L/S mult */
789#define TXSTATUS_LSM_STEP_S 8
790#define TXSTATUS_FLAG_BITS 0x0000001F /* -> All the flags */
791#define TXSTATUS_SCC_BIT 0x00000010 /* -> Split-16 flags ... */
792#define TXSTATUS_SCF_LZ_BIT 0x00000008 /* -> Split-16 Low Z flag */
793#define TXSTATUS_SCF_HZ_BIT 0x00000004 /* -> Split-16 High Z flag */
794#define TXSTATUS_SCF_HC_BIT 0x00000002 /* -> Split-16 High C flag */
795#define TXSTATUS_SCF_LC_BIT 0x00000001 /* -> Split-16 Low C flag */
796#define TXSTATUS_CF_Z_BIT 0x00000008 /* -> Condition Z flag */
797#define TXSTATUS_CF_N_BIT 0x00000004 /* -> Condition N flag */
798#define TXSTATUS_CF_O_BIT 0x00000002 /* -> Condition O flag */
799#define TXSTATUS_CF_C_BIT 0x00000001 /* -> Condition C flag */
800
801/*
802 * TXCATCH0-3 register contents may store information on a memory operation
803 * that has failed if the bit TXSTATUS_CBMARKER_BIT is set.
804 */
805#define TXCATCH0_REGNUM 16
806#define TXCATCH1_REGNUM 17
807#define TXCATCH1_ADDR_BITS 0xFFFFFFFF /* TXCATCH1 is Addr 0-31 */
808#define TXCATCH1_ADDR_S 0
809#define TXCATCH2_REGNUM 18
810#define TXCATCH2_DATA0_BITS 0xFFFFFFFF /* TXCATCH2 is Data 0-31 */
811#define TXCATCH2_DATA0_S 0
812#define TXCATCH3_REGNUM 19
813#define TXCATCH3_DATA1_BITS 0xFFFFFFFF /* TXCATCH3 is Data 32-63 */
814#define TXCATCH3_DATA1_S 0
815
816/*
817 * Detailed catch state information
818 * --------------------------------
819 */
820
821/* Contents of TXCATCH0 register */
822#define TXCATCH0_LDRXX_BITS 0xF8000000 /* Load destination reg 0-31 */
823#define TXCATCH0_LDRXX_S 27
824#define TXCATCH0_LDDST_BITS 0x07FF0000 /* Load destination bits */
825#define TXCATCH0_LDDST_S 16
826#define TXCATCH0_LDDST_D1DSP 0x400 /* One bit set if it's a LOAD */
827#define TXCATCH0_LDDST_D0DSP 0x200
828#define TXCATCH0_LDDST_TMPLT 0x100
829#define TXCATCH0_LDDST_TR 0x080
830#ifdef METAC_2_1
831#define TXCATCH0_LDDST_FPU 0x040
832#endif
833#define TXCATCH0_LDDST_PC 0x020
834#define TXCATCH0_LDDST_A1 0x010
835#define TXCATCH0_LDDST_A0 0x008
836#define TXCATCH0_LDDST_D1 0x004
837#define TXCATCH0_LDDST_D0 0x002
838#define TXCATCH0_LDDST_CT 0x001
839#ifdef METAC_2_1
840#define TXCATCH0_WATCHSTOP_BIT 0x00004000 /* Set if Data Watch set fault */
841#endif
842#define TXCATCH0_WATCHS_BIT 0x00004000 /* Set if Data Watch set fault */
843#define TXCATCH0_WATCH1_BIT 0x00002000 /* Set if Data Watch 1 matches */
844#define TXCATCH0_WATCH0_BIT 0x00001000 /* Set if Data Watch 0 matches */
845#define TXCATCH0_FAULT_BITS 0x00000C00 /* See TXSTATUS_MEMFAULT_* */
846#define TXCATCH0_FAULT_S 10
847#define TXCATCH0_PRIV_BIT 0x00000200 /* Privilege of transaction */
848#define TXCATCH0_READ_BIT 0x00000100 /* Set for Read or Load cases */
849
850#ifdef METAC_2_1
851/* LNKGET Marker bit in TXCATCH0 */
852#define TXCATCH0_LNKGET_MARKER_BIT 0x00000008
853#define TXCATCH0_PREPROC_BIT 0x00000004
854#endif
855
856/* Loads are indicated by one of the LDDST bits being set */
857#define TXCATCH0_LDM16_BIT 0x00000004 /* Load M16 flag */
858#define TXCATCH0_LDL2L1_BITS 0x00000003 /* Load data size L2,L1 */
859#define TXCATCH0_LDL2L1_S 0
860
861/* Reads are indicated by the READ bit being set without LDDST bits */
862#define TXCATCH0_RAXX_BITS 0x0000001F /* RAXX issue port for read */
863#define TXCATCH0_RAXX_S 0
864
865/* Write operations are all that remain if READ bit is not set */
866#define TXCATCH0_WMASK_BITS 0x000000FF /* Write byte lane mask */
867#define TXCATCH0_WMASK_S 0
868
869#ifdef METAC_2_1
870
871/* When a FPU exception is signalled then FPUSPEC == FPUSPEC_TAG */
872#define TXCATCH0_FPURDREG_BITS 0xF8000000
873#define TXCATCH0_FPURDREG_S 27
874#define TXCATCH0_FPUR1REG_BITS 0x07C00000
875#define TXCATCH0_FPUR1REG_S 22
876#define TXCATCH0_FPUSPEC_BITS 0x000F0000
877#define TXCATCH0_FPUSPEC_S 16
878#define TXCATCH0_FPUSPEC_TAG 0xF
879#define TXCATCH0_FPUINSTA_BIT 0x00001000
880#define TXCATCH0_FPUINSTQ_BIT 0x00000800
881#define TXCATCH0_FPUINSTZ_BIT 0x00000400
882#define TXCATCH0_FPUINSTN_BIT 0x00000200
883#define TXCATCH0_FPUINSTO3O_BIT 0x00000100
884#define TXCATCH0_FPUWIDTH_BITS 0x000000C0
885#define TXCATCH0_FPUWIDTH_S 6
886#define TXCATCH0_FPUWIDTH_FLOAT 0
887#define TXCATCH0_FPUWIDTH_DOUBLE 1
888#define TXCATCH0_FPUWIDTH_PAIRED 2
889#define TXCATCH0_FPUOPENC_BITS 0x0000003F
890#define TXCATCH0_FPUOPENC_S 0
891#define TXCATCH0_FPUOPENC_ADD 0 /* rop1=Rs1, rop3=Rs2 */
892#define TXCATCH0_FPUOPENC_SUB 1 /* rop1=Rs1, rop3=Rs2 */
893#define TXCATCH0_FPUOPENC_MUL 2 /* rop1=Rs1, rop2=Rs2 */
894#define TXCATCH0_FPUOPENC_ATOI 3 /* rop3=Rs */
895#define TXCATCH0_FPUOPENC_ATOX 4 /* rop3=Rs, uses #Imm */
896#define TXCATCH0_FPUOPENC_ITOA 5 /* rop3=Rs */
897#define TXCATCH0_FPUOPENC_XTOA 6 /* rop3=Rs, uses #Imm */
898#define TXCATCH0_FPUOPENC_ATOH 7 /* rop2=Rs */
899#define TXCATCH0_FPUOPENC_HTOA 8 /* rop2=Rs */
900#define TXCATCH0_FPUOPENC_DTOF 9 /* rop3=Rs */
901#define TXCATCH0_FPUOPENC_FTOD 10 /* rop3=Rs */
902#define TXCATCH0_FPUOPENC_DTOL 11 /* rop3=Rs */
903#define TXCATCH0_FPUOPENC_LTOD 12 /* rop3=Rs */
904#define TXCATCH0_FPUOPENC_DTOXL 13 /* rop3=Rs, uses #imm */
905#define TXCATCH0_FPUOPENC_XLTOD 14 /* rop3=Rs, uses #imm */
906#define TXCATCH0_FPUOPENC_CMP 15 /* rop1=Rs1, rop2=Rs2 */
907#define TXCATCH0_FPUOPENC_MIN 16 /* rop1=Rs1, rop2=Rs2 */
908#define TXCATCH0_FPUOPENC_MAX 17 /* rop1=Rs1, rop2=Rs2 */
909#define TXCATCH0_FPUOPENC_ADDRE 18 /* rop1=Rs1, rop3=Rs2 */
910#define TXCATCH0_FPUOPENC_SUBRE 19 /* rop1=Rs1, rop3=Rs2 */
911#define TXCATCH0_FPUOPENC_MULRE 20 /* rop1=Rs1, rop2=Rs2 */
912#define TXCATCH0_FPUOPENC_MXA 21 /* rop1=Rs1, rop2=Rs2, rop3=Rs3*/
913#define TXCATCH0_FPUOPENC_MXAS 22 /* rop1=Rs1, rop2=Rs2, rop3=Rs3*/
914#define TXCATCH0_FPUOPENC_MAR 23 /* rop1=Rs1, rop2=Rs2 */
915#define TXCATCH0_FPUOPENC_MARS 24 /* rop1=Rs1, rop2=Rs2 */
916#define TXCATCH0_FPUOPENC_MUZ 25 /* rop1=Rs1, rop2=Rs2, rop3=Rs3*/
917#define TXCATCH0_FPUOPENC_MUZS 26 /* rop1=Rs1, rop2=Rs2, rop3=Rs3*/
918#define TXCATCH0_FPUOPENC_RCP 27 /* rop2=Rs */
919#define TXCATCH0_FPUOPENC_RSQ 28 /* rop2=Rs */
920
921/* For floating point exceptions TXCATCH1 is used to carry extra data */
922#define TXCATCH1_FPUR2REG_BITS 0xF8000000
923#define TXCATCH1_FPUR2REG_S 27
924#define TXCATCH1_FPUR3REG_BITS 0x07C00000 /* Undefined if O3O set */
925#define TXCATCH1_FPUR3REG_S 22
926#define TXCATCH1_FPUIMM16_BITS 0x0000FFFF
927#define TXCATCH1_FPUIMM16_S 0
928
929#endif /* METAC_2_1 */
930
931/*
932 * TXDIVTIME register used to hold the partial base address of memory i/f
933 * state dump area. Now deprecated.
934 */
935#define TXDIVTIME_CBBASE_MASK 0x03FFFE00
936#define TXDIVTIME_CBBASE_LINBASE 0x80000000
937#define TXDIVTIME_CBBASE_LINBOFF 0x00000000 /* BGnd state */
938#define TXDIVTIME_CBBASE_LINIOFF 0x00000100 /* Int state */
939
940/*
941 * TXDIVTIME register used to indicate if the read pipeline was dirty when a
942 * thread was interrupted, halted, or generated an exception. It is invalid
943 * to attempt to issue a further pipeline read address while the read
944 * pipeline is in the dirty state.
945 */
946#define TXDIVTIME_RPDIRTY_BIT 0x80000000
947
948/*
949 * Further bits in the TXDIVTIME register allow interrupt handling code to
950 * short-cut the discovery the most significant bit last read from TXSTATI.
951 *
952 * This is the bit number of the trigger line that a low level interrupt
953 * handler should acknowledge and then perhaps the index of a corresponding
954 * handler function.
955 */
956#define TXDIVTIME_IRQENC_BITS 0x0F000000
957#define TXDIVTIME_IRQENC_S 24
958
959/*
960 * If TXDIVTIME_RPVALID_BIT is set the read pipeline contained significant
961 * information when the thread was interrupted|halted|exceptioned. Each slot
962 * containing data is indicated by a one bit in the corresponding
963 * TXDIVTIME_RPMASK_BITS bit (least significance bit relates to first
964 * location in read pipeline - most likely to have the 1 state). Empty slots
965 * contain zeroes with no interlock applied on reads if RPDIRTY is currently
966 * set with RPMASK itself being read-only state.
967 */
968#define TXDIVTIME_RPMASK_BITS 0x003F0000 /* -> Full (1) Empty (0) */
969#define TXDIVTIME_RPMASK_S 16
970
971/*
972 * TXPRIVEXT register can be used to single step thread execution and
973 * enforce synchronous memory i/f address checking for debugging purposes.
974 */
975#define TXPRIVEXT_TXSTEP_BIT 0x00000004
976#define TXPRIVEXT_MEMCHECK_BIT 0x00000002
977
978/*
979 * TXINTERNx registers holds internal state information for H/W debugging only
980 */
981#define TXINTERN0_REGNUM 23
982#define TXINTERN0_LOCK2_BITS 0xF0000000
983#define TXINTERN0_LOCK2_S 28
984#define TXINTERN0_LOCK1_BITS 0x0F000000
985#define TXINTERN0_LOCK1_S 24
986#define TXINTERN0_TIFDF_BITS 0x0000F000
987#define TXINTERN0_TIFDF_S 12
988#define TXINTERN0_TIFIB_BITS 0x00000F00
989#define TXINTERN0_TIFIB_S 8
990#define TXINTERN0_TIFAF_BITS 0x000000F0
991#define TXINTERN0_TIFAF_S 4
992#define TXINTERN0_MSTATE_BITS 0x0000000F
993#define TXINTERN0_MSTATE_S 0
994
995/*
996 * TXSTAT, TXMASK, TXPOLL, TXSTATI, TXMASKI, TXPOLLI registers from trigger
997 * bank all have similar contents (upper kick count bits not in MASK regs)
998 */
999#define TXSTAT_REGNUM 0
1000#define TXSTAT_TIMER_BIT 0x00000001
1001#define TXSTAT_TIMER_S 0
1002#define TXSTAT_KICK_BIT 0x00000002
1003#define TXSTAT_KICK_S 1
1004#define TXSTAT_DEFER_BIT 0x00000008
1005#define TXSTAT_DEFER_S 3
1006#define TXSTAT_EXTTRIG_BITS 0x0000FFF0
1007#define TXSTAT_EXTTRIG_S 4
1008#define TXSTAT_FPE_BITS 0x003F0000
1009#define TXSTAT_FPE_S 16
1010#define TXSTAT_FPE_DENORMAL_BIT 0x00200000
1011#define TXSTAT_FPE_DENORMAL_S 21
1012#define TXSTAT_FPE_INVALID_BIT 0x00100000
1013#define TXSTAT_FPE_INVALID_S 20
1014#define TXSTAT_FPE_DIVBYZERO_BIT 0x00080000
1015#define TXSTAT_FPE_DIVBYZERO_S 19
1016#define TXSTAT_FPE_OVERFLOW_BIT 0x00040000
1017#define TXSTAT_FPE_OVERFLOW_S 18
1018#define TXSTAT_FPE_UNDERFLOW_BIT 0x00020000
1019#define TXSTAT_FPE_UNDERFLOW_S 17
1020#define TXSTAT_FPE_INEXACT_BIT 0x00010000
1021#define TXSTAT_FPE_INEXACT_S 16
1022#define TXSTAT_BUSERR_BIT 0x00800000 /* Set if bus error/ack state */
1023#define TXSTAT_BUSERR_S 23
1024#define TXSTAT_BUSSTATE_BITS 0xFF000000 /* Read only */
1025#define TXSTAT_BUSSTATE_S 24
1026#define TXSTAT_KICKCNT_BITS 0xFFFF0000
1027#define TXSTAT_KICKCNT_S 16
1028#define TXMASK_REGNUM 1
1029#define TXSTATI_REGNUM 2
1030#define TXSTATI_BGNDHALT_BIT 0x00000004
1031#define TXMASKI_REGNUM 3
1032#define TXPOLL_REGNUM 4
1033#define TXPOLLI_REGNUM 6
1034
1035/*
1036 * TXDRCTRL register can be used to partition the DSP RAM space available to
1037 * this thread at startup. This is achieved by offsetting the region allocated
1038 * to each thread.
1039 */
1040#define TXDRCTRL_D1PARTOR_BITS 0x00000F00 /* OR's into top 4 bits */
1041#define TXDRCTRL_D1PARTOR_S 8
1042#define TXDRCTRL_D0PARTOR_BITS 0x0000000F /* OR's into top 4 bits */
1043#define TXDRCTRL_D0PARTOR_S 0
1044/* Given extracted Pow and Or fields this is threads base within DSP RAM */
1045#define TXDRCTRL_DXBASE(Pow, Or) ((Or)<<((Pow)-4))
1046
1047/*****************************************************************************
1048 * RUN TIME TRACE CONTROL REGISTERS
1049 ****************************************************************************/
1050/*
1051 * The following values are only relevant to code that implements run-time
1052 * trace features within the META Core
1053 */
1054#define TTEXEC TT.0
1055#define TTCTRL TT.1
1056#define TTMARK TT.2
1057#define TTREC TT.3
1058#define GTEXEC TT.4
1059
1060#define TTEXEC_REGNUM 0
1061#define TTEXEC_EXTTRIGAND_BITS 0x7F000000
1062#define TTEXEC_EXTTRIGAND_S 24
1063#define TTEXEC_EXTTRIGEN_BIT 0x00008000
1064#define TTEXEC_EXTTRIGMATCH_BITS 0x00007F00
1065#define TTEXEC_EXTTRIGMATCH_S 8
1066#define TTEXEC_TCMODE_BITS 0x00000003
1067#define TTEXEC_TCMODE_S 0
1068
1069#define TTCTRL_REGNUM 1
1070#define TTCTRL_TRACETT_BITS 0x00008000
1071#define TTCTRL_TRACETT_S 15
1072#define TTCTRL_TRACEALL_BITS 0x00002000
1073#define TTCTRL_TRACEALL_S 13
1074#ifdef METAC_2_1
1075#define TTCTRL_TRACEALLTAG_BITS 0x00000400
1076#define TTCTRL_TRACEALLTAG_S 10
1077#endif /* METAC_2_1 */
1078#define TTCTRL_TRACETAG_BITS 0x00000200
1079#define TTCTRL_TRACETAG_S 9
1080#define TTCTRL_TRACETTPC_BITS 0x00000080
1081#define TTCTRL_TRACETTPC_S 7
1082#define TTCTRL_TRACEMPC_BITS 0x00000020
1083#define TTCTRL_TRACEMPC_S 5
1084#define TTCTRL_TRACEEN_BITS 0x00000008
1085#define TTCTRL_TRACEEN_S 3
1086#define TTCTRL_TRACEEN1_BITS 0x00000004
1087#define TTCTRL_TRACEEN1_S 2
1088#define TTCTRL_TRACEPC_BITS 0x00000002
1089#define TTCTRL_TRACEPC_S 1
1090
1091#ifdef METAC_2_1
1092#define TTMARK_REGNUM 2
1093#define TTMARK_BITS 0xFFFFFFFF
1094#define TTMARK_S 0x0
1095
1096#define TTREC_REGNUM 3
1097#define TTREC_BITS 0xFFFFFFFFFFFFFFFF
1098#define TTREC_S 0x0
1099#endif /* METAC_2_1 */
1100
1101#define GTEXEC_REGNUM 4
1102#define GTEXEC_DCRUN_BITS 0x80000000
1103#define GTEXEC_DCRUN_S 31
1104#define GTEXEC_ICMODE_BITS 0x0C000000
1105#define GTEXEC_ICMODE_S 26
1106#define GTEXEC_TCMODE_BITS 0x03000000
1107#define GTEXEC_TCMODE_S 24
1108#define GTEXEC_PERF1CMODE_BITS 0x00040000
1109#define GTEXEC_PERF1CMODE_S 18
1110#define GTEXEC_PERF0CMODE_BITS 0x00010000
1111#define GTEXEC_PERF0CMODE_S 16
1112#define GTEXEC_REFMSEL_BITS 0x0000F000
1113#define GTEXEC_REFMSEL_S 12
1114#define GTEXEC_METRICTH_BITS 0x000003FF
1115#define GTEXEC_METRICTH_S 0
1116
1117#ifdef METAC_2_1
1118/*
1119 * Clock Control registers
1120 * -----------------------
1121 */
1122#define TXCLKCTRL_REGNUM 22
1123
1124/*
1125 * Default setting is with clocks always on (DEFON), turning all clocks off
1126 * can only be done from external devices (OFF), enabling automatic clock
1127 * gating will allow clocks to stop as units fall idle.
1128 */
1129#define TXCLKCTRL_ALL_OFF 0x02222222
1130#define TXCLKCTRL_ALL_DEFON 0x01111111
1131#define TXCLKCTRL_ALL_AUTO 0x02222222
1132
1133/*
1134 * Individual fields control caches, floating point and main data/addr units
1135 */
1136#define TXCLKCTRL_CLOCKIC_BITS 0x03000000
1137#define TXCLKCTRL_CLOCKIC_S 24
1138#define TXCLKCTRL_CLOCKDC_BITS 0x00300000
1139#define TXCLKCTRL_CLOCKDC_S 20
1140#define TXCLKCTRL_CLOCKFP_BITS 0x00030000
1141#define TXCLKCTRL_CLOCKFP_S 16
1142#define TXCLKCTRL_CLOCKD1_BITS 0x00003000
1143#define TXCLKCTRL_CLOCKD1_S 12
1144#define TXCLKCTRL_CLOCKD0_BITS 0x00000300
1145#define TXCLKCTRL_CLOCKD0_S 8
1146#define TXCLKCTRL_CLOCKA1_BITS 0x00000030
1147#define TXCLKCTRL_CLOCKA1_S 4
1148#define TXCLKCTRL_CLOCKA0_BITS 0x00000003
1149#define TXCLKCTRL_CLOCKA0_S 0
1150
1151/*
1152 * Individual settings for each field are common
1153 */
1154#define TXCLKCTRL_CLOCKxx_OFF 0
1155#define TXCLKCTRL_CLOCKxx_DEFON 1
1156#define TXCLKCTRL_CLOCKxx_AUTO 2
1157
1158#endif /* METAC_2_1 */
1159
1160#ifdef METAC_2_1
1161/*
1162 * Fast interrupt new bits
1163 * ------------------------------------
1164 */
1165#define TXSTATUS_IPTOGGLE_BIT 0x80000000 /* Prev PToggle of TXPRIVEXT */
1166#define TXSTATUS_ISTATE_BIT 0x40000000 /* IState bit */
1167#define TXSTATUS_IWAIT_BIT 0x20000000 /* wait indefinitely in decision step*/
1168#define TXSTATUS_IEXCEPT_BIT 0x10000000 /* Indicate an exception occurred */
1169#define TXSTATUS_IRPCOUNT_BITS 0x0E000000 /* Number of 'dirty' date entries*/
1170#define TXSTATUS_IRPCOUNT_S 25
1171#define TXSTATUS_IRQSTAT_BITS 0x0000F000 /* IRQEnc bits, trigger or interrupts */
1172#define TXSTATUS_IRQSTAT_S 12
1173#define TXSTATUS_LNKSETOK_BIT 0x00000020 /* LNKSetOK bit, successful LNKSET */
1174
1175/* New fields in TXDE for fast interrupt system */
1176#define TXDIVTIME_IACTIVE_BIT 0x00008000 /* Enable new interrupt system */
1177#define TXDIVTIME_INONEST_BIT 0x00004000 /* Gate nested interrupt */
1178#define TXDIVTIME_IREGIDXGATE_BIT 0x00002000 /* gate of the IRegIdex field */
1179#define TXDIVTIME_IREGIDX_BITS 0x00001E00 /* Index of A0.0/1 replaces */
1180#define TXDIVTIME_IREGIDX_S 9
1181#define TXDIVTIME_NOST_BIT 0x00000100 /* disable superthreading bit */
1182#endif
1183
1184#endif /* _ASM_METAG_REGS_H_ */
diff --git a/arch/metag/include/asm/mman.h b/arch/metag/include/asm/mman.h
deleted file mode 100644
index dcb0d20a64fd..000000000000
--- a/arch/metag/include/asm/mman.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __METAG_MMAN_H__
3#define __METAG_MMAN_H__
4
5#include <uapi/asm/mman.h>
6
7#ifndef __ASSEMBLY__
8#define arch_mmap_check metag_mmap_check
9int metag_mmap_check(unsigned long addr, unsigned long len,
10 unsigned long flags);
11#endif
12#endif /* __METAG_MMAN_H__ */
diff --git a/arch/metag/include/asm/mmu.h b/arch/metag/include/asm/mmu.h
deleted file mode 100644
index cab5a01c3dcb..000000000000
--- a/arch/metag/include/asm/mmu.h
+++ /dev/null
@@ -1,78 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __MMU_H
3#define __MMU_H
4
5#ifdef CONFIG_METAG_USER_TCM
6#include <linux/list.h>
7#endif
8
9#ifdef CONFIG_HUGETLB_PAGE
10#include <asm/page.h>
11#endif
12
13typedef struct {
14 /* Software pgd base pointer used for Meta 1.x MMU. */
15 unsigned long pgd_base;
16#ifdef CONFIG_METAG_USER_TCM
17 struct list_head tcm;
18#endif
19#ifdef CONFIG_HUGETLB_PAGE
20#if HPAGE_SHIFT < HUGEPT_SHIFT
21 /* last partially filled huge page table address */
22 unsigned long part_huge;
23#endif
24#endif
25} mm_context_t;
26
27/* Given a virtual address, return the pte for the top level 4meg entry
28 * that maps that address.
29 * Returns 0 (an empty pte) if that range is not mapped.
30 */
31unsigned long mmu_read_first_level_page(unsigned long vaddr);
32
33/* Given a linear (virtual) address, return the second level 4k pte
34 * that maps that address. Returns 0 if the address is not mapped.
35 */
36unsigned long mmu_read_second_level_page(unsigned long vaddr);
37
38/* Get the virtual base address of the MMU */
39unsigned long mmu_get_base(void);
40
41/* Initialize the MMU. */
42void mmu_init(unsigned long mem_end);
43
44#ifdef CONFIG_METAG_META21_MMU
45/*
46 * For cpu "cpu" calculate and return the address of the
47 * MMCU_TnLOCAL_TABLE_PHYS0 if running in local-space or
48 * MMCU_TnGLOBAL_TABLE_PHYS0 if running in global-space.
49 */
50static inline unsigned long mmu_phys0_addr(unsigned int cpu)
51{
52 unsigned long phys0;
53
54 phys0 = (MMCU_T0LOCAL_TABLE_PHYS0 +
55 (MMCU_TnX_TABLE_PHYSX_STRIDE * cpu)) +
56 (MMCU_TXG_TABLE_PHYSX_OFFSET * is_global_space(PAGE_OFFSET));
57
58 return phys0;
59}
60
61/*
62 * For cpu "cpu" calculate and return the address of the
63 * MMCU_TnLOCAL_TABLE_PHYS1 if running in local-space or
64 * MMCU_TnGLOBAL_TABLE_PHYS1 if running in global-space.
65 */
66static inline unsigned long mmu_phys1_addr(unsigned int cpu)
67{
68 unsigned long phys1;
69
70 phys1 = (MMCU_T0LOCAL_TABLE_PHYS1 +
71 (MMCU_TnX_TABLE_PHYSX_STRIDE * cpu)) +
72 (MMCU_TXG_TABLE_PHYSX_OFFSET * is_global_space(PAGE_OFFSET));
73
74 return phys1;
75}
76#endif /* CONFIG_METAG_META21_MMU */
77
78#endif
diff --git a/arch/metag/include/asm/mmu_context.h b/arch/metag/include/asm/mmu_context.h
deleted file mode 100644
index 7b4766379622..000000000000
--- a/arch/metag/include/asm/mmu_context.h
+++ /dev/null
@@ -1,115 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __METAG_MMU_CONTEXT_H
3#define __METAG_MMU_CONTEXT_H
4
5#include <asm-generic/mm_hooks.h>
6
7#include <asm/page.h>
8#include <asm/mmu.h>
9#include <asm/tlbflush.h>
10#include <asm/cacheflush.h>
11
12#include <linux/io.h>
13#include <linux/mm_types.h>
14
15static inline void enter_lazy_tlb(struct mm_struct *mm,
16 struct task_struct *tsk)
17{
18}
19
20static inline int init_new_context(struct task_struct *tsk,
21 struct mm_struct *mm)
22{
23#ifndef CONFIG_METAG_META21_MMU
24 /* We use context to store a pointer to the page holding the
25 * pgd of a process while it is running. While a process is not
26 * running the pgd and context fields should be equal.
27 */
28 mm->context.pgd_base = (unsigned long) mm->pgd;
29#endif
30#ifdef CONFIG_METAG_USER_TCM
31 INIT_LIST_HEAD(&mm->context.tcm);
32#endif
33 return 0;
34}
35
36#ifdef CONFIG_METAG_USER_TCM
37
38#include <linux/slab.h>
39#include <asm/tcm.h>
40
41static inline void destroy_context(struct mm_struct *mm)
42{
43 struct tcm_allocation *pos, *n;
44
45 list_for_each_entry_safe(pos, n, &mm->context.tcm, list) {
46 tcm_free(pos->tag, pos->addr, pos->size);
47 list_del(&pos->list);
48 kfree(pos);
49 }
50}
51#else
52#define destroy_context(mm) do { } while (0)
53#endif
54
55#ifdef CONFIG_METAG_META21_MMU
56static inline void load_pgd(pgd_t *pgd, int thread)
57{
58 unsigned long phys0 = mmu_phys0_addr(thread);
59 unsigned long phys1 = mmu_phys1_addr(thread);
60
61 /*
62 * 0x900 2Gb address space
63 * The permission bits apply to MMU table region which gives a 2MB
64 * window into physical memory. We especially don't want userland to be
65 * able to access this.
66 */
67 metag_out32(0x900 | _PAGE_CACHEABLE | _PAGE_PRIV | _PAGE_WRITE |
68 _PAGE_PRESENT, phys0);
69 /* Set new MMU base address */
70 metag_out32(__pa(pgd) & MMCU_TBLPHYS1_ADDR_BITS, phys1);
71}
72#endif
73
74static inline void switch_mmu(struct mm_struct *prev, struct mm_struct *next)
75{
76#ifdef CONFIG_METAG_META21_MMU
77 load_pgd(next->pgd, hard_processor_id());
78#else
79 unsigned int i;
80
81 /* prev->context == prev->pgd in the case where we are initially
82 switching from the init task to the first process. */
83 if (prev->context.pgd_base != (unsigned long) prev->pgd) {
84 for (i = FIRST_USER_PGD_NR; i < USER_PTRS_PER_PGD; i++)
85 ((pgd_t *) prev->context.pgd_base)[i] = prev->pgd[i];
86 } else
87 prev->pgd = (pgd_t *)mmu_get_base();
88
89 next->pgd = prev->pgd;
90 prev->pgd = (pgd_t *) prev->context.pgd_base;
91
92 for (i = FIRST_USER_PGD_NR; i < USER_PTRS_PER_PGD; i++)
93 next->pgd[i] = ((pgd_t *) next->context.pgd_base)[i];
94
95 flush_cache_all();
96#endif
97 flush_tlb_all();
98}
99
100static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
101 struct task_struct *tsk)
102{
103 if (prev != next)
104 switch_mmu(prev, next);
105}
106
107static inline void activate_mm(struct mm_struct *prev_mm,
108 struct mm_struct *next_mm)
109{
110 switch_mmu(prev_mm, next_mm);
111}
112
113#define deactivate_mm(tsk, mm) do { } while (0)
114
115#endif
diff --git a/arch/metag/include/asm/mmzone.h b/arch/metag/include/asm/mmzone.h
deleted file mode 100644
index 8627fb532206..000000000000
--- a/arch/metag/include/asm/mmzone.h
+++ /dev/null
@@ -1,43 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_METAG_MMZONE_H
3#define __ASM_METAG_MMZONE_H
4
5#ifdef CONFIG_NEED_MULTIPLE_NODES
6#include <linux/numa.h>
7
8extern struct pglist_data *node_data[];
9#define NODE_DATA(nid) (node_data[nid])
10
11static inline int pfn_to_nid(unsigned long pfn)
12{
13 int nid;
14
15 for (nid = 0; nid < MAX_NUMNODES; nid++)
16 if (pfn >= node_start_pfn(nid) && pfn <= node_end_pfn(nid))
17 break;
18
19 return nid;
20}
21
22static inline struct pglist_data *pfn_to_pgdat(unsigned long pfn)
23{
24 return NODE_DATA(pfn_to_nid(pfn));
25}
26
27/* arch/metag/mm/numa.c */
28void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end);
29#else
30static inline void
31setup_bootmem_node(int nid, unsigned long start, unsigned long end)
32{
33}
34#endif /* CONFIG_NEED_MULTIPLE_NODES */
35
36#ifdef CONFIG_NUMA
37/* SoC specific mem init */
38void __init soc_mem_setup(void);
39#else
40static inline void __init soc_mem_setup(void) {};
41#endif
42
43#endif /* __ASM_METAG_MMZONE_H */
diff --git a/arch/metag/include/asm/module.h b/arch/metag/include/asm/module.h
deleted file mode 100644
index e957171c320b..000000000000
--- a/arch/metag/include/asm/module.h
+++ /dev/null
@@ -1,38 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_METAG_MODULE_H
3#define _ASM_METAG_MODULE_H
4
5#include <asm-generic/module.h>
6
7struct metag_plt_entry {
8 /* Indirect jump instruction sequence. */
9 unsigned long tramp[2];
10};
11
12struct mod_arch_specific {
13 /* Indices of PLT sections within module. */
14 unsigned int core_plt_section, init_plt_section;
15};
16
17#if defined CONFIG_METAG_META12
18#define MODULE_PROC_FAMILY "META 1.2 "
19#elif defined CONFIG_METAG_META21
20#define MODULE_PROC_FAMILY "META 2.1 "
21#else
22#define MODULE_PROC_FAMILY ""
23#endif
24
25#ifdef CONFIG_4KSTACKS
26#define MODULE_STACKSIZE "4KSTACKS "
27#else
28#define MODULE_STACKSIZE ""
29#endif
30
31#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
32
33#ifdef MODULE
34asm(".section .plt,\"ax\",@progbits; .balign 8; .previous");
35asm(".section .init.plt,\"ax\",@progbits; .balign 8; .previous");
36#endif
37
38#endif /* _ASM_METAG_MODULE_H */
diff --git a/arch/metag/include/asm/page.h b/arch/metag/include/asm/page.h
deleted file mode 100644
index 9e994d77069d..000000000000
--- a/arch/metag/include/asm/page.h
+++ /dev/null
@@ -1,129 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _METAG_PAGE_H
3#define _METAG_PAGE_H
4
5#include <linux/const.h>
6
7#include <asm/metag_mem.h>
8
9/* PAGE_SHIFT determines the page size */
10#if defined(CONFIG_PAGE_SIZE_4K)
11#define PAGE_SHIFT 12
12#elif defined(CONFIG_PAGE_SIZE_8K)
13#define PAGE_SHIFT 13
14#elif defined(CONFIG_PAGE_SIZE_16K)
15#define PAGE_SHIFT 14
16#endif
17
18#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
19#define PAGE_MASK (~(PAGE_SIZE-1))
20
21#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
22# define HPAGE_SHIFT 13
23#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
24# define HPAGE_SHIFT 14
25#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
26# define HPAGE_SHIFT 15
27#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
28# define HPAGE_SHIFT 16
29#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
30# define HPAGE_SHIFT 17
31#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
32# define HPAGE_SHIFT 18
33#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
34# define HPAGE_SHIFT 19
35#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
36# define HPAGE_SHIFT 20
37#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
38# define HPAGE_SHIFT 21
39#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
40# define HPAGE_SHIFT 22
41#endif
42
43#ifdef CONFIG_HUGETLB_PAGE
44# define HPAGE_SIZE (1UL << HPAGE_SHIFT)
45# define HPAGE_MASK (~(HPAGE_SIZE-1))
46# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
47/*
48 * We define our own hugetlb_get_unmapped_area so we don't corrupt 2nd level
49 * page tables with normal pages in them.
50 */
51# define HUGEPT_SHIFT (22)
52# define HUGEPT_ALIGN (1 << HUGEPT_SHIFT)
53# define HUGEPT_MASK (HUGEPT_ALIGN - 1)
54# define ALIGN_HUGEPT(x) ALIGN(x, HUGEPT_ALIGN)
55# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
56#endif
57
58#ifndef __ASSEMBLY__
59
60/* On the Meta, we would like to know if the address (heap) we have is
61 * in local or global space.
62 */
63#define is_global_space(addr) ((addr) > 0x7fffffff)
64#define is_local_space(addr) (!is_global_space(addr))
65
66extern void clear_page(void *to);
67extern void copy_page(void *to, void *from);
68
69#define clear_user_page(page, vaddr, pg) clear_page(page)
70#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
71
72/*
73 * These are used to make use of C type-checking..
74 */
75typedef struct { unsigned long pte; } pte_t;
76typedef struct { unsigned long pgd; } pgd_t;
77typedef struct { unsigned long pgprot; } pgprot_t;
78typedef struct page *pgtable_t;
79
80#define pte_val(x) ((x).pte)
81#define pgd_val(x) ((x).pgd)
82#define pgprot_val(x) ((x).pgprot)
83
84#define __pte(x) ((pte_t) { (x) })
85#define __pgd(x) ((pgd_t) { (x) })
86#define __pgprot(x) ((pgprot_t) { (x) })
87
88/* The kernel must now ALWAYS live at either 0xC0000000 or 0x40000000 - that
89 * being either global or local space.
90 */
91#define PAGE_OFFSET (CONFIG_PAGE_OFFSET)
92
93#if PAGE_OFFSET >= LINGLOBAL_BASE
94#define META_MEMORY_BASE LINGLOBAL_BASE
95#define META_MEMORY_LIMIT LINGLOBAL_LIMIT
96#else
97#define META_MEMORY_BASE LINLOCAL_BASE
98#define META_MEMORY_LIMIT LINLOCAL_LIMIT
99#endif
100
101/* Offset between physical and virtual mapping of kernel memory. */
102extern unsigned int meta_memoffset;
103
104#define __pa(x) ((unsigned long)(((unsigned long)(x)) - meta_memoffset))
105#define __va(x) ((void *)((unsigned long)(((unsigned long)(x)) + meta_memoffset)))
106
107extern unsigned long pfn_base;
108#define ARCH_PFN_OFFSET (pfn_base)
109#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
110#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
111#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
112#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
113#ifdef CONFIG_FLATMEM
114extern unsigned long max_pfn;
115extern unsigned long min_low_pfn;
116#define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_pfn)
117#endif
118
119#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
120
121#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
122 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
123
124#include <asm-generic/memory_model.h>
125#include <asm-generic/getorder.h>
126
127#endif /* __ASSMEBLY__ */
128
129#endif /* _METAG_PAGE_H */
diff --git a/arch/metag/include/asm/perf_event.h b/arch/metag/include/asm/perf_event.h
deleted file mode 100644
index 105bbff0149f..000000000000
--- a/arch/metag/include/asm/perf_event.h
+++ /dev/null
@@ -1,4 +0,0 @@
1#ifndef __ASM_METAG_PERF_EVENT_H
2#define __ASM_METAG_PERF_EVENT_H
3
4#endif /* __ASM_METAG_PERF_EVENT_H */
diff --git a/arch/metag/include/asm/pgalloc.h b/arch/metag/include/asm/pgalloc.h
deleted file mode 100644
index 0b9d95d78b61..000000000000
--- a/arch/metag/include/asm/pgalloc.h
+++ /dev/null
@@ -1,83 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _METAG_PGALLOC_H
3#define _METAG_PGALLOC_H
4
5#include <linux/threads.h>
6#include <linux/mm.h>
7
8#define pmd_populate_kernel(mm, pmd, pte) \
9 set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
10
11#define pmd_populate(mm, pmd, pte) \
12 set_pmd(pmd, __pmd(_PAGE_TABLE | page_to_phys(pte)))
13
14#define pmd_pgtable(pmd) pmd_page(pmd)
15
16/*
17 * Allocate and free page tables.
18 */
19#ifdef CONFIG_METAG_META21_MMU
20static inline void pgd_ctor(pgd_t *pgd)
21{
22 memcpy(pgd + USER_PTRS_PER_PGD,
23 swapper_pg_dir + USER_PTRS_PER_PGD,
24 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
25}
26#else
27#define pgd_ctor(x) do { } while (0)
28#endif
29
30static inline pgd_t *pgd_alloc(struct mm_struct *mm)
31{
32 pgd_t *pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
33 if (pgd)
34 pgd_ctor(pgd);
35 return pgd;
36}
37
38static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
39{
40 free_page((unsigned long)pgd);
41}
42
43static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
44 unsigned long address)
45{
46 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
47 return pte;
48}
49
50static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
51 unsigned long address)
52{
53 struct page *pte;
54 pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
55 if (!pte)
56 return NULL;
57 if (!pgtable_page_ctor(pte)) {
58 __free_page(pte);
59 return NULL;
60 }
61 return pte;
62}
63
64static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
65{
66 free_page((unsigned long)pte);
67}
68
69static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
70{
71 pgtable_page_dtor(pte);
72 __free_page(pte);
73}
74
75#define __pte_free_tlb(tlb, pte, addr) \
76 do { \
77 pgtable_page_dtor(pte); \
78 tlb_remove_page((tlb), (pte)); \
79 } while (0)
80
81#define check_pgt_cache() do { } while (0)
82
83#endif
diff --git a/arch/metag/include/asm/pgtable-bits.h b/arch/metag/include/asm/pgtable-bits.h
deleted file mode 100644
index 5f6b82282a41..000000000000
--- a/arch/metag/include/asm/pgtable-bits.h
+++ /dev/null
@@ -1,105 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Meta page table definitions.
4 */
5
6#ifndef _METAG_PGTABLE_BITS_H
7#define _METAG_PGTABLE_BITS_H
8
9#include <asm/metag_mem.h>
10
11/*
12 * Definitions for MMU descriptors
13 *
14 * These are the hardware bits in the MMCU pte entries.
15 * Derived from the Meta toolkit headers.
16 */
17#define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT
18#define _PAGE_WRITE MMCU_ENTRY_WR_BIT
19#define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT
20/* Write combine bit - this can cause writes to occur out of order */
21#define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT
22/* Sys coherent bit - this bit is never used by Linux */
23#define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT
24#define _PAGE_ALWAYS_ZERO_1 0x020
25#define _PAGE_CACHE_CTRL0 0x040
26#define _PAGE_CACHE_CTRL1 0x080
27#define _PAGE_ALWAYS_ZERO_2 0x100
28#define _PAGE_ALWAYS_ZERO_3 0x200
29#define _PAGE_ALWAYS_ZERO_4 0x400
30#define _PAGE_ALWAYS_ZERO_5 0x800
31
32/* These are software bits that we stuff into the gaps in the hardware
33 * pte entries that are not used. Note, these DO get stored in the actual
34 * hardware, but the hardware just does not use them.
35 */
36#define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1
37#define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2
38
39/* Pages owned, and protected by, the kernel. */
40#define _PAGE_KERNEL _PAGE_PRIV
41
42/* No cacheing of this page */
43#define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
44/* burst cacheing - good for data streaming */
45#define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
46/* One cache way per thread */
47#define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
48/* Full on cacheing */
49#define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
50
51#define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
52
53/* which bits are used for cache control ... */
54#define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
55 _PAGE_WR_COMBINE)
56
57/* This is a mask of the bits that pte_modify is allowed to change. */
58#define _PAGE_CHG_MASK (PAGE_MASK)
59
60#define _PAGE_SZ_SHIFT 1
61#define _PAGE_SZ_4K (0x0)
62#define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT)
63#define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT)
64#define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT)
65#define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT)
66#define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT)
67#define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT)
68#define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT)
69#define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT)
70#define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT)
71#define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT)
72#define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT)
73
74#if defined(CONFIG_PAGE_SIZE_4K)
75#define _PAGE_SZ (_PAGE_SZ_4K)
76#elif defined(CONFIG_PAGE_SIZE_8K)
77#define _PAGE_SZ (_PAGE_SZ_8K)
78#elif defined(CONFIG_PAGE_SIZE_16K)
79#define _PAGE_SZ (_PAGE_SZ_16K)
80#endif
81#define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT)
82
83#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
84# define _PAGE_SZHUGE (_PAGE_SZ_8K)
85#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
86# define _PAGE_SZHUGE (_PAGE_SZ_16K)
87#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
88# define _PAGE_SZHUGE (_PAGE_SZ_32K)
89#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
90# define _PAGE_SZHUGE (_PAGE_SZ_64K)
91#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
92# define _PAGE_SZHUGE (_PAGE_SZ_128K)
93#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
94# define _PAGE_SZHUGE (_PAGE_SZ_256K)
95#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
96# define _PAGE_SZHUGE (_PAGE_SZ_512K)
97#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
98# define _PAGE_SZHUGE (_PAGE_SZ_1M)
99#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
100# define _PAGE_SZHUGE (_PAGE_SZ_2M)
101#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
102# define _PAGE_SZHUGE (_PAGE_SZ_4M)
103#endif
104
105#endif /* _METAG_PGTABLE_BITS_H */
diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h
deleted file mode 100644
index a3422f06c03b..000000000000
--- a/arch/metag/include/asm/pgtable.h
+++ /dev/null
@@ -1,270 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Macros and functions to manipulate Meta page tables.
4 */
5
6#ifndef _METAG_PGTABLE_H
7#define _METAG_PGTABLE_H
8
9#include <asm/pgtable-bits.h>
10#define __ARCH_USE_5LEVEL_HACK
11#include <asm-generic/pgtable-nopmd.h>
12
13/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
14#if PAGE_OFFSET >= LINGLOBAL_BASE
15#define CONSISTENT_START 0xF7000000
16#define CONSISTENT_END 0xF73FFFFF
17#define VMALLOC_START 0xF8000000
18#define VMALLOC_END 0xFFFEFFFF
19#else
20#define CONSISTENT_START 0x77000000
21#define CONSISTENT_END 0x773FFFFF
22#define VMALLOC_START 0x78000000
23#define VMALLOC_END 0x7FFFFFFF
24#endif
25
26/*
27 * The Linux memory management assumes a three-level page table setup. On
28 * Meta, we use that, but "fold" the mid level into the top-level page
29 * table.
30 */
31
32/* PGDIR_SHIFT determines the size of the area a second-level page table can
33 * map. This is always 4MB.
34 */
35
36#define PGDIR_SHIFT 22
37#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
38#define PGDIR_MASK (~(PGDIR_SIZE-1))
39
40/*
41 * Entries per page directory level: we use a two-level, so
42 * we don't really have any PMD directory physically. First level tables
43 * always map 2Gb (local or global) at a granularity of 4MB, second-level
44 * tables map 4MB with a granularity between 4MB and 4kB (between 1 and
45 * 1024 entries).
46 */
47#define PTRS_PER_PTE (PGDIR_SIZE/PAGE_SIZE)
48#define HPTRS_PER_PTE (PGDIR_SIZE/HPAGE_SIZE)
49#define PTRS_PER_PGD 512
50
51#define USER_PTRS_PER_PGD 256
52#define FIRST_USER_ADDRESS META_MEMORY_BASE
53#define FIRST_USER_PGD_NR pgd_index(FIRST_USER_ADDRESS)
54
55#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
56 _PAGE_CACHEABLE)
57
58#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
59 _PAGE_ACCESSED | _PAGE_CACHEABLE)
60#define PAGE_SHARED_C PAGE_SHARED
61#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
62 _PAGE_CACHEABLE)
63#define PAGE_COPY_C PAGE_COPY
64
65#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
66 _PAGE_CACHEABLE)
67#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
68 _PAGE_ACCESSED | _PAGE_WRITE | \
69 _PAGE_CACHEABLE | _PAGE_KERNEL)
70
71#define __P000 PAGE_NONE
72#define __P001 PAGE_READONLY
73#define __P010 PAGE_COPY
74#define __P011 PAGE_COPY
75#define __P100 PAGE_READONLY
76#define __P101 PAGE_READONLY
77#define __P110 PAGE_COPY_C
78#define __P111 PAGE_COPY_C
79
80#define __S000 PAGE_NONE
81#define __S001 PAGE_READONLY
82#define __S010 PAGE_SHARED
83#define __S011 PAGE_SHARED
84#define __S100 PAGE_READONLY
85#define __S101 PAGE_READONLY
86#define __S110 PAGE_SHARED_C
87#define __S111 PAGE_SHARED_C
88
89#ifndef __ASSEMBLY__
90
91#include <asm/page.h>
92
93/* zero page used for uninitialized stuff */
94extern unsigned long empty_zero_page;
95#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
96
97/* Certain architectures need to do special things when pte's
98 * within a page table are directly modified. Thus, the following
99 * hook is made available.
100 */
101#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
102#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
103
104#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
105
106#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
107
108#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
109
110#define pte_none(x) (!pte_val(x))
111#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
112#define pte_clear(mm, addr, xp) do { pte_val(*(xp)) = 0; } while (0)
113
114#define pmd_none(x) (!pmd_val(x))
115#define pmd_bad(x) ((pmd_val(x) & ~(PAGE_MASK | _PAGE_SZ_MASK)) \
116 != (_PAGE_TABLE & ~_PAGE_SZ_MASK))
117#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
118#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
119
120#define pte_page(x) pfn_to_page(pte_pfn(x))
121
122/*
123 * The following only work if pte_present() is true.
124 * Undefined behaviour if not..
125 */
126
127static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
128static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
129static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
130static inline int pte_special(pte_t pte) { return 0; }
131
132static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= (~_PAGE_WRITE); return pte; }
133static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
134static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
135static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; }
136static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
137static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
138static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
139static inline pte_t pte_mkhuge(pte_t pte) { return pte; }
140
141/*
142 * Macro and implementation to make a page protection as uncacheable.
143 */
144#define pgprot_writecombine(prot) \
145 __pgprot(pgprot_val(prot) & ~(_PAGE_CACHE_CTRL1 | _PAGE_CACHE_CTRL0))
146
147#define pgprot_noncached(prot) \
148 __pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE)
149
150
151/*
152 * Conversion functions: convert a page and protection to a page entry,
153 * and a page entry and page directory to the page they refer to.
154 */
155
156#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
157
158static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
159{
160 pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
161 return pte;
162}
163
164static inline unsigned long pmd_page_vaddr(pmd_t pmd)
165{
166 unsigned long paddr = pmd_val(pmd) & PAGE_MASK;
167 if (!paddr)
168 return 0;
169 return (unsigned long)__va(paddr);
170}
171
172#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
173#define pmd_page_shift(pmd) (12 + ((pmd_val(pmd) & _PAGE_SZ_MASK) \
174 >> _PAGE_SZ_SHIFT))
175#define pmd_num_ptrs(pmd) (PGDIR_SIZE >> pmd_page_shift(pmd))
176
177/*
178 * Each pgd is only 2k, mapping 2Gb (local or global). If we're in global
179 * space drop the top bit before indexing the pgd.
180 */
181#if PAGE_OFFSET >= LINGLOBAL_BASE
182#define pgd_index(address) ((((address) & ~0x80000000) >> PGDIR_SHIFT) \
183 & (PTRS_PER_PGD-1))
184#else
185#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
186#endif
187
188#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
189
190#define pgd_offset_k(address) pgd_offset(&init_mm, address)
191
192#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
193
194/* Find an entry in the second-level page table.. */
195#if !defined(CONFIG_HUGETLB_PAGE)
196 /* all pages are of size (1 << PAGE_SHIFT), so no need to read 1st level pt */
197# define pte_index(pmd, address) \
198 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
199#else
200 /* some pages are huge, so read 1st level pt to find out */
201# define pte_index(pmd, address) \
202 (((address) >> pmd_page_shift(pmd)) & (pmd_num_ptrs(pmd) - 1))
203#endif
204#define pte_offset_kernel(dir, address) \
205 ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(*(dir), address))
206#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
207#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
208
209#define pte_unmap(pte) do { } while (0)
210#define pte_unmap_nested(pte) do { } while (0)
211
212#define pte_ERROR(e) \
213 pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
214#define pgd_ERROR(e) \
215 pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
216
217/*
218 * Meta doesn't have any external MMU info: the kernel page
219 * tables contain all the necessary information.
220 */
221static inline void update_mmu_cache(struct vm_area_struct *vma,
222 unsigned long address, pte_t *pte)
223{
224}
225
226/*
227 * Encode and decode a swap entry (must be !pte_none(e) && !pte_present(e))
228 * Since PAGE_PRESENT is bit 1, we can use the bits above that.
229 */
230#define __swp_type(x) (((x).val >> 1) & 0xff)
231#define __swp_offset(x) ((x).val >> 10)
232#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | \
233 ((offset) << 10) })
234#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
235#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
236
237#define kern_addr_valid(addr) (1)
238
239/*
240 * No page table caches to initialise
241 */
242#define pgtable_cache_init() do { } while (0)
243
244extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
245void paging_init(unsigned long mem_end);
246
247#ifdef CONFIG_METAG_META12
248/* This is a workaround for an issue in Meta 1 cores. These cores cache
249 * invalid entries in the TLB so we always need to flush whenever we add
250 * a new pte. Unfortunately we can only flush the whole TLB not shoot down
251 * single entries so this is sub-optimal. This implementation ensures that
252 * we will get a flush at the second attempt, so we may still get repeated
253 * faults, we just don't overflow the kernel stack handling them.
254 */
255#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
256#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
257({ \
258 int __changed = !pte_same(*(__ptep), __entry); \
259 if (__changed) { \
260 set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
261 } \
262 flush_tlb_page(__vma, __address); \
263 __changed; \
264})
265#endif
266
267#include <asm-generic/pgtable.h>
268
269#endif /* __ASSEMBLY__ */
270#endif /* _METAG_PGTABLE_H */
diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
deleted file mode 100644
index 9a0c502cd4a0..000000000000
--- a/arch/metag/include/asm/processor.h
+++ /dev/null
@@ -1,201 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C) 2005,2006,2007,2008 Imagination Technologies
4 */
5
6#ifndef __ASM_METAG_PROCESSOR_H
7#define __ASM_METAG_PROCESSOR_H
8
9#include <linux/atomic.h>
10
11#include <asm/page.h>
12#include <asm/ptrace.h>
13#include <asm/metag_regs.h>
14
15/*
16 * Default implementation of macro that returns current
17 * instruction pointer ("program counter").
18 */
19#define current_text_addr() ({ __label__ _l; _l: &&_l; })
20
21/* The task stops where the kernel starts */
22#define TASK_SIZE PAGE_OFFSET
23/* Add an extra page of padding at the top of the stack for the guard page. */
24#define STACK_TOP (TASK_SIZE - PAGE_SIZE)
25#define STACK_TOP_MAX STACK_TOP
26/* Maximum virtual space for stack */
27#define STACK_SIZE_MAX (CONFIG_MAX_STACK_SIZE_MB*1024*1024)
28
29/* This decides where the kernel will search for a free chunk of vm
30 * space during mmap's.
31 */
32#define TASK_UNMAPPED_BASE META_MEMORY_BASE
33
34typedef struct {
35 unsigned long seg;
36} mm_segment_t;
37
38#ifdef CONFIG_METAG_FPU
39struct meta_fpu_context {
40 TBICTXEXTFPU fpstate;
41 union {
42 struct {
43 TBICTXEXTBB4 fx8_15;
44 TBICTXEXTFPACC fpacc;
45 } fx8_15;
46 struct {
47 TBICTXEXTFPACC fpacc;
48 TBICTXEXTBB4 unused;
49 } nofx8_15;
50 } extfpstate;
51 bool needs_restore;
52};
53#else
54struct meta_fpu_context {};
55#endif
56
57#ifdef CONFIG_METAG_DSP
58struct meta_ext_context {
59 struct {
60 TBIEXTCTX ctx;
61 TBICTXEXTBB8 bb8;
62 TBIDUAL ax[TBICTXEXTAXX_BYTES / sizeof(TBIDUAL)];
63 TBICTXEXTHL2 hl2;
64 TBICTXEXTTDPR ext;
65 TBICTXEXTRP6 rp;
66 } regs;
67
68 /* DSPRAM A and B save areas. */
69 void *ram[2];
70
71 /* ECH encoded size of DSPRAM save areas. */
72 unsigned int ram_sz[2];
73};
74#else
75struct meta_ext_context {};
76#endif
77
78struct thread_struct {
79 PTBICTX kernel_context;
80 /* A copy of the user process Sig.SaveMask. */
81 unsigned int user_flags;
82 struct meta_fpu_context *fpu_context;
83 void __user *tls_ptr;
84 unsigned short int_depth;
85 unsigned short txdefr_failure;
86 struct meta_ext_context *dsp_context;
87};
88
89#define INIT_THREAD { \
90 NULL, /* kernel_context */ \
91 0, /* user_flags */ \
92 NULL, /* fpu_context */ \
93 NULL, /* tls_ptr */ \
94 1, /* int_depth - we start in kernel */ \
95 0, /* txdefr_failure */ \
96 NULL, /* dsp_context */ \
97}
98
99/* Needed to make #define as we are referencing 'current', that is not visible
100 * yet.
101 *
102 * Stack layout is as below.
103
104 argc argument counter (integer)
105 argv[0] program name (pointer)
106 argv[1...N] program args (pointers)
107 argv[argc-1] end of args (integer)
108 NULL
109 env[0...N] environment variables (pointers)
110 NULL
111
112 */
113#define start_thread(regs, pc, usp) do { \
114 unsigned int *argc = (unsigned int *) bprm->exec; \
115 current->thread.int_depth = 1; \
116 /* Force this process down to user land */ \
117 regs->ctx.SaveMask = TBICTX_PRIV_BIT; \
118 regs->ctx.CurrPC = pc; \
119 regs->ctx.AX[0].U0 = usp; \
120 regs->ctx.DX[3].U1 = *((int *)argc); /* argc */ \
121 regs->ctx.DX[3].U0 = (int)((int *)argc + 1); /* argv */ \
122 regs->ctx.DX[2].U1 = (int)((int *)argc + \
123 regs->ctx.DX[3].U1 + 2); /* envp */ \
124 regs->ctx.DX[2].U0 = 0; /* rtld_fini */ \
125} while (0)
126
127/* Forward declaration, a strange C thing */
128struct task_struct;
129
130/* Free all resources held by a thread. */
131static inline void release_thread(struct task_struct *dead_task)
132{
133}
134
135/*
136 * Return saved PC of a blocked thread.
137 */
138#define thread_saved_pc(tsk) \
139 ((unsigned long)(tsk)->thread.kernel_context->CurrPC)
140#define thread_saved_sp(tsk) \
141 ((unsigned long)(tsk)->thread.kernel_context->AX[0].U0)
142#define thread_saved_fp(tsk) \
143 ((unsigned long)(tsk)->thread.kernel_context->AX[1].U0)
144
145unsigned long get_wchan(struct task_struct *p);
146
147#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ctx.CurrPC)
148#define KSTK_ESP(tsk) (task_pt_regs(tsk)->ctx.AX[0].U0)
149
150#define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0)
151
152#define cpu_relax() barrier()
153
154extern void setup_priv(void);
155
156static inline unsigned int hard_processor_id(void)
157{
158 unsigned int id;
159
160 asm volatile ("MOV %0, TXENABLE\n"
161 "AND %0, %0, %1\n"
162 "LSR %0, %0, %2\n"
163 : "=&d" (id)
164 : "I" (TXENABLE_THREAD_BITS),
165 "K" (TXENABLE_THREAD_S)
166 );
167
168 return id;
169}
170
171#define OP3_EXIT 0
172
173#define HALT_OK 0
174#define HALT_PANIC -1
175
176/*
177 * Halt (stop) the hardware thread. This instruction sequence is the
178 * standard way to cause a Meta hardware thread to exit. The exit code
179 * is pushed onto the stack which is interpreted by the debug adapter.
180 */
181static inline void hard_processor_halt(int exit_code)
182{
183 asm volatile ("MOV D1Ar1, %0\n"
184 "MOV D0Ar6, %1\n"
185 "MSETL [A0StP],D0Ar6,D0Ar4,D0Ar2\n"
186 "1:\n"
187 "SWITCH #0xC30006\n"
188 "B 1b\n"
189 : : "r" (exit_code), "K" (OP3_EXIT));
190}
191
192/* Set these hooks to call SoC specific code to restart/halt/power off. */
193extern void (*soc_restart)(char *cmd);
194extern void (*soc_halt)(void);
195
196extern void show_trace(struct task_struct *tsk, unsigned long *sp,
197 struct pt_regs *regs);
198
199extern const struct seq_operations cpuinfo_op;
200
201#endif
diff --git a/arch/metag/include/asm/ptrace.h b/arch/metag/include/asm/ptrace.h
deleted file mode 100644
index 9074f254c9ca..000000000000
--- a/arch/metag/include/asm/ptrace.h
+++ /dev/null
@@ -1,61 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _METAG_PTRACE_H
3#define _METAG_PTRACE_H
4
5#include <linux/compiler.h>
6#include <uapi/asm/ptrace.h>
7#include <asm/tbx.h>
8
9#ifndef __ASSEMBLY__
10
11/* this struct defines the way the registers are stored on the
12 stack during a system call. */
13
14struct pt_regs {
15 TBICTX ctx;
16 TBICTXEXTCB0 extcb0[5];
17};
18
19#define user_mode(regs) (((regs)->ctx.SaveMask & TBICTX_PRIV_BIT) > 0)
20
21#define instruction_pointer(regs) ((unsigned long)(regs)->ctx.CurrPC)
22#define profile_pc(regs) instruction_pointer(regs)
23
24#define task_pt_regs(task) \
25 ((struct pt_regs *)(task_stack_page(task) + \
26 sizeof(struct thread_info)))
27
28#define current_pt_regs() \
29 ((struct pt_regs *)((char *)current_thread_info() + \
30 sizeof(struct thread_info)))
31
32int syscall_trace_enter(struct pt_regs *regs);
33void syscall_trace_leave(struct pt_regs *regs);
34
35/* copy a struct user_gp_regs out to user */
36int metag_gp_regs_copyout(const struct pt_regs *regs,
37 unsigned int pos, unsigned int count,
38 void *kbuf, void __user *ubuf);
39/* copy a struct user_gp_regs in from user */
40int metag_gp_regs_copyin(struct pt_regs *regs,
41 unsigned int pos, unsigned int count,
42 const void *kbuf, const void __user *ubuf);
43/* copy a struct user_cb_regs out to user */
44int metag_cb_regs_copyout(const struct pt_regs *regs,
45 unsigned int pos, unsigned int count,
46 void *kbuf, void __user *ubuf);
47/* copy a struct user_cb_regs in from user */
48int metag_cb_regs_copyin(struct pt_regs *regs,
49 unsigned int pos, unsigned int count,
50 const void *kbuf, const void __user *ubuf);
51/* copy a struct user_rp_state out to user */
52int metag_rp_state_copyout(const struct pt_regs *regs,
53 unsigned int pos, unsigned int count,
54 void *kbuf, void __user *ubuf);
55/* copy a struct user_rp_state in from user */
56int metag_rp_state_copyin(struct pt_regs *regs,
57 unsigned int pos, unsigned int count,
58 const void *kbuf, const void __user *ubuf);
59
60#endif /* __ASSEMBLY__ */
61#endif /* _METAG_PTRACE_H */
diff --git a/arch/metag/include/asm/setup.h b/arch/metag/include/asm/setup.h
deleted file mode 100644
index 504621d79ef5..000000000000
--- a/arch/metag/include/asm/setup.h
+++ /dev/null
@@ -1,10 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_METAG_SETUP_H
3#define _ASM_METAG_SETUP_H
4
5#include <uapi/asm/setup.h>
6
7extern const struct machine_desc *setup_machine_fdt(void *dt);
8void per_cpu_trap_init(unsigned long);
9extern void __init dump_machine_table(void);
10#endif /* _ASM_METAG_SETUP_H */
diff --git a/arch/metag/include/asm/smp.h b/arch/metag/include/asm/smp.h
deleted file mode 100644
index 8d3683d83680..000000000000
--- a/arch/metag/include/asm/smp.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_SMP_H
3#define __ASM_SMP_H
4
5#include <linux/cpumask.h>
6
7#define raw_smp_processor_id() (current_thread_info()->cpu)
8
9enum ipi_msg_type {
10 IPI_CALL_FUNC,
11 IPI_RESCHEDULE,
12};
13
14extern void arch_send_call_function_single_ipi(int cpu);
15extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
16
17asmlinkage void secondary_start_kernel(void);
18
19extern void secondary_startup(void);
20
21#ifdef CONFIG_HOTPLUG_CPU
22extern void __cpu_die(unsigned int cpu);
23extern int __cpu_disable(void);
24extern void cpu_die(void);
25#endif
26
27extern void smp_init_cpus(void);
28#endif /* __ASM_SMP_H */
diff --git a/arch/metag/include/asm/sparsemem.h b/arch/metag/include/asm/sparsemem.h
deleted file mode 100644
index 2942894bace5..000000000000
--- a/arch/metag/include/asm/sparsemem.h
+++ /dev/null
@@ -1,14 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_METAG_SPARSEMEM_H
3#define __ASM_METAG_SPARSEMEM_H
4
5/*
6 * SECTION_SIZE_BITS 2^N: how big each section will be
7 * MAX_PHYSADDR_BITS 2^N: how much physical address space we have
8 * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
9 */
10#define SECTION_SIZE_BITS 26
11#define MAX_PHYSADDR_BITS 32
12#define MAX_PHYSMEM_BITS 32
13
14#endif /* __ASM_METAG_SPARSEMEM_H */
diff --git a/arch/metag/include/asm/spinlock.h b/arch/metag/include/asm/spinlock.h
deleted file mode 100644
index 4497c232d9c1..000000000000
--- a/arch/metag/include/asm/spinlock.h
+++ /dev/null
@@ -1,19 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_SPINLOCK_H
3#define __ASM_SPINLOCK_H
4
5#include <asm/barrier.h>
6#include <asm/processor.h>
7
8#ifdef CONFIG_METAG_ATOMICITY_LOCK1
9#include <asm/spinlock_lock1.h>
10#else
11#include <asm/spinlock_lnkget.h>
12#endif
13
14/*
15 * both lock1 and lnkget are test-and-set spinlocks with 0 unlocked and 1
16 * locked.
17 */
18
19#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/metag/include/asm/spinlock_lnkget.h b/arch/metag/include/asm/spinlock_lnkget.h
deleted file mode 100644
index dfd780eab350..000000000000
--- a/arch/metag/include/asm/spinlock_lnkget.h
+++ /dev/null
@@ -1,213 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_SPINLOCK_LNKGET_H
3#define __ASM_SPINLOCK_LNKGET_H
4
5/*
6 * None of these asm statements clobber memory as LNKSET writes around
7 * the cache so the memory it modifies cannot safely be read by any means
8 * other than these accessors.
9 */
10
11static inline int arch_spin_is_locked(arch_spinlock_t *lock)
12{
13 int ret;
14
15 asm volatile ("LNKGETD %0, [%1]\n"
16 "TST %0, #1\n"
17 "MOV %0, #1\n"
18 "XORZ %0, %0, %0\n"
19 : "=&d" (ret)
20 : "da" (&lock->lock)
21 : "cc");
22 return ret;
23}
24
25static inline void arch_spin_lock(arch_spinlock_t *lock)
26{
27 int tmp;
28
29 asm volatile ("1: LNKGETD %0,[%1]\n"
30 " TST %0, #1\n"
31 " ADD %0, %0, #1\n"
32 " LNKSETDZ [%1], %0\n"
33 " BNZ 1b\n"
34 " DEFR %0, TXSTAT\n"
35 " ANDT %0, %0, #HI(0x3f000000)\n"
36 " CMPT %0, #HI(0x02000000)\n"
37 " BNZ 1b\n"
38 : "=&d" (tmp)
39 : "da" (&lock->lock)
40 : "cc");
41
42 smp_mb();
43}
44
45/* Returns 0 if failed to acquire lock */
46static inline int arch_spin_trylock(arch_spinlock_t *lock)
47{
48 int tmp;
49
50 asm volatile (" LNKGETD %0,[%1]\n"
51 " TST %0, #1\n"
52 " ADD %0, %0, #1\n"
53 " LNKSETDZ [%1], %0\n"
54 " BNZ 1f\n"
55 " DEFR %0, TXSTAT\n"
56 " ANDT %0, %0, #HI(0x3f000000)\n"
57 " CMPT %0, #HI(0x02000000)\n"
58 " MOV %0, #1\n"
59 "1: XORNZ %0, %0, %0\n"
60 : "=&d" (tmp)
61 : "da" (&lock->lock)
62 : "cc");
63
64 smp_mb();
65
66 return tmp;
67}
68
69static inline void arch_spin_unlock(arch_spinlock_t *lock)
70{
71 smp_mb();
72
73 asm volatile (" SETD [%0], %1\n"
74 :
75 : "da" (&lock->lock), "da" (0)
76 : "memory");
77}
78
79/*
80 * RWLOCKS
81 *
82 *
83 * Write locks are easy - we just set bit 31. When unlocking, we can
84 * just write zero since the lock is exclusively held.
85 */
86
87static inline void arch_write_lock(arch_rwlock_t *rw)
88{
89 int tmp;
90
91 asm volatile ("1: LNKGETD %0,[%1]\n"
92 " CMP %0, #0\n"
93 " ADD %0, %0, %2\n"
94 " LNKSETDZ [%1], %0\n"
95 " BNZ 1b\n"
96 " DEFR %0, TXSTAT\n"
97 " ANDT %0, %0, #HI(0x3f000000)\n"
98 " CMPT %0, #HI(0x02000000)\n"
99 " BNZ 1b\n"
100 : "=&d" (tmp)
101 : "da" (&rw->lock), "bd" (0x80000000)
102 : "cc");
103
104 smp_mb();
105}
106
107static inline int arch_write_trylock(arch_rwlock_t *rw)
108{
109 int tmp;
110
111 asm volatile (" LNKGETD %0,[%1]\n"
112 " CMP %0, #0\n"
113 " ADD %0, %0, %2\n"
114 " LNKSETDZ [%1], %0\n"
115 " BNZ 1f\n"
116 " DEFR %0, TXSTAT\n"
117 " ANDT %0, %0, #HI(0x3f000000)\n"
118 " CMPT %0, #HI(0x02000000)\n"
119 " MOV %0,#1\n"
120 "1: XORNZ %0, %0, %0\n"
121 : "=&d" (tmp)
122 : "da" (&rw->lock), "bd" (0x80000000)
123 : "cc");
124
125 smp_mb();
126
127 return tmp;
128}
129
130static inline void arch_write_unlock(arch_rwlock_t *rw)
131{
132 smp_mb();
133
134 asm volatile (" SETD [%0], %1\n"
135 :
136 : "da" (&rw->lock), "da" (0)
137 : "memory");
138}
139
140/*
141 * Read locks are a bit more hairy:
142 * - Exclusively load the lock value.
143 * - Increment it.
144 * - Store new lock value if positive, and we still own this location.
145 * If the value is negative, we've already failed.
146 * - If we failed to store the value, we want a negative result.
147 * - If we failed, try again.
148 * Unlocking is similarly hairy. We may have multiple read locks
149 * currently active. However, we know we won't have any write
150 * locks.
151 */
152static inline void arch_read_lock(arch_rwlock_t *rw)
153{
154 int tmp;
155
156 asm volatile ("1: LNKGETD %0,[%1]\n"
157 " ADDS %0, %0, #1\n"
158 " LNKSETDPL [%1], %0\n"
159 " BMI 1b\n"
160 " DEFR %0, TXSTAT\n"
161 " ANDT %0, %0, #HI(0x3f000000)\n"
162 " CMPT %0, #HI(0x02000000)\n"
163 " BNZ 1b\n"
164 : "=&d" (tmp)
165 : "da" (&rw->lock)
166 : "cc");
167
168 smp_mb();
169}
170
171static inline void arch_read_unlock(arch_rwlock_t *rw)
172{
173 int tmp;
174
175 smp_mb();
176
177 asm volatile ("1: LNKGETD %0,[%1]\n"
178 " SUB %0, %0, #1\n"
179 " LNKSETD [%1], %0\n"
180 " DEFR %0, TXSTAT\n"
181 " ANDT %0, %0, #HI(0x3f000000)\n"
182 " CMPT %0, #HI(0x02000000)\n"
183 " BNZ 1b\n"
184 : "=&d" (tmp)
185 : "da" (&rw->lock)
186 : "cc", "memory");
187}
188
189static inline int arch_read_trylock(arch_rwlock_t *rw)
190{
191 int tmp;
192
193 asm volatile (" LNKGETD %0,[%1]\n"
194 " ADDS %0, %0, #1\n"
195 " LNKSETDPL [%1], %0\n"
196 " BMI 1f\n"
197 " DEFR %0, TXSTAT\n"
198 " ANDT %0, %0, #HI(0x3f000000)\n"
199 " CMPT %0, #HI(0x02000000)\n"
200 " MOV %0,#1\n"
201 " BZ 2f\n"
202 "1: MOV %0,#0\n"
203 "2:\n"
204 : "=&d" (tmp)
205 : "da" (&rw->lock)
206 : "cc");
207
208 smp_mb();
209
210 return tmp;
211}
212
213#endif /* __ASM_SPINLOCK_LNKGET_H */
diff --git a/arch/metag/include/asm/spinlock_lock1.h b/arch/metag/include/asm/spinlock_lock1.h
deleted file mode 100644
index c0bd81bbe18c..000000000000
--- a/arch/metag/include/asm/spinlock_lock1.h
+++ /dev/null
@@ -1,165 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_SPINLOCK_LOCK1_H
3#define __ASM_SPINLOCK_LOCK1_H
4
5#include <asm/bug.h>
6#include <asm/global_lock.h>
7
8static inline int arch_spin_is_locked(arch_spinlock_t *lock)
9{
10 int ret;
11
12 barrier();
13 ret = lock->lock;
14 WARN_ON(ret != 0 && ret != 1);
15 return ret;
16}
17
18static inline void arch_spin_lock(arch_spinlock_t *lock)
19{
20 unsigned int we_won = 0;
21 unsigned long flags;
22
23again:
24 __global_lock1(flags);
25 if (lock->lock == 0) {
26 fence();
27 lock->lock = 1;
28 we_won = 1;
29 }
30 __global_unlock1(flags);
31 if (we_won == 0)
32 goto again;
33 WARN_ON(lock->lock != 1);
34}
35
36/* Returns 0 if failed to acquire lock */
37static inline int arch_spin_trylock(arch_spinlock_t *lock)
38{
39 unsigned long flags;
40 unsigned int ret;
41
42 __global_lock1(flags);
43 ret = lock->lock;
44 if (ret == 0) {
45 fence();
46 lock->lock = 1;
47 }
48 __global_unlock1(flags);
49 return (ret == 0);
50}
51
52static inline void arch_spin_unlock(arch_spinlock_t *lock)
53{
54 barrier();
55 WARN_ON(!lock->lock);
56 lock->lock = 0;
57}
58
59/*
60 * RWLOCKS
61 *
62 *
63 * Write locks are easy - we just set bit 31. When unlocking, we can
64 * just write zero since the lock is exclusively held.
65 */
66
67static inline void arch_write_lock(arch_rwlock_t *rw)
68{
69 unsigned long flags;
70 unsigned int we_won = 0;
71
72again:
73 __global_lock1(flags);
74 if (rw->lock == 0) {
75 fence();
76 rw->lock = 0x80000000;
77 we_won = 1;
78 }
79 __global_unlock1(flags);
80 if (we_won == 0)
81 goto again;
82 WARN_ON(rw->lock != 0x80000000);
83}
84
85static inline int arch_write_trylock(arch_rwlock_t *rw)
86{
87 unsigned long flags;
88 unsigned int ret;
89
90 __global_lock1(flags);
91 ret = rw->lock;
92 if (ret == 0) {
93 fence();
94 rw->lock = 0x80000000;
95 }
96 __global_unlock1(flags);
97
98 return (ret == 0);
99}
100
101static inline void arch_write_unlock(arch_rwlock_t *rw)
102{
103 barrier();
104 WARN_ON(rw->lock != 0x80000000);
105 rw->lock = 0;
106}
107
108/*
109 * Read locks are a bit more hairy:
110 * - Exclusively load the lock value.
111 * - Increment it.
112 * - Store new lock value if positive, and we still own this location.
113 * If the value is negative, we've already failed.
114 * - If we failed to store the value, we want a negative result.
115 * - If we failed, try again.
116 * Unlocking is similarly hairy. We may have multiple read locks
117 * currently active. However, we know we won't have any write
118 * locks.
119 */
120static inline void arch_read_lock(arch_rwlock_t *rw)
121{
122 unsigned long flags;
123 unsigned int we_won = 0, ret;
124
125again:
126 __global_lock1(flags);
127 ret = rw->lock;
128 if (ret < 0x80000000) {
129 fence();
130 rw->lock = ret + 1;
131 we_won = 1;
132 }
133 __global_unlock1(flags);
134 if (!we_won)
135 goto again;
136}
137
138static inline void arch_read_unlock(arch_rwlock_t *rw)
139{
140 unsigned long flags;
141 unsigned int ret;
142
143 __global_lock1(flags);
144 fence();
145 ret = rw->lock--;
146 __global_unlock1(flags);
147 WARN_ON(ret == 0);
148}
149
150static inline int arch_read_trylock(arch_rwlock_t *rw)
151{
152 unsigned long flags;
153 unsigned int ret;
154
155 __global_lock1(flags);
156 ret = rw->lock;
157 if (ret < 0x80000000) {
158 fence();
159 rw->lock = ret + 1;
160 }
161 __global_unlock1(flags);
162 return (ret < 0x80000000);
163}
164
165#endif /* __ASM_SPINLOCK_LOCK1_H */
diff --git a/arch/metag/include/asm/spinlock_types.h b/arch/metag/include/asm/spinlock_types.h
deleted file mode 100644
index cd197f1bed59..000000000000
--- a/arch/metag/include/asm/spinlock_types.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_METAG_SPINLOCK_TYPES_H
3#define _ASM_METAG_SPINLOCK_TYPES_H
4
5#ifndef __LINUX_SPINLOCK_TYPES_H
6# error "please don't include this file directly"
7#endif
8
9typedef struct {
10 volatile unsigned int lock;
11} arch_spinlock_t;
12
13#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
14
15typedef struct {
16 volatile unsigned int lock;
17} arch_rwlock_t;
18
19#define __ARCH_RW_LOCK_UNLOCKED { 0 }
20
21#endif /* _ASM_METAG_SPINLOCK_TYPES_H */
diff --git a/arch/metag/include/asm/stacktrace.h b/arch/metag/include/asm/stacktrace.h
deleted file mode 100644
index f45e3cb2bbb5..000000000000
--- a/arch/metag/include/asm/stacktrace.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_STACKTRACE_H
3#define __ASM_STACKTRACE_H
4
5struct stackframe {
6 unsigned long fp;
7 unsigned long sp;
8 unsigned long lr;
9 unsigned long pc;
10};
11
12struct metag_frame {
13 unsigned long fp;
14 unsigned long lr;
15};
16
17extern int unwind_frame(struct stackframe *frame);
18extern void walk_stackframe(struct stackframe *frame,
19 int (*fn)(struct stackframe *, void *), void *data);
20
21#endif /* __ASM_STACKTRACE_H */
diff --git a/arch/metag/include/asm/string.h b/arch/metag/include/asm/string.h
deleted file mode 100644
index 86f9614d5fc6..000000000000
--- a/arch/metag/include/asm/string.h
+++ /dev/null
@@ -1,14 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _METAG_STRING_H_
3#define _METAG_STRING_H_
4
5#define __HAVE_ARCH_MEMSET
6extern void *memset(void *__s, int __c, size_t __count);
7
8#define __HAVE_ARCH_MEMCPY
9void *memcpy(void *__to, __const__ void *__from, size_t __n);
10
11#define __HAVE_ARCH_MEMMOVE
12extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
13
14#endif /* _METAG_STRING_H_ */
diff --git a/arch/metag/include/asm/switch.h b/arch/metag/include/asm/switch.h
deleted file mode 100644
index 1fd6a587c844..000000000000
--- a/arch/metag/include/asm/switch.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * Copyright (C) 2012 Imagination Technologies Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#ifndef _ASM_METAG_SWITCH_H
11#define _ASM_METAG_SWITCH_H
12
13/* metag SWITCH codes */
14#define __METAG_SW_PERM_BREAK 0x400002 /* compiled in breakpoint */
15#define __METAG_SW_SYS_LEGACY 0x440000 /* legacy system calls */
16#define __METAG_SW_SYS 0x440001 /* system calls */
17
18/* metag SWITCH instruction encoding */
19#define __METAG_SW_ENCODING(TYPE) (0xaf000000 | (__METAG_SW_##TYPE))
20
21#endif /* _ASM_METAG_SWITCH_H */
diff --git a/arch/metag/include/asm/syscall.h b/arch/metag/include/asm/syscall.h
deleted file mode 100644
index 24fc97939f77..000000000000
--- a/arch/metag/include/asm/syscall.h
+++ /dev/null
@@ -1,104 +0,0 @@
1/*
2 * Access to user system call parameters and results
3 *
4 * Copyright (C) 2008 Imagination Technologies Ltd.
5 *
6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions
8 * of the GNU General Public License v.2.
9 *
10 * See asm-generic/syscall.h for descriptions of what we must do here.
11 */
12
13#ifndef _ASM_METAG_SYSCALL_H
14#define _ASM_METAG_SYSCALL_H
15
16#include <linux/sched.h>
17#include <linux/err.h>
18#include <linux/uaccess.h>
19
20#include <asm/switch.h>
21
22static inline long syscall_get_nr(struct task_struct *task,
23 struct pt_regs *regs)
24{
25 unsigned long insn;
26
27 /*
28 * FIXME there's no way to find out how we got here other than to
29 * examine the memory at the PC to see if it is a syscall
30 * SWITCH instruction.
31 */
32 if (get_user(insn, (unsigned long *)(regs->ctx.CurrPC - 4)))
33 return -1;
34
35 if (insn == __METAG_SW_ENCODING(SYS))
36 return regs->ctx.DX[0].U1;
37 else
38 return -1L;
39}
40
41static inline void syscall_rollback(struct task_struct *task,
42 struct pt_regs *regs)
43{
44 /* do nothing */
45}
46
47static inline long syscall_get_error(struct task_struct *task,
48 struct pt_regs *regs)
49{
50 unsigned long error = regs->ctx.DX[0].U0;
51 return IS_ERR_VALUE(error) ? error : 0;
52}
53
54static inline long syscall_get_return_value(struct task_struct *task,
55 struct pt_regs *regs)
56{
57 return regs->ctx.DX[0].U0;
58}
59
60static inline void syscall_set_return_value(struct task_struct *task,
61 struct pt_regs *regs,
62 int error, long val)
63{
64 regs->ctx.DX[0].U0 = (long) error ?: val;
65}
66
67static inline void syscall_get_arguments(struct task_struct *task,
68 struct pt_regs *regs,
69 unsigned int i, unsigned int n,
70 unsigned long *args)
71{
72 unsigned int reg, j;
73 BUG_ON(i + n > 6);
74
75 for (j = i, reg = 6 - i; j < (i + n); j++, reg--) {
76 if (reg % 2)
77 args[j] = regs->ctx.DX[(reg + 1) / 2].U0;
78 else
79 args[j] = regs->ctx.DX[reg / 2].U1;
80 }
81}
82
83static inline void syscall_set_arguments(struct task_struct *task,
84 struct pt_regs *regs,
85 unsigned int i, unsigned int n,
86 const unsigned long *args)
87{
88 unsigned int reg;
89 BUG_ON(i + n > 6);
90
91 for (reg = 6 - i; i < (i + n); i++, reg--) {
92 if (reg % 2)
93 regs->ctx.DX[(reg + 1) / 2].U0 = args[i];
94 else
95 regs->ctx.DX[reg / 2].U1 = args[i];
96 }
97}
98
99#define NR_syscalls __NR_syscalls
100
101/* generic syscall table */
102extern const void *sys_call_table[];
103
104#endif /* _ASM_METAG_SYSCALL_H */
diff --git a/arch/metag/include/asm/syscalls.h b/arch/metag/include/asm/syscalls.h
deleted file mode 100644
index eac0cf120323..000000000000
--- a/arch/metag/include/asm/syscalls.h
+++ /dev/null
@@ -1,40 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_METAG_SYSCALLS_H
3#define _ASM_METAG_SYSCALLS_H
4
5#include <linux/compiler.h>
6#include <linux/linkage.h>
7#include <linux/types.h>
8#include <linux/signal.h>
9
10/* kernel/signal.c */
11#define sys_rt_sigreturn sys_rt_sigreturn
12asmlinkage long sys_rt_sigreturn(void);
13
14#include <asm-generic/syscalls.h>
15
16/* kernel/sys_metag.c */
17asmlinkage int sys_metag_setglobalbit(char __user *, int);
18asmlinkage void sys_metag_set_fpu_flags(unsigned int);
19asmlinkage int sys_metag_set_tls(void __user *);
20asmlinkage void *sys_metag_get_tls(void);
21
22asmlinkage long sys_truncate64_metag(const char __user *, unsigned long,
23 unsigned long);
24asmlinkage long sys_ftruncate64_metag(unsigned int, unsigned long,
25 unsigned long);
26asmlinkage long sys_fadvise64_64_metag(int, unsigned long, unsigned long,
27 unsigned long, unsigned long, int);
28asmlinkage long sys_readahead_metag(int, unsigned long, unsigned long, size_t);
29asmlinkage ssize_t sys_pread64_metag(unsigned long, char __user *, size_t,
30 unsigned long, unsigned long);
31asmlinkage ssize_t sys_pwrite64_metag(unsigned long, char __user *, size_t,
32 unsigned long, unsigned long);
33asmlinkage long sys_sync_file_range_metag(int, unsigned long, unsigned long,
34 unsigned long, unsigned long,
35 unsigned int);
36
37int do_work_pending(struct pt_regs *regs, unsigned int thread_flags,
38 int syscall);
39
40#endif /* _ASM_METAG_SYSCALLS_H */
diff --git a/arch/metag/include/asm/tbx.h b/arch/metag/include/asm/tbx.h
deleted file mode 100644
index 5cd2a6c86223..000000000000
--- a/arch/metag/include/asm/tbx.h
+++ /dev/null
@@ -1,1420 +0,0 @@
1/*
2 * asm/tbx.h
3 *
4 * Copyright (C) 2000-2012 Imagination Technologies.
5 *
6 * This program is free software; you can redistribute it and/or modify it under
7 * the terms of the GNU General Public License version 2 as published by the
8 * Free Software Foundation.
9 *
10 * Thread binary interface header
11 */
12
13#ifndef _ASM_METAG_TBX_H_
14#define _ASM_METAG_TBX_H_
15
16/* for CACHEW_* values */
17#include <asm/metag_isa.h>
18/* for LINSYSEVENT_* addresses */
19#include <asm/metag_mem.h>
20
21#ifdef TBI_1_4
22#ifndef TBI_MUTEXES_1_4
23#define TBI_MUTEXES_1_4
24#endif
25#ifndef TBI_SEMAPHORES_1_4
26#define TBI_SEMAPHORES_1_4
27#endif
28#ifndef TBI_ASYNC_SWITCH_1_4
29#define TBI_ASYNC_SWITCH_1_4
30#endif
31#ifndef TBI_FASTINT_1_4
32#define TBI_FASTINT_1_4
33#endif
34#endif
35
36
37/* Id values in the TBI system describe a segment using an arbitrary
38 integer value and flags in the bottom 8 bits, the SIGPOLL value is
39 used in cases where control over blocking or polling behaviour is
40 needed. */
41#define TBID_SIGPOLL_BIT 0x02 /* Set bit in an Id value to poll vs block */
42/* Extended segment identifiers use strings in the string table */
43#define TBID_IS_SEGSTR( Id ) (((Id) & (TBID_SEGTYPE_BITS>>1)) == 0)
44
45/* Segment identifiers contain the following related bit-fields */
46#define TBID_SEGTYPE_BITS 0x0F /* One of the predefined segment types */
47#define TBID_SEGTYPE_S 0
48#define TBID_SEGSCOPE_BITS 0x30 /* Indicates the scope of the segment */
49#define TBID_SEGSCOPE_S 4
50#define TBID_SEGGADDR_BITS 0xC0 /* Indicates access possible via pGAddr */
51#define TBID_SEGGADDR_S 6
52
53/* Segments of memory can only really contain a few types of data */
54#define TBID_SEGTYPE_TEXT 0x02 /* Code segment */
55#define TBID_SEGTYPE_DATA 0x04 /* Data segment */
56#define TBID_SEGTYPE_STACK 0x06 /* Stack segment */
57#define TBID_SEGTYPE_HEAP 0x0A /* Heap segment */
58#define TBID_SEGTYPE_ROOT 0x0C /* Root block segments */
59#define TBID_SEGTYPE_STRING 0x0E /* String table segment */
60
61/* Segments have one of three possible scopes */
62#define TBID_SEGSCOPE_INIT 0 /* Temporary area for initialisation phase */
63#define TBID_SEGSCOPE_LOCAL 1 /* Private to this thread */
64#define TBID_SEGSCOPE_GLOBAL 2 /* Shared globally throughout the system */
65#define TBID_SEGSCOPE_SHARED 3 /* Limited sharing between local/global */
66
67/* For segment specifier a further field in two of the remaining bits
68 indicates the usefulness of the pGAddr field in the segment descriptor
69 descriptor. */
70#define TBID_SEGGADDR_NULL 0 /* pGAddr is NULL -> SEGSCOPE_(LOCAL|INIT) */
71#define TBID_SEGGADDR_READ 1 /* Only read via pGAddr */
72#define TBID_SEGGADDR_WRITE 2 /* Full access via pGAddr */
73#define TBID_SEGGADDR_EXEC 3 /* Only execute via pGAddr */
74
75/* The following values are common to both segment and signal Id value and
76 live in the top 8 bits of the Id values. */
77
78/* The ISTAT bit indicates if segments are related to interrupt vs
79 background level interfaces a thread can still handle all triggers at
80 either level, but can also split these up if it wants to. */
81#define TBID_ISTAT_BIT 0x01000000
82#define TBID_ISTAT_S 24
83
84/* Privilege needed to access a segment is indicated by the next bit.
85
86 This bit is set to mirror the current privilege level when starting a
87 search for a segment - setting it yourself toggles the automatically
88 generated state which is only useful to emulate unprivileged behaviour
89 or access unprivileged areas of memory while at privileged level. */
90#define TBID_PSTAT_BIT 0x02000000
91#define TBID_PSTAT_S 25
92
93/* The top six bits of a signal/segment specifier identifies a thread within
94 the system. This represents a segments owner. */
95#define TBID_THREAD_BITS 0xFC000000
96#define TBID_THREAD_S 26
97
98/* Special thread id values */
99#define TBID_THREAD_NULL (-32) /* Never matches any thread/segment id used */
100#define TBID_THREAD_GLOBAL (-31) /* Things global to all threads */
101#define TBID_THREAD_HOST ( -1) /* Host interface */
102#define TBID_THREAD_EXTIO (TBID_THREAD_HOST) /* Host based ExtIO i/f */
103
104/* Virtual Id's are used for external thread interface structures or the
105 above special Id's */
106#define TBID_IS_VIRTTHREAD( Id ) ((Id) < 0)
107
108/* Real Id's are used for actual hardware threads that are local */
109#define TBID_IS_REALTHREAD( Id ) ((Id) >= 0)
110
111/* Generate a segment Id given Thread, Scope, and Type */
112#define TBID_SEG( Thread, Scope, Type ) (\
113 ((Thread)<<TBID_THREAD_S) + ((Scope)<<TBID_SEGSCOPE_S) + (Type))
114
115/* Generate a signal Id given Thread and SigNum */
116#define TBID_SIG( Thread, SigNum ) (\
117 ((Thread)<<TBID_THREAD_S) + ((SigNum)<<TBID_SIGNUM_S) + TBID_SIGNAL_BIT)
118
119/* Generate an Id that solely represents a thread - useful for cache ops */
120#define TBID_THD( Thread ) ((Thread)<<TBID_THREAD_S)
121#define TBID_THD_NULL ((TBID_THREAD_NULL) <<TBID_THREAD_S)
122#define TBID_THD_GLOBAL ((TBID_THREAD_GLOBAL)<<TBID_THREAD_S)
123
124/* Common exception handler (see TBID_SIGNUM_XXF below) receives hardware
125 generated fault codes TBIXXF_SIGNUM_xxF in it's SigNum parameter */
126#define TBIXXF_SIGNUM_IIF 0x01 /* General instruction fault */
127#define TBIXXF_SIGNUM_PGF 0x02 /* Privilege general fault */
128#define TBIXXF_SIGNUM_DHF 0x03 /* Data access watchpoint HIT */
129#define TBIXXF_SIGNUM_IGF 0x05 /* Code fetch general read failure */
130#define TBIXXF_SIGNUM_DGF 0x07 /* Data access general read/write fault */
131#define TBIXXF_SIGNUM_IPF 0x09 /* Code fetch page fault */
132#define TBIXXF_SIGNUM_DPF 0x0B /* Data access page fault */
133#define TBIXXF_SIGNUM_IHF 0x0D /* Instruction breakpoint HIT */
134#define TBIXXF_SIGNUM_DWF 0x0F /* Data access read-only fault */
135
136/* Hardware signals communicate events between processing levels within a
137 single thread all the _xxF cases are exceptions and are routed via a
138 common exception handler, _SWx are software trap events and kicks including
139 __TBISignal generated kicks, and finally _TRx are hardware triggers */
140#define TBID_SIGNUM_SW0 0x00 /* SWITCH GROUP 0 - Per thread user */
141#define TBID_SIGNUM_SW1 0x01 /* SWITCH GROUP 1 - Per thread system */
142#define TBID_SIGNUM_SW2 0x02 /* SWITCH GROUP 2 - Internal global request */
143#define TBID_SIGNUM_SW3 0x03 /* SWITCH GROUP 3 - External global request */
144#ifdef TBI_1_4
145#define TBID_SIGNUM_FPE 0x04 /* Deferred exception - Any IEEE 754 exception */
146#define TBID_SIGNUM_FPD 0x05 /* Deferred exception - Denormal exception */
147/* Reserved 0x6 for a reserved deferred exception */
148#define TBID_SIGNUM_BUS 0x07 /* Deferred exception - Bus Error */
149/* Reserved 0x08-0x09 */
150#else
151/* Reserved 0x04-0x09 */
152#endif
153/* Reserved 0x0A-0x0F */
154#define TBID_SIGNUM_TRT 0x10 /* Timer trigger */
155#define TBID_SIGNUM_LWK 0x11 /* Low level kick */
156#define TBID_SIGNUM_XXF 0x12 /* Fault handler - receives ALL _xxF sigs */
157#ifdef TBI_1_4
158#define TBID_SIGNUM_DFR 0x13 /* Deferred Exception handler */
159#else
160#define TBID_SIGNUM_FPE 0x13 /* FPE Exception handler */
161#endif
162/* External trigger one group 0x14 to 0x17 - per thread */
163#define TBID_SIGNUM_TR1(Thread) (0x14+(Thread))
164#define TBID_SIGNUM_T10 0x14
165#define TBID_SIGNUM_T11 0x15
166#define TBID_SIGNUM_T12 0x16
167#define TBID_SIGNUM_T13 0x17
168/* External trigger two group 0x18 to 0x1b - per thread */
169#define TBID_SIGNUM_TR2(Thread) (0x18+(Thread))
170#define TBID_SIGNUM_T20 0x18
171#define TBID_SIGNUM_T21 0x19
172#define TBID_SIGNUM_T22 0x1A
173#define TBID_SIGNUM_T23 0x1B
174#define TBID_SIGNUM_TR3 0x1C /* External trigger N-4 (global) */
175#define TBID_SIGNUM_TR4 0x1D /* External trigger N-3 (global) */
176#define TBID_SIGNUM_TR5 0x1E /* External trigger N-2 (global) */
177#define TBID_SIGNUM_TR6 0x1F /* External trigger N-1 (global) */
178#define TBID_SIGNUM_MAX 0x1F
179
180/* Return the trigger register(TXMASK[I]/TXSTAT[I]) bits related to
181 each hardware signal, sometimes this is a many-to-one relationship. */
182#define TBI_TRIG_BIT(SigNum) (\
183 ((SigNum) >= TBID_SIGNUM_TRT) ? 1<<((SigNum)-TBID_SIGNUM_TRT) :\
184 ((SigNum) == TBID_SIGNUM_LWK) ? \
185 TXSTAT_KICK_BIT : TXSTATI_BGNDHALT_BIT )
186
187/* Return the hardware trigger vector number for entries in the
188 HWVEC0EXT table that will generate the required internal trigger. */
189#define TBI_TRIG_VEC(SigNum) (\
190 ((SigNum) >= TBID_SIGNUM_T10) ? ((SigNum)-TBID_SIGNUM_TRT) : -1)
191
192/* Default trigger masks for each thread at background/interrupt level */
193#define TBI_TRIGS_INIT( Thread ) (\
194 TXSTAT_KICK_BIT + TBI_TRIG_BIT(TBID_SIGNUM_TR1(Thread)) )
195#define TBI_INTS_INIT( Thread ) (\
196 TXSTAT_KICK_BIT + TXSTATI_BGNDHALT_BIT \
197 + TBI_TRIG_BIT(TBID_SIGNUM_TR2(Thread)) )
198
199#ifndef __ASSEMBLY__
200/* A spin-lock location is a zero-initialised location in memory */
201typedef volatile int TBISPIN, *PTBISPIN;
202
203/* A kick location is a hardware location you can write to
204 * in order to cause a kick
205 */
206typedef volatile int *PTBIKICK;
207
208#if defined(METAC_1_0) || defined(METAC_1_1)
209/* Macro to perform a kick */
210#define TBI_KICK( pKick ) do { pKick[0] = 1; } while (0)
211#else
212/* #define METAG_LIN_VALUES before including machine.h if required */
213#ifdef LINSYSEVENT_WR_COMBINE_FLUSH
214/* Macro to perform a kick - write combiners must be flushed */
215#define TBI_KICK( pKick ) do {\
216 volatile int *pFlush = (volatile int *) LINSYSEVENT_WR_COMBINE_FLUSH; \
217 pFlush[0] = 0; \
218 pKick[0] = 1; } while (0)
219#endif
220#endif /* if defined(METAC_1_0) || defined(METAC_1_1) */
221#endif /* ifndef __ASSEMBLY__ */
222
223#ifndef __ASSEMBLY__
224/* 64-bit dual unit state value */
225typedef struct _tbidual_tag_ {
226 /* 32-bit value from a pair of registers in data or address units */
227 int U0, U1;
228} TBIDUAL, *PTBIDUAL;
229#endif /* ifndef __ASSEMBLY__ */
230
231/* Byte offsets of fields within TBIDUAL */
232#define TBIDUAL_U0 (0)
233#define TBIDUAL_U1 (4)
234
235#define TBIDUAL_BYTES (8)
236
237#define TBICTX_CRIT_BIT 0x0001 /* ASync state saved in TBICTX */
238#define TBICTX_SOFT_BIT 0x0002 /* Sync state saved in TBICTX (other bits 0) */
239#ifdef TBI_FASTINT_1_4
240#define TBICTX_FINT_BIT 0x0004 /* Using Fast Interrupts */
241#endif
242#define TBICTX_FPAC_BIT 0x0010 /* FPU state in TBICTX, FPU active on entry */
243#define TBICTX_XMCC_BIT 0x0020 /* Bit to identify a MECC task */
244#define TBICTX_CBUF_BIT 0x0040 /* Hardware catch buffer flag from TXSTATUS */
245#define TBICTX_CBRP_BIT 0x0080 /* Read pipeline dirty from TXDIVTIME */
246#define TBICTX_XDX8_BIT 0x0100 /* Saved DX.8 to DX.15 too */
247#define TBICTX_XAXX_BIT 0x0200 /* Save remaining AX registers to AX.7 */
248#define TBICTX_XHL2_BIT 0x0400 /* Saved hardware loop registers too */
249#define TBICTX_XTDP_BIT 0x0800 /* Saved DSP registers too */
250#define TBICTX_XEXT_BIT 0x1000 /* Set if TBICTX.Ext.Ctx contains extended
251 state save area, otherwise TBICTX.Ext.AX2
252 just holds normal A0.2 and A1.2 states */
253#define TBICTX_WAIT_BIT 0x2000 /* Causes wait for trigger - sticky toggle */
254#define TBICTX_XCBF_BIT 0x4000 /* Catch buffer or RD extracted into TBICTX */
255#define TBICTX_PRIV_BIT 0x8000 /* Set if system uses 'privileged' model */
256
257#ifdef METAC_1_0
258#define TBICTX_XAX3_BIT 0x0200 /* Saved AX.5 to AX.7 for XAXX */
259#define TBICTX_AX_REGS 5 /* Ax.0 to Ax.4 are core GP regs on CHORUS */
260#else
261#define TBICTX_XAX4_BIT 0x0200 /* Saved AX.4 to AX.7 for XAXX */
262#define TBICTX_AX_REGS 4 /* Default is Ax.0 to Ax.3 */
263#endif
264
265#ifdef TBI_1_4
266#define TBICTX_CFGFPU_FX16_BIT 0x00010000 /* Save FX.8 to FX.15 too */
267
268/* The METAC_CORE_ID_CONFIG field indicates omitted DSP resources */
269#define METAC_COREID_CFGXCTX_MASK( Value ) (\
270 ( (((Value & METAC_COREID_CFGDSP_BITS)>> \
271 METAC_COREID_CFGDSP_S ) == METAC_COREID_CFGDSP_MIN) ? \
272 ~(TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+ \
273 TBICTX_XAXX_BIT+TBICTX_XDX8_BIT ) : ~0U ) )
274#endif
275
276/* Extended context state provides a standardised method for registering the
277 arguments required by __TBICtxSave to save the additional register states
278 currently in use by non general purpose code. The state of the __TBIExtCtx
279 variable in the static space of the thread forms an extension of the base
280 context of the thread.
281
282 If ( __TBIExtCtx.Ctx.SaveMask == 0 ) then pExt is assumed to be NULL and
283 the empty state of __TBIExtCtx is represented by the fact that
284 TBICTX.SaveMask does not have the bit TBICTX_XEXT_BIT set.
285
286 If ( __TBIExtCtx.Ctx.SaveMask != 0 ) then pExt should point at a suitably
287 sized extended context save area (usually at the end of the stack space
288 allocated by the current routine). This space should allow for the
289 displaced state of A0.2 and A1.2 to be saved along with the other extended
290 states indicated via __TBIExtCtx.Ctx.SaveMask. */
#ifndef __ASSEMBLY__
/* Extended context registration record; the union allows the whole state to
   be set/cleared atomically in one 64-bit (SETL/GETL) access via Val */
typedef union _tbiextctx_tag_ {
    /* Whole-record access for atomic 64-bit update */
    long long Val;
    /* Displaced A0.2/A1.2 state (when TBICTX_XEXT_BIT is not set) */
    TBIDUAL AX2;
    struct _tbiextctxext_tag {
#ifdef TBI_1_4
        short DspramSizes;   /* DSPRAM sizes. Encoding varies between
                                TBICtxAlloc and the ECH scheme. */
#else
        short Reserved0;
#endif
        short SaveMask;      /* Flag bits for state saved */
        PTBIDUAL pExt;       /* AX[2] state saved first plus Xxxx state */

    } Ctx;

} TBIEXTCTX, *PTBIEXTCTX;

/* Automatic registration of extended context save for __TBINestInts */
extern TBIEXTCTX __TBIExtCtx;
#endif /* ifndef __ASSEMBLY__ */
312
313/* Byte offsets of fields within TBIEXTCTX */
314#define TBIEXTCTX_AX2 (0)
315#define TBIEXTCTX_Ctx (0)
316#define TBIEXTCTX_Ctx_SaveMask (TBIEXTCTX_Ctx + 2)
317#define TBIEXTCTX_Ctx_pExt (TBIEXTCTX_Ctx + 2 + 2)
318
319/* Extended context data size calculation constants */
320#define TBICTXEXT_BYTES (8)
321#define TBICTXEXTBB8_BYTES (8*8)
322#define TBICTXEXTAX3_BYTES (3*8)
323#define TBICTXEXTAX4_BYTES (4*8)
324#ifdef METAC_1_0
325#define TBICTXEXTAXX_BYTES TBICTXEXTAX3_BYTES
326#else
327#define TBICTXEXTAXX_BYTES TBICTXEXTAX4_BYTES
328#endif
329#define TBICTXEXTHL2_BYTES (3*8)
330#define TBICTXEXTTDR_BYTES (27*8)
331#define TBICTXEXTTDP_BYTES TBICTXEXTTDR_BYTES
332
333#ifdef TBI_1_4
334#define TBICTXEXTFX8_BYTES (4*8)
335#define TBICTXEXTFPAC_BYTES (1*4 + 2*2 + 4*8)
336#define TBICTXEXTFACF_BYTES (3*8)
337#endif
338
339/* Maximum flag bits to be set via the TBICTX_EXTSET macro */
340#define TBICTXEXT_MAXBITS (TBICTX_XEXT_BIT| \
341 TBICTX_XDX8_BIT|TBICTX_XAXX_BIT|\
342 TBICTX_XHL2_BIT|TBICTX_XTDP_BIT )
343
344/* Maximum size of the extended context save area for current variant */
345#define TBICTXEXT_MAXBYTES (TBICTXEXT_BYTES+TBICTXEXTBB8_BYTES+\
346 TBICTXEXTAXX_BYTES+TBICTXEXTHL2_BYTES+\
347 TBICTXEXTTDP_BYTES )
348
349#ifdef TBI_FASTINT_1_4
350/* Maximum flag bits to be set via the TBICTX_EXTSET macro */
351#define TBICTX2EXT_MAXBITS (TBICTX_XDX8_BIT|TBICTX_XAXX_BIT|\
352 TBICTX_XHL2_BIT|TBICTX_XTDP_BIT )
353
354/* Maximum size of the extended context save area for current variant */
355#define TBICTX2EXT_MAXBYTES (TBICTXEXTBB8_BYTES+TBICTXEXTAXX_BYTES\
356 +TBICTXEXTHL2_BYTES+TBICTXEXTTDP_BYTES )
357#endif
358
359/* Specify extended resources being used by current routine, code must be
360 assembler generated to utilise extended resources-
361
362 MOV D0xxx,A0StP ; Perform alloca - routine should
363 ADD A0StP,A0StP,#SaveSize ; setup/use A0FrP to access locals
364 MOVT D1xxx,#SaveMask ; TBICTX_XEXT_BIT MUST be set
365 SETL [A1GbP+#OG(___TBIExtCtx)],D0xxx,D1xxx
366
367 NB: OG(___TBIExtCtx) is a special case supported for SETL/GETL operations
   on 64-bit sized structures only, other accesses must be based on use
369 of OGA(___TBIExtCtx).
370
371 At exit of routine-
372
373 MOV D0xxx,#0 ; Clear extended context save state
374 MOV D1xxx,#0
375 SETL [A1GbP+#OG(___TBIExtCtx)],D0xxx,D1xxx
376 SUB A0StP,A0StP,#SaveSize ; If original A0StP required
377
378 NB: Both the setting and clearing of the whole __TBIExtCtx MUST be done
379 atomically in one 64-bit write operation.
380
381 For simple interrupt handling only via __TBINestInts there should be no
382 impact of the __TBIExtCtx system. If pre-emptive scheduling is being
383 performed however (assuming __TBINestInts has already been called earlier
384 on) then the following logic will correctly call __TBICtxSave if required
385 and clear out the currently selected background task-
386
387 if ( __TBIExtCtx.Ctx.SaveMask & TBICTX_XEXT_BIT )
388 {
389 / * Store extended states in pCtx * /
390 State.Sig.SaveMask |= __TBIExtCtx.Ctx.SaveMask;
391
392 (void) __TBICtxSave( State, (void *) __TBIExtCtx.Ctx.pExt );
393 __TBIExtCtx.Val = 0;
394 }
395
396 and when restoring task states call __TBICtxRestore-
397
398 / * Restore state from pCtx * /
399 State.Sig.pCtx = pCtx;
400 State.Sig.SaveMask = pCtx->SaveMask;
401
402 if ( State.Sig.SaveMask & TBICTX_XEXT_BIT )
403 {
404 / * Restore extended states from pCtx * /
405 __TBIExtCtx.Val = pCtx->Ext.Val;
406
407 (void) __TBICtxRestore( State, (void *) __TBIExtCtx.Ctx.pExt );
408 }
409
410 */
411
412/* Critical thread state save area */
413#ifndef __ASSEMBLY__
/* Critical thread state save area. Field layout is ABI - it is mirrored by
   the TBICTX_* byte-offset macros below and accessed from assembler, so the
   order and sizes of fields must not change. */
typedef struct _tbictx_tag_ {
    /* TXSTATUS_FLAG_BITS and TXSTATUS_LSM_STEP_BITS from TXSTATUS */
    short Flags;
    /* Mask indicates any extended context state saved; 0 -> Never run */
    short SaveMask;
    /* Saved PC value */
    int CurrPC;
    /* Saved critical register states */
    TBIDUAL DX[8];
    /* Background control register states - for cores without catch buffer
       base in DIVTIME the TXSTATUS bits RPVALID and RPMASK are stored with
       the real state TXDIVTIME in CurrDIVTIME */
    int CurrRPT, CurrBPOBITS, CurrMODE, CurrDIVTIME;
    /* Saved AX register states */
    TBIDUAL AX[2];
    /* Extended context registration - see TBIEXTCTX above */
    TBIEXTCTX Ext;
    /* Remaining AX register pairs: 2 pairs on METAC_1_0 (TBICTX_AX_REGS==5),
       1 pair otherwise */
    TBIDUAL AX3[TBICTX_AX_REGS-3];

    /* Any CBUF state to be restored by a handler return must be stored here.
       Other extended state can be stored anywhere - see __TBICtxSave and
       __TBICtxRestore. */

} TBICTX, *PTBICTX;
437
#ifdef TBI_FASTINT_1_4
/* Critical state saved on fast-interrupt entry (TBI_FASTINT_1_4 cores) */
typedef struct _tbictx2_tag_ {
    TBIDUAL AX[2];   /* AU.0, AU.1 */
    TBIDUAL DX[2];   /* DU.0, DU.4 */
    int CurrMODE;
    int CurrRPT;
    int CurrSTATUS;
    void *CurrPC;    /* PC in PC address space */
} TBICTX2, *PTBICTX2;
/* TBICTX2 is followed by:
 * TBICTXEXTCB0 if TXSTATUS.CBMarker
 * TBIDUAL * TXSTATUS.IRPCount if TXSTATUS.IRPCount > 0
 * TBICTXGP if using __TBIStdRootIntHandler or __TBIStdCtxSwitchRootIntHandler
 */

/* Remaining general-purpose register state, stored separately from TBICTX2
   (see list above); layout mirrored by the TBICTXGP_* offsets below */
typedef struct _tbictxgp_tag_ {
    short DspramSizes;
    short SaveMask;
    void *pExt;
    TBIDUAL DX[6];   /* DU.1-DU.3, DU.5-DU.7 */
    TBIDUAL AX[2];   /* AU.2-AU.3 */
} TBICTXGP, *PTBICTXGP;

/* Byte offsets of fields within TBICTXGP, and its total size in bytes */
#define TBICTXGP_DspramSizes (0)
#define TBICTXGP_SaveMask (TBICTXGP_DspramSizes + 2)
#define TBICTXGP_MAX_BYTES (2 + 2 + 4 + 8*(6+2))

#endif
466#endif /* ifndef __ASSEMBLY__ */
467
468/* Byte offsets of fields within TBICTX */
469#define TBICTX_Flags (0)
470#define TBICTX_SaveMask (2)
471#define TBICTX_CurrPC (4)
472#define TBICTX_DX (2 + 2 + 4)
473#define TBICTX_CurrRPT (2 + 2 + 4 + 8 * 8)
474#define TBICTX_CurrMODE (2 + 2 + 4 + 8 * 8 + 4 + 4)
475#define TBICTX_AX (2 + 2 + 4 + 8 * 8 + 4 + 4 + 4 + 4)
476#define TBICTX_Ext (2 + 2 + 4 + 8 * 8 + 4 + 4 + 4 + 4 + 2 * 8)
477#define TBICTX_Ext_AX2 (TBICTX_Ext + TBIEXTCTX_AX2)
478#define TBICTX_Ext_AX2_U0 (TBICTX_Ext + TBIEXTCTX_AX2 + TBIDUAL_U0)
479#define TBICTX_Ext_AX2_U1 (TBICTX_Ext + TBIEXTCTX_AX2 + TBIDUAL_U1)
480#define TBICTX_Ext_Ctx_pExt (TBICTX_Ext + TBIEXTCTX_Ctx_pExt)
481#define TBICTX_Ext_Ctx_SaveMask (TBICTX_Ext + TBIEXTCTX_Ctx_SaveMask)
482
483#ifdef TBI_FASTINT_1_4
484#define TBICTX2_BYTES (8 * 2 + 8 * 2 + 4 + 4 + 4 + 4)
485#define TBICTXEXTCB0_BYTES (4 + 4 + 8)
486
487#define TBICTX2_CRIT_MAX_BYTES (TBICTX2_BYTES + TBICTXEXTCB0_BYTES + 6 * TBIDUAL_BYTES)
/* Step PC past a SWITCH: advance by 8 bytes when bit 0 of EXTRA is set,
   else by 4. Fully parenthesised so the ?: selects the increment; the
   previous form ((PC) + (EXTRA & 1) ? 8 : 4) bound '+' tighter than '?:'
   and evaluated to a bare 8 or 4 instead of an advanced PC. */
#define TBI_SWITCH_NEXT_PC(PC, EXTRA) ((PC) + (((EXTRA) & 1) ? 8 : 4))
489#endif
490
491#ifndef __ASSEMBLY__
492/* Extended thread state save areas - catch buffer state element */
493typedef struct _tbictxextcb0_tag_ {
494 /* Flags data and address value - see METAC_CATCH_VALUES in machine.h */
495 unsigned long CBFlags, CBAddr;
496 /* 64-bit data */
497 TBIDUAL CBData;
498
499} TBICTXEXTCB0, *PTBICTXEXTCB0;
500
501/* Read pipeline state saved on later cores after single catch buffer slot */
502typedef struct _tbictxextrp6_tag_ {
503 /* RPMask is TXSTATUS_RPMASK_BITS only, reserved is undefined */
504 unsigned long RPMask, Reserved0;
505 TBIDUAL CBData[6];
506
507} TBICTXEXTRP6, *PTBICTXEXTRP6;
508
509/* Extended thread state save areas - 8 DU register pairs */
510typedef struct _tbictxextbb8_tag_ {
511 /* Remaining Data unit registers in 64-bit pairs */
512 TBIDUAL UX[8];
513
514} TBICTXEXTBB8, *PTBICTXEXTBB8;
515
516/* Extended thread state save areas - 3 AU register pairs */
517typedef struct _tbictxextbb3_tag_ {
518 /* Remaining Address unit registers in 64-bit pairs */
519 TBIDUAL UX[3];
520
521} TBICTXEXTBB3, *PTBICTXEXTBB3;
522
523/* Extended thread state save areas - 4 AU register pairs or 4 FX pairs */
524typedef struct _tbictxextbb4_tag_ {
525 /* Remaining Address unit or FPU registers in 64-bit pairs */
526 TBIDUAL UX[4];
527
528} TBICTXEXTBB4, *PTBICTXEXTBB4;
529
530/* Extended thread state save areas - Hardware loop states (max 2) */
531typedef struct _tbictxexthl2_tag_ {
532 /* Hardware looping register states */
533 TBIDUAL Start, End, Count;
534
535} TBICTXEXTHL2, *PTBICTXEXTHL2;
536
537/* Extended thread state save areas - DSP register states */
538typedef struct _tbictxexttdp_tag_ {
539 /* DSP 32-bit accumulator register state (Bits 31:0 of ACX.0) */
540 TBIDUAL Acc32[1];
541 /* DSP > 32-bit accumulator bits 63:32 of ACX.0 (zero-extended) */
542 TBIDUAL Acc64[1];
543 /* Twiddle register state, and three phase increment states */
544 TBIDUAL PReg[4];
545 /* Modulo region size, padded to 64-bits */
546 int CurrMRSIZE, Reserved0;
547
548} TBICTXEXTTDP, *PTBICTXEXTTDP;
549
550/* Extended thread state save areas - DSP register states including DSP RAM */
551typedef struct _tbictxexttdpr_tag_ {
552 /* DSP 32-bit accumulator register state (Bits 31:0 of ACX.0) */
553 TBIDUAL Acc32[1];
554 /* DSP 40-bit accumulator register state (Bits 39:8 of ACX.0) */
555 TBIDUAL Acc40[1];
556 /* DSP RAM Pointers */
557 TBIDUAL RP0[2], WP0[2], RP1[2], WP1[2];
558 /* DSP RAM Increments */
559 TBIDUAL RPI0[2], WPI0[2], RPI1[2], WPI1[2];
560 /* Template registers */
561 unsigned long Tmplt[16];
562 /* Modulo address region size and DSP RAM module region sizes */
563 int CurrMRSIZE, CurrDRSIZE;
564
565} TBICTXEXTTDPR, *PTBICTXEXTTDPR;
566
567#ifdef TBI_1_4
568/* The METAC_ID_CORE register state is a marker for the FPU
569 state that is then stored after this core header structure. */
570#define TBICTXEXTFPU_CONFIG_MASK ( (METAC_COREID_NOFPACC_BIT+ \
571 METAC_COREID_CFGFPU_BITS ) << \
572 METAC_COREID_CONFIG_BITS )
573
574/* Recorded FPU exception state from TXDEFR in DefrFpu */
575#define TBICTXEXTFPU_DEFRFPU_MASK (TXDEFR_FPU_FE_BITS)
576
577/* Extended thread state save areas - FPU register states */
578typedef struct _tbictxextfpu_tag_ {
579 /* Stored METAC_CORE_ID CONFIG */
580 int CfgFpu;
581 /* Stored deferred TXDEFR bits related to FPU
582 *
583 * This is encoded as follows in order to fit into 16-bits:
584 * DefrFPU:15 - 14 <= 0
585 * :13 - 8 <= TXDEFR:21-16
586 * : 7 - 6 <= 0
587 * : 5 - 0 <= TXDEFR:5-0
588 */
589 short DefrFpu;
590
591 /* TXMODE bits related to FPU */
592 short ModeFpu;
593
594 /* FPU Even/Odd register states */
595 TBIDUAL FX[4];
596
597 /* if CfgFpu & TBICTX_CFGFPU_FX16_BIT -> 1 then TBICTXEXTBB4 holds FX.8-15 */
598 /* if CfgFpu & TBICTX_CFGFPU_NOACF_BIT -> 0 then TBICTXEXTFPACC holds state */
599} TBICTXEXTFPU, *PTBICTXEXTFPU;
600
601/* Extended thread state save areas - FPU accumulator state */
602typedef struct _tbictxextfpacc_tag_ {
603 /* FPU accumulator register state - three 64-bit parts */
604 TBIDUAL FAcc32[3];
605
606} TBICTXEXTFPACC, *PTBICTXEXTFPACC;
607#endif
608
609/* Prototype TBI structure */
610struct _tbi_tag_ ;
611
612/* A 64-bit return value used commonly in the TBI APIs */
typedef union _tbires_tag_ {
    /* Save and load this value to get/set the whole result quickly */
    long long Val;

    /* Parameter of a fnSigs or __TBICtx* call */
    struct _tbires_sig_tag_ {
        /* TXMASK[I] bits zeroed up to and including current trigger level */
        unsigned short TrigMask;
        /* Control bits for handlers - see PTBIAPIFN documentation below */
        unsigned short SaveMask;
        /* Pointer to the base register context save area of the thread */
        PTBICTX pCtx;
    } Sig;

    /* Result of TBIThrdPrivId call */
    struct _tbires_thrdprivid_tag_ {
        /* Basic thread identifier; just TBID_THREAD_BITS */
        int Id;
        /* Non thread-number bits; TBID_ISTAT_BIT+TBID_PSTAT_BIT */
        int Priv;
    } Thrd;

    /* Parameter and Result of a __TBISwitch call */
    struct _tbires_switch_tag_ {
        /* Parameter passed across context switch */
        void *pPara;
        /* Thread context of other Thread including restore flags */
        PTBICTX pCtx;
    } Switch;

    /* For extended S/W events only */
    struct _tbires_ccb_tag_ {
        void *pCCB;
        int COff;
    } CCB;

    struct _tbires_tlb_tag_ {
        int Leaf;  /* TLB Leaf data */
        int Flags; /* TLB Flags */
    } Tlb;

#ifdef TBI_FASTINT_1_4
    /* Parameter/result of fast-interrupt (TBI_FASTINT_1_4) handler calls */
    struct _tbires_intr_tag_ {
        short TrigMask;
        short SaveMask;
        PTBICTX2 pCtx;
    } Intr;
#endif

} TBIRES, *PTBIRES;
663#endif /* ifndef __ASSEMBLY__ */
664
665#ifndef __ASSEMBLY__
666/* Prototype for all signal handler functions, called via ___TBISyncTrigger or
667 ___TBIASyncTrigger.
668
669 State.Sig.TrigMask will indicate the bits set within TXMASKI at
670 the time of the handler call that have all been cleared to prevent
671 nested interrupt occurring immediately.
672
673 State.Sig.SaveMask is a bit-mask which will be set to Zero when a trigger
674 occurs at background level and TBICTX_CRIT_BIT and optionally
675 TBICTX_CBUF_BIT when a trigger occurs at interrupt level.
676
677 TBICTX_CBUF_BIT reflects the state of TXSTATUS_CBMARKER_BIT for
678 the interrupted background thread.
679
680 State.Sig.pCtx will point at a TBICTX structure generated to hold the
681 critical state of the interrupted thread at interrupt level and
682 should be set to NULL when called at background level.
683
684 Triggers will indicate the status of TXSTAT or TXSTATI sampled by the
685 code that called the handler.
686
687 Inst is defined as 'Inst' if the SigNum is TBID_SIGNUM_SWx and holds the
688 actual SWITCH instruction detected, in other cases the value of this
689 parameter is undefined.
690
691 pTBI points at the PTBI structure related to the thread and processing
692 level involved.
693
694 TBIRES return value at both processing levels is similar in terms of any
695 changes that the handler makes. By default the State argument value
696 passed in should be returned.
697
698 Sig.TrigMask value is bits to OR back into TXMASKI when the handler
699 completes to enable currently disabled interrupts.
700
701 Sig.SaveMask value is ignored.
702
703 Sig.pCtx is ignored.
704
705 */
706typedef TBIRES (*PTBIAPIFN)( TBIRES State, int SigNum,
707 int Triggers, int Inst,
708 volatile struct _tbi_tag_ *pTBI );
709#endif /* ifndef __ASSEMBLY__ */
710
711#ifndef __ASSEMBLY__
712/* The global memory map is described by a list of segment descriptors */
/* Memory-map segment descriptor; layout mirrored by the TBISEG_* byte
   offsets below, so field order and sizes must not change */
typedef volatile struct _tbiseg_tag_ {
    volatile struct _tbiseg_tag_ *pLink; /* Next descriptor in the list */
    int Id;             /* Id of the segment */
    TBISPIN Lock;       /* Spin-lock for struct (normally 0) */
    unsigned int Bytes; /* Size of region in bytes */
    void *pGAddr;       /* Base addr of region in global space */
    void *pLAddr;       /* Base addr of region in local space */
    int Data[2];        /* Segment specific data (may be extended) */

} TBISEG, *PTBISEG;
723#endif /* ifndef __ASSEMBLY__ */
724
725/* Offsets of fields in TBISEG structure */
726#define TBISEG_pLink ( 0)
727#define TBISEG_Id ( 4)
728#define TBISEG_Lock ( 8)
729#define TBISEG_Bytes (12)
730#define TBISEG_pGAddr (16)
731#define TBISEG_pLAddr (20)
732#define TBISEG_Data (24)
733
#ifndef __ASSEMBLY__
/* TBI root block for a thread/processing level (see __TBI below); layout
   mirrored by the TBI_* byte offsets that follow */
typedef volatile struct _tbi_tag_ {
    int SigMask;      /* Bits set to represent S/W events */
    PTBIKICK pKick;   /* Kick addr for S/W events */
    void *pCCB;       /* Extended S/W events */
    PTBISEG pSeg;     /* Related segment structure */
    PTBIAPIFN fnSigs[TBID_SIGNUM_MAX+1];/* Signal handler API table */
} *PTBI, TBI;
#endif /* ifndef __ASSEMBLY__ */
743
744/* Byte offsets of fields within TBI */
745#define TBI_SigMask (0)
746#define TBI_pKick (4)
747#define TBI_pCCB (8)
748#define TBI_pSeg (12)
749#define TBI_fnSigs (16)
750
751#ifdef TBI_1_4
752#ifndef __ASSEMBLY__
753/* This handler should be used for TBID_SIGNUM_DFR */
754extern TBIRES __TBIHandleDFR ( TBIRES State, int SigNum,
755 int Triggers, int Inst,
756 volatile struct _tbi_tag_ *pTBI );
757#endif
758#endif
759
760/* String table entry - special values */
761#define METAG_TBI_STRS (0x5300) /* Tag : If entry is valid */
762#define METAG_TBI_STRE (0x4500) /* Tag : If entry is end of table */
763#define METAG_TBI_STRG (0x4700) /* Tag : If entry is a gap */
764#define METAG_TBI_STRX (0x5A00) /* TransLen : If no translation present */
765
#ifndef __ASSEMBLY__
/* String table entry - Tag/TransLen normally hold the METAG_TBI_STR*
   special values defined above */
typedef volatile struct _tbistr_tag_ {
    short Bytes;    /* Length of entry in Bytes */
    short Tag;      /* Normally METAG_TBI_STRS(0x5300) */
    short Len;      /* Length of the string entry (incl null) */
    short TransLen; /* Normally METAG_TBI_STRX(0x5A00) */
    char String[8]; /* Zero terminated (may-be bigger) */

} TBISTR, *PTBISTR;
#endif /* ifndef __ASSEMBLY__ */
776
777/* Cache size information - available as fields of Data[1] of global heap
778 segment */
779#define METAG_TBI_ICACHE_SIZE_S 0 /* see comments below */
780#define METAG_TBI_ICACHE_SIZE_BITS 0x0000000F
781#define METAG_TBI_ICACHE_FILL_S 4
782#define METAG_TBI_ICACHE_FILL_BITS 0x000000F0
783#define METAG_TBI_DCACHE_SIZE_S 8
784#define METAG_TBI_DCACHE_SIZE_BITS 0x00000F00
785#define METAG_TBI_DCACHE_FILL_S 12
786#define METAG_TBI_DCACHE_FILL_BITS 0x0000F000
787
788/* METAG_TBI_xCACHE_SIZE
789 Describes the physical cache size rounded up to the next power of 2
790 relative to a 16K (2^14) cache. These sizes are encoded as a signed addend
791 to this base power of 2, for example
792 4K -> 2^12 -> -2 (i.e. 12-14)
793 8K -> 2^13 -> -1
794 16K -> 2^14 -> 0
795 32K -> 2^15 -> +1
796 64K -> 2^16 -> +2
797 128K -> 2^17 -> +3
798
799 METAG_TBI_xCACHE_FILL
800 Describes the physical cache size within the power of 2 area given by
801 the value above. For example a 10K cache may be represented as having
802 nearest size 16K with a fill of 10 sixteenths. This is encoded as the
803 number of unused 1/16ths, for example
804 0000 -> 0 -> 16/16
805 0001 -> 1 -> 15/16
806 0010 -> 2 -> 14/16
807 ...
808 1111 -> 15 -> 1/16
809 */
810
811#define METAG_TBI_CACHE_SIZE_BASE_LOG2 14
812
813/* Each declaration made by this macro generates a TBISTR entry */
814#ifndef __ASSEMBLY__
815#define TBISTR_DECL( Name, Str ) \
816 __attribute__ ((__section__ (".tbistr") )) const char Name[] = #Str
817#endif
818
819/* META timer values - see below for Timer support routines */
820#define TBI_TIMERWAIT_MIN (-16) /* Minimum 'recommended' period */
821#define TBI_TIMERWAIT_MAX (-0x7FFFFFFF) /* Maximum 'recommended' period */
822
823#ifndef __ASSEMBLY__
/* These macros allow direct access from C to any register known to the
   assembler or defined in machine.h. Example candidates are TXTACTCYC,
   TXIDLECYC, and TXPRIVEXT. Note that when higher level macros and routines
   like the timer and trigger handling features below these should be used in
   preference to this direct low-level access mechanism. */

/* Evaluate to the current value of core register Reg
   (GCC statement expression) */
#define TBI_GETREG( Reg ) __extension__ ({\
   int __GRValue; \
   __asm__ volatile ("MOV\t%0," #Reg "\t/* (*TBI_GETREG OK) */" : \
                     "=r" (__GRValue) ); \
   __GRValue; })

/* Write Value into core register Reg */
#define TBI_SETREG( Reg, Value ) do {\
   int __SRValue = Value; \
   __asm__ volatile ("MOV\t" #Reg ",%0\t/* (*TBI_SETREG OK) */" : \
                     : "r" (__SRValue) ); } while (0)

/* Exchange Value with core register Reg using the SWAP instruction;
   the previous register contents are written back into Value */
#define TBI_SWAPREG( Reg, Value ) do {\
   int __XRValue = (Value); \
   __asm__ volatile ("SWAP\t" #Reg ",%0\t/* (*TBI_SWAPREG OK) */" : \
                     "=r" (__XRValue) : "0" (__XRValue) ); \
   Value = __XRValue; } while (0)
845
846/* Obtain and/or release global critical section lock given that interrupts
847 are already disabled and/or should remain disabled. */
848#define TBI_NOINTSCRITON do {\
849 __asm__ volatile ("LOCK1\t\t/* (*TBI_NOINTSCRITON OK) */");} while (0)
850#define TBI_NOINTSCRITOFF do {\
851 __asm__ volatile ("LOCK0\t\t/* (*TBI_NOINTSCRITOFF OK) */");} while (0)
852/* Optimised in-lining versions of the above macros */
853
854#define TBI_LOCK( TrigState ) do {\
855 int __TRValue; \
856 int __ALOCKHI = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000; \
857 __asm__ volatile ("MOV %0,#0\t\t/* (*TBI_LOCK ... */\n\t" \
858 "SWAP\t%0,TXMASKI\t/* ... */\n\t" \
859 "LOCK2\t\t/* ... */\n\t" \
860 "SETD\t[%1+#0x40],D1RtP /* ... OK) */" : \
861 "=r&" (__TRValue) : "u" (__ALOCKHI) ); \
862 TrigState = __TRValue; } while (0)
863#define TBI_CRITON( TrigState ) do {\
864 int __TRValue; \
865 __asm__ volatile ("MOV %0,#0\t\t/* (*TBI_CRITON ... */\n\t" \
866 "SWAP\t%0,TXMASKI\t/* ... */\n\t" \
867 "LOCK1\t\t/* ... OK) */" : \
868 "=r" (__TRValue) ); \
869 TrigState = __TRValue; } while (0)
870
871#define TBI_INTSX( TrigState ) do {\
872 int __TRValue = TrigState; \
873 __asm__ volatile ("SWAP\t%0,TXMASKI\t/* (*TBI_INTSX OK) */" : \
874 "=r" (__TRValue) : "0" (__TRValue) ); \
875 TrigState = __TRValue; } while (0)
876
877#define TBI_UNLOCK( TrigState ) do {\
878 int __TRValue = TrigState; \
879 int __ALOCKHI = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000; \
880 __asm__ volatile ("SETD\t[%1+#0x00],D1RtP\t/* (*TBI_UNLOCK ... */\n\t" \
881 "LOCK0\t\t/* ... */\n\t" \
882 "MOV\tTXMASKI,%0\t/* ... OK) */" : \
883 : "r" (__TRValue), "u" (__ALOCKHI) ); } while (0)
884
885#define TBI_CRITOFF( TrigState ) do {\
886 int __TRValue = TrigState; \
887 __asm__ volatile ("LOCK0\t\t/* (*TBI_CRITOFF ... */\n\t" \
888 "MOV\tTXMASKI,%0\t/* ... OK) */" : \
889 : "r" (__TRValue) ); } while (0)
890
891#define TBI_TRIGSX( SrcDst ) do { TBI_SWAPREG( TXMASK, SrcDst );} while (0)
892
893/* Composite macros to perform logic ops on INTS or TRIGS masks */
894#define TBI_INTSOR( Bits ) do {\
895 int __TT = 0; TBI_INTSX(__TT); \
896 __TT |= (Bits); TBI_INTSX(__TT); } while (0)
897
898#define TBI_INTSAND( Bits ) do {\
899 int __TT = 0; TBI_INTSX(__TT); \
900 __TT &= (Bits); TBI_INTSX(__TT); } while (0)
901
#ifdef TBI_1_4
/* OR Bits into the deferred interrupt control register (CT.20) */
#define TBI_DEFRICTRLSOR( Bits ) do {\
   int __TT = TBI_GETREG( CT.20 ); \
   __TT |= (Bits); TBI_SETREG( CT.20, __TT); } while (0)

/* AND Bits into the deferred interrupt control register.
   NOTE(review): reads via the TXDEFR name but writes via CT.20, unlike the
   SOR variant which uses CT.20 for both - presumed aliases of the same
   register; confirm against the core register map. */
#define TBI_DEFRICTRLSAND( Bits ) do {\
   int __TT = TBI_GETREG( TXDEFR ); \
   __TT &= (Bits); TBI_SETREG( CT.20, __TT); } while (0)
#endif
911
912#define TBI_TRIGSOR( Bits ) do {\
913 int __TT = TBI_GETREG( TXMASK ); \
914 __TT |= (Bits); TBI_SETREG( TXMASK, __TT); } while (0)
915
916#define TBI_TRIGSAND( Bits ) do {\
917 int __TT = TBI_GETREG( TXMASK ); \
918 __TT &= (Bits); TBI_SETREG( TXMASK, __TT); } while (0)
919
920/* Macros to disable and re-enable interrupts using TBI_INTSX, deliberate
921 traps and exceptions can still be handled within the critical section. */
922#define TBI_STOPINTS( Value ) do {\
923 int __TT = TBI_GETREG( TXMASKI ); \
924 __TT &= TXSTATI_BGNDHALT_BIT; TBI_INTSX( __TT ); \
925 Value = __TT; } while (0)
926#define TBI_RESTINTS( Value ) do {\
927 int __TT = Value; TBI_INTSX( __TT ); } while (0)
928
929/* Return pointer to segment list at current privilege level */
930PTBISEG __TBISegList( void );
931
932/* Search the segment list for a match given Id, pStart can be NULL */
933PTBISEG __TBIFindSeg( PTBISEG pStart, int Id );
934
935/* Prepare a new segment structure using space from within another */
936PTBISEG __TBINewSeg( PTBISEG pFromSeg, int Id, unsigned int Bytes );
937
938/* Prepare a new segment using any global or local heap segments available */
939PTBISEG __TBIMakeNewSeg( int Id, unsigned int Bytes );
940
941/* Insert a new segment into the segment list so __TBIFindSeg can locate it */
942void __TBIAddSeg( PTBISEG pSeg );
943#define __TBIADDSEG_DEF /* Some versions failed to define this */
944
945/* Return Id of current thread; TBID_ISTAT_BIT+TBID_THREAD_BITS */
946int __TBIThreadId( void );
947
948/* Return TBIRES.Thrd data for current thread */
949TBIRES __TBIThrdPrivId( void );
950
951/* Return pointer to current threads TBI root block.
952 Id implies whether Int or Background root block is required */
953PTBI __TBI( int Id );
954
955/* Try to set Mask bit using the spin-lock protocol, return 0 if fails and
956 new state if succeeds */
957int __TBIPoll( PTBISPIN pLock, int Mask );
958
959/* Set Mask bits via the spin-lock protocol in *pLock, return new state */
960int __TBISpin( PTBISPIN pLock, int Mask );
961
962/* Default handler set up for all TBI.fnSigs entries during initialisation */
963TBIRES __TBIUnExpXXX( TBIRES State, int SigNum,
964 int Triggers, int Inst, PTBI pTBI );
965
966/* Call this routine to service triggers at background processing level. The
967 TBID_POLL_BIT of the Id parameter value will be used to indicate that the
968 routine should return if no triggers need to be serviced initially. If this
969 bit is not set the routine will block until one trigger handler is serviced
970 and then behave like the poll case servicing any remaining triggers
971 actually outstanding before returning. Normally the State parameter should
972 be simply initialised to zero and the result should be ignored, other
973 values/options are for internal use only. */
974TBIRES __TBISyncTrigger( TBIRES State, int Id );
975
976/* Call this routine to enable processing of triggers by signal handlers at
977 interrupt level. The State parameter value passed is returned by this
978 routine. The State.Sig.TrigMask field also specifies the initial
979 state of the interrupt mask register TXMASKI to be setup by the call.
980 The other parts of the State parameter are ignored unless the PRIV bit is
981 set in the SaveMask field. In this case the State.Sig.pCtx field specifies
982 the base of the stack to which the interrupt system should switch into
983 as it saves the state of the previously executing code. In the case the
984 thread will be unprivileged as it continues execution at the return
   point of this routine and its future state will be effectively never
986 trusted to be valid. */
987TBIRES __TBIASyncTrigger( TBIRES State );
988
989/* Call this to swap soft threads executing at the background processing level.
990 The TBIRES returned to the new thread will be the same as the NextThread
991 value specified to the call. The NextThread.Switch.pCtx value specifies
992 which thread context to restore and the NextThread.Switch.Para value can
993 hold an arbitrary expression to be passed between the threads. The saved
994 state of the previous thread will be stored in a TBICTX descriptor created
   on its stack and the address of this will be stored into the *rpSaveCtx
996 location specified. */
997TBIRES __TBISwitch( TBIRES NextThread, PTBICTX *rpSaveCtx );
998
999/* Call this to initialise a stack frame ready for further use, up to four
1000 32-bit arguments may be specified after the fixed args to be passed via
1001 the new stack pStack to the routine specified via fnMain. If the
1002 main-line routine ever returns the thread will operate as if main itself
1003 had returned and terminate with the return code given. */
1004typedef int (*PTBIMAINFN)( TBIRES Arg /*, <= 4 additional 32-bit args */ );
1005PTBICTX __TBISwitchInit( void *pStack, PTBIMAINFN fnMain, ... );
1006
1007/* Call this to resume a thread from a saved synchronous TBICTX state.
1008 The TBIRES returned to the new thread will be the same as the NextThread
1009 value specified to the call. The NextThread.Switch.pCtx value specifies
1010 which thread context to restore and the NextThread.Switch.Para value can
1011 hold an arbitrary expression to be passed between the threads. The context
1012 of the calling thread is lost and this routine never returns to the
1013 caller. The TrigsMask value supplied is ored into TXMASKI to enable
1014 interrupts after the context of the new thread is established. */
1015void __TBISyncResume( TBIRES NextThread, int TrigsMask );
1016
1017/* Call these routines to save and restore the extended states of
1018 scheduled tasks. */
1019void *__TBICtxSave( TBIRES State, void *pExt );
1020void *__TBICtxRestore( TBIRES State, void *pExt );
1021
1022#ifdef TBI_1_4
1023#ifdef TBI_FASTINT_1_4
1024/* Call these routines to copy the GP state to a separate buffer
1025 * Only necessary for context switching.
1026 */
1027PTBICTXGP __TBICtx2SaveCrit( PTBICTX2 pCurrentCtx, PTBICTX2 pSaveCtx );
1028void *__TBICtx2SaveGP( PTBICTXGP pCurrentCtxGP, PTBICTXGP pSaveCtxGP );
1029
1030/* Call these routines to save and restore the extended states of
1031 scheduled tasks. */
1032void *__TBICtx2Save( PTBICTXGP pCtxGP, short SaveMask, void *pExt );
1033void *__TBICtx2Restore( PTBICTX2 pCtx, short SaveMask, void *pExt );
1034#endif
1035
1036/* If FPAC flag is set then significant FPU context exists. Call these routine
1037 to save and restore it */
1038void *__TBICtxFPUSave( TBIRES State, void *pExt );
1039void *__TBICtxFPURestore( TBIRES State, void *pExt );
1040
1041#ifdef TBI_FASTINT_1_4
1042extern void *__TBICtx2FPUSave (PTBICTXGP, short, void*);
1043extern void *__TBICtx2FPURestore (PTBICTXGP, short, void*);
1044#endif
1045#endif
1046
1047#ifdef TBI_1_4
1048/* Call these routines to save and restore DSPRAM. */
1049void *__TBIDspramSaveA (short DspramSizes, void *pExt);
1050void *__TBIDspramSaveB (short DspramSizes, void *pExt);
1051void *__TBIDspramRestoreA (short DspramSizes, void *pExt);
1052void *__TBIDspramRestoreB (short DspramSizes, void *pExt);
1053#endif
1054
1055/* This routine should be used at the entrypoint of interrupt handlers to
1056 re-enable higher priority interrupts and/or save state from the previously
1057 executing background code. State is a TBIRES.Sig parameter with NoNestMask
1058 indicating the triggers (if any) that should remain disabled and SaveMask
1059 CBUF bit indicating the if the hardware catch buffer is dirty. Optionally
1060 any number of extended state bits X??? including XCBF can be specified to
1061 force a nested state save call to __TBICtxSave before the current routine
1062 continues. (In the latter case __TBICtxRestore should be called to restore
1063 any extended states before the background thread of execution is resumed)
1064
1065 By default (no X??? bits specified in SaveMask) this routine performs a
1066 sub-call to __TBICtxSave with the pExt and State parameters specified IF
1067 some triggers could be serviced while the current interrupt handler
1068 executes and the hardware catch buffer is actually dirty. In this case
1069 this routine provides the XCBF bit in State.Sig.SaveMask to force the
1070 __TBICtxSave to extract the current catch state.
1071
1072 The NoNestMask parameter should normally indicate that the same or lower
1073 triggers than those provoking the current handler call should not be
1074 serviced in nested calls, zero may be specified if all possible interrupts
1075 are to be allowed.
1076
1077 The TBIRES.Sig value returned will be similar to the State parameter
1078 specified with the XCBF bit ORed into it's SaveMask if a context save was
1079 required and fewer bits set in it's TrigMask corresponding to the same/lower
1080 priority interrupt triggers still not enabled. */
1081TBIRES __TBINestInts( TBIRES State, void *pExt, int NoNestMask );
1082
1083/* This routine causes the TBICTX structure specified in State.Sig.pCtx to
1084 be restored. This implies that execution will not return to the caller.
1085 The State.Sig.TrigMask field will be restored during the context switch
1086 such that any immediately occurring interrupts occur in the context of the
1087 newly specified task. The State.Sig.SaveMask parameter is ignored. */
1088void __TBIASyncResume( TBIRES State );
1089
1090/* Call this routine to enable fastest possible processing of one or more
1091 interrupt triggers via a unified signal handler. The handler concerned
1092 must simple return after servicing the related hardware.
1093 The State.Sig.TrigMask parameter indicates the interrupt triggers to be
1094 enabled and the Thin.Thin.fnHandler specifies the routine to call and
1095 the whole Thin parameter value will be passed to this routine unaltered as
1096 it's first parameter. */
1097void __TBIASyncThin( TBIRES State, TBIRES Thin );
1098
1099/* Do this before performing your own direct spin-lock access - use TBI_LOCK */
1100int __TBILock( void );
1101
1102/* Do this after performing your own direct spin-lock access - use TBI_UNLOCK */
1103void __TBIUnlock( int TrigState );
1104
1105/* Obtain and release global critical section lock - only stops execution
1106 of interrupts on this thread and similar critical section code on other
1107 local threads - use TBI_CRITON or TBI_CRITOFF */
1108int __TBICritOn( void );
1109void __TBICritOff( int TrigState );
1110
1111/* Change INTS (TXMASKI) - return old state - use TBI_INTSX */
1112int __TBIIntsX( int NewMask );
1113
1114/* Change TRIGS (TXMASK) - return old state - use TBI_TRIGSX */
1115int __TBITrigsX( int NewMask );
1116
1117/* This function initialises a timer for first use, only the TBID_ISTAT_BIT
1118 of the Id parameter is used to indicate which timer is to be modified. The
1119 Wait value should either be zero to disable the timer concerned or be in
1120 the recommended TBI_TIMERWAIT_* range to specify the delay required before
1121 the first timer trigger occurs.
1122
1123 The TBID_ISTAT_BIT of the Id parameter similar effects all other timer
1124 support functions (see below). */
1125void __TBITimerCtrl( int Id, int Wait );
1126
1127/* This routine returns a 64-bit time stamp value that is initialised to zero
1128 via a __TBITimerCtrl timer enabling call. */
1129long long __TBITimeStamp( int Id );
1130
1131/* To manage a periodic timer each period elapsed should be subracted from
1132 the current timer value to attempt to set up the next timer trigger. The
1133 Wait parameter should be a value in the recommended TBI_TIMERWAIT_* range.
1134 The return value is the new aggregate value that the timer was updated to,
1135 if this is less than zero then a timer trigger is guaranteed to be
1136 generated after the number of ticks implied, if a positive result is
1137 returned either itterative or step-wise corrective action must be taken to
1138 resynchronise the timer and hence provoke a future timer trigger. */
1139int __TBITimerAdd( int Id, int Wait );
1140
1141/* String table search function, pStart is first entry to check or NULL,
1142 pStr is string data to search for and MatchLen is either length of string
1143 to compare for an exact match or negative length to compare for partial
1144 match. */
1145const TBISTR *__TBIFindStr( const TBISTR *pStart,
1146 const char *pStr, int MatchLen );
1147
1148/* String table translate function, pStr is text to translate and Len is
1149 it's length. Value returned may not be a string pointer if the
1150 translation value is really some other type, 64-bit alignment of the return
1151 pointer is guaranteed so almost any type including a structure could be
1152 located with this routine. */
1153const void *__TBITransStr( const char *pStr, int Len );
1154
1155
1156
1157/* Arbitrary physical memory access windows, use different Channels to avoid
1158 conflict/thrashing within a single piece of code. */
1159void *__TBIPhysAccess( int Channel, int PhysAddr, int Bytes );
1160void __TBIPhysRelease( int Channel, void *pLinAddr );
1161
1162#ifdef METAC_1_0
1163/* Data cache function nullified because data cache is off */
1164#define TBIDCACHE_FLUSH( pAddr )
1165#define TBIDCACHE_PRELOAD( Type, pAddr ) ((Type) (pAddr))
1166#define TBIDCACHE_REFRESH( Type, pAddr ) ((Type) (pAddr))
1167#endif
1168#ifdef METAC_1_1
1169/* To flush a single cache line from the data cache using a linear address */
1170#define TBIDCACHE_FLUSH( pAddr ) ((volatile char *) \
1171 (((unsigned int) (pAddr))>>LINSYSLFLUSH_S))[0] = 0
1172
1173extern void * __builtin_dcache_preload (void *);
1174
1175/* Try to ensure that the data at the address concerned is in the cache */
1176#define TBIDCACHE_PRELOAD( Type, Addr ) \
1177 ((Type) __builtin_dcache_preload ((void *)(Addr)))
1178
1179extern void * __builtin_dcache_refresh (void *);
1180
1181/* Flush any old version of data from address and re-load a new copy */
1182#define TBIDCACHE_REFRESH( Type, Addr ) __extension__ ({ \
1183 Type __addr = (Type)(Addr); \
1184 (void)__builtin_dcache_refresh ((void *)(((unsigned int)(__addr))>>6)); \
1185 __addr; })
1186
1187#endif
1188#ifndef METAC_1_0
1189#ifndef METAC_1_1
1190/* Support for DCACHE builtin */
1191extern void __builtin_dcache_flush (void *);
1192
1193/* To flush a single cache line from the data cache using a linear address */
1194#define TBIDCACHE_FLUSH( Addr ) \
1195 __builtin_dcache_flush ((void *)(Addr))
1196
1197extern void * __builtin_dcache_preload (void *);
1198
1199/* Try to ensure that the data at the address concerned is in the cache */
1200#define TBIDCACHE_PRELOAD( Type, Addr ) \
1201 ((Type) __builtin_dcache_preload ((void *)(Addr)))
1202
1203extern void * __builtin_dcache_refresh (void *);
1204
1205/* Flush any old version of data from address and re-load a new copy */
1206#define TBIDCACHE_REFRESH( Type, Addr ) \
1207 ((Type) __builtin_dcache_refresh ((void *)(Addr)))
1208
1209#endif
1210#endif
1211
1212/* Flush the MMCU cache */
1213#define TBIMCACHE_FLUSH() { ((volatile int *) LINSYSCFLUSH_MMCU)[0] = 0; }
1214
1215#ifdef METAC_2_1
1216/* Obtain the MMU table entry for the specified address */
1217#define TBIMTABLE_LEAFDATA(ADDR) TBIXCACHE_RD((int)(ADDR) & (-1<<6))
1218
1219#ifndef __ASSEMBLY__
1220/* Obtain the full MMU table entry for the specified address */
1221#define TBIMTABLE_DATA(ADDR) __extension__ ({ TBIRES __p; \
1222 __p.Val = TBIXCACHE_RL((int)(ADDR) & (-1<<6)); \
1223 __p; })
1224#endif
1225#endif
1226
1227/* Combine a physical base address, and a linear address
1228 * Internal use only
1229 */
1230#define _TBIMTABLE_LIN2PHYS(PHYS, LIN, LMASK) (void*)(((int)(PHYS)&0xFFFFF000)\
1231 +((int)(LIN)&(LMASK)))
1232
1233/* Convert a linear to a physical address */
1234#define TBIMTABLE_LIN2PHYS(LEAFDATA, ADDR) \
1235 (((LEAFDATA) & CRLINPHY0_VAL_BIT) \
1236 ? _TBIMTABLE_LIN2PHYS(LEAFDATA, ADDR, 0x00000FFF) \
1237 : 0)
1238
1239/* Debug support - using external debugger or host */
1240void __TBIDumpSegListEntries( void );
1241void __TBILogF( const char *pFmt, ... );
1242void __TBIAssert( const char *pFile, int LineNum, const char *pExp );
1243void __TBICont( const char *pMsg, ... ); /* TBIAssert -> 'wait for continue' */
1244
1245/* Array of signal name data for debug messages */
1246extern const char __TBISigNames[];
1247#endif /* ifndef __ASSEMBLY__ */
1248
1249
1250
1251/* Scale of sub-strings in the __TBISigNames string list */
1252#define TBI_SIGNAME_SCALE 4
1253#define TBI_SIGNAME_SCALE_S 2
1254
1255#define TBI_1_3
1256
1257#ifdef TBI_1_3
1258
1259#ifndef __ASSEMBLY__
1260#define TBIXCACHE_RD(ADDR) __extension__ ({\
1261 void * __Addr = (void *)(ADDR); \
1262 int __Data; \
1263 __asm__ volatile ( "CACHERD\t%0,[%1+#0]" : \
1264 "=r" (__Data) : "r" (__Addr) ); \
1265 __Data; })
1266
1267#define TBIXCACHE_RL(ADDR) __extension__ ({\
1268 void * __Addr = (void *)(ADDR); \
1269 long long __Data; \
1270 __asm__ volatile ( "CACHERL\t%0,%t0,[%1+#0]" : \
1271 "=d" (__Data) : "r" (__Addr) ); \
1272 __Data; })
1273
1274#define TBIXCACHE_WD(ADDR, DATA) do {\
1275 void * __Addr = (void *)(ADDR); \
1276 int __Data = DATA; \
1277 __asm__ volatile ( "CACHEWD\t[%0+#0],%1" : \
1278 : "r" (__Addr), "r" (__Data) ); } while(0)
1279
1280#define TBIXCACHE_WL(ADDR, DATA) do {\
1281 void * __Addr = (void *)(ADDR); \
1282 long long __Data = DATA; \
1283 __asm__ volatile ( "CACHEWL\t[%0+#0],%1,%t1" : \
1284 : "r" (__Addr), "r" (__Data) ); } while(0)
1285
1286#ifdef TBI_4_0
1287
1288#define TBICACHE_FLUSH_L1D_L2(ADDR) \
1289 TBIXCACHE_WD(ADDR, CACHEW_FLUSH_L1D_L2)
1290#define TBICACHE_WRITEBACK_L1D_L2(ADDR) \
1291 TBIXCACHE_WD(ADDR, CACHEW_WRITEBACK_L1D_L2)
1292#define TBICACHE_INVALIDATE_L1D(ADDR) \
1293 TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1D)
1294#define TBICACHE_INVALIDATE_L1D_L2(ADDR) \
1295 TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1D_L2)
1296#define TBICACHE_INVALIDATE_L1DTLB(ADDR) \
1297 TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1DTLB)
1298#define TBICACHE_INVALIDATE_L1I(ADDR) \
1299 TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1I)
1300#define TBICACHE_INVALIDATE_L1ITLB(ADDR) \
1301 TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1ITLB)
1302
1303#endif /* TBI_4_0 */
1304#endif /* ifndef __ASSEMBLY__ */
1305
1306/*
1307 * Calculate linear PC value from real PC and Minim mode control, the LSB of
1308 * the result returned indicates if address compression has occurred.
1309 */
1310#ifndef __ASSEMBLY__
1311#define METAG_LINPC( PCVal ) (\
1312 ( (TBI_GETREG(TXPRIVEXT) & TXPRIVEXT_MINIMON_BIT) != 0 ) ? ( \
1313 ( ((PCVal) & 0x00900000) == 0x00900000 ) ? \
1314 (((PCVal) & 0xFFE00000) + (((PCVal) & 0x001FFFFC)>>1) + 1) : \
1315 ( ((PCVal) & 0x00800000) == 0x00000000 ) ? \
1316 (((PCVal) & 0xFF800000) + (((PCVal) & 0x007FFFFC)>>1) + 1) : \
1317 (PCVal) ) \
1318 : (PCVal) )
1319#define METAG_LINPC_X2BIT 0x00000001 /* Make (Size>>1) if compressed */
1320
1321/* Convert an arbitrary Linear address into a valid Minim PC or return 0 */
1322#define METAG_PCMINIM( LinVal ) (\
1323 (((LinVal) & 0x00980000) == 0x00880000) ? \
1324 (((LinVal) & 0xFFE00000) + (((LinVal) & 0x000FFFFE)<<1)) : \
1325 (((LinVal) & 0x00C00000) == 0x00000000) ? \
1326 (((LinVal) & 0xFF800000) + (((LinVal) & 0x003FFFFE)<<1)) : 0 )
1327
1328/* Reverse a METAG_LINPC conversion step to return the original PCVal */
1329#define METAG_PCLIN( LinVal ) ( 0xFFFFFFFC & (\
1330 ( (LinVal & METAG_LINPC_X2BIT) != 0 ) ? METAG_PCMINIM( LinVal ) : \
1331 (LinVal) ))
1332
1333/*
1334 * Flush the MMCU Table cache privately for each thread. On cores that do not
1335 * support per-thread flushing it will flush all threads mapping data.
1336 */
1337#define TBIMCACHE_TFLUSH(Thread) do {\
1338 ((volatile int *)( LINSYSCFLUSH_TxMMCU_BASE + \
1339 (LINSYSCFLUSH_TxMMCU_STRIDE*(Thread)) ))[0] = 0; \
1340 } while(0)
1341
1342/*
1343 * To flush a single linear-matched cache line from the code cache. In
1344 * cases where Minim is possible the METAC_LINPC operation must be used
1345 * to pre-process the address being flushed.
1346 */
1347#define TBIICACHE_FLUSH( pAddr ) TBIXCACHE_WD (pAddr, CACHEW_ICACHE_BIT)
1348
1349/* To flush a single linear-matched mapping from code/data MMU table cache */
1350#define TBIMCACHE_AFLUSH( pAddr, SegType ) \
1351 TBIXCACHE_WD(pAddr, CACHEW_TLBFLUSH_BIT + ( \
1352 ((SegType) == TBID_SEGTYPE_TEXT) ? CACHEW_ICACHE_BIT : 0 ))
1353
1354/*
1355 * To flush translation data corresponding to a range of addresses without
1356 * using TBITCACHE_FLUSH to flush all of this threads translation data. It
1357 * is necessary to know what stride (>= 4K) must be used to flush a specific
1358 * region.
1359 *
1360 * For example direct mapped regions use the maximum page size (512K) which may
1361 * mean that only one flush is needed to cover the sub-set of the direct
1362 * mapped area used since it was setup.
1363 *
1364 * The function returns the stride on which flushes should be performed.
1365 *
1366 * If 0 is returned then the region is not subject to MMU caching, if -1 is
1367 * returned then this indicates that only TBIMCACHE_TFLUSH can be used to
1368 * flush the region concerned rather than TBIMCACHE_AFLUSH which this
1369 * function is designed to support.
1370 */
1371int __TBIMMUCacheStride( const void *pStart, int Bytes );
1372
1373/*
1374 * This function will use the above lower level functions to achieve a MMU
1375 * table data flush in an optimal a fashion as possible. On a system that
1376 * supports linear address based caching this function will also call the
1377 * code or data cache flush functions to maintain address/data coherency.
1378 *
1379 * SegType should be TBID_SEGTYPE_TEXT if the address range is for code or
1380 * any other value such as TBID_SEGTYPE_DATA for data. If an area is
1381 * used in both ways then call this function twice; once for each.
1382 */
1383void __TBIMMUCacheFlush( const void *pStart, int Bytes, int SegType );
1384
1385/*
1386 * Cached Core mode setup and flush functions allow one code and one data
1387 * region of the corresponding global or local cache partion size to be
1388 * locked into the corresponding cache memory. This prevents normal LRU
1389 * logic discarding the code or data and avoids write-thru bandwidth in
1390 * data areas. Code mappings are selected by specifying TBID_SEGTYPE_TEXT
1391 * for SegType, otherwise data mappings are created.
1392 *
1393 * Mode supplied should always contain the VALID bit and WINx selection data.
1394 * Data areas will be mapped read-only if the WRITE bit is not added.
1395 *
1396 * The address returned by the Opt function will either be the same as that
1397 * passed in (if optimisation cannot be supported) or the base of the new core
1398 * cached region in linear address space. The returned address must be passed
1399 * into the End function to remove the mapping when required. If a non-core
1400 * cached memory address is passed into it the End function has no effect.
1401 * Note that the region accessed MUST be flushed from the appropriate cache
1402 * before the End function is called to deliver correct operation.
1403 */
1404void *__TBICoreCacheOpt( const void *pStart, int Bytes, int SegType, int Mode );
1405void __TBICoreCacheEnd( const void *pOpt, int Bytes, int SegType );
1406
1407/*
1408 * Optimise physical access channel and flush side effects before releasing
1409 * the channel. If pStart is NULL the whole region must be flushed and this is
1410 * done automatically by the channel release function if optimisation is
1411 * enabled. Flushing the specific region that may have been accessed before
1412 * release should optimises this process. On physically cached systems we do
1413 * not flush the code/data caches only the MMU table data needs flushing.
1414 */
1415void __TBIPhysOptim( int Channel, int IMode, int DMode );
1416void __TBIPhysFlush( int Channel, const void *pStart, int Bytes );
1417#endif
1418#endif /* ifdef TBI_1_3 */
1419
1420#endif /* _ASM_METAG_TBX_H_ */
diff --git a/arch/metag/include/asm/tcm.h b/arch/metag/include/asm/tcm.h
deleted file mode 100644
index a0a4997e4b8a..000000000000
--- a/arch/metag/include/asm/tcm.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_TCM_H__
3#define __ASM_TCM_H__
4
5#include <linux/ioport.h>
6#include <linux/list.h>
7
8struct tcm_allocation {
9 struct list_head list;
10 unsigned int tag;
11 unsigned long addr;
12 unsigned long size;
13};
14
15/*
16 * TCM memory region descriptor.
17 */
18struct tcm_region {
19 unsigned int tag;
20 struct resource res;
21};
22
23#define TCM_INVALID_TAG 0xffffffff
24
25unsigned long tcm_alloc(unsigned int tag, size_t len);
26void tcm_free(unsigned int tag, unsigned long addr, size_t len);
27unsigned int tcm_lookup_tag(unsigned long p);
28
29int tcm_add_region(struct tcm_region *reg);
30
31#endif
diff --git a/arch/metag/include/asm/thread_info.h b/arch/metag/include/asm/thread_info.h
deleted file mode 100644
index a1a9c7f5ca8c..000000000000
--- a/arch/metag/include/asm/thread_info.h
+++ /dev/null
@@ -1,141 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* thread_info.h: Meta low-level thread information
3 *
4 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
5 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
6 *
7 * Meta port by Imagination Technologies
8 */
9
10#ifndef _ASM_THREAD_INFO_H
11#define _ASM_THREAD_INFO_H
12
13#include <linux/compiler.h>
14#include <asm/page.h>
15
16#ifndef __ASSEMBLY__
17#include <asm/processor.h>
18#endif
19
20/*
21 * low level task data that entry.S needs immediate access to
22 * - this struct should fit entirely inside of one cache line
23 * - this struct shares the supervisor stack pages
24 * - if the contents of this structure are changed, the assembly constants must
25 * also be changed
26 */
27#ifndef __ASSEMBLY__
28
29/* This must be 8 byte aligned so we can ensure stack alignment. */
30struct thread_info {
31 struct task_struct *task; /* main task structure */
32 unsigned long flags; /* low level flags */
33 unsigned long status; /* thread-synchronous flags */
34 u32 cpu; /* current CPU */
35 int preempt_count; /* 0 => preemptable, <0 => BUG */
36
37 mm_segment_t addr_limit; /* thread address space */
38
39 u8 supervisor_stack[0] __aligned(8);
40};
41
42#else /* !__ASSEMBLY__ */
43
44#include <generated/asm-offsets.h>
45
46#endif
47
48#ifdef CONFIG_4KSTACKS
49#define THREAD_SHIFT 12
50#else
51#define THREAD_SHIFT 13
52#endif
53
54#if THREAD_SHIFT >= PAGE_SHIFT
55#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
56#else
57#define THREAD_SIZE_ORDER 0
58#endif
59
60#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
61
62#define STACK_WARN (THREAD_SIZE/8)
63/*
64 * macros/functions for gaining access to the thread information structure
65 */
66#ifndef __ASSEMBLY__
67
68#define INIT_THREAD_INFO(tsk) \
69{ \
70 .task = &tsk, \
71 .flags = 0, \
72 .cpu = 0, \
73 .preempt_count = INIT_PREEMPT_COUNT, \
74 .addr_limit = KERNEL_DS, \
75}
76
77/* how to get the current stack pointer from C */
78register unsigned long current_stack_pointer asm("A0StP") __used;
79
80/* how to get the thread information struct from C */
81static inline struct thread_info *current_thread_info(void)
82{
83 return (struct thread_info *)(current_stack_pointer &
84 ~(THREAD_SIZE - 1));
85}
86
87#define __HAVE_ARCH_KSTACK_END
88static inline int kstack_end(void *addr)
89{
90 return addr == (void *) (((unsigned long) addr & ~(THREAD_SIZE - 1))
91 + sizeof(struct thread_info));
92}
93
94#endif
95
96/*
97 * thread information flags
98 * - these are process state flags that various assembly files may need to
99 * access
100 * - pending work-to-be-done flags are in LSW
101 * - other flags in MSW
102 */
103#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
104#define TIF_SIGPENDING 1 /* signal pending */
105#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
106#define TIF_SINGLESTEP 3 /* restore singlestep on return to user
107 mode */
108#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
109#define TIF_SECCOMP 5 /* secure computing */
110#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
111#define TIF_NOTIFY_RESUME 7 /* callback before returning to user */
112#define TIF_MEMDIE 8 /* is terminating due to OOM killer */
113#define TIF_SYSCALL_TRACEPOINT 9 /* syscall tracepoint instrumentation */
114
115
116#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
117#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
118#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
119#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
120#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
121#define _TIF_SECCOMP (1<<TIF_SECCOMP)
122#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
123#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
124#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
125
126/* work to do in syscall trace */
127#define _TIF_WORK_SYSCALL_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
128 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
129 _TIF_SYSCALL_TRACEPOINT)
130
131/* work to do on any return to u-space */
132#define _TIF_ALLWORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \
133 _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \
134 _TIF_SINGLESTEP | _TIF_RESTORE_SIGMASK | \
135 _TIF_NOTIFY_RESUME)
136
137/* work to do on interrupt/exception return */
138#define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \
139 _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP))
140
141#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/metag/include/asm/tlb.h b/arch/metag/include/asm/tlb.h
deleted file mode 100644
index fbe6ee91e8e7..000000000000
--- a/arch/metag/include/asm/tlb.h
+++ /dev/null
@@ -1,37 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_METAG_TLB_H
3#define __ASM_METAG_TLB_H
4
5#include <asm/cacheflush.h>
6#include <asm/page.h>
7
8/* Note, read http://lkml.org/lkml/2004/1/15/6 */
9
10#ifdef CONFIG_METAG_META12
11
12#define tlb_start_vma(tlb, vma) \
13 do { \
14 if (!tlb->fullmm) \
15 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
16 } while (0)
17
18#define tlb_end_vma(tlb, vma) \
19 do { \
20 if (!tlb->fullmm) \
21 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
22 } while (0)
23
24
25#else
26
27#define tlb_start_vma(tlb, vma) do { } while (0)
28#define tlb_end_vma(tlb, vma) do { } while (0)
29
30#endif
31
32#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0)
33#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
34
35#include <asm-generic/tlb.h>
36
37#endif
diff --git a/arch/metag/include/asm/tlbflush.h b/arch/metag/include/asm/tlbflush.h
deleted file mode 100644
index f3e5d99a3891..000000000000
--- a/arch/metag/include/asm/tlbflush.h
+++ /dev/null
@@ -1,78 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_METAG_TLBFLUSH_H
3#define __ASM_METAG_TLBFLUSH_H
4
5#include <linux/io.h>
6#include <linux/sched.h>
7#include <asm/metag_mem.h>
8#include <asm/pgalloc.h>
9
10/*
11 * TLB flushing:
12 *
13 * - flush_tlb() flushes the current mm struct TLBs
14 * - flush_tlb_all() flushes all processes TLBs
15 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
16 * - flush_tlb_page(vma, vmaddr) flushes one page
17 * - flush_tlb_range(mm, start, end) flushes a range of pages
18 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
19 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
20 *
21 * FIXME: Meta 2 can flush single TLB entries.
22 *
23 */
24
25#if defined(CONFIG_METAG_META21) && !defined(CONFIG_SMP)
26static inline void __flush_tlb(void)
27{
28 /* flush TLB entries for just the current hardware thread */
29 int thread = hard_processor_id();
30 metag_out32(0, (LINSYSCFLUSH_TxMMCU_BASE +
31 LINSYSCFLUSH_TxMMCU_STRIDE * thread));
32}
33#else
34static inline void __flush_tlb(void)
35{
36 /* flush TLB entries for all hardware threads */
37 metag_out32(0, LINSYSCFLUSH_MMCU);
38}
39#endif /* defined(CONFIG_METAG_META21) && !defined(CONFIG_SMP) */
40
41#define flush_tlb() __flush_tlb()
42
43#define flush_tlb_all() __flush_tlb()
44
45#define local_flush_tlb_all() __flush_tlb()
46
47static inline void flush_tlb_mm(struct mm_struct *mm)
48{
49 if (mm == current->active_mm)
50 __flush_tlb();
51}
52
53static inline void flush_tlb_page(struct vm_area_struct *vma,
54 unsigned long addr)
55{
56 flush_tlb_mm(vma->vm_mm);
57}
58
59static inline void flush_tlb_range(struct vm_area_struct *vma,
60 unsigned long start, unsigned long end)
61{
62 flush_tlb_mm(vma->vm_mm);
63}
64
65static inline void flush_tlb_pgtables(struct mm_struct *mm,
66 unsigned long start, unsigned long end)
67{
68 flush_tlb_mm(mm);
69}
70
71static inline void flush_tlb_kernel_range(unsigned long start,
72 unsigned long end)
73{
74 flush_tlb_all();
75}
76
77#endif /* __ASM_METAG_TLBFLUSH_H */
78
diff --git a/arch/metag/include/asm/topology.h b/arch/metag/include/asm/topology.h
deleted file mode 100644
index df0d9e6b7f12..000000000000
--- a/arch/metag/include/asm/topology.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_METAG_TOPOLOGY_H
3#define _ASM_METAG_TOPOLOGY_H
4
5#ifdef CONFIG_NUMA
6
7#define cpu_to_node(cpu) ((void)(cpu), 0)
8
9#define cpumask_of_node(node) ((void)node, cpu_online_mask)
10
11#define pcibus_to_node(bus) ((void)(bus), -1)
12#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
13 cpu_all_mask : \
14 cpumask_of_node(pcibus_to_node(bus)))
15
16#endif
17
18#define mc_capable() (1)
19
20const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
21
22extern cpumask_t cpu_core_map[NR_CPUS];
23
24#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
25
26#include <asm-generic/topology.h>
27
28#endif /* _ASM_METAG_TOPOLOGY_H */
diff --git a/arch/metag/include/asm/traps.h b/arch/metag/include/asm/traps.h
deleted file mode 100644
index ac808740bd84..000000000000
--- a/arch/metag/include/asm/traps.h
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * Copyright (C) 2005,2008 Imagination Technologies
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file COPYING in the main directory of this archive
6 * for more details.
7 */
8
9#ifndef _METAG_TBIVECTORS_H
10#define _METAG_TBIVECTORS_H
11
12#ifndef __ASSEMBLY__
13
14#include <asm/tbx.h>
15
16typedef TBIRES (*kick_irq_func_t)(TBIRES, int, int, int, PTBI, int *);
17
18extern TBIRES kick_handler(TBIRES, int, int, int, PTBI);
19struct kick_irq_handler {
20 struct list_head list;
21 kick_irq_func_t func;
22};
23
24extern void kick_register_func(struct kick_irq_handler *);
25extern void kick_unregister_func(struct kick_irq_handler *);
26
27extern void head_end(TBIRES, unsigned long);
28extern void restart_critical_section(TBIRES State);
29extern TBIRES tail_end_sys(TBIRES, int, int *);
30static inline TBIRES tail_end(TBIRES state)
31{
32 return tail_end_sys(state, -1, NULL);
33}
34
35DECLARE_PER_CPU(PTBI, pTBI);
36extern PTBI pTBI_get(unsigned int);
37
38extern int ret_from_fork(TBIRES arg);
39
40extern int do_page_fault(struct pt_regs *regs, unsigned long address,
41 unsigned int write_access, unsigned int trapno);
42
43extern TBIRES __TBIUnExpXXX(TBIRES State, int SigNum, int Triggers, int Inst,
44 PTBI pTBI);
45
46#endif
47
48#endif /* _METAG_TBIVECTORS_H */
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
deleted file mode 100644
index a5311eb36e32..000000000000
--- a/arch/metag/include/asm/uaccess.h
+++ /dev/null
@@ -1,213 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __METAG_UACCESS_H
3#define __METAG_UACCESS_H
4
5/*
6 * User space memory access functions
7 */
8
9/*
10 * The fs value determines whether argument validity checking should be
11 * performed or not. If get_fs() == USER_DS, checking is performed, with
12 * get_fs() == KERNEL_DS, checking is bypassed.
13 *
14 * For historical reasons, these macros are grossly misnamed.
15 */
16
17#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
18
19#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
20#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
21
22#define get_ds() (KERNEL_DS)
23#define get_fs() (current_thread_info()->addr_limit)
24#define set_fs(x) (current_thread_info()->addr_limit = (x))
25
26#define segment_eq(a, b) ((a).seg == (b).seg)
27
28static inline int __access_ok(unsigned long addr, unsigned long size)
29{
30 /*
31 * Allow access to the user mapped memory area, but not the system area
32 * before it. The check extends to the top of the address space when
33 * kernel access is allowed (there's no real reason to user copy to the
34 * system area in any case).
35 */
36 if (likely(addr >= META_MEMORY_BASE && addr < get_fs().seg &&
37 size <= get_fs().seg - addr))
38 return true;
39 /*
40 * Explicitly allow NULL pointers here. Parts of the kernel such
41 * as readv/writev use access_ok to validate pointers, but want
42 * to allow NULL pointers for various reasons. NULL pointers are
43 * safe to allow through because the first page is not mappable on
44 * Meta.
45 */
46 if (!addr)
47 return true;
48 /* Allow access to core code memory area... */
49 if (addr >= LINCORE_CODE_BASE && addr <= LINCORE_CODE_LIMIT &&
50 size <= LINCORE_CODE_LIMIT + 1 - addr)
51 return true;
52 /* ... but no other areas. */
53 return false;
54}
55
56#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \
57 (unsigned long)(size))
58
59#include <asm/extable.h>
60
61/*
62 * These are the main single-value transfer routines. They automatically
63 * use the right size if we just have the right pointer type.
64 */
65
66#define put_user(x, ptr) \
67 __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
68#define __put_user(x, ptr) \
69 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
70
71extern void __put_user_bad(void);
72
73#define __put_user_nocheck(x, ptr, size) \
74({ \
75 long __pu_err; \
76 __put_user_size((x), (ptr), (size), __pu_err); \
77 __pu_err; \
78})
79
80#define __put_user_check(x, ptr, size) \
81({ \
82 long __pu_err = -EFAULT; \
83 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
84 if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
85 __put_user_size((x), __pu_addr, (size), __pu_err); \
86 __pu_err; \
87})
88
89extern long __put_user_asm_b(unsigned int x, void __user *addr);
90extern long __put_user_asm_w(unsigned int x, void __user *addr);
91extern long __put_user_asm_d(unsigned int x, void __user *addr);
92extern long __put_user_asm_l(unsigned long long x, void __user *addr);
93
94#define __put_user_size(x, ptr, size, retval) \
95do { \
96 retval = 0; \
97 switch (size) { \
98 case 1: \
99 retval = __put_user_asm_b((__force unsigned int)x, ptr);\
100 break; \
101 case 2: \
102 retval = __put_user_asm_w((__force unsigned int)x, ptr);\
103 break; \
104 case 4: \
105 retval = __put_user_asm_d((__force unsigned int)x, ptr);\
106 break; \
107 case 8: \
108 retval = __put_user_asm_l((__force unsigned long long)x,\
109 ptr); \
110 break; \
111 default: \
112 __put_user_bad(); \
113 } \
114} while (0)
115
116#define get_user(x, ptr) \
117 __get_user_check((x), (ptr), sizeof(*(ptr)))
118#define __get_user(x, ptr) \
119 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
120
121extern long __get_user_bad(void);
122
123#define __get_user_nocheck(x, ptr, size) \
124({ \
125 long __gu_err; \
126 long long __gu_val; \
127 __get_user_size(__gu_val, (ptr), (size), __gu_err); \
128 (x) = (__force __typeof__(*(ptr)))__gu_val; \
129 __gu_err; \
130})
131
132#define __get_user_check(x, ptr, size) \
133({ \
134 long __gu_err = -EFAULT; \
135 long long __gu_val = 0; \
136 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
137 if (access_ok(VERIFY_READ, __gu_addr, size)) \
138 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
139 (x) = (__force __typeof__(*(ptr)))__gu_val; \
140 __gu_err; \
141})
142
143extern unsigned char __get_user_asm_b(const void __user *addr, long *err);
144extern unsigned short __get_user_asm_w(const void __user *addr, long *err);
145extern unsigned int __get_user_asm_d(const void __user *addr, long *err);
146extern unsigned long long __get_user_asm_l(const void __user *addr, long *err);
147
148#define __get_user_size(x, ptr, size, retval) \
149do { \
150 retval = 0; \
151 switch (size) { \
152 case 1: \
153 x = __get_user_asm_b(ptr, &retval); break; \
154 case 2: \
155 x = __get_user_asm_w(ptr, &retval); break; \
156 case 4: \
157 x = __get_user_asm_d(ptr, &retval); break; \
158 case 8: \
159 x = __get_user_asm_l(ptr, &retval); break; \
160 default: \
161 (x) = __get_user_bad(); \
162 } \
163} while (0)
164
165/*
166 * Copy a null terminated string from userspace.
167 *
168 * Must return:
169 * -EFAULT for an exception
170 * count if we hit the buffer limit
171 * bytes copied if we hit a null byte
172 * (without the null byte)
173 */
174
175extern long __must_check __strncpy_from_user(char *dst, const char __user *src,
176 long count);
177
178static inline long
179strncpy_from_user(char *dst, const char __user *src, long count)
180{
181 if (!access_ok(VERIFY_READ, src, 1))
182 return -EFAULT;
183 return __strncpy_from_user(dst, src, count);
184}
185/*
186 * Return the size of a string (including the ending 0)
187 *
188 * Return 0 on exception, a value greater than N if too long
189 */
190extern long __must_check strnlen_user(const char __user *src, long count);
191
192extern unsigned long raw_copy_from_user(void *to, const void __user *from,
193 unsigned long n);
194extern unsigned long raw_copy_to_user(void __user *to, const void *from,
195 unsigned long n);
196
197/*
198 * Zero Userspace
199 */
200
201extern unsigned long __must_check __do_clear_user(void __user *to,
202 unsigned long n);
203
204static inline unsigned long clear_user(void __user *to, unsigned long n)
205{
206 if (access_ok(VERIFY_WRITE, to, n))
207 return __do_clear_user(to, n);
208 return n;
209}
210
211#define __clear_user(to, n) __do_clear_user(to, n)
212
213#endif /* _METAG_UACCESS_H */
diff --git a/arch/metag/include/asm/unistd.h b/arch/metag/include/asm/unistd.h
deleted file mode 100644
index 32955a18fb32..000000000000
--- a/arch/metag/include/asm/unistd.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/*
2 * Copyright (C) 2012 Imagination Technologies Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <uapi/asm/unistd.h>
11
12#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/metag/include/asm/user_gateway.h b/arch/metag/include/asm/user_gateway.h
deleted file mode 100644
index cf2392b95a56..000000000000
--- a/arch/metag/include/asm/user_gateway.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C) 2010 Imagination Technologies
4 */
5
6#ifndef __ASM_METAG_USER_GATEWAY_H
7#define __ASM_METAG_USER_GATEWAY_H
8
9#include <asm/page.h>
10
11/* Page of kernel code accessible to userspace. */
12#define USER_GATEWAY_PAGE 0x6ffff000
13/* Offset of TLS pointer array in gateway page. */
14#define USER_GATEWAY_TLS 0x100
15
16#ifndef __ASSEMBLY__
17
18extern char __user_gateway_start;
19extern char __user_gateway_end;
20
21/* Kernel mapping of the gateway page. */
22extern void *gateway_page;
23
24static inline void set_gateway_tls(void __user *tls_ptr)
25{
26 void **gateway_tls = (void **)(gateway_page + USER_GATEWAY_TLS +
27 hard_processor_id() * 4);
28
29 *gateway_tls = (__force void *)tls_ptr;
30#ifdef CONFIG_METAG_META12
31 /* Avoid cache aliases on virtually tagged cache. */
32 __builtin_dcache_flush((void *)USER_GATEWAY_PAGE + USER_GATEWAY_TLS +
33 hard_processor_id() * sizeof(void *));
34#endif
35}
36
37extern int __kuser_get_tls(void);
38extern char *__kuser_get_tls_end[];
39
40extern int __kuser_cmpxchg(int, int, unsigned long *);
41extern char *__kuser_cmpxchg_end[];
42
43#endif
44
45#endif
diff --git a/arch/metag/include/uapi/asm/Kbuild b/arch/metag/include/uapi/asm/Kbuild
deleted file mode 100644
index f9eaf07d29f8..000000000000
--- a/arch/metag/include/uapi/asm/Kbuild
+++ /dev/null
@@ -1,31 +0,0 @@
1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm
3
4generic-y += auxvec.h
5generic-y += bitsperlong.h
6generic-y += bpf_perf_event.h
7generic-y += errno.h
8generic-y += fcntl.h
9generic-y += ioctl.h
10generic-y += ioctls.h
11generic-y += ipcbuf.h
12generic-y += kvm_para.h
13generic-y += mman.h
14generic-y += msgbuf.h
15generic-y += param.h
16generic-y += poll.h
17generic-y += posix_types.h
18generic-y += resource.h
19generic-y += sembuf.h
20generic-y += setup.h
21generic-y += shmbuf.h
22generic-y += shmparam.h
23generic-y += signal.h
24generic-y += socket.h
25generic-y += sockios.h
26generic-y += stat.h
27generic-y += statfs.h
28generic-y += termbits.h
29generic-y += termios.h
30generic-y += types.h
31generic-y += ucontext.h
diff --git a/arch/metag/include/uapi/asm/byteorder.h b/arch/metag/include/uapi/asm/byteorder.h
deleted file mode 100644
index e5e03ff7e20d..000000000000
--- a/arch/metag/include/uapi/asm/byteorder.h
+++ /dev/null
@@ -1,2 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#include <linux/byteorder/little_endian.h>
diff --git a/arch/metag/include/uapi/asm/ech.h b/arch/metag/include/uapi/asm/ech.h
deleted file mode 100644
index 1e09f1ea4f7f..000000000000
--- a/arch/metag/include/uapi/asm/ech.h
+++ /dev/null
@@ -1,16 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef _UAPI_METAG_ECH_H
3#define _UAPI_METAG_ECH_H
4
5/*
6 * These bits can be set in the top half of the D0.8 register when DSP context
7 * switching is enabled, in order to support partial DSP context save/restore.
8 */
9
10#define TBICTX_XEXT_BIT 0x1000 /* Enable extended context save */
11#define TBICTX_XTDP_BIT 0x0800 /* DSP accumulators/RAM/templates */
12#define TBICTX_XHL2_BIT 0x0400 /* Hardware loops */
13#define TBICTX_XAXX_BIT 0x0200 /* Extended AX registers (A*.4-7) */
14#define TBICTX_XDX8_BIT 0x0100 /* Extended DX registers (D*.8-15) */
15
16#endif /* _UAPI_METAG_ECH_H */
diff --git a/arch/metag/include/uapi/asm/ptrace.h b/arch/metag/include/uapi/asm/ptrace.h
deleted file mode 100644
index 8ad9daa841c3..000000000000
--- a/arch/metag/include/uapi/asm/ptrace.h
+++ /dev/null
@@ -1,114 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef _UAPI_METAG_PTRACE_H
3#define _UAPI_METAG_PTRACE_H
4
5#ifndef __ASSEMBLY__
6
7/*
8 * These are the layouts of the regsets returned by the GETREGSET ptrace call
9 */
10
11/* user_gp_regs::status */
12
13/* CBMarker bit (indicates catch state / catch replay) */
14#define USER_GP_REGS_STATUS_CATCH_BIT (1 << 22)
15#define USER_GP_REGS_STATUS_CATCH_S 22
16/* LSM_STEP field (load/store multiple step) */
17#define USER_GP_REGS_STATUS_LSM_STEP_BITS (0x7 << 8)
18#define USER_GP_REGS_STATUS_LSM_STEP_S 8
19/* SCC bit (indicates split 16x16 condition flags) */
20#define USER_GP_REGS_STATUS_SCC_BIT (1 << 4)
21#define USER_GP_REGS_STATUS_SCC_S 4
22
23/* normal condition flags */
24/* CF_Z bit (Zero flag) */
25#define USER_GP_REGS_STATUS_CF_Z_BIT (1 << 3)
26#define USER_GP_REGS_STATUS_CF_Z_S 3
27/* CF_N bit (Negative flag) */
28#define USER_GP_REGS_STATUS_CF_N_BIT (1 << 2)
29#define USER_GP_REGS_STATUS_CF_N_S 2
30/* CF_V bit (oVerflow flag) */
31#define USER_GP_REGS_STATUS_CF_V_BIT (1 << 1)
32#define USER_GP_REGS_STATUS_CF_V_S 1
33/* CF_C bit (Carry flag) */
34#define USER_GP_REGS_STATUS_CF_C_BIT (1 << 0)
35#define USER_GP_REGS_STATUS_CF_C_S 0
36
37/* split 16x16 condition flags */
38/* SCF_LZ bit (Low Zero flag) */
39#define USER_GP_REGS_STATUS_SCF_LZ_BIT (1 << 3)
40#define USER_GP_REGS_STATUS_SCF_LZ_S 3
41/* SCF_HZ bit (High Zero flag) */
42#define USER_GP_REGS_STATUS_SCF_HZ_BIT (1 << 2)
43#define USER_GP_REGS_STATUS_SCF_HZ_S 2
44/* SCF_HC bit (High Carry flag) */
45#define USER_GP_REGS_STATUS_SCF_HC_BIT (1 << 1)
46#define USER_GP_REGS_STATUS_SCF_HC_S 1
47/* SCF_LC bit (Low Carry flag) */
48#define USER_GP_REGS_STATUS_SCF_LC_BIT (1 << 0)
49#define USER_GP_REGS_STATUS_SCF_LC_S 0
50
51/**
52 * struct user_gp_regs - User general purpose registers
53 * @dx: GP data unit regs (dx[reg][unit] = D{unit:0-1}.{reg:0-7})
54 * @ax: GP address unit regs (ax[reg][unit] = A{unit:0-1}.{reg:0-3})
55 * @pc: PC register
56 * @status: TXSTATUS register (condition flags, LSM_STEP etc)
57 * @rpt: TXRPT registers (branch repeat counter)
58 * @bpobits: TXBPOBITS register ("branch prediction other" bits)
59 * @mode: TXMODE register
60 * @_pad1: Reserved padding to make sizeof obviously 64bit aligned
61 *
62 * This is the user-visible general purpose register state structure.
63 *
64 * It can be accessed through PTRACE_GETREGSET with NT_PRSTATUS.
65 *
66 * It is also used in the signal context.
67 */
68struct user_gp_regs {
69 unsigned long dx[8][2];
70 unsigned long ax[4][2];
71 unsigned long pc;
72 unsigned long status;
73 unsigned long rpt;
74 unsigned long bpobits;
75 unsigned long mode;
76 unsigned long _pad1;
77};
78
79/**
80 * struct user_cb_regs - User catch buffer registers
81 * @flags: TXCATCH0 register (fault flags)
82 * @addr: TXCATCH1 register (fault address)
83 * @data: TXCATCH2 and TXCATCH3 registers (low and high data word)
84 *
85 * This is the user-visible catch buffer register state structure containing
86 * information about a failed memory access, and allowing the access to be
87 * modified and replayed.
88 *
89 * It can be accessed through PTRACE_GETREGSET with NT_METAG_CBUF.
90 */
91struct user_cb_regs {
92 unsigned long flags;
93 unsigned long addr;
94 unsigned long long data;
95};
96
97/**
98 * struct user_rp_state - User read pipeline state
99 * @entries: Read pipeline entries
100 * @mask: Mask of valid pipeline entries (RPMask from TXDIVTIME register)
101 *
102 * This is the user-visible read pipeline state structure containing the entries
103 * currently in the read pipeline and the mask of valid entries.
104 *
105 * It can be accessed through PTRACE_GETREGSET with NT_METAG_RPIPE.
106 */
107struct user_rp_state {
108 unsigned long long entries[6];
109 unsigned long mask;
110};
111
112#endif /* __ASSEMBLY__ */
113
114#endif /* _UAPI_METAG_PTRACE_H */
diff --git a/arch/metag/include/uapi/asm/sigcontext.h b/arch/metag/include/uapi/asm/sigcontext.h
deleted file mode 100644
index ac7e1f28d584..000000000000
--- a/arch/metag/include/uapi/asm/sigcontext.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef _ASM_METAG_SIGCONTEXT_H
3#define _ASM_METAG_SIGCONTEXT_H
4
5#include <asm/ptrace.h>
6
7/*
8 * In a sigcontext structure we need to store the active state of the
9 * user process so that it does not get trashed when we call the signal
10 * handler. That not really the same as a user context that we are
11 * going to store on syscall etc.
12 */
13struct sigcontext {
14 struct user_gp_regs regs; /* needs to be first */
15
16 /*
17 * Catch registers describing a memory fault.
18 * If USER_GP_REGS_STATUS_CATCH_BIT is set in regs.status then catch
19 * buffers have been saved and will be replayed on sigreturn.
20 * Clear that bit to discard the catch state instead of replaying it.
21 */
22 struct user_cb_regs cb;
23
24 /*
25 * Read pipeline state. This will get restored on sigreturn.
26 */
27 struct user_rp_state rp;
28
29 unsigned long oldmask;
30};
31
32#endif
diff --git a/arch/metag/include/uapi/asm/siginfo.h b/arch/metag/include/uapi/asm/siginfo.h
deleted file mode 100644
index 9a3f6cde9487..000000000000
--- a/arch/metag/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1,16 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef _METAG_SIGINFO_H
3#define _METAG_SIGINFO_H
4
5#define __ARCH_SI_TRAPNO
6
7#include <asm-generic/siginfo.h>
8
9/*
10 * SIGFPE si_codes
11 */
12#ifdef __KERNEL__
13#define FPE_FIXME 0 /* Broken dup of SI_USER */
14#endif /* __KERNEL__ */
15
16#endif
diff --git a/arch/metag/include/uapi/asm/swab.h b/arch/metag/include/uapi/asm/swab.h
deleted file mode 100644
index 30d696fcc237..000000000000
--- a/arch/metag/include/uapi/asm/swab.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef __ASM_METAG_SWAB_H
3#define __ASM_METAG_SWAB_H
4
5#include <linux/compiler.h>
6#include <linux/types.h>
7#include <asm-generic/swab.h>
8
9static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
10{
11 return __builtin_metag_bswaps(x);
12}
13#define __arch_swab16 __arch_swab16
14
15static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
16{
17 return __builtin_metag_bswap(x);
18}
19#define __arch_swab32 __arch_swab32
20
21static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
22{
23 return __builtin_metag_bswapll(x);
24}
25#define __arch_swab64 __arch_swab64
26
27#endif /* __ASM_METAG_SWAB_H */
diff --git a/arch/metag/include/uapi/asm/unistd.h b/arch/metag/include/uapi/asm/unistd.h
deleted file mode 100644
index 9f72c4cfcfb5..000000000000
--- a/arch/metag/include/uapi/asm/unistd.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
2/*
3 * Copyright (C) 2012 Imagination Technologies Ltd.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#define __ARCH_WANT_RENAMEAT
12
13/* Use the standard ABI for syscalls. */
14#include <asm-generic/unistd.h>
15
16/* metag-specific syscalls. */
17#define __NR_metag_setglobalbit (__NR_arch_specific_syscall + 1)
18__SYSCALL(__NR_metag_setglobalbit, sys_metag_setglobalbit)
19#define __NR_metag_set_fpu_flags (__NR_arch_specific_syscall + 2)
20__SYSCALL(__NR_metag_set_fpu_flags, sys_metag_set_fpu_flags)
21#define __NR_metag_set_tls (__NR_arch_specific_syscall + 3)
22__SYSCALL(__NR_metag_set_tls, sys_metag_set_tls)
23#define __NR_metag_get_tls (__NR_arch_specific_syscall + 4)
24__SYSCALL(__NR_metag_get_tls, sys_metag_get_tls)
diff --git a/arch/metag/kernel/.gitignore b/arch/metag/kernel/.gitignore
deleted file mode 100644
index c5f676c3c224..000000000000
--- a/arch/metag/kernel/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
1vmlinux.lds
diff --git a/arch/metag/kernel/Makefile b/arch/metag/kernel/Makefile
deleted file mode 100644
index 73441d8c0369..000000000000
--- a/arch/metag/kernel/Makefile
+++ /dev/null
@@ -1,40 +0,0 @@
1# SPDX-License-Identifier: GPL-2.0
2#
3# Makefile for the Linux/Meta kernel.
4#
5
6extra-y += head.o
7extra-y += vmlinux.lds
8
9obj-y += cachepart.o
10obj-y += clock.o
11obj-y += core_reg.o
12obj-y += devtree.o
13obj-y += dma.o
14obj-y += irq.o
15obj-y += kick.o
16obj-y += machines.o
17obj-y += process.o
18obj-y += ptrace.o
19obj-y += setup.o
20obj-y += signal.o
21obj-y += stacktrace.o
22obj-y += sys_metag.o
23obj-y += tbiunexp.o
24obj-y += time.o
25obj-y += topology.o
26obj-y += traps.o
27obj-y += user_gateway.o
28
29obj-$(CONFIG_PERF_EVENTS) += perf/
30
31obj-$(CONFIG_METAG_COREMEM) += coremem.o
32obj-$(CONFIG_METAG_DA) += da.o
33obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
34obj-$(CONFIG_FUNCTION_TRACER) += ftrace_stub.o
35obj-$(CONFIG_MODULES) += metag_ksyms.o
36obj-$(CONFIG_MODULES) += module.o
37obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
38obj-$(CONFIG_SMP) += smp.o
39obj-$(CONFIG_METAG_SUSPEND_MEM) += suspend.o
40obj-$(CONFIG_METAG_USER_TCM) += tcm.o
diff --git a/arch/metag/kernel/asm-offsets.c b/arch/metag/kernel/asm-offsets.c
deleted file mode 100644
index d9b348b99ff2..000000000000
--- a/arch/metag/kernel/asm-offsets.c
+++ /dev/null
@@ -1,15 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * This program is used to generate definitions needed by
4 * assembly language modules.
5 *
6 */
7
8#include <linux/kbuild.h>
9#include <linux/thread_info.h>
10
11int main(void)
12{
13 DEFINE(THREAD_INFO_SIZE, sizeof(struct thread_info));
14 return 0;
15}
diff --git a/arch/metag/kernel/cachepart.c b/arch/metag/kernel/cachepart.c
deleted file mode 100644
index 6e0f8a80cc96..000000000000
--- a/arch/metag/kernel/cachepart.c
+++ /dev/null
@@ -1,132 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Meta cache partition manipulation.
4 *
5 * Copyright 2010 Imagination Technologies Ltd.
6 */
7
8#include <linux/kernel.h>
9#include <linux/io.h>
10#include <linux/errno.h>
11#include <asm/processor.h>
12#include <asm/cachepart.h>
13#include <asm/metag_isa.h>
14#include <asm/metag_mem.h>
15
16#define SYSC_DCPART(n) (SYSC_DCPART0 + SYSC_xCPARTn_STRIDE * (n))
17#define SYSC_ICPART(n) (SYSC_ICPART0 + SYSC_xCPARTn_STRIDE * (n))
18
19#define CACHE_ASSOCIATIVITY 4 /* 4 way set-associative */
20#define ICACHE 0
21#define DCACHE 1
22
23/* The CORE_CONFIG2 register is not available on Meta 1 */
24#ifdef CONFIG_METAG_META21
25unsigned int get_dcache_size(void)
26{
27 unsigned int config2 = metag_in32(METAC_CORE_CONFIG2);
28 unsigned int sz = 0x1000 << ((config2 & METAC_CORECFG2_DCSZ_BITS)
29 >> METAC_CORECFG2_DCSZ_S);
30 if (config2 & METAC_CORECFG2_DCSMALL_BIT)
31 sz >>= 6;
32 return sz;
33}
34
35unsigned int get_icache_size(void)
36{
37 unsigned int config2 = metag_in32(METAC_CORE_CONFIG2);
38 unsigned int sz = 0x1000 << ((config2 & METAC_CORE_C2ICSZ_BITS)
39 >> METAC_CORE_C2ICSZ_S);
40 if (config2 & METAC_CORECFG2_ICSMALL_BIT)
41 sz >>= 6;
42 return sz;
43}
44
45unsigned int get_global_dcache_size(void)
46{
47 unsigned int cpart = metag_in32(SYSC_DCPART(hard_processor_id()));
48 unsigned int temp = cpart & SYSC_xCPARTG_AND_BITS;
49 return (get_dcache_size() * ((temp >> SYSC_xCPARTG_AND_S) + 1)) >> 4;
50}
51
52unsigned int get_global_icache_size(void)
53{
54 unsigned int cpart = metag_in32(SYSC_ICPART(hard_processor_id()));
55 unsigned int temp = cpart & SYSC_xCPARTG_AND_BITS;
56 return (get_icache_size() * ((temp >> SYSC_xCPARTG_AND_S) + 1)) >> 4;
57}
58
59static int get_thread_cache_size(unsigned int cache, int thread_id)
60{
61 unsigned int cache_size;
62 unsigned int t_cache_part;
63 unsigned int isEnabled;
64 unsigned int offset = 0;
65 isEnabled = (cache == DCACHE ? metag_in32(MMCU_DCACHE_CTRL_ADDR) & 0x1 :
66 metag_in32(MMCU_ICACHE_CTRL_ADDR) & 0x1);
67 if (!isEnabled)
68 return 0;
69#if PAGE_OFFSET >= LINGLOBAL_BASE
70 /* Checking for global cache */
71 cache_size = (cache == DCACHE ? get_global_dcache_size() :
72 get_global_icache_size());
73 offset = 8;
74#else
75 cache_size = (cache == DCACHE ? get_dcache_size() :
76 get_icache_size());
77#endif
78 t_cache_part = (cache == DCACHE ?
79 (metag_in32(SYSC_DCPART(thread_id)) >> offset) & 0xF :
80 (metag_in32(SYSC_ICPART(thread_id)) >> offset) & 0xF);
81 switch (t_cache_part) {
82 case 0xF:
83 return cache_size;
84 case 0x7:
85 return cache_size / 2;
86 case 0x3:
87 return cache_size / 4;
88 case 0x1:
89 return cache_size / 8;
90 case 0:
91 return cache_size / 16;
92 }
93 return -1;
94}
95
96void check_for_cache_aliasing(int thread_id)
97{
98 int thread_cache_size;
99 unsigned int cache_type;
100 for (cache_type = ICACHE; cache_type <= DCACHE; cache_type++) {
101 thread_cache_size =
102 get_thread_cache_size(cache_type, thread_id);
103 if (thread_cache_size < 0)
104 pr_emerg("Can't read %s cache size\n",
105 cache_type ? "DCACHE" : "ICACHE");
106 else if (thread_cache_size == 0)
107 /* Cache is off. No need to check for aliasing */
108 continue;
109 if (thread_cache_size / CACHE_ASSOCIATIVITY > PAGE_SIZE) {
110 pr_emerg("Potential cache aliasing detected in %s on Thread %d\n",
111 cache_type ? "DCACHE" : "ICACHE", thread_id);
112 pr_warn("Total %s size: %u bytes\n",
113 cache_type ? "DCACHE" : "ICACHE",
114 cache_type ? get_dcache_size()
115 : get_icache_size());
116 pr_warn("Thread %s size: %d bytes\n",
117 cache_type ? "CACHE" : "ICACHE",
118 thread_cache_size);
119 pr_warn("Page Size: %lu bytes\n", PAGE_SIZE);
120 panic("Potential cache aliasing detected");
121 }
122 }
123}
124
125#else
126
127void check_for_cache_aliasing(int thread_id)
128{
129 return;
130}
131
132#endif
diff --git a/arch/metag/kernel/clock.c b/arch/metag/kernel/clock.c
deleted file mode 100644
index 6339c9c6d0ab..000000000000
--- a/arch/metag/kernel/clock.c
+++ /dev/null
@@ -1,110 +0,0 @@
1/*
2 * arch/metag/kernel/clock.c
3 *
4 * Copyright (C) 2012 Imagination Technologies Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/clk.h>
12#include <linux/delay.h>
13#include <linux/io.h>
14#include <linux/of.h>
15
16#include <asm/param.h>
17#include <asm/clock.h>
18
19struct meta_clock_desc _meta_clock;
20
21/* Default machine get_core_freq callback. */
22static unsigned long get_core_freq_default(void)
23{
24#ifdef CONFIG_METAG_META21
25 /*
26 * Meta 2 cores divide down the core clock for the Meta timers, so we
27 * can estimate the core clock from the divider.
28 */
29 return (metag_in32(EXPAND_TIMER_DIV) + 1) * 1000000;
30#else
31 /*
32 * On Meta 1 we don't know the core clock, but assuming the Meta timer
33 * is correct it can be estimated based on loops_per_jiffy.
34 */
35 return (loops_per_jiffy * HZ * 5) >> 1;
36#endif
37}
38
39static struct clk *clk_core;
40
41/* Clk based get_core_freq callback. */
42static unsigned long get_core_freq_clk(void)
43{
44 return clk_get_rate(clk_core);
45}
46
47/**
48 * init_metag_core_clock() - Set up core clock from devicetree.
49 *
50 * Checks to see if a "core" clock is provided in the device tree, and overrides
51 * the get_core_freq callback to use it.
52 */
53static void __init init_metag_core_clock(void)
54{
55 /*
56 * See if a core clock is provided by the devicetree (and
57 * registered by the init callback above).
58 */
59 struct device_node *node;
60 node = of_find_compatible_node(NULL, NULL, "img,meta");
61 if (!node) {
62 pr_warn("%s: no compatible img,meta DT node found\n",
63 __func__);
64 return;
65 }
66
67 clk_core = of_clk_get_by_name(node, "core");
68 if (IS_ERR(clk_core)) {
69 pr_warn("%s: no core clock found in DT\n",
70 __func__);
71 return;
72 }
73
74 /*
75 * Override the core frequency callback to use
76 * this clk.
77 */
78 _meta_clock.get_core_freq = get_core_freq_clk;
79}
80
81/**
82 * init_metag_clocks() - Set up clocks from devicetree.
83 *
84 * Set up important clocks from device tree. In particular any needed for clock
85 * sources.
86 */
87void __init init_metag_clocks(void)
88{
89 init_metag_core_clock();
90
91 pr_info("Core clock frequency: %lu Hz\n", get_coreclock());
92}
93
94/**
95 * setup_meta_clocks() - Early set up of the Meta clock.
96 * @desc: Clock descriptor usually provided by machine description
97 *
98 * Ensures all callbacks are valid.
99 */
100void __init setup_meta_clocks(struct meta_clock_desc *desc)
101{
102 /* copy callbacks */
103 if (desc)
104 _meta_clock = *desc;
105
106 /* set fallback functions */
107 if (!_meta_clock.get_core_freq)
108 _meta_clock.get_core_freq = get_core_freq_default;
109}
110
diff --git a/arch/metag/kernel/core_reg.c b/arch/metag/kernel/core_reg.c
deleted file mode 100644
index df2833f2766f..000000000000
--- a/arch/metag/kernel/core_reg.c
+++ /dev/null
@@ -1,118 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Support for reading and writing Meta core internal registers.
4 *
5 * Copyright (C) 2011 Imagination Technologies Ltd.
6 *
7 */
8
9#include <linux/delay.h>
10#include <linux/export.h>
11
12#include <asm/core_reg.h>
13#include <asm/global_lock.h>
14#include <asm/hwthread.h>
15#include <asm/io.h>
16#include <asm/metag_mem.h>
17#include <asm/metag_regs.h>
18
19#define UNIT_BIT_MASK TXUXXRXRQ_UXX_BITS
20#define REG_BIT_MASK TXUXXRXRQ_RX_BITS
21#define THREAD_BIT_MASK TXUXXRXRQ_TX_BITS
22
23#define UNIT_SHIFTS TXUXXRXRQ_UXX_S
24#define REG_SHIFTS TXUXXRXRQ_RX_S
25#define THREAD_SHIFTS TXUXXRXRQ_TX_S
26
27#define UNIT_VAL(x) (((x) << UNIT_SHIFTS) & UNIT_BIT_MASK)
28#define REG_VAL(x) (((x) << REG_SHIFTS) & REG_BIT_MASK)
29#define THREAD_VAL(x) (((x) << THREAD_SHIFTS) & THREAD_BIT_MASK)
30
31/*
32 * core_reg_write() - modify the content of a register in a core unit.
33 * @unit: The unit to be modified.
34 * @reg: Register number within the unit.
35 * @thread: The thread we want to access.
36 * @val: The new value to write.
37 *
38 * Check asm/metag_regs.h for a list/defines of supported units (ie: TXUPC_ID,
39 * TXUTR_ID, etc), and regnums within the units (ie: TXMASKI_REGNUM,
40 * TXPOLLI_REGNUM, etc).
41 */
42void core_reg_write(int unit, int reg, int thread, unsigned int val)
43{
44 unsigned long flags;
45
46 /* TXUCT_ID has its own memory mapped registers */
47 if (unit == TXUCT_ID) {
48 void __iomem *cu_reg = __CU_addr(thread, reg);
49 metag_out32(val, cu_reg);
50 return;
51 }
52
53 __global_lock2(flags);
54
55 /* wait for ready */
56 while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT))
57 udelay(10);
58
59 /* set the value to write */
60 metag_out32(val, TXUXXRXDT);
61
62 /* set the register to write */
63 val = UNIT_VAL(unit) | REG_VAL(reg) | THREAD_VAL(thread);
64 metag_out32(val, TXUXXRXRQ);
65
66 /* wait for finish */
67 while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT))
68 udelay(10);
69
70 __global_unlock2(flags);
71}
72EXPORT_SYMBOL(core_reg_write);
73
74/*
75 * core_reg_read() - read the content of a register in a core unit.
76 * @unit: The unit to be modified.
77 * @reg: Register number within the unit.
78 * @thread: The thread we want to access.
79 *
80 * Check asm/metag_regs.h for a list/defines of supported units (ie: TXUPC_ID,
81 * TXUTR_ID, etc), and regnums within the units (ie: TXMASKI_REGNUM,
82 * TXPOLLI_REGNUM, etc).
83 */
84unsigned int core_reg_read(int unit, int reg, int thread)
85{
86 unsigned long flags;
87 unsigned int val;
88
89 /* TXUCT_ID has its own memory mapped registers */
90 if (unit == TXUCT_ID) {
91 void __iomem *cu_reg = __CU_addr(thread, reg);
92 val = metag_in32(cu_reg);
93 return val;
94 }
95
96 __global_lock2(flags);
97
98 /* wait for ready */
99 while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT))
100 udelay(10);
101
102 /* set the register to read */
103 val = (UNIT_VAL(unit) | REG_VAL(reg) | THREAD_VAL(thread) |
104 TXUXXRXRQ_RDnWR_BIT);
105 metag_out32(val, TXUXXRXRQ);
106
107 /* wait for finish */
108 while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT))
109 udelay(10);
110
111 /* read the register value */
112 val = metag_in32(TXUXXRXDT);
113
114 __global_unlock2(flags);
115
116 return val;
117}
118EXPORT_SYMBOL(core_reg_read);
diff --git a/arch/metag/kernel/da.c b/arch/metag/kernel/da.c
deleted file mode 100644
index a35dbed6fffa..000000000000
--- a/arch/metag/kernel/da.c
+++ /dev/null
@@ -1,25 +0,0 @@
1/*
2 * Meta DA JTAG debugger control.
3 *
4 * Copyright 2012 Imagination Technologies Ltd.
5 */
6
7
8#include <linux/export.h>
9#include <linux/io.h>
10#include <linux/kernel.h>
11#include <asm/da.h>
12#include <asm/metag_mem.h>
13
14bool _metag_da_present;
15EXPORT_SYMBOL_GPL(_metag_da_present);
16
17int __init metag_da_probe(void)
18{
19 _metag_da_present = (metag_in32(T0VECINT_BHALT) == 1);
20 if (_metag_da_present)
21 pr_info("DA present\n");
22 else
23 pr_info("DA not present\n");
24 return 0;
25}
diff --git a/arch/metag/kernel/devtree.c b/arch/metag/kernel/devtree.c
deleted file mode 100644
index 6af749a64438..000000000000
--- a/arch/metag/kernel/devtree.c
+++ /dev/null
@@ -1,57 +0,0 @@
1/*
2 * linux/arch/metag/kernel/devtree.c
3 *
4 * Copyright (C) 2012 Imagination Technologies Ltd.
5 *
6 * Based on ARM version:
7 * Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/init.h>
15#include <linux/export.h>
16#include <linux/types.h>
17#include <linux/of_fdt.h>
18
19#include <asm/setup.h>
20#include <asm/page.h>
21#include <asm/mach/arch.h>
22
23static const void * __init arch_get_next_mach(const char *const **match)
24{
25 static const struct machine_desc *mdesc = __arch_info_begin;
26 const struct machine_desc *m = mdesc;
27
28 if (m >= __arch_info_end)
29 return NULL;
30
31 mdesc++;
32 *match = m->dt_compat;
33 return m;
34}
35
36/**
37 * setup_machine_fdt - Machine setup when an dtb was passed to the kernel
38 * @dt: virtual address pointer to dt blob
39 *
40 * If a dtb was passed to the kernel, then use it to choose the correct
41 * machine_desc and to setup the system.
42 */
43const struct machine_desc * __init setup_machine_fdt(void *dt)
44{
45 const struct machine_desc *mdesc;
46
47 /* check device tree validity */
48 if (!early_init_dt_scan(dt))
49 return NULL;
50
51 mdesc = of_flat_dt_match_machine(NULL, arch_get_next_mach);
52 if (!mdesc)
53 dump_machine_table(); /* does not return */
54 pr_info("Machine name: %s\n", mdesc->name);
55
56 return mdesc;
57}
diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c
deleted file mode 100644
index f0ab3a498328..000000000000
--- a/arch/metag/kernel/dma.c
+++ /dev/null
@@ -1,588 +0,0 @@
1/*
2 * Meta version derived from arch/powerpc/lib/dma-noncoherent.c
3 * Copyright (C) 2008 Imagination Technologies Ltd.
4 *
5 * PowerPC version derived from arch/arm/mm/consistent.c
6 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
7 *
8 * Copyright (C) 2000 Russell King
9 *
10 * Consistent memory allocators. Used for DMA devices that want to
11 * share uncached memory with the processor core. The function return
12 * is the virtual address and 'dma_handle' is the physical address.
13 * Mostly stolen from the ARM port, with some changes for PowerPC.
14 * -- Dan
15 *
16 * Reorganized to get rid of the arch-specific consistent_* functions
17 * and provide non-coherent implementations for the DMA API. -Matt
18 *
19 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
20 * implementation. This is pulled straight from ARM and barely
21 * modified. -Matt
22 *
23 * This program is free software; you can redistribute it and/or modify
24 * it under the terms of the GNU General Public License version 2 as
25 * published by the Free Software Foundation.
26 */
27
28#include <linux/sched.h>
29#include <linux/kernel.h>
30#include <linux/errno.h>
31#include <linux/export.h>
32#include <linux/string.h>
33#include <linux/types.h>
34#include <linux/highmem.h>
35#include <linux/dma-mapping.h>
36#include <linux/slab.h>
37
38#include <asm/tlbflush.h>
39#include <asm/mmu.h>
40
41#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_START) \
42 >> PAGE_SHIFT)
43
44static u64 get_coherent_dma_mask(struct device *dev)
45{
46 u64 mask = ~0ULL;
47
48 if (dev) {
49 mask = dev->coherent_dma_mask;
50
51 /*
52 * Sanity check the DMA mask - it must be non-zero, and
53 * must be able to be satisfied by a DMA allocation.
54 */
55 if (mask == 0) {
56 dev_warn(dev, "coherent DMA mask is unset\n");
57 return 0;
58 }
59 }
60
61 return mask;
62}
63/*
64 * This is the page table (2MB) covering uncached, DMA consistent allocations
65 */
66static pte_t *consistent_pte;
67static DEFINE_SPINLOCK(consistent_lock);
68
69/*
70 * VM region handling support.
71 *
72 * This should become something generic, handling VM region allocations for
73 * vmalloc and similar (ioremap, module space, etc).
74 *
75 * I envisage vmalloc()'s supporting vm_struct becoming:
76 *
77 * struct vm_struct {
78 * struct metag_vm_region region;
79 * unsigned long flags;
80 * struct page **pages;
81 * unsigned int nr_pages;
82 * unsigned long phys_addr;
83 * };
84 *
85 * get_vm_area() would then call metag_vm_region_alloc with an appropriate
86 * struct metag_vm_region head (eg):
87 *
88 * struct metag_vm_region vmalloc_head = {
89 * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list),
90 * .vm_start = VMALLOC_START,
91 * .vm_end = VMALLOC_END,
92 * };
93 *
94 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
95 * the amount of RAM found at boot time.) I would imagine that get_vm_area()
96 * would have to initialise this each time prior to calling
97 * metag_vm_region_alloc().
98 */
99struct metag_vm_region {
100 struct list_head vm_list;
101 unsigned long vm_start;
102 unsigned long vm_end;
103 struct page *vm_pages;
104 int vm_active;
105};
106
107static struct metag_vm_region consistent_head = {
108 .vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
109 .vm_start = CONSISTENT_START,
110 .vm_end = CONSISTENT_END,
111};
112
113static struct metag_vm_region *metag_vm_region_alloc(struct metag_vm_region
114 *head, size_t size,
115 gfp_t gfp)
116{
117 unsigned long addr = head->vm_start, end = head->vm_end - size;
118 unsigned long flags;
119 struct metag_vm_region *c, *new;
120
121 new = kmalloc(sizeof(struct metag_vm_region), gfp);
122 if (!new)
123 goto out;
124
125 spin_lock_irqsave(&consistent_lock, flags);
126
127 list_for_each_entry(c, &head->vm_list, vm_list) {
128 if ((addr + size) < addr)
129 goto nospc;
130 if ((addr + size) <= c->vm_start)
131 goto found;
132 addr = c->vm_end;
133 if (addr > end)
134 goto nospc;
135 }
136
137found:
138 /*
139 * Insert this entry _before_ the one we found.
140 */
141 list_add_tail(&new->vm_list, &c->vm_list);
142 new->vm_start = addr;
143 new->vm_end = addr + size;
144 new->vm_active = 1;
145
146 spin_unlock_irqrestore(&consistent_lock, flags);
147 return new;
148
149nospc:
150 spin_unlock_irqrestore(&consistent_lock, flags);
151 kfree(new);
152out:
153 return NULL;
154}
155
156static struct metag_vm_region *metag_vm_region_find(struct metag_vm_region
157 *head, unsigned long addr)
158{
159 struct metag_vm_region *c;
160
161 list_for_each_entry(c, &head->vm_list, vm_list) {
162 if (c->vm_active && c->vm_start == addr)
163 goto out;
164 }
165 c = NULL;
166out:
167 return c;
168}
169
170/*
171 * Allocate DMA-coherent memory space and return both the kernel remapped
172 * virtual and bus address for that space.
173 */
174static void *metag_dma_alloc(struct device *dev, size_t size,
175 dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
176{
177 struct page *page;
178 struct metag_vm_region *c;
179 unsigned long order;
180 u64 mask = get_coherent_dma_mask(dev);
181 u64 limit;
182
183 if (!consistent_pte) {
184 pr_err("%s: not initialised\n", __func__);
185 dump_stack();
186 return NULL;
187 }
188
189 if (!mask)
190 goto no_page;
191 size = PAGE_ALIGN(size);
192 limit = (mask + 1) & ~mask;
193 if ((limit && size >= limit)
194 || size >= (CONSISTENT_END - CONSISTENT_START)) {
195 pr_warn("coherent allocation too big (requested %#x mask %#Lx)\n",
196 size, mask);
197 return NULL;
198 }
199
200 order = get_order(size);
201
202 if (mask != 0xffffffff)
203 gfp |= GFP_DMA;
204
205 page = alloc_pages(gfp, order);
206 if (!page)
207 goto no_page;
208
209 /*
210 * Invalidate any data that might be lurking in the
211 * kernel direct-mapped region for device DMA.
212 */
213 {
214 void *kaddr = page_address(page);
215 memset(kaddr, 0, size);
216 flush_dcache_region(kaddr, size);
217 }
218
219 /*
220 * Allocate a virtual address in the consistent mapping region.
221 */
222 c = metag_vm_region_alloc(&consistent_head, size,
223 gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
224 if (c) {
225 unsigned long vaddr = c->vm_start;
226 pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
227 struct page *end = page + (1 << order);
228
229 c->vm_pages = page;
230 split_page(page, order);
231
232 /*
233 * Set the "dma handle"
234 */
235 *handle = page_to_bus(page);
236
237 do {
238 BUG_ON(!pte_none(*pte));
239
240 SetPageReserved(page);
241 set_pte_at(&init_mm, vaddr,
242 pte, mk_pte(page,
243 pgprot_writecombine
244 (PAGE_KERNEL)));
245 page++;
246 pte++;
247 vaddr += PAGE_SIZE;
248 } while (size -= PAGE_SIZE);
249
250 /*
251 * Free the otherwise unused pages.
252 */
253 while (page < end) {
254 __free_page(page);
255 page++;
256 }
257
258 return (void *)c->vm_start;
259 }
260
261 if (page)
262 __free_pages(page, order);
263no_page:
264 return NULL;
265}
266
267/*
268 * free a page as defined by the above mapping.
269 */
270static void metag_dma_free(struct device *dev, size_t size, void *vaddr,
271 dma_addr_t dma_handle, unsigned long attrs)
272{
273 struct metag_vm_region *c;
274 unsigned long flags, addr;
275 pte_t *ptep;
276
277 size = PAGE_ALIGN(size);
278
279 spin_lock_irqsave(&consistent_lock, flags);
280
281 c = metag_vm_region_find(&consistent_head, (unsigned long)vaddr);
282 if (!c)
283 goto no_area;
284
285 c->vm_active = 0;
286 if ((c->vm_end - c->vm_start) != size) {
287 pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
288 __func__, c->vm_end - c->vm_start, size);
289 dump_stack();
290 size = c->vm_end - c->vm_start;
291 }
292
293 ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
294 addr = c->vm_start;
295 do {
296 pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
297 unsigned long pfn;
298
299 ptep++;
300 addr += PAGE_SIZE;
301
302 if (!pte_none(pte) && pte_present(pte)) {
303 pfn = pte_pfn(pte);
304
305 if (pfn_valid(pfn)) {
306 struct page *page = pfn_to_page(pfn);
307 __free_reserved_page(page);
308 continue;
309 }
310 }
311
312 pr_crit("%s: bad page in kernel page table\n",
313 __func__);
314 } while (size -= PAGE_SIZE);
315
316 flush_tlb_kernel_range(c->vm_start, c->vm_end);
317
318 list_del(&c->vm_list);
319
320 spin_unlock_irqrestore(&consistent_lock, flags);
321
322 kfree(c);
323 return;
324
325no_area:
326 spin_unlock_irqrestore(&consistent_lock, flags);
327 pr_err("%s: trying to free invalid coherent area: %p\n",
328 __func__, vaddr);
329 dump_stack();
330}
331
332static int metag_dma_mmap(struct device *dev, struct vm_area_struct *vma,
333 void *cpu_addr, dma_addr_t dma_addr, size_t size,
334 unsigned long attrs)
335{
336 unsigned long flags, user_size, kern_size;
337 struct metag_vm_region *c;
338 int ret = -ENXIO;
339
340 if (attrs & DMA_ATTR_WRITE_COMBINE)
341 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
342 else
343 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
344
345 user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
346
347 spin_lock_irqsave(&consistent_lock, flags);
348 c = metag_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
349 spin_unlock_irqrestore(&consistent_lock, flags);
350
351 if (c) {
352 unsigned long off = vma->vm_pgoff;
353
354 kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;
355
356 if (off < kern_size &&
357 user_size <= (kern_size - off)) {
358 ret = remap_pfn_range(vma, vma->vm_start,
359 page_to_pfn(c->vm_pages) + off,
360 user_size << PAGE_SHIFT,
361 vma->vm_page_prot);
362 }
363 }
364
365
366 return ret;
367}
368
369/*
370 * Initialise the consistent memory allocation.
371 */
372static int __init dma_alloc_init(void)
373{
374 pgd_t *pgd, *pgd_k;
375 pud_t *pud, *pud_k;
376 pmd_t *pmd, *pmd_k;
377 pte_t *pte;
378 int ret = 0;
379
380 do {
381 int offset = pgd_index(CONSISTENT_START);
382 pgd = pgd_offset(&init_mm, CONSISTENT_START);
383 pud = pud_alloc(&init_mm, pgd, CONSISTENT_START);
384 pmd = pmd_alloc(&init_mm, pud, CONSISTENT_START);
385 WARN_ON(!pmd_none(*pmd));
386
387 pte = pte_alloc_kernel(pmd, CONSISTENT_START);
388 if (!pte) {
389 pr_err("%s: no pte tables\n", __func__);
390 ret = -ENOMEM;
391 break;
392 }
393
394 pgd_k = ((pgd_t *) mmu_get_base()) + offset;
395 pud_k = pud_offset(pgd_k, CONSISTENT_START);
396 pmd_k = pmd_offset(pud_k, CONSISTENT_START);
397 set_pmd(pmd_k, *pmd);
398
399 consistent_pte = pte;
400 } while (0);
401
402 return ret;
403}
404early_initcall(dma_alloc_init);
405
406/*
407 * make an area consistent to devices.
408 */
409static void dma_sync_for_device(void *vaddr, size_t size, int dma_direction)
410{
411 /*
412 * Ensure any writes get through the write combiner. This is necessary
413 * even with DMA_FROM_DEVICE, or the write may dirty the cache after
414 * we've invalidated it and get written back during the DMA.
415 */
416
417 barrier();
418
419 switch (dma_direction) {
420 case DMA_BIDIRECTIONAL:
421 /*
422 * Writeback to ensure the device can see our latest changes and
423 * so that we have no dirty lines, and invalidate the cache
424 * lines too in preparation for receiving the buffer back
425 * (dma_sync_for_cpu) later.
426 */
427 flush_dcache_region(vaddr, size);
428 break;
429 case DMA_TO_DEVICE:
430 /*
431 * Writeback to ensure the device can see our latest changes.
432 * There's no need to invalidate as the device shouldn't write
433 * to the buffer.
434 */
435 writeback_dcache_region(vaddr, size);
436 break;
437 case DMA_FROM_DEVICE:
438 /*
439 * Invalidate to ensure we have no dirty lines that could get
440 * written back during the DMA. It's also safe to flush
441 * (writeback) here if necessary.
442 */
443 invalidate_dcache_region(vaddr, size);
444 break;
445 case DMA_NONE:
446 BUG();
447 }
448
449 wmb();
450}
451
452/*
453 * make an area consistent to the core.
454 */
455static void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
456{
457 /*
458 * Hardware L2 cache prefetch doesn't occur across 4K physical
459 * boundaries, however according to Documentation/DMA-API-HOWTO.txt
460 * kmalloc'd memory is DMA'able, so accesses in nearby memory could
461 * trigger a cache fill in the DMA buffer.
462 *
463 * This should never cause dirty lines, so a flush or invalidate should
464 * be safe to allow us to see data from the device.
465 */
466 if (_meta_l2c_pf_is_enabled()) {
467 switch (dma_direction) {
468 case DMA_BIDIRECTIONAL:
469 case DMA_FROM_DEVICE:
470 invalidate_dcache_region(vaddr, size);
471 break;
472 case DMA_TO_DEVICE:
473 /* The device shouldn't have written to the buffer */
474 break;
475 case DMA_NONE:
476 BUG();
477 }
478 }
479
480 rmb();
481}
482
483static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page,
484 unsigned long offset, size_t size,
485 enum dma_data_direction direction, unsigned long attrs)
486{
487 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
488 dma_sync_for_device((void *)(page_to_phys(page) + offset),
489 size, direction);
490 return page_to_phys(page) + offset;
491}
492
493static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
494 size_t size, enum dma_data_direction direction,
495 unsigned long attrs)
496{
497 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
498 dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
499}
500
501static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist,
502 int nents, enum dma_data_direction direction,
503 unsigned long attrs)
504{
505 struct scatterlist *sg;
506 int i;
507
508 for_each_sg(sglist, sg, nents, i) {
509 BUG_ON(!sg_page(sg));
510
511 sg->dma_address = sg_phys(sg);
512
513 if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
514 continue;
515
516 dma_sync_for_device(sg_virt(sg), sg->length, direction);
517 }
518
519 return nents;
520}
521
522
523static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
524 int nhwentries, enum dma_data_direction direction,
525 unsigned long attrs)
526{
527 struct scatterlist *sg;
528 int i;
529
530 for_each_sg(sglist, sg, nhwentries, i) {
531 BUG_ON(!sg_page(sg));
532
533 sg->dma_address = sg_phys(sg);
534
535 if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
536 continue;
537
538 dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
539 }
540}
541
542static void metag_dma_sync_single_for_cpu(struct device *dev,
543 dma_addr_t dma_handle, size_t size,
544 enum dma_data_direction direction)
545{
546 dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
547}
548
549static void metag_dma_sync_single_for_device(struct device *dev,
550 dma_addr_t dma_handle, size_t size,
551 enum dma_data_direction direction)
552{
553 dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
554}
555
556static void metag_dma_sync_sg_for_cpu(struct device *dev,
557 struct scatterlist *sglist, int nelems,
558 enum dma_data_direction direction)
559{
560 int i;
561 struct scatterlist *sg;
562
563 for_each_sg(sglist, sg, nelems, i)
564 dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
565}
566
567static void metag_dma_sync_sg_for_device(struct device *dev,
568 struct scatterlist *sglist, int nelems,
569 enum dma_data_direction direction)
570{
571 int i;
572 struct scatterlist *sg;
573
574 for_each_sg(sglist, sg, nelems, i)
575 dma_sync_for_device(sg_virt(sg), sg->length, direction);
576}
577
578const struct dma_map_ops metag_dma_ops = {
579 .alloc = metag_dma_alloc,
580 .free = metag_dma_free,
581 .map_page = metag_dma_map_page,
582 .map_sg = metag_dma_map_sg,
583 .sync_single_for_device = metag_dma_sync_single_for_device,
584 .sync_single_for_cpu = metag_dma_sync_single_for_cpu,
585 .sync_sg_for_cpu = metag_dma_sync_sg_for_cpu,
586 .mmap = metag_dma_mmap,
587};
588EXPORT_SYMBOL(metag_dma_ops);
diff --git a/arch/metag/kernel/ftrace.c b/arch/metag/kernel/ftrace.c
deleted file mode 100644
index f7b23d300881..000000000000
--- a/arch/metag/kernel/ftrace.c
+++ /dev/null
@@ -1,121 +0,0 @@
1/*
2 * Copyright (C) 2008 Imagination Technologies Ltd.
3 * Licensed under the GPL
4 *
5 * Dynamic ftrace support.
6 */
7
8#include <linux/ftrace.h>
9#include <linux/io.h>
10#include <linux/uaccess.h>
11
12#include <asm/cacheflush.h>
13
14#define D04_MOVT_TEMPLATE 0x02200005
15#define D04_CALL_TEMPLATE 0xAC200005
16#define D1RTP_MOVT_TEMPLATE 0x03200005
17#define D1RTP_CALL_TEMPLATE 0xAC200006
18
19static const unsigned long NOP[2] = {0xa0fffffe, 0xa0fffffe};
20static unsigned long movt_and_call_insn[2];
21
22static unsigned char *ftrace_nop_replace(void)
23{
24 return (char *)&NOP[0];
25}
26
27static unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr)
28{
29 unsigned long hi16, low16;
30
31 hi16 = (addr & 0xffff0000) >> 13;
32 low16 = (addr & 0x0000ffff) << 3;
33
34 /*
35 * The compiler makes the call to mcount_wrapper()
36 * (Meta's wrapper around mcount()) through the register
37 * D0.4. So whenever we're patching one of those compiler-generated
38 * calls we also need to go through D0.4. Otherwise use D1RtP.
39 */
40 if (pc == (unsigned long)&ftrace_call) {
41 writel(D1RTP_MOVT_TEMPLATE | hi16, &movt_and_call_insn[0]);
42 writel(D1RTP_CALL_TEMPLATE | low16, &movt_and_call_insn[1]);
43 } else {
44 writel(D04_MOVT_TEMPLATE | hi16, &movt_and_call_insn[0]);
45 writel(D04_CALL_TEMPLATE | low16, &movt_and_call_insn[1]);
46 }
47
48 return (unsigned char *)&movt_and_call_insn[0];
49}
50
51static int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
52 unsigned char *new_code)
53{
54 unsigned char replaced[MCOUNT_INSN_SIZE];
55
56 /*
57 * Note:
58 * We are paranoid about modifying text, as if a bug was to happen, it
59 * could cause us to read or write to someplace that could cause harm.
60 * Carefully read and modify the code with probe_kernel_*(), and make
61 * sure what we read is what we expected it to be before modifying it.
62 */
63
64 /* read the text we want to modify */
65 if (probe_kernel_read(replaced, (void *)pc, MCOUNT_INSN_SIZE))
66 return -EFAULT;
67
68 /* Make sure it is what we expect it to be */
69 if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
70 return -EINVAL;
71
72 /* replace the text with the new text */
73 if (probe_kernel_write((void *)pc, new_code, MCOUNT_INSN_SIZE))
74 return -EPERM;
75
76 flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
77
78 return 0;
79}
80
81int ftrace_update_ftrace_func(ftrace_func_t func)
82{
83 int ret;
84 unsigned long pc;
85 unsigned char old[MCOUNT_INSN_SIZE], *new;
86
87 pc = (unsigned long)&ftrace_call;
88 memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
89 new = ftrace_call_replace(pc, (unsigned long)func);
90 ret = ftrace_modify_code(pc, old, new);
91
92 return ret;
93}
94
95int ftrace_make_nop(struct module *mod,
96 struct dyn_ftrace *rec, unsigned long addr)
97{
98 unsigned char *new, *old;
99 unsigned long ip = rec->ip;
100
101 old = ftrace_call_replace(ip, addr);
102 new = ftrace_nop_replace();
103
104 return ftrace_modify_code(ip, old, new);
105}
106
107int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
108{
109 unsigned char *new, *old;
110 unsigned long ip = rec->ip;
111
112 old = ftrace_nop_replace();
113 new = ftrace_call_replace(ip, addr);
114
115 return ftrace_modify_code(ip, old, new);
116}
117
118int __init ftrace_dyn_arch_init(void)
119{
120 return 0;
121}
diff --git a/arch/metag/kernel/ftrace_stub.S b/arch/metag/kernel/ftrace_stub.S
deleted file mode 100644
index 3acc288217c0..000000000000
--- a/arch/metag/kernel/ftrace_stub.S
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * Copyright (C) 2008 Imagination Technologies Ltd.
3 * Licensed under the GPL
4 *
5 */
6
7#include <asm/ftrace.h>
8
9 .text
10#ifdef CONFIG_DYNAMIC_FTRACE
11 .global _mcount_wrapper
12 .type _mcount_wrapper,function
13_mcount_wrapper:
14 MOV PC,D0.4
15
16 .global _ftrace_caller
17 .type _ftrace_caller,function
18_ftrace_caller:
19 MSETL [A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4
20 MOV D1Ar1, D0.4
21 MOV D0Ar2, D1RtP
22 SUB D1Ar1,D1Ar1,#MCOUNT_INSN_SIZE
23
24 .global _ftrace_call
25_ftrace_call:
26 MOVT D1RtP,#HI(_ftrace_stub)
27 CALL D1RtP,#LO(_ftrace_stub)
28 GETL D0.4, D1RtP, [A0StP++#(-8)]
29 GETL D0Ar2, D1Ar1, [A0StP++#(-8)]
30 GETL D0Ar4, D1Ar3, [A0StP++#(-8)]
31 GETL D0Ar6, D1Ar5, [A0StP++#(-8)]
32 MOV PC, D0.4
33#else
34
35 .global _mcount_wrapper
36 .type _mcount_wrapper,function
37_mcount_wrapper:
38 MSETL [A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4
39 MOV D1Ar1, D0.4
40 MOV D0Ar2, D1RtP
41 MOVT D0Re0,#HI(_ftrace_trace_function)
42 ADD D0Re0,D0Re0,#LO(_ftrace_trace_function)
43 GET D1Ar3,[D0Re0]
44 MOVT D1Re0,#HI(_ftrace_stub)
45 ADD D1Re0,D1Re0,#LO(_ftrace_stub)
46 CMP D1Ar3,D1Re0
47 BEQ $Ltrace_exit
48 MOV D1RtP,D1Ar3
49 SUB D1Ar1,D1Ar1,#MCOUNT_INSN_SIZE
50 SWAP PC,D1RtP
51$Ltrace_exit:
52 GETL D0.4, D1RtP, [A0StP++#(-8)]
53 GETL D0Ar2, D1Ar1, [A0StP++#(-8)]
54 GETL D0Ar4, D1Ar3, [A0StP++#(-8)]
55 GETL D0Ar6, D1Ar5, [A0StP++#(-8)]
56 MOV PC, D0.4
57
58#endif /* CONFIG_DYNAMIC_FTRACE */
59
60 .global _ftrace_stub
61_ftrace_stub:
62 MOV PC,D1RtP
diff --git a/arch/metag/kernel/head.S b/arch/metag/kernel/head.S
deleted file mode 100644
index 3ed27813413e..000000000000
--- a/arch/metag/kernel/head.S
+++ /dev/null
@@ -1,66 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2 ! Copyright 2005,2006,2007,2009 Imagination Technologies
3
4#include <linux/init.h>
5#include <asm/metag_mem.h>
6#include <generated/asm-offsets.h>
7#undef __exit
8
9 __HEAD
10 ! Setup the stack and get going into _metag_start_kernel
11 .global __start
12 .type __start,function
13__start:
14 ! D1Ar1 contains pTBI (ISTAT)
15 ! D0Ar2 contains pTBI
16 ! D1Ar3 contains __pTBISegs
17 ! D0Ar4 contains kernel arglist pointer
18
19 MOVT D0Re0,#HI(___pTBIs)
20 ADD D0Re0,D0Re0,#LO(___pTBIs)
21 SETL [D0Re0],D0Ar2,D1Ar1
22 MOVT D0Re0,#HI(___pTBISegs)
23 ADD D0Re0,D0Re0,#LO(___pTBISegs)
24 SETD [D0Re0],D1Ar3
25 MOV A0FrP,#0
26 MOV D0Re0,#0
27 MOV D1Re0,#0
28 MOV D1Ar3,#0
29 MOV D1Ar1,D0Ar4 !Store kernel boot params
30 MOV D1Ar5,#0
31 MOV D0Ar6,#0
32#ifdef CONFIG_METAG_DSP
33 MOV D0.8,#0
34#endif
35 MOVT A0StP,#HI(_init_thread_union)
36 ADD A0StP,A0StP,#LO(_init_thread_union)
37 ADD A0StP,A0StP,#THREAD_INFO_SIZE
38 MOVT D1RtP,#HI(_metag_start_kernel)
39 CALL D1RtP,#LO(_metag_start_kernel)
40 .size __start,.-__start
41
42 !! Needed by TBX
43 .global __exit
44 .type __exit,function
45__exit:
46 XOR TXENABLE,D0Re0,D0Re0
47 .size __exit,.-__exit
48
49#ifdef CONFIG_SMP
50 .global _secondary_startup
51 .type _secondary_startup,function
52_secondary_startup:
53#if CONFIG_PAGE_OFFSET < LINGLOBAL_BASE
54 ! In case GCOn has just been turned on we need to fence any writes that
55 ! the boot thread might have performed prior to coherency taking effect.
56 MOVT D0Re0,#HI(LINSYSEVENT_WR_ATOMIC_UNLOCK)
57 MOV D1Re0,#0
58 SETD [D0Re0], D1Re0
59#endif
60 MOVT A0StP,#HI(_secondary_data_stack)
61 ADD A0StP,A0StP,#LO(_secondary_data_stack)
62 GETD A0StP,[A0StP]
63 ADD A0StP,A0StP,#THREAD_INFO_SIZE
64 B _secondary_start_kernel
65 .size _secondary_startup,.-_secondary_startup
66#endif
diff --git a/arch/metag/kernel/irq.c b/arch/metag/kernel/irq.c
deleted file mode 100644
index 704cf17f8370..000000000000
--- a/arch/metag/kernel/irq.c
+++ /dev/null
@@ -1,293 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Linux/Meta general interrupt handling code
4 *
5 */
6
7#include <linux/kernel.h>
8#include <linux/interrupt.h>
9#include <linux/init.h>
10#include <linux/irqchip/metag-ext.h>
11#include <linux/irqchip/metag.h>
12#include <linux/irqdomain.h>
13#include <linux/ratelimit.h>
14
15#include <asm/core_reg.h>
16#include <asm/mach/arch.h>
17#include <linux/uaccess.h>
18
19#ifdef CONFIG_4KSTACKS
20union irq_ctx {
21 struct thread_info tinfo;
22 u32 stack[THREAD_SIZE/sizeof(u32)];
23};
24
25static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
26static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
27#endif
28
29static struct irq_domain *root_domain;
30
31static unsigned int startup_meta_irq(struct irq_data *data)
32{
33 tbi_startup_interrupt(data->hwirq);
34 return 0;
35}
36
37static void shutdown_meta_irq(struct irq_data *data)
38{
39 tbi_shutdown_interrupt(data->hwirq);
40}
41
42void do_IRQ(int irq, struct pt_regs *regs)
43{
44 struct pt_regs *old_regs = set_irq_regs(regs);
45#ifdef CONFIG_4KSTACKS
46 struct irq_desc *desc;
47 union irq_ctx *curctx, *irqctx;
48 u32 *isp;
49#endif
50
51 irq_enter();
52
53 irq = irq_linear_revmap(root_domain, irq);
54
55#ifdef CONFIG_DEBUG_STACKOVERFLOW
56 /* Debugging check for stack overflow: is there less than 1KB free? */
57 {
58 unsigned long sp;
59
60 sp = __core_reg_get(A0StP);
61 sp &= THREAD_SIZE - 1;
62
63 if (unlikely(sp > (THREAD_SIZE - 1024)))
64 pr_err("Stack overflow in do_IRQ: %ld\n", sp);
65 }
66#endif
67
68
69#ifdef CONFIG_4KSTACKS
70 curctx = (union irq_ctx *) current_thread_info();
71 irqctx = hardirq_ctx[smp_processor_id()];
72
73 /*
74 * this is where we switch to the IRQ stack. However, if we are
75 * already using the IRQ stack (because we interrupted a hardirq
76 * handler) we can't do that and just have to keep using the
77 * current stack (which is the irq stack already after all)
78 */
79 if (curctx != irqctx) {
80 /* build the stack frame on the IRQ stack */
81 isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info));
82 irqctx->tinfo.task = curctx->tinfo.task;
83
84 /*
85 * Copy the softirq bits in preempt_count so that the
86 * softirq checks work in the hardirq context.
87 */
88 irqctx->tinfo.preempt_count =
89 (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
90 (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
91
92 desc = irq_to_desc(irq);
93
94 asm volatile (
95 "MOV D0.5,%0\n"
96 "MOV D1Ar1,%1\n"
97 "MOV D1RtP,%2\n"
98 "SWAP A0StP,D0.5\n"
99 "SWAP PC,D1RtP\n"
100 "MOV A0StP,D0.5\n"
101 :
102 : "r" (isp), "r" (desc), "r" (desc->handle_irq)
103 : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
104 "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
105 "D0.5"
106 );
107 } else
108#endif
109 generic_handle_irq(irq);
110
111 irq_exit();
112
113 set_irq_regs(old_regs);
114}
115
116#ifdef CONFIG_4KSTACKS
117
118static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
119
120static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
121
122/*
123 * allocate per-cpu stacks for hardirq and for softirq processing
124 */
125void irq_ctx_init(int cpu)
126{
127 union irq_ctx *irqctx;
128
129 if (hardirq_ctx[cpu])
130 return;
131
132 irqctx = (union irq_ctx *) &hardirq_stack[cpu * THREAD_SIZE];
133 irqctx->tinfo.task = NULL;
134 irqctx->tinfo.cpu = cpu;
135 irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
136 irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
137
138 hardirq_ctx[cpu] = irqctx;
139
140 irqctx = (union irq_ctx *) &softirq_stack[cpu * THREAD_SIZE];
141 irqctx->tinfo.task = NULL;
142 irqctx->tinfo.cpu = cpu;
143 irqctx->tinfo.preempt_count = 0;
144 irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
145
146 softirq_ctx[cpu] = irqctx;
147
148 pr_info("CPU %u irqstacks, hard=%p soft=%p\n",
149 cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
150}
151
152void irq_ctx_exit(int cpu)
153{
154 hardirq_ctx[smp_processor_id()] = NULL;
155}
156
157extern asmlinkage void __do_softirq(void);
158
159void do_softirq_own_stack(void)
160{
161 struct thread_info *curctx;
162 union irq_ctx *irqctx;
163 u32 *isp;
164
165 curctx = current_thread_info();
166 irqctx = softirq_ctx[smp_processor_id()];
167 irqctx->tinfo.task = curctx->task;
168
169 /* build the stack frame on the softirq stack */
170 isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info));
171
172 asm volatile (
173 "MOV D0.5,%0\n"
174 "SWAP A0StP,D0.5\n"
175 "CALLR D1RtP,___do_softirq\n"
176 "MOV A0StP,D0.5\n"
177 :
178 : "r" (isp)
179 : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
180 "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
181 "D0.5"
182 );
183}
184#endif
185
186static struct irq_chip meta_irq_type = {
187 .name = "META-IRQ",
188 .irq_startup = startup_meta_irq,
189 .irq_shutdown = shutdown_meta_irq,
190};
191
192/**
193 * tbisig_map() - Map a TBI signal number to a virtual IRQ number.
194 * @hw: Number of the TBI signal. Must be in range.
195 *
196 * Returns: The virtual IRQ number of the TBI signal number IRQ specified by
197 * @hw.
198 */
199int tbisig_map(unsigned int hw)
200{
201 return irq_create_mapping(root_domain, hw);
202}
203
204/**
205 * metag_tbisig_map() - map a tbi signal to a Linux virtual IRQ number
206 * @d: root irq domain
207 * @irq: virtual irq number
208 * @hw: hardware irq number (TBI signal number)
209 *
210 * This sets up a virtual irq for a specified TBI signal number.
211 */
212static int metag_tbisig_map(struct irq_domain *d, unsigned int irq,
213 irq_hw_number_t hw)
214{
215#ifdef CONFIG_SMP
216 irq_set_chip_and_handler(irq, &meta_irq_type, handle_percpu_irq);
217#else
218 irq_set_chip_and_handler(irq, &meta_irq_type, handle_simple_irq);
219#endif
220 return 0;
221}
222
223static const struct irq_domain_ops metag_tbisig_domain_ops = {
224 .map = metag_tbisig_map,
225};
226
227/*
228 * void init_IRQ(void)
229 *
230 * Parameters: None
231 *
232 * Returns: Nothing
233 *
234 * This function should be called during kernel startup to initialize
235 * the IRQ handling routines.
236 */
237void __init init_IRQ(void)
238{
239 root_domain = irq_domain_add_linear(NULL, 32,
240 &metag_tbisig_domain_ops, NULL);
241 if (unlikely(!root_domain))
242 panic("init_IRQ: cannot add root IRQ domain");
243
244 irq_ctx_init(smp_processor_id());
245
246 init_internal_IRQ();
247 init_external_IRQ();
248
249 if (machine_desc->init_irq)
250 machine_desc->init_irq();
251}
252
253int __init arch_probe_nr_irqs(void)
254{
255 if (machine_desc->nr_irqs)
256 nr_irqs = machine_desc->nr_irqs;
257 return 0;
258}
259
260#ifdef CONFIG_HOTPLUG_CPU
261/*
262 * The CPU has been marked offline. Migrate IRQs off this CPU. If
263 * the affinity settings do not allow other CPUs, force them onto any
264 * available CPU.
265 */
266void migrate_irqs(void)
267{
268 unsigned int i, cpu = smp_processor_id();
269
270 for_each_active_irq(i) {
271 struct irq_data *data = irq_get_irq_data(i);
272 struct cpumask *mask;
273 unsigned int newcpu;
274
275 if (irqd_is_per_cpu(data))
276 continue;
277
278 mask = irq_data_get_affinity_mask(data);
279 if (!cpumask_test_cpu(cpu, mask))
280 continue;
281
282 newcpu = cpumask_any_and(mask, cpu_online_mask);
283
284 if (newcpu >= nr_cpu_ids) {
285 pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
286 i, cpu);
287
288 cpumask_setall(mask);
289 }
290 irq_set_affinity(i, mask);
291 }
292}
293#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/metag/kernel/kick.c b/arch/metag/kernel/kick.c
deleted file mode 100644
index beb377621322..000000000000
--- a/arch/metag/kernel/kick.c
+++ /dev/null
@@ -1,110 +0,0 @@
1/*
2 * Copyright (C) 2009 Imagination Technologies
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file COPYING in the main directory of this archive
6 * for more details.
7 *
8 * The Meta KICK interrupt mechanism is generally a useful feature, so
9 * we provide an interface for registering multiple interrupt
10 * handlers. All the registered interrupt handlers are "chained". When
11 * a KICK interrupt is received the first function in the list is
12 * called. If that interrupt handler cannot handle the KICK the next
13 * one is called, then the next until someone handles it (or we run
14 * out of functions). As soon as one function handles the interrupt no
15 * other handlers are called.
16 *
17 * The only downside of chaining interrupt handlers is that each
18 * handler must be able to detect whether the KICK was intended for it
19 * or not. For example, when the IPI handler runs and it sees that
20 * there are no IPI messages it must not signal that the KICK was
21 * handled, thereby giving the other handlers a chance to run.
22 *
23 * The reason that we provide our own interface for calling KICK
24 * handlers instead of using the generic kernel infrastructure is that
25 * the KICK handlers require access to a CPU's pTBI structure. So we
26 * pass it as an argument.
27 */
28#include <linux/export.h>
29#include <linux/hardirq.h>
30#include <linux/irq.h>
31#include <linux/kernel.h>
32#include <linux/mm.h>
33#include <linux/types.h>
34
35#include <asm/traps.h>
36
37/*
38 * All accesses/manipulations of kick_handlers_list should be
39 * performed while holding kick_handlers_lock.
40 */
41static DEFINE_SPINLOCK(kick_handlers_lock);
42static LIST_HEAD(kick_handlers_list);
43
44void kick_register_func(struct kick_irq_handler *kh)
45{
46 unsigned long flags;
47
48 spin_lock_irqsave(&kick_handlers_lock, flags);
49
50 list_add_tail(&kh->list, &kick_handlers_list);
51
52 spin_unlock_irqrestore(&kick_handlers_lock, flags);
53}
54EXPORT_SYMBOL(kick_register_func);
55
56void kick_unregister_func(struct kick_irq_handler *kh)
57{
58 unsigned long flags;
59
60 spin_lock_irqsave(&kick_handlers_lock, flags);
61
62 list_del(&kh->list);
63
64 spin_unlock_irqrestore(&kick_handlers_lock, flags);
65}
66EXPORT_SYMBOL(kick_unregister_func);
67
68TBIRES
69kick_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI)
70{
71 struct pt_regs *old_regs;
72 struct kick_irq_handler *kh;
73 struct list_head *lh;
74 int handled = 0;
75 TBIRES ret;
76
77 head_end(State, ~INTS_OFF_MASK);
78
79 /* If we interrupted user code handle any critical sections. */
80 if (State.Sig.SaveMask & TBICTX_PRIV_BIT)
81 restart_critical_section(State);
82
83 trace_hardirqs_off();
84
85 old_regs = set_irq_regs((struct pt_regs *)State.Sig.pCtx);
86 irq_enter();
87
88 /*
89 * There is no need to disable interrupts here because we
90 * can't nest KICK interrupts in a KICK interrupt handler.
91 */
92 spin_lock(&kick_handlers_lock);
93
94 list_for_each(lh, &kick_handlers_list) {
95 kh = list_entry(lh, struct kick_irq_handler, list);
96
97 ret = kh->func(State, SigNum, Triggers, Inst, pTBI, &handled);
98 if (handled)
99 break;
100 }
101
102 spin_unlock(&kick_handlers_lock);
103
104 WARN_ON(!handled);
105
106 irq_exit();
107 set_irq_regs(old_regs);
108
109 return tail_end(ret);
110}
diff --git a/arch/metag/kernel/machines.c b/arch/metag/kernel/machines.c
deleted file mode 100644
index e49790181051..000000000000
--- a/arch/metag/kernel/machines.c
+++ /dev/null
@@ -1,21 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * arch/metag/kernel/machines.c
4 *
5 * Copyright (C) 2012 Imagination Technologies Ltd.
6 *
7 * Generic Meta Boards.
8 */
9
10#include <linux/init.h>
11#include <asm/irq.h>
12#include <asm/mach/arch.h>
13
14static const char *meta_boards_compat[] __initdata = {
15 "img,meta",
16 NULL,
17};
18
19MACHINE_START(META, "Generic Meta")
20 .dt_compat = meta_boards_compat,
21MACHINE_END
diff --git a/arch/metag/kernel/metag_ksyms.c b/arch/metag/kernel/metag_ksyms.c
deleted file mode 100644
index e312386efb72..000000000000
--- a/arch/metag/kernel/metag_ksyms.c
+++ /dev/null
@@ -1,55 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/export.h>
3#include <linux/types.h>
4
5#include <asm/checksum.h>
6#include <asm/div64.h>
7#include <asm/ftrace.h>
8#include <asm/page.h>
9#include <asm/string.h>
10#include <asm/tbx.h>
11
12EXPORT_SYMBOL(clear_page);
13EXPORT_SYMBOL(copy_page);
14
15#ifdef CONFIG_FLATMEM
16/* needed for the pfn_valid macro */
17EXPORT_SYMBOL(max_pfn);
18EXPORT_SYMBOL(min_low_pfn);
19#endif
20
21/* Network checksum functions */
22EXPORT_SYMBOL(csum_partial);
23
24/* TBI symbols */
25EXPORT_SYMBOL(__TBI);
26EXPORT_SYMBOL(__TBIFindSeg);
27EXPORT_SYMBOL(__TBIPoll);
28EXPORT_SYMBOL(__TBITimeStamp);
29
30#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
31
32/* libgcc functions */
33DECLARE_EXPORT(__ashldi3);
34DECLARE_EXPORT(__ashrdi3);
35DECLARE_EXPORT(__lshrdi3);
36DECLARE_EXPORT(__udivsi3);
37DECLARE_EXPORT(__divsi3);
38DECLARE_EXPORT(__umodsi3);
39DECLARE_EXPORT(__modsi3);
40DECLARE_EXPORT(__muldi3);
41DECLARE_EXPORT(__cmpdi2);
42DECLARE_EXPORT(__ucmpdi2);
43
44/* Maths functions */
45EXPORT_SYMBOL(div_u64);
46EXPORT_SYMBOL(div_s64);
47
48/* String functions */
49EXPORT_SYMBOL(memcpy);
50EXPORT_SYMBOL(memset);
51EXPORT_SYMBOL(memmove);
52
53#ifdef CONFIG_FUNCTION_TRACER
54EXPORT_SYMBOL(mcount_wrapper);
55#endif
diff --git a/arch/metag/kernel/module.c b/arch/metag/kernel/module.c
deleted file mode 100644
index bb8dfba9a763..000000000000
--- a/arch/metag/kernel/module.c
+++ /dev/null
@@ -1,284 +0,0 @@
1/* Kernel module help for Meta.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License as published by
5 the Free Software Foundation; either version 2 of the License, or
6 (at your option) any later version.
7
8 This program is distributed in the hope that it will be useful,
9 but WITHOUT ANY WARRANTY; without even the implied warranty of
10 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 GNU General Public License for more details.
12*/
13#include <linux/moduleloader.h>
14#include <linux/elf.h>
15#include <linux/vmalloc.h>
16#include <linux/fs.h>
17#include <linux/string.h>
18#include <linux/kernel.h>
19#include <linux/sort.h>
20
21#include <asm/unaligned.h>
22
23/* Count how many different relocations (different symbol, different
24 addend) */
25static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
26{
27 unsigned int i, r_info, r_addend, _count_relocs;
28
29 _count_relocs = 0;
30 r_info = 0;
31 r_addend = 0;
32 for (i = 0; i < num; i++)
33 /* Only count relbranch relocs, others don't need stubs */
34 if (ELF32_R_TYPE(rela[i].r_info) == R_METAG_RELBRANCH &&
35 (r_info != ELF32_R_SYM(rela[i].r_info) ||
36 r_addend != rela[i].r_addend)) {
37 _count_relocs++;
38 r_info = ELF32_R_SYM(rela[i].r_info);
39 r_addend = rela[i].r_addend;
40 }
41
42 return _count_relocs;
43}
44
45static int relacmp(const void *_x, const void *_y)
46{
47 const Elf32_Rela *x, *y;
48
49 y = (Elf32_Rela *)_x;
50 x = (Elf32_Rela *)_y;
51
52 /* Compare the entire r_info (as opposed to ELF32_R_SYM(r_info) only) to
53 * make the comparison cheaper/faster. It won't affect the sorting or
54 * the counting algorithms' performance
55 */
56 if (x->r_info < y->r_info)
57 return -1;
58 else if (x->r_info > y->r_info)
59 return 1;
60 else if (x->r_addend < y->r_addend)
61 return -1;
62 else if (x->r_addend > y->r_addend)
63 return 1;
64 else
65 return 0;
66}
67
68static void relaswap(void *_x, void *_y, int size)
69{
70 uint32_t *x, *y, tmp;
71 int i;
72
73 y = (uint32_t *)_x;
74 x = (uint32_t *)_y;
75
76 for (i = 0; i < sizeof(Elf32_Rela) / sizeof(uint32_t); i++) {
77 tmp = x[i];
78 x[i] = y[i];
79 y[i] = tmp;
80 }
81}
82
83/* Get the potential trampolines size required of the init and
84 non-init sections */
85static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
86 const Elf32_Shdr *sechdrs,
87 const char *secstrings,
88 int is_init)
89{
90 unsigned long ret = 0;
91 unsigned i;
92
93 /* Everything marked ALLOC (this includes the exported
94 symbols) */
95 for (i = 1; i < hdr->e_shnum; i++) {
96 /* If it's called *.init*, and we're not init, we're
97 not interested */
98 if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != NULL)
99 != is_init)
100 continue;
101
102 /* We don't want to look at debug sections. */
103 if (strstr(secstrings + sechdrs[i].sh_name, ".debug") != NULL)
104 continue;
105
106 if (sechdrs[i].sh_type == SHT_RELA) {
107 pr_debug("Found relocations in section %u\n", i);
108 pr_debug("Ptr: %p. Number: %u\n",
109 (void *)hdr + sechdrs[i].sh_offset,
110 sechdrs[i].sh_size / sizeof(Elf32_Rela));
111
112 /* Sort the relocation information based on a symbol and
113 * addend key. This is a stable O(n*log n) complexity
114 * alogrithm but it will reduce the complexity of
115 * count_relocs() to linear complexity O(n)
116 */
117 sort((void *)hdr + sechdrs[i].sh_offset,
118 sechdrs[i].sh_size / sizeof(Elf32_Rela),
119 sizeof(Elf32_Rela), relacmp, relaswap);
120
121 ret += count_relocs((void *)hdr
122 + sechdrs[i].sh_offset,
123 sechdrs[i].sh_size
124 / sizeof(Elf32_Rela))
125 * sizeof(struct metag_plt_entry);
126 }
127 }
128
129 return ret;
130}
131
132int module_frob_arch_sections(Elf32_Ehdr *hdr,
133 Elf32_Shdr *sechdrs,
134 char *secstrings,
135 struct module *me)
136{
137 unsigned int i;
138
139 /* Find .plt and .init.plt sections */
140 for (i = 0; i < hdr->e_shnum; i++) {
141 if (strcmp(secstrings + sechdrs[i].sh_name, ".init.plt") == 0)
142 me->arch.init_plt_section = i;
143 else if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0)
144 me->arch.core_plt_section = i;
145 }
146 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
147 pr_err("Module doesn't contain .plt or .init.plt sections.\n");
148 return -ENOEXEC;
149 }
150
151 /* Override their sizes */
152 sechdrs[me->arch.core_plt_section].sh_size
153 = get_plt_size(hdr, sechdrs, secstrings, 0);
154 sechdrs[me->arch.core_plt_section].sh_type = SHT_NOBITS;
155 sechdrs[me->arch.init_plt_section].sh_size
156 = get_plt_size(hdr, sechdrs, secstrings, 1);
157 sechdrs[me->arch.init_plt_section].sh_type = SHT_NOBITS;
158 return 0;
159}
160
161/* Set up a trampoline in the PLT to bounce us to the distant function */
162static uint32_t do_plt_call(void *location, Elf32_Addr val,
163 Elf32_Shdr *sechdrs, struct module *mod)
164{
165 struct metag_plt_entry *entry;
166 /* Instructions used to do the indirect jump. */
167 uint32_t tramp[2];
168
169 /* We have to trash a register, so we assume that any control
170 transfer more than 21-bits away must be a function call
171 (so we can use a call-clobbered register). */
172
173 /* MOVT D0Re0,#HI(v) */
174 tramp[0] = 0x02000005 | (((val & 0xffff0000) >> 16) << 3);
175 /* JUMP D0Re0,#LO(v) */
176 tramp[1] = 0xac000001 | ((val & 0x0000ffff) << 3);
177
178 /* Init, or core PLT? */
179 if (location >= mod->core_layout.base
180 && location < mod->core_layout.base + mod->core_layout.size)
181 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
182 else
183 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
184
185 /* Find this entry, or if that fails, the next avail. entry */
186 while (entry->tramp[0])
187 if (entry->tramp[0] == tramp[0] && entry->tramp[1] == tramp[1])
188 return (uint32_t)entry;
189 else
190 entry++;
191
192 entry->tramp[0] = tramp[0];
193 entry->tramp[1] = tramp[1];
194
195 return (uint32_t)entry;
196}
197
198int apply_relocate_add(Elf32_Shdr *sechdrs,
199 const char *strtab,
200 unsigned int symindex,
201 unsigned int relsec,
202 struct module *me)
203{
204 unsigned int i;
205 Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
206 Elf32_Sym *sym;
207 Elf32_Addr relocation;
208 uint32_t *location;
209 int32_t value;
210
211 pr_debug("Applying relocate section %u to %u\n", relsec,
212 sechdrs[relsec].sh_info);
213 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
214 /* This is where to make the change */
215 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
216 + rel[i].r_offset;
217 /* This is the symbol it is referring to. Note that all
218 undefined symbols have been resolved. */
219 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
220 + ELF32_R_SYM(rel[i].r_info);
221 relocation = sym->st_value + rel[i].r_addend;
222
223 switch (ELF32_R_TYPE(rel[i].r_info)) {
224 case R_METAG_NONE:
225 break;
226 case R_METAG_HIADDR16:
227 relocation >>= 16;
228 case R_METAG_LOADDR16:
229 *location = (*location & 0xfff80007) |
230 ((relocation & 0xffff) << 3);
231 break;
232 case R_METAG_ADDR32:
233 /*
234 * Packed data structures may cause a misaligned
235 * R_METAG_ADDR32 to be emitted.
236 */
237 put_unaligned(relocation, location);
238 break;
239 case R_METAG_GETSETOFF:
240 *location += ((relocation & 0xfff) << 7);
241 break;
242 case R_METAG_RELBRANCH:
243 if (*location & (0x7ffff << 5)) {
244 pr_err("bad relbranch relocation\n");
245 break;
246 }
247
248 /* This jump is too big for the offset slot. Build
249 * a PLT to jump through to get to where we want to go.
250 * NB: 21bit check - not scaled to 19bit yet
251 */
252 if (((int32_t)(relocation -
253 (uint32_t)location) > 0xfffff) ||
254 ((int32_t)(relocation -
255 (uint32_t)location) < -0xfffff)) {
256 relocation = do_plt_call(location, relocation,
257 sechdrs, me);
258 }
259
260 value = relocation - (uint32_t)location;
261
262 /* branch instruction aligned */
263 value /= 4;
264
265 if ((value > 0x7ffff) || (value < -0x7ffff)) {
266 /*
267 * this should have been caught by the code
268 * above!
269 */
270 pr_err("overflow of relbranch reloc\n");
271 }
272
273 *location = (*location & (~(0x7ffff << 5))) |
274 ((value & 0x7ffff) << 5);
275 break;
276
277 default:
278 pr_err("module %s: Unknown relocation: %u\n",
279 me->name, ELF32_R_TYPE(rel[i].r_info));
280 return -ENOEXEC;
281 }
282 }
283 return 0;
284}
diff --git a/arch/metag/kernel/perf/Makefile b/arch/metag/kernel/perf/Makefile
deleted file mode 100644
index b158cb27208d..000000000000
--- a/arch/metag/kernel/perf/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
1# Makefile for performance event core
2
3obj-y += perf_event.o
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
deleted file mode 100644
index 7e793eb0c1fe..000000000000
--- a/arch/metag/kernel/perf/perf_event.c
+++ /dev/null
@@ -1,879 +0,0 @@
1/*
2 * Meta performance counter support.
3 * Copyright (C) 2012 Imagination Technologies Ltd
4 *
5 * This code is based on the sh pmu code:
6 * Copyright (C) 2009 Paul Mundt
7 *
8 * and on the arm pmu code:
9 * Copyright (C) 2009 picoChip Designs, Ltd., James Iles
10 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
11 *
12 * This file is subject to the terms and conditions of the GNU General Public
13 * License. See the file "COPYING" in the main directory of this archive
14 * for more details.
15 */
16
17#include <linux/atomic.h>
18#include <linux/export.h>
19#include <linux/init.h>
20#include <linux/irqchip/metag.h>
21#include <linux/perf_event.h>
22#include <linux/slab.h>
23
24#include <asm/core_reg.h>
25#include <asm/io.h>
26#include <asm/irq.h>
27#include <asm/processor.h>
28
29#include "perf_event.h"
30
31static int _hw_perf_event_init(struct perf_event *);
32static void _hw_perf_event_destroy(struct perf_event *);
33
34/* Determines which core type we are */
35static struct metag_pmu *metag_pmu __read_mostly;
36
37/* Processor specific data */
38static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
39
40/* PMU admin */
41const char *perf_pmu_name(void)
42{
43 if (!metag_pmu)
44 return NULL;
45
46 return metag_pmu->name;
47}
48EXPORT_SYMBOL_GPL(perf_pmu_name);
49
50int perf_num_counters(void)
51{
52 if (metag_pmu)
53 return metag_pmu->max_events;
54
55 return 0;
56}
57EXPORT_SYMBOL_GPL(perf_num_counters);
58
59static inline int metag_pmu_initialised(void)
60{
61 return !!metag_pmu;
62}
63
64static void release_pmu_hardware(void)
65{
66 int irq;
67 unsigned int version = (metag_pmu->version &
68 (METAC_ID_MINOR_BITS | METAC_ID_REV_BITS)) >>
69 METAC_ID_REV_S;
70
71 /* Early cores don't have overflow interrupts */
72 if (version < 0x0104)
73 return;
74
75 irq = internal_irq_map(17);
76 if (irq >= 0)
77 free_irq(irq, (void *)1);
78
79 irq = internal_irq_map(16);
80 if (irq >= 0)
81 free_irq(irq, (void *)0);
82}
83
84static int reserve_pmu_hardware(void)
85{
86 int err = 0, irq[2];
87 unsigned int version = (metag_pmu->version &
88 (METAC_ID_MINOR_BITS | METAC_ID_REV_BITS)) >>
89 METAC_ID_REV_S;
90
91 /* Early cores don't have overflow interrupts */
92 if (version < 0x0104)
93 goto out;
94
95 /*
96 * Bit 16 on HWSTATMETA is the interrupt for performance counter 0;
97 * similarly, 17 is the interrupt for performance counter 1.
98 * We can't (yet) interrupt on the cycle counter, because it's a
99 * register, however it holds a 32-bit value as opposed to 24-bit.
100 */
101 irq[0] = internal_irq_map(16);
102 if (irq[0] < 0) {
103 pr_err("unable to map internal IRQ %d\n", 16);
104 goto out;
105 }
106 err = request_irq(irq[0], metag_pmu->handle_irq, IRQF_NOBALANCING,
107 "metagpmu0", (void *)0);
108 if (err) {
109 pr_err("unable to request IRQ%d for metag PMU counters\n",
110 irq[0]);
111 goto out;
112 }
113
114 irq[1] = internal_irq_map(17);
115 if (irq[1] < 0) {
116 pr_err("unable to map internal IRQ %d\n", 17);
117 goto out_irq1;
118 }
119 err = request_irq(irq[1], metag_pmu->handle_irq, IRQF_NOBALANCING,
120 "metagpmu1", (void *)1);
121 if (err) {
122 pr_err("unable to request IRQ%d for metag PMU counters\n",
123 irq[1]);
124 goto out_irq1;
125 }
126
127 return 0;
128
129out_irq1:
130 free_irq(irq[0], (void *)0);
131out:
132 return err;
133}
134
135/* PMU operations */
136static void metag_pmu_enable(struct pmu *pmu)
137{
138}
139
140static void metag_pmu_disable(struct pmu *pmu)
141{
142}
143
144static int metag_pmu_event_init(struct perf_event *event)
145{
146 int err = 0;
147 atomic_t *active_events = &metag_pmu->active_events;
148
149 if (!metag_pmu_initialised()) {
150 err = -ENODEV;
151 goto out;
152 }
153
154 if (has_branch_stack(event))
155 return -EOPNOTSUPP;
156
157 event->destroy = _hw_perf_event_destroy;
158
159 if (!atomic_inc_not_zero(active_events)) {
160 mutex_lock(&metag_pmu->reserve_mutex);
161 if (atomic_read(active_events) == 0)
162 err = reserve_pmu_hardware();
163
164 if (!err)
165 atomic_inc(active_events);
166
167 mutex_unlock(&metag_pmu->reserve_mutex);
168 }
169
170 /* Hardware and caches counters */
171 switch (event->attr.type) {
172 case PERF_TYPE_HARDWARE:
173 case PERF_TYPE_HW_CACHE:
174 case PERF_TYPE_RAW:
175 err = _hw_perf_event_init(event);
176 break;
177
178 default:
179 return -ENOENT;
180 }
181
182 if (err)
183 event->destroy(event);
184
185out:
186 return err;
187}
188
189void metag_pmu_event_update(struct perf_event *event,
190 struct hw_perf_event *hwc, int idx)
191{
192 u64 prev_raw_count, new_raw_count;
193 s64 delta;
194
195 /*
196 * If this counter is chained, it may be that the previous counter
197 * value has been changed beneath us.
198 *
199 * To get around this, we read and exchange the new raw count, then
200 * add the delta (new - prev) to the generic counter atomically.
201 *
202 * Without interrupts, this is the simplest approach.
203 */
204again:
205 prev_raw_count = local64_read(&hwc->prev_count);
206 new_raw_count = metag_pmu->read(idx);
207
208 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
209 new_raw_count) != prev_raw_count)
210 goto again;
211
212 /*
213 * Calculate the delta and add it to the counter.
214 */
215 delta = (new_raw_count - prev_raw_count) & MAX_PERIOD;
216
217 local64_add(delta, &event->count);
218 local64_sub(delta, &hwc->period_left);
219}
220
221int metag_pmu_event_set_period(struct perf_event *event,
222 struct hw_perf_event *hwc, int idx)
223{
224 s64 left = local64_read(&hwc->period_left);
225 s64 period = hwc->sample_period;
226 int ret = 0;
227
228 /* The period may have been changed */
229 if (unlikely(period != hwc->last_period))
230 left += period - hwc->last_period;
231
232 if (unlikely(left <= -period)) {
233 left = period;
234 local64_set(&hwc->period_left, left);
235 hwc->last_period = period;
236 ret = 1;
237 }
238
239 if (unlikely(left <= 0)) {
240 left += period;
241 local64_set(&hwc->period_left, left);
242 hwc->last_period = period;
243 ret = 1;
244 }
245
246 if (left > (s64)metag_pmu->max_period)
247 left = metag_pmu->max_period;
248
249 if (metag_pmu->write) {
250 local64_set(&hwc->prev_count, -(s32)left);
251 metag_pmu->write(idx, -left & MAX_PERIOD);
252 }
253
254 perf_event_update_userpage(event);
255
256 return ret;
257}
258
259static void metag_pmu_start(struct perf_event *event, int flags)
260{
261 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
262 struct hw_perf_event *hwc = &event->hw;
263 int idx = hwc->idx;
264
265 if (WARN_ON_ONCE(idx == -1))
266 return;
267
268 /*
269 * We always have to reprogram the period, so ignore PERF_EF_RELOAD.
270 */
271 if (flags & PERF_EF_RELOAD)
272 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
273
274 hwc->state = 0;
275
276 /*
277 * Reset the period.
278 * Some counters can't be stopped (i.e. are core global), so when the
279 * counter was 'stopped' we merely disabled the IRQ. If we don't reset
280 * the period, then we'll either: a) get an overflow too soon;
281 * or b) too late if the overflow happened since disabling.
282 * Obviously, this has little bearing on cores without the overflow
283 * interrupt, as the performance counter resets to zero on write
284 * anyway.
285 */
286 if (metag_pmu->max_period)
287 metag_pmu_event_set_period(event, hwc, hwc->idx);
288 cpuc->events[idx] = event;
289 metag_pmu->enable(hwc, idx);
290}
291
292static void metag_pmu_stop(struct perf_event *event, int flags)
293{
294 struct hw_perf_event *hwc = &event->hw;
295
296 /*
297 * We should always update the counter on stop; see comment above
298 * why.
299 */
300 if (!(hwc->state & PERF_HES_STOPPED)) {
301 metag_pmu_event_update(event, hwc, hwc->idx);
302 metag_pmu->disable(hwc, hwc->idx);
303 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
304 }
305}
306
307static int metag_pmu_add(struct perf_event *event, int flags)
308{
309 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
310 struct hw_perf_event *hwc = &event->hw;
311 int idx = 0, ret = 0;
312
313 perf_pmu_disable(event->pmu);
314
315 /* check whether we're counting instructions */
316 if (hwc->config == 0x100) {
317 if (__test_and_set_bit(METAG_INST_COUNTER,
318 cpuc->used_mask)) {
319 ret = -EAGAIN;
320 goto out;
321 }
322 idx = METAG_INST_COUNTER;
323 } else {
324 /* Check whether we have a spare counter */
325 idx = find_first_zero_bit(cpuc->used_mask,
326 atomic_read(&metag_pmu->active_events));
327 if (idx >= METAG_INST_COUNTER) {
328 ret = -EAGAIN;
329 goto out;
330 }
331
332 __set_bit(idx, cpuc->used_mask);
333 }
334 hwc->idx = idx;
335
336 /* Make sure the counter is disabled */
337 metag_pmu->disable(hwc, idx);
338
339 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
340 if (flags & PERF_EF_START)
341 metag_pmu_start(event, PERF_EF_RELOAD);
342
343 perf_event_update_userpage(event);
344out:
345 perf_pmu_enable(event->pmu);
346 return ret;
347}
348
349static void metag_pmu_del(struct perf_event *event, int flags)
350{
351 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
352 struct hw_perf_event *hwc = &event->hw;
353 int idx = hwc->idx;
354
355 WARN_ON(idx < 0);
356 metag_pmu_stop(event, PERF_EF_UPDATE);
357 cpuc->events[idx] = NULL;
358 __clear_bit(idx, cpuc->used_mask);
359
360 perf_event_update_userpage(event);
361}
362
363static void metag_pmu_read(struct perf_event *event)
364{
365 struct hw_perf_event *hwc = &event->hw;
366
367 /* Don't read disabled counters! */
368 if (hwc->idx < 0)
369 return;
370
371 metag_pmu_event_update(event, hwc, hwc->idx);
372}
373
374static struct pmu pmu = {
375 .pmu_enable = metag_pmu_enable,
376 .pmu_disable = metag_pmu_disable,
377
378 .event_init = metag_pmu_event_init,
379
380 .add = metag_pmu_add,
381 .del = metag_pmu_del,
382 .start = metag_pmu_start,
383 .stop = metag_pmu_stop,
384 .read = metag_pmu_read,
385};
386
387/* Core counter specific functions */
388static const int metag_general_events[] = {
389 [PERF_COUNT_HW_CPU_CYCLES] = 0x03,
390 [PERF_COUNT_HW_INSTRUCTIONS] = 0x100,
391 [PERF_COUNT_HW_CACHE_REFERENCES] = -1,
392 [PERF_COUNT_HW_CACHE_MISSES] = -1,
393 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
394 [PERF_COUNT_HW_BRANCH_MISSES] = -1,
395 [PERF_COUNT_HW_BUS_CYCLES] = -1,
396 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = -1,
397 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = -1,
398 [PERF_COUNT_HW_REF_CPU_CYCLES] = -1,
399};
400
401static const int metag_pmu_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
402 [C(L1D)] = {
403 [C(OP_READ)] = {
404 [C(RESULT_ACCESS)] = 0x08,
405 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
406 },
407 [C(OP_WRITE)] = {
408 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
409 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
410 },
411 [C(OP_PREFETCH)] = {
412 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
413 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
414 },
415 },
416 [C(L1I)] = {
417 [C(OP_READ)] = {
418 [C(RESULT_ACCESS)] = 0x09,
419 [C(RESULT_MISS)] = 0x0a,
420 },
421 [C(OP_WRITE)] = {
422 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
423 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
424 },
425 [C(OP_PREFETCH)] = {
426 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
427 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
428 },
429 },
430 [C(LL)] = {
431 [C(OP_READ)] = {
432 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
433 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
434 },
435 [C(OP_WRITE)] = {
436 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
437 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
438 },
439 [C(OP_PREFETCH)] = {
440 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
441 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
442 },
443 },
444 [C(DTLB)] = {
445 [C(OP_READ)] = {
446 [C(RESULT_ACCESS)] = 0xd0,
447 [C(RESULT_MISS)] = 0xd2,
448 },
449 [C(OP_WRITE)] = {
450 [C(RESULT_ACCESS)] = 0xd4,
451 [C(RESULT_MISS)] = 0xd5,
452 },
453 [C(OP_PREFETCH)] = {
454 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
455 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
456 },
457 },
458 [C(ITLB)] = {
459 [C(OP_READ)] = {
460 [C(RESULT_ACCESS)] = 0xd1,
461 [C(RESULT_MISS)] = 0xd3,
462 },
463 [C(OP_WRITE)] = {
464 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
465 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
466 },
467 [C(OP_PREFETCH)] = {
468 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
469 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
470 },
471 },
472 [C(BPU)] = {
473 [C(OP_READ)] = {
474 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
475 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
476 },
477 [C(OP_WRITE)] = {
478 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
479 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
480 },
481 [C(OP_PREFETCH)] = {
482 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
483 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
484 },
485 },
486 [C(NODE)] = {
487 [C(OP_READ)] = {
488 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
489 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
490 },
491 [C(OP_WRITE)] = {
492 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
493 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
494 },
495 [C(OP_PREFETCH)] = {
496 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
497 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
498 },
499 },
500};
501
502
503static void _hw_perf_event_destroy(struct perf_event *event)
504{
505 atomic_t *active_events = &metag_pmu->active_events;
506 struct mutex *pmu_mutex = &metag_pmu->reserve_mutex;
507
508 if (atomic_dec_and_mutex_lock(active_events, pmu_mutex)) {
509 release_pmu_hardware();
510 mutex_unlock(pmu_mutex);
511 }
512}
513
514static int _hw_perf_cache_event(int config, int *evp)
515{
516 unsigned long type, op, result;
517 int ev;
518
519 if (!metag_pmu->cache_events)
520 return -EINVAL;
521
522 /* Unpack config */
523 type = config & 0xff;
524 op = (config >> 8) & 0xff;
525 result = (config >> 16) & 0xff;
526
527 if (type >= PERF_COUNT_HW_CACHE_MAX ||
528 op >= PERF_COUNT_HW_CACHE_OP_MAX ||
529 result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
530 return -EINVAL;
531
532 ev = (*metag_pmu->cache_events)[type][op][result];
533 if (ev == 0)
534 return -EOPNOTSUPP;
535 if (ev == -1)
536 return -EINVAL;
537 *evp = ev;
538 return 0;
539}
540
541static int _hw_perf_event_init(struct perf_event *event)
542{
543 struct perf_event_attr *attr = &event->attr;
544 struct hw_perf_event *hwc = &event->hw;
545 int mapping = 0, err;
546
547 switch (attr->type) {
548 case PERF_TYPE_HARDWARE:
549 if (attr->config >= PERF_COUNT_HW_MAX)
550 return -EINVAL;
551
552 mapping = metag_pmu->event_map(attr->config);
553 break;
554
555 case PERF_TYPE_HW_CACHE:
556 err = _hw_perf_cache_event(attr->config, &mapping);
557 if (err)
558 return err;
559 break;
560
561 case PERF_TYPE_RAW:
562 mapping = attr->config;
563 break;
564 }
565
566 /* Return early if the event is unsupported */
567 if (mapping == -1)
568 return -EINVAL;
569
570 /*
571 * Don't assign an index until the event is placed into the hardware.
572 * -1 signifies that we're still deciding where to put it. On SMP
573 * systems each core has its own set of counters, so we can't do any
574 * constraint checking yet.
575 */
576 hwc->idx = -1;
577
578 /* Store the event encoding */
579 hwc->config |= (unsigned long)mapping;
580
581 /*
582 * For non-sampling runs, limit the sample_period to half of the
583 * counter width. This way, the new counter value should be less
584 * likely to overtake the previous one (unless there are IRQ latency
585 * issues...)
586 */
587 if (metag_pmu->max_period) {
588 if (!hwc->sample_period) {
589 hwc->sample_period = metag_pmu->max_period >> 1;
590 hwc->last_period = hwc->sample_period;
591 local64_set(&hwc->period_left, hwc->sample_period);
592 }
593 }
594
595 return 0;
596}
597
598static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
599{
600 struct cpu_hw_events *events = this_cpu_ptr(&cpu_hw_events);
601 unsigned int config = event->config;
602 unsigned int tmp = config & 0xf0;
603 unsigned long flags;
604
605 raw_spin_lock_irqsave(&events->pmu_lock, flags);
606
607 /*
608 * Check if we're enabling the instruction counter (index of
609 * MAX_HWEVENTS - 1)
610 */
611 if (METAG_INST_COUNTER == idx) {
612 WARN_ONCE((config != 0x100),
613 "invalid configuration (%d) for counter (%d)\n",
614 config, idx);
615 local64_set(&event->prev_count, __core_reg_get(TXTACTCYC));
616 goto unlock;
617 }
618
619 /* Check for a core internal or performance channel event. */
620 if (tmp) {
621 /* PERF_ICORE/PERF_CHAN only exist since Meta2 */
622#ifdef METAC_2_1
623 void *perf_addr;
624
625 /*
626 * Anything other than a cycle count will write the low-
627 * nibble to the correct counter register.
628 */
629 switch (tmp) {
630 case 0xd0:
631 perf_addr = (void *)PERF_ICORE(idx);
632 break;
633
634 case 0xf0:
635 perf_addr = (void *)PERF_CHAN(idx);
636 break;
637
638 default:
639 perf_addr = NULL;
640 break;
641 }
642
643 if (perf_addr)
644 metag_out32((config & 0x0f), perf_addr);
645#endif
646
647 /*
648 * Now we use the high nibble as the performance event to
649 * to count.
650 */
651 config = tmp >> 4;
652 }
653
654 tmp = ((config & 0xf) << 28) |
655 ((1 << 24) << hard_processor_id());
656 if (metag_pmu->max_period)
657 /*
658 * Cores supporting overflow interrupts may have had the counter
659 * set to a specific value that needs preserving.
660 */
661 tmp |= metag_in32(PERF_COUNT(idx)) & 0x00ffffff;
662 else
663 /*
664 * Older cores reset the counter on write, so prev_count needs
665 * resetting too so we can calculate a correct delta.
666 */
667 local64_set(&event->prev_count, 0);
668
669 metag_out32(tmp, PERF_COUNT(idx));
670unlock:
671 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
672}
673
/*
 * metag_pmu_disable_counter() - stop a hardware counter.
 * @event: perf event being disabled
 * @idx:   hardware counter index
 *
 * Clears the thread-mask/event byte of the counter register while
 * preserving the 24-bit count, under pmu_lock.
 */
static void metag_pmu_disable_counter(struct hw_perf_event *event, int idx)
{
	struct cpu_hw_events *events = this_cpu_ptr(&cpu_hw_events);
	unsigned int tmp = 0;
	unsigned long flags;

	/*
	 * The cycle counter can't be disabled per se, as it's a hardware
	 * thread register which is always counting. We merely return if this
	 * is the counter we're attempting to disable.
	 */
	if (METAG_INST_COUNTER == idx)
		return;

	/*
	 * The counter value _should_ have been read prior to disabling,
	 * as if we're running on an early core then the value gets reset to
	 * 0, and any read after that would be useless. On the newer cores,
	 * however, it's better to read-modify-update this for purposes of
	 * the overflow interrupt.
	 * Here we remove the thread id AND the event nibble (there are at
	 * least two events that count events that are core global and ignore
	 * the thread id mask). This only works because we don't mix thread
	 * performance counts, and event 0x00 requires a thread id mask!
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Keep only the 24-bit count; zero the control byte. */
	tmp = metag_in32(PERF_COUNT(idx));
	tmp &= 0x00ffffff;
	metag_out32(tmp, PERF_COUNT(idx));

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
707
708static u64 metag_pmu_read_counter(int idx)
709{
710 u32 tmp = 0;
711
712 if (METAG_INST_COUNTER == idx) {
713 tmp = __core_reg_get(TXTACTCYC);
714 goto out;
715 }
716
717 tmp = metag_in32(PERF_COUNT(idx)) & 0x00ffffff;
718out:
719 return tmp;
720}
721
722static void metag_pmu_write_counter(int idx, u32 val)
723{
724 struct cpu_hw_events *events = this_cpu_ptr(&cpu_hw_events);
725 u32 tmp = 0;
726 unsigned long flags;
727
728 /*
729 * This _shouldn't_ happen, but if it does, then we can just
730 * ignore the write, as the register is read-only and clear-on-write.
731 */
732 if (METAG_INST_COUNTER == idx)
733 return;
734
735 /*
736 * We'll keep the thread mask and event id, and just update the
737 * counter itself. Also , we should bound the value to 24-bits.
738 */
739 raw_spin_lock_irqsave(&events->pmu_lock, flags);
740
741 val &= 0x00ffffff;
742 tmp = metag_in32(PERF_COUNT(idx)) & 0xff000000;
743 val |= tmp;
744 metag_out32(val, PERF_COUNT(idx));
745
746 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
747}
748
/*
 * Map a generic perf hardware event id to this core's event code via
 * the metag_general_events[] table (defined earlier in this file).
 */
static int metag_pmu_event_map(int idx)
{
	return metag_general_events[idx];
}
753
/*
 * metag_pmu_counter_overflow() - counter overflow interrupt handler.
 * @irq: interrupt number
 * @dev: dev_id cookie; carries the counter index, not a device pointer
 *
 * Quiesces the counter (clears its control byte) while the overflow is
 * processed, updates the perf counts and sample period, then restores
 * the control byte unless perf asked for the event to be stopped.
 */
static irqreturn_t metag_pmu_counter_overflow(int irq, void *dev)
{
	/* The counter index was smuggled through the dev_id cookie. */
	int idx = (int)dev;
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
	struct perf_event *event = cpuhw->events[idx];
	struct hw_perf_event *hwc = &event->hw;
	struct pt_regs *regs = get_irq_regs();
	struct perf_sample_data sampledata;
	unsigned long flags;
	u32 counter = 0;

	/*
	 * We need to stop the core temporarily from generating another
	 * interrupt while we disable this counter. However, we don't want
	 * to flag the counter as free
	 */
	__global_lock2(flags);
	counter = metag_in32(PERF_COUNT(idx));
	metag_out32((counter & 0x00ffffff), PERF_COUNT(idx));
	__global_unlock2(flags);

	/* Update the counts and reset the sample period */
	metag_pmu_event_update(event, hwc, idx);
	perf_sample_data_init(&sampledata, 0, hwc->last_period);
	metag_pmu_event_set_period(event, hwc, idx);

	/*
	 * Enable the counter again once core overflow processing has
	 * completed. Note the counter value may have been modified while it was
	 * inactive to set it up ready for the next interrupt.
	 */
	if (!perf_event_overflow(event, &sampledata, regs)) {
		__global_lock2(flags);
		counter = (counter & 0xff000000) |
			(metag_in32(PERF_COUNT(idx)) & 0x00ffffff);
		metag_out32(counter, PERF_COUNT(idx));
		__global_unlock2(flags);
	}

	return IRQ_HANDLED;
}
795
/*
 * Meta 2 PMU description. Describes the full feature set (overflow
 * interrupts, writable counters); init_hw_perf_events() prunes the
 * handle_irq/write/max_period fields on pre-1.4 revisions.
 */
static struct metag_pmu _metag_pmu = {
	.handle_irq = metag_pmu_counter_overflow,
	.enable = metag_pmu_enable_counter,
	.disable = metag_pmu_disable_counter,
	.read = metag_pmu_read_counter,
	.write = metag_pmu_write_counter,
	.event_map = metag_pmu_event_map,
	.cache_events = &metag_pmu_cache_events,
	.max_period = MAX_PERIOD,
	.max_events = MAX_HWEVENTS,
};
807
808/* PMU CPU hotplug notifier */
809static int metag_pmu_starting_cpu(unsigned int cpu)
810{
811 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
812
813 memset(cpuc, 0, sizeof(struct cpu_hw_events));
814 raw_spin_lock_init(&cpuc->pmu_lock);
815
816 return 0;
817}
818
/* PMU Initialisation */
static int __init init_hw_perf_events(void)
{
	/* NOTE(review): 'cpu' appears unused since the cpuhp conversion. */
	int ret = 0, cpu;
	/* Identify the core from the METAC_ID register. */
	u32 version = *(u32 *)METAC_ID;
	int major = (version & METAC_ID_MAJOR_BITS) >> METAC_ID_MAJOR_S;
	int min_rev = (version & (METAC_ID_MINOR_BITS | METAC_ID_REV_BITS))
			>> METAC_ID_REV_S;

	/* Not a Meta 2 core, then not supported */
	if (0x02 > major) {
		pr_info("no hardware counter support available\n");
		goto out;
	} else if (0x02 == major) {
		metag_pmu = &_metag_pmu;

		if (min_rev < 0x0104) {
			/*
			 * A core without overflow interrupts, and clear-on-
			 * write counters.
			 */
			metag_pmu->handle_irq = NULL;
			metag_pmu->write = NULL;
			metag_pmu->max_period = 0;
		}

		metag_pmu->name = "meta2";
		metag_pmu->version = version;
		metag_pmu->pmu = pmu;
	}

	pr_info("enabled with %s PMU driver, %d counters available\n",
			metag_pmu->name, metag_pmu->max_events);

	/*
	 * Early cores have "limited" counters - they have no overflow
	 * interrupts - and so are unable to do sampling without extra work
	 * and timer assistance.
	 */
	if (metag_pmu->max_period == 0) {
		metag_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
	}

	/* Initialise the active events and reservation mutex */
	atomic_set(&metag_pmu->active_events, 0);
	mutex_init(&metag_pmu->reserve_mutex);

	/* Clear the counters */
	metag_out32(0, PERF_COUNT(0));
	metag_out32(0, PERF_COUNT(1));

	cpuhp_setup_state(CPUHP_AP_PERF_METAG_STARTING,
			  "perf/metag:starting", metag_pmu_starting_cpu,
			  NULL);

	ret = perf_pmu_register(&pmu, metag_pmu->name, PERF_TYPE_RAW);
	if (ret)
		cpuhp_remove_state_nocalls(CPUHP_AP_PERF_METAG_STARTING);
out:	/* NOTE(review): label restored; lost in extraction but targeted by goto above. */
	return ret;
}
early_initcall(init_hw_perf_events);
diff --git a/arch/metag/kernel/perf/perf_event.h b/arch/metag/kernel/perf/perf_event.h
deleted file mode 100644
index fd10a1345b67..000000000000
--- a/arch/metag/kernel/perf/perf_event.h
+++ /dev/null
@@ -1,106 +0,0 @@
/*
 * Meta performance counter support.
 * Copyright (C) 2012 Imagination Technologies Ltd
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef METAG_PERF_EVENT_H_
#define METAG_PERF_EVENT_H_

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>

/* For performance counter definitions */
#include <asm/metag_mem.h>

/*
 * The Meta core has two performance counters, with 24-bit resolution. Newer
 * cores generate an overflow interrupt on transition from 0xffffff to 0.
 *
 * Each counter consists of the counter id, hardware thread id, and the count
 * itself; each counter can be assigned to multiple hardware threads at any
 * one time, with the returned count being an aggregate of events. A small
 * number of events are thread global, i.e. they count the aggregate of all
 * threads' events, regardless of the thread selected.
 *
 * Newer cores can store an arbitrary 24-bit number in the counter, whereas
 * older cores will clear the counter bits on write.
 *
 * We also have a pseudo-counter in the form of the thread active cycles
 * counter (which, incidentally, is also bound to the TXTACTCYC core
 * register -- NOTE(review): the original comment was truncated here).
 */

/* Two real counters plus the TXTACTCYC pseudo-counter. */
#define MAX_HWEVENTS 3
/* Counters are 24 bits wide. */
#define MAX_PERIOD ((1UL << 24) - 1)
/* Index reserved for the instruction (active-cycles) pseudo-counter. */
#define METAG_INST_COUNTER (MAX_HWEVENTS - 1)

/**
 * struct cpu_hw_events - a processor core's performance events
 * @events: an array of perf_events active for a given index.
 * @used_mask: a bitmap of in-use counters.
 * @pmu_lock: a perf counter lock
 *
 * This is a per-cpu/core structure that maintains a record of its
 * performance counters' state.
 */
struct cpu_hw_events {
	struct perf_event *events[MAX_HWEVENTS];
	unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
	raw_spinlock_t pmu_lock;
};

/**
 * struct metag_pmu - the Meta PMU structure
 * @pmu: core pmu structure
 * @name: pmu name
 * @version: core version
 * @handle_irq: overflow interrupt handler
 * @enable: enable a counter
 * @disable: disable a counter
 * @read: read the value of a counter
 * @write: write a value to a counter
 * @event_map: kernel event to counter event id map
 * @cache_events: kernel cache counter to core cache counter map
 * @max_period: maximum value of the counter before overflow
 * @max_events: maximum number of counters available at any one time
 * @active_events: number of active counters
 * @reserve_mutex: counter reservation mutex
 *
 * This describes the main functionality and data used by the performance
 * event core.
 */
struct metag_pmu {
	struct pmu pmu;
	const char *name;
	u32 version;
	irqreturn_t (*handle_irq)(int irq_num, void *dev);
	void (*enable)(struct hw_perf_event *evt, int idx);
	void (*disable)(struct hw_perf_event *evt, int idx);
	u64 (*read)(int idx);
	void (*write)(int idx, u32 val);
	int (*event_map)(int idx);
	const int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	u32 max_period;
	int max_events;
	atomic_t active_events;
	struct mutex reserve_mutex;
};

/* Convenience macros for accessing the perf counters */
/* Define some convenience accessors; registers are 8 bytes apart. */
#define PERF_COUNT(x)	(PERF_COUNT0 + (sizeof(u64) * (x)))
#define PERF_ICORE(x)	(PERF_ICORE0 + (sizeof(u64) * (x)))
#define PERF_CHAN(x)	(PERF_CHAN0 + (sizeof(u64) * (x)))

/* Cache index macros */
#define C(x) PERF_COUNT_HW_CACHE_##x
#define CACHE_OP_UNSUPPORTED	0xfffe
#define CACHE_OP_NONSENSE	0xffff

#endif
diff --git a/arch/metag/kernel/perf_callchain.c b/arch/metag/kernel/perf_callchain.c
deleted file mode 100644
index d325ba101de0..000000000000
--- a/arch/metag/kernel/perf_callchain.c
+++ /dev/null
@@ -1,97 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Perf callchain handling code.
4 *
5 * Based on the ARM perf implementation.
6 */
7
8#include <linux/kernel.h>
9#include <linux/sched.h>
10#include <linux/perf_event.h>
11#include <linux/uaccess.h>
12#include <asm/ptrace.h>
13#include <asm/stacktrace.h>
14
15static bool is_valid_call(unsigned long calladdr)
16{
17 unsigned int callinsn;
18
19 /* Check the possible return address is aligned. */
20 if (!(calladdr & 0x3)) {
21 if (!get_user(callinsn, (unsigned int *)calladdr)) {
22 /* Check for CALLR or SWAP PC,D1RtP. */
23 if ((callinsn & 0xff000000) == 0xab000000 ||
24 callinsn == 0xa3200aa0)
25 return true;
26 }
27 }
28 return false;
29}
30
31static struct metag_frame __user *
32user_backtrace(struct metag_frame __user *user_frame,
33 struct perf_callchain_entry_ctx *entry)
34{
35 struct metag_frame frame;
36 unsigned long calladdr;
37
38 /* We cannot rely on having frame pointers in user code. */
39 while (1) {
40 /* Also check accessibility of one struct frame beyond */
41 if (!access_ok(VERIFY_READ, user_frame, sizeof(frame)))
42 return 0;
43 if (__copy_from_user_inatomic(&frame, user_frame,
44 sizeof(frame)))
45 return 0;
46
47 --user_frame;
48
49 calladdr = frame.lr - 4;
50 if (is_valid_call(calladdr)) {
51 perf_callchain_store(entry, calladdr);
52 return user_frame;
53 }
54 }
55
56 return 0;
57}
58
/*
 * Record a user-space callchain. The Meta stack grows upwards, so we
 * start from the saved user stack pointer (AX[0].U0) and walk frames
 * downwards until user_backtrace() gives up or the entry buffer fills.
 */
void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	unsigned long sp = regs->ctx.AX[0].U0;
	struct metag_frame __user *frame;

	frame = (struct metag_frame __user *)sp;

	--frame;

	while ((entry->nr < entry->max_stack) && frame)
		frame = user_backtrace(frame, entry);
}
72
73/*
74 * Gets called by walk_stackframe() for every stackframe. This will be called
75 * whist unwinding the stackframe and is like a subroutine return so we use
76 * the PC.
77 */
78static int
79callchain_trace(struct stackframe *fr,
80 void *data)
81{
82 struct perf_callchain_entry_ctx *entry = data;
83 perf_callchain_store(entry, fr->pc);
84 return 0;
85}
86
/*
 * Record a kernel callchain by seeding the stack walker from the
 * trapped context's register state and letting callchain_trace()
 * store each frame's PC.
 */
void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	struct stackframe fr;

	fr.fp = regs->ctx.AX[1].U0;	/* frame pointer */
	fr.sp = regs->ctx.AX[0].U0;	/* stack pointer */
	fr.lr = regs->ctx.DX[4].U1;	/* link register (D1RtP) */
	fr.pc = regs->ctx.CurrPC;
	walk_stackframe(&fr, callchain_trace, entry);
}
diff --git a/arch/metag/kernel/process.c b/arch/metag/kernel/process.c
deleted file mode 100644
index 0909834c83a7..000000000000
--- a/arch/metag/kernel/process.c
+++ /dev/null
@@ -1,448 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2005,2006,2007,2008,2009,2010,2011 Imagination Technologies
4 *
5 * This file contains the architecture-dependent parts of process handling.
6 *
7 */
8
9#include <linux/errno.h>
10#include <linux/export.h>
11#include <linux/sched.h>
12#include <linux/sched/debug.h>
13#include <linux/sched/task.h>
14#include <linux/sched/task_stack.h>
15#include <linux/kernel.h>
16#include <linux/mm.h>
17#include <linux/unistd.h>
18#include <linux/ptrace.h>
19#include <linux/user.h>
20#include <linux/reboot.h>
21#include <linux/elfcore.h>
22#include <linux/fs.h>
23#include <linux/tick.h>
24#include <linux/slab.h>
25#include <linux/mman.h>
26#include <linux/pm.h>
27#include <linux/syscalls.h>
28#include <linux/uaccess.h>
29#include <linux/smp.h>
30#include <asm/core_reg.h>
31#include <asm/user_gateway.h>
32#include <asm/tcm.h>
33#include <asm/traps.h>
34#include <asm/switch_to.h>
35
/*
 * Wait for the next interrupt and enable local interrupts
 *
 * Implemented entirely in inline asm: switches to ISTAT (interrupt)
 * mode, unmasks this thread's triggers, and swaps PC with PCX so the
 * core blocks in the interrupt entry path until something fires.
 */
void arch_cpu_idle(void)
{
	int tmp;

	/*
	 * Quickly jump straight into the interrupt entry point without actually
	 * triggering an interrupt. When TXSTATI gets read the processor will
	 * block until an interrupt is triggered.
	 */
	asm volatile (/* Switch into ISTAT mode */
		      "RTH\n\t"
		      /* Enable local interrupts */
		      "MOV	TXMASKI, %1\n\t"
		      /*
		       * We can't directly "SWAP PC, PCX", so we swap via a
		       * temporary. Essentially we do:
		       *  PCX_new = 1f (the place to continue execution)
		       *  PC = PCX_old
		       */
		      "ADD	%0, CPC0, #(1f-.)\n\t"
		      "SWAP	PCX, %0\n\t"
		      "MOV	PC, %0\n"
		      /* Continue execution here with interrupts enabled */
		      "1:"
		      : "=a" (tmp)
		      : "r" (get_trigger_mask()));
}
66
#ifdef CONFIG_HOTPLUG_CPU
/* Idle loop entry for a CPU that has been offlined: take it down. */
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif
73
/* Board/SoC power-off hook; presumably set by platform code (may be NULL). */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

/* Optional SoC-specific restart/halt hooks — NOTE(review): assumed to be
 * installed by platform setup code; verify against callers. */
void (*soc_restart)(char *cmd);
void (*soc_halt)(void);
79
/*
 * Restart the machine: give the SoC hook first refusal, then halt the
 * hardware thread (the hook may not return).
 */
void machine_restart(char *cmd)
{
	if (soc_restart)
		soc_restart(cmd);
	hard_processor_halt(HALT_OK);
}
86
/*
 * Halt the machine: run the SoC halt hook if present, stop the other
 * CPUs, then halt this hardware thread.
 */
void machine_halt(void)
{
	if (soc_halt)
		soc_halt();
	smp_send_stop();
	hard_processor_halt(HALT_OK);
}
94
/*
 * Power the machine off via pm_power_off if the platform provides it;
 * in any case stop the other CPUs and halt this hardware thread.
 */
void machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();
	smp_send_stop();
	hard_processor_halt(HALT_OK);
}
102
/* Condition-flag bits within ctx.Flags, used for pretty-printing below. */
#define FLAG_Z 0x8
#define FLAG_N 0x4
#define FLAG_O 0x2
#define FLAG_C 0x1

/*
 * Dump the saved register context in @regs in human-readable form:
 * status/flags, PC, the AX address registers (including the extended
 * AX2/AX3 slots) and both banks of DX data registers, followed by a
 * stack trace.
 */
void show_regs(struct pt_regs *regs)
{
	int i;
	const char *AX0_names[] = {"A0StP", "A0FrP"};
	const char *AX1_names[] = {"A1GbP", "A1LbP"};

	const char *DX0_names[] = {
		"D0Re0",
		"D0Ar6",
		"D0Ar4",
		"D0Ar2",
		"D0FrT",
		"D0.5 ",
		"D0.6 ",
		"D0.7 "
	};

	const char *DX1_names[] = {
		"D1Re0",
		"D1Ar5",
		"D1Ar3",
		"D1Ar1",
		"D1RtP",
		"D1.5 ",
		"D1.6 ",
		"D1.7 "
	};

	show_regs_print_info(KERN_INFO);

	pr_info(" pt_regs @ %p\n", regs);
	pr_info(" SaveMask = 0x%04hx\n", regs->ctx.SaveMask);
	pr_info(" Flags = 0x%04hx (%c%c%c%c)\n", regs->ctx.Flags,
		regs->ctx.Flags & FLAG_Z ? 'Z' : 'z',
		regs->ctx.Flags & FLAG_N ? 'N' : 'n',
		regs->ctx.Flags & FLAG_O ? 'O' : 'o',
		regs->ctx.Flags & FLAG_C ? 'C' : 'c');
	pr_info(" TXRPT = 0x%08x\n", regs->ctx.CurrRPT);
	pr_info(" PC = 0x%08x\n", regs->ctx.CurrPC);

	/* AX regs */
	for (i = 0; i < 2; i++) {
		pr_info(" %s = 0x%08x    ",
			AX0_names[i],
			regs->ctx.AX[i].U0);
		printk(" %s = 0x%08x\n",
			AX1_names[i],
			regs->ctx.AX[i].U1);
	}

	if (regs->ctx.SaveMask & TBICTX_XEXT_BIT)
		pr_warn(" Extended state present - AX2.[01] will be WRONG\n");

	/* Special place with AXx.2 */
	pr_info(" A0.2  = 0x%08x    ",
		regs->ctx.Ext.AX2.U0);
	printk(" A1.2  = 0x%08x\n",
		regs->ctx.Ext.AX2.U1);

	/* 'extended' AX regs (nominally, just AXx.3) */
	for (i = 0; i < (TBICTX_AX_REGS - 3); i++) {
		pr_info(" A0.%d  = 0x%08x    ", i + 3, regs->ctx.AX3[i].U0);
		printk(" A1.%d  = 0x%08x\n", i + 3, regs->ctx.AX3[i].U1);
	}

	for (i = 0; i < 8; i++) {
		pr_info(" %s = 0x%08x    ", DX0_names[i], regs->ctx.DX[i].U0);
		printk(" %s = 0x%08x\n", DX1_names[i], regs->ctx.DX[i].U1);
	}

	show_trace(NULL, (unsigned long *)regs->ctx.AX[0].U0, regs);
}
180
/*
 * Copy architecture-specific thread state
 *
 * Sets up the child's kernel context and pt_regs for either a kernel
 * thread (zeroed regs, fn/arg in D1RtP/D1Ar1) or a userspace clone
 * (copied parent regs, optional new stack and TLS). Returns 0; the
 * FPU/DSP context duplication uses GFP_ATOMIC and tolerates failure.
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long kthread_arg, struct task_struct *tsk)
{
	struct pt_regs *childregs = task_pt_regs(tsk);
	void *kernel_context = ((void *) childregs +
				sizeof(struct pt_regs));
	unsigned long global_base;

	/* Both must be 8-byte aligned for the TBX context switch code. */
	BUG_ON(((unsigned long)childregs) & 0x7);
	BUG_ON(((unsigned long)kernel_context) & 0x7);

	memset(&tsk->thread.kernel_context, 0,
			sizeof(tsk->thread.kernel_context));

	tsk->thread.kernel_context = __TBISwitchInit(kernel_context,
						     ret_from_fork,
						     0, 0);

	if (unlikely(tsk->flags & PF_KTHREAD)) {
		/*
		 * Make sure we don't leak any kernel data to child's regs
		 * if kernel thread becomes a userspace thread in the future
		 */
		memset(childregs, 0 , sizeof(struct pt_regs));

		global_base = __core_reg_get(A1GbP);
		childregs->ctx.AX[0].U1 = (unsigned long) global_base;
		childregs->ctx.AX[0].U0 = (unsigned long) kernel_context;
		/* Set D1Ar1=kthread_arg and D1RtP=usp (fn) */
		childregs->ctx.DX[4].U1 = usp;
		childregs->ctx.DX[3].U1 = kthread_arg;
		tsk->thread.int_depth = 2;
		return 0;
	}

	/*
	 * Get a pointer to where the new child's register block should have
	 * been pushed.
	 * The Meta's stack grows upwards, and the context is the first
	 * thing to be pushed by TBX (phew)
	 */
	*childregs = *current_pt_regs();
	/* Set the correct stack for the clone mode */
	if (usp)
		childregs->ctx.AX[0].U0 = ALIGN(usp, 8);
	tsk->thread.int_depth = 1;

	/* set return value for child process */
	childregs->ctx.DX[0].U0 = 0;

	/* The TLS pointer is passed as an argument to sys_clone. */
	if (clone_flags & CLONE_SETTLS)
		tsk->thread.tls_ptr =
				(__force void __user *)childregs->ctx.DX[1].U1;

#ifdef CONFIG_METAG_FPU
	/* Give the child its own copy of the parent's FPU context. */
	if (tsk->thread.fpu_context) {
		struct meta_fpu_context *ctx;

		ctx = kmemdup(tsk->thread.fpu_context,
			      sizeof(struct meta_fpu_context), GFP_ATOMIC);
		tsk->thread.fpu_context = ctx;
	}
#endif

#ifdef CONFIG_METAG_DSP
	/* Deep-copy the DSP context, including both RAM banks. */
	if (tsk->thread.dsp_context) {
		struct meta_ext_context *ctx;
		int i;

		ctx = kmemdup(tsk->thread.dsp_context,
			      sizeof(struct meta_ext_context), GFP_ATOMIC);
		for (i = 0; i < 2; i++)
			ctx->ram[i] = kmemdup(ctx->ram[i], ctx->ram_sz[i],
					      GFP_ATOMIC);
		tsk->thread.dsp_context = ctx;
	}
#endif

	return 0;
}
265
#ifdef CONFIG_METAG_FPU
/* Lazily allocate an FPU save area for @thread (GFP_ATOMIC; may fail). */
static void alloc_fpu_context(struct thread_struct *thread)
{
	thread->fpu_context = kzalloc(sizeof(struct meta_fpu_context),
				      GFP_ATOMIC);
}

/* Drop @thread's FPU state: clear the FP-active flag and free the save area. */
static void clear_fpu(struct thread_struct *thread)
{
	thread->user_flags &= ~TBICTX_FPAC_BIT;
	kfree(thread->fpu_context);
	thread->fpu_context = NULL;
}
#else
/* No FPU support configured: nothing to clear. */
static void clear_fpu(struct thread_struct *thread)
{
}
#endif
284
#ifdef CONFIG_METAG_DSP
/* Free @thread's DSP context (both RAM banks) and reset D0.8. */
static void clear_dsp(struct thread_struct *thread)
{
	if (thread->dsp_context) {
		kfree(thread->dsp_context->ram[0]);
		kfree(thread->dsp_context->ram[1]);

		kfree(thread->dsp_context);

		thread->dsp_context = NULL;
	}

	__core_reg_set(D0.8, 0);
}
#else
/* No DSP support configured: nothing to clear. */
static void clear_dsp(struct thread_struct *thread)
{
}
#endif
304
/*
 * Context-switch from @prev to @next: saves prev's FPU state if it was
 * using the FPU, performs the TBX kernel-context switch, and restores
 * the incoming task's TLS pointer. Returns the task we switched away
 * from (as recovered from the TBX switch result).
 */
struct task_struct *__sched __switch_to(struct task_struct *prev,
					struct task_struct *next)
{
	TBIRES to, from;

	to.Switch.pCtx = next->thread.kernel_context;
	to.Switch.pPara = prev;

#ifdef CONFIG_METAG_FPU
	if (prev->thread.user_flags & TBICTX_FPAC_BIT) {
		struct pt_regs *regs = task_pt_regs(prev);
		TBIRES state;

		state.Sig.SaveMask = prev->thread.user_flags;
		state.Sig.pCtx = &regs->ctx;

		/* Save area is allocated lazily on first FPU use. */
		if (!prev->thread.fpu_context)
			alloc_fpu_context(&prev->thread);
		if (prev->thread.fpu_context)
			__TBICtxFPUSave(state, prev->thread.fpu_context);
	}
	/*
	 * Force a restore of the FPU context next time this process is
	 * scheduled.
	 */
	if (prev->thread.fpu_context)
		prev->thread.fpu_context->needs_restore = true;
#endif


	from = __TBISwitch(to, &prev->thread.kernel_context);

	/* Restore TLS pointer for this process. */
	set_gateway_tls(current->thread.tls_ptr);

	return (struct task_struct *) from.Switch.pPara;
}
342
/* Reset the current thread's extended (FPU/DSP) state, e.g. on exec. */
void flush_thread(void)
{
	clear_fpu(&current->thread);
	clear_dsp(&current->thread);
}
348
/*
 * Free current thread data structures etc.
 */
void exit_thread(struct task_struct *tsk)
{
	clear_fpu(&tsk->thread);
	clear_dsp(&tsk->thread);
}
357
/* TODO: figure out how to unwind the kernel stack here to figure out
 * where we went to sleep. */
/* Always reports 0 (unknown wait channel) — see TODO above. */
unsigned long get_wchan(struct task_struct *p)
{
	return 0;
}
364
/* Core-dump hook for FPU registers. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	/* Returning 0 indicates that the FPU state was not stored (as it was
	 * not in use) */
	return 0;
}
371
#ifdef CONFIG_METAG_USER_TCM

#define ELF_MIN_ALIGN	PAGE_SIZE

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)

/*
 * Map an ELF segment, with special handling for segments whose target
 * address falls in tightly-coupled memory (TCM): those are mmap'd
 * wherever the kernel chooses (MAP_FIXED stripped), a TCM allocation is
 * reserved at the original address, and the segment contents are copied
 * in. Returns the mapped address or a negative errno.
 */
unsigned long __metag_elf_map(struct file *filep, unsigned long addr,
			      struct elf_phdr *eppnt, int prot, int type,
			      unsigned long total_size)
{
	unsigned long map_addr, size;
	unsigned long page_off = ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long raw_size = eppnt->p_filesz + page_off;
	unsigned long off = eppnt->p_offset - page_off;
	unsigned int tcm_tag;
	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(raw_size);

	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	tcm_tag = tcm_lookup_tag(addr);

	/* TCM segments must not be force-mapped at their TCM address. */
	if (tcm_tag != TCM_INVALID_TAG)
		type &= ~MAP_FIXED;

	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image. (since size < total_size)
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end. (which unmap is needed for ELF images with holes.)
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			vm_munmap(map_addr+size, total_size-size);
	} else
		map_addr = vm_mmap(filep, addr, size, prot, type, off);

	if (!BAD_ADDR(map_addr) && tcm_tag != TCM_INVALID_TAG) {
		struct tcm_allocation *tcm;
		unsigned long tcm_addr;

		tcm = kmalloc(sizeof(*tcm), GFP_KERNEL);
		if (!tcm)
			return -ENOMEM;

		/* Reserve the TCM region at the segment's original address. */
		tcm_addr = tcm_alloc(tcm_tag, raw_size);
		if (tcm_addr != addr) {
			kfree(tcm);
			return -ENOMEM;
		}

		tcm->tag = tcm_tag;
		tcm->addr = tcm_addr;
		tcm->size = raw_size;

		list_add(&tcm->list, &current->mm->context.tcm);

		/* Point the program header at where the data actually lives. */
		eppnt->p_vaddr = map_addr;
		if (copy_from_user((void *) addr, (void __user *) map_addr,
				   raw_size))
			return -EFAULT;
	}

	return map_addr;
}
#endif
diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c
deleted file mode 100644
index e615603a4b0a..000000000000
--- a/arch/metag/kernel/ptrace.c
+++ /dev/null
@@ -1,427 +0,0 @@
1/*
2 * Copyright (C) 2005-2012 Imagination Technologies Ltd.
3 *
4 * This file is subject to the terms and conditions of the GNU General
5 * Public License. See the file COPYING in the main directory of
6 * this archive for more details.
7 */
8
9#include <linux/kernel.h>
10#include <linux/mm.h>
11#include <linux/errno.h>
12#include <linux/ptrace.h>
13#include <linux/user.h>
14#include <linux/regset.h>
15#include <linux/tracehook.h>
16#include <linux/elf.h>
17#include <linux/uaccess.h>
18#include <linux/sched/task_stack.h>
19
20#include <trace/syscall.h>
21
22#define CREATE_TRACE_POINTS
23#include <trace/events/syscalls.h>
24
25/*
26 * user_regset definitions.
27 */
28
29static unsigned long user_txstatus(const struct pt_regs *regs)
30{
31 unsigned long data = (unsigned long)regs->ctx.Flags;
32
33 if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
34 data |= USER_GP_REGS_STATUS_CATCH_BIT;
35
36 return data;
37}
38
/*
 * Copy the general-purpose register view of @regs out to a regset
 * buffer, in the fixed user-visible layout: DX pairs, AX pairs, AX2
 * (from the extended context when present), AX3, PC, a synthesised
 * TXSTATUS, TXRPT/TXBPOBITS/TXMODE, then padding. Returns 0 or the
 * first copyout error.
 */
int metag_gp_regs_copyout(const struct pt_regs *regs,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	const void *ptr;
	unsigned long data;
	int ret;

	/* D{0-1}.{0-7} */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs->ctx.DX, 0, 4*16);
	if (ret)
		goto out;
	/* A{0-1}.{0-1} */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs->ctx.AX, 4*16, 4*20);
	if (ret)
		goto out;
	/* A{0-1}.2 */
	if (regs->ctx.SaveMask & TBICTX_XEXT_BIT)
		ptr = regs->ctx.Ext.Ctx.pExt;
	else
		ptr = &regs->ctx.Ext.AX2;
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  ptr, 4*20, 4*22);
	if (ret)
		goto out;
	/* A{0-1}.3 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &regs->ctx.AX3, 4*22, 4*24);
	if (ret)
		goto out;
	/* PC */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &regs->ctx.CurrPC, 4*24, 4*25);
	if (ret)
		goto out;
	/* TXSTATUS */
	data = user_txstatus(regs);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &data, 4*25, 4*26);
	if (ret)
		goto out;
	/* TXRPT, TXBPOBITS, TXMODE */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &regs->ctx.CurrRPT, 4*26, 4*29);
	if (ret)
		goto out;
	/* Padding */
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       4*29, 4*30);
out:
	return ret;
}
93
/*
 * Copy a regset buffer into the general-purpose register view of
 * @regs, mirroring metag_gp_regs_copyout()'s layout. TXSTATUS is
 * split back into Flags and the catch-buffer SaveMask bits. Returns 0
 * or the first copyin error.
 */
int metag_gp_regs_copyin(struct pt_regs *regs,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	void *ptr;
	unsigned long data;
	int ret;

	/* D{0-1}.{0-7} */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs->ctx.DX, 0, 4*16);
	if (ret)
		goto out;
	/* A{0-1}.{0-1} */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs->ctx.AX, 4*16, 4*20);
	if (ret)
		goto out;
	/* A{0-1}.2 */
	if (regs->ctx.SaveMask & TBICTX_XEXT_BIT)
		ptr = regs->ctx.Ext.Ctx.pExt;
	else
		ptr = &regs->ctx.Ext.AX2;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 ptr, 4*20, 4*22);
	if (ret)
		goto out;
	/* A{0-1}.3 */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->ctx.AX3, 4*22, 4*24);
	if (ret)
		goto out;
	/* PC */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->ctx.CurrPC, 4*24, 4*25);
	if (ret)
		goto out;
	/* TXSTATUS */
	data = user_txstatus(regs);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &data, 4*25, 4*26);
	if (ret)
		goto out;
	/* Flags live in the low 16 bits; the catch bit maps to SaveMask. */
	regs->ctx.Flags = data & 0xffff;
	if (data & USER_GP_REGS_STATUS_CATCH_BIT)
		regs->ctx.SaveMask |= TBICTX_XCBF_BIT | TBICTX_CBUF_BIT;
	else
		regs->ctx.SaveMask &= ~TBICTX_CBUF_BIT;
	/* TXRPT, TXBPOBITS, TXMODE */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->ctx.CurrRPT, 4*26, 4*29);
out:
	return ret;
}
148
/* regset .get handler: copy @target's GP registers out. */
static int metag_gp_regs_get(struct task_struct *target,
			     const struct user_regset *regset,
			     unsigned int pos, unsigned int count,
			     void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	return metag_gp_regs_copyout(regs, pos, count, kbuf, ubuf);
}
157
/* regset .set handler: write @target's GP registers from the buffer. */
static int metag_gp_regs_set(struct task_struct *target,
			     const struct user_regset *regset,
			     unsigned int pos, unsigned int count,
			     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	return metag_gp_regs_copyin(regs, pos, count, kbuf, ubuf);
}
166
167int metag_cb_regs_copyout(const struct pt_regs *regs,
168 unsigned int pos, unsigned int count,
169 void *kbuf, void __user *ubuf)
170{
171 int ret;
172
173 /* TXCATCH{0-3} */
174 if (regs->ctx.SaveMask & TBICTX_XCBF_BIT)
175 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
176 regs->extcb0, 0, 4*4);
177 else
178 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
179 0, 4*4);
180 return ret;
181}
182
183int metag_cb_regs_copyin(struct pt_regs *regs,
184 unsigned int pos, unsigned int count,
185 const void *kbuf, const void __user *ubuf)
186{
187 int ret;
188
189 /* TXCATCH{0-3} */
190 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
191 regs->extcb0, 0, 4*4);
192 return ret;
193}
194
/* regset .get handler: copy @target's catch-buffer registers out. */
static int metag_cb_regs_get(struct task_struct *target,
			     const struct user_regset *regset,
			     unsigned int pos, unsigned int count,
			     void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	return metag_cb_regs_copyout(regs, pos, count, kbuf, ubuf);
}
203
/* regset .set handler: write @target's catch-buffer registers. */
static int metag_cb_regs_set(struct task_struct *target,
			     const struct user_regset *regset,
			     unsigned int pos, unsigned int count,
			     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	return metag_cb_regs_copyin(regs, pos, count, kbuf, ubuf);
}
212
/*
 * Copy the read-pipeline state out of @regs: six 64-bit pipeline
 * entries (zeroed where the RPMask bit is clear) followed by the mask
 * itself. If no read-pipeline state was saved, the whole 13-word area
 * is zero-filled.
 */
int metag_rp_state_copyout(const struct pt_regs *regs,
			   unsigned int pos, unsigned int count,
			   void *kbuf, void __user *ubuf)
{
	unsigned long mask;
	u64 *ptr;
	int ret, i;

	/* Empty read pipeline */
	if (!(regs->ctx.SaveMask & TBICTX_CBRP_BIT)) {
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       0, 4*13);
		goto out;
	}

	mask = (regs->ctx.CurrDIVTIME & TXDIVTIME_RPMASK_BITS) >>
		TXDIVTIME_RPMASK_S;

	/* Read pipeline entries */
	ptr = (void *)&regs->extcb0[1];
	for (i = 0; i < 6; ++i, ++ptr) {
		if (mask & (1 << i))
			ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
						  ptr, 8*i, 8*(i + 1));
		else
			ret = user_regset_copyout_zero(&pos, &count, &kbuf,
						       &ubuf, 8*i, 8*(i + 1));
		if (ret)
			goto out;
	}
	/* Mask of entries */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &mask, 4*12, 4*13);
out:
	return ret;
}
249
/*
 * Write new read-pipeline state into @regs: reads the full user_rp_state
 * buffer first, then installs only the entries selected by its mask,
 * updates RPMask in TXDIVTIME, and adjusts the SaveMask catch bits
 * accordingly. Requires at least the full 13-word buffer.
 */
int metag_rp_state_copyin(struct pt_regs *regs,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_rp_state rp;
	unsigned long long *ptr;
	int ret, i;

	if (count < 4*13)
		return -EINVAL;
	/* Read the entire pipeline before making any changes */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &rp, 0, 4*13);
	if (ret)
		goto out;

	/* Write pipeline entries */
	ptr = (void *)&regs->extcb0[1];
	for (i = 0; i < 6; ++i, ++ptr)
		if (rp.mask & (1 << i))
			*ptr = rp.entries[i];

	/* Update RPMask in TXDIVTIME */
	regs->ctx.CurrDIVTIME &= ~TXDIVTIME_RPMASK_BITS;
	regs->ctx.CurrDIVTIME |= (rp.mask << TXDIVTIME_RPMASK_S)
				 & TXDIVTIME_RPMASK_BITS;

	/* Set/clear flags to indicate catch/read pipeline state */
	if (rp.mask)
		regs->ctx.SaveMask |= TBICTX_XCBF_BIT | TBICTX_CBRP_BIT;
	else
		regs->ctx.SaveMask &= ~TBICTX_CBRP_BIT;
out:
	return ret;
}
285
286static int metag_rp_state_get(struct task_struct *target,
287 const struct user_regset *regset,
288 unsigned int pos, unsigned int count,
289 void *kbuf, void __user *ubuf)
290{
291 const struct pt_regs *regs = task_pt_regs(target);
292 return metag_rp_state_copyout(regs, pos, count, kbuf, ubuf);
293}
294
295static int metag_rp_state_set(struct task_struct *target,
296 const struct user_regset *regset,
297 unsigned int pos, unsigned int count,
298 const void *kbuf, const void __user *ubuf)
299{
300 struct pt_regs *regs = task_pt_regs(target);
301 return metag_rp_state_copyin(regs, pos, count, kbuf, ubuf);
302}
303
304static int metag_tls_get(struct task_struct *target,
305 const struct user_regset *regset,
306 unsigned int pos, unsigned int count,
307 void *kbuf, void __user *ubuf)
308{
309 void __user *tls = target->thread.tls_ptr;
310 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
311}
312
313static int metag_tls_set(struct task_struct *target,
314 const struct user_regset *regset,
315 unsigned int pos, unsigned int count,
316 const void *kbuf, const void __user *ubuf)
317{
318 int ret;
319 void __user *tls = target->thread.tls_ptr;
320
321 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
322 if (ret)
323 return ret;
324
325 target->thread.tls_ptr = tls;
326 return ret;
327}
328
/* Indices into the metag_regsets[] array below. */
enum metag_regset {
	REGSET_GENERAL,		/* general purpose registers (NT_PRSTATUS) */
	REGSET_CBUF,		/* catch buffer registers (NT_METAG_CBUF) */
	REGSET_READPIPE,	/* read pipeline state (NT_METAG_RPIPE) */
	REGSET_TLS,		/* TLS pointer (NT_METAG_TLS) */
};
335
/*
 * The regsets exposed to ptrace/core-dumps for Meta tasks; each entry
 * wires an ELF note type to its get/set handlers above.
 */
static const struct user_regset metag_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(long),
		.align = sizeof(long long),
		.get = metag_gp_regs_get,
		.set = metag_gp_regs_set,
	},
	[REGSET_CBUF] = {
		.core_note_type = NT_METAG_CBUF,
		.n = sizeof(struct user_cb_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long long),
		.get = metag_cb_regs_get,
		.set = metag_cb_regs_set,
	},
	[REGSET_READPIPE] = {
		.core_note_type = NT_METAG_RPIPE,
		.n = sizeof(struct user_rp_state) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long long),
		.get = metag_rp_state_get,
		.set = metag_rp_state_set,
	},
	[REGSET_TLS] = {
		.core_note_type = NT_METAG_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = metag_tls_get,
		.set = metag_tls_set,
	},
};
370
/* The single regset view for Meta; returned for every task. */
static const struct user_regset_view user_metag_view = {
	.name = "metag",
	.e_machine = EM_METAG,
	.regsets = metag_regsets,
	.n = ARRAY_SIZE(metag_regsets)
};
377
/*
 * Return the regset view for @task.  Meta has no compat ABI, so the
 * same view is used regardless of the task.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_metag_view;
}
382
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* nothing to do.. */
}
392
/*
 * Architecture hook for ptrace requests.  Meta implements no
 * arch-specific requests, so everything is delegated to the generic
 * ptrace_request() handler.
 */
long arch_ptrace(struct task_struct *child, long request, unsigned long addr,
		 unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}
406
/*
 * Syscall entry tracing hook.  Reports to the tracehook layer and the
 * sys_enter tracepoint.  Returns the syscall number (taken from
 * DX[0].U1) to continue, or -1 to abort the syscall if the tracer
 * requested it.
 */
int syscall_trace_enter(struct pt_regs *regs)
{
	int ret = 0;

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		ret = tracehook_report_syscall_entry(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->ctx.DX[0].U1);

	return ret ? -1 : regs->ctx.DX[0].U1;
}
419
/*
 * Syscall exit tracing hook: fire the sys_exit tracepoint, then report
 * to the tracehook layer if the task is being ptraced.
 */
void syscall_trace_leave(struct pt_regs *regs)
{
	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->ctx.DX[0].U1);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}
diff --git a/arch/metag/kernel/setup.c b/arch/metag/kernel/setup.c
deleted file mode 100644
index 1166f1fbfd63..000000000000
--- a/arch/metag/kernel/setup.c
+++ /dev/null
@@ -1,622 +0,0 @@
1/*
2 * Copyright (C) 2005-2012 Imagination Technologies Ltd.
3 *
4 * This file contains the architecture-dependant parts of system setup.
5 *
6 */
7
8#include <linux/export.h>
9#include <linux/bootmem.h>
10#include <linux/console.h>
11#include <linux/cpu.h>
12#include <linux/delay.h>
13#include <linux/errno.h>
14#include <linux/fs.h>
15#include <linux/genhd.h>
16#include <linux/init.h>
17#include <linux/initrd.h>
18#include <linux/interrupt.h>
19#include <linux/kernel.h>
20#include <linux/memblock.h>
21#include <linux/mm.h>
22#include <linux/of_fdt.h>
23#include <linux/pfn.h>
24#include <linux/root_dev.h>
25#include <linux/sched.h>
26#include <linux/seq_file.h>
27#include <linux/start_kernel.h>
28#include <linux/string.h>
29
30#include <asm/cachepart.h>
31#include <asm/clock.h>
32#include <asm/core_reg.h>
33#include <asm/cpu.h>
34#include <asm/da.h>
35#include <asm/highmem.h>
36#include <asm/hwthread.h>
37#include <asm/l2cache.h>
38#include <asm/mach/arch.h>
39#include <asm/metag_mem.h>
40#include <asm/metag_regs.h>
41#include <asm/mmu.h>
42#include <asm/mmzone.h>
43#include <asm/processor.h>
44#include <asm/sections.h>
45#include <asm/setup.h>
46#include <asm/traps.h>
47
/* Priv protect as many registers as possible. */
#define DEFAULT_PRIV	(TXPRIVEXT_COPRO_BITS		| \
			 TXPRIVEXT_TXTRIGGER_BIT	| \
			 TXPRIVEXT_TXGBLCREG_BIT	| \
			 TXPRIVEXT_ILOCK_BIT		| \
			 TXPRIVEXT_TXITACCYC_BIT	| \
			 TXPRIVEXT_TXDIVTIME_BIT	| \
			 TXPRIVEXT_TXAMAREGX_BIT	| \
			 TXPRIVEXT_TXTIMERI_BIT		| \
			 TXPRIVEXT_TXSTATUS_BIT		| \
			 TXPRIVEXT_TXDISABLE_BIT)

/* Meta2 specific bits. */
#ifdef CONFIG_METAG_META12
#define META2_PRIV	0
#else
#define META2_PRIV	(TXPRIVEXT_TXTIMER_BIT | \
			 TXPRIVEXT_TRACE_BIT)
#endif

/* Unaligned access checking bits. */
#ifdef CONFIG_METAG_UNALIGNED
#define UNALIGNED_PRIV	TXPRIVEXT_ALIGNREW_BIT
#else
#define UNALIGNED_PRIV	0
#endif

/* Full TXPRIVEXT value; written to the core register in setup_priv(). */
#define PRIV_BITS	(DEFAULT_PRIV	| \
			 META2_PRIV	| \
			 UNALIGNED_PRIV)

/*
 * Protect access to:
 * 0x06000000-0x07ffffff Direct mapped region
 * 0x05000000-0x05ffffff MMU table region (Meta1)
 * 0x04400000-0x047fffff Cache flush region
 * 0x84000000-0x87ffffff Core cache memory region (Meta2)
 *
 * Allow access to:
 * 0x80000000-0x81ffffff Core code memory region (Meta2)
 */
#ifdef CONFIG_METAG_META12
#define PRIVSYSR_BITS	TXPRIVSYSR_ALL_BITS
#else
#define PRIVSYSR_BITS	(TXPRIVSYSR_ALL_BITS & ~TXPRIVSYSR_CORECODE_BIT)
#endif

/* Protect all 0x02xxxxxx and 0x048xxxxx. */
#define PIOREG_BITS	0xffffffff

/*
 * Protect all 0x04000xx0 (system events)
 * except write combiner flush and write fence (system events 4 and 5).
 */
#define PSYREG_BITS	0xfffffffb
103
104
/* Start of the kernel heap; defined by the linker script. */
extern char _heap_start[];

#ifdef CONFIG_DA_CONSOLE
/* Our early channel based console driver */
extern struct console dash_console;
#endif

/* Board description chosen in setup_arch(); used by the initcalls below. */
const struct machine_desc *machine_desc __initdata;

/*
 * Map a Linux CPU number to a hardware thread ID
 * In SMP this will be setup with the correct mapping at startup; in UP this
 * will map to the HW thread on which we are running.
 */
u8 cpu_2_hwthread_id[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BAD_HWTHREAD_ID
};
EXPORT_SYMBOL_GPL(cpu_2_hwthread_id);

/*
 * Map a hardware thread ID to a Linux CPU number
 * In SMP this will be fleshed out with the correct CPU ID for a particular
 * hardware thread. In UP this will be initialised with the boot CPU ID.
 */
u8 hwthread_id_2_cpu[4] __read_mostly = {
	[0 ... 3] = BAD_CPU_ID
};

/* The relative offset of the MMU mapped memory (from ldlk or bootloader)
 * to the real physical memory.  This is needed as we have to use the
 * physical addresses in the MMU tables (pte entries), and not the virtual
 * addresses.
 * This variable is used in the __pa() and __va() macros, and should
 * probably only be used via them.
 */
unsigned int meta_memoffset;
EXPORT_SYMBOL(meta_memoffset);

/* Bootloader command line, stashed by metag_start_kernel() for setup_arch(). */
static char __initdata *original_cmd_line;

/* Per-CPU pointer to the Thread Binary Interface control block. */
DEFINE_PER_CPU(PTBI, pTBI);
146
147/*
148 * Mapping are specified as "CPU_ID:HWTHREAD_ID", e.g.
149 *
150 * "hwthread_map=0:1,1:2,2:3,3:0"
151 *
152 * Linux CPU ID HWTHREAD_ID
153 * ---------------------------
154 * 0 1
155 * 1 2
156 * 2 3
157 * 3 0
158 */
159static int __init parse_hwthread_map(char *p)
160{
161 int cpu;
162
163 while (*p) {
164 cpu = (*p++) - '0';
165 if (cpu < 0 || cpu > 9)
166 goto err_cpu;
167
168 p++; /* skip semi-colon */
169 cpu_2_hwthread_id[cpu] = (*p++) - '0';
170 if (cpu_2_hwthread_id[cpu] >= 4)
171 goto err_thread;
172 hwthread_id_2_cpu[cpu_2_hwthread_id[cpu]] = cpu;
173
174 if (*p == ',')
175 p++; /* skip comma */
176 }
177
178 return 0;
179err_cpu:
180 pr_err("%s: hwthread_map cpu argument out of range\n", __func__);
181 return -EINVAL;
182err_thread:
183 pr_err("%s: hwthread_map thread argument out of range\n", __func__);
184 return -EINVAL;
185}
186early_param("hwthread_map", parse_hwthread_map);
187
/*
 * Print every machine description the kernel was built with (name plus
 * device-tree compatible strings), then halt.  Called when no machine
 * description matched the supplied device tree; this never returns.
 */
void __init dump_machine_table(void)
{
	struct machine_desc *p;
	const char **compat;

	pr_info("Available machine support:\n\tNAME\t\tCOMPATIBLE LIST\n");
	for_each_machine_desc(p) {
		pr_info("\t%s\t[", p->name);
		for (compat = p->dt_compat; compat && *compat; ++compat)
			printk(" '%s'", *compat);
		printk(" ]\n");
	}

	pr_info("\nPlease check your kernel config and/or bootloader.\n");

	hard_processor_halt(HALT_PANIC);
}
205
#ifdef CONFIG_METAG_HALT_ON_PANIC
/* Panic notifier callback: hard-halt the hardware thread on panic. */
static int metag_panic_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	hard_processor_halt(HALT_PANIC);
	return NOTIFY_DONE;
}

/*
 * Use a designated initializer rather than positional initialization;
 * the remaining fields (next, priority) are implicitly zeroed exactly
 * as before.
 */
static struct notifier_block metag_panic_block = {
	.notifier_call = metag_panic_event,
};
#endif
220
/**
 * setup_arch() - Architecture-specific boot-time setup.
 * @cmdline_p:	out: pointer to the final kernel command line.
 *
 * Probes caches and the DA, selects the machine description (from a
 * device tree, built-in DTB or default), validates the privilege and
 * load-address invariants Meta requires, locates the TBI heap segment,
 * and initialises the memory layout (pfn ranges, paging, highmem)
 * before the generic boot code continues.  Panics on any unrecoverable
 * configuration problem.
 */
void __init setup_arch(char **cmdline_p)
{
	unsigned long start_pfn;
	unsigned long text_start = (unsigned long)(&_stext);
	unsigned long cpu = smp_processor_id();
	unsigned long heap_start, heap_end;
	unsigned long start_pte;
	PTBI _pTBI;
	PTBISEG p_heap;
	int heap_id, i;

	metag_cache_probe();

	metag_da_probe();
#ifdef CONFIG_DA_CONSOLE
	if (metag_da_enabled()) {
		/* An early channel based console driver */
		register_console(&dash_console);
		add_preferred_console("ttyDA", 1, NULL);
	}
#endif

	/* try interpreting the argument as a device tree */
	machine_desc = setup_machine_fdt(original_cmd_line);
	/* if it doesn't look like a device tree it must be a command line */
	if (!machine_desc) {
#ifdef CONFIG_METAG_BUILTIN_DTB
		/* try the embedded device tree */
		machine_desc = setup_machine_fdt(__dtb_start);
		if (!machine_desc)
			panic("Invalid embedded device tree.");
#else
		/* use the default machine description */
		machine_desc = default_machine_desc();
#endif
#ifndef CONFIG_CMDLINE_FORCE
		/* append the bootloader cmdline to any builtin fdt cmdline */
		if (boot_command_line[0] && original_cmd_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, original_cmd_line,
			COMMAND_LINE_SIZE);
#endif
	}
	setup_meta_clocks(machine_desc->clocks);

	*cmdline_p = boot_command_line;
	parse_early_param();

	/*
	 * Make sure we don't alias in dcache or icache
	 */
	check_for_cache_aliasing(cpu);


#ifdef CONFIG_METAG_HALT_ON_PANIC
	atomic_notifier_chain_register(&panic_notifier_list,
				       &metag_panic_block);
#endif

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	/* The kernel cannot run without thread privilege */
	if (!(__core_reg_get(TXSTATUS) & TXSTATUS_PSTAT_BIT))
		panic("Privilege must be enabled for this thread.");

	_pTBI = __TBI(TBID_ISTAT_BIT);

	per_cpu(pTBI, cpu) = _pTBI;

	if (!per_cpu(pTBI, cpu))
		panic("No TBI found!");

	/*
	 * Initialize all interrupt vectors to our copy of __TBIUnExpXXX,
	 * rather than the version from the bootloader. This makes call
	 * stacks easier to understand and may allow us to unmap the
	 * bootloader at some point.
	 */
	for (i = 0; i <= TBID_SIGNUM_MAX; i++)
		_pTBI->fnSigs[i] = __TBIUnExpXXX;

	/* A Meta requirement is that the kernel is loaded (virtually)
	 * at the PAGE_OFFSET.
	 */
	if (PAGE_OFFSET != text_start)
		panic("Kernel not loaded at PAGE_OFFSET (%#x) but at %#lx.",
		      PAGE_OFFSET, text_start);

	start_pte = mmu_read_second_level_page(text_start);

	/*
	 * Kernel pages should have the PRIV bit set by the bootloader.
	 */
	if (!(start_pte & _PAGE_KERNEL))
		panic("kernel pte does not have PRIV set");

	/*
	 * See __pa and __va in include/asm/page.h.
	 * This value is negative when running in local space but the
	 * calculations work anyway.
	 */
	meta_memoffset = text_start - (start_pte & PAGE_MASK);

	/* Now lets look at the heap space */
	heap_id = (__TBIThreadId() & TBID_THREAD_BITS)
		+ TBID_SEG(0, TBID_SEGSCOPE_LOCAL, TBID_SEGTYPE_HEAP);

	p_heap = __TBIFindSeg(NULL, heap_id);

	if (!p_heap)
		panic("Could not find heap from TBI!");

	/* The heap begins at the first full page after the kernel data. */
	heap_start = (unsigned long) &_heap_start;

	/* The heap ends at the end of the heap segment specified with
	 * ldlk.
	 */
	if (is_global_space(text_start)) {
		pr_debug("WARNING: running in global space!\n");
		heap_end = (unsigned long)p_heap->pGAddr + p_heap->Bytes;
	} else {
		heap_end = (unsigned long)p_heap->pLAddr + p_heap->Bytes;
	}

	ROOT_DEV = Root_RAM0;

	/* init_mm is the mm struct used for the first task.  It is then
	 * cloned for all other tasks spawned from that task.
	 *
	 * Note - we are using the virtual addresses here.
	 */
	init_mm.start_code = (unsigned long)(&_stext);
	init_mm.end_code = (unsigned long)(&_etext);
	init_mm.end_data = (unsigned long)(&_edata);
	init_mm.brk = (unsigned long)heap_start;

	min_low_pfn = PFN_UP(__pa(text_start));
	max_low_pfn = PFN_DOWN(__pa(heap_end));

	pfn_base = min_low_pfn;

	/* Round max_pfn up to a 4Mb boundary. The free_bootmem_node()
	 * call later makes sure to keep the rounded up pages marked reserved.
	 */
	max_pfn = max_low_pfn + ((1 << MAX_ORDER) - 1);
	max_pfn &= ~((1 << MAX_ORDER) - 1);

	start_pfn = PFN_UP(__pa(heap_start));

	if (min_low_pfn & ((1 << MAX_ORDER) - 1)) {
		/* Theoretically, we could expand the space that the
		 * bootmem allocator covers - much as we do for the
		 * 'high' address, and then tell the bootmem system
		 * that the lowest chunk is 'not available'.  Right
		 * now it is just much easier to constrain the
		 * user to always MAX_ORDER align their kernel space.
		 */

		panic("Kernel must be %d byte aligned, currently at %#lx.",
		      1 << (MAX_ORDER + PAGE_SHIFT),
		      min_low_pfn << PAGE_SHIFT);
	}

#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	high_memory = (void *) __va(PFN_PHYS(highstart_pfn));
#else
	high_memory = (void *)__va(PFN_PHYS(max_pfn));
#endif

	paging_init(heap_end);

	setup_priv();

	/* Setup the boot cpu's mapping. The rest will be setup below. */
	cpu_2_hwthread_id[smp_processor_id()] = hard_processor_id();
	hwthread_id_2_cpu[hard_processor_id()] = smp_processor_id();

	unflatten_and_copy_device_tree();

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	if (machine_desc->init_early)
		machine_desc->init_early();
}
410
/* Run the machine description's device initialisation hook, if any. */
static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);
420
/* Run the machine description's late initialisation hook, if any. */
static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);
428
#ifdef CONFIG_PROC_FS
/*
 * Get CPU information for use by the procfs.
 */
/*
 * Return a static capability string (DSP/FPU variants) for the
 * /proc/cpuinfo "Capabilities" field.  On Meta2.1+ this is decoded
 * from the CORE_ID register; earlier cores derive it from @txenable.
 */
static const char *get_cpu_capabilities(unsigned int txenable)
{
#ifdef CONFIG_METAG_META21
	/* See CORE_ID in META HTP.GP TRM - Architecture Overview 2.1.238 */
	int coreid = metag_in32(METAC_CORE_ID);
	unsigned int dsp_type = (coreid >> 3) & 7;
	unsigned int fpu_type = (coreid >> 7) & 3;

	switch (dsp_type | fpu_type << 3) {
	case (0x00): return "EDSP";
	case (0x01): return "DSP";
	case (0x08): return "EDSP+LFPU";
	case (0x09): return "DSP+LFPU";
	case (0x10): return "EDSP+FPU";
	case (0x11): return "DSP+FPU";
	}
	return "UNKNOWN";

#else
	if (!(txenable & TXENABLE_CLASS_BITS))
		return "DSP";
	else
		return "";
#endif
}
458
/*
 * seq_file show callback for /proc/cpuinfo: prints the core revision,
 * clock, BogoMips/calibration and capability string for each online
 * CPU (or the current thread in UP), plus L2 cache details if present.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	const char *cpu;
	unsigned int txenable, thread_id, major, minor;
	unsigned long clockfreq = get_coreclock();
#ifdef CONFIG_SMP
	int i;
	unsigned long lpj;
#endif

	cpu = "META";

	/* Core revision and running thread come from TXENABLE */
	txenable = __core_reg_get(TXENABLE);
	major = (txenable & TXENABLE_MAJOR_REV_BITS) >> TXENABLE_MAJOR_REV_S;
	minor = (txenable & TXENABLE_MINOR_REV_BITS) >> TXENABLE_MINOR_REV_S;
	thread_id = (txenable >> 8) & 0x3;

#ifdef CONFIG_SMP
	for_each_online_cpu(i) {
		lpj = per_cpu(cpu_data, i).loops_per_jiffy;
		/* Read TXENABLE from the hardware thread backing CPU i */
		txenable = core_reg_read(TXUCT_ID, TXENABLE_REGNUM,
					 cpu_2_hwthread_id[i]);

		seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n"
			      "Clocking:\t%lu.%1luMHz\n"
			      "BogoMips:\t%lu.%02lu\n"
			      "Calibration:\t%lu loops\n"
			      "Capabilities:\t%s\n\n",
			   cpu, major, minor, i,
			   clockfreq / 1000000, (clockfreq / 100000) % 10,
			   lpj / (500000 / HZ), (lpj / (5000 / HZ)) % 100,
			   lpj,
			   get_cpu_capabilities(txenable));
	}
#else
	seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n"
		      "Clocking:\t%lu.%1luMHz\n"
		      "BogoMips:\t%lu.%02lu\n"
		      "Calibration:\t%lu loops\n"
		      "Capabilities:\t%s\n",
		   cpu, major, minor, thread_id,
		   clockfreq / 1000000, (clockfreq / 100000) % 10,
		   loops_per_jiffy / (500000 / HZ),
		   (loops_per_jiffy / (5000 / HZ)) % 100,
		   loops_per_jiffy,
		   get_cpu_capabilities(txenable));
#endif /* CONFIG_SMP */

#ifdef CONFIG_METAG_L2C
	if (meta_l2c_is_present()) {
		seq_printf(m, "L2 cache:\t%s\n"
			      "L2 cache size:\t%d KB\n",
			   meta_l2c_is_enabled() ? "enabled" : "disabled",
			   meta_l2c_size() >> 10);
	}
#endif
	return 0;
}
517
/*
 * seq_file start: there is a single record, so return a non-NULL token
 * only for position 0 (the cast of the boolean yields 1 or NULL).
 */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return (void *)(*pos == 0);
}
/* seq_file next: single record, so iteration always ends here. */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	return NULL;
}
/* seq_file stop: nothing to clean up. */
static void c_stop(struct seq_file *m, void *v)
{
}
/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next  = c_next,
	.stop  = c_stop,
	.show  = show_cpuinfo,
};
#endif /* CONFIG_PROC_FS */
536
/*
 * Earliest C entry point, called from the boot assembly with the
 * bootloader argument string.  Resets the timer, clears .bss, stashes
 * the command line for setup_arch(), records the boot CPU and hands
 * over to the generic start_kernel().  Never returns.
 */
void __init metag_start_kernel(char *args)
{
	/* Zero the timer register so timestamps are from the point at
	 * which the kernel started running.
	 */
	__core_reg_set(TXTIMER, 0);

	/* Clear the bss. */
	memset(__bss_start, 0,
	       (unsigned long)__bss_stop - (unsigned long)__bss_start);

	/* Remember where these are for use in setup_arch */
	original_cmd_line = args;

	current_thread_info()->cpu = hard_processor_id();

	start_kernel();
}
555
/**
 * setup_priv() - Set up privilege protection registers.
 *
 * Set up privilege protection registers such as TXPRIVEXT to prevent userland
 * from touching our precious registers and sensitive memory areas.
 */
void setup_priv(void)
{
	/* Per-thread register banks are strided; offset selects ours */
	unsigned int offset = hard_processor_id() << TXPRIVREG_STRIDE_S;

	__core_reg_set(TXPRIVEXT, PRIV_BITS);

	metag_out32(PRIVSYSR_BITS, T0PRIVSYSR + offset);
	metag_out32(PIOREG_BITS, T0PIOREG + offset);
	metag_out32(PSYREG_BITS, T0PSYREG + offset);
}
572
/* Return the TBI control block pointer recorded for @cpu in setup_arch(). */
PTBI pTBI_get(unsigned int cpu)
{
	return per_cpu(pTBI, cpu);
}
EXPORT_SYMBOL(pTBI_get);
578
/* Build-time capability string exposed via /proc/sys/kernel/capabilities. */
#if defined(CONFIG_METAG_DSP) && defined(CONFIG_METAG_FPU)
static char capabilities[] = "dsp fpu";
#elif defined(CONFIG_METAG_DSP)
static char capabilities[] = "dsp";
#elif defined(CONFIG_METAG_FPU)
static char capabilities[] = "fpu";
#else
static char capabilities[] = "";
#endif
588
/* Read-only sysctl entry: kernel/capabilities. */
static struct ctl_table caps_kern_table[] = {
	{
		.procname	= "capabilities",
		.data		= capabilities,
		.maxlen		= sizeof(capabilities),
		.mode		= 0444,
		.proc_handler	= proc_dostring,
	},
	{}
};
599
/* Root sysctl directory ("kernel") holding the capabilities entry. */
static struct ctl_table caps_root_table[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= caps_kern_table,
	},
	{}
};
608
609static int __init capabilities_register_sysctl(void)
610{
611 struct ctl_table_header *caps_table_header;
612
613 caps_table_header = register_sysctl_table(caps_root_table);
614 if (!caps_table_header) {
615 pr_err("Unable to register CAPABILITIES sysctl\n");
616 return -ENOMEM;
617 }
618
619 return 0;
620}
621
622core_initcall(capabilities_register_sysctl);
diff --git a/arch/metag/kernel/signal.c b/arch/metag/kernel/signal.c
deleted file mode 100644
index e64e8b0a9363..000000000000
--- a/arch/metag/kernel/signal.c
+++ /dev/null
@@ -1,336 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 1991,1992 Linus Torvalds
4 * Copyright (C) 2005-2012 Imagination Technologies Ltd.
5 *
6 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
7 *
8 */
9
10#include <linux/sched.h>
11#include <linux/sched/task_stack.h>
12#include <linux/mm.h>
13#include <linux/smp.h>
14#include <linux/kernel.h>
15#include <linux/signal.h>
16#include <linux/errno.h>
17#include <linux/wait.h>
18#include <linux/ptrace.h>
19#include <linux/unistd.h>
20#include <linux/stddef.h>
21#include <linux/personality.h>
22#include <linux/uaccess.h>
23#include <linux/tracehook.h>
24
25#include <asm/ucontext.h>
26#include <asm/cacheflush.h>
27#include <asm/switch.h>
28#include <asm/syscall.h>
29#include <asm/syscalls.h>
30
/* Shorthand accessors into struct pt_regs for the signal code below. */
#define REG_FLAGS	ctx.SaveMask	/* TBI context save flags */
#define REG_RETVAL	ctx.DX[0].U0	/* syscall return value */
#define REG_SYSCALL	ctx.DX[0].U1	/* syscall number */
#define REG_SP		ctx.AX[0].U0	/* stack pointer */
#define REG_ARG1	ctx.DX[3].U1	/* first handler argument */
#define REG_ARG2	ctx.DX[3].U0	/* second handler argument */
#define REG_ARG3	ctx.DX[2].U1	/* third handler argument */
#define REG_PC		ctx.CurrPC	/* program counter */
#define REG_RTP		ctx.DX[4].U1	/* return pointer register */
40
/* Layout of the rt signal frame placed on the user stack. */
struct rt_sigframe {
	struct siginfo info;		/* signal information for handler */
	struct ucontext uc;		/* saved user context and sigmask */
	unsigned long retcode[2];	/* sigreturn trampoline instructions */
};
46
/*
 * Restore GP, catch-buffer and read-pipeline register state from the
 * user sigcontext @sc into @regs, forcing the context back to user
 * privilege.  Returns 0 on success or the first copy error.
 */
static int restore_sigcontext(struct pt_regs *regs,
			      struct sigcontext __user *sc)
{
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	err = metag_gp_regs_copyin(regs, 0, sizeof(struct user_gp_regs), NULL,
				   &sc->regs);
	if (!err)
		err = metag_cb_regs_copyin(regs, 0,
					   sizeof(struct user_cb_regs), NULL,
					   &sc->cb);
	if (!err)
		err = metag_rp_state_copyin(regs, 0,
					    sizeof(struct user_rp_state), NULL,
					    &sc->rp);

	/* This is a user-mode context. */
	regs->REG_FLAGS |= TBICTX_PRIV_BIT;

	return err;
}
71
/*
 * rt_sigreturn system call: unwind the signal frame pushed by
 * setup_rt_frame(), restoring the blocked signal set, register context
 * and alternate stack.  Delivers SIGSEGV on a corrupt frame.
 */
long sys_rt_sigreturn(void)
{
	/* NOTE - Meta stack goes UPWARDS - so we wind the stack back */
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;
	sigset_t set;

	frame = (__force struct rt_sigframe __user *)(regs->REG_SP -
						      sizeof(*frame));

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;

	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	/* Resume with the interrupted context's original return value */
	return regs->REG_RETVAL;

badframe:
	force_sig(SIGSEGV, current);

	return 0;
}
103
/*
 * Save the GP, catch-buffer and read-pipeline register state of @regs
 * (plus the old signal @mask) into the user sigcontext @sc, then strip
 * the catch-buffer/read-pipeline flags from the live context so they
 * are not replayed while the handler runs.  Returns 0 or a copy error.
 */
static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
			    unsigned long mask)
{
	int err;

	err = metag_gp_regs_copyout(regs, 0, sizeof(struct user_gp_regs), NULL,
				    &sc->regs);

	if (!err)
		err = metag_cb_regs_copyout(regs, 0,
					    sizeof(struct user_cb_regs), NULL,
					    &sc->cb);
	if (!err)
		err = metag_rp_state_copyout(regs, 0,
					     sizeof(struct user_rp_state), NULL,
					     &sc->rp);

	/* OK, clear that cbuf flag in the old context, or our stored
	 * catch buffer will be restored when we go to call the signal
	 * handler. Also clear out the CBRP RA/RD pipe bit incase
	 * that is pending as well!
	 * Note that as we have already stored this context, these
	 * flags will get restored on sigreturn to their original
	 * state.
	 */
	regs->REG_FLAGS &= ~(TBICTX_XCBF_BIT | TBICTX_CBUF_BIT |
			     TBICTX_CBRP_BIT);

	/* Clear out the LSM_STEP bits in case we are in the middle of
	 * and MSET/MGET.
	 */
	regs->ctx.Flags &= ~TXSTATUS_LSM_STEP_BITS;

	err |= __put_user(mask, &sc->oldmask);

	return err;
}
141
142/*
143 * Determine which stack to use..
144 */
145static void __user *get_sigframe(struct ksignal *ksig, unsigned long sp)
146{
147 sp = sigsp(sp, ksig);
148 sp = (sp + 7) & ~7; /* 8byte align stack */
149
150 return (void __user *)sp;
151}
152
/*
 * Build the rt signal frame on the user stack: siginfo, ucontext
 * (including the saved register state and @set), and a two-instruction
 * sigreturn trampoline.  Then redirect the task's registers so it
 * enters the handler with the expected arguments and return path.
 * Returns 0 on success or -EFAULT on any user-copy failure.
 */
static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	int err;
	unsigned long code;

	frame = get_sigframe(ksig, regs->REG_SP);
	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	err = copy_siginfo_to_user(&frame->info, &ksig->info);

	/* Create the ucontext. */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, (unsigned long __user *)&frame->uc.uc_link);
	err |= __save_altstack(&frame->uc.uc_stack, regs->REG_SP);
	err |= setup_sigcontext(&frame->uc.uc_mcontext,
				regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	if (err)
		return -EFAULT;

	/* Set up to return from userspace. */

	/* MOV D1Re0 (D1.0), #__NR_rt_sigreturn */
	code = 0x03000004 | (__NR_rt_sigreturn << 3);
	err |= __put_user(code, (unsigned long __user *)(&frame->retcode[0]));

	/* SWITCH #__METAG_SW_SYS */
	code = __METAG_SW_ENCODING(SYS);
	err |= __put_user(code, (unsigned long __user *)(&frame->retcode[1]));

	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->REG_RTP = (unsigned long) frame->retcode;
	regs->REG_SP = (unsigned long) frame + sizeof(*frame);
	regs->REG_ARG1 = ksig->sig;
	regs->REG_ARG2 = (unsigned long) &frame->info;
	regs->REG_ARG3 = (unsigned long) &frame->uc;
	regs->REG_PC = (unsigned long) ksig->ka.sa.sa_handler;

	pr_debug("SIG deliver (%s:%d): sp=%p pc=%08x pr=%08x\n",
		 current->comm, current->pid, frame, regs->REG_PC,
		 regs->REG_RTP);

	/* Now pass size of 'new code' into sigtramp so we can do a more
	 * effective cache flush - directed rather than 'full flush'.
	 */
	flush_cache_sigtramp(regs->REG_RTP, sizeof(frame->retcode));

	return 0;
}
209
/*
 * Deliver one signal: build its stack frame and let the generic code
 * finalise delivery (or force SIGSEGV if frame setup failed).
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int ret;

	/* Set up the stack frame */
	ret = setup_rt_frame(ksig, oldset, regs);

	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}
220
 /*
  * Notes for Meta.
  * We have moved from the old 2.4.9 SH way of using syscall_nr (in the stored
  * context) to passing in the syscall flag on the stack.
  * This is because having syscall_nr in our context does not fit with TBX, and
  * corrupted the stack.
  */
/*
 * Handle pending signals for the current task.  @syscall is the number
 * of the syscall we interrupted, or a negative value if we did not come
 * from a syscall.  Returns 0 normally, or a negative restart code when
 * a handlerless restart must be performed without leaving the kernel.
 */
static int do_signal(struct pt_regs *regs, int syscall)
{
	unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
	int restart = 0;
	struct ksignal ksig;

	/*
	 * By the end of rt_sigreturn the context describes the point that the
	 * signal was taken (which may happen to be just before a syscall if
	 * it's already been restarted). This should *never* be mistaken for a
	 * system call in need of restarting.
	 */
	if (syscall == __NR_rt_sigreturn)
		syscall = -1;

	/* Did we come from a system call? */
	if (syscall >= 0) {
		continue_addr = regs->REG_PC;
		restart_addr = continue_addr - 4;
		retval = regs->REG_RETVAL;

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 *
		 * restart ends up -1 for ERESTART_RESTARTBLOCK (via the
		 * deliberate fall through below), +1 for the other
		 * restart errnos, 0 otherwise.
		 */
		switch (retval) {
		case -ERESTART_RESTARTBLOCK:
			restart = -2;
			/* fall through */
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			++restart;
			regs->REG_PC = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all our registers ...
	 */
	get_signal(&ksig);

	/*
	 * Depending on the signal settings we may need to revert the decision
	 * to restart the system call. But skip this if a debugger has chosen to
	 * restart at a different PC.
	 */
	if (regs->REG_PC != restart_addr)
		restart = 0;
	if (ksig.sig > 0) {
		if (unlikely(restart)) {
			if (retval == -ERESTARTNOHAND
			    || retval == -ERESTART_RESTARTBLOCK
			    || (retval == -ERESTARTSYS
				&& !(ksig.ka.sa.sa_flags & SA_RESTART))) {
				/* No automatic restart */
				regs->REG_RETVAL = -EINTR;
				regs->REG_PC = continue_addr;
			}
		}

		/* Whee! Actually deliver the signal. */
		handle_signal(&ksig, regs);
		return 0;
	}

	/* Handlerless -ERESTART_RESTARTBLOCK re-enters via restart_syscall */
	if (unlikely(restart < 0))
		regs->REG_SYSCALL = __NR_restart_syscall;

	/*
	 * If there's no signal to deliver, we just put the saved sigmask back.
	 */
	restore_saved_sigmask();

	return restart;
}
305
/*
 * Main work loop run on return to userspace while TIF work flags are
 * set: reschedules, delivers signals and handles notify-resume.
 * Returns non-zero (a restart code from do_signal()) when a syscall
 * must be restarted without leaving kernel space, otherwise 0.
 */
int do_work_pending(struct pt_regs *regs, unsigned int thread_flags,
		    int syscall)
{
	do {
		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
			schedule();
		} else {
			/* Only deliver signals when returning to user mode */
			if (unlikely(!user_mode(regs)))
				return 0;
			local_irq_enable();
			if (thread_flags & _TIF_SIGPENDING) {
				int restart = do_signal(regs, syscall);
				if (unlikely(restart)) {
					/*
					 * Restart without handlers.
					 * Deal with it without leaving
					 * the kernel space.
					 */
					return restart;
				}
				/* Syscall handled; don't restart it again */
				syscall = -1;
			} else {
				clear_thread_flag(TIF_NOTIFY_RESUME);
				tracehook_notify_resume(regs);
			}
		}
		local_irq_disable();
		/* Re-check: work may have been added while irqs were on */
		thread_flags = current_thread_info()->flags;
	} while (thread_flags & _TIF_WORK_MASK);
	return 0;
}
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
deleted file mode 100644
index 2dbbb7c66043..000000000000
--- a/arch/metag/kernel/smp.c
+++ /dev/null
@@ -1,668 +0,0 @@
1/*
2 * Copyright (C) 2009,2010,2011 Imagination Technologies Ltd.
3 *
4 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/atomic.h>
11#include <linux/completion.h>
12#include <linux/delay.h>
13#include <linux/init.h>
14#include <linux/spinlock.h>
15#include <linux/sched/mm.h>
16#include <linux/sched/hotplug.h>
17#include <linux/sched/task_stack.h>
18#include <linux/interrupt.h>
19#include <linux/cache.h>
20#include <linux/profile.h>
21#include <linux/errno.h>
22#include <linux/mm.h>
23#include <linux/err.h>
24#include <linux/cpu.h>
25#include <linux/smp.h>
26#include <linux/seq_file.h>
27#include <linux/irq.h>
28#include <linux/bootmem.h>
29
30#include <asm/cacheflush.h>
31#include <asm/cachepart.h>
32#include <asm/core_reg.h>
33#include <asm/cpu.h>
34#include <asm/global_lock.h>
35#include <asm/metag_mem.h>
36#include <asm/mmu_context.h>
37#include <asm/pgtable.h>
38#include <asm/pgalloc.h>
39#include <asm/processor.h>
40#include <asm/setup.h>
41#include <asm/tlbflush.h>
42#include <asm/hwthread.h>
43#include <asm/traps.h>
44
45#define SYSC_DCPART(n) (SYSC_DCPART0 + SYSC_xCPARTn_STRIDE * (n))
46#define SYSC_ICPART(n) (SYSC_ICPART0 + SYSC_xCPARTn_STRIDE * (n))
47
48DECLARE_PER_CPU(PTBI, pTBI);
49
50void *secondary_data_stack;
51
52/*
53 * structures for inter-processor calls
54 * - A collection of single bit ipi messages.
55 */
56struct ipi_data {
57 spinlock_t lock;
58 unsigned long ipi_count;
59 unsigned long bits;
60};
61
62static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
63 .lock = __SPIN_LOCK_UNLOCKED(ipi_data.lock),
64};
65
66static DEFINE_SPINLOCK(boot_lock);
67
68static DECLARE_COMPLETION(cpu_running);
69
/*
 * "thread" is assumed to be a valid Meta hardware thread ID.
 *
 * Points the thread's PC at secondary_startup, grants it privilege, clears
 * its minim-mode bit and finally sets ThreadEnable to start it running.
 * Serialised against other boots by boot_lock.  Always returns 0.
 */
static int boot_secondary(unsigned int thread, struct task_struct *idle)
{
	u32 val;

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/* Start address: PC0 = secondary_startup, PC1 = 0 */
	core_reg_write(TXUPC_ID, 0, thread, (unsigned int)secondary_startup);
	core_reg_write(TXUPC_ID, 1, thread, 0);

	/*
	 * Give the thread privilege (PSTAT) and clear potentially problematic
	 * bits in the process (namely ISTAT, CBMarker, CBMarkerI, LSM_STEP).
	 */
	core_reg_write(TXUCT_ID, TXSTATUS_REGNUM, thread, TXSTATUS_PSTAT_BIT);

	/* Clear the minim enable bit. */
	val = core_reg_read(TXUCT_ID, TXPRIVEXT_REGNUM, thread);
	core_reg_write(TXUCT_ID, TXPRIVEXT_REGNUM, thread, val & ~0x80);

	/*
	 * set the ThreadEnable bit (0x1) in the TXENABLE register
	 * for the specified thread - off it goes!
	 */
	val = core_reg_read(TXUCT_ID, TXENABLE_REGNUM, thread);
	core_reg_write(TXUCT_ID, TXENABLE_REGNUM, thread, val | 0x1);

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return 0;
}
111
112/**
113 * describe_cachepart_change: describe a change to cache partitions.
114 * @thread: Hardware thread number.
115 * @label: Label of cache type, e.g. "dcache" or "icache".
116 * @sz: Total size of the cache.
117 * @old: Old cache partition configuration (*CPART* register).
118 * @new: New cache partition configuration (*CPART* register).
119 *
120 * If the cache partition has changed, prints a message to the log describing
121 * those changes.
122 */
123static void describe_cachepart_change(unsigned int thread, const char *label,
124 unsigned int sz, unsigned int old,
125 unsigned int new)
126{
127 unsigned int lor1, land1, gor1, gand1;
128 unsigned int lor2, land2, gor2, gand2;
129 unsigned int diff = old ^ new;
130
131 if (!diff)
132 return;
133
134 pr_info("Thread %d: %s partition changed:", thread, label);
135 if (diff & (SYSC_xCPARTL_OR_BITS | SYSC_xCPARTL_AND_BITS)) {
136 lor1 = (old & SYSC_xCPARTL_OR_BITS) >> SYSC_xCPARTL_OR_S;
137 lor2 = (new & SYSC_xCPARTL_OR_BITS) >> SYSC_xCPARTL_OR_S;
138 land1 = (old & SYSC_xCPARTL_AND_BITS) >> SYSC_xCPARTL_AND_S;
139 land2 = (new & SYSC_xCPARTL_AND_BITS) >> SYSC_xCPARTL_AND_S;
140 pr_cont(" L:%#x+%#x->%#x+%#x",
141 (lor1 * sz) >> 4,
142 ((land1 + 1) * sz) >> 4,
143 (lor2 * sz) >> 4,
144 ((land2 + 1) * sz) >> 4);
145 }
146 if (diff & (SYSC_xCPARTG_OR_BITS | SYSC_xCPARTG_AND_BITS)) {
147 gor1 = (old & SYSC_xCPARTG_OR_BITS) >> SYSC_xCPARTG_OR_S;
148 gor2 = (new & SYSC_xCPARTG_OR_BITS) >> SYSC_xCPARTG_OR_S;
149 gand1 = (old & SYSC_xCPARTG_AND_BITS) >> SYSC_xCPARTG_AND_S;
150 gand2 = (new & SYSC_xCPARTG_AND_BITS) >> SYSC_xCPARTG_AND_S;
151 pr_cont(" G:%#x+%#x->%#x+%#x",
152 (gor1 * sz) >> 4,
153 ((gand1 + 1) * sz) >> 4,
154 (gor2 * sz) >> 4,
155 ((gand2 + 1) * sz) >> 4);
156 }
157 if (diff & SYSC_CWRMODE_BIT)
158 pr_cont(" %sWR",
159 (new & SYSC_CWRMODE_BIT) ? "+" : "-");
160 if (diff & SYSC_DCPART_GCON_BIT)
161 pr_cont(" %sGCOn",
162 (new & SYSC_DCPART_GCON_BIT) ? "+" : "-");
163 pr_cont("\n");
164}
165
/**
 * setup_smp_cache: ensure cache coherency for new SMP thread.
 * @thread: New hardware thread number.
 *
 * Ensures that coherency is enabled and that the threads share the same cache
 * partitions.
 */
static void setup_smp_cache(unsigned int thread)
{
	unsigned int this_thread, lflags;
	unsigned int dcsz, dcpart_this, dcpart_old, dcpart_new;
	unsigned int icsz, icpart_old, icpart_new;

	/*
	 * Copy over the current thread's cache partition configuration to the
	 * new thread so that they share cache partitions.
	 */
	__global_lock2(lflags);
	this_thread = hard_processor_id();
	/* Share dcache partition */
	dcpart_this = metag_in32(SYSC_DCPART(this_thread));
	dcpart_old = metag_in32(SYSC_DCPART(thread));
	dcpart_new = dcpart_this;
#if PAGE_OFFSET < LINGLOBAL_BASE
	/*
	 * For the local data cache to be coherent the threads must also have
	 * GCOn enabled.
	 */
	dcpart_new |= SYSC_DCPART_GCON_BIT;
	metag_out32(dcpart_new, SYSC_DCPART(this_thread));
#endif
	metag_out32(dcpart_new, SYSC_DCPART(thread));
	/* Share icache partition too */
	icpart_new = metag_in32(SYSC_ICPART(this_thread));
	icpart_old = metag_in32(SYSC_ICPART(thread));
	metag_out32(icpart_new, SYSC_ICPART(thread));
	__global_unlock2(lflags);

	/*
	 * Log if the cache partitions were altered so the user is aware of any
	 * potential unintentional cache wastage.
	 */
	dcsz = get_dcache_size();
	/*
	 * FIX: was get_dcache_size() — a copy-paste error that made the
	 * icache partition report below use the dcache size.
	 */
	icsz = get_icache_size();
	describe_cachepart_change(this_thread, "dcache", dcsz,
				  dcpart_this, dcpart_new);
	describe_cachepart_change(thread, "dcache", dcsz,
				  dcpart_old, dcpart_new);
	describe_cachepart_change(thread, "icache", icsz,
				  icpart_old, icpart_new);
}
217
/*
 * __cpu_up - bring a secondary CPU (hardware thread) online.
 * @cpu:  logical CPU number.
 * @idle: idle task whose stack the secondary will run on.
 *
 * Prepares page tables, TLB and cache partitions for the new thread,
 * publishes the idle stack via secondary_data_stack, then starts the
 * thread and waits (up to 1s) for it to mark itself online.
 * Returns 0 on success or -EIO if the CPU failed to come online.
 */
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	unsigned int thread = cpu_2_hwthread_id[cpu];
	int ret;

	load_pgd(swapper_pg_dir, thread);

	flush_tlb_all();

	setup_smp_cache(thread);

	/*
	 * Tell the secondary CPU where to find its idle thread's stack.
	 */
	secondary_data_stack = task_stack_page(idle);

	/* make the stack pointer visible before the thread starts */
	wmb();

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(thread, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu))
			ret = -EIO;
	}

	secondary_data_stack = NULL;

	if (ret) {
		pr_crit("CPU%u: processor failed to boot\n", cpu);

		/*
		 * FIXME: We need to clean up the new idle thread. --rmk
		 */
	}

	return ret;
}
264
265#ifdef CONFIG_HOTPLUG_CPU
266
/*
 * __cpu_disable runs on the processor to be shutdown.
 *
 * Marks the CPU offline, migrates its IRQs away, flushes caches/TLB and
 * removes it from every mm's cpumask.  Always returns 0.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}
296
297/*
298 * called on the thread which is asking for a CPU to be shutdown -
299 * waits until shutdown has completed, or it is timed out.
300 */
301void __cpu_die(unsigned int cpu)
302{
303 if (!cpu_wait_death(cpu, 1))
304 pr_err("CPU%u: unable to kill\n", cpu);
305}
306
/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we do not return from this function. If this cpu is
 * brought online again it will need to run secondary_startup().
 */
void cpu_die(void)
{
	local_irq_disable();
	idle_task_exit();
	irq_ctx_exit(smp_processor_id());

	/* report death to the waiter in __cpu_die() */
	(void)cpu_report_death();

	/* XOR of a register with itself is 0: clears TXENABLE, halting
	 * this hardware thread (hence no return). */
	asm ("XOR	TXENABLE, D0Re0,D0Re0\n");
}
323#endif /* CONFIG_HOTPLUG_CPU */
324
325/*
326 * Called by both boot and secondaries to move global data into
327 * per-processor storage.
328 */
329void smp_store_cpu_info(unsigned int cpuid)
330{
331 struct cpuinfo_metag *cpu_info = &per_cpu(cpu_data, cpuid);
332
333 cpu_info->loops_per_jiffy = loops_per_jiffy;
334}
335
/*
 * This is the secondary CPU boot entry. We're using this CPUs
 * idle thread stack and the global page tables.
 *
 * Sequence: adopt init_mm, set up the TBI pointer and per-CPU trap/irq
 * context, calibrate, mark ourselves online (releasing __cpu_up()'s wait),
 * enable interrupts and enter the idle loop.  Never returns.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * TODO: Some day it might be useful for each Linux CPU to
	 * have its own TBI structure. That would allow each Linux CPU
	 * to run different interrupt handlers for the same IRQ
	 * number.
	 *
	 * For now, simply copying the pointer to the boot CPU's TBI
	 * structure is sufficient because we always want to run the
	 * same interrupt handler whatever CPU takes the interrupt.
	 */
	per_cpu(pTBI, cpu) = __TBI(TBID_ISTAT_BIT);

	if (!per_cpu(pTBI, cpu))
		panic("No TBI found!");

	per_cpu_trap_init(cpu);
	irq_ctx_init(cpu);

	preempt_disable();

	setup_priv();

	notify_cpu_starting(cpu);

	pr_info("CPU%u (thread %u): Booted secondary processor\n",
		cpu, cpu_2_hwthread_id[cpu]);

	calibrate_delay();
	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	/*
	 * Enable local interrupts.
	 */
	tbi_startup_interrupt(TBID_SIGNUM_TRT);
	local_irq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
403
404void __init smp_cpus_done(unsigned int max_cpus)
405{
406 int cpu;
407 unsigned long bogosum = 0;
408
409 for_each_online_cpu(cpu)
410 bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
411
412 pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
413 num_online_cpus(),
414 bogosum / (500000/HZ),
415 (bogosum / (5000/HZ)) % 100);
416}
417
418void __init smp_prepare_cpus(unsigned int max_cpus)
419{
420 unsigned int cpu = smp_processor_id();
421
422 init_new_context(current, &init_mm);
423 current_thread_info()->cpu = cpu;
424
425 smp_store_cpu_info(cpu);
426 init_cpu_present(cpu_possible_mask);
427}
428
429void __init smp_prepare_boot_cpu(void)
430{
431 unsigned int cpu = smp_processor_id();
432
433 per_cpu(pTBI, cpu) = __TBI(TBID_ISTAT_BIT);
434
435 if (!per_cpu(pTBI, cpu))
436 panic("No TBI found!");
437}
438
439static void smp_cross_call(cpumask_t callmap, enum ipi_msg_type msg);
440
/*
 * send_ipi_message - queue an IPI message bit and kick the target CPUs.
 * @mask: CPUs to signal.
 * @msg:  message (bit index) to deliver.
 *
 * Runs with local interrupts disabled; each target's per-CPU ipi_data.bits
 * is updated under its own spinlock, and only CPUs that did not already
 * have this message pending are actually kicked.
 */
static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
{
	unsigned long flags;
	unsigned int cpu;
	cpumask_t map;

	cpumask_clear(&map);
	local_irq_save(flags);

	for_each_cpu(cpu, mask) {
		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

		spin_lock(&ipi->lock);

		/*
		 * KICK interrupts are queued in hardware so we'll get
		 * multiple interrupts if we call smp_cross_call()
		 * multiple times for one msg. The problem is that we
		 * only have one bit for each message - we can't queue
		 * them in software.
		 *
		 * The first time through ipi_handler() we'll clear
		 * the msg bit, having done all the work. But when we
		 * return we'll get _another_ interrupt (and another,
		 * and another until we've handled all the queued
		 * KICKs). Running ipi_handler() when there's no work
		 * to do is bad because that's how kick handler
		 * chaining detects who the KICK was intended for.
		 * See arch/metag/kernel/kick.c for more details.
		 *
		 * So only add 'cpu' to 'map' if we haven't already
		 * queued a KICK interrupt for 'msg'.
		 */
		if (!(ipi->bits & (1 << msg))) {
			ipi->bits |= 1 << msg;
			cpumask_set_cpu(cpu, &map);
		}

		spin_unlock(&ipi->lock);
	}

	/*
	 * Call the platform specific cross-CPU call function.
	 */
	smp_cross_call(map, msg);

	local_irq_restore(flags);
}
489
/* Generic SMP hooks: deliver a cross-call IPI to a set of CPUs ... */
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

/* ... or to a single CPU. */
void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}
499
500void show_ipi_list(struct seq_file *p)
501{
502 unsigned int cpu;
503
504 seq_puts(p, "IPI:");
505
506 for_each_present_cpu(cpu)
507 seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);
508
509 seq_putc(p, '\n');
510}
511
512static DEFINE_SPINLOCK(stop_lock);
513
/*
 * Main handler for inter-processor interrupts
 *
 * For Meta, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 *
 * Handles at most one pending message bit per invocation (the hardware
 * re-raises the KICK for each queued message).  Returns 1 if a message
 * was handled, 0 otherwise.
 */
static int do_IPI(void)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	unsigned long msgs, nextmsg;
	int handled = 0;

	ipi->ipi_count++;

	/* atomically pick off and clear the lowest pending message bit */
	spin_lock(&ipi->lock);
	msgs = ipi->bits;
	nextmsg = msgs & -msgs;		/* x & -x isolates the lowest set bit */
	ipi->bits &= ~nextmsg;
	spin_unlock(&ipi->lock);

	if (nextmsg) {
		handled = 1;

		/* convert the single-bit mask into its bit index */
		nextmsg = ffz(~nextmsg);
		switch (nextmsg) {
		case IPI_RESCHEDULE:
			scheduler_ipi();
			break;

		case IPI_CALL_FUNC:
			generic_smp_call_function_interrupt();
			break;

		default:
			pr_crit("CPU%u: Unknown IPI message 0x%lx\n",
				cpu, nextmsg);
			break;
		}
	}

	return handled;
}
560
/* Generic SMP hook: ask @cpu to reschedule via an IPI. */
void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
565
/*
 * stop_this_cpu - cross-call target used by smp_send_stop().
 *
 * Logs a stack dump (serialised by stop_lock) unless the system is already
 * going down, marks the CPU offline and halts the hardware thread.
 */
static void stop_this_cpu(void *data)
{
	unsigned int cpu = smp_processor_id();

	if (system_state <= SYSTEM_RUNNING) {
		spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_irq_disable();

	/* halt this hardware thread; does not return */
	hard_processor_halt(HALT_OK);
}
583
/* Stop all other CPUs (used on panic/shutdown); does not wait for them. */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}
588
/*
 * not supported here
 *
 * Profiling multiplier changes are not implemented on Meta.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
596
/*
 * We use KICKs for inter-processor interrupts.
 *
 * For every CPU in "callmap" the IPI data must already have been
 * stored in that CPU's "ipi_data" member prior to calling this
 * function.
 *
 * NOTE(review): the 'irq' parameter is not used — the KICK is always
 * raised via the thread's TnXKICK register regardless of it; confirm
 * whether callers rely on that.
 */
static void kick_raise_softirq(cpumask_t callmap, unsigned int irq)
{
	int cpu;

	for_each_cpu(cpu, &callmap) {
		unsigned int thread;

		thread = cpu_2_hwthread_id[cpu];

		BUG_ON(thread == BAD_HWTHREAD_ID);

		/* write to the target thread's interrupt KICK register */
		metag_out32(1, T0KICKI + (thread * TnXKICK_STRIDE));
	}
}
618
/*
 * TBI kick-handler entry point: dispatch to do_IPI() and report via
 * *handled whether this KICK was ours (used for kick handler chaining).
 */
static TBIRES ipi_handler(TBIRES State, int SigNum, int Triggers,
			  int Inst, PTBI pTBI, int *handled)
{
	*handled = do_IPI();

	return State;
}

/* Registration record for the kick handler chain (see smp_init_cpus()). */
static struct kick_irq_handler ipi_irq = {
	.func = ipi_handler,
};
630
/* Platform cross-call: raise a KICK on every CPU in @callmap. */
static void smp_cross_call(cpumask_t callmap, enum ipi_msg_type msg)
{
	kick_raise_softirq(callmap, 1);
}
635
636static inline unsigned int get_core_count(void)
637{
638 int i;
639 unsigned int ret = 0;
640
641 for (i = 0; i < CONFIG_NR_CPUS; i++) {
642 if (core_reg_read(TXUCT_ID, TXENABLE_REGNUM, i))
643 ret++;
644 }
645
646 return ret;
647}
648
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 *
 * Also installs the default identity cpu<->hwthread mapping for any slot
 * not set by the hwthread_map early param, and registers the IPI kick
 * handler.
 */
void __init smp_init_cpus(void)
{
	unsigned int i, ncores = get_core_count();

	/* If no hwthread_map early param was set use default mapping */
	for (i = 0; i < NR_CPUS; i++)
		if (cpu_2_hwthread_id[i] == BAD_HWTHREAD_ID) {
			cpu_2_hwthread_id[i] = i;
			hwthread_id_2_cpu[i] = i;
		}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	kick_register_func(&ipi_irq);
}
diff --git a/arch/metag/kernel/stacktrace.c b/arch/metag/kernel/stacktrace.c
deleted file mode 100644
index 09d67b7f51ca..000000000000
--- a/arch/metag/kernel/stacktrace.c
+++ /dev/null
@@ -1,187 +0,0 @@
1#include <linux/export.h>
2#include <linux/sched.h>
3#include <linux/sched/debug.h>
4#include <linux/sched/task_stack.h>
5#include <linux/stacktrace.h>
6
7#include <asm/stacktrace.h>
8
9#if defined(CONFIG_FRAME_POINTER)
10
11#ifdef CONFIG_KALLSYMS
12#include <linux/kallsyms.h>
13#include <linux/module.h>
14
15static unsigned long tbi_boing_addr;
16static unsigned long tbi_boing_size;
17
/*
 * Resolve the address and size of ___TBIBoingVec via kallsyms so the
 * unwinder can recognise interrupt/syscall entry frames.  On lookup
 * failure tbi_boing_addr is set to 1 (non-zero marks "init attempted",
 * while tbi_boing_size stays 0 so the range check never matches).
 */
static void tbi_boing_init(void)
{
	/* We need to know where TBIBoingVec is and it's size */
	unsigned long size;
	unsigned long offset;
	char modname[MODULE_NAME_LEN];
	char name[KSYM_NAME_LEN];
	tbi_boing_addr = kallsyms_lookup_name("___TBIBoingVec");
	if (!tbi_boing_addr)
		tbi_boing_addr = 1;
	else if (!lookup_symbol_attrs(tbi_boing_addr, &size,
				      &offset, modname, name))
		tbi_boing_size = size;
}
32#endif
33
/*
 * Unwind the current stack frame and store the new register values in the
 * structure passed as argument. Unwinding is equivalent to a function return,
 * hence the new PC value rather than LR should be used for backtrace.
 *
 * Returns 0 on success or -EINVAL when the frame is misaligned or the new
 * frame pointer falls outside the valid stack range.
 */
int notrace unwind_frame(struct stackframe *frame)
{
	struct metag_frame *fp = (struct metag_frame *)frame->fp;
	unsigned long lr;
	unsigned long fpnew;

	/* frame pointers must be 8-byte aligned */
	if (frame->fp & 0x7)
		return -EINVAL;

	fpnew = fp->fp;
	lr = fp->lr - 4;

#ifdef CONFIG_KALLSYMS
	/* If we've reached TBIBoingVec then we're at an interrupt
	 * entry point or a syscall entry point. The frame pointer
	 * points to a pt_regs which can be used to continue tracing on
	 * the other side of the boing.
	 */
	if (!tbi_boing_addr)
		tbi_boing_init();
	if (tbi_boing_size && lr >= tbi_boing_addr &&
	    lr < tbi_boing_addr + tbi_boing_size) {
		struct pt_regs *regs = (struct pt_regs *)fpnew;
		if (user_mode(regs))
			return -EINVAL;
		fpnew = regs->ctx.AX[1].U0;
		lr = regs->ctx.DX[4].U1;
	}
#endif

	/* stack grows up, so frame pointers must decrease */
	if (fpnew < (ALIGN_DOWN((unsigned long)fp, THREAD_SIZE) +
		     sizeof(struct thread_info)) || fpnew >= (unsigned long)fp)
		return -EINVAL;

	/* restore the registers from the stack frame */
	frame->fp = fpnew;
	frame->pc = lr;

	return 0;
}
80#else
/*
 * Frame-pointer-less fallback: scan up the stack for the next word that
 * looks like a kernel return address.  Less precise than the FP-based
 * unwinder above; returns -EINVAL when the end of the stack is reached.
 */
int notrace unwind_frame(struct stackframe *frame)
{
	struct metag_frame *sp = (struct metag_frame *)frame->sp;

	/* stack slots must be 8-byte aligned */
	if (frame->sp & 0x7)
		return -EINVAL;

	while (!kstack_end(sp)) {
		unsigned long addr = sp->lr - 4;
		sp--;

		if (__kernel_text_address(addr)) {
			frame->sp = (unsigned long)sp;
			frame->pc = addr;
			return 0;
		}
	}
	return -EINVAL;
}
100#endif
101
102void notrace walk_stackframe(struct stackframe *frame,
103 int (*fn)(struct stackframe *, void *), void *data)
104{
105 while (1) {
106 int ret;
107
108 if (fn(frame, data))
109 break;
110 ret = unwind_frame(frame);
111 if (ret < 0)
112 break;
113 }
114}
115EXPORT_SYMBOL(walk_stackframe);
116
117#ifdef CONFIG_STACKTRACE
/* Context passed through walk_stackframe() to save_trace(). */
struct stack_trace_data {
	struct stack_trace *trace;		/* output trace buffer */
	unsigned int no_sched_functions;	/* skip scheduler frames */
	unsigned int skip;			/* frames left to skip */
};
123
124static int save_trace(struct stackframe *frame, void *d)
125{
126 struct stack_trace_data *data = d;
127 struct stack_trace *trace = data->trace;
128 unsigned long addr = frame->pc;
129
130 if (data->no_sched_functions && in_sched_functions(addr))
131 return 0;
132 if (data->skip) {
133 data->skip--;
134 return 0;
135 }
136
137 trace->entries[trace->nr_entries++] = addr;
138
139 return trace->nr_entries >= trace->max_entries;
140}
141
/*
 * save_stack_trace_tsk - capture a stack trace for @tsk into @trace.
 *
 * For the current task the walk starts from this function's own frame;
 * for another task it starts from the task's saved register state (only
 * safe on !SMP — on SMP the task may be running elsewhere, so just
 * terminate the trace).  The trace is terminated with ULONG_MAX if there
 * is room.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;

	if (tsk != current) {
#ifdef CONFIG_SMP
		/*
		 * What guarantees do we have here that 'tsk' is not
		 * running on another CPU?  For now, ignore it as we
		 * can't guarantee we won't explode.
		 */
		if (trace->nr_entries < trace->max_entries)
			trace->entries[trace->nr_entries++] = ULONG_MAX;
		return;
#else
		data.no_sched_functions = 1;
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.lr = 0;		/* recovered from the stack */
		frame.pc = thread_saved_pc(tsk);
#endif
	} else {
		/* A0StP is the Meta stack pointer register */
		register unsigned long current_sp asm ("A0StP");

		data.no_sched_functions = 0;
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)save_stack_trace_tsk;
	}

	walk_stackframe(&frame, save_trace, &data);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
181
/* Capture a stack trace of the current task. */
void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(current, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
187#endif
diff --git a/arch/metag/kernel/sys_metag.c b/arch/metag/kernel/sys_metag.c
deleted file mode 100644
index 27d96499dd38..000000000000
--- a/arch/metag/kernel/sys_metag.c
+++ /dev/null
@@ -1,181 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * This file contains various random system calls that
4 * have a non-standard calling sequence on the Linux/Meta
5 * platform.
6 */
7
8#include <linux/errno.h>
9#include <linux/sched.h>
10#include <linux/mm.h>
11#include <linux/syscalls.h>
12#include <linux/mman.h>
13#include <linux/file.h>
14#include <linux/fs.h>
15#include <linux/uaccess.h>
16#include <linux/unistd.h>
17#include <asm/cacheflush.h>
18#include <asm/core_reg.h>
19#include <asm/global_lock.h>
20#include <asm/switch.h>
21#include <asm/syscall.h>
22#include <asm/syscalls.h>
23#include <asm/user_gateway.h>
24
25#define merge_64(hi, lo) ((((unsigned long long)(hi)) << 32) + \
26 ((lo) & 0xffffffffUL))
27
28int metag_mmap_check(unsigned long addr, unsigned long len,
29 unsigned long flags)
30{
31 /* We can't have people trying to write to the bottom of the
32 * memory map, there are mysterious unspecified things there that
33 * we don't want people trampling on.
34 */
35 if ((flags & MAP_FIXED) && (addr < TASK_UNMAPPED_BASE))
36 return -EINVAL;
37
38 return 0;
39}
40
41asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
42 unsigned long prot, unsigned long flags,
43 unsigned long fd, unsigned long pgoff)
44{
45 /* The shift for mmap2 is constant, regardless of PAGE_SIZE setting. */
46 if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1))
47 return -EINVAL;
48
49 pgoff >>= PAGE_SHIFT - 12;
50
51 return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
52}
53
/*
 * sys_metag_setglobalbit - atomically OR @mask into the byte at @addr.
 *
 * Only addresses in the core (global) region are accepted.  The whole
 * read-modify-write runs under the hardware global lock, with the cache
 * line flushed before the read and after the write so all threads see a
 * consistent value.  Returns 0 or a -EFAULT-style error from the user
 * accessors.
 */
asmlinkage int sys_metag_setglobalbit(char __user *addr, int mask)
{
	char tmp;
	int ret = 0;
	unsigned int flags;

	if (!((__force unsigned int)addr >= LINCORE_BASE))
		return -EFAULT;

	__global_lock2(flags);

	/* flush so the read below sees memory, not a stale cache line */
	metag_data_cache_flush((__force void *)addr, sizeof(mask));

	ret = __get_user(tmp, addr);
	if (ret)
		goto out;
	tmp |= mask;
	ret = __put_user(tmp, addr);

	/* flush again so other threads observe the updated byte */
	metag_data_cache_flush((__force void *)addr, sizeof(mask));

out:
	__global_unlock2(flags);

	return ret;
}
80
81#define TXDEFR_FPU_MASK ((0x1f << 16) | 0x1f)
82
83asmlinkage void sys_metag_set_fpu_flags(unsigned int flags)
84{
85 unsigned int temp;
86
87 flags &= TXDEFR_FPU_MASK;
88
89 temp = __core_reg_get(TXDEFR);
90 temp &= ~TXDEFR_FPU_MASK;
91 temp |= flags;
92 __core_reg_set(TXDEFR, temp);
93}
94
/* Set this thread's TLS pointer, both in the thread struct and in the
 * user gateway page so userspace can read it cheaply. */
asmlinkage int sys_metag_set_tls(void __user *ptr)
{
	current->thread.tls_ptr = ptr;
	set_gateway_tls(ptr);

	return 0;
}

/* Return this thread's TLS pointer. */
asmlinkage void *sys_metag_get_tls(void)
{
	return (__force void *)current->thread.tls_ptr;
}
107
/*
 * Wrappers for syscalls taking 64-bit arguments: the Meta ABI passes
 * them as separate 32-bit lo/hi register halves, recombined here with
 * merge_64() before calling the generic implementation.
 */
asmlinkage long sys_truncate64_metag(const char __user *path, unsigned long lo,
				     unsigned long hi)
{
	return sys_truncate64(path, merge_64(hi, lo));
}

asmlinkage long sys_ftruncate64_metag(unsigned int fd, unsigned long lo,
				      unsigned long hi)
{
	return sys_ftruncate64(fd, merge_64(hi, lo));
}

asmlinkage long sys_fadvise64_64_metag(int fd, unsigned long offs_lo,
				       unsigned long offs_hi,
				       unsigned long len_lo,
				       unsigned long len_hi, int advice)
{
	return sys_fadvise64_64(fd, merge_64(offs_hi, offs_lo),
				merge_64(len_hi, len_lo), advice);
}

asmlinkage long sys_readahead_metag(int fd, unsigned long lo, unsigned long hi,
				    size_t count)
{
	return sys_readahead(fd, merge_64(hi, lo), count);
}

asmlinkage ssize_t sys_pread64_metag(unsigned long fd, char __user *buf,
				     size_t count, unsigned long lo,
				     unsigned long hi)
{
	return sys_pread64(fd, buf, count, merge_64(hi, lo));
}

asmlinkage ssize_t sys_pwrite64_metag(unsigned long fd, char __user *buf,
				      size_t count, unsigned long lo,
				      unsigned long hi)
{
	return sys_pwrite64(fd, buf, count, merge_64(hi, lo));
}

asmlinkage long sys_sync_file_range_metag(int fd, unsigned long offs_lo,
					  unsigned long offs_hi,
					  unsigned long len_lo,
					  unsigned long len_hi,
					  unsigned int flags)
{
	return sys_sync_file_range(fd, merge_64(offs_hi, offs_lo),
				   merge_64(len_hi, len_lo), flags);
}
158
/* Provide the actual syscall number to call mapping. */
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),

/*
 * We need wrappers for anything with unaligned 64bit arguments
 * (see the *_metag wrappers above); redirect the generic names here
 * before the table is expanded.
 */
#define sys_truncate64		sys_truncate64_metag
#define sys_ftruncate64		sys_ftruncate64_metag
#define sys_fadvise64_64	sys_fadvise64_64_metag
#define sys_readahead		sys_readahead_metag
#define sys_pread64		sys_pread64_metag
#define sys_pwrite64		sys_pwrite64_metag
#define sys_sync_file_range	sys_sync_file_range_metag

/*
 * Note that we can't include <linux/unistd.h> here since the header
 * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
 * Unimplemented slots default to sys_ni_syscall; each __SYSCALL entry
 * from <asm/unistd.h> then overrides its slot via designated init.
 */
const void *sys_call_table[__NR_syscalls] = {
	[0 ... __NR_syscalls-1] = sys_ni_syscall,
#include <asm/unistd.h>
};
diff --git a/arch/metag/kernel/tbiunexp.S b/arch/metag/kernel/tbiunexp.S
deleted file mode 100644
index 2664808086c7..000000000000
--- a/arch/metag/kernel/tbiunexp.S
+++ /dev/null
@@ -1,23 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Pass a breakpoint through to Codescape */
/*
 * ___TBIUnExpXXX: TBI unexpected-trigger handler.  Saves the TBI args on
 * the stack and executes a SWITCH instruction so the Codescape debugger
 * can take over, then unwinds the frame and returns.  If entered from a
 * nestable interrupt (TBICTX_CRIT set in D0Ar2), BGNDHALT handling is
 * masked off in TXMASKI first, preserving the cleared bits in D0Ar2.
 */

#include <asm/tbx.h>

	.text
	.global	___TBIUnExpXXX
	.type	___TBIUnExpXXX,function
___TBIUnExpXXX:
	TSTT	D0Ar2,#TBICTX_CRIT_BIT	! Result of nestable int call?
	BZ	$LTBINormCase		! UnExpXXX at background level
	MOV	D0Re0,TXMASKI		! Read TXMASKI
	XOR	TXMASKI,D1Re0,D1Re0	! Turn off BGNDHALT handling!
	OR	D0Ar2,D0Ar2,D0Re0	! Preserve bits cleared
$LTBINormCase:
	MSETL	[A0StP],D0Ar6,D0Ar4,D0Ar2	! Save args on stack
	SETL	[A0StP++],D0Ar2,D1Ar1	! Init area for returned values
	SWITCH	#0xC20208		! Total stack frame size 8 Dwords
					!            write back size 2 Dwords
	GETL	D0Re0,D1Re0,[--A0StP]	! Get result
	SUB	A0StP,A0StP,#(8*3)	! Recover stack frame
	MOV	PC,D1RtP
	.size	___TBIUnExpXXX,.-___TBIUnExpXXX
diff --git a/arch/metag/kernel/tcm.c b/arch/metag/kernel/tcm.c
deleted file mode 100644
index 1d7b4e33b114..000000000000
--- a/arch/metag/kernel/tcm.c
+++ /dev/null
@@ -1,152 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2010 Imagination Technologies Ltd.
4 */
5
6#include <linux/init.h>
7#include <linux/kernel.h>
8#include <linux/spinlock.h>
9#include <linux/stddef.h>
10#include <linux/genalloc.h>
11#include <linux/string.h>
12#include <linux/list.h>
13#include <linux/slab.h>
14#include <asm/page.h>
15#include <asm/tcm.h>
16
/* One tightly-coupled-memory region, managed as a gen_pool allocator. */
struct tcm_pool {
	struct list_head list;	/* link in pool_list */
	unsigned int tag;	/* region identifier used by the API */
	unsigned long start;	/* first address of the region */
	unsigned long end;	/* last address of the region */
	struct gen_pool *pool;	/* backing allocator for the region */
};
24
25static LIST_HEAD(pool_list);
26
27static struct tcm_pool *find_pool(unsigned int tag)
28{
29 struct list_head *lh;
30 struct tcm_pool *pool;
31
32 list_for_each(lh, &pool_list) {
33 pool = list_entry(lh, struct tcm_pool, list);
34 if (pool->tag == tag)
35 return pool;
36 }
37
38 return NULL;
39}
40
41/**
42 * tcm_alloc - allocate memory from a TCM pool
43 * @tag: tag of the pool to allocate memory from
44 * @len: number of bytes to be allocated
45 *
46 * Allocate the requested number of bytes from the pool matching
47 * the specified tag. Returns the address of the allocated memory
48 * or zero on failure.
49 */
50unsigned long tcm_alloc(unsigned int tag, size_t len)
51{
52 unsigned long vaddr;
53 struct tcm_pool *pool;
54
55 pool = find_pool(tag);
56 if (!pool)
57 return 0;
58
59 vaddr = gen_pool_alloc(pool->pool, len);
60 if (!vaddr)
61 return 0;
62
63 return vaddr;
64}
65
66/**
67 * tcm_free - free a block of memory to a TCM pool
68 * @tag: tag of the pool to free memory to
69 * @addr: address of the memory to be freed
70 * @len: number of bytes to be freed
71 *
72 * Free the requested number of bytes at a specific address to the
73 * pool matching the specified tag.
74 */
75void tcm_free(unsigned int tag, unsigned long addr, size_t len)
76{
77 struct tcm_pool *pool;
78
79 pool = find_pool(tag);
80 if (!pool)
81 return;
82 gen_pool_free(pool->pool, addr, len);
83}
84
85/**
86 * tcm_lookup_tag - find the tag matching an address
87 * @p: memory address to lookup the tag for
88 *
89 * Find the tag of the tcm memory region that contains the
90 * specified address. Returns %TCM_INVALID_TAG if no such
91 * memory region could be found.
92 */
93unsigned int tcm_lookup_tag(unsigned long p)
94{
95 struct list_head *lh;
96 struct tcm_pool *pool;
97 unsigned long addr = (unsigned long) p;
98
99 list_for_each(lh, &pool_list) {
100 pool = list_entry(lh, struct tcm_pool, list);
101 if (addr >= pool->start && addr < pool->end)
102 return pool->tag;
103 }
104
105 return TCM_INVALID_TAG;
106}
107
108/**
109 * tcm_add_region - add a memory region to TCM pool list
110 * @reg: descriptor of region to be added
111 *
112 * Add a region of memory to the TCM pool list. Returns 0 on success.
113 */
114int __init tcm_add_region(struct tcm_region *reg)
115{
116 struct tcm_pool *pool;
117
118 pool = kmalloc(sizeof(*pool), GFP_KERNEL);
119 if (!pool) {
120 pr_err("Failed to alloc memory for TCM pool!\n");
121 return -ENOMEM;
122 }
123
124 pool->tag = reg->tag;
125 pool->start = reg->res.start;
126 pool->end = reg->res.end;
127
128 /*
129 * 2^3 = 8 bytes granularity to allow for 64bit access alignment.
130 * -1 = NUMA node specifier.
131 */
132 pool->pool = gen_pool_create(3, -1);
133
134 if (!pool->pool) {
135 pr_err("Failed to create TCM pool!\n");
136 kfree(pool);
137 return -ENOMEM;
138 }
139
140 if (gen_pool_add(pool->pool, reg->res.start,
141 reg->res.end - reg->res.start + 1, -1)) {
142 pr_err("Failed to add memory to TCM pool!\n");
143 return -ENOMEM;
144 }
145 pr_info("Added %s TCM pool (%08x bytes @ %08x)\n",
146 reg->res.name, reg->res.end - reg->res.start + 1,
147 reg->res.start);
148
149 list_add_tail(&pool->list, &pool_list);
150
151 return 0;
152}
diff --git a/arch/metag/kernel/time.c b/arch/metag/kernel/time.c
deleted file mode 100644
index 1e809e3b43d1..000000000000
--- a/arch/metag/kernel/time.c
+++ /dev/null
@@ -1,26 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2005-2013 Imagination Technologies Ltd.
4 *
5 * This file contains the Meta-specific time handling details.
6 *
7 */
8
9#include <clocksource/metag_generic.h>
10#include <linux/clk-provider.h>
11#include <linux/init.h>
12#include <asm/clock.h>
13
/*
 * time_init - architecture time initialisation, called once at boot.
 *
 * Initialises device-tree clocks (when CONFIG_COMMON_CLK), then the
 * Meta core clocks, then the metag generic timer clocksources. The
 * clock init is done before the timer init — presumably the timers
 * depend on the core clock rate; confirm before reordering.
 */
void __init time_init(void)
{
#ifdef CONFIG_COMMON_CLK
	/* Init clocks from device tree */
	of_clk_init(NULL);
#endif

	/* Init meta clocks, particularly the core clock */
	init_metag_clocks();

	/* Set up the timer clock sources */
	metag_generic_timer_init();
}
diff --git a/arch/metag/kernel/topology.c b/arch/metag/kernel/topology.c
deleted file mode 100644
index 4ba595701f7d..000000000000
--- a/arch/metag/kernel/topology.c
+++ /dev/null
@@ -1,78 +0,0 @@
1/*
2 * Copyright (C) 2007 Paul Mundt
 * Copyright (C) 2010 Imagination Technologies Ltd.
4 *
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 */
9#include <linux/cpu.h>
10#include <linux/cpumask.h>
11#include <linux/init.h>
12#include <linux/percpu.h>
13#include <linux/node.h>
14#include <linux/nodemask.h>
15#include <linux/topology.h>
16
17#include <asm/cpu.h>
18
19DEFINE_PER_CPU(struct cpuinfo_metag, cpu_data);
20
21cpumask_t cpu_core_map[NR_CPUS];
22EXPORT_SYMBOL(cpu_core_map);
23
24static cpumask_t cpu_coregroup_map(unsigned int cpu)
25{
26 return *cpu_possible_mask;
27}
28
29const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
30{
31 return &cpu_core_map[cpu];
32}
33
34int arch_update_cpu_topology(void)
35{
36 unsigned int cpu;
37
38 for_each_possible_cpu(cpu)
39 cpu_core_map[cpu] = cpu_coregroup_map(cpu);
40
41 return 0;
42}
43
/*
 * topology_init - register nodes and CPUs with sysfs at boot.
 *
 * Registers each online NUMA node (when CONFIG_NEED_MULTIPLE_NODES)
 * and each present CPU. A failed CPU registration is logged but does
 * not abort the rest of the initialisation.
 */
static int __init topology_init(void)
{
	int i, ret;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(i)
		register_one_node(i);
#endif

	for_each_present_cpu(i) {
		struct cpuinfo_metag *cpuinfo = &per_cpu(cpu_data, i);
#ifdef CONFIG_HOTPLUG_CPU
		/* Allow the CPU to be brought on/offline via sysfs. */
		cpuinfo->cpu.hotpluggable = 1;
#endif
		ret = register_cpu(&cpuinfo->cpu, i);
		if (unlikely(ret))
			pr_warn("%s: register_cpu %d failed (%d)\n",
				__func__, i, ret);
	}

#if defined(CONFIG_NUMA) && !defined(CONFIG_SMP)
	/*
	 * In the UP case, make sure the CPU association is still
	 * registered under each node. Without this, sysfs fails
	 * to make the connection between nodes other than node0
	 * and cpu0.
	 */
	for_each_online_node(i)
		if (i != numa_node_id())
			register_cpu_under_node(raw_smp_processor_id(), i);
#endif

	return 0;
}
subsys_initcall(topology_init);
diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c
deleted file mode 100644
index 3b62b1b0c0b5..000000000000
--- a/arch/metag/kernel/traps.c
+++ /dev/null
@@ -1,992 +0,0 @@
1/*
2 * Meta exception handling.
3 *
4 * Copyright (C) 2005,2006,2007,2008,2009,2012 Imagination Technologies Ltd.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file COPYING in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/export.h>
12#include <linux/sched.h>
13#include <linux/sched/debug.h>
14#include <linux/sched/task.h>
15#include <linux/sched/task_stack.h>
16#include <linux/signal.h>
17#include <linux/kernel.h>
18#include <linux/mm.h>
19#include <linux/types.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/preempt.h>
23#include <linux/ptrace.h>
24#include <linux/module.h>
25#include <linux/kallsyms.h>
26#include <linux/kdebug.h>
27#include <linux/kexec.h>
28#include <linux/unistd.h>
29#include <linux/smp.h>
30#include <linux/slab.h>
31#include <linux/syscalls.h>
32
33#include <asm/bug.h>
34#include <asm/core_reg.h>
35#include <asm/irqflags.h>
36#include <asm/siginfo.h>
37#include <asm/traps.h>
38#include <asm/hwthread.h>
39#include <asm/setup.h>
40#include <asm/switch.h>
41#include <asm/user_gateway.h>
42#include <asm/syscall.h>
43#include <asm/syscalls.h>
44
45/* Passing syscall arguments as long long is quicker. */
46typedef unsigned int (*LPSYSCALL) (unsigned long long,
47 unsigned long long,
48 unsigned long long);
49
50/*
51 * Users of LNKSET should compare the bus error bits obtained from DEFR
52 * against TXDEFR_LNKSET_SUCCESS only as the failure code will vary between
53 * different cores revisions.
54 */
55#define TXDEFR_LNKSET_SUCCESS 0x02000000
56#define TXDEFR_LNKSET_FAILURE 0x04000000
57
58/*
59 * Our global TBI handle. Initialised from setup.c/setup_arch.
60 */
61DECLARE_PER_CPU(PTBI, pTBI);
62
63#ifdef CONFIG_SMP
64static DEFINE_PER_CPU(unsigned int, trigger_mask);
65#else
66unsigned int global_trigger_mask;
67EXPORT_SYMBOL(global_trigger_mask);
68#endif
69
70unsigned long per_cpu__stack_save[NR_CPUS];
71
/*
 * Human-readable names for the TBIXXF hardware trap numbers, indexed
 * by signal number; consumed by trap_name() for diagnostics.
 */
static const char * const trap_names[] = {
	[TBIXXF_SIGNUM_IIF] = "Illegal instruction fault",
	[TBIXXF_SIGNUM_PGF] = "Privilege violation",
	[TBIXXF_SIGNUM_DHF] = "Unaligned data access fault",
	[TBIXXF_SIGNUM_IGF] = "Code fetch general read failure",
	[TBIXXF_SIGNUM_DGF] = "Data access general read/write fault",
	[TBIXXF_SIGNUM_IPF] = "Code fetch page fault",
	[TBIXXF_SIGNUM_DPF] = "Data access page fault",
	[TBIXXF_SIGNUM_IHF] = "Instruction breakpoint",
	[TBIXXF_SIGNUM_DWF] = "Read-only data access fault",
};
83
84const char *trap_name(int trapno)
85{
86 if (trapno >= 0 && trapno < ARRAY_SIZE(trap_names)
87 && trap_names[trapno])
88 return trap_names[trapno];
89 return "Unknown fault";
90}
91
92static DEFINE_SPINLOCK(die_lock);
93
/*
 * die - report a fatal kernel fault and terminate the current task.
 * @str:  message prefix (e.g. "Oops")
 * @regs: register state at the point of the fault
 * @err:  error code; the low 16 bits are treated as the trap number
 * @addr: faulting address
 *
 * Serialises concurrent oopses via die_lock, dumps registers and
 * modules, taints the kernel, and may hand off to kexec or panic.
 * Never returns: ends with do_exit(SIGSEGV).
 */
void __noreturn die(const char *str, struct pt_regs *regs,
		    long err, unsigned long addr)
{
	static int die_counter;	/* numbers each oops for the [#N] marker */

	oops_enter();

	spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);
	pr_err("%s: err %04lx (%s) addr %08lx [#%d]\n", str, err & 0xffff,
	       trap_name(err & 0xffff), addr, ++die_counter);

	print_modules();
	show_regs(regs);

	pr_err("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
	       task_pid_nr(current), task_stack_page(current) + THREAD_SIZE);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	/* Crash-kernel handoff and in-interrupt/panic checks happen while
	 * still holding die_lock so the report stays coherent. */
	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	spin_unlock_irq(&die_lock);
	oops_exit();
	do_exit(SIGSEGV);
}
128
129#ifdef CONFIG_METAG_DSP
130/*
131 * The ECH encoding specifies the size of a DSPRAM as,
132 *
133 * "slots" / 4
134 *
135 * A "slot" is the size of two DSPRAM bank entries; an entry from
136 * DSPRAM bank A and an entry from DSPRAM bank B. One DSPRAM bank
137 * entry is 4 bytes.
138 */
#define SLOT_SZ	8
/*
 * decode_dspram_size - turn an ECH size encoding into a byte count.
 *
 * Only the low 7 bits of @size carry the encoded value; each encoded
 * unit corresponds to four slots of SLOT_SZ bytes.
 */
static inline unsigned int decode_dspram_size(unsigned int size)
{
	unsigned int slots = size & 0x7f;

	return slots * 4 * SLOT_SZ;
}
146
/*
 * dspram_save - save the DSPRAM bank contents into a thread context.
 * @dsp_ctx: per-thread extended context receiving the RAM images
 * @ramA_sz: encoded size of DSPRAM bank A (0 = bank unused); bank A's
 *           encoding sits 8 bits higher, hence the >> 8 before decode
 * @ramB_sz: encoded size of DSPRAM bank B (0 = bank unused)
 *
 * Lazily allocates (and regrows when the encoded size has increased)
 * the per-bank save buffers, then saves each in-use bank via the TBI
 * helpers. Allocation failure panics, as the DSP state would
 * otherwise be lost.
 */
static void dspram_save(struct meta_ext_context *dsp_ctx,
			unsigned int ramA_sz, unsigned int ramB_sz)
{
	unsigned int ram_sz[2];
	int i;

	ram_sz[0] = ramA_sz;
	ram_sz[1] = ramB_sz;

	for (i = 0; i < 2; i++) {
		if (ram_sz[i] != 0) {
			unsigned int sz;

			if (i == 0)
				sz = decode_dspram_size(ram_sz[i] >> 8);
			else
				sz = decode_dspram_size(ram_sz[i]);

			if (dsp_ctx->ram[i] == NULL) {
				dsp_ctx->ram[i] = kmalloc(sz, GFP_KERNEL);

				if (dsp_ctx->ram[i] == NULL)
					panic("couldn't save DSP context");
			} else {
				/* Existing buffer too small: replace it.
				 * Note the comparison is on the raw encoded
				 * sizes saved last time. */
				if (ram_sz[i] > dsp_ctx->ram_sz[i]) {
					kfree(dsp_ctx->ram[i]);

					dsp_ctx->ram[i] = kmalloc(sz,
								  GFP_KERNEL);

					if (dsp_ctx->ram[i] == NULL)
						panic("couldn't save DSP context");
				}
			}

			if (i == 0)
				__TBIDspramSaveA(ram_sz[i], dsp_ctx->ram[i]);
			else
				__TBIDspramSaveB(ram_sz[i], dsp_ctx->ram[i]);

			/* Remember the encoded size for the next grow check. */
			dsp_ctx->ram_sz[i] = ram_sz[i];
		}
	}
}
191#endif /* CONFIG_METAG_DSP */
192
/*
 * nest_interrupts - re-enable interrupt nesting via __TBINestInts().
 * @State: TBI state from the handler entry
 * @mask:  trigger mask to nest with
 *
 * With CONFIG_METAG_DSP, also saves any "extended" register context
 * (DSP registers and DSPRAM banks) the interrupted user process was
 * using, allocating the per-thread context lazily. Kernel context is
 * never saved since the kernel doesn't use the DSP hardware.
 */
static void nest_interrupts(TBIRES State, unsigned long mask)
{
#ifdef CONFIG_METAG_DSP
	struct meta_ext_context *dsp_ctx;
	unsigned int D0_8;

	/*
	 * D0.8 may contain an ECH encoding. The upper 16 bits
	 * tell us what DSP resources the current process is
	 * using. OR the bits into the SaveMask so that
	 * __TBINestInts() knows what resources to save as
	 * part of this context.
	 *
	 * Don't save the context if we're nesting interrupts in the
	 * kernel because the kernel doesn't use DSP hardware.
	 */
	D0_8 = __core_reg_get(D0.8);

	if (D0_8 && (State.Sig.SaveMask & TBICTX_PRIV_BIT)) {
		State.Sig.SaveMask |= (D0_8 >> 16);

		dsp_ctx = current->thread.dsp_context;
		if (dsp_ctx == NULL) {
			dsp_ctx = kzalloc(sizeof(*dsp_ctx), GFP_KERNEL);
			if (dsp_ctx == NULL)
				panic("couldn't save DSP context: ENOMEM");

			current->thread.dsp_context = dsp_ctx;
		}

		current->thread.user_flags |= (D0_8 & 0xffff0000);
		__TBINestInts(State, &dsp_ctx->regs, mask);
		/* High byte = bank A encoding, low byte = bank B. */
		dspram_save(dsp_ctx, D0_8 & 0x7f00, D0_8 & 0x007f);
	} else
		__TBINestInts(State, NULL, mask);
#else
	__TBINestInts(State, NULL, mask);
#endif
}
236
/*
 * head_end - common entry work for trap/interrupt handlers.
 * @State: TBI state passed into the handler
 * @mask:  trigger mask used when nesting interrupts
 *
 * Records the interrupted context's save mask, forces saving of dirty
 * catch/RD-pipe buffers (at most once), preserves the TXDEFR LNKSET
 * result per interrupt-nesting depth on Meta 2 cores, and then nests
 * interrupts via nest_interrupts(). Statement order here mirrors the
 * hardware's save sequence — do not reorder.
 */
void head_end(TBIRES State, unsigned long mask)
{
	unsigned int savemask = (unsigned short)State.Sig.SaveMask;
	unsigned int ctx_savemask = (unsigned short)State.Sig.pCtx->SaveMask;

	if (savemask & TBICTX_PRIV_BIT) {
		/* Came from user mode: remember its flags for the return. */
		ctx_savemask |= TBICTX_PRIV_BIT;
		current->thread.user_flags = savemask;
	}

	/* Always undo the sleep bit */
	ctx_savemask &= ~TBICTX_WAIT_BIT;

	/* Always save the catch buffer and RD pipe if they are dirty */
	savemask |= TBICTX_XCBF_BIT;

	/* Only save the catch and RD if we have not already done so.
	 * Note - the RD bits are in the pCtx only, and not in the
	 * State.SaveMask.
	 */
	if ((savemask & TBICTX_CBUF_BIT) ||
	    (ctx_savemask & TBICTX_CBRP_BIT)) {
		/* Have we already saved the buffers though?
		 * - See TestTrack 5071 */
		if (ctx_savemask & TBICTX_XCBF_BIT) {
			/* Strip off the bits so the call to __TBINestInts
			 * won't save the buffers again. */
			savemask &= ~TBICTX_CBUF_BIT;
			ctx_savemask &= ~TBICTX_CBRP_BIT;
		}
	}

#ifdef CONFIG_METAG_META21
	{
		unsigned int depth, txdefr;

		/*
		 * Save TXDEFR state.
		 *
		 * The process may have been interrupted after a LNKSET, but
		 * before it could read the DEFR state, so we mustn't lose that
		 * state or it could end up retrying an atomic operation that
		 * succeeded.
		 *
		 * All interrupts are disabled at this point so we
		 * don't need to perform any locking. We must do this
		 * dance before we use LNKGET or LNKSET.
		 */
		BUG_ON(current->thread.int_depth > HARDIRQ_BITS);

		depth = current->thread.int_depth++;

		txdefr = __core_reg_get(TXDEFR);

		txdefr &= TXDEFR_BUS_STATE_BITS;
		if (txdefr & TXDEFR_LNKSET_SUCCESS)
			current->thread.txdefr_failure &= ~(1 << depth);
		else
			current->thread.txdefr_failure |= (1 << depth);
	}
#endif

	State.Sig.SaveMask = savemask;
	State.Sig.pCtx->SaveMask = ctx_savemask;

	nest_interrupts(State, mask);

#ifdef CONFIG_METAG_POISON_CATCH_BUFFERS
	/* Poison the catch registers. This shows up any mistakes we have
	 * made in their handling MUCH quicker.
	 */
	__core_reg_set(TXCATCH0, 0x87650021);
	__core_reg_set(TXCATCH1, 0x87654322);
	__core_reg_set(TXCATCH2, 0x87654323);
	__core_reg_set(TXCATCH3, 0x87654324);
#endif /* CONFIG_METAG_POISON_CATCH_BUFFERS */
}
314
/*
 * tail_end_sys - common exit work before a handler returns to TBI.
 * @State:   TBI state to hand back
 * @syscall: syscall number as supplied by the caller (used by the
 *           pending-work path)
 * @restart: out parameter; set to 1 when the syscall must be restarted
 *           in-kernel without returning to userland
 *
 * Runs pending user-mode work (signals etc.), restores FPU and DSP
 * extended context for a return to userland, and on Meta 2 cores
 * restores the per-depth saved TXDEFR LNKSET result. Interrupts are
 * left disabled here; TBI turns them back on during resume.
 */
TBIRES tail_end_sys(TBIRES State, int syscall, int *restart)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
	unsigned long flags;

	local_irq_disable();

	if (user_mode(regs)) {
		flags = current_thread_info()->flags;
		if (flags & _TIF_WORK_MASK &&
		    do_work_pending(regs, flags, syscall)) {
			*restart = 1;
			return State;
		}

#ifdef CONFIG_METAG_FPU
		if (current->thread.fpu_context &&
		    current->thread.fpu_context->needs_restore) {
			__TBICtxFPURestore(State, current->thread.fpu_context);
			/*
			 * Clearing this bit ensures the FP unit is not made
			 * active again unless it is used.
			 */
			State.Sig.SaveMask &= ~TBICTX_FPAC_BIT;
			current->thread.fpu_context->needs_restore = false;
		}
		State.Sig.TrigMask |= TBI_TRIG_BIT(TBID_SIGNUM_DFR);
#endif
	}

	/* TBI will turn interrupts back on at some point. */
	if (!irqs_disabled_flags((unsigned long)State.Sig.TrigMask))
		trace_hardirqs_on();

#ifdef CONFIG_METAG_DSP
	/*
	 * If we previously saved an extended context then restore it
	 * now. Otherwise, clear D0.8 because this process is not
	 * using DSP hardware.
	 */
	if (State.Sig.pCtx->SaveMask & TBICTX_XEXT_BIT) {
		unsigned int D0_8;
		struct meta_ext_context *dsp_ctx = current->thread.dsp_context;

		/* Make sure we're going to return to userland. */
		BUG_ON(current->thread.int_depth != 1);

		if (dsp_ctx->ram_sz[0] > 0)
			__TBIDspramRestoreA(dsp_ctx->ram_sz[0],
					    dsp_ctx->ram[0]);
		if (dsp_ctx->ram_sz[1] > 0)
			__TBIDspramRestoreB(dsp_ctx->ram_sz[1],
					    dsp_ctx->ram[1]);

		State.Sig.SaveMask |= State.Sig.pCtx->SaveMask;
		__TBICtxRestore(State, current->thread.dsp_context);
		/* Rebuild the ECH encoding in D0.8 from the saved flags
		 * and bank sizes. */
		D0_8 = __core_reg_get(D0.8);
		D0_8 |= current->thread.user_flags & 0xffff0000;
		D0_8 |= (dsp_ctx->ram_sz[1] | dsp_ctx->ram_sz[0]) & 0xffff;
		__core_reg_set(D0.8, D0_8);
	} else
		__core_reg_set(D0.8, 0);
#endif /* CONFIG_METAG_DSP */

#ifdef CONFIG_METAG_META21
	{
		unsigned int depth, txdefr;

		/*
		 * If there hasn't been a LNKSET since the last LNKGET then the
		 * link flag will be set, causing the next LNKSET to succeed if
		 * the addresses match. The two LNK operations may not be a pair
		 * (e.g. see atomic_read()), so the LNKSET should fail.
		 * We use a conditional-never LNKSET to clear the link flag
		 * without side effects.
		 */
		asm volatile("LNKSETDNV [D0Re0],D0Re0");

		depth = --current->thread.int_depth;

		BUG_ON(user_mode(regs) && depth);

		txdefr = __core_reg_get(TXDEFR);

		txdefr &= ~TXDEFR_BUS_STATE_BITS;

		/* Do we need to restore a failure code into TXDEFR? */
		if (current->thread.txdefr_failure & (1 << depth))
			txdefr |= (TXDEFR_LNKSET_FAILURE | TXDEFR_BUS_TRIG_BIT);
		else
			txdefr |= (TXDEFR_LNKSET_SUCCESS | TXDEFR_BUS_TRIG_BIT);

		__core_reg_set(TXDEFR, txdefr);
	}
#endif
	return State;
}
412
413#ifdef CONFIG_SMP
414/*
415 * If we took an interrupt in the middle of __kuser_get_tls then we need
416 * to rewind the PC to the start of the function in case the process
417 * gets migrated to another thread (SMP only) and it reads the wrong tls
418 * data.
419 */
420static inline void _restart_critical_section(TBIRES State)
421{
422 unsigned long get_tls_start;
423 unsigned long get_tls_end;
424
425 get_tls_start = (unsigned long)__kuser_get_tls -
426 (unsigned long)&__user_gateway_start;
427
428 get_tls_start += USER_GATEWAY_PAGE;
429
430 get_tls_end = (unsigned long)__kuser_get_tls_end -
431 (unsigned long)&__user_gateway_start;
432
433 get_tls_end += USER_GATEWAY_PAGE;
434
435 if ((State.Sig.pCtx->CurrPC >= get_tls_start) &&
436 (State.Sig.pCtx->CurrPC < get_tls_end))
437 State.Sig.pCtx->CurrPC = get_tls_start;
438}
439#else
440/*
441 * If we took an interrupt in the middle of
442 * __kuser_cmpxchg then we need to rewind the PC to the
443 * start of the function.
444 */
445static inline void _restart_critical_section(TBIRES State)
446{
447 unsigned long cmpxchg_start;
448 unsigned long cmpxchg_end;
449
450 cmpxchg_start = (unsigned long)__kuser_cmpxchg -
451 (unsigned long)&__user_gateway_start;
452
453 cmpxchg_start += USER_GATEWAY_PAGE;
454
455 cmpxchg_end = (unsigned long)__kuser_cmpxchg_end -
456 (unsigned long)&__user_gateway_start;
457
458 cmpxchg_end += USER_GATEWAY_PAGE;
459
460 if ((State.Sig.pCtx->CurrPC >= cmpxchg_start) &&
461 (State.Sig.pCtx->CurrPC < cmpxchg_end))
462 State.Sig.pCtx->CurrPC = cmpxchg_start;
463}
464#endif
465
/* Used by kick_handler() — non-inline entry point to the SMP/UP
 * variant of _restart_critical_section() above. */
void restart_critical_section(TBIRES State)
{
	_restart_critical_section(State);
}
471
/*
 * trigger_handler - TBI handler for external interrupt triggers.
 *
 * Performs the common entry work, rewinds any interrupted user-mode
 * gateway critical section, then hands the trigger to the generic IRQ
 * layer via do_IRQ() before the common exit work.
 */
TBIRES trigger_handler(TBIRES State, int SigNum, int Triggers, int Inst,
		       PTBI pTBI)
{
	head_end(State, ~INTS_OFF_MASK);

	/* If we interrupted user code handle any critical sections. */
	if (State.Sig.SaveMask & TBICTX_PRIV_BIT)
		_restart_critical_section(State);

	trace_hardirqs_off();

	do_IRQ(SigNum, (struct pt_regs *)State.Sig.pCtx);

	return tail_end(State);
}
487
488static unsigned int load_fault(PTBICTXEXTCB0 pbuf)
489{
490 return pbuf->CBFlags & TXCATCH0_READ_BIT;
491}
492
493static unsigned long fault_address(PTBICTXEXTCB0 pbuf)
494{
495 return pbuf->CBAddr;
496}
497
/*
 * unhandled_fault - deliver a fault the kernel could not resolve.
 * @regs:   register state at the fault
 * @addr:   faulting address
 * @signo:  signal to deliver (user mode only)
 * @code:   si_code for the signal
 * @trapno: hardware trap number, also reported in si_trapno
 *
 * User-mode faults raise a signal on the current task (optionally
 * rate-limited-logging the details); kernel-mode faults oops via
 * die().
 */
static void unhandled_fault(struct pt_regs *regs, unsigned long addr,
			    int signo, int code, int trapno)
{
	if (user_mode(regs)) {
		siginfo_t info;

		if (show_unhandled_signals && unhandled_signal(current, signo)
		    && printk_ratelimit()) {

			pr_info("pid %d unhandled fault: pc 0x%08x, addr 0x%08lx, trap %d (%s)\n",
				current->pid, regs->ctx.CurrPC, addr,
				trapno, trap_name(trapno));
			print_vma_addr(" in ", regs->ctx.CurrPC);
			print_vma_addr(" rtp in ", regs->ctx.DX[4].U1);
			printk("\n");
			show_regs(regs);
		}

		info.si_signo = signo;
		info.si_errno = 0;
		info.si_code = code;
		info.si_addr = (__force void __user *)addr;
		info.si_trapno = trapno;
		force_sig_info(signo, &info, current);
	} else {
		die("Oops", regs, trapno, addr);
	}
}
526
527static int handle_data_fault(PTBICTXEXTCB0 pcbuf, struct pt_regs *regs,
528 unsigned int data_address, int trapno)
529{
530 int ret;
531
532 ret = do_page_fault(regs, data_address, !load_fault(pcbuf), trapno);
533
534 return ret;
535}
536
/* For instruction-fetch faults the faulting address is the PC itself. */
static unsigned long get_inst_fault_address(struct pt_regs *regs)
{
	return regs->ctx.CurrPC;
}
541
/*
 * fault_handler - TBI handler for TBID_SIGNUM_XXF hardware faults.
 *
 * Hardware breakpoints and data watchpoints are passed straight to
 * __TBIUnExpXXX (e.g. for an attached debugger). Page-fault class
 * signals go to do_page_fault(); the rest become signals or an oops
 * via unhandled_fault().
 */
TBIRES fault_handler(TBIRES State, int SigNum, int Triggers,
		     int Inst, PTBI pTBI)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
	PTBICTXEXTCB0 pcbuf = (PTBICTXEXTCB0)&regs->extcb0;
	unsigned long data_address;

	head_end(State, ~INTS_OFF_MASK);

	/* Hardware breakpoint or data watch */
	if ((SigNum == TBIXXF_SIGNUM_IHF) ||
	    ((SigNum == TBIXXF_SIGNUM_DHF) &&
	     (pcbuf[0].CBFlags & (TXCATCH0_WATCH1_BIT |
				  TXCATCH0_WATCH0_BIT)))) {
		State = __TBIUnExpXXX(State, SigNum, Triggers, Inst,
				      pTBI);
		return tail_end(State);
	}

	local_irq_enable();

	data_address = fault_address(pcbuf);

	switch (SigNum) {
	case TBIXXF_SIGNUM_IGF:
		/* 1st-level entry invalid (instruction fetch) */
	case TBIXXF_SIGNUM_IPF: {
		/* 2nd-level entry invalid (instruction fetch) */
		unsigned long addr = get_inst_fault_address(regs);
		do_page_fault(regs, addr, 0, SigNum);
		break;
	}

	case TBIXXF_SIGNUM_DGF:
		/* 1st-level entry invalid (data access) */
	case TBIXXF_SIGNUM_DPF:
		/* 2nd-level entry invalid (data access) */
	case TBIXXF_SIGNUM_DWF:
		/* Write to read only page */
		handle_data_fault(pcbuf, regs, data_address, SigNum);
		break;

	case TBIXXF_SIGNUM_IIF:
		/* Illegal instruction */
		unhandled_fault(regs, regs->ctx.CurrPC, SIGILL, ILL_ILLOPC,
				SigNum);
		break;

	case TBIXXF_SIGNUM_DHF:
		/* Unaligned access */
		unhandled_fault(regs, data_address, SIGBUS, BUS_ADRALN,
				SigNum);
		break;
	case TBIXXF_SIGNUM_PGF:
		/* Privilege violation */
		unhandled_fault(regs, data_address, SIGSEGV, SEGV_ACCERR,
				SigNum);
		break;
	default:
		BUG();
		break;
	}

	return tail_end(State);
}
607
608static bool switch_is_syscall(unsigned int inst)
609{
610 return inst == __METAG_SW_ENCODING(SYS);
611}
612
613static bool switch_is_legacy_syscall(unsigned int inst)
614{
615 return inst == __METAG_SW_ENCODING(SYS_LEGACY);
616}
617
/* Advance the PC past the 4-byte SWITCH instruction (@inst unused). */
static inline void step_over_switch(struct pt_regs *regs, unsigned int inst)
{
	regs->ctx.CurrPC += 4;
}
622
/* Non-zero when syscall tracing/audit work is pending for current. */
static inline int test_syscall_work(void)
{
	return current_thread_info()->flags & _TIF_WORK_SYSCALL_MASK;
}
627
628TBIRES switch1_handler(TBIRES State, int SigNum, int Triggers,
629 int Inst, PTBI pTBI)
630{
631 struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
632 unsigned int sysnumber;
633 unsigned long long a1_a2, a3_a4, a5_a6;
634 LPSYSCALL syscall_entry;
635 int restart;
636
637 head_end(State, ~INTS_OFF_MASK);
638
639 /*
640 * If this is not a syscall SWITCH it could be a breakpoint.
641 */
642 if (!switch_is_syscall(Inst)) {
643 /*
644 * Alert the user if they're trying to use legacy system
645 * calls. This suggests they need to update their C
646 * library and build against up to date kernel headers.
647 */
648 if (switch_is_legacy_syscall(Inst))
649 pr_warn_once("WARNING: A legacy syscall was made. Your userland needs updating.\n");
650 /*
651 * We don't know how to handle the SWITCH and cannot
652 * safely ignore it, so treat all unknown switches
653 * (including breakpoints) as traps.
654 */
655 force_sig(SIGTRAP, current);
656 return tail_end(State);
657 }
658
659 local_irq_enable();
660
661restart_syscall:
662 restart = 0;
663 sysnumber = regs->ctx.DX[0].U1;
664
665 if (test_syscall_work())
666 sysnumber = syscall_trace_enter(regs);
667
668 /* Skip over the SWITCH instruction - or you just get 'stuck' on it! */
669 step_over_switch(regs, Inst);
670
671 if (sysnumber >= __NR_syscalls) {
672 pr_debug("unknown syscall number: %d\n", sysnumber);
673 syscall_entry = (LPSYSCALL) sys_ni_syscall;
674 } else {
675 syscall_entry = (LPSYSCALL) sys_call_table[sysnumber];
676 }
677
678 /* Use 64bit loads for speed. */
679 a5_a6 = *(unsigned long long *)&regs->ctx.DX[1];
680 a3_a4 = *(unsigned long long *)&regs->ctx.DX[2];
681 a1_a2 = *(unsigned long long *)&regs->ctx.DX[3];
682
683 /* here is the actual call to the syscall handler functions */
684 regs->ctx.DX[0].U0 = syscall_entry(a1_a2, a3_a4, a5_a6);
685
686 if (test_syscall_work())
687 syscall_trace_leave(regs);
688
689 State = tail_end_sys(State, sysnumber, &restart);
690 /* Handlerless restarts shouldn't go via userland */
691 if (restart)
692 goto restart_syscall;
693 return State;
694}
695
/*
 * switchx_handler - TBI handler for SW0/SW2/SW3 SWITCH traps.
 *
 * This can be caused by any user process simply executing an unusual
 * SWITCH instruction. If there's no DA, __TBIUnExpXXX will cause the
 * thread to stop, so signal a SIGTRAP instead.
 */
TBIRES switchx_handler(TBIRES State, int SigNum, int Triggers,
		       int Inst, PTBI pTBI)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;

	head_end(State, ~INTS_OFF_MASK);
	if (user_mode(regs))
		force_sig(SIGTRAP, current);
	else
		State = __TBIUnExpXXX(State, SigNum, Triggers, Inst, pTBI);
	return tail_end(State);
}
713
714#ifdef CONFIG_METAG_META21
/*
 * fpe_handler - TBI handler for floating point exceptions (Meta 2).
 *
 * Maps the TXSTAT FP exception bits in @Triggers onto an si_code in
 * priority order (invalid > div-by-zero > overflow > underflow >
 * inexact) and delivers SIGFPE to the current task with the faulting
 * PC as si_addr.
 */
TBIRES fpe_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
	unsigned int error_state = Triggers;
	siginfo_t info;

	head_end(State, ~INTS_OFF_MASK);

	local_irq_enable();

	info.si_signo = SIGFPE;

	if (error_state & TXSTAT_FPE_INVALID_BIT)
		info.si_code = FPE_FLTINV;
	else if (error_state & TXSTAT_FPE_DIVBYZERO_BIT)
		info.si_code = FPE_FLTDIV;
	else if (error_state & TXSTAT_FPE_OVERFLOW_BIT)
		info.si_code = FPE_FLTOVF;
	else if (error_state & TXSTAT_FPE_UNDERFLOW_BIT)
		info.si_code = FPE_FLTUND;
	else if (error_state & TXSTAT_FPE_INEXACT_BIT)
		info.si_code = FPE_FLTRES;
	else
		info.si_code = FPE_FIXME;
	info.si_errno = 0;
	info.si_addr = (__force void __user *)regs->ctx.CurrPC;
	force_sig_info(SIGFPE, &info, current);

	return tail_end(State);
}
745#endif
746
747#ifdef CONFIG_METAG_SUSPEND_MEM
/* Snapshot of this CPU's TBI signal-handler table for suspend/resume. */
struct traps_context {
	PTBIAPIFN fnSigs[TBID_SIGNUM_MAX + 1];
};
751
752static struct traps_context *metag_traps_context;
753
/*
 * traps_save_context - snapshot the current CPU's TBI handler table.
 *
 * Allocates a traps_context (GF_ATOMIC since this runs on the suspend
 * path) and copies the fnSigs table into it, stashing the result in
 * metag_traps_context for traps_restore_context(). Returns 0 on
 * success or -ENOMEM.
 */
int traps_save_context(void)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);
	struct traps_context *context;

	context = kzalloc(sizeof(*context), GFP_ATOMIC);
	if (!context)
		return -ENOMEM;

	memcpy(context->fnSigs, (void *)_pTBI->fnSigs, sizeof(context->fnSigs));

	metag_traps_context = context;
	return 0;
}
769
770int traps_restore_context(void)
771{
772 unsigned long cpu = smp_processor_id();
773 PTBI _pTBI = per_cpu(pTBI, cpu);
774 struct traps_context *context = metag_traps_context;
775
776 metag_traps_context = NULL;
777
778 memcpy((void *)_pTBI->fnSigs, context->fnSigs, sizeof(context->fnSigs));
779
780 kfree(context);
781 return 0;
782}
783#endif
784
785#ifdef CONFIG_SMP
786static inline unsigned int _get_trigger_mask(void)
787{
788 unsigned long cpu = smp_processor_id();
789 return per_cpu(trigger_mask, cpu);
790}
791
/* Exported, non-inline accessor for this CPU's trigger mask. */
unsigned int get_trigger_mask(void)
{
	return _get_trigger_mask();
}
796EXPORT_SYMBOL(get_trigger_mask);
797
798static void set_trigger_mask(unsigned int mask)
799{
800 unsigned long cpu = smp_processor_id();
801 per_cpu(trigger_mask, cpu) = mask;
802}
803
/*
 * Re-enable interrupts by restoring this CPU's saved trigger mask.
 * Preemption is disabled around the per-CPU read so the mask restored
 * belongs to the CPU we actually run on.
 */
void arch_local_irq_enable(void)
{
	preempt_disable();
	arch_local_irq_restore(_get_trigger_mask());
	preempt_enable_no_resched();
}
810EXPORT_SYMBOL(arch_local_irq_enable);
811#else
/* UP build: a single global mask stands in for the per-CPU copies. */
static void set_trigger_mask(unsigned int mask)
{
	global_trigger_mask = mask;
}
816#endif
817
/*
 * per_cpu_trap_init - per-CPU trap/interrupt bring-up.
 * @cpu: logical CPU being initialised
 *
 * Programs the CPU's initial trigger mask (interrupts plus the
 * low-level kick and SW1 syscall triggers) and enters the TBI
 * asynchronous trigger state with interrupts initially off.
 */
void per_cpu_trap_init(unsigned long cpu)
{
	TBIRES int_context;
	unsigned int thread = cpu_2_hwthread_id[cpu];

	set_trigger_mask(TBI_INTS_INIT(thread) | /* interrupts */
			 TBI_TRIG_BIT(TBID_SIGNUM_LWK) | /* low level kick */
			 TBI_TRIG_BIT(TBID_SIGNUM_SW1));

	/* non-priv - use current stack */
	int_context.Sig.pCtx = NULL;
	/* Start with interrupts off */
	int_context.Sig.TrigMask = INTS_OFF_MASK;
	int_context.Sig.SaveMask = 0;

	/* And call __TBIASyncTrigger() */
	__TBIASyncTrigger(int_context);
}
836
/*
 * trap_init - install the boot CPU's TBI signal handlers.
 *
 * Wires faults, the four SWITCH signals and the low-level kick into
 * their handlers (SW1 is the syscall entry; the others trap), plus
 * the deferred/FP handlers on Meta 2, then finishes per-CPU setup.
 */
void __init trap_init(void)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);

	_pTBI->fnSigs[TBID_SIGNUM_XXF] = fault_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW0] = switchx_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW1] = switch1_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW2] = switchx_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW3] = switchx_handler;
	_pTBI->fnSigs[TBID_SIGNUM_LWK] = kick_handler;

#ifdef CONFIG_METAG_META21
	_pTBI->fnSigs[TBID_SIGNUM_DFR] = __TBIHandleDFR;
	_pTBI->fnSigs[TBID_SIGNUM_FPE] = fpe_handler;
#endif

	per_cpu_trap_init(cpu);
}
856
/*
 * tbi_startup_interrupt - enable a TBI trigger and route it to the
 * generic IRQ layer via trigger_handler().
 * @irq: TBI signal number to enable
 */
void tbi_startup_interrupt(int irq)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);

	BUG_ON(irq > TBID_SIGNUM_MAX);

	/* For TR1 and TR2, the thread id is encoded in the irq number */
	/*
	 * NOTE(review): 'cpu' is remapped here but never used afterwards;
	 * set_trigger_mask()/get_trigger_mask() act on the *current* CPU
	 * and _pTBI was fetched before the remap — confirm this is
	 * intentional.
	 */
	if (irq >= TBID_SIGNUM_T10 && irq < TBID_SIGNUM_TR3)
		cpu = hwthread_id_2_cpu[(irq - TBID_SIGNUM_T10) % 4];

	set_trigger_mask(get_trigger_mask() | TBI_TRIG_BIT(irq));

	_pTBI->fnSigs[irq] = trigger_handler;
}
872
/*
 * tbi_shutdown_interrupt - disable a TBI trigger and restore the
 * default unexpected-trigger handler.
 * @irq: TBI signal number to disable
 */
void tbi_shutdown_interrupt(int irq)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);

	BUG_ON(irq > TBID_SIGNUM_MAX);

	set_trigger_mask(get_trigger_mask() & ~TBI_TRIG_BIT(irq));

	_pTBI->fnSigs[irq] = __TBIUnExpXXX;
}
884
/*
 * ret_from_fork - first code run by a newly forked task.
 * @arg: TBI switch result; pPara carries the previous task pointer
 *
 * Finishes the scheduler handover, runs the thread function for
 * kernel threads, performs syscall-exit tracing if pending, then
 * resumes the saved user context via __TBIASyncResume(). Does not
 * return (the trailing return only satisfies the prototype).
 */
int ret_from_fork(TBIRES arg)
{
	struct task_struct *prev = arg.Switch.pPara;
	struct task_struct *tsk = current;
	struct pt_regs *regs = task_pt_regs(tsk);
	int (*fn)(void *);
	TBIRES Next;

	schedule_tail(prev);

	if (tsk->flags & PF_KTHREAD) {
		/* Kernel thread: entry point and argument were stashed in
		 * the child's register context at fork time. */
		fn = (void *)regs->ctx.DX[4].U1;
		BUG_ON(!fn);

		fn((void *)regs->ctx.DX[3].U1);
	}

	if (test_syscall_work())
		syscall_trace_leave(regs);

	preempt_disable();

	Next.Sig.TrigMask = get_trigger_mask();
	Next.Sig.SaveMask = 0;
	Next.Sig.pCtx = &regs->ctx;

	set_gateway_tls(current->thread.tls_ptr);

	preempt_enable_no_resched();

	/* And interrupts should come back on when we resume the real usermode
	 * code. Call __TBIASyncResume()
	 */
	__TBIASyncResume(tail_end(Next));
	/* ASyncResume should NEVER return */
	BUG();
	return 0;
}
923
/*
 * show_trace - print a kernel call trace for @tsk.
 * @tsk:  task to trace (NULL means current)
 * @sp:   stack pointer to scan (used only without CONFIG_FRAME_POINTER)
 * @regs: optional register state to start from; user-mode regs abort
 *        the trace
 *
 * With frame pointers, walks the frame chain while it stays inside
 * the task's stack page; the Meta stack grows up, so frame pointers
 * must strictly decrease. Without them, scans the raw stack for
 * kernel text addresses.
 */
void show_trace(struct task_struct *tsk, unsigned long *sp,
		struct pt_regs *regs)
{
	unsigned long addr;
#ifdef CONFIG_FRAME_POINTER
	unsigned long fp, fpnew;
	unsigned long stack;
#endif

	if (regs && user_mode(regs))
		return;

	printk("\nCall trace: ");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif

	if (!tsk)
		tsk = current;

#ifdef CONFIG_FRAME_POINTER
	if (regs) {
		print_ip_sym(regs->ctx.CurrPC);
		fp = regs->ctx.AX[1].U0;
	} else {
		fp = __core_reg_get(A0FrP);
	}

	/* detect when the frame pointer has been used for other purposes and
	 * doesn't point to the stack (it may point completely elsewhere which
	 * kstack_end may not detect).
	 */
	stack = (unsigned long)task_stack_page(tsk);
	while (fp >= stack && fp + 8 <= stack + THREAD_SIZE) {
		/* Return address lives at fp+4; -4 points back into the
		 * calling instruction. */
		addr = __raw_readl((unsigned long *)(fp + 4)) - 4;
		if (kernel_text_address(addr))
			print_ip_sym(addr);
		else
			break;
		/* stack grows up, so frame pointers must decrease */
		fpnew = __raw_readl((unsigned long *)(fp + 0));
		if (fpnew >= fp)
			break;
		fp = fpnew;
	}
#else
	while (!kstack_end(sp)) {
		addr = (*sp--) - 4;
		if (kernel_text_address(addr))
			print_ip_sym(addr);
	}
#endif

	printk("\n");

	debug_show_held_locks(tsk);
}
981
982void show_stack(struct task_struct *tsk, unsigned long *sp)
983{
984 if (!tsk)
985 tsk = current;
986 if (tsk == current)
987 sp = (unsigned long *)current_stack_pointer;
988 else
989 sp = (unsigned long *)tsk->thread.kernel_context->AX[0].U0;
990
991 show_trace(tsk, sp, NULL);
992}
diff --git a/arch/metag/kernel/user_gateway.S b/arch/metag/kernel/user_gateway.S
deleted file mode 100644
index 7833fb8f9ddd..000000000000
--- a/arch/metag/kernel/user_gateway.S
+++ /dev/null
@@ -1,98 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C) 2010 Imagination Technologies Ltd.
4 *
5 * This file contains code that can be accessed from userspace and can
6 * access certain kernel data structures without the overhead of a system
7 * call.
8 */
9
10#include <asm/metag_regs.h>
11#include <asm/user_gateway.h>
12
13/*
14 * User helpers.
15 *
16 * These are segment of kernel provided user code reachable from user space
17 * at a fixed address in kernel memory. This is used to provide user space
18 * with some operations which require kernel help because of unimplemented
19 * native feature and/or instructions in some Meta CPUs. The idea is for
20 * this code to be executed directly in user mode for best efficiency but
21 * which is too intimate with the kernel counter part to be left to user
22 * libraries. The kernel reserves the right to change this code as needed
23 * without warning. Only the entry points and their results are guaranteed
24 * to be stable.
25 *
26 * Each segment is 64-byte aligned. This mechanism should be used only for
27 * for things that are really small and justified, and not be abused freely.
28 */
29 .text
30 .global ___user_gateway_start
31___user_gateway_start:
32
33 /* get_tls
34 * Offset: 0
35 * Description: Get the TLS pointer for this process.
36 */
37 .global ___kuser_get_tls
38 .type ___kuser_get_tls,function
39___kuser_get_tls:
40 MOVT D1Ar1,#HI(USER_GATEWAY_PAGE + USER_GATEWAY_TLS)
41 ADD D1Ar1,D1Ar1,#LO(USER_GATEWAY_PAGE + USER_GATEWAY_TLS)
42 MOV D1Ar3,TXENABLE
43 AND D1Ar3,D1Ar3,#(TXENABLE_THREAD_BITS)
44 LSR D1Ar3,D1Ar3,#(TXENABLE_THREAD_S - 2)
45 GETD D0Re0,[D1Ar1+D1Ar3]
46___kuser_get_tls_end: /* Beyond this point the read will complete */
47 MOV PC,D1RtP
48 .size ___kuser_get_tls,.-___kuser_get_tls
49 .global ___kuser_get_tls_end
50
51 /* cmpxchg
52 * Offset: 64
53 * Description: Replace the value at 'ptr' with 'newval' if the current
54 * value is 'oldval'. Return zero if we succeeded,
55 * non-zero otherwise.
56 *
57 * Reference prototype:
58 *
59 * int __kuser_cmpxchg(int oldval, int newval, unsigned long *ptr)
60 *
61 */
62 .balign 64
63 .global ___kuser_cmpxchg
64 .type ___kuser_cmpxchg,function
65___kuser_cmpxchg:
66#ifdef CONFIG_SMP
67 /*
68 * We must use LNKGET/LNKSET with an SMP kernel because the other method
69 * does not provide atomicity across multiple CPUs.
70 */
710: LNKGETD D0Re0,[D1Ar3]
72 CMP D0Re0,D1Ar1
73 LNKSETDZ [D1Ar3],D0Ar2
74 BNZ 1f
75 DEFR D0Re0,TXSTAT
76 ANDT D0Re0,D0Re0,#HI(0x3f000000)
77 CMPT D0Re0,#HI(0x02000000)
78 BNE 0b
79#ifdef CONFIG_METAG_LNKGET_AROUND_CACHE
80 DCACHE [D1Ar3], D0Re0
81#endif
821: MOV D0Re0,#1
83 XORZ D0Re0,D0Re0,D0Re0
84 MOV PC,D1RtP
85#else
86 GETD D0Re0,[D1Ar3]
87 CMP D0Re0,D1Ar1
88 SETDZ [D1Ar3],D0Ar2
89___kuser_cmpxchg_end: /* Beyond this point the write will complete */
90 MOV D0Re0,#1
91 XORZ D0Re0,D0Re0,D0Re0
92 MOV PC,D1RtP
93#endif /* CONFIG_SMP */
94 .size ___kuser_cmpxchg,.-___kuser_cmpxchg
95 .global ___kuser_cmpxchg_end
96
97 .global ___user_gateway_end
98___user_gateway_end:
diff --git a/arch/metag/kernel/vmlinux.lds.S b/arch/metag/kernel/vmlinux.lds.S
deleted file mode 100644
index 1efadae2ea8e..000000000000
--- a/arch/metag/kernel/vmlinux.lds.S
+++ /dev/null
@@ -1,74 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* ld script to make Meta Linux kernel */
3
4#include <asm/thread_info.h>
5#include <asm/page.h>
6#include <asm/cache.h>
7
8#include <asm-generic/vmlinux.lds.h>
9
10OUTPUT_FORMAT("elf32-metag", "elf32-metag", "elf32-metag")
11OUTPUT_ARCH(metag)
12ENTRY(__start)
13
14_jiffies = _jiffies_64;
15SECTIONS
16{
17 . = CONFIG_PAGE_OFFSET;
18 _text = .;
19 __text = .;
20 __stext = .;
21 HEAD_TEXT_SECTION
22 .text : {
23 TEXT_TEXT
24 SCHED_TEXT
25 CPUIDLE_TEXT
26 LOCK_TEXT
27 KPROBES_TEXT
28 IRQENTRY_TEXT
29 SOFTIRQENTRY_TEXT
30 *(.text.*)
31 *(.gnu.warning)
32 }
33
34 __etext = .; /* End of text section */
35
36 __sdata = .;
37 RO_DATA_SECTION(PAGE_SIZE)
38 RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
39 __edata = .; /* End of data section */
40
41 EXCEPTION_TABLE(16)
42 NOTES
43
44 . = ALIGN(PAGE_SIZE); /* Init code and data */
45 ___init_begin = .;
46 INIT_TEXT_SECTION(PAGE_SIZE)
47 INIT_DATA_SECTION(16)
48
49 .init.arch.info : {
50 ___arch_info_begin = .;
51 *(.arch.info.init)
52 ___arch_info_end = .;
53 }
54
55 PERCPU_SECTION(L1_CACHE_BYTES)
56
57 ___init_end = .;
58
59 BSS_SECTION(0, PAGE_SIZE, 0)
60
61 __end = .;
62
63 . = ALIGN(PAGE_SIZE);
64 __heap_start = .;
65
66 DWARF_DEBUG
67
68 /* When something in the kernel is NOT compiled as a module, the
69 * module cleanup code and data are put into these segments. Both
70 * can then be thrown away, as cleanup code is never called unless
71 * it's a module.
72 */
73 DISCARDS
74}
diff --git a/arch/metag/lib/Makefile b/arch/metag/lib/Makefile
deleted file mode 100644
index 3982850d692c..000000000000
--- a/arch/metag/lib/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
1# SPDX-License-Identifier: GPL-2.0
2#
3# Makefile for Meta-specific library files.
4#
5
6lib-y += usercopy.o
7lib-y += copy_page.o
8lib-y += clear_page.o
9lib-y += memcpy.o
10lib-y += memmove.o
11lib-y += memset.o
12lib-y += delay.o
13lib-y += div64.o
14lib-y += muldi3.o
15lib-y += ashrdi3.o
16lib-y += ashldi3.o
17lib-y += lshrdi3.o
18lib-y += divsi3.o
19lib-y += modsi3.o
20lib-y += cmpdi2.o
21lib-y += ucmpdi2.o
22lib-y += ip_fast_csum.o
23lib-y += checksum.o
diff --git a/arch/metag/lib/ashldi3.S b/arch/metag/lib/ashldi3.S
deleted file mode 100644
index 5055df9e5c7b..000000000000
--- a/arch/metag/lib/ashldi3.S
+++ /dev/null
@@ -1,34 +0,0 @@
1! SPDX-License-Identifier: GPL-2.0
2! Copyright (C) 2012 by Imagination Technologies Ltd.
3!
4! 64-bit arithmetic shift left routine.
5!
6
7 .text
8 .global ___ashldi3
9 .type ___ashldi3,function
10
11___ashldi3:
12 MOV D0Re0,D0Ar2
13 MOV D1Re0,D1Ar1
14 CMP D1Ar3,#0 ! COUNT == 0
15 MOVEQ PC,D1RtP ! Yes, return
16
17 SUBS D0Ar4,D1Ar3,#32 ! N = COUNT - 32
18 BGE $L10
19
20!! Shift < 32
21 NEG D0Ar4,D0Ar4 ! N = - N
22 LSL D1Re0,D1Re0,D1Ar3 ! HI = HI << COUNT
23 LSR D0Ar6,D0Re0,D0Ar4 ! TMP= LO >> -(COUNT - 32)
24 OR D1Re0,D1Re0,D0Ar6 ! HI = HI | TMP
25 SWAP D0Ar4,D1Ar3
26 LSL D0Re0,D0Re0,D0Ar4 ! LO = LO << COUNT
27 MOV PC,D1RtP
28
29$L10:
30!! Shift >= 32
31 LSL D1Re0,D0Re0,D0Ar4 ! HI = LO << N
32 MOV D0Re0,#0 ! LO = 0
33 MOV PC,D1RtP
34 .size ___ashldi3,.-___ashldi3
diff --git a/arch/metag/lib/ashrdi3.S b/arch/metag/lib/ashrdi3.S
deleted file mode 100644
index 0c838fd9da85..000000000000
--- a/arch/metag/lib/ashrdi3.S
+++ /dev/null
@@ -1,34 +0,0 @@
1! SPDX-License-Identifier: GPL-2.0
2! Copyright (C) 2012 by Imagination Technologies Ltd.
3!
4! 64-bit arithmetic shift right routine.
5!
6
7 .text
8 .global ___ashrdi3
9 .type ___ashrdi3,function
10
11___ashrdi3:
12 MOV D0Re0,D0Ar2
13 MOV D1Re0,D1Ar1
14 CMP D1Ar3,#0 ! COUNT == 0
15 MOVEQ PC,D1RtP ! Yes, return
16
17 MOV D0Ar4,D1Ar3
18 SUBS D1Ar3,D1Ar3,#32 ! N = COUNT - 32
19 BGE $L20
20
21!! Shift < 32
22 NEG D1Ar3,D1Ar3 ! N = - N
23 LSR D0Re0,D0Re0,D0Ar4 ! LO = LO >> COUNT
24 LSL D0Ar6,D1Re0,D1Ar3 ! TMP= HI << -(COUNT - 32)
25 OR D0Re0,D0Re0,D0Ar6 ! LO = LO | TMP
26 SWAP D1Ar3,D0Ar4
27 ASR D1Re0,D1Re0,D1Ar3 ! HI = HI >> COUNT
28 MOV PC,D1RtP
29$L20:
30!! Shift >= 32
31 ASR D0Re0,D1Re0,D1Ar3 ! LO = HI >> N
32 ASR D1Re0,D1Re0,#31 ! HI = HI >> 31
33 MOV PC,D1RtP
34 .size ___ashrdi3,.-___ashrdi3
diff --git a/arch/metag/lib/checksum.c b/arch/metag/lib/checksum.c
deleted file mode 100644
index 5d6a98a05e9d..000000000000
--- a/arch/metag/lib/checksum.c
+++ /dev/null
@@ -1,167 +0,0 @@
1/*
2 *
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * IP/TCP/UDP checksumming routines
8 *
9 * Authors: Jorge Cwik, <jorge@laser.satlink.net>
10 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
11 * Tom May, <ftom@netcom.com>
12 * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
13 * Lots of code moved from tcp.c and ip.c; see those files
14 * for more names.
15 *
16 * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek:
17 * Fixed some nasty bugs, causing some horrible crashes.
18 * A: At some points, the sum (%0) was used as
19 * length-counter instead of the length counter
20 * (%1). Thanks to Roman Hodek for pointing this out.
21 * B: GCC seems to mess up if one uses too many
22 * data-registers to hold input values and one tries to
23 * specify d0 and d1 as scratch registers. Letting gcc
24 * choose these registers itself solves the problem.
25 *
26 * This program is free software; you can redistribute it and/or
27 * modify it under the terms of the GNU General Public License
28 * as published by the Free Software Foundation; either version
29 * 2 of the License, or (at your option) any later version.
30 */
31
32/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
33 kills, so most of the assembly has to go. */
34
35#include <linux/module.h>
36#include <net/checksum.h>
37
38#include <asm/byteorder.h>
39
40static inline unsigned short from32to16(unsigned int x)
41{
42 /* add up 16-bit and 16-bit for 16+c bit */
43 x = (x & 0xffff) + (x >> 16);
44 /* add up carry.. */
45 x = (x & 0xffff) + (x >> 16);
46 return x;
47}
48
49static unsigned int do_csum(const unsigned char *buff, int len)
50{
51 int odd;
52 unsigned int result = 0;
53
54 if (len <= 0)
55 goto out;
56 odd = 1 & (unsigned long) buff;
57 if (odd) {
58#ifdef __LITTLE_ENDIAN
59 result += (*buff << 8);
60#else
61 result = *buff;
62#endif
63 len--;
64 buff++;
65 }
66 if (len >= 2) {
67 if (2 & (unsigned long) buff) {
68 result += *(unsigned short *) buff;
69 len -= 2;
70 buff += 2;
71 }
72 if (len >= 4) {
73 const unsigned char *end = buff + ((unsigned)len & ~3);
74 unsigned int carry = 0;
75 do {
76 unsigned int w = *(unsigned int *) buff;
77 buff += 4;
78 result += carry;
79 result += w;
80 carry = (w > result);
81 } while (buff < end);
82 result += carry;
83 result = (result & 0xffff) + (result >> 16);
84 }
85 if (len & 2) {
86 result += *(unsigned short *) buff;
87 buff += 2;
88 }
89 }
90 if (len & 1)
91#ifdef __LITTLE_ENDIAN
92 result += *buff;
93#else
94 result += (*buff << 8);
95#endif
96 result = from32to16(result);
97 if (odd)
98 result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
99out:
100 return result;
101}
102EXPORT_SYMBOL(ip_fast_csum);
103
104/*
105 * computes the checksum of a memory block at buff, length len,
106 * and adds in "sum" (32-bit)
107 *
108 * returns a 32-bit number suitable for feeding into itself
109 * or csum_tcpudp_magic
110 *
111 * this function must be called with even lengths, except
112 * for the last fragment, which may be odd
113 *
114 * it's best to have buff aligned on a 32-bit boundary
115 */
116__wsum csum_partial(const void *buff, int len, __wsum wsum)
117{
118 unsigned int sum = (__force unsigned int)wsum;
119 unsigned int result = do_csum(buff, len);
120
121 /* add in old sum, and carry.. */
122 result += sum;
123 if (sum > result)
124 result += 1;
125 return (__force __wsum)result;
126}
127
128/*
129 * this routine is used for miscellaneous IP-like checksums, mainly
130 * in icmp.c
131 */
132__sum16 ip_compute_csum(const void *buff, int len)
133{
134 return (__force __sum16)~do_csum(buff, len);
135}
136EXPORT_SYMBOL(ip_compute_csum);
137
138/*
139 * copy from fs while checksumming, otherwise like csum_partial
140 */
141__wsum
142csum_partial_copy_from_user(const void __user *src, void *dst, int len,
143 __wsum sum, int *csum_err)
144{
145 int missing;
146
147 missing = __copy_from_user(dst, src, len);
148 if (missing) {
149 memset(dst + len - missing, 0, missing);
150 *csum_err = -EFAULT;
151 } else
152 *csum_err = 0;
153
154 return csum_partial(dst, len, sum);
155}
156EXPORT_SYMBOL(csum_partial_copy_from_user);
157
158/*
159 * copy from ds while checksumming, otherwise like csum_partial
160 */
161__wsum
162csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
163{
164 memcpy(dst, src, len);
165 return csum_partial(dst, len, sum);
166}
167EXPORT_SYMBOL(csum_partial_copy);
diff --git a/arch/metag/lib/clear_page.S b/arch/metag/lib/clear_page.S
deleted file mode 100644
index 87756a5d1367..000000000000
--- a/arch/metag/lib/clear_page.S
+++ /dev/null
@@ -1,18 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2 ! Copyright 2007,2008,2009 Imagination Technologies Ltd.
3
4#include <asm/page.h>
5
6 .text
7 .global _clear_page
8 .type _clear_page,function
9 !! D1Ar1 - page
10_clear_page:
11 MOV TXRPT,#((PAGE_SIZE / 8) - 1)
12 MOV D0Re0,#0
13 MOV D1Re0,#0
14$Lclear_page_loop:
15 SETL [D1Ar1++],D0Re0,D1Re0
16 BR $Lclear_page_loop
17 MOV PC,D1RtP
18 .size _clear_page,.-_clear_page
diff --git a/arch/metag/lib/cmpdi2.S b/arch/metag/lib/cmpdi2.S
deleted file mode 100644
index ab70bd94fd81..000000000000
--- a/arch/metag/lib/cmpdi2.S
+++ /dev/null
@@ -1,33 +0,0 @@
1! SPDX-License-Identifier: GPL-2.0
2! Copyright (C) 2012 by Imagination Technologies Ltd.
3!
4! 64-bit signed compare routine.
5!
6
7 .text
8 .global ___cmpdi2
9 .type ___cmpdi2,function
10
11! low high
12! s64 a (D0Ar2, D1Ar1)
13! s64 b (D0Ar4, D1Ar3)
14___cmpdi2:
15 ! start at 1 (equal) and conditionally increment or decrement
16 MOV D0Re0,#1
17
18 ! high words differ?
19 CMP D1Ar1,D1Ar3
20 BNE $Lhigh_differ
21
22 ! unsigned compare low words
23 CMP D0Ar2,D0Ar4
24 SUBLO D0Re0,D0Re0,#1
25 ADDHI D0Re0,D0Re0,#1
26 MOV PC,D1RtP
27
28$Lhigh_differ:
29 ! signed compare high words
30 SUBLT D0Re0,D0Re0,#1
31 ADDGT D0Re0,D0Re0,#1
32 MOV PC,D1RtP
33 .size ___cmpdi2,.-___cmpdi2
diff --git a/arch/metag/lib/copy_page.S b/arch/metag/lib/copy_page.S
deleted file mode 100644
index abbc75e94374..000000000000
--- a/arch/metag/lib/copy_page.S
+++ /dev/null
@@ -1,21 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2 ! Copyright 2007,2008 Imagination Technologies Ltd.
3
4#include <asm/page.h>
5
6 .text
7 .global _copy_page
8 .type _copy_page,function
9 !! D1Ar1 - to
10 !! D0Ar2 - from
11_copy_page:
12 MOV D0FrT,#PAGE_SIZE
13$Lcopy_page_loop:
14 GETL D0Re0,D1Re0,[D0Ar2++]
15 GETL D0Ar6,D1Ar5,[D0Ar2++]
16 SETL [D1Ar1++],D0Re0,D1Re0
17 SETL [D1Ar1++],D0Ar6,D1Ar5
18 SUBS D0FrT,D0FrT,#16
19 BNZ $Lcopy_page_loop
20 MOV PC,D1RtP
21 .size _copy_page,.-_copy_page
diff --git a/arch/metag/lib/delay.c b/arch/metag/lib/delay.c
deleted file mode 100644
index 6754012a261f..000000000000
--- a/arch/metag/lib/delay.c
+++ /dev/null
@@ -1,57 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Precise Delay Loops for Meta
4 *
5 * Copyright (C) 1993 Linus Torvalds
6 * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
7 * Copyright (C) 2007,2009 Imagination Technologies Ltd.
8 *
9 */
10
11#include <linux/export.h>
12#include <linux/sched.h>
13#include <linux/delay.h>
14
15#include <asm/core_reg.h>
16#include <asm/processor.h>
17
18/*
19 * TXTACTCYC is only 24 bits, so on chips with fast clocks it will wrap
20 * many times per-second. If it does wrap __delay will return prematurely,
21 * but this is only likely with large delay values.
22 *
23 * We also can't implement read_current_timer() with TXTACTCYC due to
24 * this wrapping behaviour.
25 */
26#define rdtimer(t) t = __core_reg_get(TXTACTCYC)
27
28void __delay(unsigned long loops)
29{
30 unsigned long bclock, now;
31
32 rdtimer(bclock);
33 do {
34 asm("NOP");
35 rdtimer(now);
36 } while ((now-bclock) < loops);
37}
38EXPORT_SYMBOL(__delay);
39
40inline void __const_udelay(unsigned long xloops)
41{
42 u64 loops = (u64)xloops * (u64)loops_per_jiffy * HZ;
43 __delay(loops >> 32);
44}
45EXPORT_SYMBOL(__const_udelay);
46
47void __udelay(unsigned long usecs)
48{
49 __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
50}
51EXPORT_SYMBOL(__udelay);
52
53void __ndelay(unsigned long nsecs)
54{
55 __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
56}
57EXPORT_SYMBOL(__ndelay);
diff --git a/arch/metag/lib/div64.S b/arch/metag/lib/div64.S
deleted file mode 100644
index 55eece273a6b..000000000000
--- a/arch/metag/lib/div64.S
+++ /dev/null
@@ -1,109 +0,0 @@
1! SPDX-License-Identifier: GPL-2.0
2! Copyright (C) 2012 Imagination Technologies Ltd.
3!
4! Signed/unsigned 64-bit division routines.
5!
6
7 .text
8 .global _div_u64
9 .type _div_u64,function
10
11_div_u64:
12$L1:
13 ORS A0.3,D1Ar3,D0Ar4
14 BNE $L3
15$L2:
16 MOV D0Re0,D0Ar2
17 MOV D1Re0,D1Ar1
18 MOV PC,D1RtP
19$L3:
20 CMP D1Ar3,D1Ar1
21 CMPEQ D0Ar4,D0Ar2
22 MOV D0Re0,#1
23 MOV D1Re0,#0
24 BHS $L6
25$L4:
26 ADDS D0Ar6,D0Ar4,D0Ar4
27 ADD D1Ar5,D1Ar3,D1Ar3
28 ADDCS D1Ar5,D1Ar5,#1
29 CMP D1Ar5,D1Ar3
30 CMPEQ D0Ar6,D0Ar4
31 BLO $L6
32$L5:
33 MOV D0Ar4,D0Ar6
34 MOV D1Ar3,D1Ar5
35 ADDS D0Re0,D0Re0,D0Re0
36 ADD D1Re0,D1Re0,D1Re0
37 ADDCS D1Re0,D1Re0,#1
38 CMP D1Ar3,D1Ar1
39 CMPEQ D0Ar4,D0Ar2
40 BLO $L4
41$L6:
42 ORS A0.3,D1Re0,D0Re0
43 MOV D0Ar6,#0
44 MOV D1Ar5,D0Ar6
45 BEQ $L10
46$L7:
47 CMP D1Ar1,D1Ar3
48 CMPEQ D0Ar2,D0Ar4
49 BLO $L9
50$L8:
51 ADDS D0Ar6,D0Ar6,D0Re0
52 ADD D1Ar5,D1Ar5,D1Re0
53 ADDCS D1Ar5,D1Ar5,#1
54
55 SUBS D0Ar2,D0Ar2,D0Ar4
56 SUB D1Ar1,D1Ar1,D1Ar3
57 SUBCS D1Ar1,D1Ar1,#1
58$L9:
59 LSL A0.3,D1Re0,#31
60 LSR D0Re0,D0Re0,#1
61 LSR D1Re0,D1Re0,#1
62 OR D0Re0,D0Re0,A0.3
63 LSL A0.3,D1Ar3,#31
64 LSR D0Ar4,D0Ar4,#1
65 LSR D1Ar3,D1Ar3,#1
66 OR D0Ar4,D0Ar4,A0.3
67 ORS A0.3,D1Re0,D0Re0
68 BNE $L7
69$L10:
70 MOV D0Re0,D0Ar6
71 MOV D1Re0,D1Ar5
72 MOV PC,D1RtP
73 .size _div_u64,.-_div_u64
74
75 .text
76 .global _div_s64
77 .type _div_s64,function
78_div_s64:
79 MSETL [A0StP],D0FrT,D0.5
80 XOR D0.5,D0Ar2,D0Ar4
81 XOR D1.5,D1Ar1,D1Ar3
82 TSTT D1Ar1,#HI(0x80000000)
83 BZ $L25
84
85 NEGS D0Ar2,D0Ar2
86 NEG D1Ar1,D1Ar1
87 SUBCS D1Ar1,D1Ar1,#1
88$L25:
89 TSTT D1Ar3,#HI(0x80000000)
90 BZ $L27
91
92 NEGS D0Ar4,D0Ar4
93 NEG D1Ar3,D1Ar3
94 SUBCS D1Ar3,D1Ar3,#1
95$L27:
96 CALLR D1RtP,_div_u64
97 TSTT D1.5,#HI(0x80000000)
98 BZ $L29
99
100 NEGS D0Re0,D0Re0
101 NEG D1Re0,D1Re0
102 SUBCS D1Re0,D1Re0,#1
103$L29:
104
105 GETL D0FrT,D1RtP,[A0StP+#(-16)]
106 GETL D0.5,D1.5,[A0StP+#(-8)]
107 SUB A0StP,A0StP,#16
108 MOV PC,D1RtP
109 .size _div_s64,.-_div_s64
diff --git a/arch/metag/lib/divsi3.S b/arch/metag/lib/divsi3.S
deleted file mode 100644
index 9e31abefb160..000000000000
--- a/arch/metag/lib/divsi3.S
+++ /dev/null
@@ -1,101 +0,0 @@
1! SPDX-License-Identifier: GPL-2.0
2! Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007
3! Imagination Technologies Ltd
4!
5! Integer divide routines.
6!
7
8 .text
9 .global ___udivsi3
10 .type ___udivsi3,function
11 .align 2
12___udivsi3:
13!!
14!! Since core is signed divide case, just set control variable
15!!
16 MOV D1Re0,D0Ar2 ! Au already in A1Ar1, Bu -> D1Re0
17 MOV D0Re0,#0 ! Result is 0
18 MOV D0Ar4,#0 ! Return positive result
19 B $LIDMCUStart
20 .size ___udivsi3,.-___udivsi3
21
22!!
23!! 32-bit division signed i/p - passed signed 32-bit numbers
24!!
25 .global ___divsi3
26 .type ___divsi3,function
27 .align 2
28___divsi3:
29!!
30!! A already in D1Ar1, B already in D0Ar2 -> make B abs(B)
31!!
32 MOV D1Re0,D0Ar2 ! A already in A1Ar1, B -> D1Re0
33 MOV D0Re0,#0 ! Result is 0
34 XOR D0Ar4,D1Ar1,D1Re0 ! D0Ar4 -ive if result is -ive
35 ABS D1Ar1,D1Ar1 ! abs(A) -> Au
36 ABS D1Re0,D1Re0 ! abs(B) -> Bu
37$LIDMCUStart:
38 CMP D1Ar1,D1Re0 ! Is ( Au > Bu )?
39 LSR D1Ar3,D1Ar1,#2 ! Calculate (Au & (~3)) >> 2
40 CMPHI D1Re0,D1Ar3 ! OR ( (Au & (~3)) <= (Bu << 2) )?
41 LSLSHI D1Ar3,D1Re0,#1 ! Buq = Bu << 1
42 BLS $LIDMCUSetup ! Yes: Do normal divide
43!!
44!! Quick divide setup can assume that CurBit only needs to start at 2
45!!
46$LIDMCQuick:
47 CMP D1Ar1,D1Ar3 ! ( A >= Buq )?
48 ADDCC D0Re0,D0Re0,#2 ! If yes result += 2
49 SUBCC D1Ar1,D1Ar1,D1Ar3 ! and A -= Buq
50 CMP D1Ar1,D1Re0 ! ( A >= Bu )?
51 ADDCC D0Re0,D0Re0,#1 ! If yes result += 1
52 SUBCC D1Ar1,D1Ar1,D1Re0 ! and A -= Bu
53 ORS D0Ar4,D0Ar4,D0Ar4 ! Return neg result?
54 NEG D0Ar2,D0Re0 ! Calculate neg result
55 MOVMI D0Re0,D0Ar2 ! Yes: Take neg result
56$LIDMCRet:
57 MOV PC,D1RtP
58!!
59!! Setup for general unsigned divide code
60!!
61!! D0Re0 is used to form the result, already set to Zero
62!! D1Re0 is the input Bu value, this gets trashed
63!! D0Ar6 is curbit which is set to 1 at the start and shifted up
64!! D0Ar4 is negative if we should return a negative result
65!! D1Ar1 is the input Au value, eventually this holds the remainder
66!!
67$LIDMCUSetup:
68 CMP D1Ar1,D1Re0 ! Is ( Au < Bu )?
69 MOV D0Ar6,#1 ! Set curbit to 1
70 BCS $LIDMCRet ! Yes: Return 0 remainder Au
71!!
72!! Calculate alignment using FFB instruction
73!!
74 FFB D1Ar5,D1Ar1 ! Find first bit of Au
75 ANDN D1Ar5,D1Ar5,#31 ! Handle exceptional case.
76 ORN D1Ar5,D1Ar5,#31 ! if N bit set, set to 31
77 FFB D1Ar3,D1Re0 ! Find first bit of Bu
78 ANDN D1Ar3,D1Ar3,#31 ! Handle exceptional case.
79 ORN D1Ar3,D1Ar3,#31 ! if N bit set, set to 31
80 SUBS D1Ar3,D1Ar5,D1Ar3 ! calculate diff, ffbA - ffbB
81 MOV D0Ar2,D1Ar3 ! copy into bank 0
82 LSLGT D1Re0,D1Re0,D1Ar3 ! ( > 0) ? left shift B
83 LSLGT D0Ar6,D0Ar6,D0Ar2 ! ( > 0) ? left shift curbit
84!!
85!! Now we start the divide proper, logic is
86!!
87!! if ( A >= B ) add curbit to result and subtract B from A
88!! shift curbit and B down by 1 in either case
89!!
90$LIDMCLoop:
91 CMP D1Ar1, D1Re0 ! ( A >= B )?
92 ADDCC D0Re0, D0Re0, D0Ar6 ! If yes result += curbit
93 SUBCC D1Ar1, D1Ar1, D1Re0 ! and A -= B
94 LSRS D0Ar6, D0Ar6, #1 ! Shift down curbit, is it zero?
95 LSR D1Re0, D1Re0, #1 ! Shift down B
96 BNZ $LIDMCLoop ! Was single bit in curbit lost?
97 ORS D0Ar4,D0Ar4,D0Ar4 ! Return neg result?
98 NEG D0Ar2,D0Re0 ! Calculate neg result
99 MOVMI D0Re0,D0Ar2 ! Yes: Take neg result
100 MOV PC,D1RtP
101 .size ___divsi3,.-___divsi3
diff --git a/arch/metag/lib/ip_fast_csum.S b/arch/metag/lib/ip_fast_csum.S
deleted file mode 100644
index 441f489d6a81..000000000000
--- a/arch/metag/lib/ip_fast_csum.S
+++ /dev/null
@@ -1,33 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3 .text
4/*
5 * This is a version of ip_compute_csum() optimized for IP headers,
6 * which always checksum on 4 octet boundaries.
7 *
8 * extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
9 *
10 */
11 .global _ip_fast_csum
12 .type _ip_fast_csum,function
13_ip_fast_csum:
14 !! TXRPT needs loops - 1
15 SUBS TXRPT,D0Ar2,#1
16 MOV D0Re0,#0
17 BLO $Lfast_csum_exit
18$Lfast_csum_loop:
19 GETD D1Ar3,[D1Ar1++]
20 ADDS D0Re0,D0Re0,D1Ar3
21 ADDCS D0Re0,D0Re0,#1
22 BR $Lfast_csum_loop
23 LSR D0Ar4,D0Re0,#16
24 AND D0Re0,D0Re0,#0xffff
25 AND D0Ar4,D0Ar4,#0xffff
26 ADD D0Re0,D0Re0,D0Ar4
27 LSR D0Ar4,D0Re0,#16
28 ADD D0Re0,D0Re0,D0Ar4
29 XOR D0Re0,D0Re0,#-1
30 AND D0Re0,D0Re0,#0xffff
31$Lfast_csum_exit:
32 MOV PC,D1RtP
33 .size _ip_fast_csum,.-_ip_fast_csum
diff --git a/arch/metag/lib/lshrdi3.S b/arch/metag/lib/lshrdi3.S
deleted file mode 100644
index cf7ffc9b377f..000000000000
--- a/arch/metag/lib/lshrdi3.S
+++ /dev/null
@@ -1,34 +0,0 @@
1! SPDX-License-Identifier: GPL-2.0
2! Copyright (C) 2012 by Imagination Technologies Ltd.
3!
4! 64-bit logical shift right routine.
5!
6
7 .text
8 .global ___lshrdi3
9 .type ___lshrdi3,function
10
11___lshrdi3:
12 MOV D0Re0,D0Ar2
13 MOV D1Re0,D1Ar1
14 CMP D1Ar3,#0 ! COUNT == 0
15 MOVEQ PC,D1RtP ! Yes, return
16
17 MOV D0Ar4,D1Ar3
18 SUBS D1Ar3,D1Ar3,#32 ! N = COUNT - 32
19 BGE $L30
20
21!! Shift < 32
22 NEG D1Ar3,D1Ar3 ! N = - N
23 LSR D0Re0,D0Re0,D0Ar4 ! LO = LO >> COUNT
24 LSL D0Ar6,D1Re0,D1Ar3 ! TMP= HI << -(COUNT - 32)
25 OR D0Re0,D0Re0,D0Ar6 ! LO = LO | TMP
26 SWAP D1Ar3,D0Ar4
27 LSR D1Re0,D1Re0,D1Ar3 ! HI = HI >> COUNT
28 MOV PC,D1RtP
29$L30:
30!! Shift >= 32
31 LSR D0Re0,D1Re0,D1Ar3 ! LO = HI >> N
32 MOV D1Re0,#0 ! HI = 0
33 MOV PC,D1RtP
34 .size ___lshrdi3,.-___lshrdi3
diff --git a/arch/metag/lib/memcpy.S b/arch/metag/lib/memcpy.S
deleted file mode 100644
index c2e8395f9456..000000000000
--- a/arch/metag/lib/memcpy.S
+++ /dev/null
@@ -1,186 +0,0 @@
1! SPDX-License-Identifier: GPL-2.0
2! Copyright (C) 2008-2012 Imagination Technologies Ltd.
3
4 .text
5 .global _memcpy
6 .type _memcpy,function
7! D1Ar1 dst
8! D0Ar2 src
9! D1Ar3 cnt
10! D0Re0 dst
11_memcpy:
12 CMP D1Ar3, #16
13 MOV A1.2, D0Ar2 ! source pointer
14 MOV A0.2, D1Ar1 ! destination pointer
15 MOV A0.3, D1Ar1 ! for return value
16! If there are less than 16 bytes to copy use the byte copy loop
17 BGE $Llong_copy
18
19$Lbyte_copy:
20! Simply copy a byte at a time
21 SUBS TXRPT, D1Ar3, #1
22 BLT $Lend
23$Lloop_byte:
24 GETB D1Re0, [A1.2++]
25 SETB [A0.2++], D1Re0
26 BR $Lloop_byte
27
28$Lend:
29! Finally set return value and return
30 MOV D0Re0, A0.3
31 MOV PC, D1RtP
32
33$Llong_copy:
34 ANDS D1Ar5, D1Ar1, #7 ! test destination alignment
35 BZ $Laligned_dst
36
37! The destination address is not 8 byte aligned. We will copy bytes from
38! the source to the destination until the remaining data has an 8 byte
39! destination address alignment (i.e we should never copy more than 7
40! bytes here).
41$Lalign_dst:
42 GETB D0Re0, [A1.2++]
43 ADD D1Ar5, D1Ar5, #1 ! dest is aligned when D1Ar5 reaches #8
44 SUB D1Ar3, D1Ar3, #1 ! decrement count of remaining bytes
45 SETB [A0.2++], D0Re0
46 CMP D1Ar5, #8
47 BNE $Lalign_dst
48
49! We have at least (16 - 7) = 9 bytes to copy - calculate the number of 8 byte
50! blocks, then jump to the unaligned copy loop or fall through to the aligned
51! copy loop as appropriate.
52$Laligned_dst:
53 MOV D0Ar4, A1.2
54 LSR D1Ar5, D1Ar3, #3 ! D1Ar5 = number of 8 byte blocks
55 ANDS D0Ar4, D0Ar4, #7 ! test source alignment
56 BNZ $Lunaligned_copy ! if unaligned, use unaligned copy loop
57
58! Both source and destination are 8 byte aligned - the easy case.
59$Laligned_copy:
60 LSRS D1Ar5, D1Ar3, #5 ! D1Ar5 = number of 32 byte blocks
61 BZ $Lbyte_copy
62 SUB TXRPT, D1Ar5, #1
63
64$Laligned_32:
65 GETL D0Re0, D1Re0, [A1.2++]
66 GETL D0Ar6, D1Ar5, [A1.2++]
67 SETL [A0.2++], D0Re0, D1Re0
68 SETL [A0.2++], D0Ar6, D1Ar5
69 GETL D0Re0, D1Re0, [A1.2++]
70 GETL D0Ar6, D1Ar5, [A1.2++]
71 SETL [A0.2++], D0Re0, D1Re0
72 SETL [A0.2++], D0Ar6, D1Ar5
73 BR $Laligned_32
74
75! If there are any remaining bytes use the byte copy loop, otherwise we are done
76 ANDS D1Ar3, D1Ar3, #0x1f
77 BNZ $Lbyte_copy
78 B $Lend
79
80! The destination is 8 byte aligned but the source is not, and there are 8
81! or more bytes to be copied.
82$Lunaligned_copy:
83! Adjust the source pointer (A1.2) to the 8 byte boundary before its
84! current value
85 MOV D0Ar4, A1.2
86 MOV D0Ar6, A1.2
87 ANDMB D0Ar4, D0Ar4, #0xfff8
88 MOV A1.2, D0Ar4
89! Save the number of bytes of mis-alignment in D0Ar4 for use later
90 SUBS D0Ar6, D0Ar6, D0Ar4
91 MOV D0Ar4, D0Ar6
92! if there is no mis-alignment after all, use the aligned copy loop
93 BZ $Laligned_copy
94
95! prefetch 8 bytes
96 GETL D0Re0, D1Re0, [A1.2]
97
98 SUB TXRPT, D1Ar5, #1
99
100! There are 3 mis-alignment cases to be considered. Less than 4 bytes, exactly
101! 4 bytes, and more than 4 bytes.
102 CMP D0Ar6, #4
103 BLT $Lunaligned_1_2_3 ! use 1-3 byte mis-alignment loop
104 BZ $Lunaligned_4 ! use 4 byte mis-alignment loop
105
106! The mis-alignment is more than 4 bytes
107$Lunaligned_5_6_7:
108 SUB D0Ar6, D0Ar6, #4
109! Calculate the bit offsets required for the shift operations necesssary
110! to align the data.
111! D0Ar6 = bit offset, D1Ar5 = (32 - bit offset)
112 MULW D0Ar6, D0Ar6, #8
113 MOV D1Ar5, #32
114 SUB D1Ar5, D1Ar5, D0Ar6
115! Move data 4 bytes before we enter the main loop
116 MOV D0Re0, D1Re0
117
118$Lloop_5_6_7:
119 GETL D0Ar2, D1Ar1, [++A1.2]
120! form 64-bit data in D0Re0, D1Re0
121 LSR D0Re0, D0Re0, D0Ar6
122 MOV D1Re0, D0Ar2
123 LSL D1Re0, D1Re0, D1Ar5
124 ADD D0Re0, D0Re0, D1Re0
125
126 LSR D0Ar2, D0Ar2, D0Ar6
127 LSL D1Re0, D1Ar1, D1Ar5
128 ADD D1Re0, D1Re0, D0Ar2
129
130 SETL [A0.2++], D0Re0, D1Re0
131 MOV D0Re0, D1Ar1
132 BR $Lloop_5_6_7
133
134 B $Lunaligned_end
135
136$Lunaligned_1_2_3:
137! Calculate the bit offsets required for the shift operations necesssary
138! to align the data.
139! D0Ar6 = bit offset, D1Ar5 = (32 - bit offset)
140 MULW D0Ar6, D0Ar6, #8
141 MOV D1Ar5, #32
142 SUB D1Ar5, D1Ar5, D0Ar6
143
144$Lloop_1_2_3:
145! form 64-bit data in D0Re0,D1Re0
146 LSR D0Re0, D0Re0, D0Ar6
147 LSL D1Ar1, D1Re0, D1Ar5
148 ADD D0Re0, D0Re0, D1Ar1
149 MOV D0Ar2, D1Re0
150 LSR D0FrT, D0Ar2, D0Ar6
151 GETL D0Ar2, D1Ar1, [++A1.2]
152
153 MOV D1Re0, D0Ar2
154 LSL D1Re0, D1Re0, D1Ar5
155 ADD D1Re0, D1Re0, D0FrT
156
157 SETL [A0.2++], D0Re0, D1Re0
158 MOV D0Re0, D0Ar2
159 MOV D1Re0, D1Ar1
160 BR $Lloop_1_2_3
161
162 B $Lunaligned_end
163
164! The 4 byte mis-alignment case - this does not require any shifting, just a
165! shuffling of registers.
166$Lunaligned_4:
167 MOV D0Re0, D1Re0
168$Lloop_4:
169 GETL D0Ar2, D1Ar1, [++A1.2]
170 MOV D1Re0, D0Ar2
171 SETL [A0.2++], D0Re0, D1Re0
172 MOV D0Re0, D1Ar1
173 BR $Lloop_4
174
175$Lunaligned_end:
176! If there are no remaining bytes to copy, we are done.
177 ANDS D1Ar3, D1Ar3, #7
178 BZ $Lend
179! Re-adjust the source pointer (A1.2) back to the actual (unaligned) byte
180! address of the remaining bytes, and fall through to the byte copy loop.
181 MOV D0Ar6, A1.2
182 ADD D1Ar5, D0Ar4, D0Ar6
183 MOV A1.2, D1Ar5
184 B $Lbyte_copy
185
186 .size _memcpy,.-_memcpy
diff --git a/arch/metag/lib/memmove.S b/arch/metag/lib/memmove.S
deleted file mode 100644
index 934abda0e680..000000000000
--- a/arch/metag/lib/memmove.S
+++ /dev/null
@@ -1,346 +0,0 @@
1! SPDX-License-Identifier: GPL-2.0
2! Copyright (C) 2008-2012 Imagination Technologies Ltd.
3
4 .text
5 .global _memmove
6 .type _memmove,function
7! D1Ar1 dst
8! D0Ar2 src
9! D1Ar3 cnt
10! D0Re0 dst
11_memmove:
12 CMP D1Ar3, #0
13 MOV D0Re0, D1Ar1
14 BZ $LEND2
15 MSETL [A0StP], D0.5, D0.6, D0.7
16 MOV D1Ar5, D0Ar2
17 CMP D1Ar1, D1Ar5
18 BLT $Lforwards_copy
19 SUB D0Ar4, D1Ar1, D1Ar3
20 ADD D0Ar4, D0Ar4, #1
21 CMP D0Ar2, D0Ar4
22 BLT $Lforwards_copy
23 ! should copy backwards
24 MOV D1Re0, D0Ar2
25 ! adjust pointer to the end of mem
26 ADD D0Ar2, D1Re0, D1Ar3
27 ADD D1Ar1, D1Ar1, D1Ar3
28
29 MOV A1.2, D0Ar2
30 MOV A0.2, D1Ar1
31 CMP D1Ar3, #8
32 BLT $Lbbyte_loop
33
34 MOV D0Ar4, D0Ar2
35 MOV D1Ar5, D1Ar1
36
37 ! test 8 byte alignment
38 ANDS D1Ar5, D1Ar5, #7
39 BNE $Lbdest_unaligned
40
41 ANDS D0Ar4, D0Ar4, #7
42 BNE $Lbsrc_unaligned
43
44 LSR D1Ar5, D1Ar3, #3
45
46$Lbaligned_loop:
47 GETL D0Re0, D1Re0, [--A1.2]
48 SETL [--A0.2], D0Re0, D1Re0
49 SUBS D1Ar5, D1Ar5, #1
50 BNE $Lbaligned_loop
51
52 ANDS D1Ar3, D1Ar3, #7
53 BZ $Lbbyte_loop_exit
54$Lbbyte_loop:
55 GETB D1Re0, [--A1.2]
56 SETB [--A0.2], D1Re0
57 SUBS D1Ar3, D1Ar3, #1
58 BNE $Lbbyte_loop
59$Lbbyte_loop_exit:
60 MOV D0Re0, A0.2
61$LEND:
62 SUB A0.2, A0StP, #24
63 MGETL D0.5, D0.6, D0.7, [A0.2]
64 SUB A0StP, A0StP, #24
65$LEND2:
66 MOV PC, D1RtP
67
68$Lbdest_unaligned:
69 GETB D0Re0, [--A1.2]
70 SETB [--A0.2], D0Re0
71 SUBS D1Ar5, D1Ar5, #1
72 SUB D1Ar3, D1Ar3, #1
73 BNE $Lbdest_unaligned
74 CMP D1Ar3, #8
75 BLT $Lbbyte_loop
76$Lbsrc_unaligned:
77 LSR D1Ar5, D1Ar3, #3
78 ! adjust A1.2
79 MOV D0Ar4, A1.2
80 ! save original address
81 MOV D0Ar6, A1.2
82
83 ADD D0Ar4, D0Ar4, #7
84 ANDMB D0Ar4, D0Ar4, #0xfff8
85 ! new address is the 8-byte aligned one above the original
86 MOV A1.2, D0Ar4
87
88 ! A0.2 dst 64-bit is aligned
89 ! measure the gap size
90 SUB D0Ar6, D0Ar4, D0Ar6
91 MOVS D0Ar4, D0Ar6
92 ! keep this information for the later adjustment
93 ! both aligned
94 BZ $Lbaligned_loop
95
96 ! prefetch
97 GETL D0Re0, D1Re0, [--A1.2]
98
99 CMP D0Ar6, #4
100 BLT $Lbunaligned_1_2_3
101 ! 32-bit aligned
102 BZ $Lbaligned_4
103
104 SUB D0Ar6, D0Ar6, #4
105 ! D1.6 stores the gap size in bits
106 MULW D1.6, D0Ar6, #8
107 MOV D0.6, #32
108 ! D0.6 stores the complement of the gap size
109 SUB D0.6, D0.6, D1.6
110
111$Lbunaligned_5_6_7:
112 GETL D0.7, D1.7, [--A1.2]
113 ! form 64-bit data in D0Re0, D1Re0
114 MOV D1Re0, D0Re0
115 ! D1Re0 << gap-size
116 LSL D1Re0, D1Re0, D1.6
117 MOV D0Re0, D1.7
118 ! D0Re0 >> complement
119 LSR D0Re0, D0Re0, D0.6
120 MOV D1.5, D0Re0
121 ! combine the both
122 ADD D1Re0, D1Re0, D1.5
123
124 MOV D1.5, D1.7
125 LSL D1.5, D1.5, D1.6
126 MOV D0Re0, D0.7
127 LSR D0Re0, D0Re0, D0.6
128 MOV D0.5, D1.5
129 ADD D0Re0, D0Re0, D0.5
130
131 SETL [--A0.2], D0Re0, D1Re0
132 MOV D0Re0, D0.7
133 MOV D1Re0, D1.7
134 SUBS D1Ar5, D1Ar5, #1
135 BNE $Lbunaligned_5_6_7
136
137 ANDS D1Ar3, D1Ar3, #7
138 BZ $Lbbyte_loop_exit
139 ! Adjust A1.2
140 ! A1.2 <- A1.2 +8 - gapsize
141 ADD A1.2, A1.2, #8
142 SUB A1.2, A1.2, D0Ar4
143 B $Lbbyte_loop
144
145$Lbunaligned_1_2_3:
146 MULW D1.6, D0Ar6, #8
147 MOV D0.6, #32
148 SUB D0.6, D0.6, D1.6
149
150$Lbunaligned_1_2_3_loop:
151 GETL D0.7, D1.7, [--A1.2]
152 ! form 64-bit data in D0Re0, D1Re0
153 LSL D1Re0, D1Re0, D1.6
154 ! save D0Re0 for later use
155 MOV D0.5, D0Re0
156 LSR D0Re0, D0Re0, D0.6
157 MOV D1.5, D0Re0
158 ADD D1Re0, D1Re0, D1.5
159
160 ! orignal data in D0Re0
161 MOV D1.5, D0.5
162 LSL D1.5, D1.5, D1.6
163 MOV D0Re0, D1.7
164 LSR D0Re0, D0Re0, D0.6
165 MOV D0.5, D1.5
166 ADD D0Re0, D0Re0, D0.5
167
168 SETL [--A0.2], D0Re0, D1Re0
169 MOV D0Re0, D0.7
170 MOV D1Re0, D1.7
171 SUBS D1Ar5, D1Ar5, #1
172 BNE $Lbunaligned_1_2_3_loop
173
174 ANDS D1Ar3, D1Ar3, #7
175 BZ $Lbbyte_loop_exit
176 ! Adjust A1.2
177 ADD A1.2, A1.2, #8
178 SUB A1.2, A1.2, D0Ar4
179 B $Lbbyte_loop
180
181$Lbaligned_4:
182 GETL D0.7, D1.7, [--A1.2]
183 MOV D1Re0, D0Re0
184 MOV D0Re0, D1.7
185 SETL [--A0.2], D0Re0, D1Re0
186 MOV D0Re0, D0.7
187 MOV D1Re0, D1.7
188 SUBS D1Ar5, D1Ar5, #1
189 BNE $Lbaligned_4
190 ANDS D1Ar3, D1Ar3, #7
191 BZ $Lbbyte_loop_exit
192 ! Adjust A1.2
193 ADD A1.2, A1.2, #8
194 SUB A1.2, A1.2, D0Ar4
195 B $Lbbyte_loop
196
197$Lforwards_copy:
198 MOV A1.2, D0Ar2
199 MOV A0.2, D1Ar1
200 CMP D1Ar3, #8
201 BLT $Lfbyte_loop
202
203 MOV D0Ar4, D0Ar2
204 MOV D1Ar5, D1Ar1
205
206 ANDS D1Ar5, D1Ar5, #7
207 BNE $Lfdest_unaligned
208
209 ANDS D0Ar4, D0Ar4, #7
210 BNE $Lfsrc_unaligned
211
212 LSR D1Ar5, D1Ar3, #3
213
214$Lfaligned_loop:
215 GETL D0Re0, D1Re0, [A1.2++]
216 SUBS D1Ar5, D1Ar5, #1
217 SETL [A0.2++], D0Re0, D1Re0
218 BNE $Lfaligned_loop
219
220 ANDS D1Ar3, D1Ar3, #7
221 BZ $Lfbyte_loop_exit
222$Lfbyte_loop:
223 GETB D1Re0, [A1.2++]
224 SETB [A0.2++], D1Re0
225 SUBS D1Ar3, D1Ar3, #1
226 BNE $Lfbyte_loop
227$Lfbyte_loop_exit:
228 MOV D0Re0, D1Ar1
229 B $LEND
230
231$Lfdest_unaligned:
232 GETB D0Re0, [A1.2++]
233 ADD D1Ar5, D1Ar5, #1
234 SUB D1Ar3, D1Ar3, #1
235 SETB [A0.2++], D0Re0
236 CMP D1Ar5, #8
237 BNE $Lfdest_unaligned
238 CMP D1Ar3, #8
239 BLT $Lfbyte_loop
240$Lfsrc_unaligned:
241 ! adjust A1.2
242 LSR D1Ar5, D1Ar3, #3
243
244 MOV D0Ar4, A1.2
245 MOV D0Ar6, A1.2
246 ANDMB D0Ar4, D0Ar4, #0xfff8
247 MOV A1.2, D0Ar4
248
249 ! A0.2 dst 64-bit is aligned
250 SUB D0Ar6, D0Ar6, D0Ar4
251 ! keep the information for the later adjustment
252 MOVS D0Ar4, D0Ar6
253
254 ! both aligned
255 BZ $Lfaligned_loop
256
257 ! prefetch
258 GETL D0Re0, D1Re0, [A1.2]
259
260 CMP D0Ar6, #4
261 BLT $Lfunaligned_1_2_3
262 BZ $Lfaligned_4
263
264 SUB D0Ar6, D0Ar6, #4
265 MULW D0.6, D0Ar6, #8
266 MOV D1.6, #32
267 SUB D1.6, D1.6, D0.6
268
269$Lfunaligned_5_6_7:
270 GETL D0.7, D1.7, [++A1.2]
271 ! form 64-bit data in D0Re0, D1Re0
272 MOV D0Re0, D1Re0
273 LSR D0Re0, D0Re0, D0.6
274 MOV D1Re0, D0.7
275 LSL D1Re0, D1Re0, D1.6
276 MOV D0.5, D1Re0
277 ADD D0Re0, D0Re0, D0.5
278
279 MOV D0.5, D0.7
280 LSR D0.5, D0.5, D0.6
281 MOV D1Re0, D1.7
282 LSL D1Re0, D1Re0, D1.6
283 MOV D1.5, D0.5
284 ADD D1Re0, D1Re0, D1.5
285
286 SETL [A0.2++], D0Re0, D1Re0
287 MOV D0Re0, D0.7
288 MOV D1Re0, D1.7
289 SUBS D1Ar5, D1Ar5, #1
290 BNE $Lfunaligned_5_6_7
291
292 ANDS D1Ar3, D1Ar3, #7
293 BZ $Lfbyte_loop_exit
294 ! Adjust A1.2
295 ADD A1.2, A1.2, D0Ar4
296 B $Lfbyte_loop
297
298$Lfunaligned_1_2_3:
299 MULW D0.6, D0Ar6, #8
300 MOV D1.6, #32
301 SUB D1.6, D1.6, D0.6
302
303$Lfunaligned_1_2_3_loop:
304 GETL D0.7, D1.7, [++A1.2]
305 ! form 64-bit data in D0Re0, D1Re0
306 LSR D0Re0, D0Re0, D0.6
307 MOV D1.5, D1Re0
308 LSL D1Re0, D1Re0, D1.6
309 MOV D0.5, D1Re0
310 ADD D0Re0, D0Re0, D0.5
311
312 MOV D0.5, D1.5
313 LSR D0.5, D0.5, D0.6
314 MOV D1Re0, D0.7
315 LSL D1Re0, D1Re0, D1.6
316 MOV D1.5, D0.5
317 ADD D1Re0, D1Re0, D1.5
318
319 SETL [A0.2++], D0Re0, D1Re0
320 MOV D0Re0, D0.7
321 MOV D1Re0, D1.7
322 SUBS D1Ar5, D1Ar5, #1
323 BNE $Lfunaligned_1_2_3_loop
324
325 ANDS D1Ar3, D1Ar3, #7
326 BZ $Lfbyte_loop_exit
327 ! Adjust A1.2
328 ADD A1.2, A1.2, D0Ar4
329 B $Lfbyte_loop
330
331$Lfaligned_4:
332 GETL D0.7, D1.7, [++A1.2]
333 MOV D0Re0, D1Re0
334 MOV D1Re0, D0.7
335 SETL [A0.2++], D0Re0, D1Re0
336 MOV D0Re0, D0.7
337 MOV D1Re0, D1.7
338 SUBS D1Ar5, D1Ar5, #1
339 BNE $Lfaligned_4
340 ANDS D1Ar3, D1Ar3, #7
341 BZ $Lfbyte_loop_exit
342 ! Adjust A1.2
343 ADD A1.2, A1.2, D0Ar4
344 B $Lfbyte_loop
345
346 .size _memmove,.-_memmove
diff --git a/arch/metag/lib/memset.S b/arch/metag/lib/memset.S
deleted file mode 100644
index 6ee246d831c7..000000000000
--- a/arch/metag/lib/memset.S
+++ /dev/null
@@ -1,87 +0,0 @@
1! SPDX-License-Identifier: GPL-2.0
2! Copyright (C) 2008-2012 Imagination Technologies Ltd.
3
4 .text
5 .global _memset
6 .type _memset,function
7! D1Ar1 dst
8! D0Ar2 c
9! D1Ar3 cnt
10! D0Re0 dst
11_memset:
12 AND D0Ar2,D0Ar2,#0xFF ! Ensure a byte input value
13 MULW D0Ar2,D0Ar2,#0x0101 ! Duplicate byte value into 0-15
14 ANDS D0Ar4,D1Ar1,#7 ! Extract bottom LSBs of dst
15 LSL D0Re0,D0Ar2,#16 ! Duplicate byte value into 16-31
16 ADD A0.2,D0Ar2,D0Re0 ! Duplicate byte value into 4 (A0.2)
17 MOV D0Re0,D1Ar1 ! Return dst
18 BZ $LLongStub ! if start address is aligned
19 ! start address is not aligned on an 8 byte boundary, so we
20 ! need the number of bytes up to the next 8 byte address
21 ! boundary, or the length of the string if less than 8, in D1Ar5
22 MOV D0Ar2,#8 ! Need 8 - N in D1Ar5 ...
23 SUB D1Ar5,D0Ar2,D0Ar4 ! ... subtract N
24 CMP D1Ar3,D1Ar5
25 MOVMI D1Ar5,D1Ar3
26 B $LByteStub ! dst is mis-aligned, do $LByteStub
27
28!
29! Preamble to LongLoop which generates 4*8 bytes per interation (5 cycles)
30!
31$LLongStub:
32 LSRS D0Ar2,D1Ar3,#5
33 AND D1Ar3,D1Ar3,#0x1F
34 MOV A1.2,A0.2
35 BEQ $LLongishStub
36 SUB TXRPT,D0Ar2,#1
37 CMP D1Ar3,#0
38$LLongLoop:
39 SETL [D1Ar1++],A0.2,A1.2
40 SETL [D1Ar1++],A0.2,A1.2
41 SETL [D1Ar1++],A0.2,A1.2
42 SETL [D1Ar1++],A0.2,A1.2
43 BR $LLongLoop
44 BZ $Lexit
45!
46! Preamble to LongishLoop which generates 1*8 bytes per interation (2 cycles)
47!
48$LLongishStub:
49 LSRS D0Ar2,D1Ar3,#3
50 AND D1Ar3,D1Ar3,#0x7
51 MOV D1Ar5,D1Ar3
52 BEQ $LByteStub
53 SUB TXRPT,D0Ar2,#1
54 CMP D1Ar3,#0
55$LLongishLoop:
56 SETL [D1Ar1++],A0.2,A1.2
57 BR $LLongishLoop
58 BZ $Lexit
59!
60! This does a byte structured burst of up to 7 bytes
61!
62! D1Ar1 should point to the location required
63! D1Ar3 should be the remaining total byte count
64! D1Ar5 should be burst size (<= D1Ar3)
65!
66$LByteStub:
67 SUBS D1Ar3,D1Ar3,D1Ar5 ! Reduce count
68 ADD D1Ar1,D1Ar1,D1Ar5 ! Advance pointer to end of area
69 MULW D1Ar5,D1Ar5,#4 ! Scale to (1*4), (2*4), (3*4)
70 SUB D1Ar5,D1Ar5,#(8*4) ! Rebase to -(7*4), -(6*4), -(5*4), ...
71 MOV A1.2,D1Ar5
72 SUB PC,CPC1,A1.2 ! Jump into table below
73 SETB [D1Ar1+#(-7)],A0.2
74 SETB [D1Ar1+#(-6)],A0.2
75 SETB [D1Ar1+#(-5)],A0.2
76 SETB [D1Ar1+#(-4)],A0.2
77 SETB [D1Ar1+#(-3)],A0.2
78 SETB [D1Ar1+#(-2)],A0.2
79 SETB [D1Ar1+#(-1)],A0.2
80!
81! Return if all data has been output, otherwise do $LLongStub
82!
83 BNZ $LLongStub
84$Lexit:
85 MOV PC,D1RtP
86 .size _memset,.-_memset
87
diff --git a/arch/metag/lib/modsi3.S b/arch/metag/lib/modsi3.S
deleted file mode 100644
index d65a2e5b3154..000000000000
--- a/arch/metag/lib/modsi3.S
+++ /dev/null
@@ -1,39 +0,0 @@
1! SPDX-License-Identifier: GPL-2.0
2! Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007
3! Imagination Technologies Ltd
4!
5! Integer modulus routines.
6!
7!!
8!! 32-bit modulus unsigned i/p - passed unsigned 32-bit numbers
9!!
10 .text
11 .global ___umodsi3
12 .type ___umodsi3,function
13 .align 2
14___umodsi3:
15 MOV D0FrT,D1RtP ! Save original return address
16 CALLR D1RtP,___udivsi3
17 MOV D1RtP,D0FrT ! Recover return address
18 MOV D0Re0,D1Ar1 ! Return remainder
19 MOV PC,D1RtP
20 .size ___umodsi3,.-___umodsi3
21
22!!
23!! 32-bit modulus signed i/p - passed signed 32-bit numbers
24!!
25 .global ___modsi3
26 .type ___modsi3,function
27 .align 2
28___modsi3:
29 MOV D0FrT,D1RtP ! Save original return address
30 MOV A0.2,D1Ar1 ! Save A in A0.2
31 CALLR D1RtP,___divsi3
32 MOV D1RtP,D0FrT ! Recover return address
33 MOV D1Re0,A0.2 ! Recover A
34 MOV D0Re0,D1Ar1 ! Return remainder
35 ORS D1Re0,D1Re0,D1Re0 ! Was A negative?
36 NEG D1Ar1,D1Ar1 ! Negate remainder
37 MOVMI D0Re0,D1Ar1 ! Return neg remainder
38 MOV PC, D1RtP
39 .size ___modsi3,.-___modsi3
diff --git a/arch/metag/lib/muldi3.S b/arch/metag/lib/muldi3.S
deleted file mode 100644
index 9d106790244d..000000000000
--- a/arch/metag/lib/muldi3.S
+++ /dev/null
@@ -1,45 +0,0 @@
1! SPDX-License-Identifier: GPL-2.0
2! Copyright (C) 2012 by Imagination Technologies Ltd.
3!
4! 64-bit multiply routine.
5!
6
7!
8! 64-bit signed/unsigned multiply
9!
10! A = D1Ar1:D0Ar2 = a 2^48 + b 2^32 + c 2^16 + d 2^0
11!
12! B = D1Ar3:D0Ar4 = w 2^48 + x 2^32 + y 2^16 + z 2^0
13!
14 .text
15 .global ___muldi3
16 .type ___muldi3,function
17
18___muldi3:
19 MULD D1Re0,D1Ar1,D0Ar4 ! (a 2^48 + b 2^32)(y 2^16 + z 2^0)
20 MULD D0Re0,D0Ar2,D1Ar3 ! (w 2^48 + x 2^32)(c 2^16 + d 2^0)
21 ADD D1Re0,D1Re0,D0Re0
22
23 MULW D0Re0,D0Ar2,D0Ar4 ! (d 2^0) * (z 2^0)
24
25 RTDW D0Ar2,D0Ar2
26 MULW D0Ar6,D0Ar2,D0Ar4 ! (c 2^16)(z 2^0)
27 LSR D1Ar5,D0Ar6,#16
28 LSL D0Ar6,D0Ar6,#16
29 ADDS D0Re0,D0Re0,D0Ar6
30 ADDCS D1Re0,D1Re0,#1
31 RTDW D0Ar4,D0Ar4
32 ADD D1Re0,D1Re0,D1Ar5
33
34 MULW D0Ar6,D0Ar2,D0Ar4 ! (c 2^16)(y 2^16)
35 ADD D1Re0,D1Re0,D0Ar6
36
37 RTDW D0Ar2,D0Ar2
38 MULW D0Ar6,D0Ar2,D0Ar4 ! (d 2^0)(y 2^16)
39 LSR D1Ar5,D0Ar6,#16
40 LSL D0Ar6,D0Ar6,#16
41 ADDS D0Re0,D0Re0,D0Ar6
42 ADD D1Re0,D1Re0,D1Ar5
43 ADDCS D1Re0,D1Re0,#1
44 MOV PC, D1RtP
45 .size ___muldi3,.-___muldi3
diff --git a/arch/metag/lib/ucmpdi2.S b/arch/metag/lib/ucmpdi2.S
deleted file mode 100644
index 46f5686db8b1..000000000000
--- a/arch/metag/lib/ucmpdi2.S
+++ /dev/null
@@ -1,28 +0,0 @@
1! SPDX-License-Identifier: GPL-2.0
2! Copyright (C) 2012 by Imagination Technologies Ltd.
3!
4! 64-bit unsigned compare routine.
5!
6
7 .text
8 .global ___ucmpdi2
9 .type ___ucmpdi2,function
10
11! low high
12! u64 a (D0Ar2, D1Ar1)
13! u64 b (D0Ar4, D1Ar3)
14___ucmpdi2:
15 ! start at 1 (equal) and conditionally increment or decrement
16 MOV D0Re0,#1
17
18 ! high words
19 CMP D1Ar1,D1Ar3
20 ! or if equal, low words
21 CMPEQ D0Ar2,D0Ar4
22
23 ! unsigned compare
24 SUBLO D0Re0,D0Re0,#1
25 ADDHI D0Re0,D0Re0,#1
26
27 MOV PC,D1RtP
28 .size ___ucmpdi2,.-___ucmpdi2
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
deleted file mode 100644
index a48ef522c02d..000000000000
--- a/arch/metag/lib/usercopy.c
+++ /dev/null
@@ -1,1257 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * User address space access functions.
4 * The non-inlined parts of asm-metag/uaccess.h are here.
5 *
6 * Copyright (C) 2006, Imagination Technologies.
7 * Copyright (C) 2000, Axis Communications AB.
8 *
9 * Written by Hans-Peter Nilsson.
10 * Pieces used from memcpy, originally by Kenny Ranerup long time ago.
11 * Modified for Meta by Will Newton.
12 */
13
14#include <linux/export.h>
15#include <linux/uaccess.h>
16#include <asm/cache.h> /* def of L1_CACHE_BYTES */
17
18#define USE_RAPF
19#define RAPF_MIN_BUF_SIZE (3*L1_CACHE_BYTES)
20
21
22/* The "double write" in this code is because the Meta will not fault
23 * immediately unless the memory pipe is forced to by e.g. a data stall or
24 * another memory op. The second write should be discarded by the write
25 * combiner so should have virtually no cost.
26 */
27
28#define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
29 asm volatile ( \
30 COPY \
31 "1:\n" \
32 " .section .fixup,\"ax\"\n" \
33 FIXUP \
34 " MOVT D1Ar1,#HI(1b)\n" \
35 " JUMP D1Ar1,#LO(1b)\n" \
36 " .previous\n" \
37 " .section __ex_table,\"a\"\n" \
38 TENTRY \
39 " .previous\n" \
40 : "=r" (to), "=r" (from), "=r" (ret) \
41 : "0" (to), "1" (from), "2" (ret) \
42 : "D1Ar1", "memory")
43
44
45#define __asm_copy_to_user_1(to, from, ret) \
46 __asm_copy_user_cont(to, from, ret, \
47 " GETB D1Ar1,[%1++]\n" \
48 " SETB [%0],D1Ar1\n" \
49 "2: SETB [%0++],D1Ar1\n", \
50 "3: ADD %2,%2,#1\n", \
51 " .long 2b,3b\n")
52
53#define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
54 __asm_copy_user_cont(to, from, ret, \
55 " GETW D1Ar1,[%1++]\n" \
56 " SETW [%0],D1Ar1\n" \
57 "2: SETW [%0++],D1Ar1\n" COPY, \
58 "3: ADD %2,%2,#2\n" FIXUP, \
59 " .long 2b,3b\n" TENTRY)
60
61#define __asm_copy_to_user_2(to, from, ret) \
62 __asm_copy_to_user_2x_cont(to, from, ret, "", "", "")
63
64#define __asm_copy_to_user_3(to, from, ret) \
65 __asm_copy_to_user_2x_cont(to, from, ret, \
66 " GETB D1Ar1,[%1++]\n" \
67 " SETB [%0],D1Ar1\n" \
68 "4: SETB [%0++],D1Ar1\n", \
69 "5: ADD %2,%2,#1\n", \
70 " .long 4b,5b\n")
71
72#define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
73 __asm_copy_user_cont(to, from, ret, \
74 " GETD D1Ar1,[%1++]\n" \
75 " SETD [%0],D1Ar1\n" \
76 "2: SETD [%0++],D1Ar1\n" COPY, \
77 "3: ADD %2,%2,#4\n" FIXUP, \
78 " .long 2b,3b\n" TENTRY)
79
80#define __asm_copy_to_user_4(to, from, ret) \
81 __asm_copy_to_user_4x_cont(to, from, ret, "", "", "")
82
83#define __asm_copy_to_user_5(to, from, ret) \
84 __asm_copy_to_user_4x_cont(to, from, ret, \
85 " GETB D1Ar1,[%1++]\n" \
86 " SETB [%0],D1Ar1\n" \
87 "4: SETB [%0++],D1Ar1\n", \
88 "5: ADD %2,%2,#1\n", \
89 " .long 4b,5b\n")
90
91#define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
92 __asm_copy_to_user_4x_cont(to, from, ret, \
93 " GETW D1Ar1,[%1++]\n" \
94 " SETW [%0],D1Ar1\n" \
95 "4: SETW [%0++],D1Ar1\n" COPY, \
96 "5: ADD %2,%2,#2\n" FIXUP, \
97 " .long 4b,5b\n" TENTRY)
98
99#define __asm_copy_to_user_6(to, from, ret) \
100 __asm_copy_to_user_6x_cont(to, from, ret, "", "", "")
101
102#define __asm_copy_to_user_7(to, from, ret) \
103 __asm_copy_to_user_6x_cont(to, from, ret, \
104 " GETB D1Ar1,[%1++]\n" \
105 " SETB [%0],D1Ar1\n" \
106 "6: SETB [%0++],D1Ar1\n", \
107 "7: ADD %2,%2,#1\n", \
108 " .long 6b,7b\n")
109
110#define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
111 __asm_copy_to_user_4x_cont(to, from, ret, \
112 " GETD D1Ar1,[%1++]\n" \
113 " SETD [%0],D1Ar1\n" \
114 "4: SETD [%0++],D1Ar1\n" COPY, \
115 "5: ADD %2,%2,#4\n" FIXUP, \
116 " .long 4b,5b\n" TENTRY)
117
118#define __asm_copy_to_user_8(to, from, ret) \
119 __asm_copy_to_user_8x_cont(to, from, ret, "", "", "")
120
121#define __asm_copy_to_user_9(to, from, ret) \
122 __asm_copy_to_user_8x_cont(to, from, ret, \
123 " GETB D1Ar1,[%1++]\n" \
124 " SETB [%0],D1Ar1\n" \
125 "6: SETB [%0++],D1Ar1\n", \
126 "7: ADD %2,%2,#1\n", \
127 " .long 6b,7b\n")
128
129#define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
130 __asm_copy_to_user_8x_cont(to, from, ret, \
131 " GETW D1Ar1,[%1++]\n" \
132 " SETW [%0],D1Ar1\n" \
133 "6: SETW [%0++],D1Ar1\n" COPY, \
134 "7: ADD %2,%2,#2\n" FIXUP, \
135 " .long 6b,7b\n" TENTRY)
136
137#define __asm_copy_to_user_10(to, from, ret) \
138 __asm_copy_to_user_10x_cont(to, from, ret, "", "", "")
139
140#define __asm_copy_to_user_11(to, from, ret) \
141 __asm_copy_to_user_10x_cont(to, from, ret, \
142 " GETB D1Ar1,[%1++]\n" \
143 " SETB [%0],D1Ar1\n" \
144 "8: SETB [%0++],D1Ar1\n", \
145 "9: ADD %2,%2,#1\n", \
146 " .long 8b,9b\n")
147
148#define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
149 __asm_copy_to_user_8x_cont(to, from, ret, \
150 " GETD D1Ar1,[%1++]\n" \
151 " SETD [%0],D1Ar1\n" \
152 "6: SETD [%0++],D1Ar1\n" COPY, \
153 "7: ADD %2,%2,#4\n" FIXUP, \
154 " .long 6b,7b\n" TENTRY)
155#define __asm_copy_to_user_12(to, from, ret) \
156 __asm_copy_to_user_12x_cont(to, from, ret, "", "", "")
157
158#define __asm_copy_to_user_13(to, from, ret) \
159 __asm_copy_to_user_12x_cont(to, from, ret, \
160 " GETB D1Ar1,[%1++]\n" \
161 " SETB [%0],D1Ar1\n" \
162 "8: SETB [%0++],D1Ar1\n", \
163 "9: ADD %2,%2,#1\n", \
164 " .long 8b,9b\n")
165
166#define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
167 __asm_copy_to_user_12x_cont(to, from, ret, \
168 " GETW D1Ar1,[%1++]\n" \
169 " SETW [%0],D1Ar1\n" \
170 "8: SETW [%0++],D1Ar1\n" COPY, \
171 "9: ADD %2,%2,#2\n" FIXUP, \
172 " .long 8b,9b\n" TENTRY)
173
174#define __asm_copy_to_user_14(to, from, ret) \
175 __asm_copy_to_user_14x_cont(to, from, ret, "", "", "")
176
177#define __asm_copy_to_user_15(to, from, ret) \
178 __asm_copy_to_user_14x_cont(to, from, ret, \
179 " GETB D1Ar1,[%1++]\n" \
180 " SETB [%0],D1Ar1\n" \
181 "10: SETB [%0++],D1Ar1\n", \
182 "11: ADD %2,%2,#1\n", \
183 " .long 10b,11b\n")
184
185#define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
186 __asm_copy_to_user_12x_cont(to, from, ret, \
187 " GETD D1Ar1,[%1++]\n" \
188 " SETD [%0],D1Ar1\n" \
189 "8: SETD [%0++],D1Ar1\n" COPY, \
190 "9: ADD %2,%2,#4\n" FIXUP, \
191 " .long 8b,9b\n" TENTRY)
192
193#define __asm_copy_to_user_16(to, from, ret) \
194 __asm_copy_to_user_16x_cont(to, from, ret, "", "", "")
195
196#define __asm_copy_to_user_8x64(to, from, ret) \
197 asm volatile ( \
198 " GETL D0Ar2,D1Ar1,[%1++]\n" \
199 " SETL [%0],D0Ar2,D1Ar1\n" \
200 "2: SETL [%0++],D0Ar2,D1Ar1\n" \
201 "1:\n" \
202 " .section .fixup,\"ax\"\n" \
203 "3: ADD %2,%2,#8\n" \
204 " MOVT D0Ar2,#HI(1b)\n" \
205 " JUMP D0Ar2,#LO(1b)\n" \
206 " .previous\n" \
207 " .section __ex_table,\"a\"\n" \
208 " .long 2b,3b\n" \
209 " .previous\n" \
210 : "=r" (to), "=r" (from), "=r" (ret) \
211 : "0" (to), "1" (from), "2" (ret) \
212 : "D1Ar1", "D0Ar2", "memory")
213
214/*
215 * optimized copying loop using RAPF when 64 bit aligned
216 *
217 * n will be automatically decremented inside the loop
218 * ret will be left intact. if error occurs we will rewind
219 * so that the original non optimized code will fill up
220 * this value correctly.
221 *
222 * on fault:
223 * > n will hold total number of uncopied bytes
224 *
225 * > {'to','from'} will be rewind back so that
226 * the non-optimized code will do the proper fix up
227 *
228 * DCACHE drops the cacheline which helps in reducing cache
229 * pollution.
230 *
231 * We introduce an extra SETL at the end of the loop to
232 * ensure we don't fall off the loop before we catch all
233 * erros.
234 *
235 * NOTICE:
236 * LSM_STEP in TXSTATUS must be cleared in fix up code.
237 * since we're using M{S,G}ETL, a fault might happen at
238 * any address in the middle of M{S,G}ETL causing
239 * the value of LSM_STEP to be incorrect which can
240 * cause subsequent use of M{S,G}ET{L,D} to go wrong.
241 * ie: if LSM_STEP was 1 when a fault occurs, the
242 * next call to M{S,G}ET{L,D} will skip the first
243 * copy/getting as it think that the first 1 has already
244 * been done.
245 *
246 */
247#define __asm_copy_user_64bit_rapf_loop( \
248 to, from, ret, n, id, FIXUP) \
249 asm volatile ( \
250 ".balign 8\n" \
251 " MOV RAPF, %1\n" \
252 " MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
253 " MOV D0Ar6, #0\n" \
254 " LSR D1Ar5, %3, #6\n" \
255 " SUB TXRPT, D1Ar5, #2\n" \
256 " MOV RAPF, %1\n" \
257 "$Lloop"id":\n" \
258 " ADD RAPF, %1, #64\n" \
259 "21: MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
260 "22: MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
261 "23: SUB %3, %3, #32\n" \
262 "24: MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
263 "25: MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
264 "26: SUB %3, %3, #32\n" \
265 " DCACHE [%1+#-64], D0Ar6\n" \
266 " BR $Lloop"id"\n" \
267 \
268 " MOV RAPF, %1\n" \
269 "27: MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
270 "28: MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
271 "29: SUB %3, %3, #32\n" \
272 "30: MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
273 "31: MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
274 "32: SETL [%0+#-8], D0.7, D1.7\n" \
275 " SUB %3, %3, #32\n" \
276 "1: DCACHE [%1+#-64], D0Ar6\n" \
277 " GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
278 " GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
279 " GETL D0.5, D1.5, [A0StP+#-24]\n" \
280 " GETL D0.6, D1.6, [A0StP+#-16]\n" \
281 " GETL D0.7, D1.7, [A0StP+#-8]\n" \
282 " SUB A0StP, A0StP, #40\n" \
283 " .section .fixup,\"ax\"\n" \
284 "3: MOV D0Ar2, TXSTATUS\n" \
285 " MOV D1Ar1, TXSTATUS\n" \
286 " AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \
287 " MOV TXSTATUS, D1Ar1\n" \
288 FIXUP \
289 " MOVT D0Ar2, #HI(1b)\n" \
290 " JUMP D0Ar2, #LO(1b)\n" \
291 " .previous\n" \
292 " .section __ex_table,\"a\"\n" \
293 " .long 21b,3b\n" \
294 " .long 22b,3b\n" \
295 " .long 23b,3b\n" \
296 " .long 24b,3b\n" \
297 " .long 25b,3b\n" \
298 " .long 26b,3b\n" \
299 " .long 27b,3b\n" \
300 " .long 28b,3b\n" \
301 " .long 29b,3b\n" \
302 " .long 30b,3b\n" \
303 " .long 31b,3b\n" \
304 " .long 32b,3b\n" \
305 " .previous\n" \
306 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
307 : "0" (to), "1" (from), "2" (ret), "3" (n) \
308 : "D1Ar1", "D0Ar2", "cc", "memory")
309
310/* rewind 'to' and 'from' pointers when a fault occurs
311 *
312 * Rationale:
313 * A fault always occurs on writing to user buffer. A fault
314 * is at a single address, so we need to rewind by only 4
315 * bytes.
316 * Since we do a complete read from kernel buffer before
317 * writing, we need to rewind it also. The amount to be
318 * rewind equals the number of faulty writes in MSETD
319 * which is: [4 - (LSM_STEP-1)]*8
320 * LSM_STEP is bits 10:8 in TXSTATUS which is already read
321 * and stored in D0Ar2
322 *
323 * NOTE: If a fault occurs at the last operation in M{G,S}ETL
324 * LSM_STEP will be 0. ie: we do 4 writes in our case, if
325 * a fault happens at the 4th write, LSM_STEP will be 0
326 * instead of 4. The code copes with that.
327 *
328 * n is updated by the number of successful writes, which is:
329 * n = n - (LSM_STEP-1)*8
330 */
331#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
332 __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
333 "LSR D0Ar2, D0Ar2, #8\n" \
334 "ANDS D0Ar2, D0Ar2, #0x7\n" \
335 "ADDZ D0Ar2, D0Ar2, #4\n" \
336 "SUB D0Ar2, D0Ar2, #1\n" \
337 "MOV D1Ar1, #4\n" \
338 "SUB D0Ar2, D1Ar1, D0Ar2\n" \
339 "LSL D0Ar2, D0Ar2, #3\n" \
340 "LSL D1Ar1, D1Ar1, #3\n" \
341 "SUB D1Ar1, D1Ar1, D0Ar2\n" \
342 "SUB %0, %0, #8\n" \
343 "SUB %1, %1,D0Ar2\n" \
344 "SUB %3, %3, D1Ar1\n")
345
346/*
347 * optimized copying loop using RAPF when 32 bit aligned
348 *
349 * n will be automatically decremented inside the loop
350 * ret will be left intact. if error occurs we will rewind
351 * so that the original non optimized code will fill up
352 * this value correctly.
353 *
354 * on fault:
355 * > n will hold total number of uncopied bytes
356 *
357 * > {'to','from'} will be rewind back so that
358 * the non-optimized code will do the proper fix up
359 *
360 * DCACHE drops the cacheline which helps in reducing cache
361 * pollution.
362 *
363 * We introduce an extra SETD at the end of the loop to
364 * ensure we don't fall off the loop before we catch all
365 * erros.
366 *
367 * NOTICE:
368 * LSM_STEP in TXSTATUS must be cleared in fix up code.
369 * since we're using M{S,G}ETL, a fault might happen at
370 * any address in the middle of M{S,G}ETL causing
371 * the value of LSM_STEP to be incorrect which can
372 * cause subsequent use of M{S,G}ET{L,D} to go wrong.
373 * ie: if LSM_STEP was 1 when a fault occurs, the
374 * next call to M{S,G}ET{L,D} will skip the first
375 * copy/getting as it think that the first 1 has already
376 * been done.
377 *
378 */
379#define __asm_copy_user_32bit_rapf_loop( \
380 to, from, ret, n, id, FIXUP) \
381 asm volatile ( \
382 ".balign 8\n" \
383 " MOV RAPF, %1\n" \
384 " MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
385 " MOV D0Ar6, #0\n" \
386 " LSR D1Ar5, %3, #6\n" \
387 " SUB TXRPT, D1Ar5, #2\n" \
388 " MOV RAPF, %1\n" \
389 "$Lloop"id":\n" \
390 " ADD RAPF, %1, #64\n" \
391 "21: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
392 "22: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
393 "23: SUB %3, %3, #16\n" \
394 "24: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
395 "25: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
396 "26: SUB %3, %3, #16\n" \
397 "27: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
398 "28: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
399 "29: SUB %3, %3, #16\n" \
400 "30: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
401 "31: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
402 "32: SUB %3, %3, #16\n" \
403 " DCACHE [%1+#-64], D0Ar6\n" \
404 " BR $Lloop"id"\n" \
405 \
406 " MOV RAPF, %1\n" \
407 "33: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
408 "34: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
409 "35: SUB %3, %3, #16\n" \
410 "36: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
411 "37: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
412 "38: SUB %3, %3, #16\n" \
413 "39: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
414 "40: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
415 "41: SUB %3, %3, #16\n" \
416 "42: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
417 "43: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
418 "44: SETD [%0+#-4], D0.7\n" \
419 " SUB %3, %3, #16\n" \
420 "1: DCACHE [%1+#-64], D0Ar6\n" \
421 " GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
422 " GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
423 " GETL D0.5, D1.5, [A0StP+#-24]\n" \
424 " GETL D0.6, D1.6, [A0StP+#-16]\n" \
425 " GETL D0.7, D1.7, [A0StP+#-8]\n" \
426 " SUB A0StP, A0StP, #40\n" \
427 " .section .fixup,\"ax\"\n" \
428 "3: MOV D0Ar2, TXSTATUS\n" \
429 " MOV D1Ar1, TXSTATUS\n" \
430 " AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \
431 " MOV TXSTATUS, D1Ar1\n" \
432 FIXUP \
433 " MOVT D0Ar2, #HI(1b)\n" \
434 " JUMP D0Ar2, #LO(1b)\n" \
435 " .previous\n" \
436 " .section __ex_table,\"a\"\n" \
437 " .long 21b,3b\n" \
438 " .long 22b,3b\n" \
439 " .long 23b,3b\n" \
440 " .long 24b,3b\n" \
441 " .long 25b,3b\n" \
442 " .long 26b,3b\n" \
443 " .long 27b,3b\n" \
444 " .long 28b,3b\n" \
445 " .long 29b,3b\n" \
446 " .long 30b,3b\n" \
447 " .long 31b,3b\n" \
448 " .long 32b,3b\n" \
449 " .long 33b,3b\n" \
450 " .long 34b,3b\n" \
451 " .long 35b,3b\n" \
452 " .long 36b,3b\n" \
453 " .long 37b,3b\n" \
454 " .long 38b,3b\n" \
455 " .long 39b,3b\n" \
456 " .long 40b,3b\n" \
457 " .long 41b,3b\n" \
458 " .long 42b,3b\n" \
459 " .long 43b,3b\n" \
460 " .long 44b,3b\n" \
461 " .previous\n" \
462 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
463 : "0" (to), "1" (from), "2" (ret), "3" (n) \
464 : "D1Ar1", "D0Ar2", "cc", "memory")
465
466/* rewind 'to' and 'from' pointers when a fault occurs
467 *
468 * Rationale:
469 * A fault always occurs on writing to user buffer. A fault
470 * is at a single address, so we need to rewind by only 4
471 * bytes.
472 * Since we do a complete read from kernel buffer before
473 * writing, we need to rewind it also. The amount to be
474 * rewind equals the number of faulty writes in MSETD
475 * which is: [4 - (LSM_STEP-1)]*4
476 * LSM_STEP is bits 10:8 in TXSTATUS which is already read
477 * and stored in D0Ar2
478 *
479 * NOTE: If a fault occurs at the last operation in M{G,S}ETL
480 * LSM_STEP will be 0. ie: we do 4 writes in our case, if
481 * a fault happens at the 4th write, LSM_STEP will be 0
482 * instead of 4. The code copes with that.
483 *
484 * n is updated by the number of successful writes, which is:
485 * n = n - (LSM_STEP-1)*4
486 */
487#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
488 __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
489 "LSR D0Ar2, D0Ar2, #8\n" \
490 "ANDS D0Ar2, D0Ar2, #0x7\n" \
491 "ADDZ D0Ar2, D0Ar2, #4\n" \
492 "SUB D0Ar2, D0Ar2, #1\n" \
493 "MOV D1Ar1, #4\n" \
494 "SUB D0Ar2, D1Ar1, D0Ar2\n" \
495 "LSL D0Ar2, D0Ar2, #2\n" \
496 "LSL D1Ar1, D1Ar1, #2\n" \
497 "SUB D1Ar1, D1Ar1, D0Ar2\n" \
498 "SUB %0, %0, #4\n" \
499 "SUB %1, %1, D0Ar2\n" \
500 "SUB %3, %3, D1Ar1\n")
501
/*
 * Copy @n bytes from kernel memory at @psrc to user memory at @pdst.
 *
 * Returns the number of bytes that could NOT be copied; 0 means the whole
 * block was written (the standard raw_copy_to_user() contract).
 *
 * The asm helper macros require src/dst in the fixed address units
 * A1.2/A0.2; each helper adds the size of any faulting access to @retn,
 * so "retn + n" is always the count of bytes left unwritten.
 */
unsigned long raw_copy_to_user(void __user *pdst, const void *psrc,
			       unsigned long n)
{
	register char __user *dst asm ("A0.2") = pdst;
	register const char *src asm ("A1.2") = psrc;
	unsigned long retn = 0;

	if (n == 0)
		return 0;

	/* Align src to 2 bytes with a single byte copy. */
	if ((unsigned long) src & 1) {
		__asm_copy_to_user_1(dst, src, retn);
		n--;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 1) {
		/* Worst case - byte copy */
		while (n > 0) {
			__asm_copy_to_user_1(dst, src, retn);
			n--;
			if (retn)
				return retn + n;
		}
	}
	/* Align src to 4 bytes with a single 2-byte copy. */
	if (((unsigned long) src & 2) && n >= 2) {
		__asm_copy_to_user_2(dst, src, retn);
		n -= 2;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 2) {
		/* Second worst case - word copy */
		while (n >= 2) {
			__asm_copy_to_user_2(dst, src, retn);
			n -= 2;
			if (retn)
				return retn + n;
		}
	}

#ifdef USE_RAPF
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
		if (n >= RAPF_MIN_BUF_SIZE) {
			/* copy user using 64 bit rapf copy */
			__asm_copy_to_user_64bit_rapf_loop(dst, src, retn,
							n, "64cu");
		}
		/* Mop up whatever the rapf loop left over (or the whole
		 * buffer when it was below RAPF_MIN_BUF_SIZE). */
		while (n >= 8) {
			__asm_copy_to_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
	if (n >= RAPF_MIN_BUF_SIZE) {
		/* copy user using 32 bit rapf copy */
		__asm_copy_to_user_32bit_rapf_loop(dst, src, retn, n, "32cu");
	}
#else
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
		while (n >= 8) {
			__asm_copy_to_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
#endif

	while (n >= 16) {
		__asm_copy_to_user_16(dst, src, retn);
		n -= 16;
		if (retn)
			return retn + n;
	}

	while (n >= 4) {
		__asm_copy_to_user_4(dst, src, retn);
		n -= 4;
		if (retn)
			return retn + n;
	}

	/* 0-3 byte tail. */
	switch (n) {
	case 0:
		break;
	case 1:
		__asm_copy_to_user_1(dst, src, retn);
		break;
	case 2:
		__asm_copy_to_user_2(dst, src, retn);
		break;
	case 3:
		__asm_copy_to_user_3(dst, src, retn);
		break;
	}

	/*
	 * If we get here, retn correctly reflects the number of failing
	 * bytes.
	 */
	return retn;
}
EXPORT_SYMBOL(raw_copy_to_user);
609
/* Copy one byte from user space; on fault the fixup adds 1 to ret. */
#define __asm_copy_from_user_1(to, from, ret) \
	__asm_copy_user_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		"2: SETB [%0++],D1Ar1\n", \
		"3: ADD %2,%2,#1\n", \
		" .long 2b,3b\n")

/* 2-byte copy template; COPY/FIXUP/TENTRY append extra instructions,
 * fixup code and __ex_table entries for the larger variants. */
#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		" GETW D1Ar1,[%1++]\n" \
		"2: SETW [%0++],D1Ar1\n" COPY, \
		"3: ADD %2,%2,#2\n" FIXUP, \
		" .long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_2(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, "", "", "")

/* 3 bytes = word copy followed by a byte copy. */
#define __asm_copy_from_user_3(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		"4: SETB [%0++],D1Ar1\n", \
		"5: ADD %2,%2,#1\n", \
		" .long 4b,5b\n")

/* 4-byte (dword) copy template, extendable like the 2x variant. */
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		" GETD D1Ar1,[%1++]\n" \
		"2: SETD [%0++],D1Ar1\n" COPY, \
		"3: ADD %2,%2,#4\n" FIXUP, \
		" .long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_4(to, from, ret) \
	__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")

/* Copy 8 bytes with one 64-bit GETL/SETL pair; on fault add 8 to ret
 * and resume after the store (label 1). */
#define __asm_copy_from_user_8x64(to, from, ret) \
	asm volatile ( \
		" GETL D0Ar2,D1Ar1,[%1++]\n" \
		"2: SETL [%0++],D0Ar2,D1Ar1\n" \
		"1:\n" \
		" .section .fixup,\"ax\"\n" \
		"3: ADD %2,%2,#8\n" \
		" MOVT D0Ar2,#HI(1b)\n" \
		" JUMP D0Ar2,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .long 2b,3b\n" \
		" .previous\n" \
		: "=a" (to), "=r" (from), "=r" (ret) \
		: "0" (to), "1" (from), "2" (ret) \
		: "D1Ar1", "D0Ar2", "memory")

/* rewind 'from' pointer when a fault occurs
 *
 * Rationale:
 *	A fault occurs while reading from user buffer, which is the
 *	source.
 *	Since we don't write to kernel buffer until we read first,
 *	the kernel buffer is at the right state and needn't be
 *	corrected, but the source must be rewound to the beginning of
 *	the block, which is LSM_STEP*8 bytes.
 *	LSM_STEP is bits 10:8 in TXSTATUS which is already read
 *	and stored in D0Ar2
 *
 * NOTE: If a fault occurs at the last operation in M{G,S}ETL
 *	LSM_STEP will be 0. ie: we do 4 accesses in our case, if
 *	a fault happens at the 4th access, LSM_STEP will be 0
 *	instead of 4. The code copes with that (ADDZ adds the full
 *	block size when LSM_STEP reads as zero).
 */
#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
		"LSR D0Ar2, D0Ar2, #5\n" \
		"ANDS D0Ar2, D0Ar2, #0x38\n" \
		"ADDZ D0Ar2, D0Ar2, #32\n" \
		"SUB %1, %1, D0Ar2\n")

/* rewind 'from' pointer when a fault occurs
 *
 * Same rationale as above, but the 32-bit loop moves LSM_STEP*4 bytes
 * per operation, so the extracted step is scaled by 4 (mask 0x1c) and
 * a zero LSM_STEP means a full 16-byte block.
 */
#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
		"LSR D0Ar2, D0Ar2, #6\n" \
		"ANDS D0Ar2, D0Ar2, #0x1c\n" \
		"ADDZ D0Ar2, D0Ar2, #16\n" \
		"SUB %1, %1, D0Ar2\n")
708
709
/*
 * Copy from user to kernel. The return-value is the number of bytes that were
 * inaccessible (0 on full success).
 *
 * Note: unlike raw_copy_to_user() this does NOT zero the uncopied tail of
 * the kernel buffer; from the code here the helpers only rewind/account,
 * so any required zeroing presumably happens in the generic wrapper -
 * confirm against the caller.
 */
unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
				 unsigned long n)
{
	register char *dst asm ("A0.2") = pdst;
	register const char __user *src asm ("A1.2") = psrc;
	unsigned long retn = 0;

	if (n == 0)
		return 0;

	/* Align src to 2 bytes with a single byte copy. */
	if ((unsigned long) src & 1) {
		__asm_copy_from_user_1(dst, src, retn);
		n--;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 1) {
		/* Worst case - byte copy */
		while (n > 0) {
			__asm_copy_from_user_1(dst, src, retn);
			n--;
			if (retn)
				return retn + n;
		}
	}
	/* Align src to 4 bytes with a single 2-byte copy. */
	if (((unsigned long) src & 2) && n >= 2) {
		__asm_copy_from_user_2(dst, src, retn);
		n -= 2;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 2) {
		/* Second worst case - word copy */
		while (n >= 2) {
			__asm_copy_from_user_2(dst, src, retn);
			n -= 2;
			if (retn)
				return retn + n;
		}
	}

#ifdef USE_RAPF
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
		if (n >= RAPF_MIN_BUF_SIZE) {
			/* Copy using fast 64bit rapf */
			__asm_copy_from_user_64bit_rapf_loop(dst, src, retn,
							n, "64cuz");
		}
		/* Mop up the remainder with plain 8-byte copies. */
		while (n >= 8) {
			__asm_copy_from_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}

	if (n >= RAPF_MIN_BUF_SIZE) {
		/* Copy using fast 32bit rapf */
		__asm_copy_from_user_32bit_rapf_loop(dst, src, retn,
						n, "32cuz");
	}
#else
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
		while (n >= 8) {
			__asm_copy_from_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
#endif

	while (n >= 4) {
		__asm_copy_from_user_4(dst, src, retn);
		n -= 4;

		if (retn)
			return retn + n;
	}

	/* If we get here, there were no memory read faults. */
	switch (n) {
		/* These copies are at least "naturally aligned" (so we don't
		   have to check each byte), due to the src alignment code.
		   The *_3 case *will* get the correct count for retn. */
	case 0:
		/* This case deliberately left in (if you have doubts check the
		   generated assembly code). */
		break;
	case 1:
		__asm_copy_from_user_1(dst, src, retn);
		break;
	case 2:
		__asm_copy_from_user_2(dst, src, retn);
		break;
	case 3:
		__asm_copy_from_user_3(dst, src, retn);
		break;
	}

	/* If we get here, retn correctly reflects the number of failing
	   bytes. */
	return retn;
}
EXPORT_SYMBOL(raw_copy_from_user);
821
/* Zero 8 bytes at 'to' with a 64-bit SETL; on fault add 8 to ret and
 * resume after the store.
 * NOTE(review): there is an unlabelled SETL before the one covered by
 * the __ex_table entry - the same doubled-access pattern as the
 * get/put helpers in this file; presumably the fault is taken on the
 * following instruction. Confirm against the Meta fault model. */
#define __asm_clear_8x64(to, ret) \
	asm volatile ( \
		" MOV D0Ar2,#0\n" \
		" MOV D1Ar1,#0\n" \
		" SETL [%0],D0Ar2,D1Ar1\n" \
		"2: SETL [%0++],D0Ar2,D1Ar1\n" \
		"1:\n" \
		" .section .fixup,\"ax\"\n" \
		"3: ADD %1,%1,#8\n" \
		" MOVT D0Ar2,#HI(1b)\n" \
		" JUMP D0Ar2,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .long 2b,3b\n" \
		" .previous\n" \
		: "=r" (to), "=r" (ret) \
		: "0" (to), "1" (ret) \
		: "D1Ar1", "D0Ar2", "memory")

/* Zero userspace. */

/* Core template: run CLEAR with D1Ar1 == 0. On fault, FIXUP adds the
 * size of the failing store to ret; TENTRY supplies the matching
 * __ex_table entries. */
#define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
	asm volatile ( \
		" MOV D1Ar1,#0\n" \
		CLEAR \
		"1:\n" \
		" .section .fixup,\"ax\"\n" \
		FIXUP \
		" MOVT D1Ar1,#HI(1b)\n" \
		" JUMP D1Ar1,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		TENTRY \
		" .previous" \
		: "=r" (to), "=r" (ret) \
		: "0" (to), "1" (ret) \
		: "D1Ar1", "memory")

/* Zero 1 byte. */
#define __asm_clear_1(to, ret) \
	__asm_clear(to, ret, \
		" SETB [%0],D1Ar1\n" \
		"2: SETB [%0++],D1Ar1\n", \
		"3: ADD %1,%1,#1\n", \
		" .long 2b,3b\n")

/* Zero 2 bytes. */
#define __asm_clear_2(to, ret) \
	__asm_clear(to, ret, \
		" SETW [%0],D1Ar1\n" \
		"2: SETW [%0++],D1Ar1\n", \
		"3: ADD %1,%1,#2\n", \
		" .long 2b,3b\n")

/* Zero 3 bytes (word then byte, each with its own fixup). */
#define __asm_clear_3(to, ret) \
	__asm_clear(to, ret, \
		"2: SETW [%0++],D1Ar1\n" \
		" SETB [%0],D1Ar1\n" \
		"3: SETB [%0++],D1Ar1\n", \
		"4: ADD %1,%1,#2\n" \
		"5: ADD %1,%1,#1\n", \
		" .long 2b,4b\n" \
		" .long 3b,5b\n")

/* 4-byte clear template; CLEAR/FIXUP/TENTRY chain additional stores
 * for the 8/12/16-byte variants below. */
#define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear(to, ret, \
		" SETD [%0],D1Ar1\n" \
		"2: SETD [%0++],D1Ar1\n" CLEAR, \
		"3: ADD %1,%1,#4\n" FIXUP, \
		" .long 2b,3b\n" TENTRY)

#define __asm_clear_4(to, ret) \
	__asm_clear_4x_cont(to, ret, "", "", "")

#define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_4x_cont(to, ret, \
		" SETD [%0],D1Ar1\n" \
		"4: SETD [%0++],D1Ar1\n" CLEAR, \
		"5: ADD %1,%1,#4\n" FIXUP, \
		" .long 4b,5b\n" TENTRY)

#define __asm_clear_8(to, ret) \
	__asm_clear_8x_cont(to, ret, "", "", "")

#define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_8x_cont(to, ret, \
		" SETD [%0],D1Ar1\n" \
		"6: SETD [%0++],D1Ar1\n" CLEAR, \
		"7: ADD %1,%1,#4\n" FIXUP, \
		" .long 6b,7b\n" TENTRY)

#define __asm_clear_12(to, ret) \
	__asm_clear_12x_cont(to, ret, "", "", "")

#define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_12x_cont(to, ret, \
		" SETD [%0],D1Ar1\n" \
		"8: SETD [%0++],D1Ar1\n" CLEAR, \
		"9: ADD %1,%1,#4\n" FIXUP, \
		" .long 8b,9b\n" TENTRY)

#define __asm_clear_16(to, ret) \
	__asm_clear_16x_cont(to, ret, "", "", "")
923
/*
 * Zero @pn bytes of user memory at @pto.
 * Returns the number of bytes that could NOT be cleared (0 on success).
 *
 * NOTE(review): the alignment fix-ups decrement n without checking that
 * enough bytes remain (n-- for odd dst, n -= 2 for 2-byte alignment);
 * callers presumably guarantee a sufficiently large, access_ok'd size -
 * confirm at the clear_user() call sites.
 */
unsigned long __do_clear_user(void __user *pto, unsigned long pn)
{
	register char __user *dst asm ("D0Re0") = pto;
	register unsigned long n asm ("D1Re0") = pn;
	register unsigned long retn asm ("D0Ar6") = 0;

	/* Align dst to a 4-byte boundary. */
	if ((unsigned long) dst & 1) {
		__asm_clear_1(dst, retn);
		n--;
	}

	if ((unsigned long) dst & 2) {
		__asm_clear_2(dst, retn);
		n -= 2;
	}

	/* 64 bit copy loop */
	if (!((__force unsigned long) dst & 7)) {
		while (n >= 8) {
			__asm_clear_8x64(dst, retn);
			n -= 8;
		}
	}

	while (n >= 16) {
		__asm_clear_16(dst, retn);
		n -= 16;
	}

	while (n >= 4) {
		__asm_clear_4(dst, retn);
		n -= 4;
	}

	/* 0-3 byte tail. */
	switch (n) {
	case 0:
		break;
	case 1:
		__asm_clear_1(dst, retn);
		break;
	case 2:
		__asm_clear_2(dst, retn);
		break;
	case 3:
		__asm_clear_3(dst, retn);
		break;
	}

	/* retn accumulated the size of every faulting store. */
	return retn;
}
EXPORT_SYMBOL(__do_clear_user);
975
/*
 * Fetch one byte from user address @addr.
 * On fault the fixup stores -EFAULT through @err and the pre-zeroed
 * result register is returned; on success *err is left untouched.
 *
 * NOTE(review): only the load at label 1 has an __ex_table entry; the
 * doubled GETB looks deliberate (the fault appears to be taken on the
 * instruction after the offending access) - confirm against the Meta
 * fault model.
 */
unsigned char __get_user_asm_b(const void __user *addr, long *err)
{
	register unsigned char x asm ("D0Re0") = 0;
	asm volatile (
		" GETB %0,[%2]\n"
		"1:\n"
		" GETB %0,[%2]\n"
		"2:\n"
		" .section .fixup,\"ax\"\n"
		"3: MOV D0FrT,%3\n"
		" SETD [%1],D0FrT\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		" .previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_b);
999
/*
 * Fetch a 16-bit word from user address @addr.
 * On fault: *err = -EFAULT and the pre-zeroed result is returned.
 * Same doubled-load/single-__ex_table-entry pattern as __get_user_asm_b().
 */
unsigned short __get_user_asm_w(const void __user *addr, long *err)
{
	register unsigned short x asm ("D0Re0") = 0;
	asm volatile (
		" GETW %0,[%2]\n"
		"1:\n"
		" GETW %0,[%2]\n"
		"2:\n"
		" .section .fixup,\"ax\"\n"
		"3: MOV D0FrT,%3\n"
		" SETD [%1],D0FrT\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		" .previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_w);
1023
/*
 * Fetch a 32-bit dword from user address @addr.
 * On fault: *err = -EFAULT and the pre-zeroed result is returned.
 * Same doubled-load/single-__ex_table-entry pattern as __get_user_asm_b().
 */
unsigned int __get_user_asm_d(const void __user *addr, long *err)
{
	register unsigned int x asm ("D0Re0") = 0;
	asm volatile (
		" GETD %0,[%2]\n"
		"1:\n"
		" GETD %0,[%2]\n"
		"2:\n"
		" .section .fixup,\"ax\"\n"
		"3: MOV D0FrT,%3\n"
		" SETD [%1],D0FrT\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		" .previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_d);
1047
/*
 * Fetch a 64-bit value from user address @addr (GETL loads the register
 * pair %0/%t0).
 * On fault: *err = -EFAULT and the pre-zeroed result is returned.
 * Same doubled-load/single-__ex_table-entry pattern as __get_user_asm_b().
 */
unsigned long long __get_user_asm_l(const void __user *addr, long *err)
{
	register unsigned long long x asm ("D0Re0") = 0;
	asm volatile (
		" GETL %0,%t0,[%2]\n"
		"1:\n"
		" GETL %0,%t0,[%2]\n"
		"2:\n"
		" .section .fixup,\"ax\"\n"
		"3: MOV D0FrT,%3\n"
		" SETD [%1],D0FrT\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		" .previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_l);
1071
/*
 * Store one byte of @x at user address @addr.
 * Returns 0 on success, -EFAULT on fault (set by the fixup).
 * Same doubled-store/single-__ex_table-entry pattern as the get helpers.
 */
long __put_user_asm_b(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		" MOV %0,#0\n"
		" SETB [%2],%1\n"
		"1:\n"
		" SETB [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: MOV %0,%3\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_b);
1095
/*
 * Store a 16-bit word of @x at user address @addr.
 * Returns 0 on success, -EFAULT on fault.
 */
long __put_user_asm_w(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		" MOV %0,#0\n"
		" SETW [%2],%1\n"
		"1:\n"
		" SETW [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: MOV %0,%3\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_w);
1119
/*
 * Store a 32-bit dword of @x at user address @addr.
 * Returns 0 on success, -EFAULT on fault.
 */
long __put_user_asm_d(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		" MOV %0,#0\n"
		" SETD [%2],%1\n"
		"1:\n"
		" SETD [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: MOV %0,%3\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_d);
1143
/*
 * Store a 64-bit value @x at user address @addr (SETL writes the
 * register pair %1/%t1).
 * Returns 0 on success, -EFAULT on fault.
 */
long __put_user_asm_l(unsigned long long x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		" MOV %0,#0\n"
		" SETL [%2],%1,%t1\n"
		"1:\n"
		" SETL [%2],%1,%t1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: MOV %0,%3\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_l);
1167
/*
 * Return the length of a user string INCLUDING the terminating NUL,
 * 0 on an access fault, or a value larger than @count if no NUL is
 * found within @count bytes (D0Ar6 goes to -1 before the exit branch,
 * giving count + 1).
 */
long strnlen_user(const char __user *src, long count)
{
	long res;

	/* Size 0: just validate the pointer itself. */
	if (!access_ok(VERIFY_READ, src, 0))
		return 0;

	asm volatile (" MOV D0Ar4, %1\n"
		" MOV D0Ar6, %2\n"
		"0:\n"
		" SUBS D0FrT, D0Ar6, #0\n"
		" SUB D0Ar6, D0Ar6, #1\n"
		" BLE 2f\n"
		" GETB D0FrT, [D0Ar4+#1++]\n"
		"1:\n"
		" TST D0FrT, #255\n"
		" BNE 0b\n"
		"2:\n"
		/* Result = bytes consumed (including the NUL). */
		" SUB %0, %2, D0Ar6\n"
		"3:\n"
		" .section .fixup,\"ax\"\n"
		"4:\n"
		/* Fault while reading: report length 0. */
		" MOV %0, #0\n"
		" MOVT D0FrT,#HI(3b)\n"
		" JUMP D0FrT,#LO(3b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 1b,4b\n"
		" .previous\n"
		: "=r" (res)
		: "r" (src), "r" (count)
		: "D0FrT", "D0Ar4", "D0Ar6", "cc");

	return res;
}
EXPORT_SYMBOL(strnlen_user);
1204
/*
 * Copy a NUL-terminated string from user space @src to kernel @dst,
 * copying at most @count bytes (the NUL, when found, is copied too).
 * Returns the number of bytes copied excluding the NUL, or -EFAULT on
 * an access fault.
 */
long __strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res;

	if (count == 0)
		return 0;

	/*
	 * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
	 * So do we.
	 *
	 * This code is deduced from:
	 *
	 *	char tmp2;
	 *	long tmp1, tmp3;
	 *	tmp1 = count;
	 *	while ((*dst++ = (tmp2 = *src++)) != 0
	 *	       && --tmp1)
	 *		;
	 *
	 *	res = count - tmp1;
	 *
	 * with tweaks.
	 */

	asm volatile (" MOV %0,%3\n"
		"1:\n"
		" GETB D0FrT,[%2++]\n"
		"2:\n"
		" CMP D0FrT,#0\n"
		" SETB [%1++],D0FrT\n"
		" BEQ 3f\n"
		" SUBS %0,%0,#1\n"
		" BNZ 1b\n"
		"3:\n"
		/* %0 = count - remaining = bytes before the NUL. */
		" SUB %0,%3,%0\n"
		"4:\n"
		" .section .fixup,\"ax\"\n"
		"5:\n"
		/* Fault on the user read: return -EFAULT. */
		" MOV %0,%7\n"
		" MOVT D0FrT,#HI(4b)\n"
		" JUMP D0FrT,#LO(4b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 2b,5b\n"
		" .previous"
		: "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
		: "3" (count), "1" (dst), "2" (src), "P" (-EFAULT)
		: "D0FrT", "memory", "cc");

	return res;
}
EXPORT_SYMBOL(__strncpy_from_user);
diff --git a/arch/metag/mm/Kconfig b/arch/metag/mm/Kconfig
deleted file mode 100644
index 9d4b2c67dcc1..000000000000
--- a/arch/metag/mm/Kconfig
+++ /dev/null
@@ -1,147 +0,0 @@
1# SPDX-License-Identifier: GPL-2.0
2menu "Memory management options"
3
4config PAGE_OFFSET
5 hex "Kernel page offset address"
6 default "0x40000000"
7 help
8 This option allows you to set the virtual address at which the
9 kernel will be mapped to.
10endmenu
11
12config KERNEL_4M_PAGES
13 bool "Map kernel with 4MB pages"
14 depends on METAG_META21_MMU
15 default y
16 help
17 Map the kernel with large pages to reduce TLB pressure.
18
19choice
20 prompt "User page size"
21 default PAGE_SIZE_4K
22
23config PAGE_SIZE_4K
24 bool "4kB"
25 help
26 This is the default page size used by all Meta cores.
27
28config PAGE_SIZE_8K
29 bool "8kB"
30 depends on METAG_META21_MMU
31 help
32 This enables 8kB pages as supported by Meta 2.x and later MMUs.
33
34config PAGE_SIZE_16K
35 bool "16kB"
36 depends on METAG_META21_MMU
37 help
38 This enables 16kB pages as supported by Meta 2.x and later MMUs.
39
40endchoice
41
42config NUMA
43 bool "Non Uniform Memory Access (NUMA) Support"
44 select ARCH_WANT_NUMA_VARIABLE_LOCALITY
45 help
46 Some Meta systems have MMU-mappable on-chip memories with
47 lower latencies than main memory. This enables support for
48 these blocks by binding them to nodes and allowing
49 memory policies to be used for prioritizing and controlling
50 allocation behaviour.
51
52config FORCE_MAX_ZONEORDER
53 int "Maximum zone order"
54 range 10 32
55 default "10"
56 help
57 The kernel memory allocator divides physically contiguous memory
58 blocks into "zones", where each zone is a power of two number of
59 pages. This option selects the largest power of two that the kernel
60 keeps in the memory allocator. If you need to allocate very large
61 blocks of physically contiguous memory, then you may need to
62 increase this value.
63
64 This config option is actually maximum order plus one. For example,
65 a value of 11 means that the largest free memory block is 2^10 pages.
66
67 The page size is not necessarily 4KB. Keep this in mind
68 when choosing a value for this option.
69
70config METAG_L2C
71 bool "Level 2 Cache Support"
72 depends on METAG_META21
73 help
74 Press y here to enable support for the Meta Level 2 (L2) cache. This
75 will enable the cache at start up if it hasn't already been enabled
76 by the bootloader.
77
78 If the bootloader enables the L2 you must press y here to ensure the
79 kernel takes the appropriate actions to keep the cache coherent.
80
81config NODES_SHIFT
82 int
83 default "1"
84 depends on NEED_MULTIPLE_NODES
85
86config ARCH_FLATMEM_ENABLE
87 def_bool y
88 depends on !NUMA
89
90config ARCH_SPARSEMEM_ENABLE
91 def_bool y
92 select SPARSEMEM_STATIC
93
94config ARCH_SPARSEMEM_DEFAULT
95 def_bool y
96
97config ARCH_SELECT_MEMORY_MODEL
98 def_bool y
99
100config SYS_SUPPORTS_HUGETLBFS
101 def_bool y
102 depends on METAG_META21_MMU
103
104choice
105 prompt "HugeTLB page size"
106 depends on METAG_META21_MMU && HUGETLB_PAGE
107 default HUGETLB_PAGE_SIZE_1M
108
109config HUGETLB_PAGE_SIZE_8K
110 bool "8kB"
111 depends on PAGE_SIZE_4K
112
113config HUGETLB_PAGE_SIZE_16K
114 bool "16kB"
115 depends on PAGE_SIZE_4K || PAGE_SIZE_8K
116
117config HUGETLB_PAGE_SIZE_32K
118 bool "32kB"
119
120config HUGETLB_PAGE_SIZE_64K
121 bool "64kB"
122
123config HUGETLB_PAGE_SIZE_128K
124 bool "128kB"
125
126config HUGETLB_PAGE_SIZE_256K
127 bool "256kB"
128
129config HUGETLB_PAGE_SIZE_512K
130 bool "512kB"
131
132config HUGETLB_PAGE_SIZE_1M
133 bool "1MB"
134
135config HUGETLB_PAGE_SIZE_2M
136 bool "2MB"
137
138config HUGETLB_PAGE_SIZE_4M
139 bool "4MB"
140
141endchoice
142
143config METAG_COREMEM
144 bool
145 default y if SUSPEND
146
147source "mm/Kconfig"
diff --git a/arch/metag/mm/Makefile b/arch/metag/mm/Makefile
deleted file mode 100644
index 0c7c91ba9fb9..000000000000
--- a/arch/metag/mm/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
1# SPDX-License-Identifier: GPL-2.0
2#
3# Makefile for the linux Meta-specific parts of the memory manager.
4#
5
6obj-y += cache.o
7obj-y += extable.o
8obj-y += fault.o
9obj-y += init.o
10obj-y += ioremap.o
11obj-y += maccess.o
12
13mmu-y := mmu-meta1.o
14mmu-$(CONFIG_METAG_META21_MMU) := mmu-meta2.o
15obj-y += $(mmu-y)
16
17obj-$(CONFIG_HIGHMEM) += highmem.o
18obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
19obj-$(CONFIG_METAG_L2C) += l2cache.o
20obj-$(CONFIG_NUMA) += numa.o
diff --git a/arch/metag/mm/cache.c b/arch/metag/mm/cache.c
deleted file mode 100644
index a62285284ab8..000000000000
--- a/arch/metag/mm/cache.c
+++ /dev/null
@@ -1,521 +0,0 @@
1/*
2 * arch/metag/mm/cache.c
3 *
4 * Copyright (C) 2001, 2002, 2005, 2007, 2012 Imagination Technologies.
5 *
6 * This program is free software; you can redistribute it and/or modify it under
7 * the terms of the GNU General Public License version 2 as published by the
8 * Free Software Foundation.
9 *
10 * Cache control code
11 */
12
13#include <linux/export.h>
14#include <linux/io.h>
15#include <asm/cacheflush.h>
16#include <asm/core_reg.h>
17#include <asm/global_lock.h>
18#include <asm/metag_isa.h>
19#include <asm/metag_mem.h>
20#include <asm/metag_regs.h>
21
/* log2 of the default way count (4-way caches). */
#define DEFAULT_CACHE_WAYS_LOG2	2

/*
 * Size of a set in the caches. Initialised for default 16K stride, adjusted
 * according to values passed through TBI global heap segment via LDLK (on ATP)
 * or config registers (on HTP/MTP)
 */
static int dcache_set_shift = METAG_TBI_CACHE_SIZE_BASE_LOG2
					- DEFAULT_CACHE_WAYS_LOG2;
static int icache_set_shift = METAG_TBI_CACHE_SIZE_BASE_LOG2
					- DEFAULT_CACHE_WAYS_LOG2;
/*
 * The number of sets in the caches. Initialised for HTP/ATP, adjusted
 * according to NOMMU setting in config registers
 * (log2 values; the flush loops below iterate 4 or 2 regions from these)
 */
static unsigned char dcache_sets_log2 = DEFAULT_CACHE_WAYS_LOG2;
static unsigned char icache_sets_log2 = DEFAULT_CACHE_WAYS_LOG2;
39
40#ifndef CONFIG_METAG_META12
/**
 * metag_lnkget_probe() - Probe whether lnkget/lnkset go around the cache
 *
 * Writes a test word via LNKGETD/LNKSETD and re-reads it through a normal
 * cached load; whether the cached copy changed tells us if the atomic
 * ops bypass the cache. Only warns/logs when the result contradicts the
 * relevant Kconfig choice.
 */
/* Cache-line-aligned scratch word for the probe (init-only data). */
static volatile u32 lnkget_testdata[16] __initdata __aligned(64);

#define LNKGET_CONSTANT 0xdeadbeef

static void __init metag_lnkget_probe(void)
{
	int temp;
	long flags;

	/*
	 * It's conceivable the user has configured a globally coherent cache
	 * shared with non-Linux hardware threads, so use LOCK2 to prevent them
	 * from executing and causing cache eviction during the test.
	 */
	__global_lock2(flags);

	/* read a value to bring it into the cache */
	(void)lnkget_testdata[0];
	lnkget_testdata[0] = 0;

	/* lnkget/lnkset it to modify it (retry until the set succeeds) */
	asm volatile(
		"1:	LNKGETD %0, [%1]\n"
		"	LNKSETD [%1], %2\n"
		"	DEFR	%0, TXSTAT\n"
		"	ANDT	%0, %0, #HI(0x3f000000)\n"
		"	CMPT	%0, #HI(0x02000000)\n"
		"	BNZ	1b\n"
		: "=&d" (temp)
		: "da" (&lnkget_testdata[0]), "bd" (LNKGET_CONSTANT)
		: "cc");

	/* re-read it to see if the cached value changed */
	temp = lnkget_testdata[0];

	__global_unlock2(flags);

	/* flush the cache line to fix any incoherency */
	__builtin_dcache_flush((void *)&lnkget_testdata[0]);

#if defined(CONFIG_METAG_LNKGET_AROUND_CACHE)
	/* if the cache is right, LNKGET_AROUND_CACHE is unnecessary */
	if (temp == LNKGET_CONSTANT)
		pr_info("LNKGET/SET go through cache but CONFIG_METAG_LNKGET_AROUND_CACHE=y\n");
#elif defined(CONFIG_METAG_ATOMICITY_LNKGET)
	/*
	 * if the cache is wrong, LNKGET_AROUND_CACHE is really necessary
	 * because the kernel is configured to use LNKGET/SET for atomicity
	 */
	WARN(temp != LNKGET_CONSTANT,
	     "LNKGET/SET go around cache but CONFIG_METAG_LNKGET_AROUND_CACHE=n\n"
	     "Expect kernel failure as it's used for atomicity primitives\n");
#elif defined(CONFIG_SMP)
	/*
	 * if the cache is wrong, LNKGET_AROUND_CACHE should be used or the
	 * gateway page won't flush and userland could break.
	 */
	WARN(temp != LNKGET_CONSTANT,
	     "LNKGET/SET go around cache but CONFIG_METAG_LNKGET_AROUND_CACHE=n\n"
	     "Expect userland failure as it's used for user gateway page\n");
#else
	/*
	 * if the cache is wrong, LNKGET_AROUND_CACHE is set wrong, but it
	 * doesn't actually matter as it doesn't have any effect on !SMP &&
	 * !ATOMICITY_LNKGET.
	 */
	if (temp != LNKGET_CONSTANT)
		pr_warn("LNKGET/SET go around cache but CONFIG_METAG_LNKGET_AROUND_CACHE=n\n");
#endif
}
114#endif /* !CONFIG_METAG_META12 */
115
/**
 * metag_cache_probe() - Probe L1 cache configuration.
 *
 * Probe the L1 cache configuration to aid the L1 physical cache flushing
 * functions: fills in the file-local {i,d}cache_set_shift and
 * {i,d}cache_sets_log2 variables from core-config registers (HTP/MTP)
 * or from the TBI global heap segment (META 1.2 / ATP).
 */
void __init metag_cache_probe(void)
{
#ifndef CONFIG_METAG_META12
	int coreid = metag_in32(METAC_CORE_ID);
	int config = metag_in32(METAC_CORE_CONFIG2);
	int cfgcache = coreid & METAC_COREID_CFGCACHE_BITS;

	/* These core variants use log2 == 1 instead of the default 2. */
	if (cfgcache == METAC_COREID_CFGCACHE_TYPE0 ||
	    cfgcache == METAC_COREID_CFGCACHE_PRIVNOMMU) {
		icache_sets_log2 = 1;
		dcache_sets_log2 = 1;
	}

	/* For normal size caches, the smallest size is 4Kb.
	   For small caches, the smallest size is 64b */
	icache_set_shift = (config & METAC_CORECFG2_ICSMALL_BIT)
				? 6 : 12;
	icache_set_shift += (config & METAC_CORE_C2ICSZ_BITS)
				>> METAC_CORE_C2ICSZ_S;
	icache_set_shift -= icache_sets_log2;

	dcache_set_shift = (config & METAC_CORECFG2_DCSMALL_BIT)
				? 6 : 12;
	dcache_set_shift += (config & METAC_CORECFG2_DCSZ_BITS)
				>> METAC_CORECFG2_DCSZ_S;
	dcache_set_shift -= dcache_sets_log2;

	metag_lnkget_probe();
#else
	/* Extract cache sizes from global heap segment */
	unsigned long val, u;
	int width, shift, addend;
	PTBISEG seg;

	seg = __TBIFindSeg(NULL, TBID_SEG(TBID_THREAD_GLOBAL,
					  TBID_SEGSCOPE_GLOBAL,
					  TBID_SEGTYPE_HEAP));
	if (seg != NULL) {
		val = seg->Data[1];

		/* Work out width of I-cache size bit-field
		 * (count of consecutive low set bits of the shifted mask) */
		u = ((unsigned long) METAG_TBI_ICACHE_SIZE_BITS)
			>> METAG_TBI_ICACHE_SIZE_S;
		width = 0;
		while (u & 1) {
			width++;
			u >>= 1;
		}
		/* Extract sign-extended size addend value
		 * (shift the field up to bit 31, then arithmetic-shift back) */
		shift = 32 - (METAG_TBI_ICACHE_SIZE_S + width);
		addend = (long) ((val & METAG_TBI_ICACHE_SIZE_BITS)
				 << shift)
				>> (shift + METAG_TBI_ICACHE_SIZE_S);
		/* Now calculate I-cache set size */
		icache_set_shift = (METAG_TBI_CACHE_SIZE_BASE_LOG2
				    - DEFAULT_CACHE_WAYS_LOG2)
					+ addend;

		/* Similarly for D-cache */
		u = ((unsigned long) METAG_TBI_DCACHE_SIZE_BITS)
			>> METAG_TBI_DCACHE_SIZE_S;
		width = 0;
		while (u & 1) {
			width++;
			u >>= 1;
		}
		shift = 32 - (METAG_TBI_DCACHE_SIZE_S + width);
		addend = (long) ((val & METAG_TBI_DCACHE_SIZE_BITS)
				 << shift)
				>> (shift + METAG_TBI_DCACHE_SIZE_S);
		dcache_set_shift = (METAG_TBI_CACHE_SIZE_BASE_LOG2
				    - DEFAULT_CACHE_WAYS_LOG2)
					+ addend;
	}
#endif
}
198
/*
 * Flush this thread's whole data cache partition through the physical
 * cache-flush address region (one byte store per cache line, four
 * interleaved regions per iteration).
 *
 * @start is only used for its sign: a top-bit-set (negative) address
 * selects the global partition fields instead of the local ones.
 */
static void metag_phys_data_cache_flush(const void *start)
{
	unsigned long flush0, flush1, flush2, flush3;
	int loops, step;
	int thread;
	int part, offset;
	int set_shift;

	/* Use a sequence of writes to flush the cache region requested */
	thread = (__core_reg_get(TXENABLE) & TXENABLE_THREAD_BITS)
					  >> TXENABLE_THREAD_S;

	/* Cache is broken into sets which lie in contiguous RAMs */
	set_shift = dcache_set_shift;

	/* Move to the base of the physical cache flush region */
	flush0 = LINSYSCFLUSH_DCACHE_LINE;
	step   = 64;

	/* Get partition data for this thread */
	part = metag_in32(SYSC_DCPART0 +
			  (SYSC_xCPARTn_STRIDE * thread));

	if ((int)start < 0)
		/* Access Global vs Local partition */
		part >>= SYSC_xCPARTG_AND_S
			- SYSC_xCPARTL_AND_S;

	/* Extract offset and move SetOff */
	offset = (part & SYSC_xCPARTL_OR_BITS)
			>> SYSC_xCPARTL_OR_S;
	flush0 += (offset << (set_shift - 4));

	/* Shrink size */
	part = (part & SYSC_xCPARTL_AND_BITS)
			>> SYSC_xCPARTL_AND_S;
	loops = ((part + 1) << (set_shift - 4));

	/* Reduce loops by step of cache line size */
	loops /= step;

	/* One flush pointer per region, a set apart. */
	flush1 = flush0 + (1 << set_shift);
	flush2 = flush0 + (2 << set_shift);
	flush3 = flush0 + (3 << set_shift);

	if (dcache_sets_log2 == 1) {
		/* Only two regions: interleave the four pointers within
		 * them and halve the iteration count. */
		flush2 = flush1;
		flush3 = flush1 + step;
		flush1 = flush0 + step;
		step  <<= 1;
		loops >>= 1;
	}

	/* Clear loops ways in cache */
	while (loops-- != 0) {
		/* Clear the ways. */
#if 0
		/*
		 * GCC doesn't generate very good code for this so we
		 * provide inline assembly instead.
		 */
		metag_out8(0, flush0);
		metag_out8(0, flush1);
		metag_out8(0, flush2);
		metag_out8(0, flush3);

		flush0 += step;
		flush1 += step;
		flush2 += step;
		flush3 += step;
#else
		asm volatile (
			"SETB\t[%0+%4++],%5\n"
			"SETB\t[%1+%4++],%5\n"
			"SETB\t[%2+%4++],%5\n"
			"SETB\t[%3+%4++],%5\n"
			: "+e" (flush0),
			  "+e" (flush1),
			  "+e" (flush2),
			  "+e" (flush3)
			: "e" (step), "a" (0));
#endif
	}
}
283
284void metag_data_cache_flush_all(const void *start)
285{
286 if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_DC_ON_BIT) == 0)
287 /* No need to flush the data cache it's not actually enabled */
288 return;
289
290 metag_phys_data_cache_flush(start);
291}
292
/*
 * Flush @bytes bytes of the data cache starting at virtual address
 * @start.  Large (>= 4096 byte) regions fall back to a full partition
 * flush; otherwise lines are flushed individually, up to four per
 * iteration of the deliberately fall-through switch below.
 */
void metag_data_cache_flush(const void *start, int bytes)
{
	unsigned long flush0;
	int loops, step;

	if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_DC_ON_BIT) == 0)
		/* No need to flush the data cache it's not actually enabled */
		return;

	if (bytes >= 4096) {
		metag_phys_data_cache_flush(start);
		return;
	}

	/* Use linear cache flush mechanism on META IP */
	flush0 = (int)start;
	/* Number of cache lines covered, rounding for the misaligned head. */
	loops  = ((int)start & (DCACHE_LINE_BYTES - 1)) + bytes +
					(DCACHE_LINE_BYTES - 1);
	loops  >>= DCACHE_LINE_S;

/* Flush the cache line 'offset' lines (64 bytes each) past 'addr'. */
#define PRIM_FLUSH(addr, offset) do {			\
	int __addr = ((int) (addr)) + ((offset) * 64);	\
	__builtin_dcache_flush((void *)(__addr));	\
	} while (0)

#define LOOP_INC (4*64)

	do {
		/* By default stop */
		step = 0;

		switch (loops) {
		/* Drop Thru Cases! */
		default:
			PRIM_FLUSH(flush0, 3);
			loops -= 4;
			step = 1;
		case 3:
			PRIM_FLUSH(flush0, 2);
		case 2:
			PRIM_FLUSH(flush0, 1);
		case 1:
			PRIM_FLUSH(flush0, 0);
			flush0 += LOOP_INC;
		case 0:
			break;
		}
	} while (step);
}
EXPORT_SYMBOL(metag_data_cache_flush);
343
/*
 * metag_phys_code_cache_flush - flush instruction cache sets via writes
 * to the LINSYSCFLUSH_ICACHE_LINE physical flush region.
 * @start: used to pick the global vs local cache partition (negative
 *         addresses select the global partition) and, on Meta 1 only,
 *         to narrow the flush to the sets covering the range.
 * @bytes: size hint; on Meta 1 small ranges flush fewer lines.
 *
 * Writes a zero byte to up to four set aliases per iteration, wrapping
 * within the partition when the end of set 0 is reached.
 */
static void metag_phys_code_cache_flush(const void *start, int bytes)
{
	unsigned long flush0, flush1, flush2, flush3, end_set;
	int loops, step;
	int thread;
	int set_shift, set_size;
	int part, offset;

	/* Use a sequence of writes to flush the cache region requested */
	thread = (__core_reg_get(TXENABLE) & TXENABLE_THREAD_BITS)
					  >> TXENABLE_THREAD_S;
	set_shift = icache_set_shift;

	/* Move to the base of the physical cache flush region */
	flush0 = LINSYSCFLUSH_ICACHE_LINE;
	step = 64;

	/* Get partition code for this thread */
	part = metag_in32(SYSC_ICPART0 +
			  (SYSC_xCPARTn_STRIDE * thread));

	if ((int)start < 0)
		/* Access Global vs Local partition */
		part >>= SYSC_xCPARTG_AND_S-SYSC_xCPARTL_AND_S;

	/* Extract offset and move SetOff */
	offset = (part & SYSC_xCPARTL_OR_BITS)
			>> SYSC_xCPARTL_OR_S;
	flush0 += (offset << (set_shift - 4));

	/* Shrink size */
	part = (part & SYSC_xCPARTL_AND_BITS)
			>> SYSC_xCPARTL_AND_S;
	loops = ((part + 1) << (set_shift - 4));

	/* Where does the Set end? */
	end_set = flush0 + loops;
	set_size = loops;

#ifdef CONFIG_METAG_META12
	if ((bytes < 4096) && (bytes < loops)) {
		/* Unreachable on HTP/MTP */
		/* Only target the sets that could be relevant */
		flush0 += (loops - step) & ((int) start);
		loops = (((int) start) & (step-1)) + bytes + step - 1;
	}
#endif

	/* Reduce loops by step of cache line size */
	loops /= step;

	/* Aliases for the other three sets, one set-stride apart */
	flush1 = flush0 + (1<<set_shift);
	flush2 = flush0 + (2<<set_shift);
	flush3 = flush0 + (3<<set_shift);

	if (icache_sets_log2 == 1) {
		/* Only two sets: interleave the four flush pointers over
		 * them and double the stride to keep four writes per loop.
		 */
		flush2 = flush1;
		flush3 = flush1 + step;
		flush1 = flush0 + step;
#if 0
		/* flush0 will stop one line early in this case
		 * (flush1 will do the final line).
		 * However we don't correct end_set here at the moment
		 * because it will never wrap on HTP/MTP
		 */
		end_set -= step;
#endif
		step <<= 1;
		loops >>= 1;
	}

	/* Clear loops ways in cache */
	while (loops-- != 0) {
#if 0
		/*
		 * GCC doesn't generate very good code for this so we
		 * provide inline assembly instead.
		 */
		/* Clear the ways */
		metag_out8(0, flush0);
		metag_out8(0, flush1);
		metag_out8(0, flush2);
		metag_out8(0, flush3);

		flush0 += step;
		flush1 += step;
		flush2 += step;
		flush3 += step;
#else
		/* Store a zero byte through each pointer, post-incrementing
		 * by step; equivalent to the C sequence disabled above.
		 */
		asm volatile (
			"SETB\t[%0+%4++],%5\n"
			"SETB\t[%1+%4++],%5\n"
			"SETB\t[%2+%4++],%5\n"
			"SETB\t[%3+%4++],%5\n"
			: "+e" (flush0),
			  "+e" (flush1),
			  "+e" (flush2),
			  "+e" (flush3)
			: "e" (step), "a" (0));
#endif

		if (flush0 == end_set) {
			/* Wrap within Set 0 */
			flush0 -= set_size;
			flush1 -= set_size;
			flush2 -= set_size;
			flush3 -= set_size;
		}
	}
}
454
455void metag_code_cache_flush_all(const void *start)
456{
457 if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_IC_ON_BIT) == 0)
458 /* No need to flush the code cache it's not actually enabled */
459 return;
460
461 metag_phys_code_cache_flush(start, 4096);
462}
463EXPORT_SYMBOL(metag_code_cache_flush_all);
464
/*
 * metag_code_cache_flush - flush a virtual address range from the
 * instruction cache.
 * @start: start address of the region.
 * @bytes: length of the region in bytes.
 *
 * No-op while the instruction cache is disabled.  On Meta 1
 * (CONFIG_METAG_META12) the CACHEWD primitive is unavailable, so the
 * physical flush is always used.  Otherwise ranges of 4096 bytes or
 * more use the physical flush, and smaller ranges are flushed line by
 * line with __builtin_meta2_cachewd, four lines per loop iteration.
 */
void metag_code_cache_flush(const void *start, int bytes)
{
#ifndef CONFIG_METAG_META12
	void *flush;
	int loops, step;
#endif /* !CONFIG_METAG_META12 */

	if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_IC_ON_BIT) == 0)
		/* No need to flush the code cache it's not actually enabled */
		return;

#ifdef CONFIG_METAG_META12
	/* CACHEWD isn't available on Meta1, so always do full cache flush */
	metag_phys_code_cache_flush(start, bytes);

#else /* CONFIG_METAG_META12 */
	/* If large size do full physical cache flush */
	if (bytes >= 4096) {
		metag_phys_code_cache_flush(start, bytes);
		return;
	}

	/* Use linear cache flush mechanism on META IP */
	flush = (void *)((int)start & ~(ICACHE_LINE_BYTES-1));
	/* Number of cache lines covered, rounding up at both ends */
	loops = ((int)start & (ICACHE_LINE_BYTES-1)) + bytes +
		(ICACHE_LINE_BYTES-1);
	loops >>= ICACHE_LINE_S;

/* Writeback/discard one icache line at (addr + offset*64) */
#define PRIM_IFLUSH(addr, offset) \
	__builtin_meta2_cachewd(((addr) + ((offset) * 64)), CACHEW_ICACHE_BIT)

#define LOOP_INC (4*64)

	do {
		/* By default stop */
		step = 0;

		switch (loops) {
		/* Drop Thru Cases! */
		default:
			PRIM_IFLUSH(flush, 3);
			loops -= 4;
			step = 1;
		case 3:
			PRIM_IFLUSH(flush, 2);
		case 2:
			PRIM_IFLUSH(flush, 1);
		case 1:
			PRIM_IFLUSH(flush, 0);
			flush += LOOP_INC;
		case 0:
			break;
		}
	} while (step);
#endif /* !CONFIG_METAG_META12 */
}
EXPORT_SYMBOL(metag_code_cache_flush);
diff --git a/arch/metag/mm/extable.c b/arch/metag/mm/extable.c
deleted file mode 100644
index 9b92d3ad7f9c..000000000000
--- a/arch/metag/mm/extable.c
+++ /dev/null
@@ -1,15 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/extable.h>
3#include <linux/uaccess.h>
4
5int fixup_exception(struct pt_regs *regs)
6{
7 const struct exception_table_entry *fixup;
8 unsigned long pc = instruction_pointer(regs);
9
10 fixup = search_exception_tables(pc);
11 if (fixup)
12 regs->ctx.CurrPC = fixup->fixup;
13
14 return fixup != NULL;
15}
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
deleted file mode 100644
index de54fe686080..000000000000
--- a/arch/metag/mm/fault.c
+++ /dev/null
@@ -1,247 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Meta page fault handling.
4 *
5 * Copyright (C) 2005-2012 Imagination Technologies Ltd.
6 */
7
8#include <linux/mman.h>
9#include <linux/mm.h>
10#include <linux/kernel.h>
11#include <linux/ptrace.h>
12#include <linux/sched/debug.h>
13#include <linux/interrupt.h>
14#include <linux/uaccess.h>
15
16#include <asm/tlbflush.h>
17#include <asm/mmu.h>
18#include <asm/traps.h>
19
/*
 * Clear any pending catch buffer state.
 * @regs: trapped register context holding the extended catch buffer.
 * @addr: faulting address expected in the catch buffer entry.
 * @trapno: trap number; fetch faults carry no catch buffer state.
 */
static void clear_cbuf_entry(struct pt_regs *regs, unsigned long addr,
			     unsigned int trapno)
{
	PTBICTXEXTCB0 cbuf = regs->extcb0;

	switch (trapno) {
	/* Instruction fetch faults leave no catch buffer state. */
	case TBIXXF_SIGNUM_IGF:
	case TBIXXF_SIGNUM_IPF:
		return;
	default:
		if (cbuf[0].CBAddr == addr) {
			cbuf[0].CBAddr = 0;
			cbuf[0].CBFlags &= ~TXCATCH0_FAULT_BITS;

			/* And, as this is the ONLY catch entry, we
			 * need to clear the cbuf bit from the context!
			 */
			regs->ctx.SaveMask &= ~(TBICTX_CBUF_BIT |
						TBICTX_XCBF_BIT);

			return;
		}
		/* Entry didn't match the faulting address — leave it alone. */
		pr_err("Failed to clear cbuf entry!\n");
	}
}
47
48int show_unhandled_signals = 1;
49
/*
 * do_page_fault - handle a page fault taken by the current thread.
 * @regs: trapped register context.
 * @address: faulting virtual address.
 * @write_access: non-zero if the faulting access was a write.
 * @trapno: hardware trap number (TBIXXF_SIGNUM_*).
 *
 * Returns 0 when the fault was resolved (vmalloc sync, successful
 * handle_mm_fault, or fatal-signal bailout) and 1 when a signal was
 * delivered or a kernel fixup handled it; dies on an unhandled
 * kernel-mode fault.
 */
int do_page_fault(struct pt_regs *regs, unsigned long address,
		  unsigned int write_access, unsigned int trapno)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *prev_vma;
	siginfo_t info;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;

	/* Kernel vmalloc fault: sync from the reference page table
	 * (swapper_pg_dir) without touching any task state.
	 */
	if ((address >= VMALLOC_START) && (address < VMALLOC_END)) {
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = ((pgd_t *)mmu_get_base()) + offset;
		pgd_k = swapper_pg_dir + offset;

		/* This will never happen with the folded page table. */
		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			set_pgd(pgd, *pgd_k);
			return 0;
		}

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto bad_area_nosemaphore;
		set_pud(pud, *pud_k);

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto bad_area_nosemaphore;

		/* May only be needed on Chorus2 */
		flush_tlb_all();
		return 0;
	}

	mm = tsk->mm;

	/* Faults with no usable mm (atomic context, kernel thread)
	 * can only be handled via the exception tables.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
retry:
	down_read(&mm->mmap_sem);

	vma = find_vma_prev(mm, address, &prev_vma);

	if (!vma || address < vma->vm_start)
		goto check_expansion;

good_area:
	/* Verify the vma permits the attempted access. */
	if (write_access) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return 0;

check_expansion:
	/* Address just below a vma: try growing the stack down to it. */
	vma = prev_vma;
	if (vma && (expand_stack(vma, address) == 0))
		goto good_area;

bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (__force void __user *)address;
		info.si_trapno = trapno;

		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
		    printk_ratelimit()) {
			printk("%s%s[%d]: segfault at %lx pc %08x sp %08x write %d trap %#x (%s)",
			       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
			       tsk->comm, task_pid_nr(tsk), address,
			       regs->ctx.CurrPC, regs->ctx.AX[0].U0,
			       write_access, trapno, trap_name(trapno));
			print_vma_addr(" in ", regs->ctx.CurrPC);
			print_vma_addr(" rtp in ", regs->ctx.DX[4].U1);
			printk("\n");
			show_regs(regs);
		}
		force_sig_info(SIGSEGV, &info, tsk);
		return 1;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (__force void __user *)address;
	info.si_trapno = trapno;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	return 1;

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (user_mode(regs)) {
		pagefault_out_of_memory();
		return 1;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs)) {
		clear_cbuf_entry(regs, address, trapno);
		return 1;
	}

	die("Oops", regs, (write_access << 15) | trapno, address);
	do_exit(SIGKILL);
}
diff --git a/arch/metag/mm/highmem.c b/arch/metag/mm/highmem.c
deleted file mode 100644
index 83527fc7c8a7..000000000000
--- a/arch/metag/mm/highmem.c
+++ /dev/null
@@ -1,122 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/export.h>
3#include <linux/highmem.h>
4#include <linux/sched.h>
5#include <linux/smp.h>
6#include <linux/interrupt.h>
7#include <asm/fixmap.h>
8#include <asm/tlbflush.h>
9
10static pte_t *kmap_pte;
11
12unsigned long highstart_pfn, highend_pfn;
13
/*
 * kmap - map a page into kernel virtual address space; may sleep.
 * @page: page to map; lowmem pages return their direct-map address.
 */
void *kmap(struct page *page)
{
	might_sleep();
	return PageHighMem(page) ? kmap_high(page) : page_address(page);
}
EXPORT_SYMBOL(kmap);
22
/*
 * kunmap - undo a kmap() mapping; lowmem pages need no work.
 * @page: page previously mapped with kmap(); must not be called
 *        from interrupt context.
 */
void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (PageHighMem(page))
		kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
31
32/*
33 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
34 * no global lock is needed and because the kmap code must perform a global TLB
35 * invalidation when the kmap pool wraps.
36 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
38 * kmaps are appropriate for short, tight code paths only.
39 */
40
/*
 * kmap_atomic - map a page into a per-cpu fixmap slot without sleeping.
 * @page: page to map; lowmem pages return their direct-map address.
 *
 * Disables preemption and pagefaults; pair with __kunmap_atomic().
 */
void *kmap_atomic(struct page *page)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	int type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	/* Pick the next free fixmap slot for this cpu. */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/* Slot must be empty — catches unbalanced kmap_atomic nesting. */
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
63
/*
 * __kunmap_atomic - release a kmap_atomic() mapping.
 * @kvaddr: address returned by kmap_atomic(); lowmem addresses
 *          (below FIXADDR_START) need only the preempt/pagefault undo.
 */
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remap it. Keeping stale mappings around
		 * is a bad idea also, in case the page changes cacheability
		 * attributes or becomes a protected page in a hypervisor.
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	}

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
89
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 *
 * Unlike kmap_atomic(), the new mapping's TLB entry is flushed
 * immediately since there is no lazy path for raw pfns here.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	int type;

	preempt_disable();
	pagefault_disable();

	/* Pick the next free fixmap slot for this cpu. */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);

	return (void *)vaddr;
}
114
115void __init kmap_init(void)
116{
117 unsigned long kmap_vstart;
118
119 /* cache the first kmap pte */
120 kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
121 kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
122}
diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
deleted file mode 100644
index 012ee4c80dc7..000000000000
--- a/arch/metag/mm/hugetlbpage.c
+++ /dev/null
@@ -1,251 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * arch/metag/mm/hugetlbpage.c
4 *
5 * METAG HugeTLB page support.
6 *
7 * Cloned from SuperH
8 *
9 * Cloned from sparc64 by Paul Mundt.
10 *
11 * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
12 */
13
14#include <linux/init.h>
15#include <linux/fs.h>
16#include <linux/mm.h>
17#include <linux/hugetlb.h>
18#include <linux/pagemap.h>
19#include <linux/sysctl.h>
20
21#include <asm/mman.h>
22#include <asm/pgalloc.h>
23#include <asm/tlb.h>
24#include <asm/tlbflush.h>
25#include <asm/cacheflush.h>
26
/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 *
 * Additionally reject ranges whose surrounding huge-pt blocks would
 * overlap non-hugetlb vmas.  Returns 0 on success, -EINVAL otherwise.
 */
int prepare_hugepage_range(struct file *file, unsigned long addr,
						unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct hstate *h = hstate_file(file);
	struct vm_area_struct *vma;

	/* Both length and address must be huge-page aligned and in range. */
	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	if (TASK_SIZE - len < addr)
		return -EINVAL;

	/* The huge-pt block containing addr must not hold a normal vma. */
	vma = find_vma(mm, ALIGN_HUGEPT(addr));
	if (vma && !(vma->vm_flags & MAP_HUGETLB))
		return -EINVAL;

	vma = find_vma(mm, addr);
	if (vma) {
		if (addr + len > vma->vm_start)
			return -EINVAL;
		if (!(vma->vm_flags & MAP_HUGETLB) &&
		    (ALIGN_HUGEPT(addr + len) > vma->vm_start))
			return -EINVAL;
	}
	return 0;
}
59
/*
 * huge_pte_alloc - allocate (or find) the pte for a huge mapping and
 * mark the pgd entry with the huge page-size bits.
 * @mm: address space.
 * @addr: huge-page-aligned virtual address.
 * @sz: huge page size (unused here; single huge size).
 */
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	pte = pte_alloc_map(mm, pmd, addr);
	/* Record the huge page size in the top-level entry. */
	pgd->pgd &= ~_PAGE_SZ_MASK;
	pgd->pgd |= _PAGE_SZHUGE;

	return pte;
}
77
78pte_t *huge_pte_offset(struct mm_struct *mm,
79 unsigned long addr, unsigned long sz)
80{
81 pgd_t *pgd;
82 pud_t *pud;
83 pmd_t *pmd;
84 pte_t *pte = NULL;
85
86 pgd = pgd_offset(mm, addr);
87 pud = pud_offset(pgd, addr);
88 pmd = pmd_offset(pud, addr);
89 pte = pte_offset_kernel(pmd, addr);
90
91 return pte;
92}
93
/* A pmd maps huge pages iff its page size exceeds the base page size. */
int pmd_huge(pmd_t pmd)
{
	return pmd_page_shift(pmd) > PAGE_SHIFT;
}
98
/* No huge mappings at the pud level on this architecture. */
int pud_huge(pud_t pud)
{
	return 0;
}
103
/* Huge pmd following is not supported: always returns NULL. */
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}
109
110#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
111
112/*
113 * Look for an unmapped area starting after another hugetlb vma.
114 * There are guaranteed to be no huge pte's spare if all the huge pages are
115 * full size (4MB), so in that case compile out this search.
116 */
117#if HPAGE_SHIFT == HUGEPT_SHIFT
static inline unsigned long
hugetlb_get_unmapped_area_existing(unsigned long len)
{
	/* Full-size huge pages only: no partially used huge pts to reuse. */
	return 0;
}
123#else
/*
 * hugetlb_get_unmapped_area_existing - find space for a huge mapping
 * near existing hugetlb vmas, preferring partially used huge-pt blocks
 * (tracked in mm->context.part_huge).  Returns 0 when no suitable gap
 * exists.
 */
static unsigned long
hugetlb_get_unmapped_area_existing(unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr, addr;
	int after_huge;		/* currently scanning just past a huge vma */

	if (mm->context.part_huge) {
		/* Resume at the last partially-filled huge-pt block. */
		start_addr = mm->context.part_huge;
		after_huge = 1;
	} else {
		start_addr = TASK_UNMAPPED_BASE;
		after_huge = 0;
	}
new_search:
	addr = start_addr;

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		if ((!vma && !after_huge) || TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				goto new_search;
			}
			return 0;
		}
		/* skip ahead if we've aligned right over some vmas */
		if (vma && vma->vm_end <= addr)
			continue;
		/* space before the next vma? */
		if (after_huge && (!vma || ALIGN_HUGEPT(addr + len)
			    <= vma->vm_start)) {
			unsigned long end = addr + len;
			/* Remember (or forget) the partial huge-pt block. */
			if (end & HUGEPT_MASK)
				mm->context.part_huge = end;
			else if (addr == mm->context.part_huge)
				mm->context.part_huge = 0;
			return addr;
		}
		if (vma->vm_flags & MAP_HUGETLB) {
			/* space after a huge vma in 2nd level page table? */
			if (vma->vm_end & HUGEPT_MASK) {
				after_huge = 1;
				/* no need to align to the next PT block */
				addr = vma->vm_end;
				continue;
			}
		}
		after_huge = 0;
		addr = ALIGN_HUGEPT(vma->vm_end);
	}
}
180#endif
181
182/* Do a full search to find an area without any nearby normal pages. */
183static unsigned long
184hugetlb_get_unmapped_area_new_pmd(unsigned long len)
185{
186 struct vm_unmapped_area_info info;
187
188 info.flags = 0;
189 info.length = len;
190 info.low_limit = TASK_UNMAPPED_BASE;
191 info.high_limit = TASK_SIZE;
192 info.align_mask = PAGE_MASK & HUGEPT_MASK;
193 info.align_offset = 0;
194 return vm_unmapped_area(&info);
195}
196
/*
 * hugetlb_get_unmapped_area - arch hook to choose an address for a new
 * hugetlb mapping.
 * @file: backing hugetlbfs file (determines the huge page size).
 * @addr: caller-suggested address hint, may be 0.
 * @len: mapping length, must be huge-page aligned.
 * @pgoff: file offset (unused here).
 * @flags: mmap flags; MAP_FIXED validates @addr as-is.
 */
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* Honour the hint when the aligned range checks out. */
	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		if (!prepare_hugepage_range(file, addr, len))
			return addr;
	}

	/*
	 * Look for an existing hugetlb vma with space after it (this is to
	 * minimise fragmentation caused by huge pages).
	 */
	addr = hugetlb_get_unmapped_area_existing(len);
	if (addr)
		return addr;

	/*
	 * Find an unmapped naturally aligned set of 4MB blocks that we can use
	 * for huge pages.
	 */
	return hugetlb_get_unmapped_area_new_pmd(len);
}
234
235#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
236
/* necessary for boot time 4MB huge page allocation */
/*
 * Parse the "hugepagesz=" kernel parameter; only the single native
 * huge page size (1 << HPAGE_SHIFT) is accepted.
 */
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == (1 << HPAGE_SHIFT)) {
		hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
	} else {
		hugetlb_bad_size();
		pr_err("hugepagesz: Unsupported page size %lu M\n",
		       ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c
deleted file mode 100644
index 0e2ca9058998..000000000000
--- a/arch/metag/mm/init.c
+++ /dev/null
@@ -1,408 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2005,2006,2007,2008,2009,2010 Imagination Technologies
4 *
5 */
6
7#include <linux/export.h>
8#include <linux/mm.h>
9#include <linux/swap.h>
10#include <linux/init.h>
11#include <linux/bootmem.h>
12#include <linux/pagemap.h>
13#include <linux/percpu.h>
14#include <linux/memblock.h>
15#include <linux/initrd.h>
16#include <linux/sched/task.h>
17
18#include <asm/setup.h>
19#include <asm/page.h>
20#include <asm/pgalloc.h>
21#include <asm/mmu.h>
22#include <asm/mmu_context.h>
23#include <asm/sections.h>
24#include <asm/tlb.h>
25#include <asm/user_gateway.h>
26#include <asm/mmzone.h>
27#include <asm/fixmap.h>
28
29unsigned long pfn_base;
30EXPORT_SYMBOL(pfn_base);
31
32pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data;
33
34unsigned long empty_zero_page;
35EXPORT_SYMBOL(empty_zero_page);
36
37extern char __user_gateway_start;
38extern char __user_gateway_end;
39
40void *gateway_page;
41
/*
 * Insert the gateway page into a set of page tables, creating the
 * page tables if necessary.
 *
 * @pgd must already be present; the pmd is allocated from bootmem when
 * missing, and the final pte maps gateway_page read-only.
 */
static void insert_gateway_page(pgd_t *pgd, unsigned long address)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	BUG_ON(!pgd_present(*pgd));

	pud = pud_offset(pgd, address);
	BUG_ON(!pud_present(*pud));

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd)) {
		pte = alloc_bootmem_pages(PAGE_SIZE);
		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
	}

	pte = pte_offset_kernel(pmd, address);
	set_pte(pte, pfn_pte(__pa(gateway_page) >> PAGE_SHIFT, PAGE_READONLY));
}
66
/* Alloc and map a page in a known location accessible to userspace. */
/*
 * Copies the gateway code (__user_gateway_start..__user_gateway_end,
 * which must fit in one page) into a bootmem page mapped read-only at
 * USER_GATEWAY_PAGE.
 */
static void __init user_gateway_init(void)
{
	unsigned long address = USER_GATEWAY_PAGE;
	int offset = pgd_index(address);
	pgd_t *pgd;

	gateway_page = alloc_bootmem_pages(PAGE_SIZE);

	pgd = swapper_pg_dir + offset;
	insert_gateway_page(pgd, address);

#ifdef CONFIG_METAG_META12
	/*
	 * Insert the gateway page into our current page tables even
	 * though we've already inserted it into our reference page
	 * table (swapper_pg_dir). This is because with a META1 mmu we
	 * copy just the user address range and not the gateway page
	 * entry on context switch, see switch_mmu().
	 */
	pgd = (pgd_t *)mmu_get_base() + offset;
	insert_gateway_page(pgd, address);
#endif /* CONFIG_METAG_META12 */

	BUG_ON((&__user_gateway_end - &__user_gateway_start) > PAGE_SIZE);

	/* Keep the same sub-page offset as the target address. */
	gateway_page += (address & ~PAGE_MASK);

	memcpy(gateway_page, &__user_gateway_start,
	       &__user_gateway_end - &__user_gateway_start);

	/*
	 * We don't need to flush the TLB here, there should be no mapping
	 * present at boot for this address and only valid mappings are in
	 * the TLB (apart from on Meta 1.x, but those cached invalid
	 * mappings should be impossible to hit here).
	 *
	 * We don't flush the code cache here even though we have written
	 * code through the data cache and they may not be coherent. At
	 * this point we assume there is no stale data in the code cache
	 * for this address so there is no need to flush.
	 */
}
110
/*
 * allocate_pgdat - set up the pglist_data for a node.
 * @nid: node id.
 *
 * With multiple nodes the pglist_data itself is carved out of
 * memblock (preferring node-local memory); otherwise the static
 * NODE_DATA is just filled in with the node's pfn span.
 */
static void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long phys;
#endif

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	phys = __memblock_alloc_base(sizeof(struct pglist_data),
				     SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
	/* Retry with all of system memory */
	if (!phys)
		phys = __memblock_alloc_base(sizeof(struct pglist_data),
					     SMP_CACHE_BYTES,
					     memblock_end_of_DRAM());
	if (!phys)
		panic("Can't allocate pgdat for node %d\n", nid);

	NODE_DATA(nid) = __va(phys);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}
140
/*
 * bootmem_init_one_node - initialise the bootmem allocator for a node.
 * @nid: node id; nodes with no spanned pages are skipped.
 *
 * Allocates the bootmem bitmap from memblock, registers the node's
 * free ranges, and (for node 0 only) re-reserves the lowmem portions
 * of the regions memblock already holds reserved.
 */
static void __init bootmem_init_one_node(unsigned int nid)
{
	unsigned long total_pages, paddr;
	unsigned long end_pfn;
	struct pglist_data *p;

	p = NODE_DATA(nid);

	/* Nothing to do.. */
	if (!p->node_spanned_pages)
		return;

	end_pfn = pgdat_end_pfn(p);
#ifdef CONFIG_HIGHMEM
	/* Bootmem only manages lowmem; clamp to the highmem boundary. */
	if (end_pfn > max_low_pfn)
		end_pfn = max_low_pfn;
#endif

	total_pages = bootmem_bootmap_pages(end_pfn - p->node_start_pfn);

	paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
	if (!paddr)
		panic("Can't allocate bootmap for nid[%d]\n", nid);

	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/*
	 * XXX Handle initial reservations for the system memory node
	 * only for the moment, we'll refactor this later for handling
	 * reservations in other nodes.
	 */
	if (nid == 0) {
		struct memblock_region *reg;

		/* Reserve the sections we're already using. */
		for_each_memblock(reserved, reg) {
			unsigned long size = reg->size;

#ifdef CONFIG_HIGHMEM
			/* ...but not highmem */
			if (PFN_DOWN(reg->base) >= highstart_pfn)
				continue;

			if (PFN_UP(reg->base + size) > highstart_pfn)
				size = (highstart_pfn - PFN_DOWN(reg->base))
				       << PAGE_SHIFT;
#endif

			reserve_bootmem(reg->base, size, BOOTMEM_DEFAULT);
		}
	}

	sparse_memory_present_with_active_regions(nid);
}
197
/*
 * do_init_bootmem - register memblock memory with node 0, bring the
 * node online, and initialise bootmem and sparsemem for every online
 * node (soc_mem_setup() may add further nodes before the loop).
 */
static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;
	int i;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, 0);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	soc_mem_setup();

	for_each_online_node(i)
		bootmem_init_one_node(i);

	sparse_init();
}
224
225extern char _heap_start[];
226
/*
 * init_and_reserve_mem - register lowmem (and highmem) with memblock
 * and reserve everything below the kernel heap start, i.e. the kernel
 * image itself.
 */
static void __init init_and_reserve_mem(void)
{
	unsigned long start_pfn, heap_start;
	u64 base = min_low_pfn << PAGE_SHIFT;
	u64 size = (max_low_pfn << PAGE_SHIFT) - base;

	heap_start = (unsigned long) &_heap_start;

	memblock_add(base, size);

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(heap_start));

	/*
	 * Reserve the kernel text.
	 */
	memblock_reserve(base, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - base);

#ifdef CONFIG_HIGHMEM
	/*
	 * Add & reserve highmem, so page structures are initialised.
	 */
	base = highstart_pfn << PAGE_SHIFT;
	size = (highend_pfn << PAGE_SHIFT) - base;
	if (size) {
		memblock_add(base, size);
		memblock_reserve(base, size);
	}
#endif
}
260
261#ifdef CONFIG_HIGHMEM
/*
 * Ensure we have allocated page tables in swapper_pg_dir for the
 * fixed mappings range from 'start' to 'end'.
 *
 * Missing pmd entries are populated with zeroed bootmem pages; entries
 * that already exist are left untouched.
 */
static void __init allocate_pgtables(unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = swapper_pg_dir + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		/* pgd and pmd levels are folded together here. */
		pmd = (pmd_t *)pgd;
		for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
			vaddr += PMD_SIZE;

			if (!pmd_none(*pmd))
				continue;

			pte = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
			pmd_populate_kernel(&init_mm, pmd, pte);
		}
		j = 0;
	}
}
293
/*
 * fixedrange_init - create page tables for the fixmap range and the
 * permanent kmap (PKMAP) window, and cache pkmap_page_table for the
 * highmem code.
 */
static void __init fixedrange_init(void)
{
	unsigned long vaddr, end;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Fixed mappings:
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	allocate_pgtables(vaddr, end);

	/*
	 * Permanent kmaps:
	 */
	vaddr = PKMAP_BASE;
	allocate_pgtables(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}
321#endif /* CONFIG_HIGHMEM */
322
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/metag/kernel/setup.c.
 *
 * @mem_end: end of memory, passed through to mmu_init().
 *
 * Registers and reserves memory with memblock, initialises bootmem
 * per node, sets up highmem fixmaps/kmaps when configured, maps the
 * user gateway page, and finally hands the per-zone pfn limits to
 * free_area_init_nodes().
 */
void __init paging_init(unsigned long mem_end)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	int nid;

	init_and_reserve_mem();

	memblock_allow_resize();

	memblock_dump_all();

	nodes_clear(node_online_map);

	init_new_context(&init_task, &init_mm);

	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	do_init_bootmem();
	mmu_init(mem_end);

#ifdef CONFIG_HIGHMEM
	fixedrange_init();
	kmap_init();
#endif

	/* Initialize the zero page to a bootmem page, already zeroed. */
	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);

	user_gateway_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		/* ZONE_NORMAL ends at the highest lowmem pfn of any node. */
		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

#ifdef CONFIG_HIGHMEM
		max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif
		pr_info("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
			nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}
378
/*
 * mem_init - release boot memory to the page allocator and print the
 * memory summary.  Highmem pages are freed individually first.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	/*
	 * Explicitly reset zone->managed_pages because highmem pages are
	 * freed before calling free_all_bootmem();
	 */
	reset_all_zones_managed_pages();
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++)
		free_highmem_page(pfn_to_page(tmp));
#endif /* CONFIG_HIGHMEM */

	free_all_bootmem();
	mem_init_print_info(NULL);
}
396
/* Free and poison the __init sections once boot has completed. */
void free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}
401
402#ifdef CONFIG_BLK_DEV_INITRD
/* Return the initrd image region to the page allocator, poisoned. */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
408#endif
diff --git a/arch/metag/mm/ioremap.c b/arch/metag/mm/ioremap.c
deleted file mode 100644
index df2b59cb02eb..000000000000
--- a/arch/metag/mm/ioremap.c
+++ /dev/null
@@ -1,90 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Re-map IO memory to kernel address space so that we can access it.
4 * Needed for memory-mapped I/O devices mapped outside our normal DRAM
5 * window (that is, all memory-mapped I/O devices).
6 *
7 * Copyright (C) 1995,1996 Linus Torvalds
8 *
9 * Meta port based on CRIS-port by Axis Communications AB
10 */
11
12#include <linux/vmalloc.h>
13#include <linux/io.h>
14#include <linux/export.h>
15#include <linux/slab.h>
16#include <linux/mm.h>
17
18#include <asm/pgtable.h>
19
20/*
21 * Remap an arbitrary physical address space into the kernel virtual
22 * address space. Needed when the kernel wants to access high addresses
23 * directly.
24 *
25 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
26 * have to convert them into an offset in a page-aligned mapping, but the
27 * caller shouldn't need to know that small detail.
28 */
29void __iomem *__ioremap(unsigned long phys_addr, size_t size,
30 unsigned long flags)
31{
32 unsigned long addr;
33 struct vm_struct *area;
34 unsigned long offset, last_addr;
35 pgprot_t prot;
36
37 /* Don't allow wraparound or zero size */
38 last_addr = phys_addr + size - 1;
39 if (!size || last_addr < phys_addr)
40 return NULL;
41
42 /* Custom region addresses are accessible and uncached by default. */
43 if (phys_addr >= LINSYSCUSTOM_BASE &&
44 phys_addr < (LINSYSCUSTOM_BASE + LINSYSCUSTOM_LIMIT))
45 return (__force void __iomem *) phys_addr;
46
47 /*
48 * Mappings have to be page-aligned
49 */
50 offset = phys_addr & ~PAGE_MASK;
51 phys_addr &= PAGE_MASK;
52 size = PAGE_ALIGN(last_addr+1) - phys_addr;
53 prot = __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY |
54 _PAGE_ACCESSED | _PAGE_KERNEL | _PAGE_CACHE_WIN0 |
55 flags);
56
57 /*
58 * Ok, go for it..
59 */
60 area = get_vm_area(size, VM_IOREMAP);
61 if (!area)
62 return NULL;
63 area->phys_addr = phys_addr;
64 addr = (unsigned long) area->addr;
65 if (ioremap_page_range(addr, addr + size, phys_addr, prot)) {
66 vunmap((void *) addr);
67 return NULL;
68 }
69 return (__force void __iomem *) (offset + (char *)addr);
70}
71EXPORT_SYMBOL(__ioremap);
72
73void __iounmap(void __iomem *addr)
74{
75 struct vm_struct *p;
76
77 if ((__force unsigned long)addr >= LINSYSCUSTOM_BASE &&
78 (__force unsigned long)addr < (LINSYSCUSTOM_BASE +
79 LINSYSCUSTOM_LIMIT))
80 return;
81
82 p = remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr));
83 if (unlikely(!p)) {
84 pr_err("iounmap: bad address %p\n", addr);
85 return;
86 }
87
88 kfree(p);
89}
90EXPORT_SYMBOL(__iounmap);
diff --git a/arch/metag/mm/l2cache.c b/arch/metag/mm/l2cache.c
deleted file mode 100644
index addffc58989c..000000000000
--- a/arch/metag/mm/l2cache.c
+++ /dev/null
@@ -1,193 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/init.h>
3#include <linux/kernel.h>
4#include <linux/delay.h>
5
6#include <asm/l2cache.h>
7#include <asm/metag_isa.h>
8
9/* If non-0, then initialise the L2 cache */
10static int l2cache_init = 1;
11/* If non-0, then initialise the L2 cache prefetch */
12static int l2cache_init_pf = 1;
13
14int l2c_pfenable;
15
16static volatile u32 l2c_testdata[16] __initdata __aligned(64);
17
18static int __init parse_l2cache(char *p)
19{
20 char *cp = p;
21
22 if (get_option(&cp, &l2cache_init) != 1) {
23 pr_err("Bad l2cache parameter (%s)\n", p);
24 return 1;
25 }
26 return 0;
27}
28early_param("l2cache", parse_l2cache);
29
30static int __init parse_l2cache_pf(char *p)
31{
32 char *cp = p;
33
34 if (get_option(&cp, &l2cache_init_pf) != 1) {
35 pr_err("Bad l2cache_pf parameter (%s)\n", p);
36 return 1;
37 }
38 return 0;
39}
40early_param("l2cache_pf", parse_l2cache_pf);
41
/*
 * meta_l2c_setup() - early initialisation of the Meta L2 cache.
 *
 * Probes for the L2 cache, records the bootloader's prefetch state in
 * l2c_pfenable and, unless disabled via the l2cache/l2cache_pf boot
 * parameters, enables the cache and its prefetcher.  Registered as a
 * core initcall; always returns 0.
 */
static int __init meta_l2c_setup(void)
{
	/*
	 * If the L2 cache isn't even present, don't do anything, but say so in
	 * the log.
	 */
	if (!meta_l2c_is_present()) {
		pr_info("L2 Cache: Not present\n");
		return 0;
	}

	/*
	 * Check whether the line size is recognised.
	 */
	if (!meta_l2c_linesize()) {
		pr_warn_once("L2 Cache: unknown line size id (config=0x%08x)\n",
			     meta_l2c_config());
	}

	/*
	 * Initialise state.
	 */
	l2c_pfenable = _meta_l2c_pf_is_enabled();

	/*
	 * Enable the L2 cache and print to log whether it was already enabled
	 * by the bootloader.
	 */
	if (l2cache_init) {
		pr_info("L2 Cache: Enabling... ");
		if (meta_l2c_enable())
			pr_cont("already enabled\n");
		else
			pr_cont("done\n");
	} else {
		pr_info("L2 Cache: Not enabling\n");
	}

	/*
	 * Enable L2 cache prefetch.
	 */
	if (l2cache_init_pf) {
		pr_info("L2 Cache: Enabling prefetch... ");
		if (meta_l2c_pf_enable(1))
			pr_cont("already enabled\n");
		else
			pr_cont("done\n");
	} else {
		pr_info("L2 Cache: Not enabling prefetch\n");
	}

	return 0;
}
core_initcall(meta_l2c_setup);
96
/*
 * meta_l2c_disable() - write back and disable the L2 cache.
 *
 * The prefetcher is stopped, the cache purged and then disabled, all
 * under the heavyweight global lock so no other thread can issue writes
 * that would be lost once the L2 is off.
 *
 * Returns 0 if the cache was enabled on entry (and is now disabled),
 * non-zero if it was already disabled or no L2 cache is present.
 */
int meta_l2c_disable(void)
{
	unsigned long flags;
	int en;

	if (!meta_l2c_is_present())
		return 1;

	/*
	 * Prevent other threads writing during the writeback, otherwise the
	 * writes will get "lost" when the L2 is disabled.
	 */
	__global_lock2(flags);
	en = meta_l2c_is_enabled();
	if (likely(en)) {
		_meta_l2c_pf_enable(0);
		wr_fence();
		_meta_l2c_purge();
		_meta_l2c_enable(0);
	}
	__global_unlock2(flags);

	return !en;
}
121
/*
 * meta_l2c_enable() - initialise and enable the L2 cache.
 *
 * Returns the previous enable state: non-zero if the cache was already
 * enabled (in which case it is left untouched, since re-initialising it
 * would discard dirty lines), 0 if it was disabled or is not present.
 */
int meta_l2c_enable(void)
{
	unsigned long flags;
	int en;

	if (!meta_l2c_is_present())
		return 0;

	/*
	 * Init (clearing the L2) can happen while the L2 is disabled, so other
	 * threads are safe to continue executing, however we must not init the
	 * cache if it's already enabled (dirty lines would be discarded), so
	 * this operation should still be atomic with other threads.
	 */
	__global_lock1(flags);
	en = meta_l2c_is_enabled();
	if (likely(!en)) {
		_meta_l2c_init();
		_meta_l2c_enable(1);
		_meta_l2c_pf_enable(l2c_pfenable);
	}
	__global_unlock1(flags);

	return en;
}
147
148int meta_l2c_pf_enable(int pfenable)
149{
150 unsigned long flags;
151 int en = l2c_pfenable;
152
153 if (!meta_l2c_is_present())
154 return 0;
155
156 /*
157 * We read modify write the enable register, so this operation must be
158 * atomic with other threads.
159 */
160 __global_lock1(flags);
161 en = l2c_pfenable;
162 l2c_pfenable = pfenable;
163 if (meta_l2c_is_enabled())
164 _meta_l2c_pf_enable(pfenable);
165 __global_unlock1(flags);
166
167 return en;
168}
169
/*
 * meta_l2c_flush() - write back and invalidate the entire L2 cache.
 *
 * Implemented as disable + purge + re-init + re-enable, all under the
 * heavyweight global lock so no other thread can write while lines are
 * being discarded.  Returns 0 if the cache was enabled (and has been
 * flushed), non-zero if it was disabled.
 */
int meta_l2c_flush(void)
{
	unsigned long flags;
	int en;

	/*
	 * Prevent other threads writing during the writeback. This also
	 * involves read modify writes.
	 */
	__global_lock2(flags);
	en = meta_l2c_is_enabled();
	if (likely(en)) {
		_meta_l2c_pf_enable(0);
		wr_fence();
		_meta_l2c_purge();
		_meta_l2c_enable(0);
		_meta_l2c_init();
		_meta_l2c_enable(1);
		_meta_l2c_pf_enable(l2c_pfenable);
	}
	__global_unlock2(flags);

	return !en;
}
diff --git a/arch/metag/mm/maccess.c b/arch/metag/mm/maccess.c
deleted file mode 100644
index c22755165df9..000000000000
--- a/arch/metag/mm/maccess.c
+++ /dev/null
@@ -1,69 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * safe read and write memory routines callable while atomic
4 *
5 * Copyright 2012 Imagination Technologies
6 */
7
8#include <linux/uaccess.h>
9#include <asm/io.h>
10
11/*
12 * The generic probe_kernel_write() uses the user copy code which can split the
13 * writes if the source is unaligned, and repeats writes to make exceptions
14 * precise. We override it here to avoid these things happening to memory mapped
15 * IO memory where they could have undesired effects.
16 * Due to the use of CACHERD instruction this only works on Meta2 onwards.
17 */
#ifdef CONFIG_METAG_META21
/*
 * probe_kernel_write() - safely write @size bytes from @src to @dst.
 *
 * Performs a single, aligned, correctly-sized write when the
 * destination mapping permits it (important for MMIO, where split or
 * repeated writes could have side effects); falls back to the generic
 * __probe_kernel_write() for write-combined memory, unaligned
 * destinations, sizes over 8 bytes, or non-power-of-2 sizes.
 *
 * Returns 0 on success, -EFAULT if the destination mapping is not
 * valid and writeable, or the fallback's return value.
 */
long probe_kernel_write(void *dst, const void *src, size_t size)
{
	unsigned long ldst = (unsigned long)dst;
	void __iomem *iodst = (void __iomem *)dst;
	unsigned long lsrc = (unsigned long)src;
	const u8 *psrc = (u8 *)src;
	unsigned int pte, i;
	u8 bounce[8] __aligned(8);

	if (!size)
		return 0;

	/* Use the write combine bit to decide if the destination is MMIO. */
	pte = __builtin_meta2_cacherd(dst);

	/* Check the mapping is valid and writeable. */
	if ((pte & (MMCU_ENTRY_WR_BIT | MMCU_ENTRY_VAL_BIT))
	    != (MMCU_ENTRY_WR_BIT | MMCU_ENTRY_VAL_BIT))
		return -EFAULT;

	/* Fall back to generic version for cases we're not interested in. */
	if (pte & MMCU_ENTRY_WRC_BIT ||	/* write combined memory */
	    (ldst & (size - 1)) ||	/* destination unaligned */
	    size > 8 ||			/* more than max write size */
	    (size & (size - 1)))	/* non power of 2 size */
		return __probe_kernel_write(dst, src, size);

	/* If src is unaligned, copy to the aligned bounce buffer first. */
	if (lsrc & (size - 1)) {
		for (i = 0; i < size; ++i)
			bounce[i] = psrc[i];
		psrc = bounce;
	}

	switch (size) {
	case 1:
		writeb(*psrc, iodst);
		break;
	case 2:
		writew(*(const u16 *)psrc, iodst);
		break;
	case 4:
		writel(*(const u32 *)psrc, iodst);
		break;
	case 8:
		writeq(*(const u64 *)psrc, iodst);
		break;
	}
	return 0;
}
#endif
diff --git a/arch/metag/mm/mmu-meta1.c b/arch/metag/mm/mmu-meta1.c
deleted file mode 100644
index 53190b13dc54..000000000000
--- a/arch/metag/mm/mmu-meta1.c
+++ /dev/null
@@ -1,157 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2005,2006,2007,2008,2009 Imagination Technologies
4 *
5 * Meta 1 MMU handling code.
6 *
7 */
8
9#include <linux/sched.h>
10#include <linux/mm.h>
11#include <linux/io.h>
12
13#include <asm/mmu.h>
14
15#define DM3_BASE (LINSYSDIRECT_BASE + (MMCU_DIRECTMAPn_ADDR_SCALE * 3))
16
17/*
18 * This contains the physical address of the top level 2k pgd table.
19 */
20static unsigned long mmu_base_phys;
21
22/*
23 * Given a physical address, return a mapped virtual address that can be used
24 * to access that location.
25 * In practice, we use the DirectMap region to make this happen.
26 */
/*
 * map_addr() - return a virtual address through which @phys can be
 * accessed, (re)programming the DirectMap3 hardware window as needed.
 *
 * NOTE(review): uses unlocked static state and a shared hardware
 * window register — presumably callers are serialised; confirm.
 */
static unsigned long map_addr(unsigned long phys)
{
	/* Physical base currently mapped by DirectMap3; starts invalid. */
	static unsigned long dm_base = 0xFFFFFFFF;
	int offset;

	offset = phys - dm_base;

	/* Are we in the current map range ? */
	if ((offset < 0) || (offset >= MMCU_DIRECTMAPn_ADDR_SCALE)) {
		/* Calculate new DM area */
		dm_base = phys & ~(MMCU_DIRECTMAPn_ADDR_SCALE - 1);

		/* Actually map it in! */
		metag_out32(dm_base, MMCU_DIRECTMAP3_ADDR);

		/* And calculate how far into that area our reference is */
		offset = phys - dm_base;
	}

	return DM3_BASE + offset;
}
48
49/*
50 * Return the physical address of the base of our pgd table.
51 */
52static inline unsigned long __get_mmu_base(void)
53{
54 unsigned long base_phys;
55 unsigned int stride;
56
57 if (is_global_space(PAGE_OFFSET))
58 stride = 4;
59 else
60 stride = hard_processor_id(); /* [0..3] */
61
62 base_phys = metag_in32(MMCU_TABLE_PHYS_ADDR);
63 base_phys += (0x800 * stride);
64
65 return base_phys;
66}
67
68/* Given a virtual address, return the virtual address of the relevant pgd */
69static unsigned long pgd_entry_addr(unsigned long virt)
70{
71 unsigned long pgd_phys;
72 unsigned long pgd_virt;
73
74 if (!mmu_base_phys)
75 mmu_base_phys = __get_mmu_base();
76
77 /*
78 * Are we trying to map a global address. If so, then index
79 * the global pgd table instead of our local one.
80 */
81 if (is_global_space(virt)) {
82 /* Scale into 2gig map */
83 virt &= ~0x80000000;
84 }
85
86 /* Base of the pgd table plus our 4Meg entry, 4bytes each */
87 pgd_phys = mmu_base_phys + ((virt >> PGDIR_SHIFT) * 4);
88
89 pgd_virt = map_addr(pgd_phys);
90
91 return pgd_virt;
92}
93
94/* Given a virtual address, return the virtual address of the relevant pte */
95static unsigned long pgtable_entry_addr(unsigned long virt)
96{
97 unsigned long pgtable_phys;
98 unsigned long pgtable_virt, pte_virt;
99
100 /* Find the physical address of the 4MB page table*/
101 pgtable_phys = metag_in32(pgd_entry_addr(virt)) & MMCU_ENTRY_ADDR_BITS;
102
103 /* Map it to a virtual address */
104 pgtable_virt = map_addr(pgtable_phys);
105
106 /* And index into it for our pte */
107 pte_virt = pgtable_virt + ((virt >> PAGE_SHIFT) & 0x3FF) * 4;
108
109 return pte_virt;
110}
111
/* Read the raw first-level (pgd) entry mapping @vaddr from the MMU tables. */
unsigned long mmu_read_first_level_page(unsigned long vaddr)
{
	return metag_in32(pgd_entry_addr(vaddr));
}
116
/* Read the raw second-level (pte) entry mapping @vaddr from the MMU tables. */
unsigned long mmu_read_second_level_page(unsigned long vaddr)
{
	return metag_in32(pgtable_entry_addr(vaddr));
}
121
/*
 * Return (and lazily cache) the virtual address of the base of the MMU
 * pgd table, i.e. the entry covering virtual address 0.
 */
unsigned long mmu_get_base(void)
{
	static unsigned long cached_base;

	if (!cached_base)
		cached_base = pgd_entry_addr(0);

	return cached_base;
}
132
/*
 * mmu_init() - populate swapper_pg_dir with the kernel's MMU mappings.
 * @mem_end:	unused on Meta 1
 *
 * Copies the pgd entries the hardware MMU tables already hold for the
 * kernel address range into swapper_pg_dir, so every process inherits
 * the kernel-space mappings.
 */
void __init mmu_init(unsigned long mem_end)
{
	unsigned long entry, addr;
	pgd_t *p_swapper_pg_dir;

	/*
	 * Now copy over any MMU pgd entries already in the mmu page tables
	 * over to our root init process (swapper_pg_dir) map. This map is
	 * then inherited by all other processes, which means all processes
	 * inherit a map of the kernel space.
	 */
	addr = PAGE_OFFSET;
	entry = pgd_index(PAGE_OFFSET);
	p_swapper_pg_dir = pgd_offset_k(0) + entry;

	while (addr <= META_MEMORY_LIMIT) {
		unsigned long pgd_entry;
		/* copy over the current MMU value */
		pgd_entry = mmu_read_first_level_page(addr);
		pgd_val(*p_swapper_pg_dir) = pgd_entry;

		p_swapper_pg_dir++;
		addr += PGDIR_SIZE;
	}
}
diff --git a/arch/metag/mm/mmu-meta2.c b/arch/metag/mm/mmu-meta2.c
deleted file mode 100644
index 8b668a69c980..000000000000
--- a/arch/metag/mm/mmu-meta2.c
+++ /dev/null
@@ -1,208 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2008,2009,2010,2011 Imagination Technologies Ltd.
4 *
5 * Meta 2 enhanced mode MMU handling code.
6 *
7 */
8
9#include <linux/mm.h>
10#include <linux/init.h>
11#include <linux/kernel.h>
12#include <linux/io.h>
13#include <linux/bootmem.h>
14#include <linux/syscore_ops.h>
15
16#include <asm/mmu.h>
17#include <asm/mmu_context.h>
18
/*
 * Read the raw first-level (pgd) entry mapping @vaddr, or 0 if @vaddr
 * lies outside the linear window described by the MMU PHYS0 register.
 */
unsigned long mmu_read_first_level_page(unsigned long vaddr)
{
	unsigned int cpu = hard_processor_id();
	unsigned long offset, linear_base, linear_limit;
	unsigned int phys0;
	pgd_t *pgd, entry;

	/* Global addresses index the table in the lower 2GB alias. */
	if (is_global_space(vaddr))
		vaddr &= ~0x80000000;

	offset = vaddr >> PGDIR_SHIFT;

	phys0 = metag_in32(mmu_phys0_addr(cpu));

	/* Top bit of linear base is always zero. */
	linear_base = (phys0 >> PGDIR_SHIFT) & 0x1ff;

	/* Limit in the range 0 (4MB) to 9 (2GB). */
	linear_limit = 1 << ((phys0 >> 8) & 0xf);
	linear_limit += linear_base;

	/*
	 * If offset is below linear base or above the limit then no
	 * mapping exists.
	 */
	if (offset < linear_base || offset > linear_limit)
		return 0;

	offset -= linear_base;
	pgd = (pgd_t *)mmu_get_base();
	entry = pgd[offset];

	return pgd_val(entry);
}
53
/*
 * Read the raw second-level (pte) entry mapping @vaddr, using the
 * Meta 2 cache-read builtin to have the hardware perform the lookup.
 */
unsigned long mmu_read_second_level_page(unsigned long vaddr)
{
	return __builtin_meta2_cacherd((void *)(vaddr & PAGE_MASK));
}
58
59unsigned long mmu_get_base(void)
60{
61 unsigned int cpu = hard_processor_id();
62 unsigned long stride;
63
64 stride = cpu * LINSYSMEMTnX_STRIDE;
65
66 /*
67 * Bits 18:2 of the MMCU_TnLocal_TABLE_PHYS1 register should be
68 * used as an offset to the start of the top-level pgd table.
69 */
70 stride += (metag_in32(mmu_phys1_addr(cpu)) & 0x7fffc);
71
72 if (is_global_space(PAGE_OFFSET))
73 stride += LINSYSMEMTXG_OFFSET;
74
75 return LINSYSMEMT0L_BASE + stride;
76}
77
78#define FIRST_LEVEL_MASK 0xffffffc0
79#define SECOND_LEVEL_MASK 0xfffff000
80#define SECOND_LEVEL_ALIGN 64
81
/*
 * Walk the eight MMU table region registers (4 threads x local/global)
 * and restore priv protection on any that have lost it, warning when a
 * fix-up is needed.
 */
static void repriv_mmu_tables(void)
{
	unsigned long phys0_addr;
	unsigned int g;

	/*
	 * Check that all the mmu table regions are priv protected, and if not
	 * fix them and emit a warning. If we left them without priv protection
	 * then userland processes would have access to a 2M window into
	 * physical memory near where the page tables are.
	 */
	phys0_addr = MMCU_T0LOCAL_TABLE_PHYS0;
	for (g = 0; g < 2; ++g) {
		unsigned int t, phys0;
		unsigned long flags;
		for (t = 0; t < 4; ++t) {
			/* Read-modify-write must be atomic with other threads. */
			__global_lock2(flags);
			phys0 = metag_in32(phys0_addr);
			if ((phys0 & _PAGE_PRESENT) && !(phys0 & _PAGE_PRIV)) {
				pr_warn("Fixing priv protection on T%d %s MMU table region\n",
					t,
					g ? "global" : "local");
				phys0 |= _PAGE_PRIV;
				metag_out32(phys0, phys0_addr);
			}
			__global_unlock2(flags);

			phys0_addr += MMCU_TnX_TABLE_PHYSX_STRIDE;
		}

		/* Rewind over the 4 thread strides, step to the global bank. */
		phys0_addr += MMCU_TXG_TABLE_PHYSX_OFFSET
			    - 4*MMCU_TnX_TABLE_PHYSX_STRIDE;
	}
}
116
#ifdef CONFIG_METAG_SUSPEND_MEM
/* Syscore resume hook: restore MMU table priv protection after suspend. */
static void mmu_resume(void)
{
	/*
	 * If a full suspend to RAM has happened then the original bad MMU table
	 * priv may have been restored, so repriv them again.
	 */
	repriv_mmu_tables();
}
#else
/* No resume handling needed without memory suspend support. */
#define mmu_resume NULL
#endif /* CONFIG_METAG_SUSPEND_MEM */
129
130static struct syscore_ops mmu_syscore_ops = {
131 .resume = mmu_resume,
132};
133
/*
 * mmu_init() - set up the kernel's view of the Meta 2 MMU page tables.
 * @mem_end:	end of the kernel's memory mapping; used (only when
 *		CONFIG_KERNEL_4M_PAGES is set) to size the 4MB mappings
 *
 * Copies the pgd entries already programmed into the hardware MMU
 * tables into swapper_pg_dir so every process inherits the kernel
 * mappings, optionally remaps the kernel with 4MB pages to reduce TLB
 * pressure, then re-privs the MMU table regions and registers the
 * syscore resume hook.
 */
void __init mmu_init(unsigned long mem_end)
{
	unsigned long entry, addr;
	pgd_t *p_swapper_pg_dir;
#ifdef CONFIG_KERNEL_4M_PAGES
	unsigned long mem_size = mem_end - PAGE_OFFSET;
	unsigned int pages = DIV_ROUND_UP(mem_size, 1 << 22);
	unsigned int second_level_entry = 0;
	unsigned long *second_level_table;
#endif

	/*
	 * Now copy over any MMU pgd entries already in the mmu page tables
	 * over to our root init process (swapper_pg_dir) map. This map is
	 * then inherited by all other processes, which means all processes
	 * inherit a map of the kernel space.
	 */
	addr = META_MEMORY_BASE;
	entry = pgd_index(META_MEMORY_BASE);
	p_swapper_pg_dir = pgd_offset_k(0) + entry;

	while (entry < (PTRS_PER_PGD - pgd_index(META_MEMORY_BASE))) {
		unsigned long pgd_entry;
		/* copy over the current MMU value */
		pgd_entry = mmu_read_first_level_page(addr);
		pgd_val(*p_swapper_pg_dir) = pgd_entry;

		p_swapper_pg_dir++;
		addr += PGDIR_SIZE;
		entry++;
	}

#ifdef CONFIG_KERNEL_4M_PAGES
	/*
	 * At this point we can also map the kernel with 4MB pages to
	 * reduce TLB pressure.
	 */
	second_level_table = alloc_bootmem_pages(SECOND_LEVEL_ALIGN * pages);

	addr = PAGE_OFFSET;
	entry = pgd_index(PAGE_OFFSET);
	p_swapper_pg_dir = pgd_offset_k(0) + entry;

	while (pages > 0) {
		unsigned long phys_addr, second_level_phys;
		pte_t *pte = (pte_t *)&second_level_table[second_level_entry];

		phys_addr = __pa(addr);

		second_level_phys = __pa(pte);

		/* First-level entry points at the 4MB pte below. */
		pgd_val(*p_swapper_pg_dir) = ((second_level_phys &
					       FIRST_LEVEL_MASK) |
					      _PAGE_SZ_4M |
					      _PAGE_PRESENT);

		pte_val(*pte) = ((phys_addr & SECOND_LEVEL_MASK) |
				 _PAGE_PRESENT | _PAGE_DIRTY |
				 _PAGE_ACCESSED | _PAGE_WRITE |
				 _PAGE_CACHEABLE | _PAGE_KERNEL);

		p_swapper_pg_dir++;
		addr += PGDIR_SIZE;
		/* Second level pages must be 64byte aligned. */
		second_level_entry += (SECOND_LEVEL_ALIGN /
				       sizeof(unsigned long));
		pages--;
	}
	/* Switch to the new tables and drop any stale TLB entries. */
	load_pgd(swapper_pg_dir, hard_processor_id());
	flush_tlb_all();
#endif

	repriv_mmu_tables();
	register_syscore_ops(&mmu_syscore_ops);
}
diff --git a/arch/metag/mm/numa.c b/arch/metag/mm/numa.c
deleted file mode 100644
index 67b46c295072..000000000000
--- a/arch/metag/mm/numa.c
+++ /dev/null
@@ -1,82 +0,0 @@
1/*
2 * Multiple memory node support for Meta machines
3 *
4 * Copyright (C) 2007 Paul Mundt
5 * Copyright (C) 2010 Imagination Technologies Ltd.
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#include <linux/export.h>
12#include <linux/bootmem.h>
13#include <linux/memblock.h>
14#include <linux/mm.h>
15#include <linux/numa.h>
16#include <linux/pfn.h>
17#include <asm/sections.h>
18
19struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
20EXPORT_SYMBOL_GPL(node_data);
21
22extern char _heap_start[];
23
24/*
25 * On Meta machines the conventional approach is to stash system RAM
26 * in node 0, and other memory blocks in to node 1 and up, ordered by
27 * latency. Each node's pgdat is node-local at the beginning of the node,
28 * immediately followed by the node mem map.
29 */
/*
 * setup_bootmem_node() - register and initialise a secondary memory node.
 * @nid:	node number; must be non-zero (node 0 is system RAM)
 * @start:	physical start address of the node's memory
 * @end:	physical end address (exclusive)
 *
 * Adds the range to memblock, allocates a node-local pgdat and bootmem
 * bitmap near the top of the node, initialises bootmem for the node,
 * reserves those allocations, brings the node online and registers it
 * with sparsemem.
 */
void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
{
	unsigned long bootmap_pages, bootmem_paddr;
	unsigned long start_pfn, end_pfn;
	unsigned long pgdat_paddr;

	/* Don't allow bogus node assignment */
	BUG_ON(nid >= MAX_NUMNODES || nid <= 0);

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	memblock_add(start, end - start);

	memblock_set_node(PFN_PHYS(start_pfn),
			  PFN_PHYS(end_pfn - start_pfn),
			  &memblock.memory, nid);

	/* Node-local pgdat */
	pgdat_paddr = memblock_alloc_base(sizeof(struct pglist_data),
					  SMP_CACHE_BYTES, end);
	NODE_DATA(nid) = __va(pgdat_paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

	/* Node-local bootmap */
	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bootmem_paddr = memblock_alloc_base(bootmap_pages << PAGE_SHIFT,
					    PAGE_SIZE, end);
	init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
			  start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/* Reserve the pgdat and bootmap space with the bootmem allocator */
	reserve_bootmem_node(NODE_DATA(nid), pgdat_paddr & PAGE_MASK,
			     sizeof(struct pglist_data), BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(nid), bootmem_paddr,
			     bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);

	/* It's up */
	node_set_online(nid);

	/* Kick sparsemem */
	sparse_memory_present_with_active_regions(nid);
}
79
/* Weak hook for SoC code to register additional memory; default no-op. */
void __init __weak soc_mem_setup(void)
{
}
diff --git a/arch/metag/oprofile/Makefile b/arch/metag/oprofile/Makefile
deleted file mode 100644
index dc92a4a3d618..000000000000
--- a/arch/metag/oprofile/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
# Build the metag oprofile module: the shared oprofile driver core plus
# the arch-specific backtrace and setup glue.
obj-$(CONFIG_OPROFILE) += oprofile.o

# Common oprofile core objects, sourced from drivers/oprofile below.
oprofile-core-y += buffer_sync.o
oprofile-core-y += cpu_buffer.o
oprofile-core-y += event_buffer.o
oprofile-core-y += oprof.o
oprofile-core-y += oprofile_files.o
oprofile-core-y += oprofile_stats.o
oprofile-core-y += oprofilefs.o
oprofile-core-y += timer_int.o
oprofile-core-$(CONFIG_HW_PERF_EVENTS) += oprofile_perf.o

# Arch-specific objects.
oprofile-y += backtrace.o
oprofile-y += common.o
oprofile-y += $(addprefix ../../../drivers/oprofile/,$(oprofile-core-y))

ccflags-y += -Werror
diff --git a/arch/metag/oprofile/backtrace.c b/arch/metag/oprofile/backtrace.c
deleted file mode 100644
index 7cc3f37cb40e..000000000000
--- a/arch/metag/oprofile/backtrace.c
+++ /dev/null
@@ -1,63 +0,0 @@
1/*
2 * Copyright (C) 2010-2013 Imagination Technologies Ltd.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 */
8
9#include <linux/oprofile.h>
10#include <linux/uaccess.h>
11#include <asm/processor.h>
12#include <asm/stacktrace.h>
13
14#include "backtrace.h"
15
/*
 * Walk a userland frame-pointer chain, feeding up to @depth return
 * addresses to oprofile.  The walk stops early on a faulting access,
 * when a frame pointer fails to decrease (the Meta stack grows up), or
 * when @depth is exhausted.
 */
static void user_backtrace_fp(unsigned long __user *fp, unsigned int depth)
{
	while (depth-- && access_ok(VERIFY_READ, fp, 8)) {
		unsigned long addr;
		unsigned long __user *fpnew;
		if (__copy_from_user_inatomic(&addr, fp + 1, sizeof(addr)))
			break;
		/* step back into the call site -- presumably a 4-byte insn; confirm */
		addr -= 4;

		oprofile_add_trace(addr);

		/* stack grows up, so frame pointers must decrease */
		if (__copy_from_user_inatomic(&fpnew, fp + 0, sizeof(fpnew)))
			break;
		if (fpnew >= fp)
			break;
		fp = fpnew;
	}
}
35
36static int kernel_backtrace_frame(struct stackframe *frame, void *data)
37{
38 unsigned int *depth = data;
39
40 oprofile_add_trace(frame->pc);
41
42 /* decrement depth and stop if we reach 0 */
43 if ((*depth)-- == 0)
44 return 1;
45
46 /* otherwise onto the next frame */
47 return 0;
48}
49
/*
 * metag_backtrace() - oprofile backtrace hook for metag.
 * @regs:	the interrupted context
 * @depth:	maximum number of frames to record
 *
 * For user-mode samples, walks the userland frame-pointer chain rooted
 * at A0FrP; for kernel samples, walks the kernel stack frames starting
 * from the trapped context.
 */
void metag_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	if (user_mode(regs)) {
		unsigned long *fp = (unsigned long *)regs->ctx.AX[1].U0;
		user_backtrace_fp((unsigned long __user __force *)fp, depth);
	} else {
		struct stackframe frame;
		frame.fp = regs->ctx.AX[1].U0;	/* A0FrP */
		frame.sp = user_stack_pointer(regs);	/* A0StP */
		frame.lr = 0;		/* from stack */
		frame.pc = regs->ctx.CurrPC;	/* PC */
		walk_stackframe(&frame, &kernel_backtrace_frame, &depth);
	}
}
diff --git a/arch/metag/oprofile/backtrace.h b/arch/metag/oprofile/backtrace.h
deleted file mode 100644
index 60adb862aa2c..000000000000
--- a/arch/metag/oprofile/backtrace.h
+++ /dev/null
@@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _METAG_OPROFILE_BACKTRACE_H
#define _METAG_OPROFILE_BACKTRACE_H

/* Record up to @depth stack frames of @regs' context into oprofile. */
void metag_backtrace(struct pt_regs * const regs, unsigned int depth);

#endif
diff --git a/arch/metag/oprofile/common.c b/arch/metag/oprofile/common.c
deleted file mode 100644
index ba26152b3c00..000000000000
--- a/arch/metag/oprofile/common.c
+++ /dev/null
@@ -1,66 +0,0 @@
1/*
2 * arch/metag/oprofile/common.c
3 *
4 * Copyright (C) 2013 Imagination Technologies Ltd.
5 *
6 * Based on arch/sh/oprofile/common.c:
7 *
8 * Copyright (C) 2003 - 2010 Paul Mundt
9 *
10 * Based on arch/mips/oprofile/common.c:
11 *
12 * Copyright (C) 2004, 2005 Ralf Baechle
13 * Copyright (C) 2005 MIPS Technologies, Inc.
14 *
15 * This file is subject to the terms and conditions of the GNU General Public
16 * License. See the file "COPYING" in the main directory of this archive
17 * for more details.
18 */
19#include <linux/errno.h>
20#include <linux/init.h>
21#include <linux/oprofile.h>
22#include <linux/perf_event.h>
23#include <linux/slab.h>
24
25#include "backtrace.h"
26
27#ifdef CONFIG_HW_PERF_EVENTS
28/*
29 * This will need to be reworked when multiple PMUs are supported.
30 */
31static char *metag_pmu_op_name;
32
/* Return the oprofile name for the current PMU (set at init time). */
char *op_name_from_perf_id(void)
{
	return metag_pmu_op_name;
}
37
/*
 * Hook oprofile up to the perf-events PMU backend.  Returns -ENODEV
 * when no hardware counters are available (oprofile then falls back to
 * timer sampling), -ENOMEM on allocation failure, or the result of
 * oprofile_perf_init().
 */
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	ops->backtrace = metag_backtrace;

	if (perf_num_counters() == 0)
		return -ENODEV;

	metag_pmu_op_name = kasprintf(GFP_KERNEL, "metag/%s",
				      perf_pmu_name());
	if (unlikely(!metag_pmu_op_name))
		return -ENOMEM;

	return oprofile_perf_init(ops);
}
52
/* Tear down the perf backend and free the PMU name string. */
void oprofile_arch_exit(void)
{
	oprofile_perf_exit();
	kfree(metag_pmu_op_name);
}
#else
/* No hardware counters: only register the backtrace helper. */
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	ops->backtrace = metag_backtrace;
	/* fall back to timer interrupt PC sampling */
	return -ENODEV;
}
void oprofile_arch_exit(void) {}
#endif /* CONFIG_HW_PERF_EVENTS */
diff --git a/arch/metag/tbx/Makefile b/arch/metag/tbx/Makefile
deleted file mode 100644
index 98bc5453cf24..000000000000
--- a/arch/metag/tbx/Makefile
+++ /dev/null
@@ -1,22 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for TBX library files..
#

# Assemble for Meta 2.1 with FPU and DSP instruction support.
asflags-y += -mmetac=2.1 -Wa,-mfpu=metac21 -mdsp
# Define TBX_PERCPU_SP_SAVE when building for SMP.
asflags-$(CONFIG_SMP) += -DTBX_PERCPU_SP_SAVE

ccflags-y += -mmetac=2.1

lib-y += tbicore.o
lib-y += tbictx.o
lib-y += tbidefr.o
lib-y += tbilogf.o
lib-y += tbipcx.o
lib-y += tbiroot.o
lib-y += tbisoft.o
lib-y += tbistring.o
lib-y += tbitimer.o

# Optional context support for DSP RAM and FPU state.
lib-$(CONFIG_METAG_DSP) += tbidspram.o
lib-$(CONFIG_METAG_FPU) += tbictxfpu.o
diff --git a/arch/metag/tbx/tbicore.S b/arch/metag/tbx/tbicore.S
deleted file mode 100644
index a0838ebcb433..000000000000
--- a/arch/metag/tbx/tbicore.S
+++ /dev/null
@@ -1,136 +0,0 @@
1/*
2 * tbicore.S
3 *
4 * Copyright (C) 2001, 2002, 2007, 2012 Imagination Technologies.
5 *
6 * This program is free software; you can redistribute it and/or modify it under
7 * the terms of the GNU General Public License version 2 as published by the
8 * Free Software Foundation.
9 *
10 * Core functions needed to support use of the thread binary interface for META
11 * processors
12 */
13
14 .file "tbicore.S"
15/* Get data structures and defines from the TBI C header */
16#include <asm/metag_mem.h>
17#include <asm/metag_regs.h>
18#include <asm/tbx.h>
19
20 .data
21 .balign 8
22 .global ___pTBISegs
23 .type ___pTBISegs,object
24___pTBISegs:
25 .quad 0 /* Segment list pointer with it's */
26 .size ___pTBISegs,.-___pTBISegs
27 /* own id or spin-lock location */
28/*
29 * Return ___pTBISegs value specific to privilege level - not very complicated
30 * at the moment
31 *
32 * Register Usage: D0Re0 is the result, D1Re0 is used as a scratch
33 */
	.text
	.balign	4
	.global	___TBISegList
	.type	___TBISegList,function
/* Return the 64-bit ___pTBISegs value (list pointer + id/spin-lock word). */
___TBISegList:
	MOVT	A1LbP,#HI(___pTBISegs)
	ADD	A1LbP,A1LbP,#LO(___pTBISegs)
	GETL	D0Re0,D1Re0,[A1LbP]	/* Load both words of ___pTBISegs */
	MOV	PC,D1RtP		/* Return */
	.size	___TBISegList,.-___TBISegList
44
45/*
46 * Search the segment list for a match given Id, pStart can be NULL
47 *
48 * Register Usage: D1Ar1 is pSeg, D0Ar2 is Id, D0Re0 is the result
49 * D0Ar4, D1Ar3 are used as a scratch
 * NB: The PSTAT bit of Id in D0Ar2 may be toggled
51 */
	.text
	.balign	4
	.global	___TBIFindSeg
	.type	___TBIFindSeg,function
/* Walk the segment list from pStart (D1Ar1, may be NULL) or the list head
   for an Id matching D0Ar2; result in D0Re0, zero flag set if no match. */
___TBIFindSeg:
	MOVT	A1LbP,#HI(___pTBISegs)
	ADD	A1LbP,A1LbP,#LO(___pTBISegs)
	GETL	D1Ar3,D0Ar4,[A1LbP]	/* Read segment list head */
	MOV	D0Re0,TXSTATUS		/* What priv level are we at? */
	CMP	D1Ar1,#0		/* Is pStart provided? */
/* Disable privilege adaption for now */
	ANDT	D0Re0,D0Re0,#0		/*HI(TXSTATUS_PSTAT_BIT) ; Is PSTAT set? Zero if not */
	LSL	D0Re0,D0Re0,#(TBID_PSTAT_S-TXSTATUS_PSTAT_S)
	XOR	D0Ar2,D0Ar2,D0Re0	/* Toggle Id PSTAT if privileged */
	MOVNZ	D1Ar3,D1Ar1		/* Use pStart if provided */
$LFindSegLoop:
	ADDS	D0Re0,D1Ar3,#0		/* End of list? Load result into D0Re0 */
	MOVZ	PC,D1RtP		/* If result is NULL we leave */
	GETL	D1Ar3,D0Ar4,[D1Ar3]	/* Read pLink and Id */
	CMP	D0Ar4,D0Ar2		/* Does it match? */
	BNZ	$LFindSegLoop		/* Loop if there is no match */
	TST	D0Re0,D0Re0		/* Clear zero flag - we found it! */
	MOV	PC,D1RtP		/* Return */
	.size	___TBIFindSeg,.-___TBIFindSeg
76
77/* Useful offsets to encode the lower bits of the lock/unlock addresses */
78#define UON (LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFF8)
79#define UOFF (LINSYSEVENT_WR_ATOMIC_UNLOCK & 0xFFF8)
80
81/*
82 * Perform a whole spin-lock sequence as used by the TBISignal routine
83 *
84 * Register Usage: D1Ar1 is pLock, D0Ar2 is Mask, D0Re0 is the result
85 * (All other usage due to ___TBIPoll - D0Ar6, D1Re0)
86 */
	.text
	.balign	4
	.global	___TBISpin
	.type	___TBISpin,function
/* Spin, via repeated ___TBIPoll attempts, until the Mask bits in *pLock
   are gained (full spin-lock sequence used by TBISignal). */
___TBISpin:
	SETL	[A0StP++],D0FrT,D1RtP	/* Save our return address */
	ORS	D0Re0,D0Re0,#1		/* Clear zero flag */
	MOV	D1RtP,PC		/* Setup return address to form loop */
$LSpinLoop:
	BNZ	___TBIPoll		/* Keep repeating if fail to set */
	GETL	D0FrT,D1RtP,[--A0StP]	/* Restore return address */
	MOV	PC,D1RtP		/* Return */
	.size	___TBISpin,.-___TBISpin
100
101/*
102 * Perform an attempt to gain access to a spin-lock and set some bits
103 *
104 * Register Usage: D1Ar1 is pLock, D0Ar2 is Mask, D0Re0 is the result
 * !!On return Zero flag is SET if we are successful!!
106 * A0.3 is used to hold base address of system event region
107 * D1Re0 use to hold TXMASKI while interrupts are off
108 */
	.text
	.balign	4
	.global	___TBIPoll
	.type	___TBIPoll,function
/* Single attempt to set the Mask bits (D0Ar2) in *pLock (D1Ar1) under the
   atomic lock with interrupts masked; zero flag set on success. */
___TBIPoll:
	MOV	D1Re0,#0		/* Prepare to disable ints */
	MOVT	A0.3,#HI(LINSYSEVENT_WR_ATOMIC_LOCK)
	SWAP	D1Re0,TXMASKI		/* Really stop ints */
	LOCK2				/* Gain all locks */
	SET	[A0.3+#UON],D1RtP	/* Stop shared memory access too */
	DCACHE	[D1Ar1],A0.3		/* Flush Cache line */
	GETD	D0Re0,[D1Ar1]		/* Get new state from memory or hit */
	DCACHE	[D1Ar1],A0.3		/* Flush Cache line */
	GETD	D0Re0,[D1Ar1]		/* Get current state */
	TST	D0Re0,D0Ar2		/* Are we clear to send? */
	ORZ	D0Re0,D0Re0,D0Ar2	/* Yes: So set bits and */
	SETDZ	[D1Ar1],D0Re0		/* transmit new state */
	SET	[A0.3+#UOFF],D1RtP	/* Allow shared memory access */
	LOCK0				/* Release all locks */
	MOV	TXMASKI,D1Re0		/* Allow ints */
$LPollEnd:
	XORNZ	D0Re0,D0Re0,D0Re0	/* No: Generate zero result */
	MOV	PC,D1RtP		/* Return (NZ indicates failure) */
	.size	___TBIPoll,.-___TBIPoll
133
134/*
135 * End of tbicore.S
136 */
diff --git a/arch/metag/tbx/tbictx.S b/arch/metag/tbx/tbictx.S
deleted file mode 100644
index 19af983a13ae..000000000000
--- a/arch/metag/tbx/tbictx.S
+++ /dev/null
@@ -1,366 +0,0 @@
1/*
2 * tbictx.S
3 *
4 * Copyright (C) 2001, 2002, 2007, 2012 Imagination Technologies.
5 *
6 * This program is free software; you can redistribute it and/or modify it under
7 * the terms of the GNU General Public License version 2 as published by the
8 * Free Software Foundation.
9 *
10 * Explicit state save and restore routines forming part of the thread binary
11 * interface for META processors
12 */
13
14 .file "tbictx.S"
15#include <asm/metag_regs.h>
16#include <asm/tbx.h>
17
18#ifdef METAC_1_0
19/* Ax.4 is NOT saved in XAX3 */
20#define A0_4
21#else
22/* Ax.4 is saved in XAX4 */
23#define A0_4 A0.4,
24#endif
25
26
27/* Size of the TBICTX structure */
28#define TBICTX_BYTES ((TBICTX_AX_REGS*8)+TBICTX_AX)
29
 30/*
 31 * TBIRES __TBINestInts( TBIRES State, void *pExt, int NoNestMask )
 *
 * Allow the interrupt triggers selected by (TrigMask & ~NoNestMask) to
 * nest; drops into ___TBINestInts2 first when the State flags show
 * extended/catch-buffer context that must be saved before enabling.
 32 */
 33 .text
 34 .balign 4
 35 .global ___TBINestInts
 36 .type ___TBINestInts,function
 37___TBINestInts:
 38 XOR D0Ar4,D0Ar4,#-1 /* D0Ar4 = ~TrigBit */
 39 AND D0Ar4,D0Ar4,#0xFFFF /* D0Ar4 &= 0xFFFF */
 40 MOV D0Ar6,TXMASKI /* BGNDHALT currently enabled? */
 41 TSTT D0Ar2,#TBICTX_XDX8_BIT+TBICTX_XAXX_BIT+TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+TBICTX_XCBF_BIT
 42 AND D0Ar4,D0Ar2,D0Ar4 /* D0Ar4 = Ints to allow */
 43 XOR D0Ar2,D0Ar2,D0Ar4 /* Less Ints in TrigMask */
 44 BNZ ___TBINestInts2 /* Jump if ctx save required! */
 45 TSTT D0Ar2,#TBICTX_CBUF_BIT+TBICTX_CBRP_BIT /* Is catch state dirty? */
 46 OR D0Ar4,D0Ar4,D0Ar6 /* Or in TXMASKI BGNDHALT if set */
 47 TSTNZ D0Ar4,D0Ar4 /* Yes: AND triggers enabled */
 48 MOV D0Re0,D0Ar2 /* Update State argument */
 49 MOV D1Re0,D1Ar1 /* with less Ints in TrigMask */
 50 MOVZ TXMASKI,D0Ar4 /* Early return: Enable Ints */
 51 MOVZ PC,D1RtP /* Early return */
 52 .size ___TBINestInts,.-___TBINestInts
 53/*
 54 * Drop through into sub-function: saves extended context via
 * ___TBICtxSave, then enables the requested interrupt triggers.
 55 */
 56 .global ___TBINestInts2
 57 .type ___TBINestInts2,function
 58___TBINestInts2:
 59 MOV D0FrT,A0FrP /* Full entry sequence so we */
 60 ADD A0FrP,A0StP,#0 /* can make sub-calls */
 61 MSETL [A0StP],D0FrT,D0.5,D0.6 /* and preserve our result */
 62 ORT D0Ar2,D0Ar2,#TBICTX_XCBF_BIT /* Add in XCBF save request */
 63 MOV D0.5,D0Ar2 /* Save State in DX.5 */
 64 MOV D1.5,D1Ar1
 65 OR D0.6,D0Ar4,D0Ar6 /* Save TrigMask in D0.6 */
 66 MOVT D1RtP,#HI(___TBICtxSave) /* Save catch buffer */
 67 CALL D1RtP,#LO(___TBICtxSave)
 68 MOV TXMASKI,D0.6 /* Allow Ints */
 69 MOV D0Re0,D0.5 /* Return State */
 70 MOV D1Re0,D1.5
 71 MGETL D0FrT,D0.5,D0.6,[A0FrP] /* Full exit sequence */
 72 SUB A0StP,A0FrP,#(8*3)
 73 MOV A0FrP,D0FrT
 74 MOV PC,D1RtP
 75 .size ___TBINestInts2,.-___TBINestInts2
76
 77/*
 78 * void *__TBICtxSave( TBIRES State, void *pExt )
 79 *
 80 * D0Ar2 contains TBICTX_*_BIT values that control what
 81 * extended data is to be saved beyond the end of D1Ar1.
 82 * These bits must be ored into the SaveMask of this structure.
 83 *
 84 * Virtually all possible scratch registers are used.
 85 *
 86 * The D1Ar1 parameter is only used as the basis for saving
 87 * CBUF state.
 88 */
 89/*
 90 * If TBICTX_XEXT_BIT is specified in State, then State.pCtx->Ext is
 91 * utilised to save the base address of the context save area and
 92 * the extended states saved. The XEXT flag then indicates that the
 93 * original state of the A0.2 and A1.2 registers from TBICTX.Ext.AX2
 94 * are stored as the first part of the extended state structure.
 95 */
 96 .balign 4
 97 .global ___TBICtxSave
 98 .type ___TBICtxSave,function
 99___TBICtxSave:
 100 GETD D0Re0,[D1Ar1+#TBICTX_SaveMask-2] /* Get SaveMask */
 101 TSTT D0Ar2,#TBICTX_XDX8_BIT+TBICTX_XAXX_BIT+TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+TBICTX_XEXT_BIT
 102 /* Just XCBF to save? */
 103 MOV A0.2,D1Ar3 /* Save pointer into A0.2 */
 104 MOV A1.2,D1RtP /* Free off D0FrT:D1RtP pair */
 105 BZ $LCtxSaveCBUF /* Yes: Only XCBF may be saved */
 106 TSTT D0Ar2,#TBICTX_XEXT_BIT /* Extended base-state model? */
 107 BZ $LCtxSaveXDX8
 108 GETL D0Ar6,D1Ar5,[D1Ar1+#TBICTX_Ext_AX2] /* Get A0.2, A1.2 state */
 109 MOV D0Ar4,D0Ar2 /* Extract Ctx.SaveFlags value */
 110 ANDMT D0Ar4,D0Ar4,#TBICTX_XDX8_BIT+TBICTX_XAXX_BIT+TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+TBICTX_XEXT_BIT
 111 SETD [D1Ar1+#TBICTX_Ext_Ctx_pExt],A0.2
 112 SETD [D1Ar1+#TBICTX_Ext_Ctx_SaveMask-2],D0Ar4
 113 SETL [A0.2++],D0Ar6,D1Ar5 /* Save A0.2, A1.2 state */
 114$LCtxSaveXDX8:
 115 TSTT D0Ar2,#TBICTX_XDX8_BIT /* Save extended DX regs? */
 116 BZ $LCtxSaveXAXX
 117/*
 118 * Save 8 extra DX registers
 119 */
 120 MSETL [A0.2],D0.8,D0.9,D0.10,D0.11,D0.12,D0.13,D0.14,D0.15
 121$LCtxSaveXAXX:
 122 TSTT D0Ar2,#TBICTX_XAXX_BIT /* Save extended AX regs? */
 123 SWAP D0Re0,A0.2 /* pDst into D0Re0 */
 124 BZ $LCtxSaveXHL2
 125/*
 126 * Save 4 extra AX registers (A0.4 only when not METAC_1_0)
 127 */
 128 MSETL [D0Re0], A0_4 A0.5,A0.6,A0.7 /* Save 8*3 bytes */
 129$LCtxSaveXHL2:
 130 TSTT D0Ar2,#TBICTX_XHL2_BIT /* Save hardware-loop regs? */
 131 SWAP D0Re0,A0.2 /* pDst back into A0.2 */
 132 MOV D0Ar6,TXL1START
 133 MOV D1Ar5,TXL2START
 134 BZ $LCtxSaveXTDP
 135/*
 136 * Save hardware loop registers
 137 */
 138 SETL [A0.2++],D0Ar6,D1Ar5 /* Save 8*1 bytes */
 139 MOV D0Ar6,TXL1END
 140 MOV D1Ar5,TXL2END
 141 MOV D0FrT,TXL1COUNT
 142 MOV D1RtP,TXL2COUNT
 143 MSETL [A0.2],D0Ar6,D0FrT /* Save 8*2 bytes */
 144/*
 145 * Clear loop counters to disable any current loops
 146 */
 147 XOR TXL1COUNT,D0FrT,D0FrT
 148 XOR TXL2COUNT,D1RtP,D1RtP
 149$LCtxSaveXTDP:
 150 TSTT D0Ar2,#TBICTX_XTDP_BIT /* Save per-thread DSP regs? */
 151 BZ $LCtxSaveCBUF
 152/*
 153 * Save per-thread DSP registers; ACC.0, PR.0, PI.1-3 (PI.0 is zero)
 154 */
 155#ifndef CTX_NO_DSP
 156D SETL [A0.2++],AC0.0,AC1.0 /* Save ACx.0 lower 32-bits */
 157DH SETL [A0.2++],AC0.0,AC1.0 /* Save ACx.0 upper 32-bits */
 158D SETL [A0.2++],D0AR.0,D1AR.0 /* Save DSP RAM registers */
 159D SETL [A0.2++],D0AR.1,D1AR.1
 160D SETL [A0.2++],D0AW.0,D1AW.0
 161D SETL [A0.2++],D0AW.1,D1AW.1
 162D SETL [A0.2++],D0BR.0,D1BR.0
 163D SETL [A0.2++],D0BR.1,D1BR.1
 164D SETL [A0.2++],D0BW.0,D1BW.0
 165D SETL [A0.2++],D0BW.1,D1BW.1
 166D SETL [A0.2++],D0ARI.0,D1ARI.0
 167D SETL [A0.2++],D0ARI.1,D1ARI.1
 168D SETL [A0.2++],D0AWI.0,D1AWI.0
 169D SETL [A0.2++],D0AWI.1,D1AWI.1
 170D SETL [A0.2++],D0BRI.0,D1BRI.0
 171D SETL [A0.2++],D0BRI.1,D1BRI.1
 172D SETL [A0.2++],D0BWI.0,D1BWI.0
 173D SETL [A0.2++],D0BWI.1,D1BWI.1
 174D SETD [A0.2++],T0
 175D SETD [A0.2++],T1
 176D SETD [A0.2++],T2
 177D SETD [A0.2++],T3
 178D SETD [A0.2++],T4
 179D SETD [A0.2++],T5
 180D SETD [A0.2++],T6
 181D SETD [A0.2++],T7
 182D SETD [A0.2++],T8
 183D SETD [A0.2++],T9
 184D SETD [A0.2++],TA
 185D SETD [A0.2++],TB
 186D SETD [A0.2++],TC
 187D SETD [A0.2++],TD
 188D SETD [A0.2++],TE
 189D SETD [A0.2++],TF
 190#else
 191 ADD A0.2,A0.2,#(8*18+4*16) /* Skip space DSP regs would occupy */
 192#endif
 193 MOV D0Ar6,TXMRSIZE
 194 MOV D1Ar5,TXDRSIZE
 195 SETL [A0.2++],D0Ar6,D1Ar5 /* Save 8*1 bytes */
 196
 197$LCtxSaveCBUF:
 198#ifdef TBI_1_3
 199 MOV D0Ar4,D0Re0 /* Copy Ctx Flags */
 200 ANDT D0Ar4,D0Ar4,#TBICTX_XCBF_BIT /* mask XCBF if already set */
 201 XOR D0Ar4,D0Ar4,#-1
 202 AND D0Ar2,D0Ar2,D0Ar4 /* remove XCBF if already set */
 203#endif
 204 TSTT D0Ar2,#TBICTX_XCBF_BIT /* Want to save CBUF? */
 205 ANDT D0Ar2,D0Ar2,#TBICTX_XDX8_BIT+TBICTX_XAXX_BIT+TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+TBICTX_XEXT_BIT
 206 OR D0Ar2,D0Ar2,D0Re0 /* Generate new SaveMask */
 207 SETD [D1Ar1+#TBICTX_SaveMask-2],D0Ar2/* Add in bits saved to TBICTX */
 208 MOV D0Re0,A0.2 /* Return end of save area */
 209 MOV D0Ar4,TXDIVTIME /* Get TXDIVTIME */
 210 MOVZ PC,A1.2 /* No: Early return */
 211 TSTT D0Ar2,#TBICTX_CBUF_BIT+TBICTX_CBRP_BIT /* Need to save CBUF? */
 212 MOVZ PC,A1.2 /* No: Early return */
 213 ORT D0Ar2,D0Ar2,#TBICTX_XCBF_BIT
 214 SETD [D1Ar1+#TBICTX_SaveMask-2],D0Ar2/* Add in XCBF bit to TBICTX */
 215 ADD A0.2,D1Ar1,#TBICTX_BYTES /* Dump CBUF state after TBICTX */
 216/*
 217 * Save CBUF
 218 */
 219 SETD [A0.2+# 0],TXCATCH0 /* Save TXCATCHn */
 220 SETD [A0.2+# 4],TXCATCH1
 221 TSTT D0Ar2,#TBICTX_CBRP_BIT /* ... RDDIRTY was/is set */
 222 SETD [A0.2+# 8],TXCATCH2
 223 SETD [A0.2+#12],TXCATCH3
 224 BZ $LCtxSaveComplete
 225 SETL [A0.2+#(2*8)],RD /* Save read pipeline */
 226 SETL [A0.2+#(3*8)],RD /* Save read pipeline */
 227 SETL [A0.2+#(4*8)],RD /* Save read pipeline */
 228 SETL [A0.2+#(5*8)],RD /* Save read pipeline */
 229 SETL [A0.2+#(6*8)],RD /* Save read pipeline */
 230 SETL [A0.2+#(7*8)],RD /* Save read pipeline */
 231 AND TXDIVTIME,D0Ar4,#TXDIVTIME_DIV_BITS /* Clear RPDIRTY */
 232$LCtxSaveComplete:
 233 MOV PC,A1.2 /* Return */
 234 .size ___TBICtxSave,.-___TBICtxSave
235
 236/*
 237 * void *__TBICtxRestore( TBIRES State, void *pExt )
 238 *
 239 * D0Ar2 contains TBICTX_*_BIT values that control what
 240 * extended data is to be recovered from D1Ar3 (pExt).
 241 *
 242 * Virtually all possible scratch registers are used.
 243 */
 244/*
 245 * If TBICTX_XEXT_BIT is specified in State, then the saved state of
 246 * the original A0.2 and A1.2 is restored from pExt and the XEXT
 247 * related flags are removed from State.pCtx->SaveMask.
 248 *
 249 */
 250 .balign 4
 251 .global ___TBICtxRestore
 252 .type ___TBICtxRestore,function
 253___TBICtxRestore:
 254 GETD D0Ar6,[D1Ar1+#TBICTX_CurrMODE] /* Get TXMODE Value */
 255 ANDST D0Ar2,D0Ar2,#TBICTX_XDX8_BIT+TBICTX_XAXX_BIT+TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+TBICTX_XEXT_BIT
 256 MOV D1Re0,D0Ar2 /* Keep flags in D1Re0 */
 257 MOV D0Re0,D1Ar3 /* D1Ar3 is default result */
 258 MOVZ PC,D1RtP /* Early return, nothing to do */
 259 ANDT D0Ar6,D0Ar6,#0xE000 /* Top bits of TXMODE required */
 260 MOV A0.3,D0Ar6 /* Save TXMODE for later */
 261 TSTT D1Re0,#TBICTX_XEXT_BIT /* Check for XEXT bit */
 262 BZ $LCtxRestXDX8
 263 GETD D0Ar4,[D1Ar1+#TBICTX_SaveMask-2]/* Get current SaveMask */
 264 GETL D0Ar6,D1Ar5,[D0Re0++] /* Restore A0.2, A1.2 state */
 265 ANDMT D0Ar4,D0Ar4,#(0xFFFF-(TBICTX_XDX8_BIT+TBICTX_XAXX_BIT+TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+TBICTX_XEXT_BIT))
 266 SETD [D1Ar1+#TBICTX_SaveMask-2],D0Ar4/* New SaveMask */
 267#ifdef METAC_1_0
 268 SETD [D1Ar1+#TBICTX_Ext_AX2_U0],D0Ar6
 269 MOV D0Ar6,D1Ar1
 270 SETD [D0Ar6+#TBICTX_Ext_AX2_U1],D1Ar5
 271#else
 272 SETL [D1Ar1+#TBICTX_Ext_AX2],D0Ar6,D1Ar5
 273#endif
 274$LCtxRestXDX8:
 275 TSTT D1Re0,#TBICTX_XDX8_BIT /* Get extended DX regs? */
 276 MOV A1.2,D1RtP /* Free off D1RtP register */
 277 BZ $LCtxRestXAXX
 278/*
 279 * Restore 8 extra DX registers
 280 */
 281 MGETL D0.8,D0.9,D0.10,D0.11,D0.12,D0.13,D0.14,D0.15,[D0Re0]
 282$LCtxRestXAXX:
 283 TSTT D1Re0,#TBICTX_XAXX_BIT /* Get extended AX regs? */
 284 BZ $LCtxRestXHL2
 285/*
 286 * Restore 3 extra AX registers (plus A0.4 when not METAC_1_0)
 287 */
 288 MGETL A0_4 A0.5,A0.6,A0.7,[D0Re0] /* Get 8*3 bytes */
 289$LCtxRestXHL2:
 290 TSTT D1Re0,#TBICTX_XHL2_BIT /* Get hardware-loop regs? */
 291 BZ $LCtxRestXTDP
 292/*
 293 * Get hardware loop registers
 294 */
 295 MGETL D0Ar6,D0Ar4,D0Ar2,[D0Re0] /* Get 8*3 bytes */
 296 MOV TXL1START,D0Ar6
 297 MOV TXL2START,D1Ar5
 298 MOV TXL1END,D0Ar4
 299 MOV TXL2END,D1Ar3
 300 MOV TXL1COUNT,D0Ar2
 301 MOV TXL2COUNT,D1Ar1
 302$LCtxRestXTDP:
 303 TSTT D1Re0,#TBICTX_XTDP_BIT /* Get per-thread DSP regs? */
 304 MOVZ PC,A1.2 /* No: Early return */
 305/*
 306 * Get per-thread DSP registers; ACC.0, PR.0, PI.1-3 (PI.0 is zero)
 307 */
 308 MOV A0.2,D0Re0
 309 GETL D0Ar6,D1Ar5,[D0Re0++#((16*4)+(18*8))]
 310#ifndef CTX_NO_DSP
 311D GETL AC0.0,AC1.0,[A0.2++] /* Restore ACx.0 lower 32-bits */
 312DH GETL AC0.0,AC1.0,[A0.2++] /* Restore ACx.0 upper 32-bits */
 313#else
 314 ADD A0.2,A0.2,#(2*8)
 315#endif
 316 ADD D0Re0,D0Re0,#(2*4)
 317 MOV TXMODE,A0.3 /* Some TXMODE bits needed */
 318 MOV TXMRSIZE,D0Ar6
 319 MOV TXDRSIZE,D1Ar5
 320#ifndef CTX_NO_DSP
 321D GETL D0AR.0,D1AR.0,[A0.2++] /* Restore DSP RAM registers */
 322D GETL D0AR.1,D1AR.1,[A0.2++]
 323D GETL D0AW.0,D1AW.0,[A0.2++]
 324D GETL D0AW.1,D1AW.1,[A0.2++]
 325D GETL D0BR.0,D1BR.0,[A0.2++]
 326D GETL D0BR.1,D1BR.1,[A0.2++]
 327D GETL D0BW.0,D1BW.0,[A0.2++]
 328D GETL D0BW.1,D1BW.1,[A0.2++]
 329#else
 330 ADD A0.2,A0.2,#(8*8)
 331#endif
 332 MOV TXMODE,#0 /* Restore TXMODE */
 333#ifndef CTX_NO_DSP
 334D GETL D0ARI.0,D1ARI.0,[A0.2++]
 335D GETL D0ARI.1,D1ARI.1,[A0.2++]
 336D GETL D0AWI.0,D1AWI.0,[A0.2++]
 337D GETL D0AWI.1,D1AWI.1,[A0.2++]
 338D GETL D0BRI.0,D1BRI.0,[A0.2++]
 339D GETL D0BRI.1,D1BRI.1,[A0.2++]
 340D GETL D0BWI.0,D1BWI.0,[A0.2++]
 341D GETL D0BWI.1,D1BWI.1,[A0.2++]
 342D GETD T0,[A0.2++]
 343D GETD T1,[A0.2++]
 344D GETD T2,[A0.2++]
 345D GETD T3,[A0.2++]
 346D GETD T4,[A0.2++]
 347D GETD T5,[A0.2++]
 348D GETD T6,[A0.2++]
 349D GETD T7,[A0.2++]
 350D GETD T8,[A0.2++]
 351D GETD T9,[A0.2++]
 352D GETD TA,[A0.2++]
 353D GETD TB,[A0.2++]
 354D GETD TC,[A0.2++]
 355D GETD TD,[A0.2++]
 356D GETD TE,[A0.2++]
 357D GETD TF,[A0.2++]
 358#else
 359 ADD A0.2,A0.2,#(8*8+4*16)
 360#endif
 361 MOV PC,A1.2 /* Return */
 362 .size ___TBICtxRestore,.-___TBICtxRestore
363
364/*
365 * End of tbictx.S
366 */
diff --git a/arch/metag/tbx/tbictxfpu.S b/arch/metag/tbx/tbictxfpu.S
deleted file mode 100644
index e773bea3e7bd..000000000000
--- a/arch/metag/tbx/tbictxfpu.S
+++ /dev/null
@@ -1,190 +0,0 @@
1/*
2 * tbictxfpu.S
3 *
4 * Copyright (C) 2009, 2012 Imagination Technologies.
5 *
6 * This program is free software; you can redistribute it and/or modify it under
7 * the terms of the GNU General Public License version 2 as published by the
8 * Free Software Foundation.
9 *
10 * Explicit state save and restore routines forming part of the thread binary
11 * interface for META processors
12 */
13
14 .file "tbifpuctx.S"
15
16#include <asm/metag_regs.h>
17#include <asm/tbx.h>
18
19#ifdef TBI_1_4
 20/*
 21 * void *__TBICtxFPUSave( TBIRES State, void *pExt )
 22 *
 23 * D0Ar2 contains TBICTX_*_BIT values that control what
 24 * extended data is to be saved.
 25 * These bits must be ored into the SaveMask of this structure.
 26 *
 27 * Virtually all possible scratch registers are used.
 28 */
 29 .text
 30 .balign 4
 31 .global ___TBICtxFPUSave
 32 .type ___TBICtxFPUSave,function
 33___TBICtxFPUSave:
 34
 35 /* D1Ar1:D0Ar2 - State
 36 * D1Ar3 - pExt
 37 * D0Ar4 - Value of METAC_CORE_ID
 38 * D1Ar5 - Scratch
 39 * D0Ar6 - Scratch
 40 */
 41
 42 /* If the FPAC bit isn't set then there is nothing to do */
 43 TSTT D0Ar2,#TBICTX_FPAC_BIT
 44 MOVZ PC, D1RtP
 45
 46 /* Obtain the Core config */
 47 MOVT D0Ar4, #HI(METAC_CORE_ID)
 48 ADD D0Ar4, D0Ar4, #LO(METAC_CORE_ID)
 49 GETD D0Ar4, [D0Ar4]
 50
 51 /* Detect FX.8 - FX.15 and add to core config */
 52 MOV D0Ar6, TXENABLE
 53 AND D0Ar6, D0Ar6, #(TXENABLE_CLASSALT_FPUR8 << TXENABLE_CLASS_S)
 54 AND D0Ar4, D0Ar4, #LO(0x0000FFFF)
 55 ORT D0Ar4, D0Ar4, #HI(TBICTX_CFGFPU_FX16_BIT)
 56 XOR D0Ar4, D0Ar4, D0Ar6
 57
 58 /* Save the relevant bits to the buffer */
 59 SETD [D1Ar3++], D0Ar4
 60
 61 /* Save the relevant bits of TXDEFR (Assumes TXDEFR is coherent) ... */
 62 MOV D0Ar6, TXDEFR
 63 LSR D0Re0, D0Ar6, #8
 64 AND D0Re0, D0Re0, #LO(TXDEFR_FPE_FE_BITS>>8)
 65 AND D0Ar6, D0Ar6, #LO(TXDEFR_FPE_ICTRL_BITS)
 66 OR D0Re0, D0Re0, D0Ar6
 67
 68 /* ... along with relevant bits of TXMODE to buffer */
 69 MOV D0Ar6, TXMODE
 70 ANDT D0Ar6, D0Ar6, #HI(TXMODE_FPURMODE_BITS)
 71 ORT D0Ar6, D0Ar6, #HI(TXMODE_FPURMODEWRITE_BIT)
 72 OR D0Ar6, D0Ar6, D0Re0
 73 SETD [D1Ar3++], D0Ar6
 74
 75 GETD D0Ar6,[D1Ar1+#TBICTX_SaveMask-2] /* Get the current SaveMask */
 76 /* D0Ar6 - pCtx->SaveMask */
 77
 78 TSTT D0Ar4, #HI(TBICTX_CFGFPU_FX16_BIT) /* Perform test here for extended FPU registers
 79 * to avoid stalls
 80 */
 81 /* Save the standard FPU registers */
 82F MSETL [D1Ar3++], FX.0, FX.2, FX.4, FX.6
 83
 84 /* Save the extended FPU registers if they are present */
 85 BZ $Lskip_save_fx8_fx16
 86F MSETL [D1Ar3++], FX.8, FX.10, FX.12, FX.14
 87$Lskip_save_fx8_fx16:
 88
 89 /* Save the FPU Accumulator if it is present */
 90 TST D0Ar4, #METAC_COREID_NOFPACC_BIT
 91 BNZ $Lskip_save_fpacc
 92F SETL [D1Ar3++], ACF.0
 93F SETL [D1Ar3++], ACF.1
 94F SETL [D1Ar3++], ACF.2
 95$Lskip_save_fpacc:
 96
 97 /* Update pCtx->SaveMask */
 98 ANDT D0Ar2, D0Ar2, #TBICTX_FPAC_BIT
 99 OR D0Ar6, D0Ar6, D0Ar2
 100 SETD [D1Ar1+#TBICTX_SaveMask-2],D0Ar6/* Add in FPAC bit to TBICTX */
 101
 102 MOV D0Re0, D1Ar3 /* Return end of save area */
 103 MOV PC, D1RtP
 104
 105 .size ___TBICtxFPUSave,.-___TBICtxFPUSave
106
 107/*
 108 * void *__TBICtxFPURestore( TBIRES State, void *pExt )
 109 *
 110 * D0Ar2 contains TBICTX_*_BIT values that control what
 111 * extended data is to be recovered from D1Ar3 (pExt).
 112 *
 113 * Virtually all possible scratch registers are used.
 114 */
 115/*
 116 * Restores the state laid down by __TBICtxFPUSave: the saved core
 117 * config word, the FPU-related fields of TXDEFR and TXMODE, the
 118 * FX.0-7 registers, plus FX.8-15 and the ACF.0-2 accumulator when
 119 * the saved core config word indicates they are present.
 120 */
 121 .balign 4
 122 .global ___TBICtxFPURestore
 123 .type ___TBICtxFPURestore,function
 124___TBICtxFPURestore:
 125
 126 /* D1Ar1:D0Ar2 - State
 127 * D1Ar3 - pExt
 128 * D0Ar4 - Value of METAC_CORE_ID
 129 * D1Ar5 - Scratch
 130 * D0Ar6 - Scratch
 131 * D1Re0 - Scratch
 132 */
 133
 134 /* If the FPAC bit isn't set then there is nothing to do */
 135 TSTT D0Ar2,#TBICTX_FPAC_BIT
 136 MOVZ PC, D1RtP
 137
 138 /* Obtain the relevant bits of the Core config */
 139 GETD D0Ar4, [D1Ar3++]
 140
 141 /* Restore FPU related parts of TXDEFR. Assumes TXDEFR is coherent */
 142 GETD D1Ar5, [D1Ar3++]
 143 MOV D0Ar6, D1Ar5
 144 LSL D1Re0, D1Ar5, #8
 145 ANDT D1Re0, D1Re0, #HI(TXDEFR_FPE_FE_BITS|TXDEFR_FPE_ICTRL_BITS)
 146 AND D1Ar5, D1Ar5, #LO(TXDEFR_FPE_FE_BITS|TXDEFR_FPE_ICTRL_BITS)
 147 OR D1Re0, D1Re0, D1Ar5
 148
 149 MOV D1Ar5, TXDEFR
 150 ANDMT D1Ar5, D1Ar5, #HI(~(TXDEFR_FPE_FE_BITS|TXDEFR_FPE_ICTRL_BITS))
 151 ANDMB D1Ar5, D1Ar5, #LO(~(TXDEFR_FPE_FE_BITS|TXDEFR_FPE_ICTRL_BITS))
 152 OR D1Re0, D1Re0, D1Ar5
 153 MOV TXDEFR, D1Re0
 154
 155 /* Restore relevant bits of TXMODE */
 156 MOV D1Ar5, TXMODE
 157 ANDMT D1Ar5, D1Ar5, #HI(~TXMODE_FPURMODE_BITS)
 158 ANDT D0Ar6, D0Ar6, #HI(TXMODE_FPURMODE_BITS|TXMODE_FPURMODEWRITE_BIT)
 159 OR D0Ar6, D0Ar6, D1Ar5
 160 MOV TXMODE, D0Ar6
 161
 162 TSTT D0Ar4, #HI(TBICTX_CFGFPU_FX16_BIT) /* Perform test here for extended FPU registers
 163 * to avoid stalls
 164 */
 165 /* Restore the standard FPU registers */
 166F MGETL FX.0, FX.2, FX.4, FX.6, [D1Ar3++]
 167
 168 /* Restore the extended FPU registers if they are present */
 169 BZ $Lskip_restore_fx8_fx16
 170F MGETL FX.8, FX.10, FX.12, FX.14, [D1Ar3++]
 171$Lskip_restore_fx8_fx16:
 172
 173 /* Restore the FPU Accumulator if it is present */
 174 TST D0Ar4, #METAC_COREID_NOFPACC_BIT
 175 BNZ $Lskip_restore_fpacc
 176F GETL ACF.0, [D1Ar3++]
 177F GETL ACF.1, [D1Ar3++]
 178F GETL ACF.2, [D1Ar3++]
 179$Lskip_restore_fpacc:
 180
 181 MOV D0Re0, D1Ar3 /* Return end of save area */
 182 MOV PC, D1RtP
 183
 184 .size ___TBICtxFPURestore,.-___TBICtxFPURestore
185
186#endif /* TBI_1_4 */
187
188/*
 189 * End of tbictxfpu.S
190 */
diff --git a/arch/metag/tbx/tbidefr.S b/arch/metag/tbx/tbidefr.S
deleted file mode 100644
index 8f0902b22f70..000000000000
--- a/arch/metag/tbx/tbidefr.S
+++ /dev/null
@@ -1,175 +0,0 @@
1/*
2 * tbidefr.S
3 *
4 * Copyright (C) 2009, 2012 Imagination Technologies.
5 *
6 * This program is free software; you can redistribute it and/or modify it under
7 * the terms of the GNU General Public License version 2 as published by the
8 * Free Software Foundation.
9 *
10 * Routing deferred exceptions
11 */
12
13#include <asm/metag_regs.h>
14#include <asm/tbx.h>
15
 16 .text
 17 .balign 4
 18 .global ___TBIHandleDFR
 19 .type ___TBIHandleDFR,function
 20/* D1Ar1:D0Ar2 -- State
 21 * D0Ar3 -- SigNum
 22 * D0Ar4 -- Triggers
 23 * D1Ar5 -- Inst
 24 * D0Ar6 -- pTBI (volatile)
 25 */
 26___TBIHandleDFR:
 27#ifdef META_BUG_MBN100212
 28 MSETL [A0StP++], D0FrT, D0.5
 29
 30 /* D1Ar1,D0Ar2,D1Ar5,D0Ar6 -- Arguments to handler, must be preserved
 31 * D0Ar4 -- The deferred exceptions
 32 * D1Ar3 -- As per D0Ar4 but just the trigger bits
 33 * D0.5 -- The bgnd deferred exceptions
 34 * D1.5 -- TXDEFR with bgnd re-added
 35 */
 36
 37 /* - Collect the pending deferred exceptions using TXSTAT,
 38 * (ack's the bgnd exceptions as a side-effect)
 39 * - Manually collect remaining (interrupt) deferred exceptions
 40 * using TXDEFR
 41 * - Replace the triggers (from TXSTATI) with the int deferred
 42 * exceptions DEFR ..., TXSTATI would have returned if it was valid
 43 * from bgnd code
 44 * - Reconstruct TXDEFR by or'ing bgnd deferred exceptions (except
 45 * the DEFER bit) and the int deferred exceptions. This will be
 46 * restored later
 47 */
 48 DEFR D0.5, TXSTAT
 49 MOV D1.5, TXDEFR
 50 ANDT D0.5, D0.5, #HI(0xFFFF0000)
 51 MOV D1Ar3, D1.5
 52 ANDT D1Ar3, D1Ar3, #HI(0xFFFF0000)
 53 OR D0Ar4, D1Ar3, #TXSTAT_DEFER_BIT
 54 OR D1.5, D1.5, D0.5
 55
 56 /* Mask off anything unrelated to the deferred exception triggers */
 57 ANDT D1Ar3, D1Ar3, #HI(TXSTAT_BUSERR_BIT | TXSTAT_FPE_BITS)
 58
 59 /* Can assume that at least one exception happened since this
 60 * handler wouldn't have been called otherwise.
 61 *
 62 * Replace the signal number and at the same time, prepare
 63 * the mask to acknowledge the exception
 64 *
 65 * D1Re0 -- The bits to acknowledge
 66 * D1Ar3 -- The signal number
 67 * D1RtP -- Scratch to deal with non-conditional insns
 68 */
 69 MOVT D1Re0, #HI(TXSTAT_FPE_BITS & ~TXSTAT_FPE_DENORMAL_BIT)
 70 MOV D1RtP, #TXSTAT_FPE_INVALID_S
 71 FFB D1Ar3, D1Ar3
 72 CMP D1Ar3, #TXSTAT_FPE_INVALID_S
 73 MOVLE D1Ar3, D1RtP /* Collapse FPE triggers to a single signal */
 74 MOV D1RtP, #1
 75 LSLGT D1Re0, D1RtP, D1Ar3
 76
 77 /* Get the handler using the signal number
 78 *
 79 * D1Ar3 -- The signal number
 80 * D0Re0 -- Offset into TBI struct containing handler address
 81 * D1Re0 -- Mask of triggers to keep
 82 * D1RtP -- Address of handler
 83 */
 84 SUB D1Ar3, D1Ar3, #(TXSTAT_FPE_INVALID_S - TBID_SIGNUM_FPE)
 85 LSL D0Re0, D1Ar3, #2
 86 XOR D1Re0, D1Re0, #-1 /* Prepare mask for acknowledge (avoids stall) */
 87 ADD D0Re0,D0Re0,#TBI_fnSigs
 88 GETD D1RtP, [D0Ar6+D0Re0]
 89
 90 /* Acknowledge triggers */
 91 AND D1.5, D1.5, D1Re0
 92
 93 /* Restore remaining exceptions
 94 * Do this here in case the handler enables nested interrupts
 95 *
 96 * D1.5 -- TXDEFR with this exception ack'd
 97 */
 98 MOV TXDEFR, D1.5
 99
 100 /* Call the handler */
 101 SWAP D1RtP, PC
 102
 103 GETL D0.5, D1.5, [--A0StP]
 104 GETL D0FrT, D1RtP, [--A0StP]
 105 MOV PC,D1RtP
 106#else /* META_BUG_MBN100212 */
 107
 108 /* D1Ar1,D0Ar2,D1Ar5,D0Ar6 -- Arguments to handler, must be preserved
 109 * D0Ar4 -- The deferred exceptions
 110 * D1Ar3 -- As per D0Ar4 but just the trigger bits
 111 */
 112
 113 /* - Collect the pending deferred exceptions using TXSTAT,
 114 * (ack's the interrupt exceptions as a side-effect)
 115 */
 116 DEFR D0Ar4, TXSTATI
 117
 118 /* Mask off anything unrelated to the deferred exception triggers */
 119 MOV D1Ar3, D0Ar4
 120 ANDT D1Ar3, D1Ar3, #HI(TXSTAT_BUSERR_BIT | TXSTAT_FPE_BITS)
 121
 122 /* Can assume that at least one exception happened since this
 123 * handler wouldn't have been called otherwise.
 124 *
 125 * Replace the signal number and at the same time, prepare
 126 * the mask to acknowledge the exception
 127 *
 128 * The unusual code for 1<<D1Ar3 may need explanation.
 129 * Normally this would be done using 'MOV rs,#1' and 'LSL rd,rs,D1Ar3'
 130 * but only D1Re0 is available in D1 and no crossunit insns are available
 131 * Even worse, there is no conditional 'MOV r,#uimm8'.
 132 * Since the CMP proves that D1Ar3 >= 20, we can reuse the bottom 12-bits
 133 * of D1Re0 (using 'ORGT r,#1') in the knowledge that the top 20-bits will
 134 * be discarded without affecting the result.
 135 *
 136 * D1Re0 -- The bits to acknowledge
 137 * D1Ar3 -- The signal number
 138 */
 139 MOVT D1Re0, #HI(TXSTAT_FPE_BITS & ~TXSTAT_FPE_DENORMAL_BIT)
 140 MOV D0Re0, #TXSTAT_FPE_INVALID_S
 141 FFB D1Ar3, D1Ar3
 142 CMP D1Ar3, #TXSTAT_FPE_INVALID_S
 143 MOVLE D1Ar3, D0Re0 /* Collapse FPE triggers to a single signal */
 144 ORGT D1Re0, D1Re0, #1
 145 LSLGT D1Re0, D1Re0, D1Ar3
 146
 147 SUB D1Ar3, D1Ar3, #(TXSTAT_FPE_INVALID_S - TBID_SIGNUM_FPE)
 148
 149 /* Acknowledge triggers and restore remaining exceptions
 150 * Do this here in case the handler enables nested interrupts
 151 *
 152 * (x | y) ^ y == x & ~y. It avoids the restrictive XOR ...,#-1 insn
 153 * and is the same length
 154 */
 155 MOV D0Re0, TXDEFR
 156 OR D0Re0, D0Re0, D1Re0
 157 XOR TXDEFR, D0Re0, D1Re0
 158
 159 /* Get the handler using the signal number
 160 *
 161 * D1Ar3 -- The signal number
 162 * D0Re0 -- Address of handler
 163 */
 164 LSL D0Re0, D1Ar3, #2
 165 ADD D0Re0,D0Re0,#TBI_fnSigs
 166 GETD D0Re0, [D0Ar6+D0Re0]
 167
 168 /* Tailcall the handler */
 169 MOV PC,D0Re0
 170
 171#endif /* META_BUG_MBN100212 */
 172 .size ___TBIHandleDFR,.-___TBIHandleDFR
173/*
174 * End of tbidefr.S
175 */
diff --git a/arch/metag/tbx/tbidspram.S b/arch/metag/tbx/tbidspram.S
deleted file mode 100644
index 2f27c0372212..000000000000
--- a/arch/metag/tbx/tbidspram.S
+++ /dev/null
@@ -1,161 +0,0 @@
1/*
2 * tbidspram.S
3 *
4 * Copyright (C) 2009, 2012 Imagination Technologies.
5 *
6 * This program is free software; you can redistribute it and/or modify it under
7 * the terms of the GNU General Public License version 2 as published by the
8 * Free Software Foundation.
9 *
10 * Explicit state save and restore routines forming part of the thread binary
11 * interface for META processors
12 */
13
14 .file "tbidspram.S"
15
 16/* These aren't generally useful to a user so for now, they aren't publicly available */
17#define _TBIECH_DSPRAM_DUA_S 8
18#define _TBIECH_DSPRAM_DUA_BITS 0x7f00
19#define _TBIECH_DSPRAM_DUB_S 0
20#define _TBIECH_DSPRAM_DUB_BITS 0x007f
21
 22/*
 23 * void *__TBIDspramSaveA( short DspramSizes, void *pExt )
 24 */
 25 .text
 26 .balign 4
 27 .global ___TBIDspramSaveA
 28 .type ___TBIDspramSaveA,function
 29___TBIDspramSaveA:
 30
 31 SETL [A0StP++], D0.5, D1.5
 32 MOV A0.3, D0Ar2
 33
 34 /* D1Ar1 - Dspram Sizes
 35 * A0.3 - Pointer to buffer
 36 */
 37
 38 /* Save the specified amount of dspram DUA */
 39DL MOV D0AR.0, #0
 40 LSR D1Ar1, D1Ar1, #_TBIECH_DSPRAM_DUA_S
 41 AND D1Ar1, D1Ar1, #(_TBIECH_DSPRAM_DUA_BITS >> _TBIECH_DSPRAM_DUA_S)
 42 SUB TXRPT, D1Ar1, #1 /* Hardware repeat count for the BR loop */
 43$L1:
 44DL MOV D0Re0, [D0AR.0++]
 45DL MOV D0Ar6, [D0AR.0++]
 46DL MOV D0Ar4, [D0AR.0++]
 47DL MOV D0.5, [D0AR.0++]
 48 MSETL [A0.3++], D0Re0, D0Ar6, D0Ar4, D0.5
 49
 50 BR $L1
 51
 52 GETL D0.5, D1.5, [--A0StP]
 53 MOV PC, D1RtP
 54
 55 .size ___TBIDspramSaveA,.-___TBIDspramSaveA
56
 57/*
 58 * void *__TBIDspramSaveB( short DspramSizes, void *pExt )
 59 */
 60 .balign 4
 61 .global ___TBIDspramSaveB
 62 .type ___TBIDspramSaveB,function
 63___TBIDspramSaveB:
 64
 65 SETL [A0StP++], D0.5, D1.5
 66 MOV A0.3, D0Ar2
 67
 68 /* D1Ar1 - Dspram Sizes
 69 * A0.3 - Pointer to buffer
 70 */
 71
 72 /* Save the specified amount of dspram DUB */
 73DL MOV D0BR.0, #0
 74 LSR D1Ar1, D1Ar1, #_TBIECH_DSPRAM_DUB_S
 75 AND D1Ar1, D1Ar1, #(_TBIECH_DSPRAM_DUB_BITS >> _TBIECH_DSPRAM_DUB_S)
 76 SUB TXRPT, D1Ar1, #1 /* Hardware repeat count for the BR loop */
 77$L2:
 78DL MOV D0Re0, [D0BR.0++]
 79DL MOV D0Ar6, [D0BR.0++]
 80DL MOV D0Ar4, [D0BR.0++]
 81DL MOV D0.5, [D0BR.0++]
 82 MSETL [A0.3++], D0Re0, D0Ar6, D0Ar4, D0.5
 83
 84 BR $L2
 85
 86 GETL D0.5, D1.5, [--A0StP]
 87 MOV PC, D1RtP
 88
 89 .size ___TBIDspramSaveB,.-___TBIDspramSaveB
90
 91/*
 92 * void *__TBIDspramRestoreA( short DspramSizes, void *pExt )
 93 */
 94 .balign 4
 95 .global ___TBIDspramRestoreA
 96 .type ___TBIDspramRestoreA,function
 97___TBIDspramRestoreA:
 98
 99 SETL [A0StP++], D0.5, D1.5
 100 MOV A0.3, D0Ar2
 101
 102 /* D1Ar1 - Dspram Sizes
 103 * A0.3 - Pointer to buffer
 104 */
 105
 106 /* Restore the specified amount of dspram DUA */
 107DL MOV D0AW.0, #0
 108 LSR D1Ar1, D1Ar1, #_TBIECH_DSPRAM_DUA_S
 109 AND D1Ar1, D1Ar1, #(_TBIECH_DSPRAM_DUA_BITS >> _TBIECH_DSPRAM_DUA_S)
 110 SUB TXRPT, D1Ar1, #1 /* Hardware repeat count for the BR loop */
 111$L3:
 112 MGETL D0Re0, D0Ar6, D0Ar4, D0.5, [A0.3++]
 113DL MOV [D0AW.0++], D0Re0
 114DL MOV [D0AW.0++], D0Ar6
 115DL MOV [D0AW.0++], D0Ar4
 116DL MOV [D0AW.0++], D0.5
 117
 118 BR $L3
 119
 120 GETL D0.5, D1.5, [--A0StP]
 121 MOV PC, D1RtP
 122
 123 .size ___TBIDspramRestoreA,.-___TBIDspramRestoreA
124
 125/*
 126 * void *__TBIDspramRestoreB( short DspramSizes, void *pExt )
 127 */
 128 .balign 4
 129 .global ___TBIDspramRestoreB
 130 .type ___TBIDspramRestoreB,function
 131___TBIDspramRestoreB:
 132
 133 SETL [A0StP++], D0.5, D1.5
 134 MOV A0.3, D0Ar2
 135
 136 /* D1Ar1 - Dspram Sizes
 137 * A0.3 - Pointer to buffer
 138 */
 139
 140 /* Restore the specified amount of dspram DUB */
 141DL MOV D0BW.0, #0
 142 LSR D1Ar1, D1Ar1, #_TBIECH_DSPRAM_DUB_S
 143 AND D1Ar1, D1Ar1, #(_TBIECH_DSPRAM_DUB_BITS >> _TBIECH_DSPRAM_DUB_S)
 144 SUB TXRPT, D1Ar1, #1 /* Hardware repeat count for the BR loop */
 145$L4:
 146 MGETL D0Re0, D0Ar6, D0Ar4, D0.5, [A0.3++]
 147DL MOV [D0BW.0++], D0Re0
 148DL MOV [D0BW.0++], D0Ar6
 149DL MOV [D0BW.0++], D0Ar4
 150DL MOV [D0BW.0++], D0.5
 151
 152 BR $L4
 153
 154 GETL D0.5, D1.5, [--A0StP]
 155 MOV PC, D1RtP
 156
 157 .size ___TBIDspramRestoreB,.-___TBIDspramRestoreB
158
159/*
160 * End of tbidspram.S
161 */
diff --git a/arch/metag/tbx/tbilogf.S b/arch/metag/tbx/tbilogf.S
deleted file mode 100644
index 4a34d80657db..000000000000
--- a/arch/metag/tbx/tbilogf.S
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * tbilogf.S
3 *
4 * Copyright (C) 2001, 2002, 2007, 2012 Imagination Technologies.
5 *
6 * This program is free software; you can redistribute it and/or modify it under
7 * the terms of the GNU General Public License version 2 as published by the
8 * Free Software Foundation.
9 *
10 * Defines __TBILogF trap code for debugging messages and __TBICont for debug
11 * assert to be implemented on.
12 */
13
14 .file "tbilogf.S"
15
 16/*
 17 * Perform console printf using external debugger or host support
 18 */
 19 .text
 20 .balign 4
 21 .global ___TBILogF
 22 .type ___TBILogF,function
 23___TBILogF:
 24 MSETL [A0StP],D0Ar6,D0Ar4,D0Ar2 /* Push 3 arg reg pairs (24 bytes) */
 25 SWITCH #0xC10020 /* Trap to debugger/host LogF service */
 26 MOV D0Re0,#0 /* Result is always 0 */
 27 SUB A0StP,A0StP,#24 /* Pop the pushed argument block */
 28 MOV PC,D1RtP /* Return */
 29 .size ___TBILogF,.-___TBILogF
30
 31/*
 32 * Perform wait for continue under control of the debugger
 33 */
 34 .text
 35 .balign 4
 36 .global ___TBICont
 37 .type ___TBICont,function
 38___TBICont:
 39 MOV D0Ar6,#1
 40 MSETL [A0StP],D0Ar6,D0Ar4,D0Ar2 /* Push 3 arg reg pairs (24 bytes) */
 41 SWITCH #0xC30006 /* Returns if we are to continue */
 42 SUB A0StP,A0StP,#(8*3) /* Pop the pushed argument block */
 43 MOV PC,D1RtP /* Return */
 44 .size ___TBICont,.-___TBICont
45
46/*
47 * End of tbilogf.S
48 */
diff --git a/arch/metag/tbx/tbipcx.S b/arch/metag/tbx/tbipcx.S
deleted file mode 100644
index 163c79ac913b..000000000000
--- a/arch/metag/tbx/tbipcx.S
+++ /dev/null
@@ -1,451 +0,0 @@
1/*
2 * tbipcx.S
3 *
4 * Copyright (C) 2001, 2002, 2007, 2009, 2012 Imagination Technologies.
5 *
6 * This program is free software; you can redistribute it and/or modify it under
7 * the terms of the GNU General Public License version 2 as published by the
8 * Free Software Foundation.
9 *
10 * Asyncronous trigger handling including exceptions
11 */
12
13 .file "tbipcx.S"
14#include <asm/metag_regs.h>
15#include <asm/tbx.h>
16
17/* BEGIN HACK */
18/* define these for now while doing initial conversion to GAS
19 will fix properly later */
20
21/* Signal identifiers always have the TBID_SIGNAL_BIT set and contain the
22 following related bit-fields */
23#define TBID_SIGNUM_S 2
24
25/* END HACK */
26
27#ifdef METAC_1_0
28/* Ax.4 is saved in TBICTX */
29#define A0_4 ,A0.4
30#else
31/* Ax.4 is NOT saved in TBICTX */
32#define A0_4
33#endif
34
35/* Size of the TBICTX structure */
36#define TBICTX_BYTES ((TBICTX_AX_REGS*8)+TBICTX_AX)
37
38#ifdef METAC_1_1
39#ifndef BOOTROM
40#ifndef SPECIAL_BUILD
41/* Jump straight into the boot ROM version of this code */
42#define CODE_USES_BOOTROM
43#endif
44#endif
45#endif
46
47/* Define space needed for CATCH buffer state in traditional units */
48#define CATCH_ENTRIES 5
49#define CATCH_ENTRY_BYTES 16
50
51#ifndef CODE_USES_BOOTROM
52#define A0GblIStP A0.15 /* PTBICTX for current thread in PRIV system */
53#define A1GblIGbP A1.15 /* Interrupt A1GbP value in PRIV system */
54#endif
55
56/*
57 * TBIRES __TBIASyncTrigger( TBIRES State )
58 */
59 .text
60 .balign 4
61 .global ___TBIASyncTrigger
62 .type ___TBIASyncTrigger,function
63___TBIASyncTrigger:
64#ifdef CODE_USES_BOOTROM
65 MOVT D0Re0,#HI(LINCORE_BASE)
66 JUMP D0Re0,#0xA0
67#else
68 MOV D0FrT,A0FrP /* Boing entry sequence */
69 ADD A0FrP,A0StP,#0
70 SETL [A0StP++],D0FrT,D1RtP
71 MOV D0Re0,PCX /* Check for repeat call */
72 MOVT D0FrT,#HI(___TBIBoingRTI+4)
73 ADD D0FrT,D0FrT,#LO(___TBIBoingRTI+4)
74 CMP D0Re0,D0FrT
75 BEQ ___TBIBoingExit /* Already set up - come out */
76 ADD D1Ar1,D1Ar1,#7 /* PRIV system stack here */
77 MOV A0.2,A0StP /* else push context here */
78 MOVS D0Re0,D0Ar2 /* Return in user mode? */
79 ANDMB D1Ar1,D1Ar1,#0xfff8 /* align priv stack to 64-bit */
80 MOV D1Re0,D1Ar1 /* and set result to arg */
81 MOVMI A0.2,D1Ar1 /* use priv stack if PRIV set */
82/*
83 * Generate an initial TBICTX to return to our own current call context
84 */
85 MOVT D1Ar5,#HI(___TBIBoingExit) /* Go here to return */
86 ADD D1Ar5,D1Ar5,#LO(___TBIBoingExit)
87 ADD A0.3,A0.2,#TBICTX_DX /* DX Save area */
88 ANDT D0Ar2,D0Ar2,#TBICTX_PRIV_BIT /* Extract PRIV bit */
89 MOVT D0Ar6,#TBICTX_SOFT_BIT /* Only soft thread state */
90 ADD D0Ar6,D0Ar6,D0Ar2 /* Add in PRIV bit if requested */
91 SETL [A0.2],D0Ar6,D1Ar5 /* Push header fields */
92 ADD D0FrT,A0.2,#TBICTX_AX /* Address AX save area */
93 MSETL [A0.3],D0Re0,D0Ar6,D0Ar4,D0Ar2,D0FrT,D0.5,D0.6,D0.7
94 MOV D0Ar6,#0
95 MOV D1Ar5,#0
96 SETL [A0.3++],D0Ar6,D1Ar5 /* Zero CT register states */
97 SETL [A0.3++],D0Ar6,D1Ar5
98 MSETL [D0FrT],A0StP,A0FrP,A0.2,A0.3 A0_4 /* Save AX regs */
99 MOV A0FrP,A0.2 /* Restore me! */
100 B ___TBIResume
101 .size ___TBIASyncTrigger,.-___TBIASyncTrigger
102
103/*
104 * Optimised return to handler for META Core
105 */
106___TBIBoingRTH:
107 RTH /* Go to background level */
108 MOVT A0.2, #HI($Lpcx_target)
109 ADD A0.2,A0.2,#LO($Lpcx_target)
110 MOV PCX,A0.2 /* Setup PCX for interrupts */
111 MOV PC,D1Re0 /* Jump to handler */
112/*
113 * This is where the code below needs to jump to wait for outermost interrupt
114 * event in a non-privilege mode system (single shared interrupt stack).
115 */
116___TBIBoingPCX:
117 MGETL A0StP,A0FrP,A0.2,A0.3 A0_4,[D1Re0] /* Restore AX regs */
118 MOV TXSTATUS,D0Re0 /* Restore flags */
119 GETL D0Re0,D1Re0,[D1Re0+#TBICTX_DX-TBICTX_BYTES]
120___TBIBoingRTI:
121 RTI /* Wait for interrupt */
122$Lpcx_target:
123/*
124 * Save initial interrupt state on current stack
125 */
126 SETL [A0StP+#TBICTX_DX],D0Re0,D1Re0 /* Save key registers */
127 ADD D1Re0,A0StP,#TBICTX_AX /* Address AX save area */
128 MOV D0Re0,TXSTATUS /* Read TXSTATUS into D0Re0 */
129 MOV TXSTATUS,#0 /* Clear TXSTATUS */
130 MSETL [D1Re0],A0StP,A0FrP,A0.2,A0.3 A0_4 /* Save AX critical regs */
131/*
132 * Register state at this point is-
133 *
134 * D0Re0 - Old TXSTATUS with PRIV and CBUF bits set if appropriate
135 * A0StP - Is call stack frame and base of TBICTX being generated
136 * A1GbP - Is valid static access link
137 */
138___TBIBoing:
139 LOCK0 /* Make sure we have no locks! */
140 ADD A1.2,A0StP,#TBICTX_DX+(8*1) /* Address DX.1 save area */
141 MOV A0FrP,A0StP /* Setup frame pointer */
142 MSETL [A1.2],D0Ar6,D0Ar4,D0Ar2,D0FrT,D0.5,D0.6,D0.7
143 MOV D0Ar4,TXRPT /* Save critical CT regs */
144 MOV D1Ar3,TXBPOBITS
145 MOV D1Ar1,TXDIVTIME /* Calc catch buffer pSrc */
146 MOV D0Ar2,TXMODE
147 MOV TXMODE,#0 /* Clear TXMODE */
148#ifdef TXDIVTIME_RPDIRTY_BIT
149 TSTT D1Ar1,#HI(TXDIVTIME_RPDIRTY_BIT)/* NZ = RPDIRTY */
150 MOVT D0Ar6,#TBICTX_CBRP_BIT
151 ORNZ D0Re0,D0Re0,D0Ar6 /* Set CBRP if RPDIRTY set */
152#endif
153 MSETL [A1.2],D0Ar4,D0Ar2 /* Save CT regs state */
154 MOV D0Ar2,D0Re0 /* Copy TXSTATUS */
155 ANDMT D0Ar2,D0Ar2,#TBICTX_CBUF_BIT+TBICTX_CBRP_BIT
156#ifdef TBI_1_4
157 MOVT D1Ar1,#TBICTX_FPAC_BIT /* Copy FPActive into FPAC */
158 TSTT D0Re0,#HI(TXSTATUS_FPACTIVE_BIT)
159 ORNZ D0Ar2,D0Ar2,D1Ar1
160#endif
161 MOV D1Ar1,PCX /* Read CurrPC */
162 ORT D0Ar2,D0Ar2,#TBICTX_CRIT_BIT /* SaveMask + CRIT bit */
163 SETL [A0FrP+#TBICTX_Flags],D0Ar2,D1Ar1 /* Set pCtx header fields */
164/*
165 * Completed context save, now we need to make a call to an interrupt handler
166 *
167 * D0Re0 - holds PRIV, WAIT, CBUF flags, HALT reason if appropriate
168 * A0FrP - interrupt stack frame and base of TBICTX being generated
169 * A0StP - same as A0FrP
170 */
171___TBIBoingWait:
172 /* Reserve space for TBICTX and CBUF */
173 ADD A0StP,A0StP,#TBICTX_BYTES+(CATCH_ENTRY_BYTES*CATCH_ENTRIES)
174 MOV D0Ar4,TXSTATI /* Read the Triggers data */
175 MOV D1Ar3,TXDIVTIME /* Read IRQEnc bits */
176 MOV D0Ar2,D0Re0 /* Copy PRIV and WAIT flags */
177 ANDT D0Ar2,D0Ar2,#TBICTX_PRIV_BIT+TBICTX_WAIT_BIT+TBICTX_CBUF_BIT
178#ifdef TBI_1_4
179 MOVT D1Ar5,#TBICTX_FPAC_BIT /* Copy FPActive into FPAC */
180 TSTT D0Re0,#HI(TXSTATUS_FPACTIVE_BIT)
181 ORNZ D0Ar2,D0Ar2,D1Ar5
182#endif
183 ANDT D1Ar3,D1Ar3,#HI(TXDIVTIME_IRQENC_BITS)
184 LSR D1Ar3,D1Ar3,#TXDIVTIME_IRQENC_S
185 AND TXSTATI,D0Ar4,#TXSTATI_BGNDHALT_BIT/* Ack any HALT seen */
186 ANDS D0Ar4,D0Ar4,#0xFFFF-TXSTATI_BGNDHALT_BIT /* Only seen HALT? */
187 ORT D0Ar2,D0Ar2,#TBICTX_CRIT_BIT /* Set CRIT */
188#ifndef BOOTROM
189 MOVT A1LbP,#HI(___pTBIs)
190 ADD A1LbP,A1LbP,#LO(___pTBIs)
191 GETL D1Ar5,D0Ar6,[A1LbP] /* D0Ar6 = ___pTBIs[1] */
192#else
193/*
194 * For BOOTROM support ___pTBIs must be allocated at offset 0 vs A1GbP
195 */
196 GETL D1Ar5,D0Ar6,[A1GbP] /* D0Ar6 = ___pTBIs[1] */
197#endif
198 BZ ___TBIBoingHalt /* Yes: Service HALT */
199/*
200 * Encode interrupt as signal vector, strip away same/lower TXMASKI bits
201 */
202 MOV D1Ar1,#1 /* Generate mask for this bit */
203 MOV D0Re0,TXMASKI /* Get interrupt mask */
204 LSL TXSTATI,D1Ar1,D1Ar3 /* Acknowledge trigger */
205 AND TXMASKI,D0Re0,#TXSTATI_BGNDHALT_BIT /* Only allow HALTs */
206 OR D0Ar2,D0Ar2,D0Re0 /* Set TBIRES.Sig.TrigMask */
207 ADD D1Ar3,D1Ar3,#TBID_SIGNUM_TRT /* Offset into interrupt sigs */
208 LSL D0Re0,D1Ar3,#TBID_SIGNUM_S /* Generate offset from SigNum */
209/*
210 * This is a key moment we are about to call the handler, register state is
211 * as follows-
212 *
213 * D0Re0 - Handler vector (SigNum<<TBID_SIGNUM_S)
214 * D0Ar2 - TXMASKI:TBICTX_CRIT_BIT with optional CBUF and PRIV bits
215 * D1Ar3 - SigNum
216 * D0Ar4 - State read from TXSTATI
217 * D1Ar5 - Inst for SWITCH trigger case only, otherwise undefined
218 * D0Ar6 - pTBI
219 */
220___TBIBoingVec:
221 ADD D0Re0,D0Re0,#TBI_fnSigs /* Offset into signal table */
222 GETD D1Re0,[D0Ar6+D0Re0] /* Get address for Handler */
223/*
224 * Call handler at interrupt level, when it returns simply resume execution
225 * of state indicated by D1Re0.
226 */
227 MOV D1Ar1,A0FrP /* Pass in pCtx */
228 CALLR D1RtP,___TBIBoingRTH /* Use RTH to invoke handler */
229
230/*
231 * Perform critical state restore and execute background thread.
232 *
233 * A0FrP - is pointer to TBICTX structure to resume
234 * D0Re0 - contains additional TXMASKI triggers
235 */
236 .text
237 .balign 4
238#ifdef BOOTROM
239 .global ___TBIResume
240#endif
241___TBIResume:
242/*
243 * New META IP method
244 */
245 RTH /* Go to interrupt level */
246 MOV D0Ar4,TXMASKI /* Read TXMASKI */
247 OR TXMASKI,D0Ar4,D0Re0 /* -Write-Modify TXMASKI */
248 GETL D0Re0,D1Re0,[A0FrP+#TBICTX_Flags]/* Get Flags:SaveMask, CurrPC */
249 MOV A0StP,A0FrP /* Position stack pointer */
250 MOV D0Ar2,TXPOLLI /* Read pending triggers */
251 MOV PCX,D1Re0 /* Set resumption PC */
252 TST D0Ar2,#0xFFFF /* Any pending triggers? */
253 BNZ ___TBIBoingWait /* Yes: Go for triggers */
254 TSTT D0Re0,#TBICTX_WAIT_BIT /* Do we WAIT anyway? */
255 BNZ ___TBIBoingWait /* Yes: Go for triggers */
256 LSLS D1Ar5,D0Re0,#1 /* Test XCBF (MI) & PRIV (CS)? */
257 ADD D1Re0,A0FrP,#TBICTX_CurrRPT /* Address CT save area */
258 ADD A0StP,A0FrP,#TBICTX_DX+(8*1) /* Address DX.1 save area */
259 MGETL A0.2,A0.3,[D1Re0] /* Get CT reg states */
260 MOV D1Ar3,A1.3 /* Copy old TXDIVTIME */
261 BPL ___TBIResCrit /* No: Skip logic */
262 ADD D0Ar4,A0FrP,#TBICTX_BYTES /* Source is after TBICTX */
263 ANDST D1Ar3,D1Ar3,#HI(TXDIVTIME_RPMASK_BITS)/* !Z if RPDIRTY */
264 MGETL D0.5,D0.6,[D0Ar4] /* Read Catch state */
265 MOV TXCATCH0,D0.5 /* Restore TXCATCHn */
266 MOV TXCATCH1,D1.5
267 MOV TXCATCH2,D0.6
268 MOV TXCATCH3,D1.6
269 BZ ___TBIResCrit
270 MOV D0Ar2,#(1*8)
271 LSRS D1Ar3,D1Ar3,#TXDIVTIME_RPMASK_S+1 /* 2nd RPMASK bit -> bit 0 */
272 ADD RA,D0Ar4,#(0*8) /* Re-read read pipeline */
273 ADDNZ RA,D0Ar4,D0Ar2 /* If Bit 0 set issue RA */
274 LSRS D1Ar3,D1Ar3,#2 /* Bit 1 -> C, Bit 2 -> Bit 0 */
275 ADD D0Ar2,D0Ar2,#8
276 ADDCS RA,D0Ar4,D0Ar2 /* If C issue RA */
277 ADD D0Ar2,D0Ar2,#8
278 ADDNZ RA,D0Ar4,D0Ar2 /* If Bit 0 set issue RA */
279 LSRS D1Ar3,D1Ar3,#2 /* Bit 1 -> C, Bit 2 -> Bit 0 */
280 ADD D0Ar2,D0Ar2,#8
281 ADDCS RA,D0Ar4,D0Ar2 /* If C issue RA */
282 ADD D0Ar2,D0Ar2,#8
283 ADDNZ RA,D0Ar4,D0Ar2 /* If Bit 0 set issue RA */
284 MOV TXDIVTIME,A1.3 /* Set RPDIRTY again */
285___TBIResCrit:
286 LSLS D1Ar5,D0Re0,#1 /* Test XCBF (MI) & PRIV (CS)? */
287#ifdef TBI_1_4
288 ANDT D1Ar5,D1Ar5,#(TBICTX_FPAC_BIT*2)
289 LSL D0Ar6,D1Ar5,#3 /* Convert FPAC into FPACTIVE */
290#endif
291 ANDMT D0Re0,D0Re0,#TBICTX_CBUF_BIT /* Keep CBUF bit from SaveMask */
292#ifdef TBI_1_4
293 OR D0Re0,D0Re0,D0Ar6 /* Combine FPACTIVE with others */
294#endif
295 MGETL D0Ar6,D0Ar4,D0Ar2,D0FrT,D0.5,D0.6,D0.7,[A0StP] /* Restore DX */
296 MOV TXRPT,A0.2 /* Restore CT regs */
297 MOV TXBPOBITS,A1.2
298 MOV TXMODE,A0.3
299 BCC ___TBIBoingPCX /* Do non-PRIV wait! */
300 MOV A1GblIGbP,A1GbP /* Save A1GbP too */
301 MGETL A0StP,A0FrP,A0.2,A0.3 A0_4,[D1Re0] /* Restore AX regs */
302/*
303 * Wait for the first interrupt/exception trigger in a privilege mode system
304 * (interrupt stack area for current TASK to be pointed to by A0GblIStP
305 * or per_cpu__stack_save[hwthread_id]).
306 */
307 MOV TXSTATUS,D0Re0 /* Restore flags */
308 MOV D0Re0,TXPRIVEXT /* Set TXPRIVEXT_TXTOGGLEI_BIT */
309 SUB D1Re0,D1Re0,#TBICTX_BYTES /* TBICTX is top of int stack */
310#ifdef TBX_PERCPU_SP_SAVE
311 SWAP D1Ar3,A1GbP
312 MOV D1Ar3,TXENABLE /* Which thread are we? */
313 AND D1Ar3,D1Ar3,#TXENABLE_THREAD_BITS
314 LSR D1Ar3,D1Ar3,#TXENABLE_THREAD_S-2
315 ADDT D1Ar3,D1Ar3,#HI(_per_cpu__stack_save)
316 ADD D1Ar3,D1Ar3,#LO(_per_cpu__stack_save)
317 SETD [D1Ar3],D1Re0
318 SWAP D1Ar3,A1GbP
319#else
320 MOV A0GblIStP, D1Re0
321#endif
322 OR D0Re0,D0Re0,#TXPRIVEXT_TXTOGGLEI_BIT
323 MOV TXPRIVEXT,D0Re0 /* Cannot set TXPRIVEXT if !priv */
324 GETL D0Re0,D1Re0,[D1Re0+#TBICTX_DX]
325 RTI /* Wait for interrupt */
326/*
327 * Save initial interrupt state on A0GblIStP, switch to A0GblIStP if
328 * BOOTROM code, save and switch to [A1GbP] otherwise.
329 */
330___TBIBoingPCXP:
331#ifdef TBX_PERCPU_SP_SAVE
332 SWAP D1Ar3,A1GbP /* Get PRIV stack base */
333 MOV D1Ar3,TXENABLE /* Which thread are we? */
334 AND D1Ar3,D1Ar3,#TXENABLE_THREAD_BITS
335 LSR D1Ar3,D1Ar3,#TXENABLE_THREAD_S-2
336 ADDT D1Ar3,D1Ar3,#HI(_per_cpu__stack_save)
337 ADD D1Ar3,D1Ar3,#LO(_per_cpu__stack_save)
338 GETD D1Ar3,[D1Ar3]
339#else
340 SWAP D1Ar3,A0GblIStP /* Get PRIV stack base */
341#endif
342 SETL [D1Ar3+#TBICTX_DX],D0Re0,D1Re0 /* Save key registers */
343 MOV D0Re0,TXPRIVEXT /* Clear TXPRIVEXT_TXTOGGLEI_BIT */
344 ADD D1Re0,D1Ar3,#TBICTX_AX /* Address AX save area */
345 ANDMB D0Re0,D0Re0,#0xFFFF-TXPRIVEXT_TXTOGGLEI_BIT
346 MOV TXPRIVEXT,D0Re0 /* Cannot set TXPRIVEXT if !priv */
347 MOV D0Re0,TXSTATUS /* Read TXSTATUS into D0Re0 */
348 MOV TXSTATUS,#0 /* Clear TXSTATUS */
349 MSETL [D1Re0],A0StP,A0FrP,A0.2,A0.3 A0_4 /* Save AX critical regs */
350 MOV A0StP,D1Ar3 /* Switch stacks */
351#ifdef TBX_PERCPU_SP_SAVE
352 MOV D1Ar3,A1GbP /* Get D1Ar2 back */
353#else
354 MOV D1Ar3,A0GblIStP /* Get D1Ar2 back */
355#endif
356 ORT D0Re0,D0Re0,#TBICTX_PRIV_BIT /* Add PRIV to TXSTATUS */
357 MOV A1GbP,A1GblIGbP /* Restore A1GbP */
358 B ___TBIBoing /* Enter common handler code */
359/*
360 * At this point we know it's a background HALT case we are handling.
361 * The restored TXSTATUS always needs to have zero in the reason bits.
362 */
363___TBIBoingHalt:
364 MOV D0Ar4,TXMASKI /* Get interrupt mask */
365 ANDST D0Re0,D0Re0,#HI(TXSTATUS_MAJOR_HALT_BITS+TXSTATUS_MEM_FAULT_BITS)
366 AND TXMASKI,D0Ar4,#TXSTATI_BGNDHALT_BIT /* Only allow HALTs */
367 AND D0Ar4,D0Ar4,#0xFFFF-TXSTATI_BGNDHALT_BIT /* What ints are off? */
368 OR D0Ar2,D0Ar2,D0Ar4 /* Set TBIRES.Sig.TrigMask */
369 MOV D0Ar4,#TXSTATI_BGNDHALT_BIT /* This was the trigger state */
370 LSR D1Ar3,D0Re0,#TXSTATUS_MAJOR_HALT_S
371 MOV D0Re0,#TBID_SIGNUM_XXF<<TBID_SIGNUM_S
372 BNZ ___TBIBoingVec /* Jump to XXF exception handler */
373/*
374 * Only the SWITCH cases are left, PCX must be valid
375 */
376#ifdef TBI_1_4
377 MOV D1Ar5,TXPRIVEXT
378 TST D1Ar5,#TXPRIVEXT_MINIMON_BIT
379 LSR D1Ar3,D1Ar1,#1 /* Shift needed for MINIM paths (fill stall) */
380 BZ $Lmeta /* If META only, skip */
381 TSTT D1Ar1,#HI(0x00800000)
382 ANDMT D1Ar3,D1Ar3,#HI(0x007FFFFF >> 1)/* Shifted mask for large MINIM */
383 ANDT D1Ar1,D1Ar1,#HI(0xFFE00000) /* Static mask for small MINIM */
384 BZ $Llarge_minim /* If large MINIM */
385$Lsmall_minim:
386 TSTT D1Ar3,#HI(0x00100000 >> 1)
387 ANDMT D1Ar3,D1Ar3,#HI(0x001FFFFF >> 1)/* Correct shifted mask for large MINIM */
388 ADDZ D1Ar1,D1Ar1,D1Ar3 /* If META rgn, add twice to undo LSR #1 */
389 B $Lrecombine
390$Llarge_minim:
391 ANDST D1Ar1,D1Ar1,#HI(0xFF800000) /* Correct static mask for small MINIM */
392 /* Z=0 (Cannot place code at NULL) */
393$Lrecombine:
394 ADD D1Ar1,D1Ar1,D1Ar3 /* Combine static and shifted parts */
395$Lmeta:
396 GETW D1Ar5,[D1Ar1++] /* META: lo-16, MINIM: lo-16 (all-16 if short) */
397 GETW D1Ar3,[D1Ar1] /* META: hi-16, MINIM: hi-16 (only if long) */
398 MOV D1Re0,D1Ar5
399 XOR D1Re0,D1Re0,#0x4000
400 LSLSNZ D1Re0,D1Re0,#(32-14) /* MINIM: If long C=0, if short C=1 */
401 LSLCC D1Ar3,D1Ar3,#16 /* META/MINIM long: Move hi-16 up */
402 LSLCS D1Ar3,D1Ar5,#16 /* MINIM short: Dup all-16 */
403 ADD D1Ar5,D1Ar5,D1Ar3 /* ALL: Combine both 16-bit parts */
404#else
405 GETD D1Ar5,[D1Ar1] /* Read instruction for switch */
406#endif
407 LSR D1Ar3,D1Ar5,#22 /* Convert into signal number */
408 AND D1Ar3,D1Ar3,#TBID_SIGNUM_SW3-TBID_SIGNUM_SW0
409 LSL D0Re0,D1Ar3,#TBID_SIGNUM_S /* Generate offset from SigNum */
410 B ___TBIBoingVec /* Jump to switch handler */
411/*
412 * Exit from TBIASyncTrigger call
413 */
414___TBIBoingExit:
415 GETL D0FrT,D1RtP,[A0FrP++] /* Restore state from frame */
416 SUB A0StP,A0FrP,#8 /* Unwind stack */
417 MOV A0FrP,D0FrT /* Last memory read completes */
418 MOV PC,D1RtP /* Return to caller */
419#endif /* ifdef CODE_USES_BOOTROM */
420 .size ___TBIResume,.-___TBIResume
421
422#ifndef BOOTROM
423/*
424 * void __TBIASyncResume( TBIRES State )
425 */
426 .text
427 .balign 4
428 .global ___TBIASyncResume
429 .type ___TBIASyncResume,function
430___TBIASyncResume:
431/*
432 * Perform CRIT|SOFT state restore and execute background thread.
433 */
434 MOV D1Ar3,D1Ar1 /* Restore this context */
435 MOV D0Re0,D0Ar2 /* Carry in additional triggers */
436 /* Reserve space for TBICTX */
437 ADD D1Ar3,D1Ar3,#TBICTX_BYTES+(CATCH_ENTRY_BYTES*CATCH_ENTRIES)
438 MOV A0StP,D1Ar3 /* Enter with protection of */
439 MOV A0FrP,D1Ar1 /* TBICTX on our stack */
440#ifdef CODE_USES_BOOTROM
441 MOVT D1Ar1,#HI(LINCORE_BASE)
442 JUMP D1Ar1,#0xA4
443#else
444 B ___TBIResume
445#endif
446 .size ___TBIASyncResume,.-___TBIASyncResume
447#endif /* ifndef BOOTROM */
448
449/*
450 * End of tbipcx.S
451 */
diff --git a/arch/metag/tbx/tbiroot.S b/arch/metag/tbx/tbiroot.S
deleted file mode 100644
index 7d84daf1340b..000000000000
--- a/arch/metag/tbx/tbiroot.S
+++ /dev/null
@@ -1,87 +0,0 @@
1/*
2 * tbiroot.S
3 *
4 * Copyright (C) 2001, 2002, 2012 Imagination Technologies.
5 *
6 * This program is free software; you can redistribute it and/or modify it under
7 * the terms of the GNU General Public License version 2 as published by the
8 * Free Software Foundation.
9 *
10 * Module that creates and via ___TBI function returns a TBI Root Block for
11 * interrupt and background processing on the current thread.
12 */
13
14 .file "tbiroot.S"
15#include <asm/metag_regs.h>
16
17/*
18 * Get data structures and defines from the TBI C header
19 */
20#include <asm/tbx.h>
21
22
23/* If signals need to be exchanged we must create a TBI Root Block */
24
25 .data
26 .balign 8
27 .global ___pTBIs
28 .type ___pTBIs,object
29___pTBIs:
30 .long 0 /* Bgnd+Int root block ptrs */
31 .long 0
32 .size ___pTBIs,.-___pTBIs
33
34
35/*
36 * Return ___pTBIs value specific to execution level with promotion/demotion
37 *
38 * Register Usage: D1Ar1 is Id, D0Re0 is the primary result
39 * D1Re0 is secondary result (___pTBIs for other exec level)
40 */
41 .text
42 .balign 4
43 .global ___TBI
44 .type ___TBI,function
45___TBI:
46 TSTT D1Ar1,#HI(TBID_ISTAT_BIT) /* Bgnd or Int level? */
47 MOVT A1LbP,#HI(___pTBIs)
48 ADD A1LbP,A1LbP,#LO(___pTBIs)
49 GETL D0Re0,D1Re0,[A1LbP] /* Base of root block table */
50 SWAPNZ D0Re0,D1Re0 /* Swap if asked */
51 MOV PC,D1RtP
52 .size ___TBI,.-___TBI
53
54
55/*
56 * Return identifier of the current thread in TBI segment or signal format with
57 * secondary mask to indicate privilege and interrupt level of thread
58 */
59 .text
60 .balign 4
61 .global ___TBIThrdPrivId
62 .type ___TBIThrdPrivId,function
63___TBIThrdPrivId:
64 .global ___TBIThreadId
65 .type ___TBIThreadId,function
66___TBIThreadId:
67#ifndef METAC_0_1
68 MOV D1Re0,TXSTATUS /* Are we privileged or int? */
69 MOV D0Re0,TXENABLE /* Which thread are we? */
70/* Disable privilege adaption for now */
71 ANDT D1Re0,D1Re0,#HI(TXSTATUS_ISTAT_BIT) /* +TXSTATUS_PSTAT_BIT) */
72 LSL D1Re0,D1Re0,#TBID_ISTAT_S-TXSTATUS_ISTAT_S
73 AND D0Re0,D0Re0,#TXENABLE_THREAD_BITS
74 LSL D0Re0,D0Re0,#TBID_THREAD_S-TXENABLE_THREAD_S
75#else
76/* Thread 0 only */
77 XOR D0Re0,D0Re0,D0Re0
78 XOR D1Re0,D1Re0,D1Re0
79#endif
80 MOV PC,D1RtP /* Return */
81 .size ___TBIThrdPrivId,.-___TBIThrdPrivId
82 .size ___TBIThreadId,.-___TBIThreadId
83
84
85/*
86 * End of tbiroot.S
87 */
diff --git a/arch/metag/tbx/tbisoft.S b/arch/metag/tbx/tbisoft.S
deleted file mode 100644
index b04f50df8d91..000000000000
--- a/arch/metag/tbx/tbisoft.S
+++ /dev/null
@@ -1,237 +0,0 @@
1/*
2 * tbisoft.S
3 *
4 * Copyright (C) 2001, 2002, 2007, 2012 Imagination Technologies.
5 *
6 * This program is free software; you can redistribute it and/or modify it under
7 * the terms of the GNU General Public License version 2 as published by the
8 * Free Software Foundation.
9 *
10 * Support for soft threads and soft context switches
11 */
12
13 .file "tbisoft.S"
14
15#include <asm/tbx.h>
16
17#ifdef METAC_1_0
18/* Ax.4 is saved in TBICTX */
19#define A0_4 ,A0.4
20#define D0_5 ,D0.5
21#else
22/* Ax.4 is NOT saved in TBICTX */
23#define A0_4
24#define D0_5
25#endif
26
27/* Size of the TBICTX structure */
28#define TBICTX_BYTES ((TBICTX_AX_REGS*8)+TBICTX_AX)
29
30 .text
31 .balign 4
32 .global ___TBISwitchTail
33 .type ___TBISwitchTail,function
34___TBISwitchTail:
35 B $LSwitchTail
36 .size ___TBISwitchTail,.-___TBISwitchTail
37
38/*
39 * TBIRES __TBIJumpX( TBIX64 ArgsA, PTBICTX *rpSaveCtx, int TrigsMask,
40 * void (*fnMain)(), void *pStack );
41 *
42 * This is a combination of __TBISwitch and __TBIJump with the context of
43 * the calling thread being saved in the rpSaveCtx location with a drop-thru
44 * effect into the __TBIJump logic. ArgsB passes via __TBIJump to the
45 * routine eventually invoked will reflect the rpSaveCtx value specified.
46 */
47 .text
48 .balign 4
49 .global ___TBIJumpX
50 .type ___TBIJumpX,function
51___TBIJumpX:
52 CMP D1RtP,#-1
53 B $LSwitchStart
54 .size ___TBIJumpX,.-___TBIJumpX
55
56/*
57 * TBIRES __TBISwitch( TBIRES Switch, PTBICTX *rpSaveCtx )
58 *
59 * Software synchronous context switch between soft threads, save only the
60 * registers which are actually valid on call entry.
61 *
62 * A0FrP, D0RtP, D0.5, D0.6, D0.7 - Saved on stack
63 * A1GbP is global to all soft threads so not virtualised
64 * A0StP is then saved as the base of the TBICTX of the thread
65 *
66 */
67 .text
68 .balign 4
69 .global ___TBISwitch
70 .type ___TBISwitch,function
71___TBISwitch:
72 XORS D0Re0,D0Re0,D0Re0 /* Set ZERO flag */
73$LSwitchStart:
74 MOV D0FrT,A0FrP /* Boing entry sequence */
75 ADD A0FrP,A0StP,#0
76 SETL [A0StP+#8++],D0FrT,D1RtP
77/*
78 * Save current frame state - we save all regs because we don't want
79 * uninitialised crap in the TBICTX structure that the asynchronous resumption
80 * of a thread will restore.
81 */
82 MOVT D1Re0,#HI($LSwitchExit) /* ASync resume point here */
83 ADD D1Re0,D1Re0,#LO($LSwitchExit)
84 SETD [D1Ar3],A0StP /* Record pCtx of this thread */
85 MOVT D0Re0,#TBICTX_SOFT_BIT /* Only soft thread state */
86 SETL [A0StP++],D0Re0,D1Re0 /* Push header fields */
87 ADD D0FrT,A0StP,#TBICTX_AX-TBICTX_DX /* Address AX save area */
88 MOV D0Re0,#0 /* Setup 0:0 result for ASync */
89 MOV D1Re0,#0 /* resume of the thread */
90 MSETL [A0StP],D0Re0,D0Ar6,D0Ar4,D0Ar2,D0FrT,D0.5,D0.6,D0.7
91 SETL [A0StP++],D0Re0,D1Re0 /* Zero CurrRPT, CurrBPOBITS, */
92 SETL [A0StP++],D0Re0,D1Re0 /* Zero CurrMODE, CurrDIVTIME */
93 ADD A0StP,A0StP,#(TBICTX_AX_REGS*8) /* Reserve AX save space */
94 MSETL [D0FrT],A0StP,A0FrP,A0.2,A0.3 A0_4 /* Save AX regs */
95 BNZ ___TBIJump
96/*
97 * NextThread MUST be in TBICTX_SOFT_BIT state!
98 */
99$LSwitchTail:
100 MOV D0Re0,D0Ar2 /* Result from args */
101 MOV D1Re0,D1Ar1
102 ADD D1RtP,D1Ar1,#TBICTX_AX
103 MGETL A0StP,A0FrP,[D1RtP] /* Get frame values */
104$LSwitchCmn:
105 ADD A0.2,D1Ar1,#TBICTX_DX+(8*5)
106 MGETL D0.5,D0.6,D0.7,[A0.2] /* Get caller-saved DX regs */
107$LSwitchExit:
108 GETL D0FrT,D1RtP,[A0FrP++] /* Restore state from frame */
109 SUB A0StP,A0FrP,#8 /* Unwind stack */
110 MOV A0FrP,D0FrT /* Last memory read completes */
111 MOV PC,D1RtP /* Return to caller */
112 .size ___TBISwitch,.-___TBISwitch
113
114/*
115 * void __TBISyncResume( TBIRES State, int TrigMask );
116 *
117 * This routine causes the TBICTX structure specified in State.Sig.pCtx to
118 * be restored. This implies that execution will not return to the caller.
119 * The State.Sig.TrigMask field will be ored into TXMASKI during the
120 * context switch such that any immediately occurring interrupts occur in
121 * the context of the newly specified task. The State.Sig.SaveMask parameter
122 * is ignored.
123 */
124 .text
125 .balign 4
126 .global ___TBISyncResume
127 .type ___TBISyncResume,function
128___TBISyncResume:
129 MOV D0Re0,D0Ar2 /* Result from args */
130 MOV D1Re0,D1Ar1
131 XOR D1Ar5,D1Ar5,D1Ar5 /* D1Ar5 = 0 */
132 ADD D1RtP,D1Ar1,#TBICTX_AX
133 SWAP D1Ar5,TXMASKI /* D1Ar5 <-> TXMASKI */
134 MGETL A0StP,A0FrP,[D1RtP] /* Get frame values */
135 OR TXMASKI,D1Ar5,D1Ar3 /* New TXMASKI */
136 B $LSwitchCmn
137 .size ___TBISyncResume,.-___TBISyncResume
138
139/*
140 * void __TBIJump( TBIX64 ArgsA, TBIX32 ArgsB, int TrigsMask,
141 * void (*fnMain)(), void *pStack );
142 *
143 * Jump directly to a new routine on an arbitrary stack with arbitrary args
144 * oring bits back into TXMASKI on route.
145 */
146 .text
147 .balign 4
148 .global ___TBIJump
149 .type ___TBIJump,function
150___TBIJump:
151 XOR D0Re0,D0Re0,D0Re0 /* D0Re0 = 0 */
152 MOV A0StP,D0Ar6 /* Stack = Frame */
153 SWAP D0Re0,TXMASKI /* D0Re0 <-> TXMASKI */
154 MOV A0FrP,D0Ar6
155 MOVT A1LbP,#HI(__exit)
156 ADD A1LbP,A1LbP,#LO(__exit)
157 MOV D1RtP,A1LbP /* D1RtP = __exit */
158 OR TXMASKI,D0Re0,D0Ar4 /* New TXMASKI */
159 MOV PC,D1Ar5 /* Jump to fnMain */
160 .size ___TBIJump,.-___TBIJump
161
162/*
163 * PTBICTX __TBISwitchInit( void *pStack, int (*fnMain)(),
164 * .... 4 extra 32-bit args .... );
165 *
166 * Generate a new soft thread context ready for it's first outing.
167 *
168 * D1Ar1 - Region of memory to be used as the new soft thread stack
169 * D0Ar2 - Main line routine for new soft thread
170 * D1Ar3, D0Ar4, D1Ar5, D0Ar6 - arguments to be passed on stack
171 * The routine returns the initial PTBICTX value for the new thread
172 */
173 .text
174 .balign 4
175 .global ___TBISwitchInit
176 .type ___TBISwitchInit,function
177___TBISwitchInit:
178 MOV D0FrT,A0FrP /* Need save return point */
179 ADD A0FrP,A0StP,#0
180 SETL [A0StP++],D0FrT,D1RtP /* Save return to caller */
181 MOVT A1LbP,#HI(__exit)
182 ADD A1LbP,A1LbP,#LO(__exit)
183 MOV D1RtP,A1LbP /* Get address of __exit */
184 ADD D1Ar1,D1Ar1,#7 /* Align stack to 64-bits */
185 ANDMB D1Ar1,D1Ar1,#0xfff8 /* by rounding base up */
186 MOV A0.2,D1Ar1 /* A0.2 is new stack */
187 MOV D0FrT,D1Ar1 /* Initial puesdo-frame pointer */
188 SETL [A0.2++],D0FrT,D1RtP /* Save return to __exit */
189 MOV D1RtP,D0Ar2
190 SETL [A0.2++],D0FrT,D1RtP /* Save return to fnMain */
191 ADD D0FrT,D0FrT,#8 /* Advance puesdo-frame pointer */
192 MSETL [A0.2],D0Ar6,D0Ar4 /* Save extra initial args */
193 MOVT D1RtP,#HI(___TBIStart) /* Start up code for new stack */
194 ADD D1RtP,D1RtP,#LO(___TBIStart)
195 SETL [A0.2++],D0FrT,D1RtP /* Save return to ___TBIStart */
196 ADD D0FrT,D0FrT,#(8*3) /* Advance puesdo-frame pointer */
197 MOV D0Re0,A0.2 /* Return pCtx for new thread */
198 MOV D1Re0,#0 /* pCtx:0 is default Arg1:Arg2 */
199/*
200 * Generate initial TBICTX state
201 */
202 MOVT D1Ar1,#HI($LSwitchExit) /* Async restore code */
203 ADD D1Ar1,D1Ar1,#LO($LSwitchExit)
204 MOVT D0Ar2,#TBICTX_SOFT_BIT /* Only soft thread state */
205 ADD D0Ar6,A0.2,#TBICTX_BYTES /* New A0StP */
206 MOV D1Ar5,A1GbP /* Same A1GbP */
207 MOV D0Ar4,D0FrT /* Initial A0FrP */
208 MOV D1Ar3,A1LbP /* Same A1LbP */
209 SETL [A0.2++],D0Ar2,D1Ar1 /* Set header fields */
210 MSETL [A0.2],D0Re0,D0Ar6,D0Ar4,D0Ar2,D0FrT,D0.5,D0.6,D0.7
211 MOV D0Ar2,#0 /* Zero values */
212 MOV D1Ar1,#0
213 SETL [A0.2++],D0Ar2,D1Ar1 /* Zero CurrRPT, CurrBPOBITS, */
214 SETL [A0.2++],D0Ar2,D1Ar1 /* CurrMODE, and pCurrCBuf */
215 MSETL [A0.2],D0Ar6,D0Ar4,D0Ar2,D0FrT D0_5 /* Set DX and then AX regs */
216 B $LSwitchExit /* All done! */
217 .size ___TBISwitchInit,.-___TBISwitchInit
218
219 .text
220 .balign 4
221 .global ___TBIStart
222 .type ___TBIStart,function
223___TBIStart:
224 MOV D1Ar1,D1Re0 /* Pass TBIRES args to call */
225 MOV D0Ar2,D0Re0
226 MGETL D0Re0,D0Ar6,D0Ar4,[A0FrP] /* Get hidden args */
227 SUB A0StP,A0FrP,#(8*3) /* Entry stack pointer */
228 MOV A0FrP,D0Re0 /* Entry frame pointer */
229 MOVT A1LbP,#HI(__exit)
230 ADD A1LbP,A1LbP,#LO(__exit)
231 MOV D1RtP,A1LbP /* D1RtP = __exit */
232 MOV PC,D1Re0 /* Jump into fnMain */
233 .size ___TBIStart,.-___TBIStart
234
235/*
236 * End of tbisoft.S
237 */
diff --git a/arch/metag/tbx/tbistring.c b/arch/metag/tbx/tbistring.c
deleted file mode 100644
index f90cd0822065..000000000000
--- a/arch/metag/tbx/tbistring.c
+++ /dev/null
@@ -1,114 +0,0 @@
1/*
2 * tbistring.c
3 *
4 * Copyright (C) 2001, 2002, 2003, 2005, 2007, 2012 Imagination Technologies.
5 *
6 * This program is free software; you can redistribute it and/or modify it under
7 * the terms of the GNU General Public License version 2 as published by the
8 * Free Software Foundation.
9 *
10 * String table functions provided as part of the thread binary interface for
11 * Meta processors
12 */
13
14#include <linux/export.h>
15#include <linux/string.h>
16#include <asm/tbx.h>
17
18/*
19 * There are not any functions to modify the string table currently, if these
20 * are required at some later point I suggest having a seperate module and
21 * ensuring that creating new entries does not interfere with reading old
22 * entries in any way.
23 */
24
25const TBISTR *__TBIFindStr(const TBISTR *start,
26 const char *str, int match_len)
27{
28 const TBISTR *search = start;
29 bool exact = true;
30 const TBISEG *seg;
31
32 if (match_len < 0) {
33 /* Make match_len always positive for the inner loop */
34 match_len = -match_len;
35 exact = false;
36 } else {
37 /*
38 * Also support historic behaviour, which expected match_len to
39 * include null terminator
40 */
41 if (match_len && str[match_len-1] == '\0')
42 match_len--;
43 }
44
45 if (!search) {
46 /* Find global string table segment */
47 seg = __TBIFindSeg(NULL, TBID_SEG(TBID_THREAD_GLOBAL,
48 TBID_SEGSCOPE_GLOBAL,
49 TBID_SEGTYPE_STRING));
50
51 if (!seg || seg->Bytes < sizeof(TBISTR))
52 /* No string table! */
53 return NULL;
54
55 /* Start of string table */
56 search = seg->pGAddr;
57 }
58
59 for (;;) {
60 while (!search->Tag)
61 /* Allow simple gaps which are just zero initialised */
62 search = (const TBISTR *)((const char *)search + 8);
63
64 if (search->Tag == METAG_TBI_STRE) {
65 /* Reached the end of the table */
66 search = NULL;
67 break;
68 }
69
70 if ((search->Len >= match_len) &&
71 (!exact || (search->Len == match_len + 1)) &&
72 (search->Tag != METAG_TBI_STRG)) {
73 /* Worth searching */
74 if (!strncmp(str, (const char *)search->String,
75 match_len))
76 break;
77 }
78
79 /* Next entry */
80 search = (const TBISTR *)((const char *)search + search->Bytes);
81 }
82
83 return search;
84}
85
86const void *__TBITransStr(const char *str, int len)
87{
88 const TBISTR *search = NULL;
89 const void *res = NULL;
90
91 for (;;) {
92 /* Search onwards */
93 search = __TBIFindStr(search, str, len);
94
95 /* No translation returns NULL */
96 if (!search)
97 break;
98
99 /* Skip matching entries with no translation data */
100 if (search->TransLen != METAG_TBI_STRX) {
101 /* Calculate base of translation string */
102 res = (const char *)search->String +
103 ((search->Len + 7) & ~7);
104 break;
105 }
106
107 /* Next entry */
108 search = (const TBISTR *)((const char *)search + search->Bytes);
109 }
110
111 /* Return base address of translation data or NULL */
112 return res;
113}
114EXPORT_SYMBOL(__TBITransStr);
diff --git a/arch/metag/tbx/tbitimer.S b/arch/metag/tbx/tbitimer.S
deleted file mode 100644
index 5dbeddeee7ba..000000000000
--- a/arch/metag/tbx/tbitimer.S
+++ /dev/null
@@ -1,207 +0,0 @@
1/*
2 * tbitimer.S
3 *
4 * Copyright (C) 2001, 2002, 2007, 2012 Imagination Technologies.
5 *
6 * This program is free software; you can redistribute it and/or modify it under
7 * the terms of the GNU General Public License version 2 as published by the
8 * Free Software Foundation.
9 *
10 * TBI timer support routines and data values
11 */
12
13 .file "tbitimer.S"
14/*
15 * Get data structures and defines from the main C header
16 */
17#include <asm/tbx.h>
18
/*
 * 64-bit accumulators of ticks "lost" to timer adjustments, one per
 * timer (background/interrupt), plus a two-entry pointer table indexed
 * by timer kind (offset 0 = background, 4 = interrupt) which
 * ___TBITimeCore uses to pick the right accumulator.
 */
	.data
	.balign	8
	.global	___TBITimeB
	.type	___TBITimeB,object
___TBITimeB:
	.quad	0		/* Background 'lost' ticks */
	.size	___TBITimeB,.-___TBITimeB

	.data
	.balign	8
	.global	___TBITimeI
	.type	___TBITimeI,object
___TBITimeI:
	.quad	0		/* Interrupt 'lost' ticks */
	.size	___TBITimeI,.-___TBITimeI

	.data
	.balign	8
	.global	___TBITimes
	.type	___TBITimes,object
___TBITimes:
	.long	___TBITimeB	/* Table of 'lost' tick values */
	.long	___TBITimeI
	.size	___TBITimes,.-___TBITimes
43
44/*
45 * Flag bits for control of ___TBITimeCore
46 */
47#define TIMER_SET_BIT 1
48#define TIMER_ADD_BIT 2
49
50/*
51 * Initialise or stop timer support
52 *
53 * Register Usage: D1Ar1 holds Id, D1Ar2 is initial delay or 0
54 * D0FrT is used to call ___TBITimeCore
55 * D0Re0 is used for the result which is TXSTAT_TIMER_BIT
56 * D0Ar4, D1Ar5, D0Ar6 are all used as scratch
57 * Other registers are those set by ___TBITimeCore
58 * A0.3 is assumed to point at ___TBITime(I/B)
59 */
	.text
	.balign	4
	.global	___TBITimerCtrl
	.type	___TBITimerCtrl,function
/* Start (D0Ar2 != 0) or stop (D0Ar2 == 0) the timer selected by D1Ar1.
 * On enable, ___TBITime(B/I) is seeded with the sign-extended negated
 * start value (SETLNZ only stores when -D0Ar2 is non-zero). */
___TBITimerCtrl:
	MOV	D1Ar5,#TIMER_SET_BIT	/* Timer SET request */
	MOVT	D0FrT,#HI(___TBITimeCore) /* Get timer core reg values */
	CALL	D0FrT,#LO(___TBITimeCore) /* and perform register update */
	NEGS	D0Ar6,D0Ar2		/* Set flags from time-stamp */
	ASR	D1Ar5,D0Ar6,#31		/* Sign extend D0Ar6 into D1Ar5 */
	SETLNZ	[A0.3],D0Ar6,D1Ar5	/* ___TBITime(B/I)=-Start if enable */
	MOV	PC,D1RtP		/* Return */
	.size	___TBITimerCtrl,.-___TBITimerCtrl
73
74/*
75 * Return ___TBITimeStamp value
76 *
77 * Register Usage: D1Ar1 holds Id
78 * D0FrT is used to call ___TBITimeCore
79 * D0Re0, D1Re0 is used for the result
80 * D1Ar3, D0Ar4, D1Ar5
81 * Other registers are those set by ___TBITimeCore
82 * D0Ar6 is assumed to be the timer value read
83 * A0.3 is assumed to point at ___TBITime(I/B)
84 */
	.text
	.balign	4
	.global	___TBITimeStamp
	.type	___TBITimeStamp,function
/* Result D0Re0:D1Re0 = ___TBITime(B/I) + current counter, i.e. the
 * stored 64-bit base plus the sign-extended hardware timer value. */
___TBITimeStamp:
	MOV	D1Ar5,#0		/* Timer GET request */
	MOVT	D0FrT,#HI(___TBITimeCore)	/* Get timer core reg values */
	CALL	D0FrT,#LO(___TBITimeCore)	/* with no register update */
	ADDS	D0Re0,D0Ar4,D0Ar6	/* Add current time value */
	ADD	D1Re0,D1Ar3,D1Ar5	/* to 64-bit signed extend time */
	ADDCS	D1Re0,D1Re0,#1		/* Support borrow too */
	MOV	PC,D1RtP		/* Return */
	.size	___TBITimeStamp,.-___TBITimeStamp
98
99/*
100 * Perform ___TBITimerAdd logic
101 *
102 * Register Usage: D1Ar1 holds Id, D0Ar2 holds value to be added to the timer
103 * D0Re0 is used for the result - new TIMER value
104 * D1Ar5, D0Ar6 are used as scratch
105 * Other registers are those set by ___TBITimeCore
106 * D0Ar6 is assumed to be the timer value read
107 * D0Ar4, D1Ar3 is the current value of ___TBITime(B/I)
108 */
	.text
	.balign	4
	.global	___TBITimerAdd
	.type	___TBITimerAdd,function
/* Add D0Ar2 to the hardware timer and subtract the same delta from the
 * stored 64-bit base, so the (base + counter) sum seen by
 * ___TBITimeStamp is unchanged by the adjustment. */
___TBITimerAdd:
	MOV	D1Ar5,#TIMER_ADD_BIT	/* Timer ADD request */
	MOVT	D0FrT,#HI(___TBITimeCore) /* Get timer core reg values */
	CALL	D0FrT,#LO(___TBITimeCore) /* with no register update */
	ADD	D0Re0,D0Ar2,D0Ar6	/* Regenerate new value = result */
	NEG	D0Ar2,D0Ar2		/* Negate delta */
	ASR	D1Re0,D0Ar2,#31		/* Sign extend negated delta */
	ADDS	D0Ar4,D0Ar4,D0Ar2	/* Add time added to ... */
	ADD	D1Ar3,D1Ar3,D1Re0	/* ... real timer ... */
	ADDCS	D1Ar3,D1Ar3,#1		/* ... with carry */
	SETL	[A0.3],D0Ar4,D1Ar3	/* Update ___TBITime(B/I) */
	MOV	PC,D1RtP		/* Return */
	.size	___TBITimerAdd,.-___TBITimerAdd
126
127#ifdef TBI_1_4
128/*
129 * Perform ___TBITimerDeadline logic
130 * NB: Delays are positive compared to the Wait values which are -ive
131 *
132 * Register Usage: D1Ar1 holds Id
133 * D0Ar2 holds Delay requested
134 * D0Re0 is used for the result - old TIMER Delay value
135 * D1Ar5, D0Ar6 are used as scratch
136 * Other registers are those set by ___TBITimeCore
137 * D0Ar6 is assumed to be the timer value read
138 * D0Ar4, D1Ar3 is the current value of ___TBITime(B/I)
139 *
140 */
	.text
	.type	___TBITimerDeadline,function
	.global	___TBITimerDeadline
	.align	2
/* SET the timer to the new deadline in D0Ar2, returning the old counter
 * value in D0Re0.  The stored base is compensated by (old - new) so the
 * 64-bit timestamp remains continuous across the SET. */
___TBITimerDeadline:
	MOV	D1Ar5,#TIMER_SET_BIT	/* Timer SET request */
	MOVT	D0FrT,#HI(___TBITimeCore) /* Get timer core reg values */
	CALL	D0FrT,#LO(___TBITimeCore) /* with no register update */
	MOV	D0Re0,D0Ar6		/* Old value read = result */
	SUB	D0Ar2,D0Ar6,D0Ar2	/* Delta from (old - new) */
	ASR	D1Re0,D0Ar2,#31		/* Sign extend delta */
	ADDS	D0Ar4,D0Ar4,D0Ar2	/* Add time added to ... */
	ADD	D1Ar3,D1Ar3,D1Re0	/* ... real timer ... */
	ADDCS	D1Ar3,D1Ar3,#1		/* ... with carry */
	SETL	[A0.3],D0Ar4,D1Ar3	/* Update ___TBITime(B/I) */
	MOV	PC,D1RtP		/* Return */
	.size	___TBITimerDeadline,.-___TBITimerDeadline
158#endif /* TBI_1_4 */
159
160/*
161 * Perform core timer access logic
162 *
163 * Register Usage: D1Ar1 holds Id, D0Ar2 holds input value for SET and
164 * input value for ADD
165 * D1Ar5 controls op as SET or ADD as bit values
166 * On return D0Ar6, D1Ar5 holds the old 64-bit timer value
167 * A0.3 is setup to point at ___TBITime(I/B)
168 * A1.3 is setup to point at ___TBITimes
169 * D0Ar4, D1Ar3 is setup to value of ___TBITime(I/B)
170 */
	.text
	.balign	4
	.global	___TBITimeCore
	.type	___TBITimeCore,function
/* Shared GET/SET/ADD access to TXTIMER or TXTIMERI.  D1Ar5 encodes the
 * op: LSRS #1 shifts TIMER_SET_BIT (bit 0) into carry (-> MOVCS does
 * the SET) and leaves TIMER_ADD_BIT non-zero in the result (-> ADDNZ
 * does the ADD); both clear (a plain GET) performs neither write.
 * The TBID_ISTAT_BIT test selects the interrupt-level timer path. */
___TBITimeCore:
#ifndef METAC_0_1
	TSTT	D1Ar1,#HI(TBID_ISTAT_BIT)	/* Interrupt level timer? */
#endif
	MOVT	A1LbP,#HI(___TBITimes)
	ADD	A1LbP,A1LbP,#LO(___TBITimes)
	MOV	A1.3,A1LbP		/* Get ___TBITimes address */
#ifndef METAC_0_1
	BNZ	$LTimeCoreI		/* Yes: Service TXTIMERI! */
#endif
	LSRS	D1Ar5,D1Ar5,#1		/* Carry = SET, Zero = !ADD */
	GETD	A0.3,[A1.3+#0]		/* A0.3 == &___TBITimeB */
	MOV	D0Ar6,TXTIMER		/* Always GET old value */
	MOVCS	TXTIMER,D0Ar2		/* Conditional SET operation */
	ADDNZ	TXTIMER,D0Ar2,D0Ar6	/* Conditional ADD operation */
#ifndef METAC_0_1
	B	$LTimeCoreEnd
$LTimeCoreI:
	LSRS	D1Ar5,D1Ar5,#1		/* Carry = SET, Zero = !ADD */
	GETD	A0.3,[A1.3+#4]		/* A0.3 == &___TBITimeI */
	MOV	D0Ar6,TXTIMERI		/* Always GET old value */
	MOVCS	TXTIMERI,D0Ar2		/* Conditional SET operation */
	ADDNZ	TXTIMERI,D0Ar2,D0Ar6	/* Conditional ADD operation */
$LTimeCoreEnd:
#endif
	ASR	D1Ar5,D0Ar6,#31		/* Sign extend D0Ar6 into D1Ar5 */
	GETL	D0Ar4,D1Ar3,[A0.3]	/* Read ___TBITime(B/I) */
	MOV	PC,D0FrT		/* Return quickly */
	.size	___TBITimeCore,.-___TBITimeCore
204
205/*
206 * End of tbitimer.S
207 */
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index b3b4ed9b6874..f99dbc2f7ee4 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -391,11 +391,6 @@ config ATMEL_ST
391 help 391 help
392 Support for the Atmel ST timer. 392 Support for the Atmel ST timer.
393 393
394config CLKSRC_METAG_GENERIC
395 def_bool y if METAG
396 help
397 This option enables support for the Meta per-thread timers.
398
399config CLKSRC_EXYNOS_MCT 394config CLKSRC_EXYNOS_MCT
400 bool "Exynos multi core timer driver" if COMPILE_TEST 395 bool "Exynos multi core timer driver" if COMPILE_TEST
401 depends on ARM || ARM64 396 depends on ARM || ARM64
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index d6dec4489d66..a2d47e9ecf91 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -61,7 +61,6 @@ obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
61obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o 61obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
62obj-$(CONFIG_ARMV7M_SYSTICK) += armv7m_systick.o 62obj-$(CONFIG_ARMV7M_SYSTICK) += armv7m_systick.o
63obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o 63obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o
64obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o
65obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o 64obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o
66obj-$(CONFIG_KEYSTONE_TIMER) += timer-keystone.o 65obj-$(CONFIG_KEYSTONE_TIMER) += timer-keystone.o
67obj-$(CONFIG_INTEGRATOR_AP_TIMER) += timer-integrator-ap.o 66obj-$(CONFIG_INTEGRATOR_AP_TIMER) += timer-integrator-ap.o
diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
deleted file mode 100644
index 3e5fa2f62d5f..000000000000
--- a/drivers/clocksource/metag_generic.c
+++ /dev/null
@@ -1,161 +0,0 @@
1/*
2 * Copyright (C) 2005-2013 Imagination Technologies Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 *
17 * Support for Meta per-thread timers.
18 *
19 * Meta hardware threads have 2 timers. The background timer (TXTIMER) is used
20 * as a free-running time base (hz clocksource), and the interrupt timer
21 * (TXTIMERI) is used for the timer interrupt (clock event). Both counters
22 * traditionally count at approximately 1MHz.
23 */
24
25#include <clocksource/metag_generic.h>
26#include <linux/cpu.h>
27#include <linux/errno.h>
28#include <linux/sched.h>
29#include <linux/kernel.h>
30#include <linux/param.h>
31#include <linux/time.h>
32#include <linux/init.h>
33#include <linux/proc_fs.h>
34#include <linux/clocksource.h>
35#include <linux/clockchips.h>
36#include <linux/interrupt.h>
37
38#include <asm/clock.h>
39#include <asm/hwthread.h>
40#include <asm/core_reg.h>
41#include <asm/metag_mem.h>
42#include <asm/tbx.h>
43
44#define HARDWARE_FREQ 1000000 /* 1MHz */
45#define HARDWARE_DIV 1 /* divide by 1 = 1MHz clock */
46#define HARDWARE_TO_NS_SHIFT 10 /* convert ticks to ns */
47
48static unsigned int hwtimer_freq = HARDWARE_FREQ;
49static DEFINE_PER_CPU(struct clock_event_device, local_clockevent);
50static DEFINE_PER_CPU(char [11], local_clockevent_name);
51
/*
 * Clock event callback: program the interrupt timer (TXTIMERI) to fire
 * after 'delta' ticks.  The negated value is written so the counter
 * reaches zero after 'delta' ticks.
 * NOTE(review): sign convention inferred from the TBI timer code, which
 * also uses negative wait values - confirm against the core TRM.
 */
static int metag_timer_set_next_event(unsigned long delta,
				      struct clock_event_device *dev)
{
	__core_reg_set(TXTIMERI, -delta);
	return 0;	/* never fails */
}

/* Clocksource callback: read the free-running background timer. */
static u64 metag_clocksource_read(struct clocksource *cs)
{
	return __core_reg_get(TXTIMER);
}

/* TXTIMER exposed as a continuous 32-bit clocksource (nominally 1MHz). */
static struct clocksource clocksource_metag = {
	.name = "META",
	.rating = 200,
	.mask = CLOCKSOURCE_MASK(32),
	.read = metag_clocksource_read,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
71
/* Timer IRQ handler: dispatch to this CPU's clock event device. */
static irqreturn_t metag_timer_interrupt(int irq, void *dummy)
{
	struct clock_event_device *evt = this_cpu_ptr(&local_clockevent);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

/* Shared per-cpu timer irqaction, registered once in the init routine. */
static struct irqaction metag_timer_irq = {
	.name = "META core timer",
	.handler = metag_timer_interrupt,
	.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_PERCPU,
};

/*
 * Scheduler clock based on TXTIMER.  At the nominal 1MHz a tick is
 * 1000ns; the shift by HARDWARE_TO_NS_SHIFT multiplies by 1024 instead,
 * an ~2.4%-fast approximation (presumably to avoid a multiply).
 */
unsigned long long sched_clock(void)
{
	unsigned long long ticks = __core_reg_get(TXTIMER);
	return ticks << HARDWARE_TO_NS_SHIFT;
}
92
93static int arch_timer_starting_cpu(unsigned int cpu)
94{
95 unsigned int txdivtime;
96 struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
97 char *name = per_cpu(local_clockevent_name, cpu);
98
99 txdivtime = __core_reg_get(TXDIVTIME);
100
101 txdivtime &= ~TXDIVTIME_DIV_BITS;
102 txdivtime |= (HARDWARE_DIV & TXDIVTIME_DIV_BITS);
103
104 __core_reg_set(TXDIVTIME, txdivtime);
105
106 sprintf(name, "META %d", cpu);
107 clk->name = name;
108 clk->features = CLOCK_EVT_FEAT_ONESHOT,
109
110 clk->rating = 200,
111 clk->shift = 12,
112 clk->irq = tbisig_map(TBID_SIGNUM_TRT),
113 clk->set_next_event = metag_timer_set_next_event,
114
115 clk->mult = div_sc(hwtimer_freq, NSEC_PER_SEC, clk->shift);
116 clk->max_delta_ns = clockevent_delta2ns(0x7fffffff, clk);
117 clk->max_delta_ticks = 0x7fffffff;
118 clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
119 clk->min_delta_ticks = 0xf;
120 clk->cpumask = cpumask_of(cpu);
121
122 clockevents_register_device(clk);
123
124 /*
125 * For all non-boot CPUs we need to synchronize our free
126 * running clock (TXTIMER) with the boot CPU's clock.
127 *
128 * While this won't be accurate, it should be close enough.
129 */
130 if (cpu) {
131 unsigned int thread0 = cpu_2_hwthread_id[0];
132 unsigned long val;
133
134 val = core_reg_read(TXUCT_ID, TXTIMER_REGNUM, thread0);
135 __core_reg_set(TXTIMER, val);
136 }
137 return 0;
138}
139
/* One-time init: register the clocksource, hook the timer trigger IRQ,
 * and arrange per-CPU clock event setup via the CPU hotplug machinery. */
int __init metag_generic_timer_init(void)
{
	/*
	 * On Meta 2 SoCs, the actual frequency of the timer is based on the
	 * Meta core clock speed divided by an integer, so it is only
	 * approximately 1MHz. Calculating the real frequency here drastically
	 * reduces clock skew on these SoCs.
	 */
#ifdef CONFIG_METAG_META21
	hwtimer_freq = get_coreclock() / (metag_in32(EXPAND_TIMER_DIV) + 1);
#endif
	pr_info("Timer frequency: %u Hz\n", hwtimer_freq);

	clocksource_register_hz(&clocksource_metag, hwtimer_freq);

	/* One shared irqaction for the per-thread timer trigger. */
	setup_irq(tbisig_map(TBID_SIGNUM_TRT), &metag_timer_irq);

	/* Hook cpu boot to configure the CPU's timers */
	return cpuhp_setup_state(CPUHP_AP_METAG_TIMER_STARTING,
				 "clockevents/metag:starting",
				 arch_timer_starting_cpu, NULL);
}
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index e2954fb86d65..68ceac7617ff 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -637,7 +637,7 @@ config I2C_IBM_IIC
637 637
638config I2C_IMG 638config I2C_IMG
639 tristate "Imagination Technologies I2C SCB Controller" 639 tristate "Imagination Technologies I2C SCB Controller"
640 depends on MIPS || METAG || COMPILE_TEST 640 depends on MIPS || COMPILE_TEST
641 help 641 help
642 Say Y here if you want to use the IMG I2C SCB controller, 642 Say Y here if you want to use the IMG I2C SCB controller,
643 available on the TZ1090 and other IMG SoCs. 643 available on the TZ1090 and other IMG SoCs.
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index d27e3e3619e0..b5b1f4c93413 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -15,8 +15,6 @@ obj-$(CONFIG_IRQ_MXS) += irq-mxs.o
15obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o 15obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o
16obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o 16obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o
17obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o 17obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o
18obj-$(CONFIG_METAG) += irq-metag-ext.o
19obj-$(CONFIG_METAG_PERFCOUNTER_IRQS) += irq-metag.o
20obj-$(CONFIG_CLPS711X_IRQCHIP) += irq-clps711x.o 18obj-$(CONFIG_CLPS711X_IRQCHIP) += irq-clps711x.o
21obj-$(CONFIG_OMPIC) += irq-ompic.o 19obj-$(CONFIG_OMPIC) += irq-ompic.o
22obj-$(CONFIG_OR1K_PIC) += irq-or1k-pic.o 20obj-$(CONFIG_OR1K_PIC) += irq-or1k-pic.o
diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
deleted file mode 100644
index e67483161f0f..000000000000
--- a/drivers/irqchip/irq-metag-ext.c
+++ /dev/null
@@ -1,871 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Meta External interrupt code.
4 *
5 * Copyright (C) 2005-2012 Imagination Technologies Ltd.
6 *
7 * External interrupts on Meta are configured at two-levels, in the CPU core and
8 * in the external trigger block. Interrupts from SoC peripherals are
9 * multiplexed onto a single Meta CPU "trigger" - traditionally it has always
10 * been trigger 2 (TR2). For info on how de-multiplexing happens check out
11 * meta_intc_irq_demux().
12 */
13
14#include <linux/interrupt.h>
15#include <linux/irqchip/metag-ext.h>
16#include <linux/irqdomain.h>
17#include <linux/io.h>
18#include <linux/of.h>
19#include <linux/slab.h>
20#include <linux/syscore_ops.h>
21
22#include <asm/irq.h>
23#include <asm/hwthread.h>
24
25#define HWSTAT_STRIDE 8
26#define HWVEC_BLK_STRIDE 0x1000
27
/**
 * struct meta_intc_priv - private meta external interrupt data
 * @nr_banks: Number of interrupt banks
 * @domain: IRQ domain for all banks of external IRQs
 * @unmasked: Record of unmasked IRQs
 * @levels_altered: Record of altered level bits
 *
 * Each bank covers 32 hardware IRQs, so the 4-word bitmaps track up to
 * 128 external interrupts (see meta_intc_bank()/meta_intc_offset()).
 */
struct meta_intc_priv {
	unsigned int nr_banks;
	struct irq_domain *domain;

	unsigned long unmasked[4];

#ifdef CONFIG_METAG_SUSPEND_MEM
	unsigned long levels_altered[4];
#endif
};

/* Private data for the one and only external interrupt controller */
static struct meta_intc_priv meta_intc_priv;
48
49/**
50 * meta_intc_offset() - Get the offset into the bank of a hardware IRQ number
51 * @hw: Hardware IRQ number (within external trigger block)
52 *
53 * Returns: Bit offset into the IRQ's bank registers
54 */
55static unsigned int meta_intc_offset(irq_hw_number_t hw)
56{
57 return hw & 0x1f;
58}
59
60/**
61 * meta_intc_bank() - Get the bank number of a hardware IRQ number
62 * @hw: Hardware IRQ number (within external trigger block)
63 *
64 * Returns: Bank number indicating which register the IRQ's bits are
65 */
66static unsigned int meta_intc_bank(irq_hw_number_t hw)
67{
68 return hw >> 5;
69}
70
/**
 * meta_intc_stat_addr() - Get the address of a HWSTATEXT register
 * @hw: Hardware IRQ number (within external trigger block)
 *
 * Per-bank registers sit HWSTAT_STRIDE bytes apart at fixed core
 * addresses, hence the integer-to-pointer casts in these helpers.
 *
 * Returns: Address of a HWSTATEXT register containing the status bit for
 * the specified hardware IRQ number
 */
static void __iomem *meta_intc_stat_addr(irq_hw_number_t hw)
{
	return (void __iomem *)(HWSTATEXT +
				HWSTAT_STRIDE * meta_intc_bank(hw));
}

/**
 * meta_intc_level_addr() - Get the address of a HWLEVELEXT register
 * @hw: Hardware IRQ number (within external trigger block)
 *
 * Returns: Address of a HWLEVELEXT register containing the sense bit for
 * the specified hardware IRQ number
 */
static void __iomem *meta_intc_level_addr(irq_hw_number_t hw)
{
	return (void __iomem *)(HWLEVELEXT +
				HWSTAT_STRIDE * meta_intc_bank(hw));
}

/**
 * meta_intc_mask_addr() - Get the address of a HWMASKEXT register
 * @hw: Hardware IRQ number (within external trigger block)
 *
 * Returns: Address of a HWMASKEXT register containing the mask bit for the
 * specified hardware IRQ number
 */
static void __iomem *meta_intc_mask_addr(irq_hw_number_t hw)
{
	return (void __iomem *)(HWMASKEXT +
				HWSTAT_STRIDE * meta_intc_bank(hw));
}

/**
 * meta_intc_vec_addr() - Get the vector address of a hardware interrupt
 * @hw: Hardware IRQ number (within external trigger block)
 *
 * Unlike the bit-per-IRQ registers above, each IRQ has its own vector
 * register: banks are HWVEC_BLK_STRIDE apart and entries within a bank
 * are HWVECnEXT_STRIDE apart.
 *
 * Returns: Address of a HWVECEXT register controlling the core trigger to
 * vector the IRQ onto
 */
static inline void __iomem *meta_intc_vec_addr(irq_hw_number_t hw)
{
	return (void __iomem *)(HWVEC0EXT +
				HWVEC_BLK_STRIDE * meta_intc_bank(hw) +
				HWVECnEXT_STRIDE * meta_intc_offset(hw));
}
123
/**
 * meta_intc_startup_irq() - set up an external irq
 * @data: data for the external irq to start up
 *
 * Multiplex interrupts for irq onto TR2. Clear any pending interrupts and
 * unmask irq, both using the appropriate callbacks.
 *
 * Return: always 0 (there is no failure path).
 */
static unsigned int meta_intc_startup_irq(struct irq_data *data)
{
	irq_hw_number_t hw = data->hwirq;
	void __iomem *vec_addr = meta_intc_vec_addr(hw);
	int thread = hard_processor_id();

	/* Perform any necessary acking. */
	if (data->chip->irq_ack)
		data->chip->irq_ack(data);

	/* Wire up this interrupt to the core with HWVECxEXT. */
	metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);

	/* Perform any necessary unmasking. */
	data->chip->irq_unmask(data);

	return 0;
}
149
/**
 * meta_intc_shutdown_irq() - turn off an external irq
 * @data: data for the external irq to turn off
 *
 * Mask irq using the appropriate callback and stop muxing it onto TR2.
 * Mirrors meta_intc_startup_irq().
 */
static void meta_intc_shutdown_irq(struct irq_data *data)
{
	irq_hw_number_t hw = data->hwirq;
	void __iomem *vec_addr = meta_intc_vec_addr(hw);

	/* Mask the IRQ */
	data->chip->irq_mask(data);

	/*
	 * Disable the IRQ at the core by removing the interrupt from
	 * the HW vector mapping (a zero vector unhooks it).
	 */
	metag_out32(0, vec_addr);
}
170
171/**
172 * meta_intc_ack_irq() - acknowledge an external irq
173 * @data: data for the external irq to ack
174 *
175 * Clear down an edge interrupt in the status register.
176 */
177static void meta_intc_ack_irq(struct irq_data *data)
178{
179 irq_hw_number_t hw = data->hwirq;
180 unsigned int bit = 1 << meta_intc_offset(hw);
181 void __iomem *stat_addr = meta_intc_stat_addr(hw);
182
183 /* Ack the int, if it is still 'on'.
184 * NOTE - this only works for edge triggered interrupts.
185 */
186 if (metag_in32(stat_addr) & bit)
187 metag_out32(bit, stat_addr);
188}
189
/**
 * record_irq_is_masked() - record the IRQ masked so it doesn't get handled
 * @data: data for the external irq to record
 *
 * This should get called whenever an external IRQ is masked (by whichever
 * callback is used). It records the IRQ masked so that it doesn't get handled
 * if it still shows up in the status register.
 * The bitmap layout (bank word / bit offset) mirrors the hardware
 * registers and is consumed by meta_intc_irq_demux().
 */
static void record_irq_is_masked(struct irq_data *data)
{
	struct meta_intc_priv *priv = &meta_intc_priv;
	irq_hw_number_t hw = data->hwirq;

	clear_bit(meta_intc_offset(hw), &priv->unmasked[meta_intc_bank(hw)]);
}

/**
 * record_irq_is_unmasked() - record the IRQ unmasked so it can be handled
 * @data: data for the external irq to record
 *
 * This should get called whenever an external IRQ is unmasked (by whichever
 * callback is used). It records the IRQ unmasked so that it gets handled if it
 * shows up in the status register.
 */
static void record_irq_is_unmasked(struct irq_data *data)
{
	struct meta_intc_priv *priv = &meta_intc_priv;
	irq_hw_number_t hw = data->hwirq;

	set_bit(meta_intc_offset(hw), &priv->unmasked[meta_intc_bank(hw)]);
}
221
/*
 * For use by wrapper IRQ drivers
 */

/**
 * meta_intc_mask_irq_simple() - minimal mask used by wrapper IRQ drivers
 * @data: data for the external irq being masked
 *
 * This should be called by any wrapper IRQ driver mask functions. it doesn't do
 * any masking but records the IRQ as masked so that the core code knows the
 * mask has taken place. It is the callers responsibility to ensure that the IRQ
 * won't trigger an interrupt to the core.
 */
void meta_intc_mask_irq_simple(struct irq_data *data)
{
	record_irq_is_masked(data);
}

/**
 * meta_intc_unmask_irq_simple() - minimal unmask used by wrapper IRQ drivers
 * @data: data for the external irq being unmasked
 *
 * This should be called by any wrapper IRQ driver unmask functions. it doesn't
 * do any unmasking but records the IRQ as unmasked so that the core code knows
 * the unmask has taken place. It is the callers responsibility to ensure that
 * the IRQ can now trigger an interrupt to the core.
 */
void meta_intc_unmask_irq_simple(struct irq_data *data)
{
	record_irq_is_unmasked(data);
}
253
254
/**
 * meta_intc_mask_irq() - mask an external irq using HWMASKEXT
 * @data: data for the external irq to mask
 *
 * This is a default implementation of a mask function which makes use of the
 * HWMASKEXT registers available in newer versions.
 *
 * Earlier versions without these registers should use SoC level IRQ masking
 * which call the meta_intc_*_simple() functions above, or if that isn't
 * available should use the fallback meta_intc_*_nomask() functions below.
 */
static void meta_intc_mask_irq(struct irq_data *data)
{
	irq_hw_number_t hw = data->hwirq;
	unsigned int bit = 1 << meta_intc_offset(hw);
	void __iomem *mask_addr = meta_intc_mask_addr(hw);
	unsigned long flags;

	record_irq_is_masked(data);

	/* update the interrupt mask: the global lock serialises this
	 * read-modify-write of the shared mask register
	 * (NOTE(review): assumed to lock across hardware threads - confirm) */
	__global_lock2(flags);
	metag_out32(metag_in32(mask_addr) & ~bit, mask_addr);
	__global_unlock2(flags);
}
280
/**
 * meta_intc_unmask_irq() - unmask an external irq using HWMASKEXT
 * @data: data for the external irq to unmask
 *
 * This is a default implementation of an unmask function which makes use of the
 * HWMASKEXT registers available on new versions. It should be paired with
 * meta_intc_mask_irq() above.
 */
static void meta_intc_unmask_irq(struct irq_data *data)
{
	irq_hw_number_t hw = data->hwirq;
	unsigned int bit = 1 << meta_intc_offset(hw);
	void __iomem *mask_addr = meta_intc_mask_addr(hw);
	unsigned long flags;

	record_irq_is_unmasked(data);

	/* update the interrupt mask (locked RMW, see meta_intc_mask_irq) */
	__global_lock2(flags);
	metag_out32(metag_in32(mask_addr) | bit, mask_addr);
	__global_unlock2(flags);
}
303
/**
 * meta_intc_mask_irq_nomask() - mask an external irq by unvectoring
 * @data: data for the external irq to mask
 *
 * This is the version of the mask function for older versions which don't have
 * HWMASKEXT registers, or a SoC level means of masking IRQs. Instead the IRQ is
 * unvectored from the core and retriggered if necessary later (by the
 * matching *_unmask_*_nomask() functions below).
 */
static void meta_intc_mask_irq_nomask(struct irq_data *data)
{
	irq_hw_number_t hw = data->hwirq;
	void __iomem *vec_addr = meta_intc_vec_addr(hw);

	record_irq_is_masked(data);

	/* there is no interrupt mask, so unvector the interrupt */
	metag_out32(0, vec_addr);
}
322
/**
 * meta_intc_unmask_edge_irq_nomask() - unmask an edge irq by revectoring
 * @data: data for the external irq to unmask
 *
 * This is the version of the unmask function for older versions which don't
 * have HWMASKEXT registers, or a SoC level means of masking IRQs. Instead the
 * IRQ is revectored back to the core and retriggered if necessary.
 *
 * The retriggering done by this function is specific to edge interrupts.
 */
static void meta_intc_unmask_edge_irq_nomask(struct irq_data *data)
{
	irq_hw_number_t hw = data->hwirq;
	unsigned int bit = 1 << meta_intc_offset(hw);
	void __iomem *stat_addr = meta_intc_stat_addr(hw);
	void __iomem *vec_addr = meta_intc_vec_addr(hw);
	unsigned int thread = hard_processor_id();

	record_irq_is_unmasked(data);

	/* there is no interrupt mask, so revector the interrupt */
	metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);

	/*
	 * Re-trigger interrupt
	 *
	 * Writing a 1 toggles, and a 0->1 transition triggers. We only
	 * retrigger if the status bit is already set, which means we
	 * need to clear it first. Retriggering is fundamentally racy
	 * because if the interrupt fires again after we clear it we
	 * could end up clearing it again and the interrupt handler
	 * thinking it hasn't fired. Therefore we need to keep trying to
	 * retrigger until the bit is set.
	 */
	if (metag_in32(stat_addr) & bit) {
		metag_out32(bit, stat_addr);	/* clear (toggle down) ... */
		while (!(metag_in32(stat_addr) & bit))
			metag_out32(bit, stat_addr);	/* ... then toggle up */
	}
}
363
/**
 * meta_intc_unmask_level_irq_nomask() - unmask a level irq by revectoring
 * @data: data for the external irq to unmask
 *
 * This is the version of the unmask function for older versions which don't
 * have HWMASKEXT registers, or a SoC level means of masking IRQs. Instead the
 * IRQ is revectored back to the core and retriggered if necessary.
 *
 * The retriggering done by this function is specific to level interrupts:
 * unlike the edge variant no clear/toggle loop is needed.
 */
static void meta_intc_unmask_level_irq_nomask(struct irq_data *data)
{
	irq_hw_number_t hw = data->hwirq;
	unsigned int bit = 1 << meta_intc_offset(hw);
	void __iomem *stat_addr = meta_intc_stat_addr(hw);
	void __iomem *vec_addr = meta_intc_vec_addr(hw);
	unsigned int thread = hard_processor_id();

	record_irq_is_unmasked(data);

	/* there is no interrupt mask, so revector the interrupt */
	metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);

	/* Re-trigger interrupt */
	/* Writing a 1 triggers interrupt */
	if (metag_in32(stat_addr) & bit)
		metag_out32(bit, stat_addr);
}
392
/**
 * meta_intc_irq_set_type() - set the type of an external irq
 * @data: data for the external irq to set the type of
 * @flow_type: new irq flow type
 *
 * Set the flow type of an external interrupt. This updates the irq chip and irq
 * handler depending on whether the irq is edge or level sensitive (the polarity
 * is ignored), and also sets up the bit in HWLEVELEXT so the hardware knows
 * when to trigger.
 *
 * Return: always 0.
 */
static int meta_intc_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
#ifdef CONFIG_METAG_SUSPEND_MEM
	struct meta_intc_priv *priv = &meta_intc_priv;
#endif
	irq_hw_number_t hw = data->hwirq;
	unsigned int bit = 1 << meta_intc_offset(hw);
	void __iomem *level_addr = meta_intc_level_addr(hw);
	unsigned long flags;
	unsigned int level;

	/* update the chip/handler */
	if (flow_type & IRQ_TYPE_LEVEL_MASK)
		irq_set_chip_handler_name_locked(data, &meta_intc_level_chip,
						 handle_level_irq, NULL);
	else
		irq_set_chip_handler_name_locked(data, &meta_intc_edge_chip,
						 handle_edge_irq, NULL);

	/* and clear/set the bit in HWLEVELEXT (locked RMW: the register
	 * is shared between IRQs in the same bank) */
	__global_lock2(flags);
	level = metag_in32(level_addr);
	if (flow_type & IRQ_TYPE_LEVEL_MASK)
		level |= bit;
	else
		level &= ~bit;
	metag_out32(level, level_addr);
#ifdef CONFIG_METAG_SUSPEND_MEM
	/* remember which sense bits we changed, for restore after suspend */
	priv->levels_altered[meta_intc_bank(hw)] |= bit;
#endif
	__global_unlock2(flags);

	return 0;
}
437
/**
 * meta_intc_irq_demux() - external irq de-multiplexer
 * @desc: the interrupt description structure for this irq
 *
 * The cpu receives an interrupt on TR2 when a SoC interrupt has occurred. It is
 * this function's job to demux this irq and figure out exactly which external
 * irq needs servicing.
 *
 * Whilst using TR2 to detect external interrupts is a software convention it is
 * (hopefully) unlikely to change.
 */
static void meta_intc_irq_demux(struct irq_desc *desc)
{
	struct meta_intc_priv *priv = &meta_intc_priv;
	irq_hw_number_t hw;
	unsigned int bank, irq_no, status;
	void __iomem *stat_addr = meta_intc_stat_addr(0);

	/*
	 * Locate which interrupt has caused our handler to run.
	 * Walks every bank; stat_addr is advanced by HWSTAT_STRIDE at the
	 * bottom of the loop to reach the next bank's status register.
	 */
	for (bank = 0; bank < priv->nr_banks; ++bank) {
		/* Which interrupts are currently pending in this bank?
		 * (masked IRQs are filtered out via the unmasked bitmap) */
recalculate:
		status = metag_in32(stat_addr) & priv->unmasked[bank];

		for (hw = bank*32; status; status >>= 1, ++hw) {
			if (status & 0x1) {
				/*
				 * Map the hardware IRQ number to a virtual
				 * Linux IRQ number.
				 */
				irq_no = irq_linear_revmap(priv->domain, hw);

				/*
				 * Only fire off external interrupts that are
				 * registered to be handled by the kernel.
				 * Other external interrupts are probably being
				 * handled by other Meta hardware threads.
				 */
				generic_handle_irq(irq_no);

				/*
				 * The handler may have re-enabled interrupts
				 * which could have caused a nested invocation
				 * of this code and make the copy of the
				 * status register we are using invalid.
				 */
				goto recalculate;
			}
		}
		stat_addr += HWSTAT_STRIDE;
	}
}
492
#ifdef CONFIG_SMP
/**
 * meta_intc_set_affinity() - set the affinity for an interrupt
 * @data: data for the external irq to set the affinity of
 * @cpumask: cpu mask representing cpus which can handle the interrupt
 * @force: whether to force (ignored)
 *
 * Revector the specified external irq onto a specific cpu's TR2 trigger, so
 * that that cpu tends to be the one who handles it.
 *
 * Return: always 0.
 */
static int meta_intc_set_affinity(struct irq_data *data,
				  const struct cpumask *cpumask, bool force)
{
	irq_hw_number_t hw = data->hwirq;
	void __iomem *vec_addr = meta_intc_vec_addr(hw);
	unsigned int cpu, thread;

	/*
	 * Wire up this interrupt from HWVECxEXT to the Meta core.
	 *
	 * Note that we can't wire up HWVECxEXT to interrupt more than
	 * one cpu (the interrupt code doesn't support it), so we just
	 * pick the first cpu we find in 'cpumask'.
	 */
	cpu = cpumask_any_and(cpumask, cpu_online_mask);
	thread = cpu_2_hwthread_id[cpu];

	metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);

	/* report the single CPU the irq was actually routed to */
	irq_data_update_effective_affinity(data, cpumask_of(cpu));

	return 0;
}
#else
#define meta_intc_set_affinity	NULL
#endif
529
#ifdef CONFIG_PM_SLEEP
/*
 * Mask IRQs across suspend and skip set_wake; irq register state is
 * saved/restored separately by the syscore suspend/resume callbacks.
 */
#define META_INTC_CHIP_FLAGS	(IRQCHIP_MASK_ON_SUSPEND \
				| IRQCHIP_SKIP_SET_WAKE)
#else
#define META_INTC_CHIP_FLAGS	0
#endif
536
/* public edge/level irq chips which SoCs can override */

/* chip for edge triggered external irqs (note the .irq_ack callback) */
struct irq_chip meta_intc_edge_chip = {
	.irq_startup = meta_intc_startup_irq,
	.irq_shutdown = meta_intc_shutdown_irq,
	.irq_ack = meta_intc_ack_irq,
	.irq_mask = meta_intc_mask_irq,
	.irq_unmask = meta_intc_unmask_irq,
	.irq_set_type = meta_intc_irq_set_type,
	.irq_set_affinity = meta_intc_set_affinity,
	.flags = META_INTC_CHIP_FLAGS,
};
549
/* chip for level triggered external irqs; no .irq_ack is provided */
struct irq_chip meta_intc_level_chip = {
	.irq_startup = meta_intc_startup_irq,
	.irq_shutdown = meta_intc_shutdown_irq,
	.irq_set_type = meta_intc_irq_set_type,
	.irq_mask = meta_intc_mask_irq,
	.irq_unmask = meta_intc_unmask_irq,
	.irq_set_affinity = meta_intc_set_affinity,
	.flags = META_INTC_CHIP_FLAGS,
};
559
/**
 * meta_intc_map() - map an external irq
 * @d: irq domain of external trigger block
 * @irq: virtual irq number
 * @hw: hardware irq number within external trigger block
 *
 * This sets up a virtual irq for a specified hardware interrupt. The irq chip
 * and handler is configured, using the HWLEVELEXT registers to determine
 * edge/level flow type. These registers will have been set when the irq type is
 * set (or set to a default at init time).
 *
 * Return: always 0 (success).
 */
static int meta_intc_map(struct irq_domain *d, unsigned int irq,
			 irq_hw_number_t hw)
{
	unsigned int bit = 1 << meta_intc_offset(hw);
	void __iomem *level_addr = meta_intc_level_addr(hw);

	/* Go by the current sense in the HWLEVELEXT register */
	if (metag_in32(level_addr) & bit)
		irq_set_chip_and_handler(irq, &meta_intc_level_chip,
					 handle_level_irq);
	else
		irq_set_chip_and_handler(irq, &meta_intc_edge_chip,
					 handle_edge_irq);

	/* HWVECxEXT can only target one cpu, see meta_intc_set_affinity() */
	irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	return 0;
}
588
/* domain ops: two-cell DT specifiers (hwirq number, flags) */
static const struct irq_domain_ops meta_intc_domain_ops = {
	.map = meta_intc_map,
	.xlate = irq_domain_xlate_twocell,
};
593
594#ifdef CONFIG_METAG_SUSPEND_MEM
595
/**
 * struct meta_intc_context - suspend context
 * @levels: State of HWLEVELEXT registers
 * @masks: State of HWMASKEXT registers
 * @vectors: State of HWVECEXT registers
 * @txvecint: State of TxVECINT registers
 *
 * This structure stores the IRQ state across suspend.
 */
struct meta_intc_context {
	u32 levels[4];		/* one word per bank (up to 4 banks of 32) */
	u32 masks[4];		/* one word per bank */
	u8 vectors[4*32];	/* one entry per hwirq */

	/* presumably indexed [thread][trigger] - TODO confirm vs hardware doc */
	u8 txvecint[4][4];
};

/* suspend context: allocated by meta_intc_suspend(), freed on resume */
static struct meta_intc_context *meta_intc_context;
615
/**
 * meta_intc_suspend() - store irq state
 *
 * To avoid interfering with other threads we only save the IRQ state of IRQs in
 * use by Linux.
 *
 * Return: 0 on success, -ENOMEM if the context buffer cannot be allocated.
 */
static int meta_intc_suspend(void)
{
	struct meta_intc_priv *priv = &meta_intc_priv;
	int i, j;
	irq_hw_number_t hw;
	unsigned int bank;
	unsigned long flags;
	struct meta_intc_context *context;
	void __iomem *level_addr, *mask_addr, *vec_addr;
	u32 mask, bit;

	/* GFP_ATOMIC: syscore suspend callbacks must not sleep */
	context = kzalloc(sizeof(*context), GFP_ATOMIC);
	if (!context)
		return -ENOMEM;

	hw = 0;
	level_addr = meta_intc_level_addr(0);
	mask_addr = meta_intc_mask_addr(0);
	for (bank = 0; bank < priv->nr_banks; ++bank) {
		vec_addr = meta_intc_vec_addr(hw);

		/* create mask of interrupts in use */
		mask = 0;
		/* walk all 32 bits of this bank */
		for (bit = 1; bit; bit <<= 1) {
			i = irq_linear_revmap(priv->domain, hw);
			/* save mapped irqs which are enabled or have actions */
			if (i && (!irqd_irq_disabled(irq_get_irq_data(i)) ||
				  irq_has_action(i))) {
				mask |= bit;

				/* save trigger vector */
				context->vectors[hw] = metag_in32(vec_addr);
			}

			++hw;
			vec_addr += HWVECnEXT_STRIDE;
		}

		/* save level state if any IRQ levels altered */
		if (priv->levels_altered[bank])
			context->levels[bank] = metag_in32(level_addr);
		/* save mask state if any IRQs in use */
		if (mask)
			context->masks[bank] = metag_in32(mask_addr);

		level_addr += HWSTAT_STRIDE;
		mask_addr += HWSTAT_STRIDE;
	}

	/* save trigger matrixing (all 4x4 TxVECINT entries) */
	__global_lock2(flags);
	for (i = 0; i < 4; ++i)
		for (j = 0; j < 4; ++j)
			context->txvecint[i][j] = metag_in32(T0VECINT_BHALT +
							     TnVECINT_STRIDE*i +
							     8*j);
	__global_unlock2(flags);

	meta_intc_context = context;
	return 0;
}
683
/**
 * meta_intc_resume() - restore saved irq state
 *
 * Restore the saved IRQ state and drop it. Only bits belonging to IRQs that
 * Linux has in use (or whose levels were altered) are written back, so state
 * owned by other Meta hardware threads is left untouched.
 */
static void meta_intc_resume(void)
{
	struct meta_intc_priv *priv = &meta_intc_priv;
	int i, j;
	irq_hw_number_t hw;
	unsigned int bank;
	unsigned long flags;
	struct meta_intc_context *context = meta_intc_context;
	void __iomem *level_addr, *mask_addr, *vec_addr;
	u32 mask, bit, tmp;

	meta_intc_context = NULL;

	hw = 0;
	level_addr = meta_intc_level_addr(0);
	mask_addr = meta_intc_mask_addr(0);
	for (bank = 0; bank < priv->nr_banks; ++bank) {
		vec_addr = meta_intc_vec_addr(hw);

		/* create mask of interrupts in use */
		mask = 0;
		for (bit = 1; bit; bit <<= 1) {
			i = irq_linear_revmap(priv->domain, hw);
			/* restore mapped irqs, enabled or with actions */
			if (i && (!irqd_irq_disabled(irq_get_irq_data(i)) ||
				  irq_has_action(i))) {
				mask |= bit;

				/* restore trigger vector */
				metag_out32(context->vectors[hw], vec_addr);
			}

			++hw;
			vec_addr += HWVECnEXT_STRIDE;
		}

		if (mask) {
			/* restore mask state: read-modify-write under lock */
			__global_lock2(flags);
			tmp = metag_in32(mask_addr);
			tmp = (tmp & ~mask) | (context->masks[bank] & mask);
			metag_out32(tmp, mask_addr);
			__global_unlock2(flags);
		}

		mask = priv->levels_altered[bank];
		if (mask) {
			/* restore level state: read-modify-write under lock */
			__global_lock2(flags);
			tmp = metag_in32(level_addr);
			tmp = (tmp & ~mask) | (context->levels[bank] & mask);
			metag_out32(tmp, level_addr);
			__global_unlock2(flags);
		}

		level_addr += HWSTAT_STRIDE;
		mask_addr += HWSTAT_STRIDE;
	}

	/* restore trigger matrixing */
	__global_lock2(flags);
	for (i = 0; i < 4; ++i) {
		for (j = 0; j < 4; ++j) {
			metag_out32(context->txvecint[i][j],
				    T0VECINT_BHALT +
				    TnVECINT_STRIDE*i +
				    8*j);
		}
	}
	__global_unlock2(flags);

	/* drop the saved context now that it has been written back */
	kfree(context);
}
762
/* save/restore external irq state across system suspend */
static struct syscore_ops meta_intc_syscore_ops = {
	.suspend = meta_intc_suspend,
	.resume = meta_intc_resume,
};

/* hook the suspend/resume callbacks into the syscore machinery */
static void __init meta_intc_init_syscore_ops(struct meta_intc_priv *priv)
{
	register_syscore_ops(&meta_intc_syscore_ops);
}
#else
/* no suspend-to-memory support: nothing to save or restore */
#define meta_intc_init_syscore_ops(priv) do {} while (0)
#endif
775
/**
 * meta_intc_init_cpu() - register with a Meta cpu
 * @priv: private interrupt controller data
 * @cpu: the CPU to register on
 *
 * Configure @cpu's TR2 irq so that we can demux external irqs.
 */
static void __init meta_intc_init_cpu(struct meta_intc_priv *priv, int cpu)
{
	unsigned int thread = cpu_2_hwthread_id[cpu];
	unsigned int signum = TBID_SIGNUM_TR2(thread);
	/* map the TR2 trigger signal of this hardware thread to a Linux irq */
	int irq = tbisig_map(signum);

	/* Register the multiplexed IRQ handler */
	irq_set_chained_handler(irq, meta_intc_irq_demux);
	irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
}
793
/**
 * meta_intc_no_mask() - indicate lack of HWMASKEXT registers
 *
 * Called from SoC code (or init code below) to dynamically indicate the lack of
 * HWMASKEXT registers (for example depending on some SoC revision register).
 * This alters the irq mask and unmask callbacks to use the fallback
 * unvectoring/retriggering technique instead of using HWMASKEXT registers.
 */
void __init meta_intc_no_mask(void)
{
	/* patch both public chips to the nomask variants */
	meta_intc_edge_chip.irq_mask	= meta_intc_mask_irq_nomask;
	meta_intc_edge_chip.irq_unmask	= meta_intc_unmask_edge_irq_nomask;
	meta_intc_level_chip.irq_mask	= meta_intc_mask_irq_nomask;
	meta_intc_level_chip.irq_unmask	= meta_intc_unmask_level_irq_nomask;
}
809
/**
 * init_external_IRQ() - initialise the external irq controller
 *
 * Set up the external irq controller using device tree properties. This is
 * called from init_IRQ().
 *
 * Return: 0 on success, -ENOENT if no compatible node is found, -EINVAL for an
 * out-of-range num-banks property, or another negative errno on failure.
 */
int __init init_external_IRQ(void)
{
	struct meta_intc_priv *priv = &meta_intc_priv;
	struct device_node *node;
	int ret, cpu;
	u32 val;
	bool no_masks = false;

	node = of_find_compatible_node(NULL, NULL, "img,meta-intc");
	if (!node)
		return -ENOENT;

	/* Get number of banks */
	ret = of_property_read_u32(node, "num-banks", &val);
	if (ret) {
		pr_err("meta-intc: No num-banks property found\n");
		return ret;
	}
	/* context arrays in struct meta_intc_context assume at most 4 banks */
	if (val < 1 || val > 4) {
		pr_err("meta-intc: num-banks (%u) out of range\n", val);
		return -EINVAL;
	}
	priv->nr_banks = val;

	/* Are any mask registers present? */
	if (of_get_property(node, "no-mask", NULL))
		no_masks = true;

	/* No HWMASKEXT registers present? */
	if (no_masks)
		meta_intc_no_mask();

	/*
	 * Set up a linear IRQ domain covering 32 hwirqs per bank.
	 * NOTE(review): an earlier comment here called this a "legacy" domain
	 * pending devicetree conversion, but irq_domain_add_linear() is used.
	 */
	priv->domain = irq_domain_add_linear(node, priv->nr_banks*32,
					     &meta_intc_domain_ops, priv);
	if (unlikely(!priv->domain)) {
		pr_err("meta-intc: cannot add IRQ domain\n");
		return -ENOMEM;
	}

	/* Setup TR2 for all cpus. */
	for_each_possible_cpu(cpu)
		meta_intc_init_cpu(priv, cpu);

	/* Set up system suspend/resume callbacks */
	meta_intc_init_syscore_ops(priv);

	pr_info("meta-intc: External IRQ controller initialised (%u IRQs)\n",
		priv->nr_banks*32);

	return 0;
}
diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c
deleted file mode 100644
index 857b946747eb..000000000000
--- a/drivers/irqchip/irq-metag.c
+++ /dev/null
@@ -1,343 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Meta internal (HWSTATMETA) interrupt code.
4 *
5 * Copyright (C) 2011-2012 Imagination Technologies Ltd.
6 *
7 * This code is based on the code in SoC/common/irq.c and SoC/comet/irq.c
8 * The code base could be generalised/merged as a lot of the functionality is
9 * similar. Until this is done, we try to keep the code simple here.
10 */
11
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/irqdomain.h>
15
16#include <asm/irq.h>
17#include <asm/hwthread.h>
18
19#define PERF0VECINT 0x04820580
20#define PERF1VECINT 0x04820588
21#define PERF0TRIG_OFFSET 16
22#define PERF1TRIG_OFFSET 17
23
/**
 * struct metag_internal_irq_priv - private meta internal interrupt data
 * @domain: IRQ domain for all internal Meta IRQs (HWSTATMETA)
 * @unmasked: Record of unmasked IRQs (one bit per hwirq number)
 */
struct metag_internal_irq_priv {
	struct irq_domain *domain;

	unsigned long unmasked;
};

/* Private data for the one and only internal interrupt controller */
static struct metag_internal_irq_priv metag_internal_irq_priv;
37
38static unsigned int metag_internal_irq_startup(struct irq_data *data);
39static void metag_internal_irq_shutdown(struct irq_data *data);
40static void metag_internal_irq_ack(struct irq_data *data);
41static void metag_internal_irq_mask(struct irq_data *data);
42static void metag_internal_irq_unmask(struct irq_data *data);
43#ifdef CONFIG_SMP
44static int metag_internal_irq_set_affinity(struct irq_data *data,
45 const struct cpumask *cpumask, bool force);
46#endif
47
/* all internal (HWSTATMETA) irqs are handled as edge triggered */
static struct irq_chip internal_irq_edge_chip = {
	.name = "HWSTATMETA-IRQ",
	.irq_startup = metag_internal_irq_startup,
	.irq_shutdown = metag_internal_irq_shutdown,
	.irq_ack = metag_internal_irq_ack,
	.irq_mask = metag_internal_irq_mask,
	.irq_unmask = metag_internal_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = metag_internal_irq_set_affinity,
#endif
};
59
60/*
61 * metag_hwvec_addr - get the address of *VECINT regs of irq
62 *
63 * This function is a table of supported triggers on HWSTATMETA
64 * Could do with a structure, but better keep it simple. Changes
65 * in this code should be rare.
66 */
67static inline void __iomem *metag_hwvec_addr(irq_hw_number_t hw)
68{
69 void __iomem *addr;
70
71 switch (hw) {
72 case PERF0TRIG_OFFSET:
73 addr = (void __iomem *)PERF0VECINT;
74 break;
75 case PERF1TRIG_OFFSET:
76 addr = (void __iomem *)PERF1VECINT;
77 break;
78 default:
79 addr = NULL;
80 break;
81 }
82 return addr;
83}
84
/*
 * metag_internal_irq_startup - setup an internal irq
 * @data: irq data for the irq to start up
 *
 * Multiplex interrupts for @data onto TR1. Clear any pending
 * interrupts. Always returns 0 (the irq_startup convention).
 */
static unsigned int metag_internal_irq_startup(struct irq_data *data)
{
	/* Clear (toggle) the bit in HWSTATMETA for our interrupt. */
	metag_internal_irq_ack(data);

	/* Enable the interrupt by unmasking it */
	metag_internal_irq_unmask(data);

	return 0;
}
102
/*
 * metag_internal_irq_shutdown - turn off the irq
 * @data: irq data for the irq to turn off
 *
 * Mask @data's irq and clear any pending interrupts.
 * Stop muxing it onto TR1.
 */
static void metag_internal_irq_shutdown(struct irq_data *data)
{
	/* Disable the IRQ at the core by masking it. */
	metag_internal_irq_mask(data);

	/* Clear (toggle) the bit in HWSTATMETA for our interrupt. */
	metag_internal_irq_ack(data);
}
118
/*
 * metag_internal_irq_ack - acknowledge irq
 * @data: irq data for the irq to ack
 *
 * Writing a 1 to a HWSTATMETA bit toggles it, so only write the bit
 * when it is currently set, which clears it.
 */
static void metag_internal_irq_ack(struct irq_data *data)
{
	irq_hw_number_t hw = data->hwirq;
	unsigned int bit = 1 << hw;

	if (metag_in32(HWSTATMETA) & bit)
		metag_out32(bit, HWSTATMETA);
}
131
/**
 * metag_internal_irq_mask() - mask an internal irq by unvectoring
 * @data: data for the internal irq to mask
 *
 * HWSTATMETA has no mask register. Instead the IRQ is unvectored from the core
 * and retriggered if necessary later.
 */
static void metag_internal_irq_mask(struct irq_data *data)
{
	struct metag_internal_irq_priv *priv = &metag_internal_irq_priv;
	irq_hw_number_t hw = data->hwirq;
	void __iomem *vec_addr = metag_hwvec_addr(hw);

	/* record the mask so the demux loop ignores this irq */
	clear_bit(hw, &priv->unmasked);

	/* there is no interrupt mask, so unvector the interrupt */
	metag_out32(0, vec_addr);
}
150
/**
 * metag_internal_irq_unmask() - unmask an internal irq by revectoring
 * @data: data for the internal irq to unmask
 *
 * HWSTATMETA has no mask register. Instead the IRQ is revectored back to the
 * core and retriggered if necessary.
 */
static void metag_internal_irq_unmask(struct irq_data *data)
{
	struct metag_internal_irq_priv *priv = &metag_internal_irq_priv;
	irq_hw_number_t hw = data->hwirq;
	unsigned int bit = 1 << hw;
	void __iomem *vec_addr = metag_hwvec_addr(hw);
	unsigned int thread = hard_processor_id();

	/* record the unmask so the demux loop services this irq */
	set_bit(hw, &priv->unmasked);

	/* there is no interrupt mask, so revector the interrupt */
	metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)), vec_addr);

	/*
	 * Re-trigger interrupt
	 *
	 * Writing a 1 toggles, and a 0->1 transition triggers. We only
	 * retrigger if the status bit is already set, which means we
	 * need to clear it first. Retriggering is fundamentally racy
	 * because if the interrupt fires again after we clear it we
	 * could end up clearing it again and the interrupt handler
	 * thinking it hasn't fired. Therefore we need to keep trying to
	 * retrigger until the bit is set.
	 */
	if (metag_in32(HWSTATMETA) & bit) {
		metag_out32(bit, HWSTATMETA);
		while (!(metag_in32(HWSTATMETA) & bit))
			metag_out32(bit, HWSTATMETA);
	}
}
188
#ifdef CONFIG_SMP
/*
 * metag_internal_irq_set_affinity - set the affinity for an interrupt
 * @data: irq data whose affinity to set
 * @cpumask: cpus which may handle the interrupt
 * @force: whether to force (ignored)
 *
 * Returns 0 on success.
 */
static int metag_internal_irq_set_affinity(struct irq_data *data,
			const struct cpumask *cpumask, bool force)
{
	unsigned int cpu, thread;
	irq_hw_number_t hw = data->hwirq;
	/*
	 * Wire up this interrupt from *VECINT to the Meta core.
	 *
	 * Note that we can't wire up *VECINT to interrupt more than
	 * one cpu (the interrupt code doesn't support it), so we just
	 * pick the first cpu we find in 'cpumask'.
	 */
	cpu = cpumask_any_and(cpumask, cpu_online_mask);
	thread = cpu_2_hwthread_id[cpu];

	/* route the trigger to the chosen hardware thread's TR1 */
	metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)),
		    metag_hwvec_addr(hw));

	return 0;
}
#endif
214
/*
 * metag_internal_irq_demux - irq de-multiplexer
 * @desc: the interrupt description structure for this irq
 *
 * The cpu receives an interrupt on TR1 when an interrupt has
 * occurred. It is this function's job to demux this irq and
 * figure out exactly which trigger needs servicing.
 */
static void metag_internal_irq_demux(struct irq_desc *desc)
{
	struct metag_internal_irq_priv *priv = irq_desc_get_handler_data(desc);
	irq_hw_number_t hw;
	unsigned int irq_no;
	u32 status;

recalculate:
	/* only consider triggers Linux has unmasked */
	status = metag_in32(HWSTATMETA) & priv->unmasked;

	for (hw = 0; status != 0; status >>= 1, ++hw) {
		if (status & 0x1) {
			/*
			 * Map the hardware IRQ number to a virtual Linux IRQ
			 * number.
			 */
			irq_no = irq_linear_revmap(priv->domain, hw);

			/*
			 * Only fire off interrupts that are
			 * registered to be handled by the kernel.
			 * Other interrupts are probably being
			 * handled by other Meta hardware threads.
			 */
			generic_handle_irq(irq_no);

			/*
			 * The handler may have re-enabled interrupts
			 * which could have caused a nested invocation
			 * of this code and make the copy of the
			 * status register we are using invalid.
			 */
			goto recalculate;
		}
	}
}
260
/**
 * internal_irq_map() - Map an internal meta IRQ to a virtual IRQ number.
 * @hw: Number of the internal IRQ. Must be in range.
 *
 * Returns: The virtual IRQ number of the Meta internal IRQ specified by
 *	    @hw, or -ENODEV if the IRQ domain has not been set up yet.
 */
int internal_irq_map(unsigned int hw)
{
	struct metag_internal_irq_priv *priv = &metag_internal_irq_priv;
	if (!priv->domain)
		return -ENODEV;
	return irq_create_mapping(priv->domain, hw);
}
275
/**
 * metag_internal_irq_init_cpu - register with the Meta cpu
 * @priv: private interrupt controller data
 * @cpu: the CPU to register on
 *
 * Configure @cpu's TR1 irq so that we can demux irqs.
 */
static void metag_internal_irq_init_cpu(struct metag_internal_irq_priv *priv,
					int cpu)
{
	unsigned int thread = cpu_2_hwthread_id[cpu];
	unsigned int signum = TBID_SIGNUM_TR1(thread);
	/* map the TR1 trigger signal of this hardware thread to a Linux irq */
	int irq = tbisig_map(signum);

	/* Register the multiplexed IRQ handler */
	irq_set_chained_handler_and_data(irq, metag_internal_irq_demux, priv);
	irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
}
293
/**
 * metag_internal_intc_map() - map an internal irq
 * @d: irq domain of internal trigger block
 * @irq: virtual irq number
 * @hw: hardware irq number within internal trigger block
 *
 * This sets up a virtual irq for a specified hardware interrupt. The irq chip
 * and handler is configured.
 *
 * Return: 0 on success, -EINVAL if @hw has no *VECINT register.
 */
static int metag_internal_intc_map(struct irq_domain *d, unsigned int irq,
				   irq_hw_number_t hw)
{
	/* only register interrupt if it is mapped */
	if (!metag_hwvec_addr(hw))
		return -EINVAL;

	irq_set_chip_and_handler(irq, &internal_irq_edge_chip,
				 handle_edge_irq);
	return 0;
}
314
315static const struct irq_domain_ops metag_internal_intc_domain_ops = {
316 .map = metag_internal_intc_map,
317};
318
319/**
320 * metag_internal_irq_register - register internal IRQs
321 *
322 * Register the irq chip and handler function for all internal IRQs
323 */
324int __init init_internal_IRQ(void)
325{
326 struct metag_internal_irq_priv *priv = &metag_internal_irq_priv;
327 unsigned int cpu;
328
329 /* Set up an IRQ domain */
330 priv->domain = irq_domain_add_linear(NULL, 32,
331 &metag_internal_intc_domain_ops,
332 priv);
333 if (unlikely(!priv->domain)) {
334 pr_err("meta-internal-intc: cannot add IRQ domain\n");
335 return -ENOMEM;
336 }
337
338 /* Setup TR1 for all cpus. */
339 for_each_possible_cpu(cpu)
340 metag_internal_irq_init_cpu(priv, cpu);
341
342 return 0;
343};
diff --git a/drivers/media/rc/img-ir/Kconfig b/drivers/media/rc/img-ir/Kconfig
index a896d3c83a1c..d2c6617d468e 100644
--- a/drivers/media/rc/img-ir/Kconfig
+++ b/drivers/media/rc/img-ir/Kconfig
@@ -1,7 +1,7 @@
1config IR_IMG 1config IR_IMG
2 tristate "ImgTec IR Decoder" 2 tristate "ImgTec IR Decoder"
3 depends on RC_CORE 3 depends on RC_CORE
4 depends on METAG || MIPS || COMPILE_TEST 4 depends on MIPS || COMPILE_TEST
5 select IR_IMG_HW if !IR_IMG_RAW 5 select IR_IMG_HW if !IR_IMG_RAW
6 help 6 help
7 Say Y or M here if you want to use the ImgTec infrared decoder 7 Say Y or M here if you want to use the ImgTec infrared decoder
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index b811442c5ce6..75a71ebcb369 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -402,19 +402,6 @@ config GOLDFISH_TTY_EARLY_CONSOLE
402 default y if GOLDFISH_TTY=y 402 default y if GOLDFISH_TTY=y
403 select SERIAL_EARLYCON 403 select SERIAL_EARLYCON
404 404
405config DA_TTY
406 bool "DA TTY"
407 depends on METAG_DA
408 select SERIAL_NONSTANDARD
409 help
410 This enables a TTY on a Dash channel.
411
412config DA_CONSOLE
413 bool "DA Console"
414 depends on DA_TTY
415 help
416 This enables a console on a Dash channel.
417
418config MIPS_EJTAG_FDC_TTY 405config MIPS_EJTAG_FDC_TTY
419 bool "MIPS EJTAG Fast Debug Channel TTY" 406 bool "MIPS EJTAG Fast Debug Channel TTY"
420 depends on MIPS_CDMM 407 depends on MIPS_CDMM
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index 8ce3a8661b31..47c71f43a397 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -32,7 +32,6 @@ obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o
32obj-$(CONFIG_SYNCLINK) += synclink.o 32obj-$(CONFIG_SYNCLINK) += synclink.o
33obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o 33obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o
34obj-$(CONFIG_GOLDFISH_TTY) += goldfish.o 34obj-$(CONFIG_GOLDFISH_TTY) += goldfish.o
35obj-$(CONFIG_DA_TTY) += metag_da.o
36obj-$(CONFIG_MIPS_EJTAG_FDC_TTY) += mips_ejtag_fdc.o 35obj-$(CONFIG_MIPS_EJTAG_FDC_TTY) += mips_ejtag_fdc.o
37obj-$(CONFIG_VCC) += vcc.o 36obj-$(CONFIG_VCC) += vcc.o
38 37
diff --git a/drivers/tty/metag_da.c b/drivers/tty/metag_da.c
deleted file mode 100644
index 99eaed4b2dbc..000000000000
--- a/drivers/tty/metag_da.c
+++ /dev/null
@@ -1,665 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * dashtty.c - tty driver for Dash channels interface.
4 *
5 * Copyright (C) 2007,2008,2012 Imagination Technologies
6 */
7
8#include <linux/atomic.h>
9#include <linux/completion.h>
10#include <linux/console.h>
11#include <linux/delay.h>
12#include <linux/export.h>
13#include <linux/init.h>
14#include <linux/kernel.h>
15#include <linux/kthread.h>
16#include <linux/moduleparam.h>
17#include <linux/mutex.h>
18#include <linux/sched.h>
19#include <linux/serial.h>
20#include <linux/slab.h>
21#include <linux/spinlock.h>
22#include <linux/string.h>
23#include <linux/timer.h>
24#include <linux/tty.h>
25#include <linux/tty_driver.h>
26#include <linux/tty_flip.h>
27#include <linux/uaccess.h>
28
29#include <asm/da.h>
30
31/* Channel error codes */
32#define CONAOK 0
33#define CONERR 1
34#define CONBAD 2
35#define CONPRM 3
36#define CONADR 4
37#define CONCNT 5
38#define CONCBF 6
39#define CONCBE 7
40#define CONBSY 8
41
42/* Default channel for the console */
43#define CONSOLE_CHANNEL 1
44
45#define NUM_TTY_CHANNELS 6
46
47/* Auto allocate */
48#define DA_TTY_MAJOR 0
49
50/* A speedy poll rate helps the userland debug process connection response.
51 * But, if you set it too high then no other userland processes get much
52 * of a look in.
53 */
54#define DA_TTY_POLL (HZ / 50)
55
56/*
57 * A short put delay improves latency but has a high throughput overhead
58 */
59#define DA_TTY_PUT_DELAY (HZ / 100)
60
61static atomic_t num_channels_need_poll = ATOMIC_INIT(0);
62
63static struct timer_list poll_timer;
64
65static struct tty_driver *channel_driver;
66
67static struct timer_list put_timer;
68static struct task_struct *dashtty_thread;
69
70/*
71 * The console_poll parameter determines whether the console channel should be
72 * polled for input.
73 * By default the console channel isn't polled at all, in order to avoid the
74 * overhead, but that means it isn't possible to have a login on /dev/console.
75 */
76static bool console_poll;
77module_param(console_poll, bool, S_IRUGO);
78
79#define RX_BUF_SIZE 1024
80
81enum {
82 INCHR = 1,
83 OUTCHR,
84 RDBUF,
85 WRBUF,
86 RDSTAT
87};
88
/**
 * struct dashtty_port - Wrapper struct for dashtty tty_port.
 * @port:	TTY port data
 * @rx_lock:	Lock for rx_buf.
 *		This protects between the poll timer and user context.
 *		It's also held during read SWITCH operations.
 * @rx_buf:	Read buffer
 * @xmit_lock:	Lock for xmit_*, and port.xmit_buf.
 *		This protects between user context and kernel thread.
 *		It's also held during write SWITCH operations.
 * @xmit_cnt:	Size of xmit buffer contents
 * @xmit_head:	Head of xmit buffer where data is written
 * @xmit_tail:	Tail of xmit buffer where data is read
 * @xmit_empty:	Completion for xmit buffer being empty
 */
struct dashtty_port {
	struct tty_port port;
	spinlock_t rx_lock;
	void *rx_buf;
	struct mutex xmit_lock;
	unsigned int xmit_cnt;
	unsigned int xmit_head;
	unsigned int xmit_tail;
	struct completion xmit_empty;
};

/* one port per DA channel */
static struct dashtty_port dashtty_ports[NUM_TTY_CHANNELS];

/* total bytes buffered across all channels; put_data() sleeps when zero */
static atomic_t dashtty_xmit_cnt = ATOMIC_INIT(0);
static wait_queue_head_t dashtty_waitqueue;
119
/*
 * Low-level DA channel access routines
 */

/*
 * chancall() - invoke a Debug Adapter BIOS channel operation.
 * @in_bios_function: operation code (INCHR/OUTCHR/RDBUF/WRBUF/RDSTAT)
 * @in_channel: DA channel number
 * @in_arg2/@in_arg3/@in_arg4: operation-specific arguments
 *
 * Marshals the arguments into the Meta argument registers, pushes them onto
 * the A0 stack and executes a SWITCH instruction to trap to the debug
 * adapter, then pops the stack and returns the result register.
 * Returns one of the CON* channel error codes (CONAOK on success -
 * presumably; confirm against the DA BIOS interface documentation).
 */
static int chancall(int in_bios_function, int in_channel,
		    int in_arg2, void *in_arg3,
		    void *in_arg4)
{
	register int bios_function asm("D1Ar1") = in_bios_function;
	register int channel asm("D0Ar2") = in_channel;
	register int arg2 asm("D1Ar3") = in_arg2;
	register void *arg3 asm("D0Ar4") = in_arg3;
	register void *arg4 asm("D1Ar5") = in_arg4;
	register int bios_call asm("D0Ar6") = 3;
	register int result asm("D0Re0");

	asm volatile (
		"MSETL	[A0StP++], %6,%4,%2\n\t"
		"ADD	A0StP, A0StP, #8\n\t"
		"SWITCH	#0x0C30208\n\t"
		"GETD	%0, [A0StP+#-8]\n\t"
		"SUB	A0StP, A0StP, #(4*6)+8\n\t"
		: "=d" (result)   /* outs */
		: "d" (bios_function),
		  "d" (channel),
		  "d" (arg2),
		  "d" (arg3),
		  "d" (arg4),
		  "d" (bios_call) /* ins */
		: "memory");

	return result;
}
152
/*
 * Attempts to fetch count bytes from channel and returns actual count.
 *
 * Reads up to RX_BUF_SIZE bytes from the DA channel into the port's rx_buf
 * and pushes them to the tty flip buffer. Holds rx_lock throughout so the
 * port cannot be shut down mid-read. Note that if the flip buffer has less
 * space than was received, the excess bytes are silently dropped.
 */
static int fetch_data(unsigned int channel)
{
	struct dashtty_port *dport = &dashtty_ports[channel];
	int received = 0;

	spin_lock_bh(&dport->rx_lock);
	/* check the port isn't being shut down */
	if (!dport->rx_buf)
		goto unlock;
	if (chancall(RDBUF, channel, RX_BUF_SIZE,
		     (void *)dport->rx_buf, &received) == CONAOK) {
		if (received) {
			int space;
			unsigned char *cbuf;

			space = tty_prepare_flip_string(&dport->port, &cbuf,
							received);

			if (space <= 0)
				goto unlock;

			memcpy(cbuf, dport->rx_buf, space);
			tty_flip_buffer_push(&dport->port);
		}
	}
unlock:
	spin_unlock_bh(&dport->rx_lock);

	return received;
}
186
187/**
188 * find_channel_to_poll() - Returns number of the next channel to poll.
189 * Returns: The number of the next channel to poll, or -1 if none need
190 * polling.
191 */
192static int find_channel_to_poll(void)
193{
194 static int last_polled_channel;
195 int last = last_polled_channel;
196 int chan;
197 struct dashtty_port *dport;
198
199 for (chan = last + 1; ; ++chan) {
200 if (chan >= NUM_TTY_CHANNELS)
201 chan = 0;
202
203 dport = &dashtty_ports[chan];
204 if (dport->rx_buf) {
205 last_polled_channel = chan;
206 return chan;
207 }
208
209 if (chan == last)
210 break;
211 }
212 return -1;
213}
214
/**
 * put_channel_data() - Write out a block of channel data.
 * @chan: DA channel number.
 *
 * Write a single block of data out to the debug adapter. If the circular buffer
 * is wrapped then only the first block is written.
 *
 * Returns: 1 if the remote buffer was too full to accept data.
 *	    0 otherwise.
 */
static int put_channel_data(unsigned int chan)
{
	struct dashtty_port *dport;
	struct tty_struct *tty;
	int number_written;
	unsigned int count = 0;

	dport = &dashtty_ports[chan];
	mutex_lock(&dport->xmit_lock);
	if (dport->xmit_cnt) {
		/* clamp to the contiguous run before the buffer wraps */
		count = min((unsigned int)(SERIAL_XMIT_SIZE - dport->xmit_tail),
			    dport->xmit_cnt);
		chancall(WRBUF, chan, count,
			 dport->port.xmit_buf + dport->xmit_tail,
			 &number_written);
		dport->xmit_cnt -= number_written;
		if (!dport->xmit_cnt) {
			/* reset pointers to avoid wraps */
			dport->xmit_head = 0;
			dport->xmit_tail = 0;
			complete(&dport->xmit_empty);
		} else {
			dport->xmit_tail += number_written;
			if (dport->xmit_tail >= SERIAL_XMIT_SIZE)
				dport->xmit_tail -= SERIAL_XMIT_SIZE;
		}
		atomic_sub(number_written, &dashtty_xmit_cnt);
	}
	mutex_unlock(&dport->xmit_lock);

	/* if we've made more data available, wake up tty */
	if (count && number_written) {
		tty = tty_port_tty_get(&dport->port);
		if (tty) {
			tty_wakeup(tty);
			tty_kref_put(tty);
		}
	}

	/* did the write fail? (count is 0 when there was nothing to send) */
	return count && !number_written;
}
267
/**
 * put_data() - Kernel thread to write out blocks of channel data to DA.
 * @arg: Unused.
 *
 * This kernel thread runs while @dashtty_xmit_cnt != 0, and loops over the
 * channels to write out any buffered data. If any of the channels stall due to
 * the remote buffer being full, a hold off happens to allow the debugger to
 * drain the buffer.
 */
static int put_data(void *arg)
{
	unsigned int chan, stall;

	__set_current_state(TASK_RUNNING);
	while (!kthread_should_stop()) {
		/*
		 * For each channel see if there's anything to transmit in the
		 * port's xmit_buf.
		 */
		stall = 0;
		for (chan = 0; chan < NUM_TTY_CHANNELS; ++chan)
			stall += put_channel_data(chan);

		/*
		 * If some of the buffers are full, hold off for a short while
		 * to allow them to empty.
		 */
		if (stall)
			msleep(25);

		/* sleep until more data is buffered for transmission */
		wait_event_interruptible(dashtty_waitqueue,
					 atomic_read(&dashtty_xmit_cnt));
	}

	return 0;
}
304
/*
 * This gets called every DA_TTY_POLL and polls the channels for data.
 * Polling stops (the timer is not re-armed) once no open port needs it;
 * dashtty_port_activate() restarts it. The parameter is the file-scope
 * poll_timer, passed in by the timer core.
 */
static void dashtty_timer(struct timer_list *poll_timer)
{
	int channel;

	/* If there are no ports open do nothing and don't poll again. */
	if (!atomic_read(&num_channels_need_poll))
		return;

	channel = find_channel_to_poll();

	/* Did we find a channel to poll? */
	if (channel >= 0)
		fetch_data(channel);

	/* re-arm for the next poll interval */
	mod_timer(poll_timer, jiffies + DA_TTY_POLL);
}
324
325static void add_poll_timer(struct timer_list *poll_timer)
326{
327 timer_setup(poll_timer, dashtty_timer, TIMER_PINNED);
328 poll_timer->expires = jiffies + DA_TTY_POLL;
329
330 /*
331 * Always attach the timer to the boot CPU. The DA channels are per-CPU
332 * so all polling should be from a single CPU.
333 */
334 add_timer_on(poll_timer, 0);
335}
336
337static int dashtty_port_activate(struct tty_port *port, struct tty_struct *tty)
338{
339 struct dashtty_port *dport = container_of(port, struct dashtty_port,
340 port);
341 void *rx_buf;
342
343 /* Allocate the buffer we use for writing data */
344 if (tty_port_alloc_xmit_buf(port) < 0)
345 goto err;
346
347 /* Allocate the buffer we use for reading data */
348 rx_buf = kzalloc(RX_BUF_SIZE, GFP_KERNEL);
349 if (!rx_buf)
350 goto err_free_xmit;
351
352 spin_lock_bh(&dport->rx_lock);
353 dport->rx_buf = rx_buf;
354 spin_unlock_bh(&dport->rx_lock);
355
356 /*
357 * Don't add the poll timer if we're opening a console. This
358 * avoids the overhead of polling the Dash but means it is not
359 * possible to have a login on /dev/console.
360 *
361 */
362 if (console_poll || dport != &dashtty_ports[CONSOLE_CHANNEL])
363 if (atomic_inc_return(&num_channels_need_poll) == 1)
364 add_poll_timer(&poll_timer);
365
366 return 0;
367err_free_xmit:
368 tty_port_free_xmit_buf(port);
369err:
370 return -ENOMEM;
371}
372
/**
 * dashtty_port_shutdown() - Tear down a port on final close.
 * @port: The tty_port being shut down.
 *
 * Stops polling for this channel (removing the poll timer when the last
 * polled channel closes), waits for the writer thread to drain any buffered
 * output, then frees both the rx and xmit buffers.
 */
static void dashtty_port_shutdown(struct tty_port *port)
{
	struct dashtty_port *dport = container_of(port, struct dashtty_port,
						  port);
	void *rx_buf;
	unsigned int count;

	/* stop reading: drop this channel's poll reference */
	if (console_poll || dport != &dashtty_ports[CONSOLE_CHANNEL])
		if (atomic_dec_and_test(&num_channels_need_poll))
			del_timer_sync(&poll_timer);

	/* snapshot the amount of unwritten output under the lock */
	mutex_lock(&dport->xmit_lock);
	count = dport->xmit_cnt;
	mutex_unlock(&dport->xmit_lock);
	if (count) {
		/*
		 * There's still data to write out, so wake and wait for the
		 * writer thread to drain the buffer.
		 */
		del_timer(&put_timer);
		wake_up_interruptible(&dashtty_waitqueue);
		wait_for_completion(&dport->xmit_empty);
	}

	/* Null the read buffer (timer could still be running!) */
	spin_lock_bh(&dport->rx_lock);
	rx_buf = dport->rx_buf;
	dport->rx_buf = NULL;
	spin_unlock_bh(&dport->rx_lock);
	/* Free the read buffer */
	kfree(rx_buf);

	/* Free the write buffer */
	tty_port_free_xmit_buf(port);
}
409
/* tty_port hooks: buffer setup on first open, teardown on final close. */
static const struct tty_port_operations dashtty_port_ops = {
	.activate	= dashtty_port_activate,
	.shutdown	= dashtty_port_shutdown,
};
414
415static int dashtty_install(struct tty_driver *driver, struct tty_struct *tty)
416{
417 return tty_port_install(&dashtty_ports[tty->index].port, driver, tty);
418}
419
/* tty open: delegate to the tty_port helper (runs dashtty_port_activate). */
static int dashtty_open(struct tty_struct *tty, struct file *filp)
{
	return tty_port_open(tty->port, tty, filp);
}
424
425static void dashtty_close(struct tty_struct *tty, struct file *filp)
426{
427 return tty_port_close(tty->port, tty, filp);
428}
429
430static void dashtty_hangup(struct tty_struct *tty)
431{
432 int channel;
433 struct dashtty_port *dport;
434
435 channel = tty->index;
436 dport = &dashtty_ports[channel];
437
438 /* drop any data in the xmit buffer */
439 mutex_lock(&dport->xmit_lock);
440 if (dport->xmit_cnt) {
441 atomic_sub(dport->xmit_cnt, &dashtty_xmit_cnt);
442 dport->xmit_cnt = 0;
443 dport->xmit_head = 0;
444 dport->xmit_tail = 0;
445 complete(&dport->xmit_empty);
446 }
447 mutex_unlock(&dport->xmit_lock);
448
449 tty_port_hangup(tty->port);
450}
451
452/**
453 * dashtty_put_timer() - Delayed wake up of kernel thread.
454 * @ignored: unused
455 *
456 * This timer function wakes up the kernel thread if any data exists in the
457 * buffers. It is used to delay the expensive writeout until the writer has
458 * stopped writing.
459 */
460static void dashtty_put_timer(struct timer_list *unused)
461{
462 if (atomic_read(&dashtty_xmit_cnt))
463 wake_up_interruptible(&dashtty_waitqueue);
464}
465
/**
 * dashtty_write() - Queue bytes into a channel's xmit buffer.
 * @tty: The tty being written to (tty->index selects the DA channel).
 * @buf: Bytes to queue.
 * @total: Number of bytes available in @buf.
 *
 * Copies as much of @buf as fits into the per-port circular xmit buffer and
 * arranges for the writer thread to drain it — immediately if the buffer is
 * now full, otherwise after DA_TTY_PUT_DELAY to let more data accumulate.
 *
 * Return: the number of bytes actually queued (may be less than @total).
 */
static int dashtty_write(struct tty_struct *tty, const unsigned char *buf,
			 int total)
{
	int channel, count, block;
	struct dashtty_port *dport;

	/* Determine the channel */
	channel = tty->index;
	dport = &dashtty_ports[channel];

	/*
	 * Write to output buffer.
	 *
	 * The reason that we asynchronously write the buffer is because if we
	 * were to write the buffer synchronously then because DA channels are
	 * per-CPU the buffer would be written to the channel of whatever CPU
	 * we're running on.
	 *
	 * What we actually want to happen is have all input and output done on
	 * one CPU.
	 */
	mutex_lock(&dport->xmit_lock);
	/* work out how many bytes we can write to the xmit buffer */
	total = min(total, (int)(SERIAL_XMIT_SIZE - dport->xmit_cnt));
	atomic_add(total, &dashtty_xmit_cnt);
	dport->xmit_cnt += total;
	/* write the actual bytes (may need splitting if it wraps) */
	for (count = total; count; count -= block) {
		block = min(count, (int)(SERIAL_XMIT_SIZE - dport->xmit_head));
		memcpy(dport->port.xmit_buf + dport->xmit_head, buf, block);
		dport->xmit_head += block;
		if (dport->xmit_head >= SERIAL_XMIT_SIZE)
			dport->xmit_head -= SERIAL_XMIT_SIZE;
		buf += block;
	}
	count = dport->xmit_cnt;
	/* xmit buffer no longer empty? (shutdown waits on this completion) */
	if (count)
		reinit_completion(&dport->xmit_empty);
	mutex_unlock(&dport->xmit_lock);

	if (total) {
		/*
		 * If the buffer is full, wake up the kthread, otherwise allow
		 * some more time for the buffer to fill up a bit before waking
		 * it.
		 */
		if (count == SERIAL_XMIT_SIZE) {
			del_timer(&put_timer);
			wake_up_interruptible(&dashtty_waitqueue);
		} else {
			mod_timer(&put_timer, jiffies + DA_TTY_PUT_DELAY);
		}
	}
	return total;
}
522
523static int dashtty_write_room(struct tty_struct *tty)
524{
525 struct dashtty_port *dport;
526 int channel;
527 int room;
528
529 channel = tty->index;
530 dport = &dashtty_ports[channel];
531
532 /* report the space in the xmit buffer */
533 mutex_lock(&dport->xmit_lock);
534 room = SERIAL_XMIT_SIZE - dport->xmit_cnt;
535 mutex_unlock(&dport->xmit_lock);
536
537 return room;
538}
539
540static int dashtty_chars_in_buffer(struct tty_struct *tty)
541{
542 struct dashtty_port *dport;
543 int channel;
544 int chars;
545
546 channel = tty->index;
547 dport = &dashtty_ports[channel];
548
549 /* report the number of bytes in the xmit buffer */
550 mutex_lock(&dport->xmit_lock);
551 chars = dport->xmit_cnt;
552 mutex_unlock(&dport->xmit_lock);
553
554 return chars;
555}
556
/* tty driver entry points for the DA channels. */
static const struct tty_operations dashtty_ops = {
	.install		= dashtty_install,
	.open			= dashtty_open,
	.close			= dashtty_close,
	.hangup			= dashtty_hangup,
	.write			= dashtty_write,
	.write_room		= dashtty_write_room,
	.chars_in_buffer	= dashtty_chars_in_buffer,
};
566
/**
 * dashtty_init() - Register the DA tty driver and start the writer thread.
 *
 * Sets up one tty_port per DA channel, the delayed-wake timer and the
 * writer kthread (bound to the boot CPU, as DA channels are per-CPU), then
 * registers the "ttyDA" driver. Undoes everything on failure.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __init dashtty_init(void)
{
	int ret;
	int nport;
	struct dashtty_port *dport;

	/* Nothing to do if the DA isn't present. */
	if (!metag_da_enabled())
		return -ENODEV;

	channel_driver = tty_alloc_driver(NUM_TTY_CHANNELS,
					  TTY_DRIVER_REAL_RAW);
	if (IS_ERR(channel_driver))
		return PTR_ERR(channel_driver);

	channel_driver->driver_name = "metag_da";
	channel_driver->name = "ttyDA";
	channel_driver->major = DA_TTY_MAJOR;
	channel_driver->minor_start = 0;
	channel_driver->type = TTY_DRIVER_TYPE_SERIAL;
	channel_driver->subtype = SERIAL_TYPE_NORMAL;
	channel_driver->init_termios = tty_std_termios;
	channel_driver->init_termios.c_cflag |= CLOCAL;

	tty_set_operations(channel_driver, &dashtty_ops);
	for (nport = 0; nport < NUM_TTY_CHANNELS; nport++) {
		dport = &dashtty_ports[nport];
		tty_port_init(&dport->port);
		dport->port.ops = &dashtty_port_ops;
		spin_lock_init(&dport->rx_lock);
		mutex_init(&dport->xmit_lock);
		/* the xmit buffer starts empty, i.e. completely written */
		init_completion(&dport->xmit_empty);
		complete(&dport->xmit_empty);
	}

	timer_setup(&put_timer, dashtty_put_timer, 0);

	init_waitqueue_head(&dashtty_waitqueue);
	dashtty_thread = kthread_create(put_data, NULL, "ttyDA");
	if (IS_ERR(dashtty_thread)) {
		pr_err("Couldn't create dashtty thread\n");
		ret = PTR_ERR(dashtty_thread);
		goto err_destroy_ports;
	}
	/*
	 * Bind the writer thread to the boot CPU so it can't migrate.
	 * DA channels are per-CPU and we want all channel I/O to be on a single
	 * predictable CPU.
	 */
	kthread_bind(dashtty_thread, 0);
	wake_up_process(dashtty_thread);

	ret = tty_register_driver(channel_driver);

	if (ret < 0) {
		pr_err("Couldn't install dashtty driver: err %d\n",
		       ret);
		goto err_stop_kthread;
	}

	return 0;

err_stop_kthread:
	kthread_stop(dashtty_thread);
err_destroy_ports:
	for (nport = 0; nport < NUM_TTY_CHANNELS; nport++) {
		dport = &dashtty_ports[nport];
		tty_port_destroy(&dport->port);
	}
	put_tty_driver(channel_driver);
	return ret;
}
device_initcall(dashtty_init);
640
641#ifdef CONFIG_DA_CONSOLE
642
/* Console write: push @count bytes of @s synchronously to the DA channel. */
static void dash_console_write(struct console *co, const char *s,
			       unsigned int count)
{
	int actually_written;

	/*
	 * chancall reports how many bytes were accepted in actually_written;
	 * a short write is silently dropped here (no retry for consoles).
	 */
	chancall(WRBUF, CONSOLE_CHANNEL, count, (void *)s, &actually_written);
}
650
/* Map the console to its underlying ttyDA tty driver and device index. */
static struct tty_driver *dash_console_device(struct console *c, int *index)
{
	*index = c->index;
	return channel_driver;
}
656
/*
 * DA console, backed by the ttyDA driver. .index = 1 selects channel 1
 * (presumably CONSOLE_CHANNEL — confirm against the channel definitions).
 */
struct console dash_console = {
	.name = "ttyDA",
	.write = dash_console_write,
	.device = dash_console_device,
	.flags = CON_PRINTBUFFER,
	.index = 1,
};
664
665#endif
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 37460cd6cabb..0e19679348d1 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1605,7 +1605,7 @@ config BCM7038_WDT
1605config IMGPDC_WDT 1605config IMGPDC_WDT
1606 tristate "Imagination Technologies PDC Watchdog Timer" 1606 tristate "Imagination Technologies PDC Watchdog Timer"
1607 depends on HAS_IOMEM 1607 depends on HAS_IOMEM
1608 depends on METAG || MIPS || COMPILE_TEST 1608 depends on MIPS || COMPILE_TEST
1609 select WATCHDOG_CORE 1609 select WATCHDOG_CORE
1610 help 1610 help
1611 Driver for Imagination Technologies PowerDown Controller 1611 Driver for Imagination Technologies PowerDown Controller
diff --git a/include/clocksource/metag_generic.h b/include/clocksource/metag_generic.h
deleted file mode 100644
index ac17e7d06cfb..000000000000
--- a/include/clocksource/metag_generic.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * Copyright (C) 2013 Imaginaton Technologies Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __CLKSOURCE_METAG_GENERIC_H
17#define __CLKSOURCE_METAG_GENERIC_H
18
19extern int metag_generic_timer_init(void);
20
21#endif /* __CLKSOURCE_METAG_GENERIC_H */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 5172ad0daa7c..5b211fe295f0 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -108,7 +108,6 @@ enum cpuhp_state {
108 CPUHP_AP_PERF_X86_CQM_STARTING, 108 CPUHP_AP_PERF_X86_CQM_STARTING,
109 CPUHP_AP_PERF_X86_CSTATE_STARTING, 109 CPUHP_AP_PERF_X86_CSTATE_STARTING,
110 CPUHP_AP_PERF_XTENSA_STARTING, 110 CPUHP_AP_PERF_XTENSA_STARTING,
111 CPUHP_AP_PERF_METAG_STARTING,
112 CPUHP_AP_MIPS_OP_LOONGSON3_STARTING, 111 CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
113 CPUHP_AP_ARM_SDEI_STARTING, 112 CPUHP_AP_ARM_SDEI_STARTING,
114 CPUHP_AP_ARM_VFP_STARTING, 113 CPUHP_AP_ARM_VFP_STARTING,
@@ -122,7 +121,6 @@ enum cpuhp_state {
122 CPUHP_AP_JCORE_TIMER_STARTING, 121 CPUHP_AP_JCORE_TIMER_STARTING,
123 CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, 122 CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
124 CPUHP_AP_ARM_TWD_STARTING, 123 CPUHP_AP_ARM_TWD_STARTING,
125 CPUHP_AP_METAG_TIMER_STARTING,
126 CPUHP_AP_QCOM_TIMER_STARTING, 124 CPUHP_AP_QCOM_TIMER_STARTING,
127 CPUHP_AP_ARMADA_TIMER_STARTING, 125 CPUHP_AP_ARMADA_TIMER_STARTING,
128 CPUHP_AP_MARCO_TIMER_STARTING, 126 CPUHP_AP_MARCO_TIMER_STARTING,
diff --git a/include/linux/irqchip/metag-ext.h b/include/linux/irqchip/metag-ext.h
deleted file mode 100644
index d120496370b9..000000000000
--- a/include/linux/irqchip/metag-ext.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C) 2012 Imagination Technologies
4 */
5
6#ifndef _LINUX_IRQCHIP_METAG_EXT_H_
7#define _LINUX_IRQCHIP_METAG_EXT_H_
8
9struct irq_data;
10struct platform_device;
11
12/* called from core irq code at init */
13int init_external_IRQ(void);
14
15/*
16 * called from SoC init_irq() callback to dynamically indicate the lack of
17 * HWMASKEXT registers.
18 */
19void meta_intc_no_mask(void);
20
21/*
22 * These allow SoCs to specialise the interrupt controller from their init_irq
23 * callbacks.
24 */
25
26extern struct irq_chip meta_intc_edge_chip;
27extern struct irq_chip meta_intc_level_chip;
28
29/* this should be called in the mask callback */
30void meta_intc_mask_irq_simple(struct irq_data *data);
31/* this should be called in the unmask callback */
32void meta_intc_unmask_irq_simple(struct irq_data *data);
33
34#endif /* _LINUX_IRQCHIP_METAG_EXT_H_ */
diff --git a/include/linux/irqchip/metag.h b/include/linux/irqchip/metag.h
deleted file mode 100644
index 0adcf449e4e4..000000000000
--- a/include/linux/irqchip/metag.h
+++ /dev/null
@@ -1,25 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C) 2011 Imagination Technologies
4 */
5
6#ifndef _LINUX_IRQCHIP_METAG_H_
7#define _LINUX_IRQCHIP_METAG_H_
8
9#include <linux/errno.h>
10
11#ifdef CONFIG_METAG_PERFCOUNTER_IRQS
12extern int init_internal_IRQ(void);
13extern int internal_irq_map(unsigned int hw);
14#else
15static inline int init_internal_IRQ(void)
16{
17 return 0;
18}
19static inline int internal_irq_map(unsigned int hw)
20{
21 return -EINVAL;
22}
23#endif
24
25#endif /* _LINUX_IRQCHIP_METAG_H_ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ad06d42adb1a..ccac10682ce5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -241,8 +241,6 @@ extern unsigned int kobjsize(const void *objp);
241# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ 241# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */
242#elif defined(CONFIG_PARISC) 242#elif defined(CONFIG_PARISC)
243# define VM_GROWSUP VM_ARCH_1 243# define VM_GROWSUP VM_ARCH_1
244#elif defined(CONFIG_METAG)
245# define VM_GROWSUP VM_ARCH_1
246#elif defined(CONFIG_IA64) 244#elif defined(CONFIG_IA64)
247# define VM_GROWSUP VM_ARCH_1 245# define VM_GROWSUP VM_ARCH_1
248#elif !defined(CONFIG_MMU) 246#elif !defined(CONFIG_MMU)
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index dbe1bb058c09..a81cffb76d89 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -115,7 +115,7 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" )
115#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" } 115#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" }
116#elif defined(CONFIG_PPC) 116#elif defined(CONFIG_PPC)
117#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" } 117#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" }
118#elif defined(CONFIG_PARISC) || defined(CONFIG_METAG) || defined(CONFIG_IA64) 118#elif defined(CONFIG_PARISC) || defined(CONFIG_IA64)
119#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" } 119#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" }
120#elif !defined(CONFIG_MMU) 120#elif !defined(CONFIG_MMU)
121#define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY,"mappedcopy" } 121#define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY,"mappedcopy" }
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index 3bf73fb58045..e2535d6dcec7 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -420,9 +420,6 @@ typedef struct elf64_shdr {
420#define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */ 420#define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */
421#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */ 421#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */
422#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension registers */ 422#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension registers */
423#define NT_METAG_CBUF 0x500 /* Metag catch buffer registers */
424#define NT_METAG_RPIPE 0x501 /* Metag read pipeline state */
425#define NT_METAG_TLS 0x502 /* Metag TLS pointer */
426#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */ 423#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
427 424
428/* Note header in a PT_NOTE section */ 425/* Note header in a PT_NOTE section */
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 64155e310a9f..d5964b051017 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -356,7 +356,7 @@ config FRAME_POINTER
356 bool "Compile the kernel with frame pointers" 356 bool "Compile the kernel with frame pointers"
357 depends on DEBUG_KERNEL && \ 357 depends on DEBUG_KERNEL && \
358 (CRIS || M68K || FRV || UML || \ 358 (CRIS || M68K || FRV || UML || \
359 SUPERH || BLACKFIN || MN10300 || METAG) || \ 359 SUPERH || BLACKFIN || MN10300) || \
360 ARCH_WANT_FRAME_POINTERS 360 ARCH_WANT_FRAME_POINTERS
361 default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS 361 default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
362 help 362 help
diff --git a/mm/Kconfig b/mm/Kconfig
index c782e8fb7235..abefa573bcd8 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -627,15 +627,14 @@ config GENERIC_EARLY_IOREMAP
627config MAX_STACK_SIZE_MB 627config MAX_STACK_SIZE_MB
628 int "Maximum user stack size for 32-bit processes (MB)" 628 int "Maximum user stack size for 32-bit processes (MB)"
629 default 80 629 default 80
630 range 8 256 if METAG
631 range 8 2048 630 range 8 2048
632 depends on STACK_GROWSUP && (!64BIT || COMPAT) 631 depends on STACK_GROWSUP && (!64BIT || COMPAT)
633 help 632 help
634 This is the maximum stack size in Megabytes in the VM layout of 32-bit 633 This is the maximum stack size in Megabytes in the VM layout of 32-bit
635 user processes when the stack grows upwards (currently only on parisc 634 user processes when the stack grows upwards (currently only on parisc
636 and metag arch). The stack will be located at the highest memory 635 arch). The stack will be located at the highest memory address minus
637 address minus the given value, unless the RLIMIT_STACK hard limit is 636 the given value, unless the RLIMIT_STACK hard limit is changed to a
638 changed to a smaller value in which case that is used. 637 smaller value in which case that is used.
639 638
640 A sane initial value is 80 MB. 639 A sane initial value is 80 MB.
641 640
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
index cb993801e4b2..eeb9ac8dbcfb 100755
--- a/scripts/checkstack.pl
+++ b/scripts/checkstack.pl
@@ -64,10 +64,6 @@ my (@stack, $re, $dre, $x, $xs, $funcre);
64 # 2b6c: 4e56 fb70 linkw %fp,#-1168 64 # 2b6c: 4e56 fb70 linkw %fp,#-1168
65 # 1df770: defc ffe4 addaw #-28,%sp 65 # 1df770: defc ffe4 addaw #-28,%sp
66 $re = qr/.*(?:linkw %fp,|addaw )#-([0-9]{1,4})(?:,%sp)?$/o; 66 $re = qr/.*(?:linkw %fp,|addaw )#-([0-9]{1,4})(?:,%sp)?$/o;
67 } elsif ($arch eq 'metag') {
68 #400026fc: 40 00 00 82 ADD A0StP,A0StP,#0x8
69 $re = qr/.*ADD.*A0StP,A0StP,\#(0x$x{1,8})/o;
70 $funcre = qr/^$x* <[^\$](.*)>:$/;
71 } elsif ($arch eq 'mips64') { 67 } elsif ($arch eq 'mips64') {
72 #8800402c: 67bdfff0 daddiu sp,sp,-16 68 #8800402c: 67bdfff0 daddiu sp,sp,-16
73 $re = qr/.*daddiu.*sp,sp,-(([0-9]{2}|[3-9])[0-9]{2})/o; 69 $re = qr/.*daddiu.*sp,sp,-(([0-9]{2}|[3-9])[0-9]{2})/o;
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 16e086dcc567..8c9691c3329e 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -33,20 +33,6 @@
33#include <string.h> 33#include <string.h>
34#include <unistd.h> 34#include <unistd.h>
35 35
36/*
37 * glibc synced up and added the metag number but didn't add the relocations.
38 * Work around this in a crude manner for now.
39 */
40#ifndef EM_METAG
41#define EM_METAG 174
42#endif
43#ifndef R_METAG_ADDR32
44#define R_METAG_ADDR32 2
45#endif
46#ifndef R_METAG_NONE
47#define R_METAG_NONE 3
48#endif
49
50#ifndef EM_AARCH64 36#ifndef EM_AARCH64
51#define EM_AARCH64 183 37#define EM_AARCH64 183
52#define R_AARCH64_NONE 0 38#define R_AARCH64_NONE 0
@@ -538,12 +524,6 @@ do_file(char const *const fname)
538 gpfx = '_'; 524 gpfx = '_';
539 break; 525 break;
540 case EM_IA_64: reltype = R_IA64_IMM64; gpfx = '_'; break; 526 case EM_IA_64: reltype = R_IA64_IMM64; gpfx = '_'; break;
541 case EM_METAG: reltype = R_METAG_ADDR32;
542 altmcount = "_mcount_wrapper";
543 rel_type_nop = R_METAG_NONE;
544 /* We happen to have the same requirement as MIPS */
545 is_fake_mcount32 = MIPS32_is_fake_mcount;
546 break;
547 case EM_MIPS: /* reltype: e_class */ gpfx = '_'; break; 527 case EM_MIPS: /* reltype: e_class */ gpfx = '_'; break;
548 case EM_PPC: reltype = R_PPC_ADDR32; gpfx = '_'; break; 528 case EM_PPC: reltype = R_PPC_ADDR32; gpfx = '_'; break;
549 case EM_PPC64: reltype = R_PPC64_ADDR64; gpfx = '_'; break; 529 case EM_PPC64: reltype = R_PPC64_ADDR64; gpfx = '_'; break;
diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h
index 36673f98d66b..3eb7a39169f6 100644
--- a/tools/perf/perf-sys.h
+++ b/tools/perf/perf-sys.h
@@ -46,10 +46,6 @@
46#define CPUINFO_PROC {"Processor"} 46#define CPUINFO_PROC {"Processor"}
47#endif 47#endif
48 48
49#ifdef __metag__
50#define CPUINFO_PROC {"CPU"}
51#endif
52
53#ifdef __xtensa__ 49#ifdef __xtensa__
54#define CPUINFO_PROC {"core ID"} 50#define CPUINFO_PROC {"core ID"}
55#endif 51#endif