 Documentation/devicetree/bindings/arc/interrupts.txt |   24
 MAINTAINERS                                          |    6
 arch/arc/Kbuild                                      |    2
 arch/arc/Kconfig                                     |  453
 arch/arc/Kconfig.debug                               |   34
 arch/arc/Makefile                                    |  126
 arch/arc/boot/Makefile                               |   26
 arch/arc/boot/dts/Makefile                           |   13
 arch/arc/boot/dts/angel4.dts                         |   55
 arch/arc/boot/dts/skeleton.dts                       |   10
 arch/arc/boot/dts/skeleton.dtsi                      |   37
 arch/arc/configs/fpga_defconfig                      |   61
 arch/arc/include/asm/Kbuild                          |   49
 arch/arc/include/asm/arcregs.h                       |  433
 arch/arc/include/asm/asm-offsets.h                   |    9
 arch/arc/include/asm/atomic.h                        |  232
 arch/arc/include/asm/barrier.h                       |   42
 arch/arc/include/asm/bitops.h                        |  516
 arch/arc/include/asm/bug.h                           |   37
 arch/arc/include/asm/cache.h                         |   75
 arch/arc/include/asm/cacheflush.h                    |   67
 arch/arc/include/asm/checksum.h                      |  101
 arch/arc/include/asm/clk.h                           |   22
 arch/arc/include/asm/cmpxchg.h                       |  143
 arch/arc/include/asm/current.h                       |   32
 arch/arc/include/asm/defines.h                       |   56
 arch/arc/include/asm/delay.h                         |   68
 arch/arc/include/asm/disasm.h                        |  116
 arch/arc/include/asm/dma-mapping.h                   |  221
 arch/arc/include/asm/dma.h                           |   14
 arch/arc/include/asm/elf.h                           |   78
 arch/arc/include/asm/entry.h                         |  724
 arch/arc/include/asm/exec.h                          |   15
 arch/arc/include/asm/futex.h                         |  151
 arch/arc/include/asm/io.h                            |  105
 arch/arc/include/asm/irq.h                           |   25
 arch/arc/include/asm/irqflags.h                      |  153
 arch/arc/include/asm/kdebug.h                        |   19
 arch/arc/include/asm/kgdb.h                          |   61
 arch/arc/include/asm/kprobes.h                       |   62
 arch/arc/include/asm/linkage.h                       |   63
 arch/arc/include/asm/mach_desc.h                     |   87
 arch/arc/include/asm/mmu.h                           |   23
 arch/arc/include/asm/mmu_context.h                   |  213
 arch/arc/include/asm/module.h                        |   28
 arch/arc/include/asm/mutex.h                         |   18
 arch/arc/include/asm/page.h                          |  109
 arch/arc/include/asm/perf_event.h                    |   13
 arch/arc/include/asm/pgalloc.h                       |  134
 arch/arc/include/asm/pgtable.h                       |  405
 arch/arc/include/asm/processor.h                     |  151
 arch/arc/include/asm/prom.h                          |   14
 arch/arc/include/asm/ptrace.h                        |  130
 arch/arc/include/asm/sections.h                      |   18
 arch/arc/include/asm/segment.h                       |   24
 arch/arc/include/asm/serial.h                        |   25
 arch/arc/include/asm/setup.h                         |   37
 arch/arc/include/asm/smp.h                           |  130
 arch/arc/include/asm/spinlock.h                      |  144
 arch/arc/include/asm/spinlock_types.h                |   35
 arch/arc/include/asm/string.h                        |   40
 arch/arc/include/asm/switch_to.h                     |   41
 arch/arc/include/asm/syscall.h                       |   72
 arch/arc/include/asm/syscalls.h                      |   29
 arch/arc/include/asm/thread_info.h                   |  121
 arch/arc/include/asm/timex.h                         |   18
 arch/arc/include/asm/tlb-mmu1.h                      |  104
 arch/arc/include/asm/tlb.h                           |   58
 arch/arc/include/asm/tlbflush.h                      |   28
 arch/arc/include/asm/uaccess.h                       |  751
 arch/arc/include/asm/unaligned.h                     |   29
 arch/arc/include/asm/unwind.h                        |  163
 arch/arc/include/uapi/asm/Kbuild                     |   12
 arch/arc/include/uapi/asm/byteorder.h                |   18
 arch/arc/include/uapi/asm/cachectl.h                 |   28
 arch/arc/include/uapi/asm/elf.h                      |   26
 arch/arc/include/uapi/asm/page.h                     |   39
 arch/arc/include/uapi/asm/ptrace.h                   |   48
 arch/arc/include/uapi/asm/setup.h                    |    6
 arch/arc/include/uapi/asm/sigcontext.h               |   22
 arch/arc/include/uapi/asm/signal.h                   |   27
 arch/arc/include/uapi/asm/swab.h                     |   98
 arch/arc/include/uapi/asm/unistd.h                   |   34
 arch/arc/kernel/Makefile                             |   33
 arch/arc/kernel/arc_hostlink.c                       |   58
 arch/arc/kernel/arcksyms.c                           |   56
 arch/arc/kernel/asm-offsets.c                        |   64
 arch/arc/kernel/clk.c                                |   21
 arch/arc/kernel/ctx_sw.c                             |  109
 arch/arc/kernel/ctx_sw_asm.S                         |   58
 arch/arc/kernel/devtree.c                            |  123
 arch/arc/kernel/disasm.c                             |  538
 arch/arc/kernel/entry.S                              |  839
 arch/arc/kernel/fpu.c                                |   55
 arch/arc/kernel/head.S                               |  111
 arch/arc/kernel/irq.c                                |  273
 arch/arc/kernel/kgdb.c                               |  205
 arch/arc/kernel/kprobes.c                            |  525
 arch/arc/kernel/module.c                             |  145
 arch/arc/kernel/process.c                            |  235
 arch/arc/kernel/ptrace.c                             |  158
 arch/arc/kernel/reset.c                              |   33
 arch/arc/kernel/setup.c                              |  473
 arch/arc/kernel/signal.c                             |  360
 arch/arc/kernel/smp.c                                |  332
 arch/arc/kernel/stacktrace.c                         |  254
 arch/arc/kernel/sys.c                                |   18
 arch/arc/kernel/time.c                               |  265
 arch/arc/kernel/traps.c                              |  170
 arch/arc/kernel/troubleshoot.c                       |  322
 arch/arc/kernel/unaligned.c                          |  245
 arch/arc/kernel/unwind.c                             | 1329
 arch/arc/kernel/vmlinux.lds.S                        |  163
 arch/arc/lib/Makefile                                |    9
 arch/arc/lib/memcmp.S                                |  124
 arch/arc/lib/memcpy-700.S                            |   66
 arch/arc/lib/memset.S                                |   59
 arch/arc/lib/strchr-700.S                            |  123
 arch/arc/lib/strcmp.S                                |   96
 arch/arc/lib/strcpy-700.S                            |   70
 arch/arc/lib/strlen.S                                |   83
 arch/arc/mm/Makefile                                 |   10
 arch/arc/mm/cache_arc700.c                           |  768
 arch/arc/mm/dma.c                                    |   94
 arch/arc/mm/extable.c                                |   63
 arch/arc/mm/fault.c                                  |  228
 arch/arc/mm/init.c                                   |  187
 arch/arc/mm/ioremap.c                                |   91
 arch/arc/mm/tlb.c                                    |  645
 arch/arc/mm/tlbex.S                                  |  408
 arch/arc/oprofile/Makefile                           |    9
 arch/arc/oprofile/common.c                           |   26
 arch/arc/plat-arcfpga/Kconfig                        |   84
 arch/arc/plat-arcfpga/Makefile                       |   12
 arch/arc/plat-arcfpga/include/plat/irq.h             |   31
 arch/arc/plat-arcfpga/include/plat/memmap.h          |   31
 arch/arc/plat-arcfpga/include/plat/smp.h             |  118
 arch/arc/plat-arcfpga/irq.c                          |   25
 arch/arc/plat-arcfpga/platform.c                     |  226
 arch/arc/plat-arcfpga/smp.c                          |  171
 arch/parisc/Kconfig                                  |    1
 include/asm-generic/checksum.h                       |    4
 include/asm-generic/uaccess.h                        |   14
 init/Kconfig                                         |    8
 kernel/sysctl.c                                      |    5
 lib/checksum.c                                       |    2
 tools/perf/perf.h                                    |    6
 147 files changed, 19552 insertions(+), 1 deletion(-)
diff --git a/Documentation/devicetree/bindings/arc/interrupts.txt b/Documentation/devicetree/bindings/arc/interrupts.txt
new file mode 100644
index 000000000000..9a5d562435ea
--- /dev/null
+++ b/Documentation/devicetree/bindings/arc/interrupts.txt
@@ -0,0 +1,24 @@
+* ARC700 incore Interrupt Controller
+
+  The core interrupt controller provides 32 prioritised interrupts (2 levels)
+  to the ARC700 core.
+
+Properties:
+
+- compatible: "snps,arc700-intc"
+- interrupt-controller: This is an interrupt controller.
+- #interrupt-cells: Must be <1>.
+
+  The single-cell "interrupts" property of a device specifies the IRQ number
+  between 0 and 31.
+
+  The intc is accessed via the special ARC AUX register interface, hence a
+  "reg" property is not specified.
+
+Example:
+
+	intc: interrupt-controller {
+		compatible = "snps,arc700-intc";
+		interrupt-controller;
+		#interrupt-cells = <1>;
+	};
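
For driver-side context: the single "interrupts" cell is mapped through the intc's irq domain into a Linux virq, which a driver then requests as usual. A minimal sketch, assuming a hypothetical "demo-uart" platform device (the demo_uart_* names are illustrative, not part of this patch):

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    static irqreturn_t demo_uart_isr(int irq, void *dev)
    {
            /* acknowledge/handle the device here */
            return IRQ_HANDLED;
    }

    static int demo_uart_probe(struct platform_device *pdev)
    {
            /* resolves the DT cell (e.g. interrupts = <5>) to a virq */
            int irq = platform_get_irq(pdev, 0);

            if (irq < 0)
                    return irq;

            return request_irq(irq, demo_uart_isr, 0, "demo-uart", pdev);
    }
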
diff --git a/MAINTAINERS b/MAINTAINERS
index 6db1c6bdf015..aea0adf414dc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7682,6 +7682,12 @@ F: lib/swiotlb.c
 F:	arch/*/kernel/pci-swiotlb.c
 F:	include/linux/swiotlb.h
 
+SYNOPSYS ARC ARCHITECTURE
+M:	Vineet Gupta <vgupta@synopsys.com>
+L:	linux-snps-arc@vger.kernel.org
+S:	Supported
+F:	arch/arc/
+
 SYSV FILESYSTEM
 M:	Christoph Hellwig <hch@infradead.org>
 S:	Maintained
diff --git a/arch/arc/Kbuild b/arch/arc/Kbuild
new file mode 100644
index 000000000000..082d329d3245
--- /dev/null
+++ b/arch/arc/Kbuild
@@ -0,0 +1,2 @@
+obj-y += kernel/
+obj-y += mm/
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
new file mode 100644
index 000000000000..e6f4eca09ee3
--- /dev/null
+++ b/arch/arc/Kconfig
@@ -0,0 +1,453 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+config ARC
+	def_bool y
+	select CLONE_BACKWARDS
+	# ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev
+	select DEVTMPFS if !INITRAMFS_SOURCE=""
+	select GENERIC_ATOMIC64
+	select GENERIC_CLOCKEVENTS
+	select GENERIC_FIND_FIRST_BIT
+	# for now, we don't need GENERIC_IRQ_PROBE, CONFIG_GENERIC_IRQ_CHIP
+	select GENERIC_IRQ_SHOW
+	select GENERIC_KERNEL_EXECVE
+	select GENERIC_KERNEL_THREAD
+	select GENERIC_PENDING_IRQ if SMP
+	select GENERIC_SMP_IDLE_THREAD
+	select HAVE_ARCH_KGDB
+	select HAVE_ARCH_TRACEHOOK
+	select HAVE_GENERIC_HARDIRQS
+	select HAVE_IOREMAP_PROT
+	select HAVE_KPROBES
+	select HAVE_KRETPROBES
+	select HAVE_MEMBLOCK
+	select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND
+	select HAVE_OPROFILE
+	select HAVE_PERF_EVENTS
+	select IRQ_DOMAIN
+	select MODULES_USE_ELF_RELA
+	select NO_BOOTMEM
+	select OF
+	select OF_EARLY_FLATTREE
+	select PERF_USE_VMALLOC
+
+config SCHED_OMIT_FRAME_POINTER
+	def_bool y
+
+config GENERIC_CSUM
+	def_bool y
+
+config RWSEM_GENERIC_SPINLOCK
+	def_bool y
+
+config ARCH_FLATMEM_ENABLE
+	def_bool y
+
+config MMU
+	def_bool y
+
+config NO_IOPORT
+	def_bool y
+
+config GENERIC_CALIBRATE_DELAY
+	def_bool y
+
+config GENERIC_HWEIGHT
+	def_bool y
+
+config BINFMT_ELF
+	def_bool y
+
+config STACKTRACE_SUPPORT
+	def_bool y
+	select STACKTRACE
+
+config HAVE_LATENCYTOP_SUPPORT
+	def_bool y
+
+config NO_DMA
+	def_bool n
+
+source "init/Kconfig"
+source "kernel/Kconfig.freezer"
+
+menu "ARC Architecture Configuration"
+
+menu "ARC Platform/SoC/Board"
+
+source "arch/arc/plat-arcfpga/Kconfig"
+# New platforms add here
+
+endmenu
+
+menu "ARC CPU Configuration"
+
+choice
+	prompt "ARC Core"
+	default ARC_CPU_770
+
+config ARC_CPU_750D
+	bool "ARC750D"
+	help
+	  Support for ARC750 core
+
+config ARC_CPU_770
+	bool "ARC770"
+	select ARC_CPU_REL_4_10
+	help
+	  Support for ARC770 core introduced with Rel 4.10 (Summer 2011)
+	  This core has a bunch of cool new features:
+	  -MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4)
+	           Shared Address Spaces (for sharing TLB entries in MMU)
+	  -Caches: New Prog Model, Region Flush
+	  -Insns: endian swap, load-locked/store-conditional, time-stamp-ctr
+
+endchoice
+
+config CPU_BIG_ENDIAN
+	bool "Enable Big Endian Mode"
+	default n
+	help
+	  Build kernel for Big Endian Mode of ARC CPU
+
+# If a platform can't work with 0x8000_0000 based dma_addr_t
+config ARC_PLAT_NEEDS_CPU_TO_DMA
+	bool
+
+config SMP
+	bool "Symmetric Multi-Processing (Incomplete)"
+	default n
+	select USE_GENERIC_SMP_HELPERS
+	help
+	  This enables support for systems with more than one CPU. If you have
+	  a system with only one CPU, like most personal computers, say N. If
+	  you have a system with more than one CPU, say Y.
+
+if SMP
+
+config ARC_HAS_COH_CACHES
+	def_bool n
+
+config ARC_HAS_COH_LLSC
+	def_bool n
+
+config ARC_HAS_COH_RTSC
+	def_bool n
+
+config ARC_HAS_REENTRANT_IRQ_LV2
+	def_bool n
+
+endif
+
+config NR_CPUS
+	int "Maximum number of CPUs (2-32)"
+	range 2 32
+	depends on SMP
+	default "2"
+
+menuconfig ARC_CACHE
+	bool "Enable Cache Support"
+	default y
+	# if SMP, cache enabled ONLY if ARC implementation has cache coherency
+	depends on !SMP || ARC_HAS_COH_CACHES
+
+if ARC_CACHE
+
+config ARC_CACHE_LINE_SHIFT
+	int "Cache Line Length (as power of 2)"
+	range 5 7
+	default "6"
+	help
+	  Starting with ARC700 4.9, the cache line length is configurable.
+	  This option specifies "N", with Line-len = 2^N,
+	  so line lengths of 32, 64, 128 are specified by 5, 6, 7, respectively.
+	  Linux only supports the same line length for I and D caches.
+
+config ARC_HAS_ICACHE
+	bool "Use Instruction Cache"
+	default y
+
+config ARC_HAS_DCACHE
+	bool "Use Data Cache"
+	default y
+
+config ARC_CACHE_PAGES
+	bool "Per Page Cache Control"
+	default y
+	depends on ARC_HAS_ICACHE || ARC_HAS_DCACHE
+	help
+	  This can be used to over-ride the global I/D Cache Enable on a
+	  per-page basis (but only for pages accessed via MMU such as
+	  Kernel Virtual address or User Virtual Address).
+	  TLB entries have a per-page Cache Enable Bit.
+	  Note that Global I/D ENABLE + Per Page DISABLE works, but the
+	  corollary Global DISABLE + Per Page ENABLE won't work.
+
+endif	#ARC_CACHE
+
+config ARC_HAS_ICCM
+	bool "Use ICCM"
+	help
+	  Single Cycle RAMs to store Fast Path Code
+	default n
+
+config ARC_ICCM_SZ
+	int "ICCM Size in KB"
+	default "64"
+	depends on ARC_HAS_ICCM
+
+config ARC_HAS_DCCM
+	bool "Use DCCM"
+	help
+	  Single Cycle RAMs to store Fast Path Data
+	default n
+
+config ARC_DCCM_SZ
+	int "DCCM Size in KB"
+	default "64"
+	depends on ARC_HAS_DCCM
+
+config ARC_DCCM_BASE
+	hex "DCCM map address"
+	default "0xA0000000"
+	depends on ARC_HAS_DCCM
+
+config ARC_HAS_HW_MPY
+	bool "Use Hardware Multiplier (Normal or Faster XMAC)"
+	default y
+	help
+	  Influences how gcc generates code for MPY operations.
+	  If enabled, MPYxx insns are generated, provided by the Standard/XMAC
+	  Multiplier. Otherwise the software multiply lib is used.
+
+choice
+	prompt "ARC700 MMU Version"
+	default ARC_MMU_V3 if ARC_CPU_770
+	default ARC_MMU_V2 if ARC_CPU_750D
+
+config ARC_MMU_V1
+	bool "MMU v1"
+	help
+	  Orig ARC700 MMU
+
+config ARC_MMU_V2
+	bool "MMU v2"
+	help
+	  Fixed the deficiency of v1 - possible thrashing in memcpy scenario
+	  when 2 D-TLB and 1 I-TLB entries index into the same 2-way set.
+
+config ARC_MMU_V3
+	bool "MMU v3"
+	depends on ARC_CPU_770
+	help
+	  Introduced with ARC700 4.10: New Features
+	  Variable Page size (1k-16k), var JTLB size 128 x (2 or 4)
+	  Shared Address Spaces (SASID)
+
+endchoice
+
+
+choice
+	prompt "MMU Page Size"
+	default ARC_PAGE_SIZE_8K
+
+config ARC_PAGE_SIZE_8K
+	bool "8KB"
+	help
+	  Choose between 8k vs 16k
+
+config ARC_PAGE_SIZE_16K
+	bool "16KB"
+	depends on ARC_MMU_V3
+
+config ARC_PAGE_SIZE_4K
+	bool "4KB"
+	depends on ARC_MMU_V3
+
+endchoice
+
+config ARC_COMPACT_IRQ_LEVELS
+	bool "ARCompact IRQ Priorities: High(2)/Low(1)"
+	default n
+	# Timer HAS to be high priority, for any other high priority config
+	select ARC_IRQ3_LV2
+	# if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
+	depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2
+
+if ARC_COMPACT_IRQ_LEVELS
+
+config ARC_IRQ3_LV2
+	bool
+
+config ARC_IRQ5_LV2
+	bool
+
+config ARC_IRQ6_LV2
+	bool
+
+endif
+
+config ARC_FPU_SAVE_RESTORE
+	bool "Enable FPU state persistence across context switch"
+	default n
+	help
+	  The Double Precision Floating Point unit has dedicated regs which
+	  need to be saved/restored across context-switch.
+	  Note that the ARC FPU is overly simplistic, unlike say x86, which has
+	  hardware pieces to allow software to conditionally save/restore,
+	  based on actual usage of FPU by a task. Thus our implementation does
+	  this for all tasks in the system.
+
+menuconfig ARC_CPU_REL_4_10
+	bool "Enable support for Rel 4.10 features"
+	default n
+	help
+	  -ARC770 (and dependent features) enabled
+	  -ARC750 also shares some of the new features with 770
+
+config ARC_HAS_LLSC
+	bool "Insn: LLOCK/SCOND (efficient atomic ops)"
+	default y
+	depends on ARC_CPU_770
+	# if SMP, enable LLSC ONLY if ARC implementation has coherent atomics
+	depends on !SMP || ARC_HAS_COH_LLSC
+
+config ARC_HAS_SWAPE
+	bool "Insn: SWAPE (endian-swap)"
+	default y
+	depends on ARC_CPU_REL_4_10
+
+config ARC_HAS_RTSC
+	bool "Insn: RTSC (64-bit r/o cycle counter)"
+	default y
+	depends on ARC_CPU_REL_4_10
+	# if SMP, enable RTSC only if counter is coherent across cores
+	depends on !SMP || ARC_HAS_COH_RTSC
+
+endmenu	  # "ARC CPU Configuration"
+
+config LINUX_LINK_BASE
+	hex "Linux Link Address"
+	default "0x80000000"
+	help
+	  ARC700 divides the 32 bit phy address space into two equal halves
+	  -Lower 2G (0 - 0x7FFF_FFFF) is user virtual, translated by MMU
+	  -Upper 2G (0x8000_0000 onwards) is untranslated, for kernel
+	  Typically the Linux kernel is linked at the start of the untranslated
+	  address space, hence the default value of 0x8000_0000.
+	  However some customers have peripherals mapped at this addr, so
+	  Linux needs to be scooted a bit.
+	  If you don't know what the above means, leave this setting alone.
+
+config ARC_CURR_IN_REG
+	bool "Dedicate Register r25 for current_task pointer"
+	default y
+	help
+	  This reserves Register R25 to point to the Current Task in
+	  kernel mode. This saves a memory access for each such access.
+
+
+config ARC_MISALIGN_ACCESS
+	bool "Emulate unaligned memory access (userspace only)"
+	default n
+	select SYSCTL_ARCH_UNALIGN_NO_WARN
+	select SYSCTL_ARCH_UNALIGN_ALLOW
+	help
+	  This enables misaligned 16 & 32 bit memory access from user space.
+	  Use ONLY-IF-ABS-NECESSARY as it will be very slow and also can hide
+	  potential bugs in code.
+
+config ARC_STACK_NONEXEC
+	bool "Make stack non-executable"
+	default n
+	help
+	  Disables the execute permission on the stack/heap of processes,
+	  which is enabled by default.
+
+config HZ
+	int "Timer Frequency"
+	default 100
+
+config ARC_METAWARE_HLINK
+	bool "Support for Metaware debugger assisted Host access"
+	default n
+	help
+	  This option allows Linux userland apps to directly access the
+	  host file system (open/creat/read/write etc) with help from the
+	  Metaware Debugger. This can come in handy for Linux-host communication
+	  when there is no real usable peripheral such as EMAC.
+
+menuconfig ARC_DBG
+	bool "ARC debugging"
+	default y
+
+config ARC_DW2_UNWIND
+	bool "Enable DWARF specific kernel stack unwind"
+	depends on ARC_DBG
+	default y
+	select KALLSYMS
+	help
+	  Compiles the kernel with DWARF unwind information and can be used
+	  to get stack backtraces.
+
+	  If you say Y here the resulting kernel image will be slightly larger
+	  but not slower, and it will give very useful debugging information.
+	  If you don't debug the kernel, you can say N, but we may not be able
+	  to solve problems without frame unwind information.
+
+config ARC_DBG_TLB_PARANOIA
+	bool "Paranoia Checks in Low Level TLB Handlers"
+	depends on ARC_DBG
+	default n
+
+config ARC_DBG_TLB_MISS_COUNT
+	bool "Profile TLB Misses"
+	default n
+	select DEBUG_FS
+	depends on ARC_DBG
+	help
+	  Counts the number of I and D TLB Misses and exports them via debugfs.
+	  The counters can be cleared via debugfs as well.
+
+config CMDLINE
+	string "Kernel command line to build in"
+	default "print-fatal-signals=1"
+	help
+	  The default command line which will be appended to the optional
+	  u-boot provided command line (see below)
+
+config CMDLINE_UBOOT
+	bool "Support U-boot kernel command line passing"
+	default n
+	help
+	  If you are using U-boot (www.denx.de) and wish to pass the kernel
+	  command line from the U-boot environment to the Linux kernel then
+	  switch this option on.
+	  ARC U-boot will set up the cmdline in RAM/flash and set r2 to point
+	  to it. Kernel startup code will copy the string into the cmdline
+	  buffer and also append CONFIG_CMDLINE.
+
+config ARC_BUILTIN_DTB_NAME
+	string "Built in DTB"
+	help
+	  Set the name of the DTB to embed in the vmlinux binary.
+	  Leaving it blank selects the minimal "skeleton" dtb.
+
+source "kernel/Kconfig.preempt"
+
+endmenu	 # "ARC Architecture Configuration"
+
+source "mm/Kconfig"
+source "net/Kconfig"
+source "drivers/Kconfig"
+source "fs/Kconfig"
+source "arch/arc/Kconfig.debug"
+source "security/Kconfig"
+source "crypto/Kconfig"
+source "lib/Kconfig"
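
The CMDLINE/CMDLINE_UBOOT options above describe a two-source command line: an optional U-boot string handed over via r2, with the built-in CONFIG_CMDLINE appended. A rough sketch of that merge, assuming a pointer already captured from r2 by early boot code (illustrative only, not the port's actual implementation):

    #include <linux/init.h>
    #include <linux/string.h>

    #define COMMAND_LINE_SIZE 256

    static char boot_cmdline[COMMAND_LINE_SIZE];

    /* @uboot_cmdline: string U-boot left in RAM, address passed in r2
     * (NULL if nothing was passed or CMDLINE_UBOOT is off) */
    static void __init setup_cmdline(const char *uboot_cmdline)
    {
            if (uboot_cmdline)
                    strlcpy(boot_cmdline, uboot_cmdline, COMMAND_LINE_SIZE);

            /* the built-in CONFIG_CMDLINE is appended, per the help text */
            strlcat(boot_cmdline, " ", COMMAND_LINE_SIZE);
            strlcat(boot_cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
    }
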
diff --git a/arch/arc/Kconfig.debug b/arch/arc/Kconfig.debug
new file mode 100644
index 000000000000..962c6099659e
--- /dev/null
+++ b/arch/arc/Kconfig.debug
@@ -0,0 +1,34 @@
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+config EARLY_PRINTK
+	bool "Early printk" if EMBEDDED
+	default y
+	help
+	  Write kernel log output directly into the VGA buffer or to a serial
+	  port.
+
+	  This is useful for kernel debugging when your machine crashes very
+	  early before the console code is initialized. For normal operation
+	  it is not recommended because it looks ugly and doesn't cooperate
+	  with klogd/syslogd or the X server. You should normally say N here,
+	  unless you want to debug such a crash.
+
+config DEBUG_STACKOVERFLOW
+	bool "Check for stack overflows"
+	depends on DEBUG_KERNEL
+	help
+	  This option will cause messages to be printed if free stack space
+	  drops below a certain limit.
+
+config 16KSTACKS
+	bool "Use 16Kb for kernel stacks instead of 8Kb"
+	help
+	  If you say Y here the kernel will use a 16Kb stack size for the
+	  kernel stack attached to each process/thread. The default is 8K.
+	  This increases the resident kernel footprint and will cause fewer
+	  threads to run on the system, and also increases the pressure
+	  on the VM subsystem for higher-order allocations.
+
+endmenu
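
DEBUG_STACKOVERFLOW above promises a warning when free stack space runs low. The usual shape of such a check compares the current stack pointer against the bottom of the task's stack; a generic sketch, not this port's code (THREAD_SIZE/8 as the limit is an assumption):

    #include <linux/sched.h>
    #include <linux/thread_info.h>
    #include <linux/printk.h>

    #define STACK_WARN_LIMIT (THREAD_SIZE / 8)

    /* thread_info lives at the bottom of the stack and the stack grows
     * down, so SP minus the base yields the bytes still free */
    static void check_stack_overflow(void)
    {
            long marker;
            unsigned long sp = (unsigned long)&marker;   /* ~current SP */
            unsigned long free = sp - (unsigned long)current_thread_info();

            if (unlikely(free < STACK_WARN_LIMIT))
                    pr_warn("kernel stack nearly full: %lu bytes left\n", free);
    }
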
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
new file mode 100644
index 000000000000..92379c7cbc1a
--- /dev/null
+++ b/arch/arc/Makefile
@@ -0,0 +1,126 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+UTS_MACHINE := arc
+
+KBUILD_DEFCONFIG := fpga_defconfig
+
+cflags-y	+= -mA7 -fno-common -pipe -fno-builtin -D__linux__
+
+LINUXINCLUDE	+= -include ${src}/arch/arc/include/asm/defines.h
+
+ifdef CONFIG_ARC_CURR_IN_REG
+# For a global register definition, make sure it gets passed to every file
+# We had a customer-reported bug where some code built in kernel was NOT using
+# any kernel headers, and missing the r25 global register
+# Can't do unconditionally (like above) because of recursive include issues
+# due to <linux/thread_info.h>
+LINUXINCLUDE	+= -include ${src}/arch/arc/include/asm/current.h
+endif
+
+atleast_gcc44 := $(call cc-ifversion, -gt, 0402, y)
+cflags-$(atleast_gcc44)			+= -fsection-anchors
+
+cflags-$(CONFIG_ARC_HAS_LLSC)		+= -mlock
+cflags-$(CONFIG_ARC_HAS_SWAPE)		+= -mswape
+cflags-$(CONFIG_ARC_HAS_RTSC)		+= -mrtsc
+cflags-$(CONFIG_ARC_DW2_UNWIND)		+= -fasynchronous-unwind-tables
+
+ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
+# Generic build system uses -O2, we want -O3
+cflags-y	+= -O3
+endif
+
+# small data is default for elf32 tool-chain. If not usable, disable it
+# This also allows repurposing GP as scratch reg to gcc reg allocator
+disable_small_data := y
+cflags-$(disable_small_data)		+= -mno-sdata -fcall-used-gp
+
+cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= -mbig-endian
+ldflags-$(CONFIG_CPU_BIG_ENDIAN)	+= -EB
+
+# STAR 9000518362:
+# arc-linux-uclibc-ld (buildroot) or arceb-elf32-ld (EZChip) don't accept
+# --build-id w/o "-marclinux".
+# Default arc-elf32-ld is OK
+ldflags-y	+= -marclinux
+
+ARC_LIBGCC	:= -mA7
+cflags-$(CONFIG_ARC_HAS_HW_MPY)		+= -multcost=16
+
+ifndef CONFIG_ARC_HAS_HW_MPY
+	cflags-y	+= -mno-mpy
+
+# newlib for ARC700 assumes MPY to be always present, which is generally true
+# However, if someone really doesn't want MPY, we need to use the 600 ver
+# which coupled with -mno-mpy will use mpy emulation
+# With gcc 4.4.7, -mno-mpy is enough to make any other related adjustments,
+# e.g. increased cost of MPY. With gcc 4.2.1 this had to be explicitly hinted
+
+	ARC_LIBGCC	:= -marc600
+	ifneq ($(atleast_gcc44),y)
+		cflags-y	+= -multcost=30
+	endif
+endif
+
+LIBGCC	:= $(shell $(CC) $(ARC_LIBGCC) $(cflags-y) --print-libgcc-file-name)
+
+# Modules with short calls might break for calls into builtin-kernel
+KBUILD_CFLAGS_MODULE	+= -mlong-calls
+
+# Finally dump everything into kernel build system
+KBUILD_CFLAGS	+= $(cflags-y)
+KBUILD_AFLAGS	+= $(KBUILD_CFLAGS)
+LDFLAGS		+= $(ldflags-y)
+
+head-y		:= arch/arc/kernel/head.o
+
+# See arch/arc/Kbuild for content of core part of the kernel
+core-y		+= arch/arc/
+
+# w/o this dtb won't embed into kernel binary
+core-y		+= arch/arc/boot/dts/
+
+core-$(CONFIG_ARC_PLAT_FPGA_LEGACY)	+= arch/arc/plat-arcfpga/
+
+drivers-$(CONFIG_OPROFILE)	+= arch/arc/oprofile/
+
+libs-y		+= arch/arc/lib/ $(LIBGCC)
+
+# default target for make without any arguments.
+KBUILD_IMAGE	:= bootpImage
+
+all:	$(KBUILD_IMAGE)
+boot	:= arch/arc/boot
+
+bootpImage: vmlinux
+
+uImage: vmlinux
+	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+%.dtb %.dtb.S %.dtb.o: scripts
+	$(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
+
+dtbs: scripts
+	$(Q)$(MAKE) $(build)=$(boot)/dts dtbs
+
+archclean:
+	$(Q)$(MAKE) $(clean)=$(boot)
+
+# Hacks to enable final link due to absence of link-time branch relaxation
+# and gcc choosing optimal (shorter) branches at -O3
+#
+# vineetg Feb 2010: -mlong-calls switched off for overall kernel build
+#                   However lib/decompress_inflate.o (.init.text) calls
+#                   zlib_inflate_workspacesize (.text) causing relocation errors.
+#                   Thus forcing all extern calls in this file to be long calls
+export CFLAGS_decompress_inflate.o = -mmedium-calls
+export CFLAGS_initramfs.o = -mmedium-calls
+ifdef CONFIG_SMP
+export CFLAGS_core.o = -mmedium-calls
+endif
diff --git a/arch/arc/boot/Makefile b/arch/arc/boot/Makefile
new file mode 100644
index 000000000000..7d514c24e095
--- /dev/null
+++ b/arch/arc/boot/Makefile
@@ -0,0 +1,26 @@
+targets := vmlinux.bin vmlinux.bin.gz uImage
+
+# uImage build relies on mkimage being available on your host for ARC target
+# You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage
+# and make sure it's reachable from your PATH
+MKIMAGE := $(srctree)/scripts/mkuboot.sh
+
+OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S
+
+LINUX_START_TEXT = $$(readelf -h vmlinux | \
+			grep "Entry point address" | grep -o 0x.*)
+
+UIMAGE_LOADADDR    = $(CONFIG_LINUX_LINK_BASE)
+UIMAGE_ENTRYADDR   = $(LINUX_START_TEXT)
+UIMAGE_COMPRESSION = gzip
+
+$(obj)/vmlinux.bin: vmlinux FORCE
+	$(call if_changed,objcopy)
+
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,gzip)
+
+$(obj)/uImage: $(obj)/vmlinux.bin.gz FORCE
+	$(call if_changed,uimage)
+
+PHONY += FORCE
diff --git a/arch/arc/boot/dts/Makefile b/arch/arc/boot/dts/Makefile
new file mode 100644
index 000000000000..5776835d583f
--- /dev/null
+++ b/arch/arc/boot/dts/Makefile
@@ -0,0 +1,13 @@
+# Built-in dtb
+builtindtb-y		:= angel4
+
+ifneq ($(CONFIG_ARC_BUILTIN_DTB_NAME),"")
+	builtindtb-y	:= $(patsubst "%",%,$(CONFIG_ARC_BUILTIN_DTB_NAME))
+endif
+
+obj-y   += $(builtindtb-y).dtb.o
+targets += $(builtindtb-y).dtb
+
+dtbs: $(addprefix $(obj)/, $(builtindtb-y).dtb)
+
+clean-files := *.dtb
diff --git a/arch/arc/boot/dts/angel4.dts b/arch/arc/boot/dts/angel4.dts
new file mode 100644
index 000000000000..bae4f936cb03
--- /dev/null
+++ b/arch/arc/boot/dts/angel4.dts
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
+
+/ {
+	compatible = "snps,arc-angel4";
+	clock-frequency = <80000000>;	/* 80 MHZ */
+	#address-cells = <1>;
+	#size-cells = <1>;
+	interrupt-parent = <&intc>;
+
+	chosen {
+		bootargs = "console=ttyARC0,115200n8";
+	};
+
+	aliases {
+		serial0 = &arcuart0;
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>;	/* 256M */
+	};
+
+	fpga {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		/* child and parent address space 1:1 mapped */
+		ranges;
+
+		intc: interrupt-controller {
+			compatible = "snps,arc700-intc";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+		};
+
+		arcuart0: serial@c0fc1000 {
+			compatible = "snps,arc-uart";
+			reg = <0xc0fc1000 0x100>;
+			interrupts = <5>;
+			clock-frequency = <80000000>;
+			current-speed = <115200>;
+			status = "okay";
+		};
+	};
+};
diff --git a/arch/arc/boot/dts/skeleton.dts b/arch/arc/boot/dts/skeleton.dts
new file mode 100644
index 000000000000..25a84fb5b3dc
--- /dev/null
+++ b/arch/arc/boot/dts/skeleton.dts
@@ -0,0 +1,10 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi
new file mode 100644
index 000000000000..a870bdd5e404
--- /dev/null
+++ b/arch/arc/boot/dts/skeleton.dtsi
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Skeleton device tree; the bare minimum needed to boot; just include and
+ * add a compatible value.
+ */
+
+/ {
+	compatible = "snps,arc";
+	clock-frequency = <80000000>;	/* 80 MHZ */
+	#address-cells = <1>;
+	#size-cells = <1>;
+	chosen { };
+	aliases { };
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "snps,arc770d";
+			reg = <0>;
+		};
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>;	/* 256M */
+	};
+};
diff --git a/arch/arc/configs/fpga_defconfig b/arch/arc/configs/fpga_defconfig
new file mode 100644
index 000000000000..b8698067ebbe
--- /dev/null
+++ b/arch/arc/configs/fpga_defconfig
@@ -0,0 +1,61 @@
+CONFIG_CROSS_COMPILE="arc-elf32-"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_DEFAULT_HOSTNAME="ARCLinux"
+# CONFIG_SWAP is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../arc_initramfs"
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARC_PLAT_FPGA_LEGACY=y
+CONFIG_ARC_BOARD_ML509=y
+# CONFIG_ARC_HAS_RTSC is not set
+CONFIG_ARC_BUILTIN_DTB_NAME="angel4"
+# CONFIG_COMPACTION is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IPV6 is not set
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_BLK_DEV is not set
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_ARC=y
+CONFIG_SERIAL_ARC_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_XZ_DEC=y
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
new file mode 100644
index 000000000000..48af742f8b5a
--- /dev/null
+++ b/arch/arc/include/asm/Kbuild
@@ -0,0 +1,49 @@
+generic-y += auxvec.h
+generic-y += bugs.h
+generic-y += bitsperlong.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += fb.h
+generic-y += ftrace.h
+generic-y += hardirq.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kmap_types.h
+generic-y += kvm_para.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += siginfo.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += xor.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
new file mode 100644
index 000000000000..1b907c465666
--- /dev/null
+++ b/arch/arc/include/asm/arcregs.h
@@ -0,0 +1,433 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_ARCREGS_H
+#define _ASM_ARC_ARCREGS_H
+
+#ifdef __KERNEL__
+
+/* Build Configuration Registers */
+#define ARC_REG_DCCMBASE_BCR	0x61	/* DCCM Base Addr */
+#define ARC_REG_CRC_BCR		0x62
+#define ARC_REG_DVFB_BCR	0x64
+#define ARC_REG_EXTARITH_BCR	0x65
+#define ARC_REG_VECBASE_BCR	0x68
+#define ARC_REG_PERIBASE_BCR	0x69
+#define ARC_REG_FP_BCR		0x6B	/* Single-Precision FPU */
+#define ARC_REG_DPFP_BCR	0x6C	/* Dbl Precision FPU */
+#define ARC_REG_MMU_BCR		0x6f
+#define ARC_REG_DCCM_BCR	0x74	/* DCCM Present + SZ */
+#define ARC_REG_TIMERS_BCR	0x75
+#define ARC_REG_ICCM_BCR	0x78
+#define ARC_REG_XY_MEM_BCR	0x79
+#define ARC_REG_MAC_BCR		0x7a
+#define ARC_REG_MUL_BCR		0x7b
+#define ARC_REG_SWAP_BCR	0x7c
+#define ARC_REG_NORM_BCR	0x7d
+#define ARC_REG_MIXMAX_BCR	0x7e
+#define ARC_REG_BARREL_BCR	0x7f
+#define ARC_REG_D_UNCACH_BCR	0x6A
+
+/* status32 Bits Positions */
+#define STATUS_H_BIT		0	/* CPU Halted */
+#define STATUS_E1_BIT		1	/* Int 1 enable */
+#define STATUS_E2_BIT		2	/* Int 2 enable */
+#define STATUS_A1_BIT		3	/* Int 1 active */
+#define STATUS_A2_BIT		4	/* Int 2 active */
+#define STATUS_AE_BIT		5	/* Exception active */
+#define STATUS_DE_BIT		6	/* PC is in delay slot */
+#define STATUS_U_BIT		7	/* User/Kernel mode */
+#define STATUS_L_BIT		12	/* Loop inhibit */
+
+/* These masks correspond to the status word(STATUS_32) bits */
+#define STATUS_H_MASK		(1<<STATUS_H_BIT)
+#define STATUS_E1_MASK		(1<<STATUS_E1_BIT)
+#define STATUS_E2_MASK		(1<<STATUS_E2_BIT)
+#define STATUS_A1_MASK		(1<<STATUS_A1_BIT)
+#define STATUS_A2_MASK		(1<<STATUS_A2_BIT)
+#define STATUS_AE_MASK		(1<<STATUS_AE_BIT)
+#define STATUS_DE_MASK		(1<<STATUS_DE_BIT)
+#define STATUS_U_MASK		(1<<STATUS_U_BIT)
+#define STATUS_L_MASK		(1<<STATUS_L_BIT)
+
+/*
+ * ECR: Exception Cause Reg bits-n-pieces
+ * [23:16] = Exception Vector
+ * [15: 8] = Exception Cause Code
+ * [ 7: 0] = Exception Parameters (for certain types only)
+ */
+#define ECR_VEC_MASK		0xff0000
+#define ECR_CODE_MASK		0x00ff00
+#define ECR_PARAM_MASK		0x0000ff
+
+/* Exception Cause Vector Values */
+#define ECR_V_INSN_ERR		0x02
+#define ECR_V_MACH_CHK		0x20
+#define ECR_V_ITLB_MISS		0x21
+#define ECR_V_DTLB_MISS		0x22
+#define ECR_V_PROTV		0x23
+
+/* Protection Violation Exception Cause Code Values */
+#define ECR_C_PROTV_INST_FETCH		0x00
+#define ECR_C_PROTV_LOAD		0x01
+#define ECR_C_PROTV_STORE		0x02
+#define ECR_C_PROTV_XCHG		0x03
+#define ECR_C_PROTV_MISALIG_DATA	0x04
+
+/* DTLB Miss Exception Cause Code Values */
+#define ECR_C_BIT_DTLB_LD_MISS		8
+#define ECR_C_BIT_DTLB_ST_MISS		9
+
+
+/* Auxiliary registers */
+#define AUX_IDENTITY		4
+#define AUX_INTR_VEC_BASE	0x25
+#define AUX_IRQ_LEV		0x200	/* IRQ Priority: L1 or L2 */
+#define AUX_IRQ_HINT		0x201	/* For generating Soft Interrupts */
+#define AUX_IRQ_LV12		0x43	/* interrupt level register */
+
+#define AUX_IENABLE		0x40c
+#define AUX_ITRIGGER		0x40d
+#define AUX_IPULSE		0x415
+
+/* Timer related Aux registers */
+#define ARC_REG_TIMER0_LIMIT	0x23	/* timer 0 limit */
+#define ARC_REG_TIMER0_CTRL	0x22	/* timer 0 control */
+#define ARC_REG_TIMER0_CNT	0x21	/* timer 0 count */
+#define ARC_REG_TIMER1_LIMIT	0x102	/* timer 1 limit */
+#define ARC_REG_TIMER1_CTRL	0x101	/* timer 1 control */
+#define ARC_REG_TIMER1_CNT	0x100	/* timer 1 count */
+
+#define TIMER_CTRL_IE	(1 << 0) /* Interrupt when Count reaches limit */
+#define TIMER_CTRL_NH	(1 << 1) /* Count only when CPU NOT halted */
+
+/* MMU Management regs */
+#define ARC_REG_TLBPD0		0x405
+#define ARC_REG_TLBPD1		0x406
+#define ARC_REG_TLBINDEX	0x407
+#define ARC_REG_TLBCOMMAND	0x408
+#define ARC_REG_PID		0x409
+#define ARC_REG_SCRATCH_DATA0	0x418
+
+/* Bits in MMU PID register */
+#define MMU_ENABLE		(1 << 31)	/* Enable MMU for process */
+
+/* Error code if probe fails */
+#define TLB_LKUP_ERR		0x80000000
+
+/* TLB Commands */
+#define TLBWrite	0x1
+#define TLBRead		0x2
+#define TLBGetIndex	0x3
+#define TLBProbe	0x4
+
+#if (CONFIG_ARC_MMU_VER >= 2)
+#define TLBWriteNI	0x5	/* write JTLB without inv uTLBs */
+#define TLBIVUTLB	0x6	/* explicitly inv uTLBs */
+#else
+#undef TLBWriteNI		/* These cmds don't exist on older MMU */
+#undef TLBIVUTLB
+#endif
+
+/* Instruction cache related Auxiliary registers */
+#define ARC_REG_IC_BCR		0x77	/* Build Config reg */
+#define ARC_REG_IC_IVIC		0x10
+#define ARC_REG_IC_CTRL		0x11
+#define ARC_REG_IC_IVIL		0x19
+#if (CONFIG_ARC_MMU_VER > 2)
+#define ARC_REG_IC_PTAG		0x1E
+#endif
+
+/* Bit val in IC_CTRL */
+#define IC_CTRL_CACHE_DISABLE	0x1
+
+/* Data cache related Auxiliary registers */
+#define ARC_REG_DC_BCR		0x72
+#define ARC_REG_DC_IVDC		0x47
+#define ARC_REG_DC_CTRL		0x48
+#define ARC_REG_DC_IVDL		0x4A
+#define ARC_REG_DC_FLSH		0x4B
+#define ARC_REG_DC_FLDL		0x4C
+#if (CONFIG_ARC_MMU_VER > 2)
+#define ARC_REG_DC_PTAG		0x5C
+#endif
+
+/* Bit val in DC_CTRL */
+#define DC_CTRL_INV_MODE_FLUSH	0x40
+#define DC_CTRL_FLUSH_STATUS	0x100
+
+/* MMU Management regs */
+#define ARC_REG_PID		0x409
+#define ARC_REG_SCRATCH_DATA0	0x418
+
+/* Bits in MMU PID register */
+#define MMU_ENABLE		(1 << 31)	/* Enable MMU for process */
+
+/*
+ * Floating Pt Registers
+ * Status regs are read-only (build-time) so need not be saved/restored
+ */
+#define ARC_AUX_FP_STAT		0x300
+#define ARC_AUX_DPFP_1L		0x301
+#define ARC_AUX_DPFP_1H		0x302
+#define ARC_AUX_DPFP_2L		0x303
+#define ARC_AUX_DPFP_2H		0x304
+#define ARC_AUX_DPFP_STAT	0x305
+
+#ifndef __ASSEMBLY__
+
+/*
+ ******************************************************************
+ * Inline ASM macros to read/write AUX Regs
+ * Essentially invocation of lr/sr insns from "C"
+ */
+
+#if 1
+
+#define read_aux_reg(reg)	__builtin_arc_lr(reg)
+
+/* gcc builtin sr needs reg param to be long immediate */
+#define write_aux_reg(reg_immed, val)		\
+		__builtin_arc_sr((unsigned int)val, reg_immed)
+
+#else
+
+#define read_aux_reg(reg)		\
+({					\
+	unsigned int __ret;		\
+	__asm__ __volatile__(		\
+	"	lr    %0, [%1]"		\
+	: "=r"(__ret)			\
+	: "i"(reg));			\
+	__ret;				\
+})
+
+/*
+ * Aux Reg address is specified as long immediate by caller
+ * e.g.
+ *    write_aux_reg(0x69, some_val);
+ * This generates the tightest code.
+ */
+#define write_aux_reg(reg_imm, val)	\
+({					\
+	__asm__ __volatile__(		\
+	"	sr   %0, [%1]	\n"	\
+	:				\
+	: "ir"(val), "i"(reg_imm));	\
+})
+
+/*
+ * Aux Reg address is specified in a variable
+ * e.g.
+ *    reg_num = 0x69
+ *    write_aux_reg2(reg_num, some_val);
+ * This has to generate glue code to load the reg num from
+ * memory to a reg hence not recommended.
+ */
+#define write_aux_reg2(reg_in_var, val)		\
+({						\
+	unsigned int tmp;			\
+						\
+	__asm__ __volatile__(			\
+	"	ld   %0, [%2]	\n\t"		\
+	"	sr   %1, [%0]	\n\t"		\
+	: "=&r"(tmp)				\
+	: "r"(val), "memory"(&reg_in_var));	\
+})
+
+#endif
+
+#define READ_BCR(reg, into)				\
+{							\
+	unsigned int tmp;				\
+	tmp = read_aux_reg(reg);			\
+	if (sizeof(tmp) == sizeof(into)) {		\
+		into = *((typeof(into) *)&tmp);		\
+	} else {					\
+		extern void bogus_undefined(void);	\
+		bogus_undefined();			\
+	}						\
+}
+
+#define WRITE_BCR(reg, into)				\
+{							\
+	unsigned int tmp;				\
+	if (sizeof(tmp) == sizeof(into)) {		\
+		tmp = (*(unsigned int *)(into));	\
+		write_aux_reg(reg, tmp);		\
+	} else {					\
+		extern void bogus_undefined(void);	\
+		bogus_undefined();			\
+	}						\
+}
+
+/* Helpers */
+#define TO_KB(bytes)		((bytes) >> 10)
+#define TO_MB(bytes)		(TO_KB(bytes) >> 10)
+#define PAGES_TO_KB(n_pages)	((n_pages) << (PAGE_SHIFT - 10))
+#define PAGES_TO_MB(n_pages)	(PAGES_TO_KB(n_pages) >> 10)
+
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+/* These DPFP regs need to be saved/restored across ctx-sw */
+struct arc_fpu {
+	struct {
+		unsigned int l, h;
+	} aux_dpfp[2];
+};
+#endif
+
+/*
+ ***************************************************************
+ * Build Configuration Registers, with encoded hardware config
+ */
+struct bcr_identity {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int chip_id:16, cpu_id:8, family:8;
+#else
+	unsigned int family:8, cpu_id:8, chip_id:16;
+#endif
+};
+
+struct bcr_mmu_1_2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
+#else
+	unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
+#endif
+};
+
+struct bcr_mmu_3 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
+		     u_itlb:4, u_dtlb:4;
+#else
+	unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4,
+		     ways:4, ver:8;
+#endif
+};
+
+#define EXTN_SWAP_VALID		0x1
+#define EXTN_NORM_VALID		0x2
+#define EXTN_MINMAX_VALID	0x2
+#define EXTN_BARREL_VALID	0x2
+
+struct bcr_extn {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad:20, crc:1, ext_arith:2, mul:2, barrel:2, minmax:2,
+		     norm:2, swap:1;
+#else
+	unsigned int swap:1, norm:2, minmax:2, barrel:2, mul:2, ext_arith:2,
+		     crc:1, pad:20;
+#endif
+};
+
+/* DSP Options Ref Manual */
+struct bcr_extn_mac_mul {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad:16, type:8, ver:8;
+#else
+	unsigned int ver:8, type:8, pad:16;
+#endif
+};
+
+struct bcr_extn_xymem {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int ram_org:2, num_banks:4, bank_sz:4, ver:8;
+#else
+	unsigned int ver:8, bank_sz:4, num_banks:4, ram_org:2;
+#endif
+};
+
+struct bcr_cache {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
+#else
+	unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
+#endif
+};
+
+struct bcr_perip {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int start:8, pad2:8, sz:8, pad:8;
+#else
+	unsigned int pad:8, sz:8, pad2:8, start:8;
+#endif
+};
+struct bcr_iccm {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int base:16, pad:5, sz:3, ver:8;
+#else
+	unsigned int ver:8, sz:3, pad:5, base:16;
+#endif
+};
+
+/* DCCM Base Address Register: ARC_REG_DCCMBASE_BCR */
+struct bcr_dccm_base {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int addr:24, ver:8;
+#else
+	unsigned int ver:8, addr:24;
+#endif
+};
+
+/* DCCM RAM Configuration Register: ARC_REG_DCCM_BCR */
+struct bcr_dccm {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int res:21, sz:3, ver:8;
+#else
+	unsigned int ver:8, sz:3, res:21;
+#endif
+};
+
+/* Both SP and DP FPU BCRs have same format */
+struct bcr_fp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int fast:1, ver:8;
+#else
+	unsigned int ver:8, fast:1;
+#endif
+};
+
+/*
+ *******************************************************************
+ * Generic structures to hold build configuration used at runtime
+ */
+
+struct cpuinfo_arc_mmu {
+	unsigned int ver, pg_sz, sets, ways, u_dtlb, u_itlb, num_tlb;
+};
+
+struct cpuinfo_arc_cache {
+	unsigned int has_aliasing, sz, line_len, assoc, ver;
+};
+
+struct cpuinfo_arc_ccm {
+	unsigned int base_addr, sz;
+};
+
+struct cpuinfo_arc {
+	struct cpuinfo_arc_cache icache, dcache;
+	struct cpuinfo_arc_mmu mmu;
+	struct bcr_identity core;
+	unsigned int timers;
+	unsigned int vec_base;
+	unsigned int uncached_base;
+	struct cpuinfo_arc_ccm iccm, dccm;
+	struct bcr_extn extn;
+	struct bcr_extn_xymem extn_xymem;
+	struct bcr_extn_mac_mul extn_mac_mul;
+	struct bcr_fp fp, dpfp;
+};
+
+extern struct cpuinfo_arc cpuinfo_arc700[];
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_ARC_ARCREGS_H */
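
A short usage sketch of the READ_BCR machinery above, decoding the MMU build config into the runtime cpuinfo struct (a hypothetical helper; the exact field decode is illustrative, patterned on the bcr_mmu_3 layout in this header):

    static void decode_mmu_bcr(struct cpuinfo_arc_mmu *mmu)
    {
            struct bcr_mmu_3 mmu3;

            READ_BCR(ARC_REG_MMU_BCR, mmu3);   /* lr from aux reg 0x6f */

            mmu->ver = mmu3.ver;
            mmu->sets = 1 << mmu3.sets;        /* fields are log2 encoded */
            mmu->ways = 1 << mmu3.ways;
            mmu->u_dtlb = mmu3.u_dtlb;
            mmu->u_itlb = mmu3.u_itlb;
    }
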
diff --git a/arch/arc/include/asm/asm-offsets.h b/arch/arc/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..dad18768fe43
--- /dev/null
+++ b/arch/arc/include/asm/asm-offsets.h
@@ -0,0 +1,9 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <generated/asm-offsets.h>
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
new file mode 100644
index 000000000000..83f03ca6caf6
--- /dev/null
+++ b/arch/arc/include/asm/atomic.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_ATOMIC_H
+#define _ASM_ARC_ATOMIC_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+#include <asm/smp.h>
+
+#define atomic_read(v)  ((v)->counter)
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+	unsigned int temp;
+
+	__asm__ __volatile__(
+	"1:	llock   %0, [%1]	\n"
+	"	add     %0, %0, %2	\n"
+	"	scond   %0, [%1]	\n"
+	"	bnz     1b		\n"
+	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */
+	: "r"(&v->counter), "ir"(i)
+	: "cc");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	unsigned int temp;
+
+	__asm__ __volatile__(
+	"1:	llock   %0, [%1]	\n"
+	"	sub     %0, %0, %2	\n"
+	"	scond   %0, [%1]	\n"
+	"	bnz     1b		\n"
+	: "=&r"(temp)
+	: "r"(&v->counter), "ir"(i)
+	: "cc");
+}
+
+/* add and also return the new value */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned int temp;
+
+	__asm__ __volatile__(
+	"1:	llock   %0, [%1]	\n"
+	"	add     %0, %0, %2	\n"
+	"	scond   %0, [%1]	\n"
+	"	bnz     1b		\n"
+	: "=&r"(temp)
+	: "r"(&v->counter), "ir"(i)
+	: "cc");
+
+	return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned int temp;
+
+	__asm__ __volatile__(
+	"1:	llock   %0, [%1]	\n"
+	"	sub     %0, %0, %2	\n"
+	"	scond   %0, [%1]	\n"
+	"	bnz     1b		\n"
+	: "=&r"(temp)
+	: "r"(&v->counter), "ir"(i)
+	: "cc");
+
+	return temp;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+	unsigned int temp;
+
+	__asm__ __volatile__(
+	"1:	llock   %0, [%1]	\n"
+	"	bic     %0, %0, %2	\n"
+	"	scond   %0, [%1]	\n"
+	"	bnz     1b		\n"
+	: "=&r"(temp)
+	: "r"(addr), "ir"(mask)
+	: "cc");
+}
+
+#else	/* !CONFIG_ARC_HAS_LLSC */
+
+#ifndef CONFIG_SMP
+
+ /* violating atomic_xxx API locking protocol in UP for optimization sake */
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+#else
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+	/*
+	 * Independent of hardware support, all of the atomic_xxx() APIs need
+	 * to follow the same locking rules to make sure that a "hardware"
+	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
+	 * sequence
+	 *
+	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
+	 * requires the locking.
+	 */
+	unsigned long flags;
+
+	atomic_ops_lock(flags);
+	v->counter = i;
+	atomic_ops_unlock(flags);
+}
+#endif
+
+/*
+ * Non hardware assisted Atomic-R-M-W
+ * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+ */
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+	unsigned long flags;
+
+	atomic_ops_lock(flags);
+	v->counter += i;
+	atomic_ops_unlock(flags);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	unsigned long flags;
+
+	atomic_ops_lock(flags);
+	v->counter -= i;
+	atomic_ops_unlock(flags);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long flags;
+	unsigned long temp;
+
+	atomic_ops_lock(flags);
+	temp = v->counter;
+	temp += i;
+	v->counter = temp;
+	atomic_ops_unlock(flags);
+
+	return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long flags;
+	unsigned long temp;
+
+	atomic_ops_lock(flags);
+	temp = v->counter;
+	temp -= i;
+	v->counter = temp;
+	atomic_ops_unlock(flags);
+
+	return temp;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+	unsigned long flags;
+
+	atomic_ops_lock(flags);
+	*addr &= ~mask;
+	atomic_ops_unlock(flags);
+}
+
+#endif	/* !CONFIG_ARC_HAS_LLSC */
+
+/**
+ * __atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns the old value of @v
+ */
+#define __atomic_add_unless(v, a, u)					\
+({									\
+	int c, old;							\
+	c = atomic_read(v);						\
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
+		c = old;						\
+	c;								\
+})
+
+#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
+
+#define atomic_inc(v)			atomic_add(1, v)
+#define atomic_dec(v)			atomic_sub(1, v)
+
+#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v)		atomic_add_return(1, (v))
+#define atomic_dec_return(v)		atomic_sub_return(1, (v))
+#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)
+
+#define ATOMIC_INIT(i)			{ (i) }
+
+#include <asm-generic/atomic64.h>
+
+#endif
+
+#endif
+
+#endif
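
The __atomic_add_unless() macro above underpins atomic_inc_not_zero(), whose classic use is taking a reference only while an object is still live. A small sketch using the macros from this header (demo_obj is a made-up type, not part of the patch):

    #include <linux/atomic.h>
    #include <linux/slab.h>

    struct demo_obj {
            atomic_t refcnt;        /* 0 == object being torn down */
    };

    /* returns non-zero only if a reference was safely taken */
    static int demo_obj_get(struct demo_obj *obj)
    {
            return atomic_inc_not_zero(&obj->refcnt);
    }

    static void demo_obj_put(struct demo_obj *obj)
    {
            if (atomic_dec_and_test(&obj->refcnt))
                    kfree(obj);     /* last reference gone */
    }
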
diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
new file mode 100644
index 000000000000..f6cb7c4ffb35
--- /dev/null
+++ b/arch/arc/include/asm/barrier.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+/* TODO-vineetg: Need to see what this does, don't we need sync anywhere */
+#define mb() __asm__ __volatile__ ("" : : : "memory")
+#define rmb() mb()
+#define wmb() mb()
+#define set_mb(var, value)  do { var = value; mb(); } while (0)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+#define read_barrier_depends()  mb()
+
+/* TODO-vineetg verify the correctness of macros here */
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#define smp_rmb()	rmb()
+#define smp_wmb()	wmb()
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#endif
+
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
+#define smp_read_barrier_depends()	do { } while (0)
+
+#endif
+
+#endif
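
The smp_wmb()/smp_rmb() pairing these macros enable is easiest to see in a flag-and-data handshake; a minimal generic sketch (standard kernel idiom, not code from this patch):

    #include <asm/barrier.h>
    #include <asm/processor.h>

    static int shared_data;
    static int data_ready;

    /* producer (CPU 0): publish the data before the flag */
    static void publish(void)
    {
            shared_data = 42;
            smp_wmb();      /* order data store before flag store */
            data_ready = 1;
    }

    /* consumer (CPU 1): see the flag before reading the data */
    static int consume(void)
    {
            while (!data_ready)
                    cpu_relax();
            smp_rmb();      /* order flag read before data read */
            return shared_data;
    }
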
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
new file mode 100644
index 000000000000..647a83a8e756
--- /dev/null
+++ b/arch/arc/include/asm/bitops.h
@@ -0,0 +1,516 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _ASM_BITOPS_H
10#define _ASM_BITOPS_H
11
12#ifndef _LINUX_BITOPS_H
13#error only <linux/bitops.h> can be included directly
14#endif
15
16#ifdef __KERNEL__
17
18#ifndef __ASSEMBLY__
19
20#include <linux/types.h>
21#include <linux/compiler.h>
22
23/*
24 * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
25 * The Kconfig glue ensures that in SMP, this is only set if the container
26 * SoC/platform has cross-core coherent LLOCK/SCOND
27 */
28#if defined(CONFIG_ARC_HAS_LLSC)
29
30static inline void set_bit(unsigned long nr, volatile unsigned long *m)
31{
32 unsigned int temp;
33
34 m += nr >> 5;
35
36 if (__builtin_constant_p(nr))
37 nr &= 0x1f;
38
39 __asm__ __volatile__(
40 "1: llock %0, [%1] \n"
41 " bset %0, %0, %2 \n"
42 " scond %0, [%1] \n"
43 " bnz 1b \n"
44 : "=&r"(temp)
45 : "r"(m), "ir"(nr)
46 : "cc");
47}
48
49static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
50{
51 unsigned int temp;
52
53 m += nr >> 5;
54
55 if (__builtin_constant_p(nr))
56 nr &= 0x1f;
57
58 __asm__ __volatile__(
59 "1: llock %0, [%1] \n"
60 " bclr %0, %0, %2 \n"
61 " scond %0, [%1] \n"
62 " bnz 1b \n"
63 : "=&r"(temp)
64 : "r"(m), "ir"(nr)
65 : "cc");
66}
67
68static inline void change_bit(unsigned long nr, volatile unsigned long *m)
69{
70 unsigned int temp;
71
72 m += nr >> 5;
73
74 if (__builtin_constant_p(nr))
75 nr &= 0x1f;
76
77 __asm__ __volatile__(
78 "1: llock %0, [%1] \n"
79 " bxor %0, %0, %2 \n"
80 " scond %0, [%1] \n"
81 " bnz 1b \n"
82 : "=&r"(temp)
83 : "r"(m), "ir"(nr)
84 : "cc");
85}
86
87/*
88 * Semantically:
89 * Test the bit
90 * if clear
91 * set it and return 0 (old value)
92 * else
93 * return 1 (old value).
94 *
95 * Since ARC lacks a equivalent h/w primitive, the bit is set unconditionally
96 * and the old value of bit is returned
97 */
98static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
99{
100 unsigned long old, temp;
101
102 m += nr >> 5;
103
104 if (__builtin_constant_p(nr))
105 nr &= 0x1f;
106
107 __asm__ __volatile__(
108 "1: llock %0, [%2] \n"
109 " bset %1, %0, %3 \n"
110 " scond %1, [%2] \n"
111 " bnz 1b \n"
112 : "=&r"(old), "=&r"(temp)
113 : "r"(m), "ir"(nr)
114 : "cc");
115
116 return (old & (1 << nr)) != 0;
117}
118
119static inline int
120test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
121{
122 unsigned int old, temp;
123
124 m += nr >> 5;
125
126 if (__builtin_constant_p(nr))
127 nr &= 0x1f;
128
129 __asm__ __volatile__(
130 "1: llock %0, [%2] \n"
131 " bclr %1, %0, %3 \n"
132 " scond %1, [%2] \n"
133 " bnz 1b \n"
134 : "=&r"(old), "=&r"(temp)
135 : "r"(m), "ir"(nr)
136 : "cc");
137
138 return (old & (1 << nr)) != 0;
139}
140
141static inline int
142test_and_change_bit(unsigned long nr, volatile unsigned long *m)
143{
144 unsigned int old, temp;
145
146 m += nr >> 5;
147
148 if (__builtin_constant_p(nr))
149 nr &= 0x1f;
150
151 __asm__ __volatile__(
152 "1: llock %0, [%2] \n"
153 " bxor %1, %0, %3 \n"
154 " scond %1, [%2] \n"
155 " bnz 1b \n"
156 : "=&r"(old), "=&r"(temp)
157 : "r"(m), "ir"(nr)
158 : "cc");
159
160 return (old & (1 << nr)) != 0;
161}
162
163#else /* !CONFIG_ARC_HAS_LLSC */
164
165#include <asm/smp.h>
166
167/*
168 * Non hardware-assisted Atomic-R-M-W
169 * Locking here is plain irq-disabling (UP) or spinlocks (SMP)
170 *
171 * There's "significant" micro-optimization in writing our own variants of
172 * bitops (over the generic variants)
173 *
174 * (1) The generic APIs have "signed" @nr while we have it "unsigned"
175 * This avoids extra code being generated for pointer arithmetic, since
176 * gcc is "not sure" that the index is NOT -ve
177 * (2) Utilize the fact that ARCompact bit fiddling insns (BSET/BCLR/ASL)
178 * only consider the bottom 5 bits of @nr, so NO need to mask them off.
179 * (GCC Quirk: however for constant @nr we still need to do the masking
180 * at compile time)
181 */
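/*
 * Worked example of point (2), illustrative only: for nr = 36,
 *	m += 36 >> 5	--> advance to word 1 of the bitmap
 *	36 & 0x1f = 4	--> bit 4 within that word
 * At runtime the ARC shifter sees only the bottom 5 bits of nr, so
 * "1 << 36" computes "1 << 4" anyway; for a compile-time constant though,
 * gcc folds the shift itself (where 1 << 36 is undefined), hence the
 * explicit masking of constant @nr in the routines below.
 */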
182
183static inline void set_bit(unsigned long nr, volatile unsigned long *m)
184{
185 unsigned long temp, flags;
186 m += nr >> 5;
187
188 if (__builtin_constant_p(nr))
189 nr &= 0x1f;
190
191 bitops_lock(flags);
192
193 temp = *m;
194 *m = temp | (1UL << nr);
195
196 bitops_unlock(flags);
197}
198
199static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
200{
201 unsigned long temp, flags;
202 m += nr >> 5;
203
204 if (__builtin_constant_p(nr))
205 nr &= 0x1f;
206
207 bitops_lock(flags);
208
209 temp = *m;
210 *m = temp & ~(1UL << nr);
211
212 bitops_unlock(flags);
213}
214
215static inline void change_bit(unsigned long nr, volatile unsigned long *m)
216{
217 unsigned long temp, flags;
218 m += nr >> 5;
219
220 if (__builtin_constant_p(nr))
221 nr &= 0x1f;
222
223 bitops_lock(flags);
224
225 temp = *m;
226 *m = temp ^ (1UL << nr);
227
228 bitops_unlock(flags);
229}
230
231static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
232{
233 unsigned long old, flags;
234 m += nr >> 5;
235
236 if (__builtin_constant_p(nr))
237 nr &= 0x1f;
238
239 bitops_lock(flags);
240
241 old = *m;
242 *m = old | (1 << nr);
243
244 bitops_unlock(flags);
245
246 return (old & (1 << nr)) != 0;
247}
248
249static inline int
250test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
251{
252 unsigned long old, flags;
253 m += nr >> 5;
254
255 if (__builtin_constant_p(nr))
256 nr &= 0x1f;
257
258 bitops_lock(flags);
259
260 old = *m;
261 *m = old & ~(1 << nr);
262
263 bitops_unlock(flags);
264
265 return (old & (1 << nr)) != 0;
266}
267
268static inline int
269test_and_change_bit(unsigned long nr, volatile unsigned long *m)
270{
271 unsigned long old, flags;
272 m += nr >> 5;
273
274 if (__builtin_constant_p(nr))
275 nr &= 0x1f;
276
277 bitops_lock(flags);
278
279 old = *m;
280 *m = old ^ (1 << nr);
281
282 bitops_unlock(flags);
283
284 return (old & (1 << nr)) != 0;
285}
286
287#endif /* CONFIG_ARC_HAS_LLSC */
288
289/***************************************
290 * Non atomic variants
291 **************************************/
292
293static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
294{
295 unsigned long temp;
296 m += nr >> 5;
297
298 if (__builtin_constant_p(nr))
299 nr &= 0x1f;
300
301 temp = *m;
302 *m = temp | (1UL << nr);
303}
304
305static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
306{
307 unsigned long temp;
308 m += nr >> 5;
309
310 if (__builtin_constant_p(nr))
311 nr &= 0x1f;
312
313 temp = *m;
314 *m = temp & ~(1UL << nr);
315}
316
317static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
318{
319 unsigned long temp;
320 m += nr >> 5;
321
322 if (__builtin_constant_p(nr))
323 nr &= 0x1f;
324
325 temp = *m;
326 *m = temp ^ (1UL << nr);
327}
328
329static inline int
330__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
331{
332 unsigned long old;
333 m += nr >> 5;
334
335 if (__builtin_constant_p(nr))
336 nr &= 0x1f;
337
338 old = *m;
339 *m = old | (1 << nr);
340
341 return (old & (1 << nr)) != 0;
342}
343
344static inline int
345__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
346{
347 unsigned long old;
348 m += nr >> 5;
349
350 if (__builtin_constant_p(nr))
351 nr &= 0x1f;
352
353 old = *m;
354 *m = old & ~(1 << nr);
355
356 return (old & (1 << nr)) != 0;
357}
358
359static inline int
360__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
361{
362 unsigned long old;
363 m += nr >> 5;
364
365 if (__builtin_constant_p(nr))
366 nr &= 0x1f;
367
368 old = *m;
369 *m = old ^ (1 << nr);
370
371 return (old & (1 << nr)) != 0;
372}
373
374/*
375 * This routine doesn't need to be atomic.
376 */
377static inline int
378__constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
379{
380 return ((1UL << (nr & 31)) &
381 (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
382}
383
384static inline int
385__test_bit(unsigned int nr, const volatile unsigned long *addr)
386{
387 unsigned long mask;
388
389 addr += nr >> 5;
390
391 /* ARC700 only considers 5 bits in bit-fiddling insn */
392 mask = 1 << nr;
393
394 return ((mask & *addr) != 0);
395}
396
397#define test_bit(nr, addr) (__builtin_constant_p(nr) ? \
398 __constant_test_bit((nr), (addr)) : \
399 __test_bit((nr), (addr)))
400
401/*
402 * Count the number of leading zeroes, starting from the MSB
403 * Helper for fls( ) and friends
404 * This is a pure count, so the (1-32) / (0-31) conventions don't apply
405 * The result ranges from 0 to 32, based on the number of leading 0's:
406 * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF) = 0, clz(0) = 32, clz(1) = 31
407 */
408static inline __attribute__ ((const)) int clz(unsigned int x)
409{
410 unsigned int res;
411
412 __asm__ __volatile__(
413 " norm.f %0, %1 \n"
414 " mov.n %0, 0 \n"
415 " add.p %0, %0, 1 \n"
416 : "=r"(res)
417 : "r"(x)
418 : "cc");
419
420 return res;
421}
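/*
 * A portable C model of clz() above (sketch; the real version is the
 * single NORM insn plus the two conditional fixups):
 *
 *	int model_clz(unsigned int x)
 *	{
 *		int n = 0;
 *		if (!x)
 *			return 32;		// no bits set at all
 *		while (!(x & 0x80000000u)) {	// count leading zeroes
 *			x <<= 1;
 *			n++;
 *		}
 *		return n;
 *	}
 */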
422
423static inline int constant_fls(int x)
424{
425 int r = 32;
426
427 if (!x)
428 return 0;
429 if (!(x & 0xffff0000u)) {
430 x <<= 16;
431 r -= 16;
432 }
433 if (!(x & 0xff000000u)) {
434 x <<= 8;
435 r -= 8;
436 }
437 if (!(x & 0xf0000000u)) {
438 x <<= 4;
439 r -= 4;
440 }
441 if (!(x & 0xc0000000u)) {
442 x <<= 2;
443 r -= 2;
444 }
445 if (!(x & 0x80000000u)) {
446 x <<= 1;
447 r -= 1;
448 }
449 return r;
450}
451
452/*
453 * fls = Find Last Set in word
454 * @result: [1-32]
455 * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
456 */
457static inline __attribute__ ((const)) int fls(unsigned long x)
458{
459 if (__builtin_constant_p(x))
460 return constant_fls(x);
461
462 return 32 - clz(x);
463}
464
465/*
466 * __fls: Similar to fls, but zero based (0-31)
467 */
468static inline __attribute__ ((const)) int __fls(unsigned long x)
469{
470 if (!x)
471 return 0;
472 else
473 return fls(x) - 1;
474}
475
476/*
477 * ffs = Find First Set in word (LSB to MSB)
478 * @result: [1-32], 0 if all 0's
479 */
480#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
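/*
 * The "__t & -__t" above isolates the lowest set bit (worked example):
 * for x = 0b10100, -x = ...11101100 in two's complement, so x & -x =
 * 0b00100 and fls(0b00100) = 3 = ffs(x). For x = 0 the AND yields 0 and
 * fls(0) = 0, giving the documented "0 if all 0's" result for free.
 */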
481
482/*
483 * __ffs: Similar to ffs, but zero based (0-31)
484 */
485static inline __attribute__ ((const)) int __ffs(unsigned long word)
486{
487 if (!word)
488 return word;
489
490 return ffs(word) - 1;
491}
492
493/*
494 * ffz = Find First Zero in word.
495 * @return:[0-31], 32 if all 1's
496 */
497#define ffz(x) __ffs(~(x))
498
499/* TODO does this affect uni-processor code */
500#define smp_mb__before_clear_bit() barrier()
501#define smp_mb__after_clear_bit() barrier()
502
503#include <asm-generic/bitops/hweight.h>
504#include <asm-generic/bitops/fls64.h>
505#include <asm-generic/bitops/sched.h>
506#include <asm-generic/bitops/lock.h>
507
508#include <asm-generic/bitops/find.h>
509#include <asm-generic/bitops/le.h>
510#include <asm-generic/bitops/ext2-atomic-setbit.h>
511
512#endif /* !__ASSEMBLY__ */
513
514#endif /* __KERNEL__ */
515
516#endif
diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
new file mode 100644
index 000000000000..2ad8f9b1c54b
--- /dev/null
+++ b/arch/arc/include/asm/bug.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _ASM_ARC_BUG_H
10#define _ASM_ARC_BUG_H
11
12#ifndef __ASSEMBLY__
13
14#include <asm/ptrace.h>
15
16struct task_struct;
17
18void show_regs(struct pt_regs *regs);
19void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs);
20void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
21 unsigned long address, unsigned long cause_reg);
22void die(const char *str, struct pt_regs *regs, unsigned long address,
23 unsigned long cause_reg);
24
25#define BUG() do { \
26 dump_stack(); \
27 pr_warn("Kernel BUG in %s: %s: %d!\n", \
28 __FILE__, __func__, __LINE__); \
29} while (0)
30
31#define HAVE_ARCH_BUG
32
33#include <asm-generic/bug.h>
34
35#endif /* !__ASSEMBLY__ */
36
37#endif
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
new file mode 100644
index 000000000000..6632273861fd
--- /dev/null
+++ b/arch/arc/include/asm/cache.h
@@ -0,0 +1,75 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __ARC_ASM_CACHE_H
10#define __ARC_ASM_CACHE_H
11
12/* If cache line size is not configured, set up a default for the rest of the kernel */
13#ifndef CONFIG_ARC_CACHE_LINE_SHIFT
14#define L1_CACHE_SHIFT 6
15#else
16#define L1_CACHE_SHIFT CONFIG_ARC_CACHE_LINE_SHIFT
17#endif
18
19#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
20
21#define ARC_ICACHE_WAYS 2
22#define ARC_DCACHE_WAYS 4
23
24/* Helpers */
25#define ARC_ICACHE_LINE_LEN L1_CACHE_BYTES
26#define ARC_DCACHE_LINE_LEN L1_CACHE_BYTES
27
28#define ICACHE_LINE_MASK (~(ARC_ICACHE_LINE_LEN - 1))
29#define DCACHE_LINE_MASK (~(ARC_DCACHE_LINE_LEN - 1))
30
31#if ARC_ICACHE_LINE_LEN != ARC_DCACHE_LINE_LEN
32#error "Need to fix some code as I/D cache lines not same"
33#else
34#define is_not_cache_aligned(p) ((unsigned long)p & (~DCACHE_LINE_MASK))
35#endif
36
37#ifndef __ASSEMBLY__
38
39/* Uncached access macros */
40#define arc_read_uncached_32(ptr) \
41({ \
42 unsigned int __ret; \
43 __asm__ __volatile__( \
44 " ld.di %0, [%1] \n" \
45 : "=r"(__ret) \
46 : "r"(ptr)); \
47 __ret; \
48})
49
50#define arc_write_uncached_32(ptr, data)\
51({ \
52 __asm__ __volatile__( \
53 " st.di %0, [%1] \n" \
54 : \
55 : "r"(data), "r"(ptr)); \
56})
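/*
 * Typical use (sketch; 0xc0fc1000 is a made-up peripheral address in the
 * uncached region defined below): peek/poke a memory mapped device
 * register without going through the data cache.
 *
 *	volatile unsigned int *reg = (unsigned int *)0xc0fc1000;
 *	unsigned int stat = arc_read_uncached_32(reg);
 *	arc_write_uncached_32(reg, stat | 0x1);
 */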
57
58/* used to give SHMLBA a value to avoid Cache Aliasing */
59extern unsigned int ARC_shmlba;
60
61#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
62
63/*
64 * ARC700 doesn't cache any access in the top 1G (0xc000_0000 and above).
65 * Ideal for wiring up memory mapped peripherals, since we don't need to do
66 * explicit uncached accesses (LD.di/ST.di), hence more portable drivers
67 */
68#define ARC_UNCACHED_ADDR_SPACE 0xc0000000
69
70extern void arc_cache_init(void);
71extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
72extern void __init read_decode_cache_bcr(void);
73#endif
74
75#endif /* __ARC_ASM_CACHE_H */
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
new file mode 100644
index 000000000000..97ee96f26505
--- /dev/null
+++ b/arch/arc/include/asm/cacheflush.h
@@ -0,0 +1,67 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
9 * -flush_cache_dup_mm (fork)
10 * -likewise for flush_cache_mm (exit/execve)
11 * -likewise for flush_cache_{range,page} (munmap, exit, COW-break)
12 *
13 * vineetg: April 2008
14 * -Added a critical CacheLine flush to copy_to_user_page( ), without
15 * which gdbserver was not setting up breakpoints consistently
16 */
17
18#ifndef _ASM_CACHEFLUSH_H
19#define _ASM_CACHEFLUSH_H
20
21#include <linux/mm.h>
22
23void flush_cache_all(void);
24
25void flush_icache_range(unsigned long start, unsigned long end);
26void flush_icache_page(struct vm_area_struct *vma, struct page *page);
27void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr,
28 int len);
29
30#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
31
32void flush_dcache_page(struct page *page);
33
34void dma_cache_wback_inv(unsigned long start, unsigned long sz);
35void dma_cache_inv(unsigned long start, unsigned long sz);
36void dma_cache_wback(unsigned long start, unsigned long sz);
37
38#define flush_dcache_mmap_lock(mapping) do { } while (0)
39#define flush_dcache_mmap_unlock(mapping) do { } while (0)
40
41/* TBD: optimize this */
42#define flush_cache_vmap(start, end) flush_cache_all()
43#define flush_cache_vunmap(start, end) flush_cache_all()
44
45/*
46 * VM callbacks when entire/range of user-space V-P mappings are
47 * torn-down/get-invalidated
48 *
49 * Currently we don't support D$ aliasing configs for our VIPT caches,
50 * so these are NOPs, valid only for non-aliasing D$ VIPT configurations
51 */
52#define flush_cache_dup_mm(mm) /* called on fork */
53#define flush_cache_mm(mm) /* called on munmap/exit */
54#define flush_cache_range(mm, u_vstart, u_vend)
55#define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */
56
57#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
58do { \
59 memcpy(dst, src, len); \
60 if (vma->vm_flags & VM_EXEC) \
61 flush_icache_range_vaddr((unsigned long)(dst), vaddr, len);\
62} while (0)
63
64#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
65 memcpy(dst, src, len)
66
67#endif
diff --git a/arch/arc/include/asm/checksum.h b/arch/arc/include/asm/checksum.h
new file mode 100644
index 000000000000..10957298b7a3
--- /dev/null
+++ b/arch/arc/include/asm/checksum.h
@@ -0,0 +1,101 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Joern Rennecke <joern.rennecke@embecosm.com>: Jan 2012
9 * -Insn Scheduling improvements to csum core routines.
10 * = csum_fold( ) largely derived from ARM version.
11 * = ip_fast_csum( ) to have modulo scheduling
12 * -gcc 4.4.x broke networking. Alias analysis needed to be primed.
13 * worked around by adding memory clobber to ip_fast_csum( )
14 *
15 * vineetg: May 2010
16 * -Rewrote ip_fast_csum( ) and csum_fold( ) with fast inline asm
17 */
18
19#ifndef _ASM_ARC_CHECKSUM_H
20#define _ASM_ARC_CHECKSUM_H
21
22/*
23 * Fold a partial checksum
24 *
25 * The 2 half-words comprising the 32bit sum are added, any carry out of
26 * bit 15 is added back, and the final 16-bit result is inverted.
27 */
28static inline __sum16 csum_fold(__wsum s)
29{
30 unsigned r = s << 16 | s >> 16; /* ror */
31 s = ~s;
32 s -= r;
33 return s >> 16;
34}
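/*
 * Equivalent portable fold (sketch; "model_csum_fold" is a hypothetical
 * name): note that ~s - ror(s, 16) == ~(s + ror(s, 16)), so the three
 * lines above compute the same thing as the conventional:
 *
 *	static inline unsigned short model_csum_fold(unsigned int s)
 *	{
 *		s = (s & 0xffff) + (s >> 16);	// fold upper into lower
 *		s = (s & 0xffff) + (s >> 16);	// absorb the carry-out
 *		return (unsigned short)~s;	// one's complement
 *	}
 */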
35
36/*
37 * This is a version of ip_compute_csum() optimized for IP headers,
38 * which always checksum on 4 octet boundaries.
39 */
40static inline __sum16
41ip_fast_csum(const void *iph, unsigned int ihl)
42{
43 const void *ptr = iph;
44 unsigned int tmp, tmp2, sum;
45
46 __asm__(
47 " ld.ab %0, [%3, 4] \n"
48 " ld.ab %2, [%3, 4] \n"
49 " sub %1, %4, 2 \n"
50 " lsr.f lp_count, %1, 1 \n"
51 " bcc 0f \n"
52 " add.f %0, %0, %2 \n"
53 " ld.ab %2, [%3, 4] \n"
54 "0: lp 1f \n"
55 " ld.ab %1, [%3, 4] \n"
56 " adc.f %0, %0, %2 \n"
57 " ld.ab %2, [%3, 4] \n"
58 " adc.f %0, %0, %1 \n"
59 "1: adc.f %0, %0, %2 \n"
60 " add.cs %0,%0,1 \n"
61 : "=&r"(sum), "=r"(tmp), "=&r"(tmp2), "+&r" (ptr)
62 : "r"(ihl)
63 : "cc", "lp_count", "memory");
64
65 return csum_fold(sum);
66}
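/*
 * What the loop above computes, as a portable C sketch (illustration only;
 * the asm pipelines the loads and uses add-with-carry, and "model_ip_csum"
 * is a made-up name):
 *
 *	unsigned int model_ip_csum(const unsigned int *p, unsigned int ihl)
 *	{
 *		unsigned long long sum = 0;
 *		unsigned int i;
 *
 *		for (i = 0; i < ihl; i++)	// @ihl is in 32-bit words
 *			sum += p[i];
 *		sum = (sum & 0xffffffff) + (sum >> 32);	// fold carries
 *		sum = (sum & 0xffffffff) + (sum >> 32);
 *		return (unsigned int)sum;	// then csum_fold() as above
 *	}
 */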
67
68/*
69 * TCP pseudo Header is 12 bytes:
70 * SA [4], DA [4], zeroes [1], Proto[1], TCP Seg(hdr+data) Len [2]
71 */
72static inline __wsum
73csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
74 unsigned short proto, __wsum sum)
75{
76 __asm__ __volatile__(
77 " add.f %0, %0, %1 \n"
78 " adc.f %0, %0, %2 \n"
79 " adc.f %0, %0, %3 \n"
80 " adc.f %0, %0, %4 \n"
81 " adc %0, %0, 0 \n"
82 : "+&r"(sum)
83 : "r"(saddr), "r"(daddr),
84#ifdef CONFIG_CPU_BIG_ENDIAN
85 "r"(len),
86#else
87 "r"(len << 8),
88#endif
89 "r"(htons(proto))
90 : "cc");
91
92 return sum;
93}
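/*
 * Portable model of the adc chain above (sketch): the five quantities are
 * summed with end-around carry, i.e.
 *
 *	unsigned long long t = (unsigned long long)sum + saddr + daddr
 *				+ len_adj + htons(proto);
 *	sum = (unsigned int)t + (unsigned int)(t >> 32); // carries back in
 *
 * where len_adj (a name made up here) is len as-is (big-endian) or
 * len << 8 (little-endian), exactly as selected by the #ifdef above.
 */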
94
95#define csum_fold csum_fold
96#define ip_fast_csum ip_fast_csum
97#define csum_tcpudp_nofold csum_tcpudp_nofold
98
99#include <asm-generic/checksum.h>
100
101#endif /* _ASM_ARC_CHECKSUM_H */
diff --git a/arch/arc/include/asm/clk.h b/arch/arc/include/asm/clk.h
new file mode 100644
index 000000000000..bf9d29f5bd53
--- /dev/null
+++ b/arch/arc/include/asm/clk.h
@@ -0,0 +1,22 @@
1/*
2 * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _ASM_ARC_CLK_H
10#define _ASM_ARC_CLK_H
11
12/* Although we can't really hide core_freq, the accessor is still the better way */
13extern unsigned long core_freq;
14
15static inline unsigned long arc_get_core_freq(void)
16{
17 return core_freq;
18}
19
20extern int arc_set_core_freq(unsigned long);
21
22#endif
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..03cd6894855d
--- /dev/null
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -0,0 +1,143 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __ASM_ARC_CMPXCHG_H
10#define __ASM_ARC_CMPXCHG_H
11
12#include <linux/types.h>
13#include <asm/smp.h>
14
15#ifdef CONFIG_ARC_HAS_LLSC
16
17static inline unsigned long
18__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
19{
20 unsigned long prev;
21
22 __asm__ __volatile__(
23 "1: llock %0, [%1] \n"
24 " brne %0, %2, 2f \n"
25 " scond %3, [%1] \n"
26 " bnz 1b \n"
27 "2: \n"
28 : "=&r"(prev)
29 : "r"(ptr), "ir"(expected),
30 "r"(new) /* can't be "ir". scond can't take limm for "b" */
31 : "cc");
32
33 return prev;
34}
35
36#else
37
38static inline unsigned long
39__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
40{
41 unsigned long flags;
42 int prev;
43 volatile unsigned long *p = ptr;
44
45 atomic_ops_lock(flags);
46 prev = *p;
47 if (prev == expected)
48 *p = new;
49 atomic_ops_unlock(flags);
50 return prev;
51}
52
53#endif /* CONFIG_ARC_HAS_LLSC */
54
55#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
56 (unsigned long)(o), (unsigned long)(n)))
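/*
 * Typical caller pattern (illustrative sketch): optimistic
 * read-modify-write with retry, e.g. atomically OR-ing flags into a word:
 *
 *	unsigned long old, new;
 *	do {
 *		old = *p;			// snapshot current value
 *		new = old | flags;		// compute the update
 *	} while (cmpxchg(p, old, new) != old);	// lost a race: retry
 */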
57
58/*
59 * Since it is not supported natively, ARC cmpxchg() uses atomic_ops_lock
60 * (UP/SMP) just to guarantee semantics.
61 * atomic_cmpxchg() needs to use the same locks as its other atomic siblings,
62 * which also happen to be atomic_ops_lock.
63 *
64 * Thus, despite being semantically different, the implementation of
65 * atomic_cmpxchg() is the same as cmpxchg().
66 */
67#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
68
69
70/*
71 * xchg (reg with memory) based on "Native atomic" EX insn
72 */
73static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
74 int size)
75{
76 extern unsigned long __xchg_bad_pointer(void);
77
78 switch (size) {
79 case 4:
80 __asm__ __volatile__(
81 " ex %0, [%1] \n"
82 : "+r"(val)
83 : "r"(ptr)
84 : "memory");
85
86 return val;
87 }
88 return __xchg_bad_pointer();
89}
90
91#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
92 sizeof(*(ptr))))
93
94/*
95 * On ARC700, the EX insn is inherently atomic, so by default a "vanilla"
96 * xchg() needs no locking. However there's a quirk.
97 * ARC lacks a native CMPXCHG, which is thus emulated (see above) using
98 * external locking - incidentally it "reuses" the same atomic_ops_lock
99 * used by the atomic APIs. Now, llist code uses cmpxchg() and xchg() on the
100 * same data, so xchg() must follow the same serializing rules, and thus ends up using atomic_ops_lock as well.
101 *
102 * This however is only relevant if SMP and/or ARC lacks LLSC
103 * if (UP or LLSC)
104 * xchg doesn't need serialization
105 * else <==> !(UP or LLSC) <==> (!UP and !LLSC) <==> (SMP and !LLSC)
106 * xchg needs serialization
107 */
108
109#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
110
111#define xchg(ptr, with) \
112({ \
113 unsigned long flags; \
114 typeof(*(ptr)) old_val; \
115 \
116 atomic_ops_lock(flags); \
117 old_val = _xchg(ptr, with); \
118 atomic_ops_unlock(flags); \
119 old_val; \
120})
121
122#else
123
124#define xchg(ptr, with) _xchg(ptr, with)
125
126#endif
127
128/*
129 * "atomic" variant of xchg()
130 * REQ: It needs to follow the same serialization rules as other atomic_xxx()
131 * Since xchg() doesn't always do that, it would seem that the following
132 * definition is incorrect. But here's the rationale:
133 * SMP : Even xchg() takes the atomic_ops_lock, so OK.
134 * LLSC: atomic_ops_lock is not relevant at all (even if SMP, since LLSC
135 *       is natively "SMP safe", no serialization required).
136 * UP  : other atomics disable IRQs, so there's no way an atomic_xchg() in
137 *       a different context could clobber them. atomic_xchg() itself is a
138 *       single insn, so it can't be clobbered by others. Thus no
139 *       serialization is required when atomic_xchg is involved.
140 */
141#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
142
143#endif
diff --git a/arch/arc/include/asm/current.h b/arch/arc/include/asm/current.h
new file mode 100644
index 000000000000..87b918585c4a
--- /dev/null
+++ b/arch/arc/include/asm/current.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Vineetg: May 16th, 2008
9 * - Current macro is now implemented as "global register" r25
10 */
11
12#ifndef _ASM_ARC_CURRENT_H
13#define _ASM_ARC_CURRENT_H
14
15#ifdef __KERNEL__
16
17#ifndef __ASSEMBLY__
18
19#ifdef CONFIG_ARC_CURR_IN_REG
20
21register struct task_struct *curr_arc asm("r25");
22#define current (curr_arc)
23
24#else
25#include <asm-generic/current.h>
26#endif /* ! CONFIG_ARC_CURR_IN_REG */
27
28#endif /* ! __ASSEMBLY__ */
29
30#endif /* __KERNEL__ */
31
32#endif /* _ASM_ARC_CURRENT_H */
diff --git a/arch/arc/include/asm/defines.h b/arch/arc/include/asm/defines.h
new file mode 100644
index 000000000000..6097bb439cc5
--- /dev/null
+++ b/arch/arc/include/asm/defines.h
@@ -0,0 +1,56 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __ARC_ASM_DEFINES_H__
10#define __ARC_ASM_DEFINES_H__
11
12#if defined(CONFIG_ARC_MMU_V1)
13#define CONFIG_ARC_MMU_VER 1
14#elif defined(CONFIG_ARC_MMU_V2)
15#define CONFIG_ARC_MMU_VER 2
16#elif defined(CONFIG_ARC_MMU_V3)
17#define CONFIG_ARC_MMU_VER 3
18#endif
19
20#ifdef CONFIG_ARC_HAS_LLSC
21#define __CONFIG_ARC_HAS_LLSC_VAL 1
22#else
23#define __CONFIG_ARC_HAS_LLSC_VAL 0
24#endif
25
26#ifdef CONFIG_ARC_HAS_SWAPE
27#define __CONFIG_ARC_HAS_SWAPE_VAL 1
28#else
29#define __CONFIG_ARC_HAS_SWAPE_VAL 0
30#endif
31
32#ifdef CONFIG_ARC_HAS_RTSC
33#define __CONFIG_ARC_HAS_RTSC_VAL 1
34#else
35#define __CONFIG_ARC_HAS_RTSC_VAL 0
36#endif
37
38#ifdef CONFIG_ARC_MMU_SASID
39#define __CONFIG_ARC_MMU_SASID_VAL 1
40#else
41#define __CONFIG_ARC_MMU_SASID_VAL 0
42#endif
43
44#ifdef CONFIG_ARC_HAS_ICACHE
45#define __CONFIG_ARC_HAS_ICACHE 1
46#else
47#define __CONFIG_ARC_HAS_ICACHE 0
48#endif
49
50#ifdef CONFIG_ARC_HAS_DCACHE
51#define __CONFIG_ARC_HAS_DCACHE 1
52#else
53#define __CONFIG_ARC_HAS_DCACHE 0
54#endif
55
56#endif /* __ARC_ASM_DEFINES_H__ */
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
new file mode 100644
index 000000000000..442ce5d0f709
--- /dev/null
+++ b/arch/arc/include/asm/delay.h
@@ -0,0 +1,68 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Delay routines using pre computed loops_per_jiffy value.
9 *
10 * vineetg: Feb 2012
11 * -Rewrote in "C" to avoid dealing with availability of H/w MPY
12 * -Also reduced the num of MPY operations from 3 to 2
13 *
14 * Amit Bhor: Codito Technologies 2004
15 */
16
17#ifndef __ASM_ARC_UDELAY_H
18#define __ASM_ARC_UDELAY_H
19
20#include <asm/param.h> /* HZ */
21
22static inline void __delay(unsigned long loops)
23{
24 __asm__ __volatile__(
25 "1: sub.f %0, %0, 1 \n"
26 " jpnz 1b \n"
27 : "+r"(loops)
28 :
29 : "cc");
30}
31
32extern void __bad_udelay(void);
33
34/*
35 * Normal Math for computing loops in "N" usecs
36 * -we have precomputed @loops_per_jiffy
37 * -1 sec has HZ jiffies
38 * loops per "N" usecs = ((loops_per_jiffy * HZ / 1000000) * N)
39 *
40 * Approximate Division by multiplication:
41 * -Mathematically, if we multiply and divide a number by the same value,
42 * the result remains unchanged: in this case, we use 2^32
43 * -> (loops_per_N_usec * 2^32) / 2^32
44 * -> (((loops_per_jiffy * HZ / 1000000) * N) * 2^32) / 2^32
45 * -> (loops_per_jiffy * HZ * N * 4295) / 2^32 (since 2^32/10^6 ~= 4295)
46 *
47 * -Division by 2^32 is then simply a right shift by 32
48 * -We only need to ensure that the multiply per the above eqn happens in
49 * 64-bit precision (if the CPU doesn't support it, gcc can emulate it)
50 */
51
52static inline void __udelay(unsigned long usecs)
53{
54 unsigned long loops;
55
56 /* (long long) cast ensures 64 bit MPY - real or emulated
57 * HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
58 */
59 loops = ((long long)(usecs * 4295 * HZ) *
60 (long long)(loops_per_jiffy)) >> 32;
61
62 __delay(loops);
63}
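/*
 * Sanity check of the 4295 constant (worked example with hypothetical
 * numbers): 2^32 / 10^6 = 4294.97..., so "* 4295 >> 32" approximates
 * "/ 1000000". With HZ = 100, loops_per_jiffy = 500000, usecs = 123:
 *	exact : 500000 * 100 * 123 / 1000000 = 6150 loops
 *	above : ((123 * 4295 * 100) * 500000ULL) >> 32 = 6150 loops
 */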
64
65#define udelay(n) (__builtin_constant_p(n) ? ((n) > 20000 ? __bad_udelay() \
66 : __udelay(n)) : __udelay(n))
67
68#endif /* __ASM_ARC_UDELAY_H */
diff --git a/arch/arc/include/asm/disasm.h b/arch/arc/include/asm/disasm.h
new file mode 100644
index 000000000000..f1cce3d059a1
--- /dev/null
+++ b/arch/arc/include/asm/disasm.h
@@ -0,0 +1,116 @@
1/*
2 * several functions that help interpret ARC instructions
3 * used for unaligned accesses, kprobes and kgdb
4 *
5 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef __ARC_DISASM_H__
13#define __ARC_DISASM_H__
14
15enum {
16 op_Bcc = 0, op_BLcc = 1, op_LD = 2, op_ST = 3, op_MAJOR_4 = 4,
17 op_MAJOR_5 = 5, op_LD_ADD = 12, op_ADD_SUB_SHIFT = 13,
18 op_ADD_MOV_CMP = 14, op_S = 15, op_LD_S = 16, op_LDB_S = 17,
19 op_LDW_S = 18, op_LDWX_S = 19, op_ST_S = 20, op_STB_S = 21,
20 op_STW_S = 22, op_Su5 = 23, op_SP = 24, op_GP = 25,
21 op_Pcl = 26, op_MOV_S = 27, op_ADD_CMP = 28, op_BR_S = 29,
22 op_B_S = 30, op_BL_S = 31
23};
24
25enum flow {
26 noflow,
27 direct_jump,
28 direct_call,
29 indirect_jump,
30 indirect_call,
31 invalid_instr
32};
33
34#define IS_BIT(word, n) ((word) & (1<<n))
35#define BITS(word, s, e) (((word) >> (s)) & (~((-2) << ((e) - (s)))))
36
37#define MAJOR_OPCODE(word) (BITS((word), 27, 31))
38#define MINOR_OPCODE(word) (BITS((word), 16, 21))
39#define FIELD_A(word) (BITS((word), 0, 5))
40#define FIELD_B(word) ((BITS((word), 12, 14)<<3) | \
41 (BITS((word), 24, 26)))
42#define FIELD_C(word) (BITS((word), 6, 11))
43#define FIELD_u6(word) FIELD_C(word)
44#define FIELD_s12(word) sign_extend(((BITS((word), 0, 5) << 6) | \
45 BITS((word), 6, 11)), 12)
46
47/* note that for BL/BRcc these two macros need another AND statement to mask
48 * out bit 1 (to make the result a multiple of 4) */
49#define FIELD_s9(word) sign_extend(((BITS(word, 15, 15) << 8) | \
50 BITS(word, 16, 23)), 9)
51#define FIELD_s21(word) sign_extend(((BITS(word, 6, 15) << 11) | \
52 (BITS(word, 17, 26) << 1)), 21)
53#define FIELD_s25(word) sign_extend(((BITS(word, 0, 3) << 21) | \
54 (BITS(word, 6, 15) << 11) | \
55 (BITS(word, 17, 26) << 1)), 25)
56
57/* note: these operate on 16 bits! */
58#define FIELD_S_A(word) ((BITS((word), 2, 2)<<3) | BITS((word), 0, 2))
59#define FIELD_S_B(word) ((BITS((word), 10, 10)<<3) | \
60 BITS((word), 8, 10))
61#define FIELD_S_C(word) ((BITS((word), 7, 7)<<3) | BITS((word), 5, 7))
62#define FIELD_S_H(word) ((BITS((word), 0, 2)<<3) | BITS((word), 5, 8))
63#define FIELD_S_u5(word) (BITS((word), 0, 4))
64#define FIELD_S_u6(word) (BITS((word), 0, 4) << 1)
65#define FIELD_S_u7(word) (BITS((word), 0, 4) << 2)
66#define FIELD_S_u10(word) (BITS((word), 0, 7) << 2)
67#define FIELD_S_s7(word) sign_extend(BITS((word), 0, 5) << 1, 9)
68#define FIELD_S_s8(word) sign_extend(BITS((word), 0, 7) << 1, 9)
69#define FIELD_S_s9(word) sign_extend(BITS((word), 0, 8), 9)
70#define FIELD_S_s10(word) sign_extend(BITS((word), 0, 8) << 1, 10)
71#define FIELD_S_s11(word) sign_extend(BITS((word), 0, 8) << 2, 11)
72#define FIELD_S_s13(word) sign_extend(BITS((word), 0, 10) << 2, 13)
73
74#define STATUS32_L 0x00000100
75#define REG_LIMM 62
76
77struct disasm_state {
78 /* generic info */
79 unsigned long words[2];
80 int instr_len;
81 int major_opcode;
82 /* info for branch/jump */
83 int is_branch;
84 int target;
85 int delay_slot;
86 enum flow flow;
87 /* info for load/store */
88 int src1, src2, src3, dest, wb_reg;
89 int zz, aa, x, pref, di;
90 int fault, write;
91};
92
93static inline int sign_extend(int value, int bits)
94{
95 if (IS_BIT(value, (bits - 1)))
96 value |= (0xffffffff << bits);
97
98 return value;
99}
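/*
 * Worked example (illustration): decoding an s12 operand of word w,
 *	BITS(w, 0, 5) masks with ~((-2) << 5) = 0x3f, i.e. bits 5..0
 *	FIELD_s12(w) glues the two 6-bit halves, then sign-extends:
 *	sign_extend(0xFFF, 12) sees bit 11 set --> 0xFFFFFFFF == -1
 */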
100
101static inline int is_short_instr(unsigned long addr)
102{
103 uint16_t word = *((uint16_t *)addr);
104 int opcode = (word >> 11) & 0x1F;
105 return (opcode >= 0x0B);
106}
107
108void disasm_instr(unsigned long addr, struct disasm_state *state,
109 int userspace, struct pt_regs *regs, struct callee_regs *cregs);
110int disasm_next_pc(unsigned long pc, struct pt_regs *regs, struct callee_regs
111 *cregs, unsigned long *fall_thru, unsigned long *target);
112long get_reg(int reg, struct pt_regs *regs, struct callee_regs *cregs);
113void set_reg(int reg, long val, struct pt_regs *regs,
114 struct callee_regs *cregs);
115
116#endif /* __ARC_DISASM_H__ */
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..31f77aec0823
--- /dev/null
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -0,0 +1,221 @@
1/*
2 * DMA Mapping glue for ARC
3 *
4 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef ASM_ARC_DMA_MAPPING_H
12#define ASM_ARC_DMA_MAPPING_H
13
14#include <asm-generic/dma-coherent.h>
15#include <asm/cacheflush.h>
16
17#ifndef CONFIG_ARC_PLAT_NEEDS_CPU_TO_DMA
18/*
19 * The dma_map_* APIs take cpu addresses, i.e. kernel logical addresses in
20 * the untranslated (0x8000_0000 based) address space. The dma address (bus
21 * addr) ideally needs to be 0x0000_0000 based, hence these glue routines.
22 * However, given that intermediate bus bridges can ignore the high bit, we
23 * can do with these routines being no-ops.
24 * If a platform/device comes up which strictly requires a 0 based bus addr
25 * (e.g. the AHB-PCI bridge on the Angel4 board), it can provide its own versions
26 */
27#define plat_dma_addr_to_kernel(dev, addr) ((unsigned long)(addr))
28#define plat_kernel_addr_to_dma(dev, ptr) ((dma_addr_t)(ptr))
29
30#else
31#include <plat/dma_addr.h>
32#endif
33
34void *dma_alloc_noncoherent(struct device *dev, size_t size,
35 dma_addr_t *dma_handle, gfp_t gfp);
36
37void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
38 dma_addr_t dma_handle);
39
40void *dma_alloc_coherent(struct device *dev, size_t size,
41 dma_addr_t *dma_handle, gfp_t gfp);
42
43void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
44 dma_addr_t dma_handle);
45
46/* drivers/base/dma-mapping.c */
47extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
48 void *cpu_addr, dma_addr_t dma_addr, size_t size);
49extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
50 void *cpu_addr, dma_addr_t dma_addr,
51 size_t size);
52
53#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
54#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
55
56/*
57 * streaming DMA Mapping API...
58 * The CPU accesses the page via its normal paddr, so it needs to be
59 * explicitly made consistent before each use
60 */
61
62static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size,
63 enum dma_data_direction dir)
64{
65 switch (dir) {
66 case DMA_FROM_DEVICE:
67 dma_cache_inv(paddr, size);
68 break;
69 case DMA_TO_DEVICE:
70 dma_cache_wback(paddr, size);
71 break;
72 case DMA_BIDIRECTIONAL:
73 dma_cache_wback_inv(paddr, size);
74 break;
75 default:
76 pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
77 }
78}
79
80void __arc_dma_cache_sync(unsigned long paddr, size_t size,
81 enum dma_data_direction dir);
82
83#define _dma_cache_sync(addr, sz, dir) \
84do { \
85 if (__builtin_constant_p(dir)) \
86 __inline_dma_cache_sync(addr, sz, dir); \
87 else \
88 __arc_dma_cache_sync(addr, sz, dir); \
89} \
90while (0)
91
92static inline dma_addr_t
93dma_map_single(struct device *dev, void *cpu_addr, size_t size,
94 enum dma_data_direction dir)
95{
96 _dma_cache_sync((unsigned long)cpu_addr, size, dir);
97 return plat_kernel_addr_to_dma(dev, cpu_addr);
98}
99
100static inline void
101dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
102 size_t size, enum dma_data_direction dir)
103{
104}
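/*
 * Typical streaming usage by a driver (sketch, hypothetical device):
 *
 *	dma_addr_t h = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	// ...hand 'h' to the device and wait for the transfer...
 *	dma_unmap_single(dev, h, len, DMA_TO_DEVICE);
 *
 * On this port, map is a cache sync plus address translation and unmap is
 * empty, but drivers must still pair them to stay portable.
 */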
105
106static inline dma_addr_t
107dma_map_page(struct device *dev, struct page *page,
108 unsigned long offset, size_t size,
109 enum dma_data_direction dir)
110{
111 unsigned long paddr = page_to_phys(page) + offset;
112 return dma_map_single(dev, (void *)paddr, size, dir);
113}
114
115static inline void
116dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
117 size_t size, enum dma_data_direction dir)
118{
119}
120
121static inline int
122dma_map_sg(struct device *dev, struct scatterlist *sg,
123 int nents, enum dma_data_direction dir)
124{
125 struct scatterlist *s;
126 int i;
127
128 for_each_sg(sg, s, nents, i)
129 sg->dma_address = dma_map_page(dev, sg_page(s), s->offset,
130 s->length, dir);
131
132 return nents;
133}
134
135static inline void
136dma_unmap_sg(struct device *dev, struct scatterlist *sg,
137 int nents, enum dma_data_direction dir)
138{
139 struct scatterlist *s;
140 int i;
141
142 for_each_sg(sg, s, nents, i)
143 dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
144}
145
146static inline void
147dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
148 size_t size, enum dma_data_direction dir)
149{
150 _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
151 DMA_FROM_DEVICE);
152}
153
154static inline void
155dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
156 size_t size, enum dma_data_direction dir)
157{
158 _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
159 DMA_TO_DEVICE);
160}
161
162static inline void
163dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
164 unsigned long offset, size_t size,
165 enum dma_data_direction direction)
166{
167 _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
168 size, DMA_FROM_DEVICE);
169}
170
171static inline void
172dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
173 unsigned long offset, size_t size,
174 enum dma_data_direction direction)
175{
176 _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
177 size, DMA_TO_DEVICE);
178}
179
180static inline void
181dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
182 enum dma_data_direction dir)
183{
184 int i;
185
186 for (i = 0; i < nelems; i++, sg++)
187 _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
188}
189
190static inline void
191dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
192 enum dma_data_direction dir)
193{
194 int i;
195
196 for (i = 0; i < nelems; i++, sg++)
197 _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
198}
199
200static inline int dma_supported(struct device *dev, u64 dma_mask)
201{
202 /* Support 32 bit DMA mask exclusively */
203 return dma_mask == DMA_BIT_MASK(32);
204}
205
206static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
207{
208 return 0;
209}
210
211static inline int dma_set_mask(struct device *dev, u64 dma_mask)
212{
213 if (!dev->dma_mask || !dma_supported(dev, dma_mask))
214 return -EIO;
215
216 *dev->dma_mask = dma_mask;
217
218 return 0;
219}
220
221#endif
diff --git a/arch/arc/include/asm/dma.h b/arch/arc/include/asm/dma.h
new file mode 100644
index 000000000000..ca7c45181de9
--- /dev/null
+++ b/arch/arc/include/asm/dma.h
@@ -0,0 +1,14 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef ASM_ARC_DMA_H
10#define ASM_ARC_DMA_H
11
12#define MAX_DMA_ADDRESS 0xC0000000
13
14#endif
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
new file mode 100644
index 000000000000..f4c8d36ebecb
--- /dev/null
+++ b/arch/arc/include/asm/elf.h
@@ -0,0 +1,78 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __ASM_ARC_ELF_H
10#define __ASM_ARC_ELF_H
11
12#include <linux/types.h>
13#include <uapi/asm/elf.h>
14
15/* These ELF defines belong to uapi but libc elf.h already defines them */
16#define EM_ARCOMPACT 93
17
18/* ARC Relocations (kernel Modules only) */
19#define R_ARC_32 0x4
20#define R_ARC_32_ME 0x1B
21#define R_ARC_S25H_PCREL 0x10
22#define R_ARC_S25W_PCREL 0x11
23
24/*to set parameters in the core dumps */
25#define ELF_ARCH EM_ARCOMPACT
26#define ELF_CLASS ELFCLASS32
27
28#ifdef CONFIG_CPU_BIG_ENDIAN
29#define ELF_DATA ELFDATA2MSB
30#else
31#define ELF_DATA ELFDATA2LSB
32#endif
33
34/*
35 * To ensure that
36 * -we don't load something for the wrong architecture.
37 * -The userspace is using the correct syscall ABI
38 */
39struct elf32_hdr;
40extern int elf_check_arch(const struct elf32_hdr *);
41#define elf_check_arch elf_check_arch
42
43#define CORE_DUMP_USE_REGSET
44
45#define ELF_EXEC_PAGESIZE PAGE_SIZE
46
47/*
48 * This is the location that an ET_DYN program is loaded if exec'ed. Typical
49 * use of this is to invoke "./ld.so someprog" to test out a new version of
50 * the loader. We need to make sure that it is out of the way of the program
51 * that it will "exec", and that there is sufficient room for the brk.
52 */
53#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
54
55/*
56 * When the program starts, a1 contains a pointer to a function to be
57 * registered with atexit, as per the SVR4 ABI. A value of 0 means we
58 * have no such handler.
59 */
60#define ELF_PLAT_INIT(_r, load_addr) ((_r)->r0 = 0)
61
62/*
63 * This yields a mask that user programs can use to figure out what
64 * instruction set this cpu supports.
65 */
66#define ELF_HWCAP (0)
67
68/*
69 * This yields a string that ld.so will use to load implementation
70 * specific libraries for optimization. This is more specific in
71 * intent than poking at uname or /proc/cpuinfo.
72 */
73#define ELF_PLATFORM (NULL)
74
75#define SET_PERSONALITY(ex) \
76 set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
77
78#endif
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
new file mode 100644
index 000000000000..23daa326fc9b
--- /dev/null
+++ b/arch/arc/include/asm/entry.h
@@ -0,0 +1,724 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
9 * Stack switching code can no longer rely on the fact that
10 * if we are NOT in user mode, the stack is already switched to kernel mode;
11 * e.g. an L2 IRQ may interrupt an L1 ISR which has not yet completed
12 * its prologue, including the stack switch from user mode
13 *
14 * Vineetg: Aug 28th 2008: Bug #94984
15 * -Zero Overhead Loop Context should be cleared when entering IRQ/Excp/Trap
16 * Normally CPU does this automatically, however when doing FAKE rtie,
17 * we also need to explicitly do this. The problem in macros
18 * FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
19 * was being "CLEARED" rather than "SET". Actually "SET" clears the ZOL context
20 *
21 * Vineetg: May 5th 2008
22 * -Modified CALLEE_REG save/restore macros to handle the fact that
23 * r25 contains the kernel current task ptr
24 * - Defined Stack Switching Macro to be reused in all intr/excp hdlrs
25 * - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
26 * address write-back load ld.ab instead of separate ld/add insns
27 *
28 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
29 */
30
31#ifndef __ASM_ARC_ENTRY_H
32#define __ASM_ARC_ENTRY_H
33
34#ifdef __ASSEMBLY__
35#include <asm/unistd.h> /* For NR_syscalls definition */
36#include <asm/asm-offsets.h>
37#include <asm/arcregs.h>
38#include <asm/ptrace.h>
39#include <asm/processor.h> /* For VMALLOC_START */
40#include <asm/thread_info.h> /* For THREAD_SIZE */
41
42/* Note on the LD/ST addr modes with addr reg wback
43 *
44 * LD.a same as LD.aw
45 *
46 * LD.a reg1, [reg2, x] => Pre Incr
47 * Eff Addr for load = [reg2 + x]
48 *
49 * LD.ab reg1, [reg2, x] => Post Incr
50 * Eff Addr for load = [reg2]
51 */
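/* In C terms (illustration only):
 *
 *	LD.a  r1, [r2, 4]  ==>  r2 += 4; r1 = *r2;	// pre increment
 *	LD.ab r1, [r2, 4]  ==>  r1 = *r2; r2 += 4;	// post increment
 */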
52
53/*--------------------------------------------------------------
54 * Save caller saved registers (scratch registers) ( r0 - r12 )
55 * Registers are pushed / popped in the order defined in struct ptregs
56 * in asm/ptrace.h
57 *-------------------------------------------------------------*/
58.macro SAVE_CALLER_SAVED
59 st.a r0, [sp, -4]
60 st.a r1, [sp, -4]
61 st.a r2, [sp, -4]
62 st.a r3, [sp, -4]
63 st.a r4, [sp, -4]
64 st.a r5, [sp, -4]
65 st.a r6, [sp, -4]
66 st.a r7, [sp, -4]
67 st.a r8, [sp, -4]
68 st.a r9, [sp, -4]
69 st.a r10, [sp, -4]
70 st.a r11, [sp, -4]
71 st.a r12, [sp, -4]
72.endm
73
74/*--------------------------------------------------------------
75 * Restore caller saved registers (scratch registers)
76 *-------------------------------------------------------------*/
77.macro RESTORE_CALLER_SAVED
78 ld.ab r12, [sp, 4]
79 ld.ab r11, [sp, 4]
80 ld.ab r10, [sp, 4]
81 ld.ab r9, [sp, 4]
82 ld.ab r8, [sp, 4]
83 ld.ab r7, [sp, 4]
84 ld.ab r6, [sp, 4]
85 ld.ab r5, [sp, 4]
86 ld.ab r4, [sp, 4]
87 ld.ab r3, [sp, 4]
88 ld.ab r2, [sp, 4]
89 ld.ab r1, [sp, 4]
90 ld.ab r0, [sp, 4]
91.endm
92
93
94/*--------------------------------------------------------------
95 * Save callee saved registers (non scratch registers) ( r13 - r25 )
96 * on kernel stack.
97 * User mode callee regs need to be saved in case of
98 * -fork and friends for replicating from parent to child
99 * -before going into do_signal( ) for ptrace/core-dump
100 * Special case handling is required for r25 in case it is used by kernel
101 * for caching the task ptr. Low level exception/ISR code saves user mode
102 * r25 into task->thread.user_r25, so it needs to be retrieved from there
103 * and saved onto the kernel stack with the rest of the callee reg-file
104 *-------------------------------------------------------------*/
105.macro SAVE_CALLEE_SAVED_USER
106 st.a r13, [sp, -4]
107 st.a r14, [sp, -4]
108 st.a r15, [sp, -4]
109 st.a r16, [sp, -4]
110 st.a r17, [sp, -4]
111 st.a r18, [sp, -4]
112 st.a r19, [sp, -4]
113 st.a r20, [sp, -4]
114 st.a r21, [sp, -4]
115 st.a r22, [sp, -4]
116 st.a r23, [sp, -4]
117 st.a r24, [sp, -4]
118
119#ifdef CONFIG_ARC_CURR_IN_REG
120 ; Retrieve orig r25 and save it on stack
121 ld r12, [r25, TASK_THREAD + THREAD_USER_R25]
122 st.a r12, [sp, -4]
123#else
124 st.a r25, [sp, -4]
125#endif
126
127 /* move up by 1 word to "create" callee_regs->"stack_place_holder" */
128 sub sp, sp, 4
129.endm
130
131/*--------------------------------------------------------------
132 * Save callee saved registers (non scratch registers) ( r13 - r25 )
133 * kernel mode callee regs need to be saved in case of a context switch
134 * If r25 is used for caching the task pointer then it need not be saved
135 * as it can be re-created from the current task global
136 *-------------------------------------------------------------*/
137.macro SAVE_CALLEE_SAVED_KERNEL
138 st.a r13, [sp, -4]
139 st.a r14, [sp, -4]
140 st.a r15, [sp, -4]
141 st.a r16, [sp, -4]
142 st.a r17, [sp, -4]
143 st.a r18, [sp, -4]
144 st.a r19, [sp, -4]
145 st.a r20, [sp, -4]
146 st.a r21, [sp, -4]
147 st.a r22, [sp, -4]
148 st.a r23, [sp, -4]
149 st.a r24, [sp, -4]
150#ifdef CONFIG_ARC_CURR_IN_REG
151 sub sp, sp, 8
152#else
153 st.a r25, [sp, -4]
154 sub sp, sp, 4
155#endif
156.endm
157
158/*--------------------------------------------------------------
159 * RESTORE_CALLEE_SAVED_KERNEL:
160 * Loads callee (non scratch) Reg File by popping from Kernel mode stack.
161 * This is reverse of SAVE_CALLEE_SAVED,
162 *
163 * NOTE:
164 * Ideally this should only be called in switch_to for loading
165 * switched-IN task's CALLEE Reg File.
166 * For all other cases RESTORE_CALLEE_SAVED_FAST must be used
167 * which simply pops the stack w/o touching regs.
168 *-------------------------------------------------------------*/
169.macro RESTORE_CALLEE_SAVED_KERNEL
170
171
172#ifdef CONFIG_ARC_CURR_IN_REG
173 add sp, sp, 8 /* skip callee_reg gutter and user r25 placeholder */
174#else
175 add sp, sp, 4 /* skip "callee_regs->stack_place_holder" */
176 ld.ab r25, [sp, 4]
177#endif
178
179 ld.ab r24, [sp, 4]
180 ld.ab r23, [sp, 4]
181 ld.ab r22, [sp, 4]
182 ld.ab r21, [sp, 4]
183 ld.ab r20, [sp, 4]
184 ld.ab r19, [sp, 4]
185 ld.ab r18, [sp, 4]
186 ld.ab r17, [sp, 4]
187 ld.ab r16, [sp, 4]
188 ld.ab r15, [sp, 4]
189 ld.ab r14, [sp, 4]
190 ld.ab r13, [sp, 4]
191
192.endm
193
194/*--------------------------------------------------------------
195 * RESTORE_CALLEE_SAVED_USER:
196 * This is called after do_signal where tracer might have changed callee regs
197 * thus we need to restore the reg file.
198 * Special case handling is required for r25 in case it is used by kernel
199 * for caching the task ptr. Ptrace would have modified the on-kernel-stack
200 * value of r25, which needs to be shoved back into task->thread.user_r25,
201 * from where the low level exception/ISR return code will retrieve it along
202 * with the rest of the callee reg-file.
203 *-------------------------------------------------------------*/
204.macro RESTORE_CALLEE_SAVED_USER
205
206 add sp, sp, 4 /* skip "callee_regs->stack_place_holder" */
207
208#ifdef CONFIG_ARC_CURR_IN_REG
209 ld.ab r12, [sp, 4]
210 st r12, [r25, TASK_THREAD + THREAD_USER_R25]
211#else
212 ld.ab r25, [sp, 4]
213#endif
214
215 ld.ab r24, [sp, 4]
216 ld.ab r23, [sp, 4]
217 ld.ab r22, [sp, 4]
218 ld.ab r21, [sp, 4]
219 ld.ab r20, [sp, 4]
220 ld.ab r19, [sp, 4]
221 ld.ab r18, [sp, 4]
222 ld.ab r17, [sp, 4]
223 ld.ab r16, [sp, 4]
224 ld.ab r15, [sp, 4]
225 ld.ab r14, [sp, 4]
226 ld.ab r13, [sp, 4]
227.endm
228
229/*--------------------------------------------------------------
230 * Super FAST Restore callee saved regs by simply re-adjusting SP
231 *-------------------------------------------------------------*/
232.macro DISCARD_CALLEE_SAVED_USER
233 add sp, sp, 14 * 4
234.endm
235
236/*--------------------------------------------------------------
237 * Restore User mode r25 saved in task_struct->thread.user_r25
238 *-------------------------------------------------------------*/
239.macro RESTORE_USER_R25
240 ld r25, [r25, TASK_THREAD + THREAD_USER_R25]
241.endm
242
243/*-------------------------------------------------------------
244 * given a tsk struct, get to the base of its kernel mode stack
245 * tsk->thread_info is really a PAGE; thread_info sits at its start while
246 * the stack starts at the page's end, growing down towards thread_info
247 *------------------------------------------------------------*/
248
249.macro GET_TSK_STACK_BASE tsk, out
250
251 /* Get task->thread_info (this is essentially start of a PAGE) */
252 ld \out, [\tsk, TASK_THREAD_INFO]
253
254 /* Go to end of page where stack begins (grows upwards) */
255 add2 \out, \out, (THREAD_SIZE - 4)/4 /* one word GUTTER */
256
257.endm
258
259/*--------------------------------------------------------------
260 * Switch to Kernel Mode stack if SP points to User Mode stack
261 *
262 * Entry : r9 contains pre-IRQ/exception/trap status32
263 * Exit : SP is set to kernel mode stack pointer
264 * If CURR_IN_REG, r25 set to "current" task pointer
265 * Clobbers: r9
266 *-------------------------------------------------------------*/
267
268.macro SWITCH_TO_KERNEL_STK
269
270 /* User Mode when this happened ? Yes: Proceed to switch stack */
271 bbit1 r9, STATUS_U_BIT, 88f
272
273 /* OK we were already in kernel mode when this event happened, thus can
274 * assume SP is kernel mode SP. _NO_ need to do any stack switching
275 */
276
277#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
278 /* However....
279 * If Level 2 Interrupts enabled, we may end up with a corner case:
280 * 1. User Task executing
281 * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
282 * 3. But before it could switch SP from USER to KERNEL stack
283 * a L2 IRQ "Interrupts" L1
284 * That way, although the L2 IRQ happened in Kernel mode, the stack is
285 * still not switched.
286 * To handle this, we may need to switch stack even if in kernel mode
287 * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
288 */
289 brlo sp, VMALLOC_START, 88f
290
291 /* TODO: vineetg:
292 * We need to be a bit more cautious here. What if a kernel bug in an
293 * L1 ISR caused SP to go whacko (some small value which looks like a
294 * USER stk) and then we take an L2 ISR.
295 * The above brlo alone would treat it as a valid L1-L2 scenario
296 * instead of shouting aloud.
297 * The only feasible way is to make sure this L2 happened in the
298 * L1 prologue ONLY, i.e. ilink2 is less than a pre-set marker in the
299 * L1 ISR before it switches stack
300 */
301
302#endif
303
304 /* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
305 * safe-keeping not really needed, but it keeps the epilogue code
306 * (SP restore) simpler/uniform.
307 */
308 b.d 77f
309
310 st.a sp, [sp, -12] ; Make room for orig_r0 and orig_r8
311
31288: /*------Intr/Excp happened in user mode, "switch" stack ------ */
313
314 GET_CURR_TASK_ON_CPU r9
315
316#ifdef CONFIG_ARC_CURR_IN_REG
317
318 /* If current task pointer cached in r25, time to
319 * -safekeep USER r25 in task->thread_struct->user_r25
320 * -load r25 with current task ptr
321 */
322 st.as r25, [r9, (TASK_THREAD + THREAD_USER_R25)/4]
323 mov r25, r9
324#endif
325
326 /* With current tsk in r9, get it's kernel mode stack base */
327 GET_TSK_STACK_BASE r9, r9
328
329#ifdef PT_REGS_CANARY
330 st 0xabcdabcd, [r9, 0]
331#endif
332
333 /* Save Pre Intr/Exception User SP on kernel stack */
334 st.a sp, [r9, -12] ; Make room for orig_r0 and orig_r8
335
336 /* CAUTION:
337 * SP should be set at the very end when we are done with everything
338 * In case of 2 levels of interrupt we depend on value of SP to assume
339 * that everything else is done (loading r25 etc)
340 */
341
342 /* set SP to point to kernel mode stack */
343 mov sp, r9
344
34577: /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */
346
347.endm
348
349/*------------------------------------------------------------
350 * "FAKE" a rtie to return from CPU Exception context
351 * This is to re-enable Exceptions within exception
352 * Look at EV_ProtV to see how this is actually used
353 *-------------------------------------------------------------*/
354
355.macro FAKE_RET_FROM_EXCPN reg
356
357 ld \reg, [sp, PT_status32]
358 bic \reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)
359 bset \reg, \reg, STATUS_L_BIT
360 sr \reg, [erstatus]
361 mov \reg, 55f
362 sr \reg, [eret]
363
364 rtie
36555:
366.endm
367
368/*
369 * @reg [OUT] &thread_info of "current"
370 */
371.macro GET_CURR_THR_INFO_FROM_SP reg
372 and \reg, sp, ~(THREAD_SIZE - 1)
373.endm
374
375/*
376 * @reg [OUT] thread_info->flags of "current"
377 */
378.macro GET_CURR_THR_INFO_FLAGS reg
379 GET_CURR_THR_INFO_FROM_SP \reg
380 ld \reg, [\reg, THREAD_INFO_FLAGS]
381.endm
382
383/*--------------------------------------------------------------
384 * For early Exception Prologue, a core reg is temporarily needed to
385 * code the rest of the prologue (stack switching). This is done by stashing
386 * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
387 *
388 * Before saving the full regfile - this reg is restored back, only
389 * to be saved again on kernel mode stack, as part of ptregs.
390 *-------------------------------------------------------------*/
391.macro EXCPN_PROLOG_FREEUP_REG reg
392#ifdef CONFIG_SMP
393 sr \reg, [ARC_REG_SCRATCH_DATA0]
394#else
395 st \reg, [@ex_saved_reg1]
396#endif
397.endm
398
399.macro EXCPN_PROLOG_RESTORE_REG reg
400#ifdef CONFIG_SMP
401 lr \reg, [ARC_REG_SCRATCH_DATA0]
402#else
403 ld \reg, [@ex_saved_reg1]
404#endif
405.endm
406
407/*--------------------------------------------------------------
408 * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
409 * Requires SP to be already switched to kernel mode Stack
410 * sp points to the next free element on the stack at exit of this macro.
411 * Registers are pushed / popped in the order defined in struct ptregs
412 * in asm/ptrace.h
413 * Note that syscalls are implemented via TRAP, which is also an exception
414 * from the CPU's point of view
415 *-------------------------------------------------------------*/
416.macro SAVE_ALL_EXCEPTION marker
417
418 st \marker, [sp, 8]
419 st r0, [sp, 4] /* orig_r0, needed only for sys calls */
420
421 /* Restore r9 used to code the early prologue */
422 EXCPN_PROLOG_RESTORE_REG r9
423
424 SAVE_CALLER_SAVED
425 st.a r26, [sp, -4] /* gp */
426 st.a fp, [sp, -4]
427 st.a blink, [sp, -4]
428 lr r9, [eret]
429 st.a r9, [sp, -4]
430 lr r9, [erstatus]
431 st.a r9, [sp, -4]
432 st.a lp_count, [sp, -4]
433 lr r9, [lp_end]
434 st.a r9, [sp, -4]
435 lr r9, [lp_start]
436 st.a r9, [sp, -4]
437 lr r9, [erbta]
438 st.a r9, [sp, -4]
439
440#ifdef PT_REGS_CANARY
441 mov r9, 0xdeadbeef
442 st r9, [sp, -4]
443#endif
444
445 /* move up by 1 word to "create" pt_regs->"stack_place_holder" */
446 sub sp, sp, 4
447.endm
448
449/*--------------------------------------------------------------
450 * Save scratch regs for exceptions
451 *-------------------------------------------------------------*/
452.macro SAVE_ALL_SYS
453 SAVE_ALL_EXCEPTION orig_r8_IS_EXCPN
454.endm
455
456/*--------------------------------------------------------------
457 * Save scratch regs for sys calls
458 *-------------------------------------------------------------*/
459.macro SAVE_ALL_TRAP
460 /*
461 * Setup pt_regs->orig_r8.
462 * Encode syscall number (r8) in upper short word of event type (r9)
463 * N.B. #1: This is already endian safe (see ptrace.h)
464 * #2: Only r9 can be used as scratch as it is already clobbered
465 * and its contents are no longer needed by the latter part
466 * of exception prologue
467 */
468 lsl r9, r8, 16
469 or r9, r9, orig_r8_IS_SCALL
470
471 SAVE_ALL_EXCEPTION r9
472.endm
473
474/*--------------------------------------------------------------
475 * Restore all registers used by system call or Exceptions
476 * SP should always be pointing to the next free stack element
477 * when entering this macro.
478 *
479 * NOTE:
480 *
481 * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
482 * for memory load operations. If used that way, interrupts are deferred
483 * by hardware and that is not good.
484 *-------------------------------------------------------------*/
485.macro RESTORE_ALL_SYS
486
487 add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
488
489 ld.ab r9, [sp, 4]
490 sr r9, [erbta]
491 ld.ab r9, [sp, 4]
492 sr r9, [lp_start]
493 ld.ab r9, [sp, 4]
494 sr r9, [lp_end]
495 ld.ab r9, [sp, 4]
496 mov lp_count, r9
497 ld.ab r9, [sp, 4]
498 sr r9, [erstatus]
499 ld.ab r9, [sp, 4]
500 sr r9, [eret]
501 ld.ab blink, [sp, 4]
502 ld.ab fp, [sp, 4]
503 ld.ab r26, [sp, 4] /* gp */
504 RESTORE_CALLER_SAVED
505
506 ld sp, [sp] /* restore original sp */
507 /* orig_r0 and orig_r8 skipped automatically */
508.endm
509
510
511/*--------------------------------------------------------------
512 * Save all registers used by interrupt handlers.
513 *-------------------------------------------------------------*/
514.macro SAVE_ALL_INT1
515
516	/* restore original r9, saved in int1_saved_reg
517	* It will be saved on the stack by macro: SAVE_CALLER_SAVED
518 */
519#ifdef CONFIG_SMP
520 lr r9, [ARC_REG_SCRATCH_DATA0]
521#else
522 ld r9, [@int1_saved_reg]
523#endif
524
525 /* now we are ready to save the remaining context :) */
526 st orig_r8_IS_IRQ1, [sp, 8] /* Event Type */
527 st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
528 SAVE_CALLER_SAVED
529 st.a r26, [sp, -4] /* gp */
530 st.a fp, [sp, -4]
531 st.a blink, [sp, -4]
532 st.a ilink1, [sp, -4]
533 lr r9, [status32_l1]
534 st.a r9, [sp, -4]
535 st.a lp_count, [sp, -4]
536 lr r9, [lp_end]
537 st.a r9, [sp, -4]
538 lr r9, [lp_start]
539 st.a r9, [sp, -4]
540 lr r9, [bta_l1]
541 st.a r9, [sp, -4]
542
543#ifdef PT_REGS_CANARY
544 mov r9, 0xdeadbee1
545 st r9, [sp, -4]
546#endif
547 /* move up by 1 word to "create" pt_regs->"stack_place_holder" */
548 sub sp, sp, 4
549.endm
550
551.macro SAVE_ALL_INT2
552
553	/* TODO-vineetg: in SMP we can't use a global, nor can we use
554	*  SCRATCH0 as we do for int1, because while int1 is using
555	*  it, int2 can come in
556	*/
557	/* restore original r9, saved in int2_saved_reg */
558 ld r9, [@int2_saved_reg]
559
560 /* now we are ready to save the remaining context :) */
561 st orig_r8_IS_IRQ2, [sp, 8] /* Event Type */
562 st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
563 SAVE_CALLER_SAVED
564 st.a r26, [sp, -4] /* gp */
565 st.a fp, [sp, -4]
566 st.a blink, [sp, -4]
567 st.a ilink2, [sp, -4]
568 lr r9, [status32_l2]
569 st.a r9, [sp, -4]
570 st.a lp_count, [sp, -4]
571 lr r9, [lp_end]
572 st.a r9, [sp, -4]
573 lr r9, [lp_start]
574 st.a r9, [sp, -4]
575 lr r9, [bta_l2]
576 st.a r9, [sp, -4]
577
578#ifdef PT_REGS_CANARY
579 mov r9, 0xdeadbee2
580 st r9, [sp, -4]
581#endif
582
583 /* move up by 1 word to "create" pt_regs->"stack_place_holder" */
584 sub sp, sp, 4
585.endm
586
587/*--------------------------------------------------------------
588 * Restore all registers used by interrupt handlers.
589 *
590 * NOTE:
591 *
592 * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
593 * for memory load operations. If used that way, interrupts are deferred
594 * by hardware, which is undesirable.
595 *-------------------------------------------------------------*/
596
597.macro RESTORE_ALL_INT1
598 add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
599
600 ld.ab r9, [sp, 4] /* Actual reg file */
601 sr r9, [bta_l1]
602 ld.ab r9, [sp, 4]
603 sr r9, [lp_start]
604 ld.ab r9, [sp, 4]
605 sr r9, [lp_end]
606 ld.ab r9, [sp, 4]
607 mov lp_count, r9
608 ld.ab r9, [sp, 4]
609 sr r9, [status32_l1]
610 ld.ab r9, [sp, 4]
611 mov ilink1, r9
612 ld.ab blink, [sp, 4]
613 ld.ab fp, [sp, 4]
614 ld.ab r26, [sp, 4] /* gp */
615 RESTORE_CALLER_SAVED
616
617 ld sp, [sp] /* restore original sp */
618 /* orig_r0 and orig_r8 skipped automatically */
619.endm
620
621.macro RESTORE_ALL_INT2
622 add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
623
624 ld.ab r9, [sp, 4]
625 sr r9, [bta_l2]
626 ld.ab r9, [sp, 4]
627 sr r9, [lp_start]
628 ld.ab r9, [sp, 4]
629 sr r9, [lp_end]
630 ld.ab r9, [sp, 4]
631 mov lp_count, r9
632 ld.ab r9, [sp, 4]
633 sr r9, [status32_l2]
634 ld.ab r9, [sp, 4]
635 mov ilink2, r9
636 ld.ab blink, [sp, 4]
637 ld.ab fp, [sp, 4]
638 ld.ab r26, [sp, 4] /* gp */
639 RESTORE_CALLER_SAVED
640
641 ld sp, [sp] /* restore original sp */
642 /* orig_r0 and orig_r8 skipped automatically */
643
644.endm
645
646
647/* Get CPU-ID of this core */
648.macro GET_CPU_ID reg
649 lr \reg, [identity]
650 lsr \reg, \reg, 8
651 bmsk \reg, \reg, 7
652.endm
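For reference, the same IDENTITY field extraction could be sketched in C as below (arc_cpu_id() is an illustrative name; it assumes an IDENTITY aux-register index such as AUX_IDENTITY is available to read_aux_reg() via asm/arcregs.h):

	/* IDENTITY[15:8] holds the CPU id; bmsk 7 keeps the low 8 bits */
	static inline unsigned int arc_cpu_id(void)
	{
		unsigned int ident = read_aux_reg(AUX_IDENTITY);	/* assumed index */

		return (ident >> 8) & 0xff;	/* lsr by 8, then mask, as above */
	}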
653
654#ifdef CONFIG_SMP
655
656/*-------------------------------------------------
657 * Retrieve the current running task on this CPU
658 * 1. Determine curr CPU id.
659 * 2. Use it to index into _current_task[ ]
660 */
661.macro GET_CURR_TASK_ON_CPU reg
662 GET_CPU_ID \reg
663 ld.as \reg, [@_current_task, \reg]
664.endm
665
666/*-------------------------------------------------
667 * Save a new task as the "current" task on this CPU
668 * 1. Determine curr CPU id.
669 * 2. Use it to index into _current_task[ ]
670 *
671 * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
672 * because ST r0, [r1, offset] can ONLY have s9 @offset
673 * while LD can take s9 (4 byte insn) or LIMM (8 byte insn)
674 */
675
676.macro SET_CURR_TASK_ON_CPU tsk, tmp
677 GET_CPU_ID \tmp
678 add2 \tmp, @_current_task, \tmp
679 st \tsk, [\tmp]
680#ifdef CONFIG_ARC_CURR_IN_REG
681 mov r25, \tsk
682#endif
683
684.endm
685
686
687#else /* Uniprocessor implementation of macros */
688
689.macro GET_CURR_TASK_ON_CPU reg
690 ld \reg, [@_current_task]
691.endm
692
693.macro SET_CURR_TASK_ON_CPU tsk, tmp
694 st \tsk, [@_current_task]
695#ifdef CONFIG_ARC_CURR_IN_REG
696 mov r25, \tsk
697#endif
698.endm
699
700#endif /* SMP / UNI */
701
702/* ------------------------------------------------------------------
703 * Get a ptr to the field at offset @off within the current task's task_struct
704 * -Uses r25 as the current task ptr if that option is enabled
705 */
706
707#ifdef CONFIG_ARC_CURR_IN_REG
708
709.macro GET_CURR_TASK_FIELD_PTR off, reg
710 add \reg, r25, \off
711.endm
712
713#else
714
715.macro GET_CURR_TASK_FIELD_PTR off, reg
716 GET_CURR_TASK_ON_CPU \reg
717 add \reg, \reg, \off
718.endm
719
720#endif /* CONFIG_ARC_CURR_IN_REG */
721
722#endif /* __ASSEMBLY__ */
723
724#endif /* __ASM_ARC_ENTRY_H */
diff --git a/arch/arc/include/asm/exec.h b/arch/arc/include/asm/exec.h
new file mode 100644
index 000000000000..28abc6905e07
--- /dev/null
+++ b/arch/arc/include/asm/exec.h
@@ -0,0 +1,15 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __ASM_ARC_EXEC_H
10#define __ASM_ARC_EXEC_H
11
12/* Align stack to a 16-byte boundary */
13#define arch_align_stack(p) ((unsigned long)(p) & ~0xf)
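For instance, a starting stack pointer of 0x5fff_f7ac is rounded down to 0x5fff_f7a0.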
14
15#endif
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
new file mode 100644
index 000000000000..4dc64ddebece
--- /dev/null
+++ b/arch/arc/include/asm/futex.h
@@ -0,0 +1,151 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Vineetg: August 2010: From Android kernel work
9 */
10
11#ifndef _ASM_FUTEX_H
12#define _ASM_FUTEX_H
13
14#include <linux/futex.h>
15#include <linux/preempt.h>
16#include <linux/uaccess.h>
17#include <asm/errno.h>
18
19#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
20 \
21 __asm__ __volatile__( \
22 "1: ld %1, [%2] \n" \
23 insn "\n" \
24 "2: st %0, [%2] \n" \
25 " mov %0, 0 \n" \
26 "3: \n" \
27 " .section .fixup,\"ax\" \n" \
28 " .align 4 \n" \
29 "4: mov %0, %4 \n" \
30 " b 3b \n" \
31 " .previous \n" \
32 " .section __ex_table,\"a\" \n" \
33 " .align 4 \n" \
34 " .word 1b, 4b \n" \
35 " .word 2b, 4b \n" \
36 " .previous \n" \
37 \
38 : "=&r" (ret), "=&r" (oldval) \
39 : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \
40 : "cc", "memory")
41
42static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
43{
44 int op = (encoded_op >> 28) & 7;
45 int cmp = (encoded_op >> 24) & 15;
46 int oparg = (encoded_op << 8) >> 20;
47 int cmparg = (encoded_op << 20) >> 20;
48 int oldval = 0, ret;
49
50 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
51 oparg = 1 << oparg;
52
53 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
54 return -EFAULT;
55
56 pagefault_disable(); /* implies preempt_disable() */
57
58 switch (op) {
59 case FUTEX_OP_SET:
60 __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
61 break;
62 case FUTEX_OP_ADD:
63 __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
64 break;
65 case FUTEX_OP_OR:
66 __futex_atomic_op("or %0, %1, %3", ret, oldval, uaddr, oparg);
67 break;
68 case FUTEX_OP_ANDN:
69 __futex_atomic_op("bic %0, %1, %3", ret, oldval, uaddr, oparg);
70 break;
71 case FUTEX_OP_XOR:
72 __futex_atomic_op("xor %0, %1, %3", ret, oldval, uaddr, oparg);
73 break;
74 default:
75 ret = -ENOSYS;
76 }
77
78 pagefault_enable(); /* subsumes preempt_enable() */
79
80 if (!ret) {
81 switch (cmp) {
82 case FUTEX_OP_CMP_EQ:
83 ret = (oldval == cmparg);
84 break;
85 case FUTEX_OP_CMP_NE:
86 ret = (oldval != cmparg);
87 break;
88 case FUTEX_OP_CMP_LT:
89 ret = (oldval < cmparg);
90 break;
91 case FUTEX_OP_CMP_GE:
92 ret = (oldval >= cmparg);
93 break;
94 case FUTEX_OP_CMP_LE:
95 ret = (oldval <= cmparg);
96 break;
97 case FUTEX_OP_CMP_GT:
98 ret = (oldval > cmparg);
99 break;
100 default:
101 ret = -ENOSYS;
102 }
103 }
104 return ret;
105}
106
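A worked decode, per the generic futex encoding unpacked above: encoded_op = 0x10001000 yields op = 1 (FUTEX_OP_ADD), cmp = 0 (FUTEX_OP_CMP_EQ), oparg = 1 and cmparg = 0, so the call atomically adds 1 to *uaddr and returns whether the old value was 0.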
107/* Compare-exchange with preemption disabled.
108 * Notes:
109 * -Best-effort: the exchange happens only if the compare succeeds;
110 *   if the compare fails, we return, leaving retry/looping to upper layers
111 * -successful cmp-xchg: return orig value in @uaddr (same as cmp val)
112 * -compare fails: return orig value in @uaddr
113 * -user access r/w fails: return -EFAULT
114 */
115static inline int
116futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
117 u32 newval)
118{
119 u32 val;
120
121 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
122 return -EFAULT;
123
124 pagefault_disable(); /* implies preempt_disable() */
125
126 /* TBD : can use llock/scond */
127 __asm__ __volatile__(
128 "1: ld %0, [%3] \n"
129 " brne %0, %1, 3f \n"
130 "2: st %2, [%3] \n"
131 "3: \n"
132 " .section .fixup,\"ax\" \n"
133 "4: mov %0, %4 \n"
134 " b 3b \n"
135 " .previous \n"
136 " .section __ex_table,\"a\" \n"
137 " .align 4 \n"
138 " .word 1b, 4b \n"
139 " .word 2b, 4b \n"
140 " .previous\n"
141 : "=&r"(val)
142 : "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
143 : "cc", "memory");
144
145 pagefault_enable(); /* subsumes preempt_enable() */
146
147 *uval = val;
148 return val;
149}
150
151#endif
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
new file mode 100644
index 000000000000..473424d7528b
--- /dev/null
+++ b/arch/arc/include/asm/io.h
@@ -0,0 +1,105 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _ASM_ARC_IO_H
10#define _ASM_ARC_IO_H
11
12#include <linux/types.h>
13#include <asm/byteorder.h>
14#include <asm/page.h>
15
16#define PCI_IOBASE ((void __iomem *)0)
17
18extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
19extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
20 unsigned long flags);
21extern void iounmap(const void __iomem *addr);
22
23#define ioremap_nocache(phy, sz) ioremap(phy, sz)
24#define ioremap_wc(phy, sz) ioremap(phy, sz)
25
26/* Change struct page to physical address */
27#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
28
29#define __raw_readb __raw_readb
30static inline u8 __raw_readb(const volatile void __iomem *addr)
31{
32 u8 b;
33
34 __asm__ __volatile__(
35 " ldb%U1 %0, %1 \n"
36 : "=r" (b)
37 : "m" (*(volatile u8 __force *)addr)
38 : "memory");
39
40 return b;
41}
42
43#define __raw_readw __raw_readw
44static inline u16 __raw_readw(const volatile void __iomem *addr)
45{
46 u16 s;
47
48 __asm__ __volatile__(
49 " ldw%U1 %0, %1 \n"
50 : "=r" (s)
51 : "m" (*(volatile u16 __force *)addr)
52 : "memory");
53
54 return s;
55}
56
57#define __raw_readl __raw_readl
58static inline u32 __raw_readl(const volatile void __iomem *addr)
59{
60 u32 w;
61
62 __asm__ __volatile__(
63 " ld%U1 %0, %1 \n"
64 : "=r" (w)
65 : "m" (*(volatile u32 __force *)addr)
66 : "memory");
67
68 return w;
69}
70
71#define __raw_writeb __raw_writeb
72static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
73{
74 __asm__ __volatile__(
75 " stb%U1 %0, %1 \n"
76 :
77 : "r" (b), "m" (*(volatile u8 __force *)addr)
78 : "memory");
79}
80
81#define __raw_writew __raw_writew
82static inline void __raw_writew(u16 s, volatile void __iomem *addr)
83{
84 __asm__ __volatile__(
85 " stw%U1 %0, %1 \n"
86 :
87 : "r" (s), "m" (*(volatile u16 __force *)addr)
88 : "memory");
89
90}
91
92#define __raw_writel __raw_writel
93static inline void __raw_writel(u32 w, volatile void __iomem *addr)
94{
95 __asm__ __volatile__(
96 " st%U1 %0, %1 \n"
97 :
98 : "r" (w), "m" (*(volatile u32 __force *)addr)
99 : "memory");
100
101}
102
103#include <asm-generic/io.h>
104
105#endif /* _ASM_ARC_IO_H */
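A minimal usage sketch of the accessors above (UART_BASE and UART_STATUS are made-up values and uart_status() is an illustrative helper; readl() comes from the asm-generic/io.h include, layered on __raw_readl()):

	#define UART_BASE	0xc0fc1000UL	/* hypothetical MMIO base */
	#define UART_STATUS	0x04		/* hypothetical reg offset */

	static u32 uart_status(void)
	{
		void __iomem *regs = ioremap(UART_BASE, 0x100);
		u32 v = 0;

		if (regs) {
			v = readl(regs + UART_STATUS);	/* ends up in __raw_readl() */
			iounmap(regs);
		}
		return v;
	}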
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
new file mode 100644
index 000000000000..4c588f9820cf
--- /dev/null
+++ b/arch/arc/include/asm/irq.h
@@ -0,0 +1,25 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __ASM_ARC_IRQ_H
10#define __ASM_ARC_IRQ_H
11
12#define NR_IRQS 32
13
14/* Platform Independent IRQs */
15#define TIMER0_IRQ 3
16#define TIMER1_IRQ 4
17
18#include <asm-generic/irq.h>
19
20extern void __init arc_init_IRQ(void);
21extern int __init get_hw_config_num_irq(void);
22
23void __cpuinit arc_local_timer_setup(unsigned int cpu);
24
25#endif
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
new file mode 100644
index 000000000000..ccd84806b62f
--- /dev/null
+++ b/arch/arc/include/asm/irqflags.h
@@ -0,0 +1,153 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __ASM_ARC_IRQFLAGS_H
10#define __ASM_ARC_IRQFLAGS_H
11
12/* vineetg: March 2010 : local_irq_save( ) optimisation
13 * -Remove explicit mov of current status32 into reg, that is not needed
14 * -Use BIC insn instead of INVERTED + AND
15 * -Conditionally disable interrupts (if they are not enabled, don't disable)
16*/
17
18#ifdef __KERNEL__
19
20#include <asm/arcregs.h>
21
22#ifndef __ASSEMBLY__
23
24/******************************************************************
25 * IRQ Control Macros
26 ******************************************************************/
27
28/*
29 * Save IRQ state and disable IRQs
30 */
31static inline long arch_local_irq_save(void)
32{
33 unsigned long temp, flags;
34
35 __asm__ __volatile__(
36 " lr %1, [status32] \n"
37 " bic %0, %1, %2 \n"
38 " and.f 0, %1, %2 \n"
39 " flag.nz %0 \n"
40 : "=r"(temp), "=r"(flags)
41 : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
42 : "cc");
43
44 return flags;
45}
46
47/*
48 * restore saved IRQ state
49 */
50static inline void arch_local_irq_restore(unsigned long flags)
51{
52
53 __asm__ __volatile__(
54 " flag %0 \n"
55 :
56 : "r"(flags));
57}
58
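The usual pairing of the two primitives, which is what the generic local_irq_save()/local_irq_restore() wrappers boil down to (the function below is purely illustrative):

	static void bump_stat(unsigned long *ctr)
	{
		unsigned long flags;

		flags = arch_local_irq_save();	/* old STATUS32; E1/E2 now clear */
		(*ctr)++;			/* protected from local IRQs */
		arch_local_irq_restore(flags);	/* FLAG insn rewrites STATUS32 */
	}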
59/*
60 * Unconditionally Enable IRQs
61 */
62extern void arch_local_irq_enable(void);
63
64/*
65 * Unconditionally Disable IRQs
66 */
67static inline void arch_local_irq_disable(void)
68{
69 unsigned long temp;
70
71 __asm__ __volatile__(
72 " lr %0, [status32] \n"
73 " and %0, %0, %1 \n"
74 " flag %0 \n"
75 : "=&r"(temp)
76 : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)));
77}
78
79/*
80 * save IRQ state
81 */
82static inline long arch_local_save_flags(void)
83{
84 unsigned long temp;
85
86 __asm__ __volatile__(
87 " lr %0, [status32] \n"
88 : "=&r"(temp));
89
90 return temp;
91}
92
93/*
94 * Query IRQ state
95 */
96static inline int arch_irqs_disabled_flags(unsigned long flags)
97{
98 return !(flags & (STATUS_E1_MASK
99#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
100 | STATUS_E2_MASK
101#endif
102 ));
103}
104
105static inline int arch_irqs_disabled(void)
106{
107 return arch_irqs_disabled_flags(arch_local_save_flags());
108}
109
110static inline void arch_mask_irq(unsigned int irq)
111{
112 unsigned int ienb;
113
114 ienb = read_aux_reg(AUX_IENABLE);
115 ienb &= ~(1 << irq);
116 write_aux_reg(AUX_IENABLE, ienb);
117}
118
119static inline void arch_unmask_irq(unsigned int irq)
120{
121 unsigned int ienb;
122
123 ienb = read_aux_reg(AUX_IENABLE);
124 ienb |= (1 << irq);
125 write_aux_reg(AUX_IENABLE, ienb);
126}
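For instance, arch_unmask_irq(TIMER0_IRQ) sets bit 3 of AUX_IENABLE (TIMER0_IRQ is defined as 3 in asm/irq.h above), letting the core take timer0 interrupts; arch_mask_irq() clears that bit again.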
127
128#else
129
130.macro IRQ_DISABLE scratch
131 lr \scratch, [status32]
132 bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
133 flag \scratch
134.endm
135
136.macro IRQ_DISABLE_SAVE scratch, save
137 lr \scratch, [status32]
138 mov \save, \scratch /* Make a copy */
139 bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
140 flag \scratch
141.endm
142
143.macro IRQ_ENABLE scratch
144 lr \scratch, [status32]
145 or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
146 flag \scratch
147.endm
148
149#endif /* __ASSEMBLY__ */
150
151#endif /* KERNEL */
152
153#endif
diff --git a/arch/arc/include/asm/kdebug.h b/arch/arc/include/asm/kdebug.h
new file mode 100644
index 000000000000..3fbe6c472c0a
--- /dev/null
+++ b/arch/arc/include/asm/kdebug.h
@@ -0,0 +1,19 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _ASM_ARC_KDEBUG_H
10#define _ASM_ARC_KDEBUG_H
11
12enum die_val {
13 DIE_UNUSED,
14 DIE_TRAP,
15 DIE_IERR,
16 DIE_OOPS
17};
18
19#endif
diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
new file mode 100644
index 000000000000..f3c4934f0ca9
--- /dev/null
+++ b/arch/arc/include/asm/kgdb.h
@@ -0,0 +1,61 @@
1/*
2 * kgdb support for ARC
3 *
4 * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __ARC_KGDB_H__
12#define __ARC_KGDB_H__
13
14#ifdef CONFIG_KGDB
15
16#include <asm/user.h>
17
18/* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
19 * register API yet */
20#undef DBG_MAX_REG_NUM
21
22#define GDB_MAX_REGS 39
23
24#define BREAK_INSTR_SIZE 2
25#define CACHE_FLUSH_IS_SAFE 1
26#define NUMREGBYTES (GDB_MAX_REGS * 4)
27#define BUFMAX 2048
28
29static inline void arch_kgdb_breakpoint(void)
30{
31 __asm__ __volatile__ ("trap_s 0x4\n");
32}
33
34extern void kgdb_trap(struct pt_regs *regs, int param);
35
36enum arc700_linux_regnums {
37 _R0 = 0,
38 _R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13,
39 _R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24,
40 _R25, _R26,
41 _BTA = 27,
42 _LP_START = 28,
43 _LP_END = 29,
44 _LP_COUNT = 30,
45 _STATUS32 = 31,
46 _BLINK = 32,
47 _FP = 33,
48 __SP = 34,
49 _EFA = 35,
50 _RET = 36,
51 _ORIG_R8 = 37,
52 _STOP_PC = 38
53};
54
55#else
56static inline void kgdb_trap(struct pt_regs *regs, int param)
57{
58}
59#endif
60
61#endif /* __ARC_KGDB_H__ */
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h
new file mode 100644
index 000000000000..4d9c211fce70
--- /dev/null
+++ b/arch/arc/include/asm/kprobes.h
@@ -0,0 +1,62 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _ARC_KPROBES_H
10#define _ARC_KPROBES_H
11
12#ifdef CONFIG_KPROBES
13
14typedef u16 kprobe_opcode_t;
15
16#define UNIMP_S_INSTRUCTION 0x79e0
17#define TRAP_S_2_INSTRUCTION 0x785e
18
19#define MAX_INSN_SIZE 8
20#define MAX_STACK_SIZE 64
21
22struct arch_specific_insn {
23 int is_short;
24 kprobe_opcode_t *t1_addr, *t2_addr;
25 kprobe_opcode_t t1_opcode, t2_opcode;
26};
27
28#define flush_insn_slot(p) do { } while (0)
29
30#define kretprobe_blacklist_size 0
31
32struct kprobe;
33
34void arch_remove_kprobe(struct kprobe *p);
35
36int kprobe_exceptions_notify(struct notifier_block *self,
37 unsigned long val, void *data);
38
39struct prev_kprobe {
40 struct kprobe *kp;
41 unsigned long status;
42};
43
44struct kprobe_ctlblk {
45 unsigned int kprobe_status;
46 struct pt_regs jprobe_saved_regs;
47 char jprobes_stack[MAX_STACK_SIZE];
48 struct prev_kprobe prev_kprobe;
49};
50
51int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
52void kretprobe_trampoline(void);
53void trap_is_kprobe(unsigned long cause, unsigned long address,
54 struct pt_regs *regs);
55#else
56static inline void trap_is_kprobe(unsigned long cause, unsigned long address,
57 struct pt_regs *regs)
58{
59}
60#endif
61
62#endif
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
new file mode 100644
index 000000000000..0283e9e44e0d
--- /dev/null
+++ b/arch/arc/include/asm/linkage.h
@@ -0,0 +1,63 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __ASM_LINKAGE_H
10#define __ASM_LINKAGE_H
11
12#ifdef __ASSEMBLY__
13
14/* Can't use the ENTRY macro in linux/linkage.h:
15 * gas considers ';' a comment delimiter, not a statement separator
16 */
17.macro ARC_ENTRY name
18 .global \name
19 .align 4
20 \name:
21.endm
22
23.macro ARC_EXIT name
24#define ASM_PREV_SYM_ADDR(name) .-##name
25 .size \ name, ASM_PREV_SYM_ADDR(\name)
26.endm
27
28/* annotation for data we want in DCCM - if enabled in .config */
29.macro ARCFP_DATA nm
30#ifdef CONFIG_ARC_HAS_DCCM
31 .section .data.arcfp
32#else
33 .section .data
34#endif
35 .global \nm
36.endm
37
38/* annotation for code we want in ICCM - if enabled in .config */
39.macro ARCFP_CODE
40#ifdef CONFIG_ARC_HAS_ICCM
41 .section .text.arcfp, "ax",@progbits
42#else
43 .section .text, "ax",@progbits
44#endif
45.endm
46
47#else /* !__ASSEMBLY__ */
48
49#ifdef CONFIG_ARC_HAS_ICCM
50#define __arcfp_code __attribute__((__section__(".text.arcfp")))
51#else
52#define __arcfp_code __attribute__((__section__(".text")))
53#endif
54
55#ifdef CONFIG_ARC_HAS_DCCM
56#define __arcfp_data __attribute__((__section__(".data.arcfp")))
57#else
58#define __arcfp_data __attribute__((__section__(".data")))
59#endif
60
61#endif /* __ASSEMBLY__ */
62
63#endif
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
new file mode 100644
index 000000000000..9998dc846ebb
--- /dev/null
+++ b/arch/arc/include/asm/mach_desc.h
@@ -0,0 +1,87 @@
1/*
2 * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * based on METAG mach/arch.h (which in turn was based on ARM)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef _ASM_ARC_MACH_DESC_H_
12#define _ASM_ARC_MACH_DESC_H_
13
14/**
15 * struct machine_desc - Board specific callbacks, called from ARC common code
16 * Provided by each ARC board using MACHINE_START()/MACHINE_END(), so that
17 * a multi-platform kernel builds with an array of such descriptors.
18 * We extend the early DT scan to also match the DT's "compatible" string
19 * against the @dt_compat of all such descriptors, and the one with the
20 * highest "DT score" is selected as the global @machine_desc.
21 *
22 * @name: Board/SoC name
23 * @dt_compat: Array of device tree 'compatible' strings
24 * (XXX: although only 1st entry is looked at)
25 * @init_early: Very early callback [called from setup_arch()]
26 * @init_irq: setup external IRQ controllers [called from init_IRQ()]
27 * @init_smp: for each CPU (e.g. setup IPI)
28 * [(M):init_IRQ(), (o):start_kernel_secondary()]
29 * @init_time: platform specific clocksource/clockevent registration
30 * [called from time_init()]
31 * @init_machine: arch initcall level callback (e.g. populate static
32 * platform devices or parse Devicetree)
33 * @init_late: Late initcall level callback
34 *
35 */
36struct machine_desc {
37 const char *name;
38 const char **dt_compat;
39
40 void (*init_early)(void);
41 void (*init_irq)(void);
42#ifdef CONFIG_SMP
43 void (*init_smp)(unsigned int);
44#endif
45 void (*init_time)(void);
46 void (*init_machine)(void);
47 void (*init_late)(void);
48
49};
50
51/*
52 * Current machine - only accessible during boot.
53 */
54extern struct machine_desc *machine_desc;
55
56/*
57 * Machine type table - also only accessible during boot
58 */
59extern struct machine_desc __arch_info_begin[], __arch_info_end[];
60#define for_each_machine_desc(p) \
61 for (p = __arch_info_begin; p < __arch_info_end; p++)
62
63static inline struct machine_desc *default_machine_desc(void)
64{
65 /* the default machine is the last one linked in */
66 if (__arch_info_end - 1 < __arch_info_begin)
67 return NULL;
68 return __arch_info_end - 1;
69}
70
71/*
72 * Set of macros to define architecture features.
73 * This is built into a table by the linker.
74 */
75#define MACHINE_START(_type, _name) \
76static const struct machine_desc __mach_desc_##_type \
77__used \
78__attribute__((__section__(".arch.info.init"))) = { \
79 .name = _name,
80
81#define MACHINE_END \
82};
83
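Usage, for a hypothetical board (all names below are illustrative, not part of this patch):

	static const char *myboard_compat[] = {
		"snps,myboard",		/* made-up "compatible" string */
		NULL,
	};

	MACHINE_START(MYBOARD, "myboard")
		.dt_compat	= myboard_compat,
	MACHINE_END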
84extern struct machine_desc *setup_machine_fdt(void *dt);
85extern void __init copy_devtree(void);
86
87#endif
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
new file mode 100644
index 000000000000..56b02320f1a9
--- /dev/null
+++ b/arch/arc/include/asm/mmu.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _ASM_ARC_MMU_H
10#define _ASM_ARC_MMU_H
11
12#ifndef __ASSEMBLY__
13
14typedef struct {
15 unsigned long asid; /* Pvt Addr-Space ID for mm */
16#ifdef CONFIG_ARC_TLB_DBG
17 struct task_struct *tsk;
18#endif
19} mm_context_t;
20
21#endif
22
23#endif
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
new file mode 100644
index 000000000000..0d71fb11b57c
--- /dev/null
+++ b/arch/arc/include/asm/mmu_context.h
@@ -0,0 +1,213 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * vineetg: May 2011
9 * -Refactored get_new_mmu_context( ) to only handle live-mm.
10 * retiring-mm handled in other hooks
11 *
12 * Vineetg: March 25th, 2008: Bug #92690
13 * -Major rewrite of Core ASID allocation routine get_new_mmu_context
14 *
15 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
16 */
17
18#ifndef _ASM_ARC_MMU_CONTEXT_H
19#define _ASM_ARC_MMU_CONTEXT_H
20
21#include <asm/arcregs.h>
22#include <asm/tlb.h>
23
24#include <asm-generic/mm_hooks.h>
25
26/* ARC700 ASID Management
27 *
28 * The ARC MMU provides an 8-bit ASID (0..255) to tag TLB entries, allowing
29 * entries with the same vaddr (from different tasks) to co-exist. This
30 * provides for a "Fast Context Switch" i.e. no TLB flush on ctxt-switch
31 *
32 * Linux assigns each task a unique ASID. A simple round-robin allocation
33 * of H/w ASID is done using software tracker @asid_cache.
34 * When it reaches max 255, the allocation cycle starts afresh by flushing
35 * the entire TLB and wrapping ASID back to zero.
36 *
37 * For book-keeping, Linux uses a couple of data-structures:
38 * -mm_struct has an @asid field to keep a note of the task's ASID (needed at
39 *  the time of, say, switch_mm( ))
40 * -An array of mm structs @asid_mm_map[] for the reverse asid->mm mapping:
41 *  given an ASID, find the associated mm struct.
42 *
43 * The round-robin allocation algorithm allows for ASID stealing.
44 * If the asid tracker is at "x-1", a new req will allocate "x", even if "x"
45 * was already assigned to another (switched-out) task. Obviously the prev
46 * owner is marked with an invalid ASID so that it requests a new ASID when it
47 * gets scheduled next time. However its TLB entries (with ASID "x") could
48 * exist, which must be cleared before the same ASID is used by the new owner.
49 * Flushing them would be a plausible but costly solution. Instead we force an
50 * allocation policy quirk, which ensures that a stolen ASID won't have any
51 * TLB entries associated, alleviating the need to flush.
52 * The quirk is essentially that an ASID allocated in the prev cycle is not
53 * allowed to be used past a roll-over into the next cycle.
54 * When this happens (i.e. task ASID > asid tracker), the task needs to
55 * refresh its ASID, aligning it to the current value of the tracker. If the
56 * task doesn't get scheduled past a roll-over, and so its ASID is not yet
57 * realigned with the tracker, such an ASID is still safely reusable because
58 * it is guaranteed that TLB entries with that ASID won't exist.
59 */
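A concrete run of the scheme: with asid_cache at 254 and task A holding ASID 254, the next request hands out 255, and the one after that wraps asid_cache back to FIRST_ASID and flushes the entire TLB before handing out 0. If long-lived task A is scheduled after the wrap, its ASID (254) is greater than the tracker, so switch_mm( ) below forces it onto a fresh ASID, upholding the quirk that pre-roll-over ASIDs are never used past a wrap.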
60
61#define FIRST_ASID 0
62#define MAX_ASID 255 /* 8 bit PID field in PID Aux reg */
63#define NO_ASID (MAX_ASID + 1) /* ASID Not alloc to mmu ctxt */
64#define NUM_ASID ((MAX_ASID - FIRST_ASID) + 1)
65
66/* ASID to mm struct mapping */
67extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
68
69extern int asid_cache;
70
71/*
72 * Assign a new ASID to task. If the task already has an ASID, it is
73 * relinquished.
74 */
75static inline void get_new_mmu_context(struct mm_struct *mm)
76{
77 struct mm_struct *prev_owner;
78 unsigned long flags;
79
80 local_irq_save(flags);
81
82 /*
83 * Relinquish the currently owned ASID (if any).
84 * Doing unconditionally saves a cmp-n-branch; for already unused
85 * ASID slot, the value was/remains NULL
86 */
87 asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;
88
89 /* move to new ASID */
90 if (++asid_cache > MAX_ASID) { /* ASID roll-over */
91 asid_cache = FIRST_ASID;
92 flush_tlb_all();
93 }
94
95 /*
96	 * Is the next ASID already owned by someone else (i.e. we are stealing
97	 * it)? If so, let the orig owner be aware of this, so that when it
98	 * runs, it asks for a brand new ASID. This would only happen for a
99	 * long-lived task with an ASID from the prev allocation cycle
100	 * (before the ASID roll-over).
101	 *
102	 * This might look wrong - if we are re-using some other task's ASID,
103	 * won't we use its stale TLB entries too? Actually switch_mm( ) takes
104	 * care of such a case: it ensures that a task with an ASID from the
105	 * prev alloc cycle, when scheduled, will refresh its ASID (see below).
106	 * The stealing scenario described here will only happen if that task
107	 * didn't get a chance to refresh its ASID - so stale entries won't exist.
108 */
109 prev_owner = asid_mm_map[asid_cache];
110 if (prev_owner)
111 prev_owner->context.asid = NO_ASID;
112
113 /* Assign new ASID to tsk */
114 asid_mm_map[asid_cache] = mm;
115 mm->context.asid = asid_cache;
116
117#ifdef CONFIG_ARC_TLB_DBG
118 pr_info("ARC_TLB_DBG: NewMM=0x%x OldMM=0x%x task_struct=0x%x Task: %s,"
119 " pid:%u, assigned asid:%lu\n",
120 (unsigned int)mm, (unsigned int)prev_owner,
121 (unsigned int)(mm->context.tsk), (mm->context.tsk)->comm,
122 (mm->context.tsk)->pid, mm->context.asid);
123#endif
124
125 write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE);
126
127 local_irq_restore(flags);
128}
129
130/*
131 * Initialize the context related info for a new mm_struct
132 * instance.
133 */
134static inline int
135init_new_context(struct task_struct *tsk, struct mm_struct *mm)
136{
137 mm->context.asid = NO_ASID;
138#ifdef CONFIG_ARC_TLB_DBG
139 mm->context.tsk = tsk;
140#endif
141 return 0;
142}
143
144/* Prepare the MMU for the task: setup the PID reg with the allocated ASID.
145   If the task doesn't have an ASID (never allocated, or stolen), get a new one
146*/
147static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
148 struct task_struct *tsk)
149{
150#ifndef CONFIG_SMP
151 /* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
152 write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
153#endif
154
155 /*
156 * Get a new ASID if task doesn't have a valid one. Possible when
157 * -task never had an ASID (fresh after fork)
158	 * -its ASID was stolen - past an ASID roll-over.
159	 * -There's a third obscure scenario (if this task is running for the
160	 *  first time after an ASID rollover), where despite having a valid
161	 *  ASID, we force a get for a new ASID - see comments at top.
162 *
163 * Both the non-alloc scenario and first-use-after-rollover can be
164 * detected using the single condition below: NO_ASID = 256
165 * while asid_cache is always a valid ASID value (0-255).
166 */
167 if (next->context.asid > asid_cache) {
168 get_new_mmu_context(next);
169 } else {
170 /*
171 * XXX: This will never happen given the chks above
172 * BUG_ON(next->context.asid > MAX_ASID);
173 */
174 write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE);
175 }
176
177}
178
179static inline void destroy_context(struct mm_struct *mm)
180{
181 unsigned long flags;
182
183 local_irq_save(flags);
184
185 asid_mm_map[mm->context.asid] = NULL;
186 mm->context.asid = NO_ASID;
187
188 local_irq_restore(flags);
189}
190
191/* It seemed that deactivate_mm( ) would be a reasonable place to do
192 * book-keeping for a retiring mm. However destroy_context( ) still needs to
193 * do that because between mm_release( ) => deactivate_mm( ) and
194 * mmput => .. => __mmdrop( ) => destroy_context( )
195 * there is a good chance that the task gets sched-out/in, making its ASID
196 * valid again (this teased me for a whole day).
197 */
198#define deactivate_mm(tsk, mm) do { } while (0)
199
200static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
201{
202#ifndef CONFIG_SMP
203 write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
204#endif
205
206 /* Unconditionally get a new ASID */
207 get_new_mmu_context(next);
208
209}
210
211#define enter_lazy_tlb(mm, tsk)
212
213#endif /* __ASM_ARC_MMU_CONTEXT_H */
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
new file mode 100644
index 000000000000..518222bb3f8e
--- /dev/null
+++ b/arch/arc/include/asm/module.h
@@ -0,0 +1,28 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
9
10 */
11
12#ifndef _ASM_ARC_MODULE_H
13#define _ASM_ARC_MODULE_H
14
15#include <asm-generic/module.h>
16
17#ifdef CONFIG_ARC_DW2_UNWIND
18struct mod_arch_specific {
19 void *unw_info;
20 int unw_sec_idx;
21};
22#endif
23
24#define MODULE_PROC_FAMILY "ARC700"
25
26#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
27
28#endif /* _ASM_ARC_MODULE_H */
diff --git a/arch/arc/include/asm/mutex.h b/arch/arc/include/asm/mutex.h
new file mode 100644
index 000000000000..a2f88ff9f506
--- /dev/null
+++ b/arch/arc/include/asm/mutex.h
@@ -0,0 +1,18 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/*
10 * The xchg() based mutex fast path maintains a state of 0 or 1, as opposed
11 * to the atomic-dec based one, which can "count" any number of lock
12 * contenders. This ideally needs fixing in core, but for now use the dec ver.
13 */
14#if defined(CONFIG_SMP) && (CONFIG_NR_CPUS > 2)
15#include <asm-generic/mutex-dec.h>
16#else
17#include <asm-generic/mutex-xchg.h>
18#endif
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
new file mode 100644
index 000000000000..bdf546104551
--- /dev/null
+++ b/arch/arc/include/asm/page.h
@@ -0,0 +1,109 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#ifndef __ASM_ARC_PAGE_H
9#define __ASM_ARC_PAGE_H
10
11#include <uapi/asm/page.h>
12
13
14#ifndef __ASSEMBLY__
15
16#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
17#define free_user_page(page, addr) free_page(addr)
18
19/* TBD: for now don't worry about VIPT D$ aliasing */
20#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
21#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
22
23#define clear_user_page(addr, vaddr, pg) clear_page(addr)
24#define copy_user_page(vto, vfrom, vaddr, pg) copy_page(vto, vfrom)
25
26#undef STRICT_MM_TYPECHECKS
27
28#ifdef STRICT_MM_TYPECHECKS
29/*
30 * These are used to make use of C type-checking..
31 */
32typedef struct {
33 unsigned long pte;
34} pte_t;
35typedef struct {
36 unsigned long pgd;
37} pgd_t;
38typedef struct {
39 unsigned long pgprot;
40} pgprot_t;
41typedef unsigned long pgtable_t;
42
43#define pte_val(x) ((x).pte)
44#define pgd_val(x) ((x).pgd)
45#define pgprot_val(x) ((x).pgprot)
46
47#define __pte(x) ((pte_t) { (x) })
48#define __pgd(x) ((pgd_t) { (x) })
49#define __pgprot(x) ((pgprot_t) { (x) })
50
51#define pte_pgprot(x) __pgprot(pte_val(x))
52
53#else /* !STRICT_MM_TYPECHECKS */
54
55typedef unsigned long pte_t;
56typedef unsigned long pgd_t;
57typedef unsigned long pgprot_t;
58typedef unsigned long pgtable_t;
59
60#define pte_val(x) (x)
61#define pgd_val(x) (x)
62#define pgprot_val(x) (x)
63#define __pte(x) (x)
64#define __pgprot(x) (x)
65#define pte_pgprot(x) (x)
66
67#endif
68
69#define ARCH_PFN_OFFSET (CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT)
70
71#define pfn_valid(pfn) (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
72
73/*
74 * __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
75 *
76 * These macros have historically been misnamed
77 * virt here means link-address/program-address as embedded in object code.
78 * So if kernel img is linked at 0x8000_0000 onwards, 0x8010_0000 will be
79 * 128th page, and virt_to_page( ) will return the struct page corresp to it.
80 * mem_map[ ] is an array of struct page for each page frame in the system
81 *
82 * Independent of where linux is linked, link-addr = physical address.
83 * So the old macro    __pa = vaddr + PAGE_OFFSET - CONFIG_LINUX_LINK_BASE
84 * would have been wrong if the kernel were not 0x8000_0000 based
85 */
86#define __pa(vaddr) ((unsigned long)vaddr)
87#define __va(paddr) ((void *)((unsigned long)(paddr)))
88
89#define virt_to_page(kaddr) \
90 (mem_map + ((__pa(kaddr) - CONFIG_LINUX_LINK_BASE) >> PAGE_SHIFT))
91
92#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
93
94/* Default Permissions for page, used in mmap.c */
95#ifdef CONFIG_ARC_STACK_NONEXEC
96#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
97#else
98#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
99 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
100#endif
101
102#define WANT_PAGE_VIRTUAL 1
103
104#include <asm-generic/memory_model.h> /* page_to_pfn, pfn_to_page */
105#include <asm-generic/getorder.h>
106
107#endif /* !__ASSEMBLY__ */
108
109#endif
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
new file mode 100644
index 000000000000..115ad96480e6
--- /dev/null
+++ b/arch/arc/include/asm/perf_event.h
@@ -0,0 +1,13 @@
1/*
2 * Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 */
9
10#ifndef __ASM_PERF_EVENT_H
11#define __ASM_PERF_EVENT_H
12
13#endif /* __ASM_PERF_EVENT_H */
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
new file mode 100644
index 000000000000..36a9f20c21a3
--- /dev/null
+++ b/arch/arc/include/asm/pgalloc.h
@@ -0,0 +1,134 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * vineetg: June 2011
9 * -"/proc/meminfo | grep PageTables" kept on increasing
10 * Recently added pgtable dtor was not getting called.
11 *
12 * vineetg: May 2011
13 * -Variable pg-sz means that Page Tables could be variable sized themselves
14 * So calculate it based on addr traversal split [pgd-bits:pte-bits:xxx]
15 * -Page Table size capped to max 1 page to save memory - hence verified.
16 * -Since these deal with constants, gcc compile-time optimizes them.
17 *
18 * vineetg: Nov 2010
19 * -Added pgtable ctor/dtor used for pgtable mem accounting
20 *
21 * vineetg: April 2010
22 * -Switched pgtable_t from being struct page * to unsigned long
23 *      =Needed so that the Page Table allocator (pte_alloc_one) is not forced
24 *       to deal with struct page. That way in future we can make it allocate
25 *       multiple PG Tbls in one Page Frame
26 *      =sweet side effect is avoiding calls to ugly page_address( ) from the
27 *       pg-tbl allocator sub-sys (pte_alloc_one, pte_free, pmd_populate)
28 *
29 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
30 */
31
32#ifndef _ASM_ARC_PGALLOC_H
33#define _ASM_ARC_PGALLOC_H
34
35#include <linux/mm.h>
36#include <linux/log2.h>
37
38static inline void
39pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
40{
41 pmd_set(pmd, pte);
42}
43
44static inline void
45pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
46{
47 pmd_set(pmd, (pte_t *) ptep);
48}
49
50static inline int __get_order_pgd(void)
51{
52 return get_order(PTRS_PER_PGD * 4);
53}
54
55static inline pgd_t *pgd_alloc(struct mm_struct *mm)
56{
57 int num, num2;
58 pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd());
59
60 if (ret) {
61 num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
62 memzero(ret, num * sizeof(pgd_t));
63
64 num2 = VMALLOC_SIZE / PGDIR_SIZE;
65 memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t));
66
67 memzero(ret + num + num2,
68 (PTRS_PER_PGD - num - num2) * sizeof(pgd_t));
69
70 }
71 return ret;
72}
73
74static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
75{
76 free_pages((unsigned long)pgd, __get_order_pgd());
77}
78
79
80/*
81 * With software-only page-tables, addr-split for traversal is tweakable and
82 * that directly governs how big tables would be at each level.
83 * Further, the MMU page size is configurable.
84 * Thus we need to programmatically assert the size constraint.
85 * All of this is const math, allowing gcc to do constant folding/propagation.
86 */
87
88static inline int __get_order_pte(void)
89{
90 return get_order(PTRS_PER_PTE * 4);
91}
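Worked numbers for the default 8K-page config (BITS_FOR_PTE = 8 per asm/pgtable.h): PTRS_PER_PTE * 4 = 256 * 4 = 1K, so __get_order_pte() folds to get_order(1024) = 0, i.e. one 8K page per table of which only 1K is used - the very "rest 7K is unused" TODO noted in pgtable.h.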
92
93static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
94 unsigned long address)
95{
96 pte_t *pte;
97
98 pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
99 __get_order_pte());
100
101 return pte;
102}
103
104static inline pgtable_t
105pte_alloc_one(struct mm_struct *mm, unsigned long address)
106{
107 pgtable_t pte_pg;
108
109 pte_pg = __get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
110 if (pte_pg) {
111 memzero((void *)pte_pg, PTRS_PER_PTE * 4);
112 pgtable_page_ctor(virt_to_page(pte_pg));
113 }
114
115 return pte_pg;
116}
117
118static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
119{
120 free_pages((unsigned long)pte, __get_order_pte()); /* takes phy addr */
121}
122
123static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
124{
125 pgtable_page_dtor(virt_to_page(ptep));
126 free_pages(ptep, __get_order_pte());
127}
128
129#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
130
131#define check_pgt_cache() do { } while (0)
132#define pmd_pgtable(pmd) pmd_page_vaddr(pmd)
133
134#endif /* _ASM_ARC_PGALLOC_H */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
new file mode 100644
index 000000000000..b7e36684c091
--- /dev/null
+++ b/arch/arc/include/asm/pgtable.h
@@ -0,0 +1,405 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * vineetg: May 2011
9 * -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
10 * They are semantically the same although in different contexts
11 * VALID marks a TLB entry exists and it will only happen if PRESENT
12 * - Utilise some unused free bits to confine PTE flags to 12 bits
13 * This is a must for 4k pg-sz
14 *
15 * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
16 * -TLB Locking never really existed, except for initial specs
17 * -SILENT_xxx not needed for our port
18 * -Per my request, MMU V3 changes the layout of some of the bits
19 * to avoid a few shifts in TLB Miss handlers.
20 *
21 * vineetg: April 2010
22 * -PGD entry no longer contains any flags. If empty it is 0, otherwise has
23 * Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set( ) become simpler
24 *
25 * vineetg: April 2010
26 *  -Switched from 8:11:13 split for page table lookup to 11:8:13
27 * -this speeds up page table allocation itself as we now have to memset 1K
28 * instead of 8k per page table.
29 * -TODO: Right now page table alloc is 8K and rest 7K is unused
30 * need to optimise it
31 *
32 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
33 */
34
35#ifndef _ASM_ARC_PGTABLE_H
36#define _ASM_ARC_PGTABLE_H
37
38#include <asm/page.h>
39#include <asm/mmu.h>
40#include <asm-generic/pgtable-nopmd.h>
41
42/**************************************************************************
43 * Page Table Flags
44 *
45 * The ARC700 MMU only deals with software-managed TLB entries.
46 * Page Tables are purely for Linux VM's consumption and the bits below are
47 * suited to that (uniqueness). Hence some are not implemented in the TLB and
48 * some have a different value in the TLB.
49 * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live
50 *      in separate PD0 and PD1, which combined form a translation entry)
51 *      while from the PTE perspective, they are 8 and 9 respectively;
52 * with MMU v3: most bits (except SHARED) represent the exact hardware pos
53 * (saves some bit shift ops in TLB Miss hdlrs)
54 */
55
56#if (CONFIG_ARC_MMU_VER <= 2)
57
58#define _PAGE_ACCESSED (1<<1) /* Page is accessed (S) */
59#define _PAGE_CACHEABLE (1<<2) /* Page is cached (H) */
60#define _PAGE_EXECUTE (1<<3) /* Page has user execute perm (H) */
61#define _PAGE_WRITE (1<<4) /* Page has user write perm (H) */
62#define _PAGE_READ (1<<5) /* Page has user read perm (H) */
63#define _PAGE_K_EXECUTE (1<<6) /* Page has kernel execute perm (H) */
64#define _PAGE_K_WRITE (1<<7) /* Page has kernel write perm (H) */
65#define _PAGE_K_READ (1<<8) /* Page has kernel perm (H) */
66#define _PAGE_GLOBAL (1<<9) /* Page is global (H) */
67#define _PAGE_MODIFIED (1<<10) /* Page modified (dirty) (S) */
68#define _PAGE_FILE (1<<10) /* page cache/ swap (S) */
69#define _PAGE_PRESENT (1<<11) /* TLB entry is valid (H) */
70
71#else
72
73/* PD1 */
74#define _PAGE_CACHEABLE (1<<0) /* Page is cached (H) */
75#define _PAGE_EXECUTE (1<<1) /* Page has user execute perm (H) */
76#define _PAGE_WRITE (1<<2) /* Page has user write perm (H) */
77#define _PAGE_READ (1<<3) /* Page has user read perm (H) */
78#define _PAGE_K_EXECUTE (1<<4) /* Page has kernel execute perm (H) */
79#define _PAGE_K_WRITE (1<<5) /* Page has kernel write perm (H) */
80#define _PAGE_K_READ (1<<6) /* Page has kernel perm (H) */
81#define _PAGE_ACCESSED (1<<7) /* Page is accessed (S) */
82
83/* PD0 */
84#define _PAGE_GLOBAL (1<<8) /* Page is global (H) */
85#define _PAGE_PRESENT (1<<9) /* TLB entry is valid (H) */
86#define _PAGE_SHARED_CODE (1<<10) /* Shared Code page with cmn vaddr
87 usable for shared TLB entries (H) */
88
89#define _PAGE_MODIFIED (1<<11) /* Page modified (dirty) (S) */
90#define _PAGE_FILE (1<<12) /* page cache/ swap (S) */
91
92#define _PAGE_SHARED_CODE_H (1<<31) /* Hardware counterpart of above */
93#endif
94
95/* Kernel allowed all permissions for all pages */
96#define _K_PAGE_PERMS (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
97
98#ifdef CONFIG_ARC_CACHE_PAGES
99#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
100#else
101#define _PAGE_DEF_CACHEABLE (0)
102#endif
103
104/* Helper for every "user" page
105 * -kernel can R/W/X
106 * -by default cached, unless config otherwise
107 * -present in memory
108 */
109#define ___DEF (_PAGE_PRESENT | _K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
110
111/* Set of bits not changed in pte_modify */
112#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
113
114/* More abbreviated helpers */
115#define PAGE_U_NONE __pgprot(___DEF)
116#define PAGE_U_R __pgprot(___DEF | _PAGE_READ)
117#define PAGE_U_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
118#define PAGE_U_X_R __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
119#define PAGE_U_X_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
120 _PAGE_EXECUTE)
121
122#define PAGE_SHARED PAGE_U_W_R
123
124/* While the kernel runs out of untranslated space, vmalloc/modules use a
125 * chunk of kernel vaddr space - visible in all addr spaces, but kernel-mode
126 * only. Thus Global, all-kernel-access, no-user-access, cached
127 */
128#define PAGE_KERNEL __pgprot(___DEF | _PAGE_GLOBAL)
129
130/* ioremap */
131#define PAGE_KERNEL_NO_CACHE __pgprot(_PAGE_PRESENT | _K_PAGE_PERMS | \
132 _PAGE_GLOBAL)
133
134/**************************************************************************
135 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
136 *
137 * Certain cases have 1:1 mapping
138 * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
139 * which directly corresponds to PAGE_U_X_R
140 *
141 * Other rules which cause the divergence from 1:1 mapping
142 *
143 * 1. Although ARC700 can do exclusive execute/write protection (meaning R
144 *     can be tracked independently of X/W, unlike some other CPUs), still to
145 * keep things consistent with other archs:
146 * -Write implies Read: W => R
147 * -Execute implies Read: X => R
148 *
149 * 2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
150 * This is to enable COW mechanism
151 */
152 /* xwr */
153#define __P000 PAGE_U_NONE
154#define __P001 PAGE_U_R
155#define __P010 PAGE_U_R /* Pvt-W => !W */
156#define __P011 PAGE_U_R /* Pvt-W => !W */
157#define __P100 PAGE_U_X_R /* X => R */
158#define __P101 PAGE_U_X_R
159#define __P110 PAGE_U_X_R /* Pvt-W => !W and X => R */
160#define __P111 PAGE_U_X_R /* Pvt-W => !W */
161
162#define __S000 PAGE_U_NONE
163#define __S001 PAGE_U_R
164#define __S010 PAGE_U_W_R /* W => R */
165#define __S011 PAGE_U_W_R
166#define __S100 PAGE_U_X_R /* X => R */
167#define __S101 PAGE_U_X_R
168#define __S110 PAGE_U_X_W_R /* X => R */
169#define __S111 PAGE_U_X_W_R
170
171/****************************************************************
172 * Page Table Lookup split
173 *
174 * We implement 2 tier paging and since this is all software, we are free
175 * to customize the span of a PGD / PTE entry to suit us
176 *
177 * 32 bit virtual address
178 * -------------------------------------------------------
179 * | BITS_FOR_PGD | BITS_FOR_PTE | BITS_IN_PAGE |
180 * -------------------------------------------------------
181 * | | |
182 * | | --> off in page frame
183 * | |
184 * | ---> index into Page Table
185 * |
186 * ----> index into Page Directory
187 */
188
189#define BITS_IN_PAGE PAGE_SHIFT
190
191/* Optimal Sizing of Pg Tbl - based on MMU page size */
192#if defined(CONFIG_ARC_PAGE_SIZE_8K)
193#define BITS_FOR_PTE 8
194#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
195#define BITS_FOR_PTE 8
196#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
197#define BITS_FOR_PTE 9
198#endif
199
200#define BITS_FOR_PGD (32 - BITS_FOR_PTE - BITS_IN_PAGE)
201
202#define PGDIR_SHIFT (BITS_FOR_PTE + BITS_IN_PAGE)
203#define PGDIR_SIZE (1UL << PGDIR_SHIFT) /* vaddr span, not PDG sz */
204#define PGDIR_MASK (~(PGDIR_SIZE-1))
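Plugging in the default 8K-page numbers: BITS_IN_PAGE = 13 and BITS_FOR_PTE = 8, so BITS_FOR_PGD = 32 - 8 - 13 = 11; PGDIR_SHIFT = 21, and each PGD entry thus spans PGDIR_SIZE = 2 MB of virtual address space, giving a 2048-entry PGD over 256-entry page tables.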
205
206#ifdef __ASSEMBLY__
207#define PTRS_PER_PTE (1 << BITS_FOR_PTE)
208#define PTRS_PER_PGD (1 << BITS_FOR_PGD)
209#else
210#define PTRS_PER_PTE (1UL << BITS_FOR_PTE)
211#define PTRS_PER_PGD (1UL << BITS_FOR_PGD)
212#endif
213/*
214 * Number of entries a userland program uses.
215 * TASK_SIZE is the maximum vaddr that can be used by a userland program.
216 */
217#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
218
219/*
220 * No special requirements for the lowest virtual address at which we permit
221 * a user space mapping to be placed.
222 */
223#define FIRST_USER_ADDRESS 0
224
225
226/****************************************************************
227 * Bucket load of VM Helpers
228 */
229
230#ifndef __ASSEMBLY__
231
232#define pte_ERROR(e) \
233 pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
234#define pgd_ERROR(e) \
235 pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
236
237/* the zero page used for uninitialized and anonymous pages */
238extern char empty_zero_page[PAGE_SIZE];
239#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
240
241#define pte_unmap(pte) do { } while (0)
242#define pte_unmap_nested(pte) do { } while (0)
243
244#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
245#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
246
247/* find the page descriptor of the Page Tbl ref by PMD entry */
248#define pmd_page(pmd) virt_to_page(pmd_val(pmd) & PAGE_MASK)
249
250/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
251#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)
252
253/* In a 2 level sys, setup the PGD entry with PTE value */
254static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
255{
256 pmd_val(*pmdp) = (unsigned long)ptep;
257}
258
259#define pte_none(x) (!pte_val(x))
260#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
261#define pte_clear(mm, addr, ptep) set_pte_at(mm, addr, ptep, __pte(0))
262
263#define pmd_none(x) (!pmd_val(x))
264#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK))
265#define pmd_present(x) (pmd_val(x))
266#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
267
268#define pte_page(x) (mem_map + \
269 (unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT)))
270
271#define mk_pte(page, pgprot) \
272({ \
273 pte_t pte; \
274 pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot); \
275 pte; \
276})
277
278/* TBD: Non linear mapping stuff */
279static inline int pte_file(pte_t pte)
280{
281 return pte_val(pte) & _PAGE_FILE;
282}
283
284#define PTE_FILE_MAX_BITS 30
285#define pgoff_to_pte(x) __pte(x)
286#define pte_to_pgoff(x) (pte_val(x) >> 2)
287#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
288#define pfn_pte(pfn, prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
289#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
290
291/*
292 * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
293 * and returns ptr to PTE entry corresponding to @addr
294 */
295#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
296 __pte_index(addr))
297
298/* No mapping of Page Tables in high mem etc, so following same as above */
299#define pte_offset_kernel(dir, addr) pte_offset(dir, addr)
300#define pte_offset_map(dir, addr) pte_offset(dir, addr)
301
302/* Zoo of pte_xxx function */
303#define pte_read(pte) (pte_val(pte) & _PAGE_READ)
304#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
305#define pte_dirty(pte) (pte_val(pte) & _PAGE_MODIFIED)
306#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)
307#define pte_special(pte) (0)
308
309#define PTE_BIT_FUNC(fn, op) \
310 static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
311
312PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
313PTE_BIT_FUNC(mkwrite, |= (_PAGE_WRITE));
314PTE_BIT_FUNC(mkclean, &= ~(_PAGE_MODIFIED));
315PTE_BIT_FUNC(mkdirty, |= (_PAGE_MODIFIED));
316PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED));
317PTE_BIT_FUNC(mkyoung, |= (_PAGE_ACCESSED));
318PTE_BIT_FUNC(exprotect, &= ~(_PAGE_EXECUTE));
319PTE_BIT_FUNC(mkexec, |= (_PAGE_EXECUTE));
320
321static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
322
323static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
324{
325 return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
326}
327
328/* Macro to mark a page protection as uncacheable */
329#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))
330
331static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
332 pte_t *ptep, pte_t pteval)
333{
334 set_pte(ptep, pteval);
335}
336
337/*
338 * All kernel related VM pages are in init's mm.
339 */
340#define pgd_offset_k(address) pgd_offset(&init_mm, address)
341#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
342#define pgd_offset(mm, addr) (((mm)->pgd)+pgd_index(addr))
343
344/*
345 * Macro to quickly access the PGD entry, utilising the fact that some
346 * archs may cache the pointer to the Page Directory of the "current" task
347 * in an MMU register
348 *
349 * Thus task->mm->pgd (3 pointer dereferences, cache misses etc.) simply
350 * becomes a register read
351 *
352 * ********CAUTION*******:
353 * Kernel code might be dealing with some mm_struct of a NON-"current" task
354 * Thus use this macro only when you are certain that the mm belongs to
355 * "current", e.g. when dealing with signal frame setup code etc.
356 */
357#ifndef CONFIG_SMP
358#define pgd_offset_fast(mm, addr) \
359({ \
360 pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0); \
361 pgd_base + pgd_index(addr); \
362})
363#else
364#define pgd_offset_fast(mm, addr) pgd_offset(mm, addr)
365#endif
366
367extern void paging_init(void);
368extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
369void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
370 pte_t *ptep);
371
372/* Encode swap {type,off} tuple into PTE
373 * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
374 * both PAGE_FILE and PAGE_PRESENT are zero in a PTE holding swap "identifier"
375 */
376#define __swp_entry(type, off) ((swp_entry_t) { \
377 ((type) & 0x1f) | ((off) << 13) })
378
379/* Decode a PTE containing a swap "identifier" into its constituents */
380#define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f)
381#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)
382
383/* NOPs, to keep generic kernel happy */
384#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
385#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
386
387#define kern_addr_valid(addr) (1)
388
389/*
390 * remap a physical page `pfn' of size `size' with page protection `prot'
391 * into virtual address `from'
392 */
393#define io_remap_pfn_range(vma, from, pfn, size, prot) \
394 remap_pfn_range(vma, from, pfn, size, prot)
395
396#include <asm-generic/pgtable.h>
397
398/*
399 * No page table caches to initialise
400 */
401#define pgtable_cache_init() do { } while (0)
402
403#endif /* __ASSEMBLY__ */
404
405#endif
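
The swap encode/decode macros above pack a 5-bit @type into bits 4:0 and the
offset into bits 31:13, leaving bits 12:5 clear so that _PAGE_PRESENT and
_PAGE_FILE never appear set in a swap PTE; the offset decode must therefore
shift right by the same 13 bits the encode shifted left. A minimal user-space
sketch of that round trip (names here are illustrative, not the kernel's):

    #include <assert.h>

    typedef struct { unsigned long val; } swp_entry_t;

    #define mk_swap_entry(type, off) \
            ((swp_entry_t){ ((type) & 0x1f) | ((off) << 13) })
    #define swap_type(e)    ((e).val & 0x1f)
    #define swap_offset(e)  ((e).val >> 13)

    int main(void)
    {
            swp_entry_t e = mk_swap_entry(3, 0x1234);

            /* decode must undo the encode: type in low 5 bits, off >> 13 */
            assert(swap_type(e) == 3);
            assert(swap_offset(e) == 0x1234);
            return 0;
    }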
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
new file mode 100644
index 000000000000..5f26b2c1cba0
--- /dev/null
+++ b/arch/arc/include/asm/processor.h
@@ -0,0 +1,151 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * vineetg: March 2009
9 * -Implemented task_pt_regs( )
10 *
11 * Amit Bhor, Sameer Dhavale, Ashwin Chaugule: Codito Technologies 2004
12 */
13
14#ifndef __ASM_ARC_PROCESSOR_H
15#define __ASM_ARC_PROCESSOR_H
16
17#ifdef __KERNEL__
18
19#ifndef __ASSEMBLY__
20
21#include <asm/arcregs.h> /* for STATUS_E1_MASK et all */
22
23/* Arch specific stuff which needs to be saved per task.
24 * However these items are not so important so as to earn a place in
25 * struct thread_info
26 */
27struct thread_struct {
28 unsigned long ksp; /* kernel mode stack pointer */
29 unsigned long callee_reg; /* pointer to callee regs */
30 unsigned long fault_address; /* dbls as brkpt holder as well */
31 unsigned long cause_code; /* Exception Cause Code (ECR) */
32#ifdef CONFIG_ARC_CURR_IN_REG
33 unsigned long user_r25;
34#endif
35#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
36 struct arc_fpu fpu;
37#endif
38};
39
40#define INIT_THREAD { \
41 .ksp = sizeof(init_stack) + (unsigned long) init_stack, \
42}
43
44/* Forward declaration, a strange C thing */
45struct task_struct;
46
47/*
48 * Return saved PC of a blocked thread.
49 */
50unsigned long thread_saved_pc(struct task_struct *t);
51
52#define task_pt_regs(p) \
53 ((struct pt_regs *)(THREAD_SIZE - 4 + (void *)task_stack_page(p)) - 1)
54
55/* Free all resources held by a thread. */
56#define release_thread(thread) do { } while (0)
57
58/* Prepare to copy thread state - unlazy all lazy status */
59#define prepare_to_copy(tsk) do { } while (0)
60
61/*
62 * A lot of busy-wait loops in SMP are based on non-volatile data; without
63 * this compiler barrier they would get optimised away by gcc
64 */
65#ifdef CONFIG_SMP
66#define cpu_relax() __asm__ __volatile__ ("" : : : "memory")
67#else
68#define cpu_relax() do { } while (0)
69#endif
70
71#define copy_segments(tsk, mm) do { } while (0)
72#define release_segments(mm) do { } while (0)
73
74#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret)
75
76/*
77 * Whereabouts of a task's sp, fp, blink when it was last seen in kernel mode.
78 * These can't be derived from pt_regs as that holds the user-mode values
79 */
80#define KSTK_ESP(tsk) (tsk->thread.ksp)
81#define KSTK_BLINK(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1+1)*4)))
82#define KSTK_FP(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1)*4)))
83
84/*
85 * Do necessary setup to start up a newly executed thread.
86 *
87 * E1,E2 so that Interrupts are enabled in user mode
88 * L set, so Loop inhibited to begin with
89 * lp_start and lp_end seeded with bogus non-zero values so as to easily catch
90 * the ARC700 sr to lp_start hardware bug
91 */
92#define start_thread(_regs, _pc, _usp) \
93do { \
94 set_fs(USER_DS); /* reads from user space */ \
95 (_regs)->ret = (_pc); \
96 /* Interrupts enabled in User Mode */ \
97 (_regs)->status32 = STATUS_U_MASK | STATUS_L_MASK \
98 | STATUS_E1_MASK | STATUS_E2_MASK; \
99 (_regs)->sp = (_usp); \
100 /* bogus seed values for debugging */ \
101 (_regs)->lp_start = 0x10; \
102 (_regs)->lp_end = 0x80; \
103} while (0)
104
105extern unsigned int get_wchan(struct task_struct *p);
106
107/*
108 * Default implementation of macro that returns current
109 * instruction pointer ("program counter").
110 * Should the PC register be read instead? This macro does not seem to
111 * be used in many places, so this won't be all that bad.
112 */
113#define current_text_addr() ({ __label__ _l; _l: &&_l; })
114
115#endif /* !__ASSEMBLY__ */
116
117/* Kernel's Virtual memory area.
118 * Unlike other architectures (MIPS, sh, cris) ARC 700 does not have a
119 * "kernel translated" region (like KSEG2 in MIPS). So we use an upper part
120 * of the translated bottom 2GB for kernel virtual memory and protect
121 * these pages from user accesses by disabling Ru, Eu and Wu.
122 */
123#define VMALLOC_SIZE (0x10000000) /* 256M */
124#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
125#define VMALLOC_END (PAGE_OFFSET)
126
127/* Most of the architectures seem to be keeping some kind of padding between
128 * userspace TASK_SIZE and PAGE_OFFSET, i.e. TASK_SIZE != PAGE_OFFSET.
129 */
130#define USER_KERNEL_GUTTER 0x10000000
131
132/* User address space:
133 * On ARC700, the CPU allows the entire lower half of the 32-bit address space
134 * to be translated. Thus potentially 2G (0:0x7FFF_FFFF) could be User vaddr space.
135 * However we steal 256M for kernel addresses (0x7000_0000:0x7FFF_FFFF) and another
136 * 256M (0x6000_0000:0x6FFF_FFFF) as a gutter between the user/kernel spaces.
137 * Thus total User vaddr space is (0:0x5FFF_FFFF)
138 */
139#define TASK_SIZE (PAGE_OFFSET - VMALLOC_SIZE - USER_KERNEL_GUTTER)
140
141#define STACK_TOP TASK_SIZE
142#define STACK_TOP_MAX STACK_TOP
143
144/* This decides where the kernel will search for a free chunk of vm
145 * space during mmap's.
146 */
147#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
148
149#endif /* __KERNEL__ */
150
151#endif /* __ASM_ARC_PROCESSOR_H */
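
The address-space constants above fix the map described in the comments; a
quick arithmetic check of that layout, assuming the usual ARC PAGE_OFFSET of
0x8000_0000 (it is defined elsewhere in the port):

    #include <stdio.h>

    #define PAGE_OFFSET        0x80000000UL  /* assumed value */
    #define VMALLOC_SIZE       0x10000000UL
    #define USER_KERNEL_GUTTER 0x10000000UL
    #define TASK_SIZE (PAGE_OFFSET - VMALLOC_SIZE - USER_KERNEL_GUTTER)

    int main(void)
    {
            /* user: 0x0000_0000..0x5FFF_FFFF */
            printf("user    : 0x0 .. 0x%lx\n", TASK_SIZE - 1);
            /* gutter: 0x6000_0000..0x6FFF_FFFF */
            printf("gutter  : 0x%lx .. 0x%lx\n",
                   TASK_SIZE, PAGE_OFFSET - VMALLOC_SIZE - 1);
            /* vmalloc: 0x7000_0000..0x7FFF_FFFF */
            printf("vmalloc : 0x%lx .. 0x%lx\n",
                   PAGE_OFFSET - VMALLOC_SIZE, PAGE_OFFSET - 1);
            return 0;
    }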
diff --git a/arch/arc/include/asm/prom.h b/arch/arc/include/asm/prom.h
new file mode 100644
index 000000000000..692d0d0789a7
--- /dev/null
+++ b/arch/arc/include/asm/prom.h
@@ -0,0 +1,14 @@
1/*
2 * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _ASM_ARC_PROM_H_
10#define _ASM_ARC_PROM_H_
11
12#define HAVE_ARCH_DEVTREE_FIXUPS
13
14#endif
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
new file mode 100644
index 000000000000..8ae783d20a81
--- /dev/null
+++ b/arch/arc/include/asm/ptrace.h
@@ -0,0 +1,130 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
9 */
10#ifndef __ASM_ARC_PTRACE_H
11#define __ASM_ARC_PTRACE_H
12
13#include <uapi/asm/ptrace.h>
14
15#ifndef __ASSEMBLY__
16
17/* THE pt_regs: Defines how regs are saved during entry into kernel */
18
19struct pt_regs {
20 /*
21 * 1 word gutter after reg-file has been saved
22	 * Technically not needed, since SP always points to a "full" location
23 * (vs. "empty"). But pt_regs is shared with tools....
24 */
25 long res;
26
27 /* Real registers */
28 long bta; /* bta_l1, bta_l2, erbta */
29 long lp_start;
30 long lp_end;
31 long lp_count;
32 long status32; /* status32_l1, status32_l2, erstatus */
33 long ret; /* ilink1, ilink2 or eret */
34 long blink;
35 long fp;
36 long r26; /* gp */
37 long r12;
38 long r11;
39 long r10;
40 long r9;
41 long r8;
42 long r7;
43 long r6;
44 long r5;
45 long r4;
46 long r3;
47 long r2;
48 long r1;
49 long r0;
50 long sp; /* user/kernel sp depending on where we came from */
51 long orig_r0;
52
53	/* to distinguish between exception, syscall, irq */
54 union {
55#ifdef CONFIG_CPU_BIG_ENDIAN
56 /* so that assembly code is same for LE/BE */
57 unsigned long orig_r8:16, event:16;
58#else
59 unsigned long event:16, orig_r8:16;
60#endif
61 long orig_r8_word;
62 };
63};
64
65/* Callee saved registers - need to be saved only when you are scheduled out */
66
67struct callee_regs {
68 long res; /* Again this is not needed */
69 long r25;
70 long r24;
71 long r23;
72 long r22;
73 long r21;
74 long r20;
75 long r19;
76 long r18;
77 long r17;
78 long r16;
79 long r15;
80 long r14;
81 long r13;
82};
83
84#define instruction_pointer(regs) ((regs)->ret)
85#define profile_pc(regs) instruction_pointer(regs)
86
87/* return 1 if user mode or 0 if kernel mode */
88#define user_mode(regs) (regs->status32 & STATUS_U_MASK)
89
90#define user_stack_pointer(regs)\
91({ unsigned int sp; \
92 if (user_mode(regs)) \
93 sp = (regs)->sp;\
94 else \
95 sp = -1; \
96 sp; \
97})
98
99/* return 1 if PC in delay slot */
100#define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK)
101
102#define in_syscall(regs) (regs->event & orig_r8_IS_SCALL)
103#define in_brkpt_trap(regs) (regs->event & orig_r8_IS_BRKPT)
104
105#define syscall_wont_restart(regs) (regs->event |= orig_r8_IS_SCALL_RESTARTED)
106#define syscall_restartable(regs) !(regs->event & orig_r8_IS_SCALL_RESTARTED)
107
108#define current_pt_regs() \
109({ \
110 /* open-coded current_thread_info() */ \
111 register unsigned long sp asm ("sp"); \
112 unsigned long pg_start = (sp & ~(THREAD_SIZE - 1)); \
113 (struct pt_regs *)(pg_start + THREAD_SIZE - 4) - 1; \
114})
115
116static inline long regs_return_value(struct pt_regs *regs)
117{
118 return regs->r0;
119}
120
121#endif /* !__ASSEMBLY__ */
122
123#define orig_r8_IS_SCALL 0x0001
124#define orig_r8_IS_SCALL_RESTARTED 0x0002
125#define orig_r8_IS_BRKPT 0x0004
126#define orig_r8_IS_EXCPN		0x0008
127#define orig_r8_IS_IRQ1 0x0010
128#define orig_r8_IS_IRQ2 0x0020
129
130#endif /* __ASM_ARC_PTRACE_H */
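
current_pt_regs() above finds the register save area by rounding sp down to
the THREAD_SIZE boundary, jumping to the top of that stack page, and stepping
back over the 4-byte gutter plus one pt_regs. A stand-alone sketch of the same
arithmetic (THREAD_SIZE of 8K assumed purely for illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define THREAD_SIZE 8192	/* assumed: PAGE_SIZE << THREAD_SIZE_ORDER */

    struct pt_regs_stub { long res; long r0; };	/* trimmed stand-in */

    static struct pt_regs_stub *pt_regs_of(uintptr_t sp)
    {
            uintptr_t pg_start = sp & ~(uintptr_t)(THREAD_SIZE - 1);
            return (struct pt_regs_stub *)(pg_start + THREAD_SIZE - 4) - 1;
    }

    int main(void)
    {
            /* any sp within the same stack page yields the same pt_regs */
            printf("%p\n", (void *)pt_regs_of(0x90000a40));
            printf("%p\n", (void *)pt_regs_of(0x900001f0));
            return 0;
    }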
diff --git a/arch/arc/include/asm/sections.h b/arch/arc/include/asm/sections.h
new file mode 100644
index 000000000000..6fc1159dfefe
--- /dev/null
+++ b/arch/arc/include/asm/sections.h
@@ -0,0 +1,18 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _ASM_ARC_SECTIONS_H
10#define _ASM_ARC_SECTIONS_H
11
12#include <asm-generic/sections.h>
13
14extern char _int_vec_base_lds[];
15extern char __arc_dccm_base[];
16extern char __dtb_start[];
17
18#endif
diff --git a/arch/arc/include/asm/segment.h b/arch/arc/include/asm/segment.h
new file mode 100644
index 000000000000..da2c45979817
--- /dev/null
+++ b/arch/arc/include/asm/segment.h
@@ -0,0 +1,24 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __ASMARC_SEGMENT_H
10#define __ASMARC_SEGMENT_H
11
12#ifndef __ASSEMBLY__
13
14typedef unsigned long mm_segment_t;
15
16#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
17
18#define KERNEL_DS MAKE_MM_SEG(0)
19#define USER_DS MAKE_MM_SEG(TASK_SIZE)
20
21#define segment_eq(a, b) ((a) == (b))
22
23#endif /* __ASSEMBLY__ */
24#endif /* __ASMARC_SEGMENT_H */
diff --git a/arch/arc/include/asm/serial.h b/arch/arc/include/asm/serial.h
new file mode 100644
index 000000000000..4dff5a1e4128
--- /dev/null
+++ b/arch/arc/include/asm/serial.h
@@ -0,0 +1,25 @@
1/*
2 * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _ASM_ARC_SERIAL_H
10#define _ASM_ARC_SERIAL_H
11
12/*
13 * early-8250 requires BASE_BAUD to be defined and includes this header.
14 * We put in a typical value:
15 * (core clk / 16) - i.e. the UART oversamples each bit 16 times.
16 * Although in a multi-platform image this might not work, especially if the
17 * clk driving the UART is different.
18 * We can't use DeviceTree as this is typically for early serial.
19 */
20
21#include <asm/clk.h>
22
23#define BASE_BAUD (arc_get_core_freq() / 16)
24
25#endif /* _ASM_ARC_SERIAL_H */
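
A worked example of the formula above, assuming an 80 MHz core clock (cf.
CLOCK_TICK_RATE in timex.h): BASE_BAUD comes out as 5,000,000 and the 8250
divisor for 115200 baud as 43:

    #include <stdio.h>

    int main(void)
    {
            unsigned long core_freq = 80000000;        /* assumed 80 MHz */
            unsigned long base_baud = core_freq / 16;  /* 5,000,000 */
            unsigned int  divisor   = base_baud / 115200;  /* -> 43 */

            printf("BASE_BAUD=%lu divisor=%u\n", base_baud, divisor);
            return 0;
    }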
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
new file mode 100644
index 000000000000..229e50681497
--- /dev/null
+++ b/arch/arc/include/asm/setup.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#ifndef __ASMARC_SETUP_H
9#define __ASMARC_SETUP_H
10
11
12#include <linux/types.h>
13#include <uapi/asm/setup.h>
14
15#define COMMAND_LINE_SIZE 256
16
17/*
18 * Data structure to map an ID to a string
19 * Used a lot for bootup reporting of hardware diversity
20 */
21struct id_to_str {
22 int id;
23 const char *str;
24};
25
26struct cpuinfo_data {
27 struct id_to_str info;
28 int up_range;
29};
30
31extern int root_mountflags, end_mem;
32extern int running_on_hw;
33
34void __init setup_processor(void);
35void __init setup_arch_memory(void);
36
37#endif /* __ASMARC_SETUP_H */
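
A sketch of how an id_to_str table is typically scanned during bootup
reporting (the table contents here are illustrative, not from the port):

    #include <stdio.h>

    struct id_to_str { int id; const char *str; };

    static const struct id_to_str mmu_ver[] = {
            { 1, "MMU v1" }, { 2, "MMU v2" }, { 3, "MMU v3" },
            { 0, NULL },    /* sentinel terminates the scan */
    };

    static const char *id_lookup(const struct id_to_str *tbl, int id)
    {
            for (; tbl->str; tbl++)
                    if (tbl->id == id)
                            return tbl->str;
            return "unknown";
    }

    int main(void) { printf("%s\n", id_lookup(mmu_ver, 2)); return 0; }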
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
new file mode 100644
index 000000000000..c4fb211dcd25
--- /dev/null
+++ b/arch/arc/include/asm/smp.h
@@ -0,0 +1,130 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __ASM_ARC_SMP_H
10#define __ASM_ARC_SMP_H
11
12#ifdef CONFIG_SMP
13
14#include <linux/types.h>
15#include <linux/init.h>
16#include <linux/threads.h>
17
18#define raw_smp_processor_id() (current_thread_info()->cpu)
19
20/* including cpumask.h leads to cyclic deps hence this Forward declaration */
21struct cpumask;
22
23/*
24 * APIs provided by arch SMP code to generic code
25 */
26extern void arch_send_call_function_single_ipi(int cpu);
27extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
28
29/*
30 * APIs provided by arch SMP code to rest of arch code
31 */
32extern void __init smp_init_cpus(void);
33extern void __init first_lines_of_secondary(void);
34extern const char *arc_platform_smp_cpuinfo(void);
35
36/*
37 * API expected BY platform smp code (FROM arch smp code)
38 *
39 * smp_ipi_irq_setup:
40 * Takes @cpu and @irq to which the arch-common ISR is hooked up
41 */
42extern int smp_ipi_irq_setup(int cpu, int irq);
43
44/*
45 * struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP
46 *
47 * @info: SoC SMP specific info for /proc/cpuinfo etc
48 * @cpu_kick: For Master to kickstart a cpu (optionally at a PC)
49 * @ipi_send: To send IPI to a @cpumask
50 * @ipi_clear:		To clear IPI received by @cpu at @irq
51 */
52struct plat_smp_ops {
53 const char *info;
54 void (*cpu_kick)(int cpu, unsigned long pc);
55 void (*ipi_send)(void *callmap);
56 void (*ipi_clear)(int cpu, int irq);
57};
58
59/* TBD: stop exporting it for direct population by platform */
60extern struct plat_smp_ops plat_smp_ops;
61
62#endif /* CONFIG_SMP */
63
64/*
65 * ARC700 doesn't support atomic Read-Modify-Write ops.
66 * Originally interrupts had to be disabled around code to guarantee atomicity.
67 * The LLOCK/SCOND insns allow writing interrupt-hassle-free atomic ops
68 * based on retry-if-irq-in-atomic (with hardware assist).
69 * However, despite these, we still provide the IRQ-disabling variant:
70 *
71 * (1) These insns were introduced only in the 4.10 release, so support is
72 *     needed for older releases.
73 *
74 * (2) In an SMP setup, LLOCK/SCOND atomicity across CPUs needs to be
75 *     guaranteed by the platform (not something which the core handles).
76 *     Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
77 *     disabling for atomicity.
78 *
79 *     However, the exported spinlock API is not usable due to cyclic hdr deps
80 *     (even after the system.h disintegration upstream)
81 *	asm/bitops.h -> linux/spinlock.h -> linux/preempt.h
82 *		-> linux/thread_info.h -> linux/bitops.h -> asm/bitops.h
83 *
84 * So the workaround is to use the lowest level arch spinlock API.
85 * The exported spinlock API is smart enough to be a NOP for !CONFIG_SMP,
86 * but the same is not true for the arch backend, hence the need for 2 variants
87 */
88#ifndef CONFIG_ARC_HAS_LLSC
89
90#include <linux/irqflags.h>
91#ifdef CONFIG_SMP
92
93#include <asm/spinlock.h>
94
95extern arch_spinlock_t smp_atomic_ops_lock;
96extern arch_spinlock_t smp_bitops_lock;
97
98#define atomic_ops_lock(flags) do { \
99 local_irq_save(flags); \
100 arch_spin_lock(&smp_atomic_ops_lock); \
101} while (0)
102
103#define atomic_ops_unlock(flags) do { \
104 arch_spin_unlock(&smp_atomic_ops_lock); \
105 local_irq_restore(flags); \
106} while (0)
107
108#define bitops_lock(flags) do { \
109 local_irq_save(flags); \
110 arch_spin_lock(&smp_bitops_lock); \
111} while (0)
112
113#define bitops_unlock(flags) do { \
114 arch_spin_unlock(&smp_bitops_lock); \
115 local_irq_restore(flags); \
116} while (0)
117
118#else /* !CONFIG_SMP */
119
120#define atomic_ops_lock(flags) local_irq_save(flags)
121#define atomic_ops_unlock(flags) local_irq_restore(flags)
122
123#define bitops_lock(flags) local_irq_save(flags)
124#define bitops_unlock(flags) local_irq_restore(flags)
125
126#endif /* !CONFIG_SMP */
127
128#endif /* !CONFIG_ARC_HAS_LLSC */
129
130#endif
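
For the non-LLSC case, the lock/unlock pair above brackets a plain
read-modify-write to make it atomic. A sketch of how an atomic op would be
built on top of it (illustrative kernel-context code; the real definitions
live in atomic.h):

    /* assumes the atomic_ops_lock()/atomic_ops_unlock() macros above */
    static inline void atomic_add_sketch(int i, volatile int *v)
    {
            unsigned long flags;

            atomic_ops_lock(flags);   /* irqs off (+ arch spinlock on SMP) */
            *v += i;                  /* plain RMW, now race-free */
            atomic_ops_unlock(flags);
    }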
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
new file mode 100644
index 000000000000..f158197ac5b0
--- /dev/null
+++ b/arch/arc/include/asm/spinlock.h
@@ -0,0 +1,144 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __ASM_SPINLOCK_H
10#define __ASM_SPINLOCK_H
11
12#include <asm/spinlock_types.h>
13#include <asm/processor.h>
14#include <asm/barrier.h>
15
16#define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
17#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
18#define arch_spin_unlock_wait(x) \
19 do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
20
21static inline void arch_spin_lock(arch_spinlock_t *lock)
22{
23 unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
24
25 __asm__ __volatile__(
26 "1: ex %0, [%1] \n"
27 " breq %0, %2, 1b \n"
28 : "+&r" (tmp)
29 : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
30 : "memory");
31}
32
33static inline int arch_spin_trylock(arch_spinlock_t *lock)
34{
35 unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
36
37 __asm__ __volatile__(
38 "1: ex %0, [%1] \n"
39 : "+r" (tmp)
40 : "r"(&(lock->slock))
41 : "memory");
42
43 return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
44}
45
46static inline void arch_spin_unlock(arch_spinlock_t *lock)
47{
48 lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
49 smp_mb();
50}
51
52/*
53 * Read-write spinlocks, allowing multiple readers but only one writer.
54 *
55 * The spinlock itself is contained in @counter and access to it is
56 * serialized with @lock_mutex.
57 *
58 * Unfair locking as Writers could be starved indefinitely by Reader(s)
59 */
60
61/* Would read_trylock() succeed? */
62#define arch_read_can_lock(x) ((x)->counter > 0)
63
64/* Would write_trylock() succeed? */
65#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
66
67/* 1 - lock taken successfully */
68static inline int arch_read_trylock(arch_rwlock_t *rw)
69{
70 int ret = 0;
71
72 arch_spin_lock(&(rw->lock_mutex));
73
74 /*
75 * zero means writer holds the lock exclusively, deny Reader.
76	 * Otherwise grant the lock to the first/subsequent reader
77 */
78 if (rw->counter > 0) {
79 rw->counter--;
80 ret = 1;
81 }
82
83 arch_spin_unlock(&(rw->lock_mutex));
84
85 smp_mb();
86 return ret;
87}
88
89/* 1 - lock taken successfully */
90static inline int arch_write_trylock(arch_rwlock_t *rw)
91{
92 int ret = 0;
93
94 arch_spin_lock(&(rw->lock_mutex));
95
96 /*
97 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
98	 * deny the writer. Otherwise, if unlocked, grant it to the writer.
99 * Hence the claim that Linux rwlocks are unfair to writers.
100 * (can be starved for an indefinite time by readers).
101 */
102 if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
103 rw->counter = 0;
104 ret = 1;
105 }
106 arch_spin_unlock(&(rw->lock_mutex));
107
108 return ret;
109}
110
111static inline void arch_read_lock(arch_rwlock_t *rw)
112{
113 while (!arch_read_trylock(rw))
114 cpu_relax();
115}
116
117static inline void arch_write_lock(arch_rwlock_t *rw)
118{
119 while (!arch_write_trylock(rw))
120 cpu_relax();
121}
122
123static inline void arch_read_unlock(arch_rwlock_t *rw)
124{
125 arch_spin_lock(&(rw->lock_mutex));
126 rw->counter++;
127 arch_spin_unlock(&(rw->lock_mutex));
128}
129
130static inline void arch_write_unlock(arch_rwlock_t *rw)
131{
132 arch_spin_lock(&(rw->lock_mutex));
133 rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
134 arch_spin_unlock(&(rw->lock_mutex));
135}
136
137#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
138#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
139
140#define arch_spin_relax(lock) cpu_relax()
141#define arch_read_relax(lock) cpu_relax()
142#define arch_write_relax(lock) cpu_relax()
143
144#endif /* __ASM_SPINLOCK_H */
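
The ARC "ex" instruction atomically exchanges a register with memory, so
arch_spin_lock() above simply keeps swapping in the LOCKED value until the
value swapped out reads UNLOCKED. A portable C analogue of that loop, using a
GCC builtin in place of the insn:

    /* a sketch: __atomic_exchange_n plays the role of the ARC "ex" insn */
    static inline void spin_lock_sketch(volatile unsigned int *slock)
    {
            while (__atomic_exchange_n(slock, 1 /* LOCKED */,
                                       __ATOMIC_ACQUIRE) == 1)
                    ;	/* lock was already held; retry */
    }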
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..8276bfd61704
--- /dev/null
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __ASM_SPINLOCK_TYPES_H
10#define __ASM_SPINLOCK_TYPES_H
11
12typedef struct {
13 volatile unsigned int slock;
14} arch_spinlock_t;
15
16#define __ARCH_SPIN_LOCK_UNLOCKED__ 0
17#define __ARCH_SPIN_LOCK_LOCKED__ 1
18
19#define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED__ }
20#define __ARCH_SPIN_LOCK_LOCKED { __ARCH_SPIN_LOCK_LOCKED__ }
21
22/*
23 * Unlocked:	0x0100_0000
24 * Read lock(s):	0x00FF_FFFF down to 0x01 (each reader decrements the counter)
25 * Write lock:	0x0, but only possible if the prior value was "unlocked" 0x0100_0000
26 */
27typedef struct {
28 volatile unsigned int counter;
29 arch_spinlock_t lock_mutex;
30} arch_rwlock_t;
31
32#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
33#define __ARCH_RW_LOCK_UNLOCKED { .counter = __ARCH_RW_LOCK_UNLOCKED__ }
34
35#endif
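
The counter semantics are easiest to see by stepping through them: readers
decrement from the unlocked value, a writer zeroes it, and each state excludes
the other. A small stand-alone check:

    #include <assert.h>

    #define UNLOCKED 0x01000000

    int main(void)
    {
            int counter = UNLOCKED;

            counter--;                      /* reader 1 in */
            counter--;                      /* reader 2 in */
            assert(counter > 0);            /* more readers may enter  */
            assert(counter != UNLOCKED);    /* ...but a writer may not */

            counter += 2;                   /* both readers out */
            assert(counter == UNLOCKED);    /* writer may now take it (-> 0) */
            return 0;
    }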
diff --git a/arch/arc/include/asm/string.h b/arch/arc/include/asm/string.h
new file mode 100644
index 000000000000..87676c8f1412
--- /dev/null
+++ b/arch/arc/include/asm/string.h
@@ -0,0 +1,40 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * vineetg: May 2011
9 * -We had half-optimised memset/memcpy, got better versions of those
10 * -Added memcmp, strchr, strcpy, strcmp, strlen
11 *
12 * Amit Bhor: Codito Technologies 2004
13 */
14
15#ifndef _ASM_ARC_STRING_H
16#define _ASM_ARC_STRING_H
17
18#include <linux/types.h>
19
20#ifdef __KERNEL__
21
22#define __HAVE_ARCH_MEMSET
23#define __HAVE_ARCH_MEMCPY
24#define __HAVE_ARCH_MEMCMP
25#define __HAVE_ARCH_STRCHR
26#define __HAVE_ARCH_STRCPY
27#define __HAVE_ARCH_STRCMP
28#define __HAVE_ARCH_STRLEN
29
30extern void *memset(void *ptr, int, __kernel_size_t);
31extern void *memcpy(void *, const void *, __kernel_size_t);
32extern void memzero(void *ptr, __kernel_size_t n);
33extern int memcmp(const void *, const void *, __kernel_size_t);
34extern char *strchr(const char *s, int c);
35extern char *strcpy(char *dest, const char *src);
36extern int strcmp(const char *cs, const char *ct);
37extern __kernel_size_t strlen(const char *);
38
39#endif /* __KERNEL__ */
40#endif /* _ASM_ARC_STRING_H */
diff --git a/arch/arc/include/asm/switch_to.h b/arch/arc/include/asm/switch_to.h
new file mode 100644
index 000000000000..1b171ab5fec0
--- /dev/null
+++ b/arch/arc/include/asm/switch_to.h
@@ -0,0 +1,41 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _ASM_ARC_SWITCH_TO_H
10#define _ASM_ARC_SWITCH_TO_H
11
12#ifndef __ASSEMBLY__
13
14#include <linux/sched.h>
15
16#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
17
18extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
19#define ARC_FPU_PREV(p, n) fpu_save_restore(p, n)
20#define ARC_FPU_NEXT(t)
21
22#else
23
24#define ARC_FPU_PREV(p, n)
25#define ARC_FPU_NEXT(n)
26
27#endif /* !CONFIG_ARC_FPU_SAVE_RESTORE */
28
29struct task_struct *__switch_to(struct task_struct *p, struct task_struct *n);
30
31#define switch_to(prev, next, last) \
32do { \
33 ARC_FPU_PREV(prev, next); \
34 last = __switch_to(prev, next);\
35 ARC_FPU_NEXT(next); \
36 mb(); \
37} while (0)
38
39#endif
40
41#endif
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
new file mode 100644
index 000000000000..33ab3048e9b2
--- /dev/null
+++ b/arch/arc/include/asm/syscall.h
@@ -0,0 +1,72 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _ASM_ARC_SYSCALL_H
10#define _ASM_ARC_SYSCALL_H 1
11
12#include <linux/err.h>
13#include <linux/sched.h>
14#include <asm/unistd.h>
15#include <asm/ptrace.h> /* in_syscall() */
16
17static inline long
18syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
19{
20 if (user_mode(regs) && in_syscall(regs))
21 return regs->orig_r8;
22 else
23 return -1;
24}
25
26static inline void
27syscall_rollback(struct task_struct *task, struct pt_regs *regs)
28{
29 /* XXX: I can't fathom how pt_regs->r8 will be clobbered ? */
30 regs->r8 = regs->orig_r8;
31}
32
33static inline long
34syscall_get_error(struct task_struct *task, struct pt_regs *regs)
35{
36 /* 0 if syscall succeeded, otherwise -Errorcode */
37 return IS_ERR_VALUE(regs->r0) ? regs->r0 : 0;
38}
39
40static inline long
41syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
42{
43 return regs->r0;
44}
45
46static inline void
47syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
48 int error, long val)
49{
50 regs->r0 = (long) error ?: val;
51}
52
53/*
54 * @i: argument index [0,5]
55 * @n: number of arguments; n+i must be [1,6].
56 */
57static inline void
58syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
59 unsigned int i, unsigned int n, unsigned long *args)
60{
61 unsigned long *inside_ptregs = &(regs->r0);
62 inside_ptregs -= i;
63
64 BUG_ON((i + n) > 6);
65
66 while (n--) {
67 args[i++] = (*inside_ptregs);
68 inside_ptregs--;
69 }
70}
71
72#endif
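
syscall_get_arguments() above relies on pt_regs declaring r7..r0 in descending
register order, so that &regs->r0 - i is exactly &regs->r<i>. A trimmed
stand-alone check of that layout trick:

    #include <assert.h>

    struct regs_tail { long r2, r1, r0; };	/* tail of pt_regs, trimmed */

    int main(void)
    {
            struct regs_tail r = { .r2 = 12, .r1 = 11, .r0 = 10 };
            long *base = &r.r0;

            assert(*(base - 0) == 10);	/* arg 0 in r0 */
            assert(*(base - 1) == 11);	/* arg 1 in r1 */
            assert(*(base - 2) == 12);	/* arg 2 in r2 */
            return 0;
    }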
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
new file mode 100644
index 000000000000..e53a5340ba4f
--- /dev/null
+++ b/arch/arc/include/asm/syscalls.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _ASM_ARC_SYSCALLS_H
10#define _ASM_ARC_SYSCALLS_H 1
11
12#ifdef __KERNEL__
13
14#include <linux/compiler.h>
15#include <linux/linkage.h>
16#include <linux/types.h>
17
18int sys_clone_wrapper(int, int, int, int, int);
19int sys_fork_wrapper(void);
20int sys_vfork_wrapper(void);
21int sys_cacheflush(uint32_t, uint32_t, uint32_t);
22int sys_arc_settls(void *);
23int sys_arc_gettls(void);
24
25#include <asm-generic/syscalls.h>
26
27#endif /* __KERNEL__ */
28
29#endif
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
new file mode 100644
index 000000000000..2d50a4cdd7f3
--- /dev/null
+++ b/arch/arc/include/asm/thread_info.h
@@ -0,0 +1,121 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Vineetg: Oct 2009
9 * No need for ARC specific thread_info allocator (kmalloc/free). This is
10 * anyway a one-page allocation, thus slab alloc can be short-circuited and
11 * the generic version (get_free_page) would be loads better.
12 *
13 * Sameer Dhavale: Codito Technologies 2004
14 */
15
16#ifndef _ASM_THREAD_INFO_H
17#define _ASM_THREAD_INFO_H
18
19#ifdef __KERNEL__
20
21#include <asm/page.h>
22
23#ifdef CONFIG_16KSTACKS
24#define THREAD_SIZE_ORDER 1
25#else
26#define THREAD_SIZE_ORDER 0
27#endif
28
29#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
30
31#ifndef __ASSEMBLY__
32
33#include <linux/thread_info.h>
34#include <asm/segment.h>
35
36/*
37 * low level task data that entry.S needs immediate access to
38 * - this struct should fit entirely inside of one cache line
39 * - this struct shares the supervisor stack pages
40 * - if the contents of this structure are changed, the assembly constants
41 * must also be changed
42 */
43struct thread_info {
44 unsigned long flags; /* low level flags */
45 int preempt_count; /* 0 => preemptable, <0 => BUG */
46 struct task_struct *task; /* main task structure */
47 mm_segment_t addr_limit; /* thread address space */
48 struct exec_domain *exec_domain;/* execution domain */
49 __u32 cpu; /* current CPU */
50 unsigned long thr_ptr; /* TLS ptr */
51 struct restart_block restart_block;
52};
53
54/*
55 * macros/functions for gaining access to the thread information structure
56 *
57 * preempt_count needs to be 1 initially, until the scheduler is functional.
58 */
59#define INIT_THREAD_INFO(tsk) \
60{ \
61 .task = &tsk, \
62 .exec_domain = &default_exec_domain, \
63 .flags = 0, \
64 .cpu = 0, \
65 .preempt_count = INIT_PREEMPT_COUNT, \
66 .addr_limit = KERNEL_DS, \
67 .restart_block = { \
68 .fn = do_no_restart_syscall, \
69 }, \
70}
71
72#define init_thread_info (init_thread_union.thread_info)
73#define init_stack (init_thread_union.stack)
74
75static inline __attribute_const__ struct thread_info *current_thread_info(void)
76{
77 register unsigned long sp asm("sp");
78 return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
79}
80
81#endif /* !__ASSEMBLY__ */
82
83#define PREEMPT_ACTIVE 0x10000000
84
85/*
86 * thread information flags
87 * - these are process state flags that various assembly files may need to
88 * access
89 * - pending work-to-be-done flags are in LSW
90 * - other flags in MSW
91 */
92#define TIF_RESTORE_SIGMASK 0 /* restore sig mask in do_signal() */
93#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
94#define TIF_SIGPENDING 2 /* signal pending */
95#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
96#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
97#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
98
99/* task is being killed by the OOM killer */
100#define TIF_MEMDIE 16
101
102#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
103#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
104#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
105#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
106#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
107#define _TIF_MEMDIE (1<<TIF_MEMDIE)
108
109/* work to do on interrupt/exception return */
110#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
111 _TIF_NOTIFY_RESUME)
112
113/*
114 * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
115 * SYSCALL_TRACE is anyway separately/unconditionally tested right after a
116 * syscall, so all that remains to be tested is _TIF_WORK_MASK
117 */
118
119#endif /* __KERNEL__ */
120
121#endif /* _ASM_THREAD_INFO_H */
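
The return-to-user path tests all pending-work bits in one AND against
_TIF_WORK_MASK; a minimal sketch of that check using the flag values above:

    #include <assert.h>

    #define TIF_NOTIFY_RESUME 1
    #define TIF_SIGPENDING    2
    #define TIF_NEED_RESCHED  3
    #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
    #define _TIF_SIGPENDING    (1 << TIF_SIGPENDING)
    #define _TIF_NEED_RESCHED  (1 << TIF_NEED_RESCHED)
    #define _TIF_WORK_MASK \
            (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_RESUME)

    int main(void)
    {
            unsigned long flags = 0;
            assert(!(flags & _TIF_WORK_MASK));	/* fast path: no work */

            flags |= _TIF_SIGPENDING;		/* a signal is queued */
            assert(flags & _TIF_WORK_MASK);	/* slow path taken    */
            return 0;
    }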
diff --git a/arch/arc/include/asm/timex.h b/arch/arc/include/asm/timex.h
new file mode 100644
index 000000000000..0a82960a75e9
--- /dev/null
+++ b/arch/arc/include/asm/timex.h
@@ -0,0 +1,18 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _ASM_ARC_TIMEX_H
10#define _ASM_ARC_TIMEX_H
11
12#define CLOCK_TICK_RATE 80000000 /* slated to be removed */
13
14#include <asm-generic/timex.h>
15
16/* XXX: get_cycles() to be implemented with RTSC insn */
17
18#endif /* _ASM_ARC_TIMEX_H */
diff --git a/arch/arc/include/asm/tlb-mmu1.h b/arch/arc/include/asm/tlb-mmu1.h
new file mode 100644
index 000000000000..a5ff961b1efc
--- /dev/null
+++ b/arch/arc/include/asm/tlb-mmu1.h
@@ -0,0 +1,104 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __ASM_TLB_MMU_V1_H__
10#define __ASM_TLB_MMU_V1_H__
11
12#if defined(__ASSEMBLY__) && (CONFIG_ARC_MMU_VER == 1)
13
14#include <asm/tlb.h>
15
16.macro TLB_WRITE_HEURISTICS
17
18#define JH_HACK1
19#undef JH_HACK2
20#undef JH_HACK3
21
22#ifdef JH_HACK3
23; Calculate set index for 2-way MMU
24; -avoiding use of GetIndex from MMU
25; and its unpleasant LFSR pseudo-random sequence
26;
27; r1 = TLBPD0 from TLB_RELOAD above
28;
29; -- jh_ex_way_set not cleared on startup
30; didn't want to change setup.c
31; hence extra instruction to clean
32;
33; -- should be in cache since in same line
34; as r0/r1 saves above
35;
36ld r0,[jh_ex_way_sel] ; victim pointer
37and r0,r0,1 ; clean
38xor.f r0,r0,1 ; flip
39st r0,[jh_ex_way_sel] ; store back
40asr r0,r1,12 ; get set # <<1, note bit 12=R=0
41or.nz r0,r0,1 ; set way bit
42and r0,r0,0xff ; clean
43sr r0,[ARC_REG_TLBINDEX]
44#endif
45
46#ifdef JH_HACK2
47; JH hack #2
48; Faster than hack #1 in non-thrash case, but hard-coded for 2-way MMU
49; Slower in thrash case (where it matters) because more code is executed
50; Inefficient due to two-register paradigm of this miss handler
51;
52/* r1 = data TLBPD0 at this point */
53lr r0,[eret] /* instruction address */
54xor r0,r0,r1 /* compare set # */
55and.f r0,r0,0x000fe000 /* 2-way MMU mask */
56bne 88f /* not in same set - no need to probe */
57
58lr r0,[eret] /* instruction address */
59and r0,r0,PAGE_MASK /* VPN of instruction address */
60; lr r1,[ARC_REG_TLBPD0] /* Data VPN+ASID - already in r1 from TLB_RELOAD*/
61and r1,r1,0xff /* Data ASID */
62or r0,r0,r1 /* Instruction address + Data ASID */
63
64lr r1,[ARC_REG_TLBPD0] /* save TLBPD0 containing data TLB*/
65sr r0,[ARC_REG_TLBPD0] /* write instruction address to TLBPD0 */
66sr TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */
67lr r0,[ARC_REG_TLBINDEX] /* r0 = index where instruction is, if at all */
68sr r1,[ARC_REG_TLBPD0] /* restore TLBPD0 */
69
70xor r0,r0,1 /* flip bottom bit of data index */
71b.d 89f
72sr r0,[ARC_REG_TLBINDEX] /* and put it back */
7388:
74sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
7589:
76#endif
77
78#ifdef JH_HACK1
79;
80; Always checks whether instruction will be kicked out by dtlb miss
81;
82mov_s r3, r1 ; save PD0 prepared by TLB_RELOAD in r3
83lr r0,[eret] /* instruction address */
84and r0,r0,PAGE_MASK /* VPN of instruction address */
85bmsk r1,r3,7 /* Data ASID, bits 7-0 */
86or_s r0,r0,r1 /* Instruction address + Data ASID */
87
88sr r0,[ARC_REG_TLBPD0] /* write instruction address to TLBPD0 */
89sr TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */
90lr r0,[ARC_REG_TLBINDEX] /* r0 = index where instruction is, if at all */
91sr r3,[ARC_REG_TLBPD0] /* restore TLBPD0 */
92
93sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
94lr r1,[ARC_REG_TLBINDEX] /* r1 = index where MMU wants to put data */
95cmp r0,r1 /* if no match on indices, go around */
96xor.eq r1,r1,1 /* flip bottom bit of data index */
97sr r1,[ARC_REG_TLBINDEX] /* and put it back */
98#endif
99
100.endm
101
102#endif
103
104#endif
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
new file mode 100644
index 000000000000..3eb2ce0bdfa3
--- /dev/null
+++ b/arch/arc/include/asm/tlb.h
@@ -0,0 +1,58 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _ASM_ARC_TLB_H
10#define _ASM_ARC_TLB_H
11
12#ifdef __KERNEL__
13
14#include <asm/pgtable.h>
15
16/* Masks for actual TLB "PD"s */
17#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT)
18#define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \
19 _PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
20 _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
21
22#ifndef __ASSEMBLY__
23
24#define tlb_flush(tlb) local_flush_tlb_mm((tlb)->mm)
25
26/*
27 * This pair is called at time of munmap/exit to flush cache and TLB entries
28 * for mappings being torn down.
29 * 1) cache-flush part - implemented via tlb_start_vma( ), can be a NOP (for now)
30 *    as we don't support aliasing configs in our VIPT D$.
31 * 2) tlb-flush part - implemented via tlb_end_vma( ), can be a NOP as well,
32 *    albeit for different reasons - it's better handled by moving to a new ASID
33 *
34 * Note, read http://lkml.org/lkml/2004/1/15/6
35 */
36#define tlb_start_vma(tlb, vma)
37#define tlb_end_vma(tlb, vma)
38
39#define __tlb_remove_tlb_entry(tlb, ptep, address)
40
41#include <linux/pagemap.h>
42#include <asm-generic/tlb.h>
43
44#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
45void tlb_paranoid_check(unsigned int pid_sw, unsigned long address);
46#else
47#define tlb_paranoid_check(a, b)
48#endif
49
50void arc_mmu_init(void);
51extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
52void __init read_decode_mmu_bcr(void);
53
54#endif /* __ASSEMBLY__ */
55
56#endif /* __KERNEL__ */
57
58#endif /* _ASM_ARC_TLB_H */
diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h
new file mode 100644
index 000000000000..b2f9bc7f68c8
--- /dev/null
+++ b/arch/arc/include/asm/tlbflush.h
@@ -0,0 +1,28 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __ASM_ARC_TLBFLUSH__
10#define __ASM_ARC_TLBFLUSH__
11
12#include <linux/mm.h>
13
14void local_flush_tlb_all(void);
15void local_flush_tlb_mm(struct mm_struct *mm);
16void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
17void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
18void local_flush_tlb_range(struct vm_area_struct *vma,
19 unsigned long start, unsigned long end);
20
21/* XXX: Revisit for SMP */
22#define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e)
23#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
24#define flush_tlb_kernel_range(s, e) local_flush_tlb_kernel_range(s, e)
25#define flush_tlb_all() local_flush_tlb_all()
26#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
27
28#endif
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
new file mode 100644
index 000000000000..32420824375b
--- /dev/null
+++ b/arch/arc/include/asm/uaccess.h
@@ -0,0 +1,751 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * vineetg: June 2010
9 * -__clear_user( ) called multiple times during elf load was byte loop
10 * converted to do as much word clear as possible.
11 *
12 * vineetg: Dec 2009
13 * -Hand crafted constant propagation for "constant" copy sizes
14 * -stock kernel shrunk by 33K at -O3
15 *
16 * vineetg: Sept 2009
17 * -Added option to (UN)inline copy_(to|from)_user to reduce code sz
18 * -kernel shrunk by 200K even at -O3 (gcc 4.2.1)
19 * -Enabled when doing -Os
20 *
21 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
22 */
23
24#ifndef _ASM_ARC_UACCESS_H
25#define _ASM_ARC_UACCESS_H
26
27#include <linux/sched.h>
28#include <asm/errno.h>
29#include <linux/string.h> /* for generic string functions */
30
31
32#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
33
34/*
35 * Algorithmically, for __user_ok() we want to do:
36 * 	(start < TASK_SIZE) && (start+len < TASK_SIZE)
37 * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
38 * emitted directly in code.
39 *
40 * This can however be rewritten as follows:
41 *	(len <= TASK_SIZE) && (start+len < TASK_SIZE)
42 *
43 * Because it essentially checks if the buffer end is within limit and @len is
44 * non-negative, which implies that the buffer start will be within limit too.
45 *
46 * The reason for rewriting being that, for the majority of cases, @len is a
47 * compile time constant, causing the first sub-expression to be compile time
48 * subsumed.
49 *
50 * The second part would generate weird large LIMMs e.g. (0x6000_0000 - 0x10),
51 * so we check for TASK_SIZE using get_fs() since the addr_limit load from mem
52 * would already have been done at this call site for __kernel_ok()
53 *
54 */
55#define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \
56 (((addr)+(sz)) <= get_fs()))
57#define __access_ok(addr, sz) (unlikely(__kernel_ok) || \
58 likely(__user_ok((addr), (sz))))
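/*
 * Worked example of the rewrite above (an editorial sketch, not kernel code):
 * with TASK_SIZE = 0x6000_0000, addr = 0x5FFF_FFF0, sz = 0x20:
 *   (sz <= TASK_SIZE)     : 0x20 <= 0x6000_0000        -> true, and folded
 *                           away at compile time for a constant @sz
 *   (addr+sz <= get_fs()) : 0x6000_0010 <= 0x6000_0000 -> false => denied
 * A bogus huge/negative @sz wraps to a large unsigned value and already fails
 * the first test, so the start address needs no separate check.
 */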
59
60/*********** Single byte/hword/word copies ******************/
61
62#define __get_user_fn(sz, u, k) \
63({ \
64 long __ret = 0; /* success by default */ \
65 switch (sz) { \
66 case 1: __arc_get_user_one(*(k), u, "ldb", __ret); break; \
67 case 2: __arc_get_user_one(*(k), u, "ldw", __ret); break; \
68 case 4: __arc_get_user_one(*(k), u, "ld", __ret); break; \
69 case 8: __arc_get_user_one_64(*(k), u, __ret); break; \
70 } \
71 __ret; \
72})
73
74/*
75 * Returns 0 on success, -EFAULT if not.
76 * @ret already contains 0 - given that errors will be less likely
77 * (hence +r asm constraint below).
78 * In case of error, fixup code will make it -EFAULT
79 */
80#define __arc_get_user_one(dst, src, op, ret) \
81 __asm__ __volatile__( \
82 "1: "op" %1,[%2]\n" \
83 "2: ;nop\n" \
84 " .section .fixup, \"ax\"\n" \
85 " .align 4\n" \
86 "3: mov %0, %3\n" \
87 " j 2b\n" \
88 " .previous\n" \
89 " .section __ex_table, \"a\"\n" \
90 " .align 4\n" \
91 " .word 1b,3b\n" \
92 " .previous\n" \
93 \
94 : "+r" (ret), "=r" (dst) \
95 : "r" (src), "ir" (-EFAULT))
96
97#define __arc_get_user_one_64(dst, src, ret) \
98 __asm__ __volatile__( \
99 "1: ld %1,[%2]\n" \
100 "4: ld %R1,[%2, 4]\n" \
101 "2: ;nop\n" \
102 " .section .fixup, \"ax\"\n" \
103 " .align 4\n" \
104 "3: mov %0, %3\n" \
105 " j 2b\n" \
106 " .previous\n" \
107 " .section __ex_table, \"a\"\n" \
108 " .align 4\n" \
109 " .word 1b,3b\n" \
110 " .word 4b,3b\n" \
111 " .previous\n" \
112 \
113 : "+r" (ret), "=r" (dst) \
114 : "r" (src), "ir" (-EFAULT))
115
116#define __put_user_fn(sz, u, k) \
117({ \
118 long __ret = 0; /* success by default */ \
119 switch (sz) { \
120 case 1: __arc_put_user_one(*(k), u, "stb", __ret); break; \
121 case 2: __arc_put_user_one(*(k), u, "stw", __ret); break; \
122 case 4: __arc_put_user_one(*(k), u, "st", __ret); break; \
123 case 8: __arc_put_user_one_64(*(k), u, __ret); break; \
124 } \
125 __ret; \
126})
127
128#define __arc_put_user_one(src, dst, op, ret) \
129 __asm__ __volatile__( \
130 "1: "op" %1,[%2]\n" \
131 "2: ;nop\n" \
132 " .section .fixup, \"ax\"\n" \
133 " .align 4\n" \
134 "3: mov %0, %3\n" \
135 " j 2b\n" \
136 " .previous\n" \
137 " .section __ex_table, \"a\"\n" \
138 " .align 4\n" \
139 " .word 1b,3b\n" \
140 " .previous\n" \
141 \
142 : "+r" (ret) \
143 : "r" (src), "r" (dst), "ir" (-EFAULT))
144
145#define __arc_put_user_one_64(src, dst, ret) \
146 __asm__ __volatile__( \
147 "1: st %1,[%2]\n" \
148 "4: st %R1,[%2, 4]\n" \
149 "2: ;nop\n" \
150 " .section .fixup, \"ax\"\n" \
151 " .align 4\n" \
152 "3: mov %0, %3\n" \
153 " j 2b\n" \
154 " .previous\n" \
155 " .section __ex_table, \"a\"\n" \
156 " .align 4\n" \
157 " .word 1b,3b\n" \
158 " .word 4b,3b\n" \
159 " .previous\n" \
160 \
161 : "+r" (ret) \
162 : "r" (src), "r" (dst), "ir" (-EFAULT))
163
164
165static inline unsigned long
166__arc_copy_from_user(void *to, const void __user *from, unsigned long n)
167{
168 long res = 0;
169 char val;
170 unsigned long tmp1, tmp2, tmp3, tmp4;
171 unsigned long orig_n = n;
172
173 if (n == 0)
174 return 0;
175
176 /* unaligned */
177 if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
178
179 unsigned char tmp;
180
181 __asm__ __volatile__ (
182 " mov.f lp_count, %0 \n"
183 " lpnz 2f \n"
184 "1: ldb.ab %1, [%3, 1] \n"
185 " stb.ab %1, [%2, 1] \n"
186 " sub %0,%0,1 \n"
187 "2: ;nop \n"
188 " .section .fixup, \"ax\" \n"
189 " .align 4 \n"
190 "3: j 2b \n"
191 " .previous \n"
192 " .section __ex_table, \"a\" \n"
193 " .align 4 \n"
194 " .word 1b, 3b \n"
195 " .previous \n"
196
197 : "+r" (n),
198 /*
199		 * Note the '&' earlyclobber on the operand, to make sure the
200 * temporary register inside the loop is not the same as
201 * FROM or TO.
202 */
203 "=&r" (tmp), "+r" (to), "+r" (from)
204 :
205 : "lp_count", "lp_start", "lp_end", "memory");
206
207 return n;
208 }
209
210 /*
211 * Hand-crafted constant propagation to reduce code sz of the
212 * laddered copy 16x,8,4,2,1
213 */
214 if (__builtin_constant_p(orig_n)) {
215 res = orig_n;
216
217 if (orig_n / 16) {
218 orig_n = orig_n % 16;
219
220 __asm__ __volatile__(
221 " lsr lp_count, %7,4 \n"
222 " lp 3f \n"
223 "1: ld.ab %3, [%2, 4] \n"
224 "11: ld.ab %4, [%2, 4] \n"
225 "12: ld.ab %5, [%2, 4] \n"
226 "13: ld.ab %6, [%2, 4] \n"
227 " st.ab %3, [%1, 4] \n"
228 " st.ab %4, [%1, 4] \n"
229 " st.ab %5, [%1, 4] \n"
230 " st.ab %6, [%1, 4] \n"
231 " sub %0,%0,16 \n"
232 "3: ;nop \n"
233 " .section .fixup, \"ax\" \n"
234 " .align 4 \n"
235 "4: j 3b \n"
236 " .previous \n"
237 " .section __ex_table, \"a\" \n"
238 " .align 4 \n"
239 " .word 1b, 4b \n"
240 " .word 11b,4b \n"
241 " .word 12b,4b \n"
242 " .word 13b,4b \n"
243 " .previous \n"
244 : "+r" (res), "+r"(to), "+r"(from),
245 "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
246 : "ir"(n)
247 : "lp_count", "memory");
248 }
249 if (orig_n / 8) {
250 orig_n = orig_n % 8;
251
252 __asm__ __volatile__(
253 "14: ld.ab %3, [%2,4] \n"
254 "15: ld.ab %4, [%2,4] \n"
255 " st.ab %3, [%1,4] \n"
256 " st.ab %4, [%1,4] \n"
257 " sub %0,%0,8 \n"
258 "31: ;nop \n"
259 " .section .fixup, \"ax\" \n"
260 " .align 4 \n"
261 "4: j 31b \n"
262 " .previous \n"
263 " .section __ex_table, \"a\" \n"
264 " .align 4 \n"
265 " .word 14b,4b \n"
266 " .word 15b,4b \n"
267 " .previous \n"
268 : "+r" (res), "+r"(to), "+r"(from),
269 "=r"(tmp1), "=r"(tmp2)
270 :
271 : "memory");
272 }
273 if (orig_n / 4) {
274 orig_n = orig_n % 4;
275
276 __asm__ __volatile__(
277 "16: ld.ab %3, [%2,4] \n"
278 " st.ab %3, [%1,4] \n"
279 " sub %0,%0,4 \n"
280 "32: ;nop \n"
281 " .section .fixup, \"ax\" \n"
282 " .align 4 \n"
283 "4: j 32b \n"
284 " .previous \n"
285 " .section __ex_table, \"a\" \n"
286 " .align 4 \n"
287 " .word 16b,4b \n"
288 " .previous \n"
289 : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
290 :
291 : "memory");
292 }
293 if (orig_n / 2) {
294 orig_n = orig_n % 2;
295
296 __asm__ __volatile__(
297 "17: ldw.ab %3, [%2,2] \n"
298 " stw.ab %3, [%1,2] \n"
299 " sub %0,%0,2 \n"
300 "33: ;nop \n"
301 " .section .fixup, \"ax\" \n"
302 " .align 4 \n"
303 "4: j 33b \n"
304 " .previous \n"
305 " .section __ex_table, \"a\" \n"
306 " .align 4 \n"
307 " .word 17b,4b \n"
308 " .previous \n"
309 : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
310 :
311 : "memory");
312 }
313 if (orig_n & 1) {
314 __asm__ __volatile__(
315 "18: ldb.ab %3, [%2,2] \n"
316 " stb.ab %3, [%1,2] \n"
317 " sub %0,%0,1 \n"
318 "34: ; nop \n"
319 " .section .fixup, \"ax\" \n"
320 " .align 4 \n"
321 "4: j 34b \n"
322 " .previous \n"
323 " .section __ex_table, \"a\" \n"
324 " .align 4 \n"
325 " .word 18b,4b \n"
326 " .previous \n"
327 : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
328 :
329 : "memory");
330 }
331 } else { /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */
332
333 __asm__ __volatile__(
334 " mov %0,%3 \n"
335 " lsr.f lp_count, %3,4 \n" /* 16x bytes */
336 " lpnz 3f \n"
337 "1: ld.ab %5, [%2, 4] \n"
338 "11: ld.ab %6, [%2, 4] \n"
339 "12: ld.ab %7, [%2, 4] \n"
340 "13: ld.ab %8, [%2, 4] \n"
341 " st.ab %5, [%1, 4] \n"
342 " st.ab %6, [%1, 4] \n"
343 " st.ab %7, [%1, 4] \n"
344 " st.ab %8, [%1, 4] \n"
345 " sub %0,%0,16 \n"
346 "3: and.f %3,%3,0xf \n" /* stragglers */
347 " bz 34f \n"
348 " bbit0 %3,3,31f \n" /* 8 bytes left */
349 "14: ld.ab %5, [%2,4] \n"
350 "15: ld.ab %6, [%2,4] \n"
351 " st.ab %5, [%1,4] \n"
352 " st.ab %6, [%1,4] \n"
353 " sub.f %0,%0,8 \n"
354 "31: bbit0 %3,2,32f \n" /* 4 bytes left */
355 "16: ld.ab %5, [%2,4] \n"
356 " st.ab %5, [%1,4] \n"
357 " sub.f %0,%0,4 \n"
358 "32: bbit0 %3,1,33f \n" /* 2 bytes left */
359 "17: ldw.ab %5, [%2,2] \n"
360 " stw.ab %5, [%1,2] \n"
361 " sub.f %0,%0,2 \n"
362 "33: bbit0 %3,0,34f \n"
363 "18: ldb.ab %5, [%2,1] \n" /* 1 byte left */
364 " stb.ab %5, [%1,1] \n"
365 " sub.f %0,%0,1 \n"
366 "34: ;nop \n"
367 " .section .fixup, \"ax\" \n"
368 " .align 4 \n"
369 "4: j 34b \n"
370 " .previous \n"
371 " .section __ex_table, \"a\" \n"
372 " .align 4 \n"
373 " .word 1b, 4b \n"
374 " .word 11b,4b \n"
375 " .word 12b,4b \n"
376 " .word 13b,4b \n"
377 " .word 14b,4b \n"
378 " .word 15b,4b \n"
379 " .word 16b,4b \n"
380 " .word 17b,4b \n"
381 " .word 18b,4b \n"
382 " .previous \n"
383 : "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
384 "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
385 :
386 : "lp_count", "memory");
387 }
388
389 return res;
390}
391
392extern unsigned long slowpath_copy_to_user(void __user *to, const void *from,
393 unsigned long n);
394
395static inline unsigned long
396__arc_copy_to_user(void __user *to, const void *from, unsigned long n)
397{
398 long res = 0;
399 char val;
400 unsigned long tmp1, tmp2, tmp3, tmp4;
401 unsigned long orig_n = n;
402
403 if (n == 0)
404 return 0;
405
406 /* unaligned */
407 if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
408
409 unsigned char tmp;
410
411 __asm__ __volatile__(
412 " mov.f lp_count, %0 \n"
413 " lpnz 3f \n"
414 " ldb.ab %1, [%3, 1] \n"
415 "1: stb.ab %1, [%2, 1] \n"
416 " sub %0, %0, 1 \n"
417 "3: ;nop \n"
418 " .section .fixup, \"ax\" \n"
419 " .align 4 \n"
420 "4: j 3b \n"
421 " .previous \n"
422 " .section __ex_table, \"a\" \n"
423 " .align 4 \n"
424 " .word 1b, 4b \n"
425 " .previous \n"
426
427 : "+r" (n),
428	/* Note the '&' earlyclobber on the operand, to make sure the
429 * temporary register inside the loop is not the same as
430 * FROM or TO.
431 */
432 "=&r" (tmp), "+r" (to), "+r" (from)
433 :
434 : "lp_count", "lp_start", "lp_end", "memory");
435
436 return n;
437 }
438
439 if (__builtin_constant_p(orig_n)) {
440 res = orig_n;
441
442 if (orig_n / 16) {
443 orig_n = orig_n % 16;
444
445 __asm__ __volatile__(
446 " lsr lp_count, %7,4 \n"
447 " lp 3f \n"
448 " ld.ab %3, [%2, 4] \n"
449 " ld.ab %4, [%2, 4] \n"
450 " ld.ab %5, [%2, 4] \n"
451 " ld.ab %6, [%2, 4] \n"
452 "1: st.ab %3, [%1, 4] \n"
453 "11: st.ab %4, [%1, 4] \n"
454 "12: st.ab %5, [%1, 4] \n"
455 "13: st.ab %6, [%1, 4] \n"
456 " sub %0, %0, 16 \n"
457 "3:;nop \n"
458 " .section .fixup, \"ax\" \n"
459 " .align 4 \n"
460 "4: j 3b \n"
461 " .previous \n"
462 " .section __ex_table, \"a\" \n"
463 " .align 4 \n"
464 " .word 1b, 4b \n"
465 " .word 11b,4b \n"
466 " .word 12b,4b \n"
467 " .word 13b,4b \n"
468 " .previous \n"
469 : "+r" (res), "+r"(to), "+r"(from),
470 "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
471 : "ir"(n)
472 : "lp_count", "memory");
473 }
474 if (orig_n / 8) {
475 orig_n = orig_n % 8;
476
477 __asm__ __volatile__(
478 " ld.ab %3, [%2,4] \n"
479 " ld.ab %4, [%2,4] \n"
480 "14: st.ab %3, [%1,4] \n"
481 "15: st.ab %4, [%1,4] \n"
482 " sub %0, %0, 8 \n"
483 "31:;nop \n"
484 " .section .fixup, \"ax\" \n"
485 " .align 4 \n"
486 "4: j 31b \n"
487 " .previous \n"
488 " .section __ex_table, \"a\" \n"
489 " .align 4 \n"
490 " .word 14b,4b \n"
491 " .word 15b,4b \n"
492 " .previous \n"
493 : "+r" (res), "+r"(to), "+r"(from),
494 "=r"(tmp1), "=r"(tmp2)
495 :
496 : "memory");
497 }
498 if (orig_n / 4) {
499 orig_n = orig_n % 4;
500
501 __asm__ __volatile__(
502 " ld.ab %3, [%2,4] \n"
503 "16: st.ab %3, [%1,4] \n"
504 " sub %0, %0, 4 \n"
505 "32:;nop \n"
506 " .section .fixup, \"ax\" \n"
507 " .align 4 \n"
508 "4: j 32b \n"
509 " .previous \n"
510 " .section __ex_table, \"a\" \n"
511 " .align 4 \n"
512 " .word 16b,4b \n"
513 " .previous \n"
514 : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
515 :
516 : "memory");
517 }
518 if (orig_n / 2) {
519 orig_n = orig_n % 2;
520
521 __asm__ __volatile__(
522 " ldw.ab %3, [%2,2] \n"
523 "17: stw.ab %3, [%1,2] \n"
524 " sub %0, %0, 2 \n"
525 "33:;nop \n"
526 " .section .fixup, \"ax\" \n"
527 " .align 4 \n"
528 "4: j 33b \n"
529 " .previous \n"
530 " .section __ex_table, \"a\" \n"
531 " .align 4 \n"
532 " .word 17b,4b \n"
533 " .previous \n"
534 : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
535 :
536 : "memory");
537 }
538 if (orig_n & 1) {
539 __asm__ __volatile__(
540 " ldb.ab %3, [%2,1] \n"
541 "18: stb.ab %3, [%1,1] \n"
542 " sub %0, %0, 1 \n"
543 "34: ;nop \n"
544 " .section .fixup, \"ax\" \n"
545 " .align 4 \n"
546 "4: j 34b \n"
547 " .previous \n"
548 " .section __ex_table, \"a\" \n"
549 " .align 4 \n"
550 " .word 18b,4b \n"
551 " .previous \n"
552 : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
553 :
554 : "memory");
555 }
556 } else { /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */
557
558 __asm__ __volatile__(
559 " mov %0,%3 \n"
560 " lsr.f lp_count, %3,4 \n" /* 16x bytes */
561 " lpnz 3f \n"
562 " ld.ab %5, [%2, 4] \n"
563 " ld.ab %6, [%2, 4] \n"
564 " ld.ab %7, [%2, 4] \n"
565 " ld.ab %8, [%2, 4] \n"
566 "1: st.ab %5, [%1, 4] \n"
567 "11: st.ab %6, [%1, 4] \n"
568 "12: st.ab %7, [%1, 4] \n"
569 "13: st.ab %8, [%1, 4] \n"
570 " sub %0, %0, 16 \n"
571 "3: and.f %3,%3,0xf \n" /* stragglers */
572 " bz 34f \n"
573 " bbit0 %3,3,31f \n" /* 8 bytes left */
574 " ld.ab %5, [%2,4] \n"
575 " ld.ab %6, [%2,4] \n"
576 "14: st.ab %5, [%1,4] \n"
577 "15: st.ab %6, [%1,4] \n"
578 " sub.f %0, %0, 8 \n"
579 "31: bbit0 %3,2,32f \n" /* 4 bytes left */
580 " ld.ab %5, [%2,4] \n"
581 "16: st.ab %5, [%1,4] \n"
582 " sub.f %0, %0, 4 \n"
583 "32: bbit0 %3,1,33f \n" /* 2 bytes left */
584 " ldw.ab %5, [%2,2] \n"
585 "17: stw.ab %5, [%1,2] \n"
586 " sub.f %0, %0, 2 \n"
587 "33: bbit0 %3,0,34f \n"
588 " ldb.ab %5, [%2,1] \n" /* 1 byte left */
589 "18: stb.ab %5, [%1,1] \n"
590 " sub.f %0, %0, 1 \n"
591 "34: ;nop \n"
592 " .section .fixup, \"ax\" \n"
593 " .align 4 \n"
594 "4: j 34b \n"
595 " .previous \n"
596 " .section __ex_table, \"a\" \n"
597 " .align 4 \n"
598 " .word 1b, 4b \n"
599 " .word 11b,4b \n"
600 " .word 12b,4b \n"
601 " .word 13b,4b \n"
602 " .word 14b,4b \n"
603 " .word 15b,4b \n"
604 " .word 16b,4b \n"
605 " .word 17b,4b \n"
606 " .word 18b,4b \n"
607 " .previous \n"
608 : "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
609 "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
610 :
611 : "lp_count", "memory");
612 }
613
614 return res;
615}
616
617static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
618{
619 long res = n;
620 unsigned char *d_char = to;
621
622 __asm__ __volatile__(
623 " bbit0 %0, 0, 1f \n"
624 "75: stb.ab %2, [%0,1] \n"
625 " sub %1, %1, 1 \n"
626 "1: bbit0 %0, 1, 2f \n"
627 "76: stw.ab %2, [%0,2] \n"
628 " sub %1, %1, 2 \n"
629 "2: asr.f lp_count, %1, 2 \n"
630 " lpnz 3f \n"
631 "77: st.ab %2, [%0,4] \n"
632 " sub %1, %1, 4 \n"
633 "3: bbit0 %1, 1, 4f \n"
634 "78: stw.ab %2, [%0,2] \n"
635 " sub %1, %1, 2 \n"
636 "4: bbit0 %1, 0, 5f \n"
637 "79: stb.ab %2, [%0,1] \n"
638 " sub %1, %1, 1 \n"
639 "5: \n"
640 " .section .fixup, \"ax\" \n"
641 " .align 4 \n"
642 "3: j 5b \n"
643 " .previous \n"
644 " .section __ex_table, \"a\" \n"
645 " .align 4 \n"
646 " .word 75b, 3b \n"
647 " .word 76b, 3b \n"
648 " .word 77b, 3b \n"
649 " .word 78b, 3b \n"
650 " .word 79b, 3b \n"
651 " .previous \n"
652 : "+r"(d_char), "+r"(res)
653 : "i"(0)
654 : "lp_count", "lp_start", "lp_end", "memory");
655
656 return res;
657}
658
659static inline long
660__arc_strncpy_from_user(char *dst, const char __user *src, long count)
661{
662 long res = count;
663 char val;
664 unsigned int hw_count;
665
666 if (count == 0)
667 return 0;
668
669 __asm__ __volatile__(
670 " lp 2f \n"
671 "1: ldb.ab %3, [%2, 1] \n"
672 " breq.d %3, 0, 2f \n"
673 " stb.ab %3, [%1, 1] \n"
674 "2: sub %0, %6, %4 \n"
675 "3: ;nop \n"
676 " .section .fixup, \"ax\" \n"
677 " .align 4 \n"
678 "4: mov %0, %5 \n"
679 " j 3b \n"
680 " .previous \n"
681 " .section __ex_table, \"a\" \n"
682 " .align 4 \n"
683 " .word 1b, 4b \n"
684 " .previous \n"
685 : "=r"(res), "+r"(dst), "+r"(src), "=&r"(val), "=l"(hw_count)
686 : "g"(-EFAULT), "ir"(count), "4"(count) /* this "4" seeds lp_count */
687 : "memory");
688
689 return res;
690}
691
692static inline long __arc_strnlen_user(const char __user *s, long n)
693{
694 long res, tmp1, cnt;
695 char val;
696
697 __asm__ __volatile__(
698 " mov %2, %1 \n"
699 "1: ldb.ab %3, [%0, 1] \n"
700 " breq.d %3, 0, 2f \n"
701 " sub.f %2, %2, 1 \n"
702 " bnz 1b \n"
703 " sub %2, %2, 1 \n"
704 "2: sub %0, %1, %2 \n"
705 "3: ;nop \n"
706 " .section .fixup, \"ax\" \n"
707 " .align 4 \n"
708 "4: mov %0, 0 \n"
709 " j 3b \n"
710 " .previous \n"
711 " .section __ex_table, \"a\" \n"
712 " .align 4 \n"
713 " .word 1b, 4b \n"
714 " .previous \n"
715 : "=r"(res), "=r"(tmp1), "=r"(cnt), "=r"(val)
716 : "0"(s), "1"(n)
717 : "memory");
718
719 return res;
720}
721
722#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
723#define __copy_from_user(t, f, n) __arc_copy_from_user(t, f, n)
724#define __copy_to_user(t, f, n) __arc_copy_to_user(t, f, n)
725#define __clear_user(d, n) __arc_clear_user(d, n)
726#define __strncpy_from_user(d, s, n) __arc_strncpy_from_user(d, s, n)
727#define __strnlen_user(s, n) __arc_strnlen_user(s, n)
728#else
729extern long arc_copy_from_user_noinline(void *to, const void __user * from,
730 unsigned long n);
731extern long arc_copy_to_user_noinline(void __user *to, const void *from,
732 unsigned long n);
733extern unsigned long arc_clear_user_noinline(void __user *to,
734 unsigned long n);
735extern long arc_strncpy_from_user_noinline (char *dst, const char __user *src,
736 long count);
737extern long arc_strnlen_user_noinline(const char __user *src, long n);
738
739#define __copy_from_user(t, f, n) arc_copy_from_user_noinline(t, f, n)
740#define __copy_to_user(t, f, n) arc_copy_to_user_noinline(t, f, n)
741#define __clear_user(d, n) arc_clear_user_noinline(d, n)
742#define __strncpy_from_user(d, s, n) arc_strncpy_from_user_noinline(d, s, n)
743#define __strnlen_user(s, n) arc_strnlen_user_noinline(s, n)
744
745#endif
746
747#include <asm-generic/uaccess.h>
748
749extern int fixup_exception(struct pt_regs *regs);
750
751#endif
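The non-constant fall-back paths above copy in a 16-byte unrolled loop, then peel off 8/4/2/1-byte stragglers keyed off the low bits of the count. A minimal portable-C sketch of that laddering, for illustration only: it assumes word-aligned pointers and has none of the __ex_table fault fixups the real routines carry:

	static void laddered_copy(void *to, const void *from, unsigned long n)
	{
		unsigned int *dw = to;
		const unsigned int *sw = from;
		unsigned long i;

		for (i = n >> 4; i; i--) {	/* 16 bytes (4 words) per pass */
			*dw++ = *sw++; *dw++ = *sw++;
			*dw++ = *sw++; *dw++ = *sw++;
		}
		if (n & 8) { *dw++ = *sw++; *dw++ = *sw++; }	/* bbit0 %3,3 */
		if (n & 4) { *dw++ = *sw++; }			/* bbit0 %3,2 */
		if (n & 2) {					/* bbit0 %3,1 */
			*(unsigned short *)dw = *(const unsigned short *)sw;
			dw = (void *)((char *)dw + 2);
			sw = (const void *)((const char *)sw + 2);
		}
		if (n & 1)					/* bbit0 %3,0 */
			*(unsigned char *)dw = *(const unsigned char *)sw;
	}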
diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h
new file mode 100644
index 000000000000..5dbe63f17b66
--- /dev/null
+++ b/arch/arc/include/asm/unaligned.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _ASM_ARC_UNALIGNED_H
10#define _ASM_ARC_UNALIGNED_H
11
12/* ARC700 can't handle unaligned Data accesses. */
13
14#include <asm-generic/unaligned.h>
15#include <asm/ptrace.h>
16
17#ifdef CONFIG_ARC_MISALIGN_ACCESS
18int misaligned_fixup(unsigned long address, struct pt_regs *regs,
19 unsigned long cause, struct callee_regs *cregs);
20#else
21static inline int
22misaligned_fixup(unsigned long address, struct pt_regs *regs,
23 unsigned long cause, struct callee_regs *cregs)
24{
25 return 0;
26}
27#endif
28
29#endif /* _ASM_ARC_UNALIGNED_H */
diff --git a/arch/arc/include/asm/unwind.h b/arch/arc/include/asm/unwind.h
new file mode 100644
index 000000000000..7ca628b6ee2a
--- /dev/null
+++ b/arch/arc/include/asm/unwind.h
@@ -0,0 +1,163 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _ASM_ARC_UNWIND_H
10#define _ASM_ARC_UNWIND_H
11
12#ifdef CONFIG_ARC_DW2_UNWIND
13
14#include <linux/sched.h>
15
16struct arc700_regs {
17 unsigned long r0;
18 unsigned long r1;
19 unsigned long r2;
20 unsigned long r3;
21 unsigned long r4;
22 unsigned long r5;
23 unsigned long r6;
24 unsigned long r7;
25 unsigned long r8;
26 unsigned long r9;
27 unsigned long r10;
28 unsigned long r11;
29 unsigned long r12;
30 unsigned long r13;
31 unsigned long r14;
32 unsigned long r15;
33 unsigned long r16;
34 unsigned long r17;
35 unsigned long r18;
36 unsigned long r19;
37 unsigned long r20;
38 unsigned long r21;
39 unsigned long r22;
40 unsigned long r23;
41 unsigned long r24;
42 unsigned long r25;
43 unsigned long r26;
44 unsigned long r27; /* fp */
45 unsigned long r28; /* sp */
46 unsigned long r29;
47 unsigned long r30;
48 unsigned long r31; /* blink */
49 unsigned long r63; /* pc */
50};
51
52struct unwind_frame_info {
53 struct arc700_regs regs;
54 struct task_struct *task;
55 unsigned call_frame:1;
56};
57
58#define UNW_PC(frame) ((frame)->regs.r63)
59#define UNW_SP(frame) ((frame)->regs.r28)
60#define UNW_BLINK(frame) ((frame)->regs.r31)
61
62/* Rajesh FIXME */
63#ifdef CONFIG_FRAME_POINTER
64#define UNW_FP(frame) ((frame)->regs.r27)
65#define FRAME_RETADDR_OFFSET 4
66#define FRAME_LINK_OFFSET 0
67#define STACK_BOTTOM_UNW(tsk) STACK_LIMIT((tsk)->thread.ksp)
68#define STACK_TOP_UNW(tsk) ((tsk)->thread.ksp)
69#else
70#define UNW_FP(frame) ((void)(frame), 0)
71#endif
72
73#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1))
74
75#define UNW_REGISTER_INFO \
76 PTREGS_INFO(r0), \
77 PTREGS_INFO(r1), \
78 PTREGS_INFO(r2), \
79 PTREGS_INFO(r3), \
80 PTREGS_INFO(r4), \
81 PTREGS_INFO(r5), \
82 PTREGS_INFO(r6), \
83 PTREGS_INFO(r7), \
84 PTREGS_INFO(r8), \
85 PTREGS_INFO(r9), \
86 PTREGS_INFO(r10), \
87 PTREGS_INFO(r11), \
88 PTREGS_INFO(r12), \
89 PTREGS_INFO(r13), \
90 PTREGS_INFO(r14), \
91 PTREGS_INFO(r15), \
92 PTREGS_INFO(r16), \
93 PTREGS_INFO(r17), \
94 PTREGS_INFO(r18), \
95 PTREGS_INFO(r19), \
96 PTREGS_INFO(r20), \
97 PTREGS_INFO(r21), \
98 PTREGS_INFO(r22), \
99 PTREGS_INFO(r23), \
100 PTREGS_INFO(r24), \
101 PTREGS_INFO(r25), \
102 PTREGS_INFO(r26), \
103 PTREGS_INFO(r27), \
104 PTREGS_INFO(r28), \
105 PTREGS_INFO(r29), \
106 PTREGS_INFO(r30), \
107 PTREGS_INFO(r31), \
108 PTREGS_INFO(r63)
109
110#define UNW_DEFAULT_RA(raItem, dataAlign) \
111 ((raItem).where == Memory && !((raItem).value * (dataAlign) + 4))
112
113extern int arc_unwind(struct unwind_frame_info *frame);
114extern void arc_unwind_init(void);
115extern void arc_unwind_setup(void);
116extern void *unwind_add_table(struct module *module, const void *table_start,
117 unsigned long table_size);
118extern void unwind_remove_table(void *handle, int init_only);
119
120static inline int
121arch_unwind_init_running(struct unwind_frame_info *info,
122 int (*callback) (struct unwind_frame_info *info,
123 void *arg),
124 void *arg)
125{
126 return 0;
127}
128
129static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
130{
131 return 0;
132}
133
134static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
135{
136 return;
137}
138
139static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
140 struct pt_regs *regs)
141{
142 return;
143}
144
145#else
146
147#define UNW_PC(frame) ((void)(frame), 0)
148#define UNW_SP(frame) ((void)(frame), 0)
149#define UNW_FP(frame) ((void)(frame), 0)
150
151static inline void arc_unwind_init(void)
152{
153}
154
155static inline void arc_unwind_setup(void)
156{
157}
158#define unwind_add_table(a, b, c)
159#define unwind_remove_table(a, b)
160
161#endif /* CONFIG_ARC_DW2_UNWIND */
162
163#endif /* _ASM_ARC_UNWIND_H */
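A typical consumer of this API seeds an unwind_frame_info (pc in regs.r63, sp in regs.r28, blink in regs.r31) and calls arc_unwind() repeatedly. A rough sketch, assuming arc_unwind() returns 0 while it can still step, which is how the in-tree stacktrace code treats it:

	#include <linux/printk.h>

	#ifdef CONFIG_ARC_DW2_UNWIND
	static void walk_frames(struct unwind_frame_info *frame)
	{
		/* caller has seeded frame->task and frame->regs */
		while (arc_unwind(frame) == 0) {
			pr_info("  pc %08lx  sp %08lx\n",
				UNW_PC(frame), UNW_SP(frame));
			if (!UNW_PC(frame) || arch_unw_user_mode(frame))
				break;
		}
	}
	#endif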
diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild
new file mode 100644
index 000000000000..18fefaea73fd
--- /dev/null
+++ b/arch/arc/include/uapi/asm/Kbuild
@@ -0,0 +1,12 @@
1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm
3header-y += elf.h
4header-y += page.h
5header-y += setup.h
6header-y += byteorder.h
7header-y += cachectl.h
8header-y += ptrace.h
9header-y += sigcontext.h
10header-y += signal.h
11header-y += swab.h
12header-y += unistd.h
diff --git a/arch/arc/include/uapi/asm/byteorder.h b/arch/arc/include/uapi/asm/byteorder.h
new file mode 100644
index 000000000000..9da71d415c38
--- /dev/null
+++ b/arch/arc/include/uapi/asm/byteorder.h
@@ -0,0 +1,18 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __ASM_ARC_BYTEORDER_H
10#define __ASM_ARC_BYTEORDER_H
11
12#ifdef CONFIG_CPU_BIG_ENDIAN
13#include <linux/byteorder/big_endian.h>
14#else
15#include <linux/byteorder/little_endian.h>
16#endif
17
18#endif /* __ASM_ARC_BYTEORDER_H */
diff --git a/arch/arc/include/uapi/asm/cachectl.h b/arch/arc/include/uapi/asm/cachectl.h
new file mode 100644
index 000000000000..51c73f0255b3
--- /dev/null
+++ b/arch/arc/include/uapi/asm/cachectl.h
@@ -0,0 +1,28 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __ARC_ASM_CACHECTL_H
10#define __ARC_ASM_CACHECTL_H
11
12/*
13 * ARC ABI flags defined for Android's finegrained cacheflush requirements
14 */
15#define CF_I_INV 0x0002
16#define CF_D_FLUSH 0x0010
17#define CF_D_FLUSH_INV 0x0020
18
19#define CF_DEFAULT (CF_I_INV | CF_D_FLUSH)
20
21/*
22 * Standard flags expected by cacheflush system call users
23 */
24#define ICACHE CF_I_INV
25#define DCACHE CF_D_FLUSH
26#define BCACHE (CF_I_INV | CF_D_FLUSH)
27
28#endif
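These flags are consumed by the ARC-specific cacheflush syscall wired up in uapi/asm/unistd.h later in this patch. A hedged userspace sketch, e.g. for a JIT that has just written code; the (start, len, flags) argument order is an assumption for illustration, not taken from this patch:

	#include <sys/syscall.h>
	#include <unistd.h>

	static void sync_icache(void *buf, unsigned long len)
	{
		/* BCACHE == CF_I_INV | CF_D_FLUSH: writeback D$, invalidate I$ */
		syscall(__NR_cacheflush, buf, len, BCACHE);
	}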
diff --git a/arch/arc/include/uapi/asm/elf.h b/arch/arc/include/uapi/asm/elf.h
new file mode 100644
index 000000000000..0f99ac8fcbb2
--- /dev/null
+++ b/arch/arc/include/uapi/asm/elf.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _UAPI__ASM_ARC_ELF_H
10#define _UAPI__ASM_ARC_ELF_H
11
12#include <asm/ptrace.h> /* for user_regs_struct */
13
14/* Machine specific ELF Hdr flags */
15#define EF_ARC_OSABI_MSK 0x00000f00
16#define EF_ARC_OSABI_ORIG 0x00000000 /* MUST be zero for back-compat */
17#define EF_ARC_OSABI_CURRENT 0x00000300 /* v3 (no legacy syscalls) */
18
19typedef unsigned long elf_greg_t;
20typedef unsigned long elf_fpregset_t;
21
22#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
23
24typedef elf_greg_t elf_gregset_t[ELF_NGREG];
25
26#endif
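For this ABI the count works out to 40: derived by hand from the user_regs_struct in uapi/asm/ptrace.h below (so treat it as a sanity check, not ABI documentation), scratch contributes 24 longs (pad + bta/lp_start/lp_end/lp_count + status32/ret/blink/fp/gp + r12..r0 + sp), callee 14 (pad + r25..r13), plus efa and stop_pc. That could be pinned down at build time with something like:

	/* in any compiled-once init function */
	BUILD_BUG_ON(ELF_NGREG != 40);	/* 24 scratch + 14 callee + efa + stop_pc */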
diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
new file mode 100644
index 000000000000..e5d41e08240c
--- /dev/null
+++ b/arch/arc/include/uapi/asm/page.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _UAPI__ASM_ARC_PAGE_H
10#define _UAPI__ASM_ARC_PAGE_H
11
12/* PAGE_SHIFT determines the page size */
13#if defined(CONFIG_ARC_PAGE_SIZE_16K)
14#define PAGE_SHIFT 14
15#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
16#define PAGE_SHIFT 12
17#else
18/*
19 * Default 8k
20 * done this way (instead of under CONFIG_ARC_PAGE_SIZE_8K) because ad-hoc
21 * user code (busybox appletlib.h) expects PAGE_SHIFT to be defined w/o
22 * including the correct uClibc header, and in such builds our autoconf.h
23 * is not available
24 */
25#define PAGE_SHIFT 13
26#endif
27
28#ifdef __ASSEMBLY__
29#define PAGE_SIZE (1 << PAGE_SHIFT)
30#define PAGE_OFFSET (0x80000000)
31#else
32#define PAGE_SIZE (1UL << PAGE_SHIFT) /* Default 8K */
33#define PAGE_OFFSET (0x80000000UL) /* Kernel starts at 2G onwards */
34#endif
35
36#define PAGE_MASK (~(PAGE_SIZE-1))
37
38
39#endif /* _UAPI__ASM_ARC_PAGE_H */
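With the default PAGE_SHIFT of 13 this gives 8 KB pages: PAGE_SIZE = 1UL << 13 = 8192 and PAGE_MASK = ~(8192 - 1) = 0xffffe000. A worked split of an address (the value is purely illustrative):

	unsigned long addr = 0x80012345UL;
	unsigned long base = addr & PAGE_MASK;		/* 0x80012000 */
	unsigned long off  = addr & ~PAGE_MASK;		/* 0x345 */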
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000000..6afa4f702075
--- /dev/null
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -0,0 +1,48 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
9 */
10
11#ifndef _UAPI__ASM_ARC_PTRACE_H
12#define _UAPI__ASM_ARC_PTRACE_H
13
14
15#ifndef __ASSEMBLY__
16/*
17 * Userspace ABI: Register state needed by
18 * -ptrace (gdbserver)
19 *  -sigcontext (SA_SIGINFO signal frame)
20 *
21 * This is to decouple pt_regs from user-space ABI, to be able to change it
22 * w/o affecting the ABI.
23 * The layout (initial padding) is kept similar to pt_regs to allow some
24 * optimizations when copying pt_regs to/from user_regs_struct.
25 *
26 * Also, sigcontext only cares about the scratch regs, as those are what we
27 * really save/restore for signal handling.
28*/
29struct user_regs_struct {
30
31 struct scratch {
32 long pad;
33 long bta, lp_start, lp_end, lp_count;
34 long status32, ret, blink, fp, gp;
35 long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
36 long sp;
37 } scratch;
38 struct callee {
39 long pad;
40 long r25, r24, r23, r22, r21, r20;
41 long r19, r18, r17, r16, r15, r14, r13;
42 } callee;
43 long efa; /* break pt addr, for break points in delay slots */
44 long stop_pc; /* give dbg stop_pc directly after checking orig_r8 */
45};
46#endif /* !__ASSEMBLY__ */
47
48#endif /* _UAPI__ASM_ARC_PTRACE_H */
diff --git a/arch/arc/include/uapi/asm/setup.h b/arch/arc/include/uapi/asm/setup.h
new file mode 100644
index 000000000000..a6d4e44938be
--- /dev/null
+++ b/arch/arc/include/uapi/asm/setup.h
@@ -0,0 +1,6 @@
1/*
2 * setup.h is part of userspace header ABI so UAPI scripts have to generate it
3 * even if there's nothing to export - causing an empty <uapi/asm/setup.h>.
4 * However, to prevent "patch" from discarding it, we add this placeholder
5 * comment.
6 */
diff --git a/arch/arc/include/uapi/asm/sigcontext.h b/arch/arc/include/uapi/asm/sigcontext.h
new file mode 100644
index 000000000000..9678a11fc158
--- /dev/null
+++ b/arch/arc/include/uapi/asm/sigcontext.h
@@ -0,0 +1,22 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _ASM_ARC_SIGCONTEXT_H
10#define _ASM_ARC_SIGCONTEXT_H
11
12#include <asm/ptrace.h>
13
14/*
15 * Signal context structure - contains all info to do with the state
16 * before the signal handler was invoked.
17 */
18struct sigcontext {
19 struct user_regs_struct regs;
20};
21
22#endif /* _ASM_ARC_SIGCONTEXT_H */
diff --git a/arch/arc/include/uapi/asm/signal.h b/arch/arc/include/uapi/asm/signal.h
new file mode 100644
index 000000000000..fad62f7f42d6
--- /dev/null
+++ b/arch/arc/include/uapi/asm/signal.h
@@ -0,0 +1,27 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
9 */
10
11#ifndef _ASM_ARC_SIGNAL_H
12#define _ASM_ARC_SIGNAL_H
13
14/*
15 * This is much needed for ARC sigreturn optimization.
16 * This allows uClibc to piggyback the addr of a sigreturn stub in sigaction,
17 * which allows sigreturn based re-entry into kernel after handling a signal.
18 * W/o this, the kernel needs to "synthesize" the sigreturn trampoline on the
19 * user mode stack, which in turn forces the following:
20 * -TLB Flush (after making the stack page executable)
21 * -Cache line Flush (to make I/D Cache lines coherent)
22 */
23#define SA_RESTORER 0x04000000
24
25#include <asm-generic/signal.h>
26
27#endif /* _ASM_ARC_SIGNAL_H */
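A sketch of the userspace side of this contract, as a libc might set it up; the restorer symbol here is hypothetical (uClibc supplies its own internal sigreturn stub):

	#include <signal.h>

	extern void __sigreturn_stub(void);	/* hypothetical: traps into sys_rt_sigreturn */

	static void install_handler(int sig, void (*fn)(int))
	{
		struct sigaction sa = { 0 };

		sa.sa_handler = fn;
		sa.sa_flags = SA_RESTORER;	/* kernel uses sa_restorer as handler return addr */
		sa.sa_restorer = __sigreturn_stub;
		sigaction(sig, &sa, NULL);
	}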
diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h
new file mode 100644
index 000000000000..095599a73195
--- /dev/null
+++ b/arch/arc/include/uapi/asm/swab.h
@@ -0,0 +1,98 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * vineetg: May 2011
9 * -Support single cycle endian-swap insn in ARC700 4.10
10 *
11 * vineetg: June 2009
12 * -Better htonl implementation (5 instead of 9 ALU instructions)
13 * -Hardware assisted single cycle bswap (Use Case of ARC custom instrn)
14 */
15
16#ifndef __ASM_ARC_SWAB_H
17#define __ASM_ARC_SWAB_H
18
19#include <linux/types.h>
20
21/* Native single cycle endian swap insn */
22#ifdef CONFIG_ARC_HAS_SWAPE
23
24#define __arch_swab32(x) \
25({ \
26 unsigned int tmp = x; \
27 __asm__( \
28 " swape %0, %1 \n" \
29 : "=r" (tmp) \
30 : "r" (tmp)); \
31 tmp; \
32})
33
34#else
35
36/* Several ways of Endian-Swap Emulation for ARC
37 * 0: kernel generic
38 * 1: ARC optimised "C"
39 * 2: ARC Custom instruction
40 */
41#define ARC_BSWAP_TYPE 1
42
43#if (ARC_BSWAP_TYPE == 1) /******* Software only ********/
44
45/* The kernel default implementation of htonl is
46 * return x<<24 | x>>24 |
47 * (x & (__u32)0x0000ff00UL)<<8 | (x & (__u32)0x00ff0000UL)>>8;
48 *
49 * This generates 9 instructions on ARC (excluding the ld/st)
50 *
51 * 8051fd8c: ld r3,[r7,20] ; Mem op : Get the value to be swapped
52 * 8051fd98: asl r5,r3,24 ; get 3rd Byte
53 * 8051fd9c: lsr r2,r3,24 ; get 0th Byte
54 * 8051fda0: and r4,r3,0xff00
55 * 8051fda8: asl r4,r4,8 ; get 1st Byte
56 * 8051fdac: and r3,r3,0x00ff0000
57 * 8051fdb4: or r2,r2,r5 ; combine 0th and 3rd Bytes
58 * 8051fdb8: lsr r3,r3,8 ; 2nd Byte at correct place in Dst Reg
59 * 8051fdbc: or r2,r2,r4 ; combine 0,3 Bytes with 1st Byte
60 * 8051fdc0: or r2,r2,r3 ; combine 0,3,1 Bytes with 2nd Byte
61 * 8051fdc4: st r2,[r1,20] ; Mem op : save result back to mem
62 *
63 * Joern suggested a better "C" algorithm which is great since
64 * (1) It is portable to any architecture
65 * (2) At the same time it takes advantage of the ARC ISA (rotate insns)
66 */
67
68#define __arch_swab32(x) \
69({ unsigned long __in = (x), __tmp; \
70 __tmp = __in << 8 | __in >> 24; /* ror tmp,in,24 */ \
71 __in = __in << 24 | __in >> 8; /* ror in,in,8 */ \
72 __tmp ^= __in; \
73 __tmp &= 0xff00ff; \
74 __tmp ^ __in; \
75})
76
77#elif (ARC_BSWAP_TYPE == 2)	/* Custom single cycle bswap instruction */
78
79#define __arch_swab32(x) \
80({ \
81 unsigned int tmp = x; \
82 __asm__( \
83 " .extInstruction bswap, 7, 0x00, SUFFIX_NONE, SYNTAX_2OP \n"\
84 " bswap %0, %1 \n"\
85 : "=r" (tmp) \
86 : "r" (tmp)); \
87 tmp; \
88})
89
90#endif /* ARC_BSWAP_TYPE */
91
92#endif /* CONFIG_ARC_HAS_SWAPE */
93
94#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
95#define __SWAB_64_THRU_32__
96#endif
97
98#endif
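The 5-instruction rotate version is worth tracing once. For in = 0x11223344: tmp = rol8 = 0x22334411 and in' = ror8 = 0x44112233; their xor is 0x66226622, masking with 0xff00ff keeps 0x00220022, and the final xor with in' yields 0x44332211. Restated as a plain-C userspace demo of the same algorithm:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t swab32_ror(uint32_t in)
	{
		uint32_t tmp = in << 8 | in >> 24;	/* rol 8 (== ror 24) */

		in = in << 24 | in >> 8;		/* ror 8 */
		tmp ^= in;
		tmp &= 0xff00ff;
		return tmp ^ in;
	}

	int main(void)
	{
		printf("%08x\n", swab32_ror(0x11223344));	/* prints 44332211 */
		return 0;
	}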
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
new file mode 100644
index 000000000000..6f30484f34b7
--- /dev/null
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/******** no-legacy-syscalls-ABI *******/
10
11#define __ARCH_WANT_SYS_EXECVE
12#define __ARCH_WANT_SYS_CLONE
13#define __ARCH_WANT_SYS_VFORK
14#define __ARCH_WANT_SYS_FORK
15
16#define sys_mmap2 sys_mmap_pgoff
17
18#include <asm-generic/unistd.h>
19
20#define NR_syscalls __NR_syscalls
21
22/* ARC specific syscall */
23#define __NR_cacheflush (__NR_arch_specific_syscall + 0)
24#define __NR_arc_settls (__NR_arch_specific_syscall + 1)
25#define __NR_arc_gettls (__NR_arch_specific_syscall + 2)
26
27__SYSCALL(__NR_cacheflush, sys_cacheflush)
28__SYSCALL(__NR_arc_settls, sys_arc_settls)
29__SYSCALL(__NR_arc_gettls, sys_arc_gettls)
30
31
32/* Generic syscall (fs/filesystems.c) - lost in asm-generic/unistd.h */
33#define __NR_sysfs (__NR_arch_specific_syscall + 3)
34__SYSCALL(__NR_sysfs, sys_sysfs)
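A hedged userspace sketch of the two TLS syscalls; the argument and return conventions here are assumptions for illustration, not taken from this patch:

	#include <sys/syscall.h>
	#include <unistd.h>

	static void *tls_get(void)
	{
		return (void *)syscall(__NR_arc_gettls);
	}

	static void tls_set(void *tp)
	{
		syscall(__NR_arc_settls, tp);
	}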
diff --git a/arch/arc/kernel/Makefile b/arch/arc/kernel/Makefile
new file mode 100644
index 000000000000..c242ef07ba70
--- /dev/null
+++ b/arch/arc/kernel/Makefile
@@ -0,0 +1,33 @@
1#
2# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3#
4# This program is free software; you can redistribute it and/or modify
5# it under the terms of the GNU General Public License version 2 as
6# published by the Free Software Foundation.
7
8# Pass UTS_MACHINE for user_regset definition
9CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
10
11obj-y := arcksyms.o setup.o irq.o time.o reset.o ptrace.o entry.o process.o
12obj-y += signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o clk.o
13obj-y += devtree.o
14
15obj-$(CONFIG_MODULES) += arcksyms.o module.o
16obj-$(CONFIG_SMP) += smp.o
17obj-$(CONFIG_ARC_DW2_UNWIND) += unwind.o
18obj-$(CONFIG_KPROBES) += kprobes.o
19obj-$(CONFIG_ARC_MISALIGN_ACCESS) += unaligned.o
20obj-$(CONFIG_KGDB) += kgdb.o
21obj-$(CONFIG_ARC_METAWARE_HLINK) += arc_hostlink.o
22
23obj-$(CONFIG_ARC_FPU_SAVE_RESTORE) += fpu.o
24CFLAGS_fpu.o += -mdpfp
25
26ifdef CONFIG_ARC_DW2_UNWIND
27CFLAGS_ctx_sw.o += -fno-omit-frame-pointer
28obj-y += ctx_sw.o
29else
30obj-y += ctx_sw_asm.o
31endif
32
33extra-y := vmlinux.lds head.o
diff --git a/arch/arc/kernel/arc_hostlink.c b/arch/arc/kernel/arc_hostlink.c
new file mode 100644
index 000000000000..47b2a17cc52a
--- /dev/null
+++ b/arch/arc/kernel/arc_hostlink.c
@@ -0,0 +1,58 @@
1/*
2 * arc_hostlink.c: Pseudo-driver for Metaware provided "hostlink" facility
3 *
4 * Allows Linux userland access to host in absence of any peripherals.
5 *
6 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/fs.h> /* file_operations */
14#include <linux/miscdevice.h>
15#include <linux/mm.h> /* VM_IO */
16#include <linux/module.h>
17#include <linux/uaccess.h>
18
19static unsigned char __HOSTLINK__[4 * PAGE_SIZE] __aligned(PAGE_SIZE);
20
21static int arc_hl_mmap(struct file *fp, struct vm_area_struct *vma)
22{
23 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
24
25 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
26 vma->vm_end - vma->vm_start,
27 vma->vm_page_prot)) {
28 pr_warn("Hostlink buffer mmap ERROR\n");
29 return -EAGAIN;
30 }
31 return 0;
32}
33
34static long arc_hl_ioctl(struct file *file, unsigned int cmd,
35 unsigned long arg)
36{
37	/* we only support returning the physical addr to mmap in user space */
38 put_user((unsigned int)__HOSTLINK__, (int __user *)arg);
39 return 0;
40}
41
42static const struct file_operations arc_hl_fops = {
43 .unlocked_ioctl = arc_hl_ioctl,
44 .mmap = arc_hl_mmap,
45};
46
47static struct miscdevice arc_hl_dev = {
48 .minor = MISC_DYNAMIC_MINOR,
49 .name = "hostlink",
50 .fops = &arc_hl_fops
51};
52
53static int __init arc_hl_init(void)
54{
55 pr_info("ARC Hostlink driver mmap at 0x%p\n", __HOSTLINK__);
56 return misc_register(&arc_hl_dev);
57}
58module_init(arc_hl_init);
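The userspace flow the driver implies: ioctl to learn the buffer's address, then mmap it using that address as the offset (which io_remap_pfn_range above consumes as the PFN via vm_pgoff). A hedged sketch; the ioctl cmd value is irrelevant since the driver ignores it:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	static void *map_hostlink(size_t len)
	{
		unsigned int phys;
		int fd = open("/dev/hostlink", O_RDWR);

		if (fd < 0)
			return NULL;
		if (ioctl(fd, 0, &phys) < 0)
			return NULL;
		/* passing phys as the offset makes vm_pgoff == phys >> PAGE_SHIFT */
		return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
			    fd, phys);
	}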
diff --git a/arch/arc/kernel/arcksyms.c b/arch/arc/kernel/arcksyms.c
new file mode 100644
index 000000000000..4d9e77724bed
--- /dev/null
+++ b/arch/arc/kernel/arcksyms.c
@@ -0,0 +1,56 @@
1/*
2 * arcksyms.c - Exporting symbols not exportable from their own sources
3 *
4 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/module.h>
13
14/* libgcc functions, not part of kernel sources */
15extern void __ashldi3(void);
16extern void __ashrdi3(void);
17extern void __divsi3(void);
18extern void __divsf3(void);
19extern void __lshrdi3(void);
20extern void __modsi3(void);
21extern void __muldi3(void);
22extern void __ucmpdi2(void);
23extern void __udivsi3(void);
24extern void __umodsi3(void);
25extern void __cmpdi2(void);
26extern void __fixunsdfsi(void);
27extern void __muldf3(void);
28extern void __divdf3(void);
29extern void __floatunsidf(void);
30extern void __floatunsisf(void);
31
32EXPORT_SYMBOL(__ashldi3);
33EXPORT_SYMBOL(__ashrdi3);
34EXPORT_SYMBOL(__divsi3);
35EXPORT_SYMBOL(__divsf3);
36EXPORT_SYMBOL(__lshrdi3);
37EXPORT_SYMBOL(__modsi3);
38EXPORT_SYMBOL(__muldi3);
39EXPORT_SYMBOL(__ucmpdi2);
40EXPORT_SYMBOL(__udivsi3);
41EXPORT_SYMBOL(__umodsi3);
42EXPORT_SYMBOL(__cmpdi2);
43EXPORT_SYMBOL(__fixunsdfsi);
44EXPORT_SYMBOL(__muldf3);
45EXPORT_SYMBOL(__divdf3);
46EXPORT_SYMBOL(__floatunsidf);
47EXPORT_SYMBOL(__floatunsisf);
48
49/* ARC optimised assembler routines */
50EXPORT_SYMBOL(memset);
51EXPORT_SYMBOL(memcpy);
52EXPORT_SYMBOL(memcmp);
53EXPORT_SYMBOL(strchr);
54EXPORT_SYMBOL(strcpy);
55EXPORT_SYMBOL(strcmp);
56EXPORT_SYMBOL(strlen);
diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c
new file mode 100644
index 000000000000..0dc148ebce74
--- /dev/null
+++ b/arch/arc/kernel/asm-offsets.c
@@ -0,0 +1,64 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/sched.h>
10#include <linux/mm.h>
11#include <linux/interrupt.h>
12#include <linux/thread_info.h>
13#include <linux/kbuild.h>
14#include <asm/hardirq.h>
15#include <asm/page.h>
16#include <asm/ptrace.h>
17
18int main(void)
19{
20 DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
21 DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
22
23 BLANK();
24
25 DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
26 DEFINE(THREAD_CALLEE_REG, offsetof(struct thread_struct, callee_reg));
27#ifdef CONFIG_ARC_CURR_IN_REG
28 DEFINE(THREAD_USER_R25, offsetof(struct thread_struct, user_r25));
29#endif
30 DEFINE(THREAD_FAULT_ADDR,
31 offsetof(struct thread_struct, fault_address));
32
33 BLANK();
34
35 DEFINE(THREAD_INFO_FLAGS, offsetof(struct thread_info, flags));
36 DEFINE(THREAD_INFO_PREEMPT_COUNT,
37 offsetof(struct thread_info, preempt_count));
38
39 BLANK();
40
41 DEFINE(TASK_ACT_MM, offsetof(struct task_struct, active_mm));
42 DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
43
44 DEFINE(MM_CTXT, offsetof(struct mm_struct, context));
45 DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
46
47 DEFINE(MM_CTXT_ASID, offsetof(mm_context_t, asid));
48
49 BLANK();
50
51 DEFINE(PT_status32, offsetof(struct pt_regs, status32));
52 DEFINE(PT_orig_r8, offsetof(struct pt_regs, orig_r8_word));
53 DEFINE(PT_sp, offsetof(struct pt_regs, sp));
54 DEFINE(PT_r0, offsetof(struct pt_regs, r0));
55 DEFINE(PT_r1, offsetof(struct pt_regs, r1));
56 DEFINE(PT_r2, offsetof(struct pt_regs, r2));
57 DEFINE(PT_r3, offsetof(struct pt_regs, r3));
58 DEFINE(PT_r4, offsetof(struct pt_regs, r4));
59 DEFINE(PT_r5, offsetof(struct pt_regs, r5));
60 DEFINE(PT_r6, offsetof(struct pt_regs, r6));
61 DEFINE(PT_r7, offsetof(struct pt_regs, r7));
62
63 return 0;
64}
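kbuild runs this file through the compiler and turns each DEFINE() into a plain cpp constant in the generated include/generated/asm-offsets.h, so assembly can use struct offsets symbolically. An illustrative excerpt (the numeric values here are made up; they depend on the actual struct layouts):

	/* include/generated/asm-offsets.h (excerpt, values illustrative) */
	#define TASK_THREAD 1216	/* offsetof(struct task_struct, thread) */
	#define THREAD_KSP 0		/* offsetof(struct thread_struct, ksp) */

which is what lets ctx_sw_asm.S later in this patch write:

	st.as sp, [r0, (TASK_THREAD + THREAD_KSP)/4]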
diff --git a/arch/arc/kernel/clk.c b/arch/arc/kernel/clk.c
new file mode 100644
index 000000000000..66ce0dc917fb
--- /dev/null
+++ b/arch/arc/kernel/clk.c
@@ -0,0 +1,21 @@
1/*
2 * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <asm/clk.h>
10
11unsigned long core_freq = 800000000;
12
13/*
14 * As of now we default to the device-tree provided clock.
15 * In future we can determine this in early boot.
16 */
17int arc_set_core_freq(unsigned long freq)
18{
19 core_freq = freq;
20 return 0;
21}
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
new file mode 100644
index 000000000000..60844dac6132
--- /dev/null
+++ b/arch/arc/kernel/ctx_sw.c
@@ -0,0 +1,109 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Vineetg: Aug 2009
9 * -"C" version of lowest level context switch asm macro called by scheduler
10 *  gcc doesn't generate the dwarf CFI info for hand written asm, hence we
11 *  can't backtrace out of it (e.g. tasks sleeping in kernel).
12 *  So we cheat a bit by writing nearly identical code in inline-asm.
13 * -This is a hacky way of doing things, but there is no other simple way.
14 * I don't want/intend to extend unwinding code to understand raw asm
15 */
16
17#include <asm/asm-offsets.h>
18#include <linux/sched.h>
19
20struct task_struct *__sched
21__switch_to(struct task_struct *prev_task, struct task_struct *next_task)
22{
23 unsigned int tmp;
24 unsigned int prev = (unsigned int)prev_task;
25 unsigned int next = (unsigned int)next_task;
26 int num_words_to_skip = 1;
27#ifdef CONFIG_ARC_CURR_IN_REG
28 num_words_to_skip++;
29#endif
30
31 __asm__ __volatile__(
32		/* FP/BLINK save generated by gcc (standard function prologue) */
33 "st.a r13, [sp, -4] \n\t"
34 "st.a r14, [sp, -4] \n\t"
35 "st.a r15, [sp, -4] \n\t"
36 "st.a r16, [sp, -4] \n\t"
37 "st.a r17, [sp, -4] \n\t"
38 "st.a r18, [sp, -4] \n\t"
39 "st.a r19, [sp, -4] \n\t"
40 "st.a r20, [sp, -4] \n\t"
41 "st.a r21, [sp, -4] \n\t"
42 "st.a r22, [sp, -4] \n\t"
43 "st.a r23, [sp, -4] \n\t"
44 "st.a r24, [sp, -4] \n\t"
45#ifndef CONFIG_ARC_CURR_IN_REG
46 "st.a r25, [sp, -4] \n\t"
47#endif
48 "sub sp, sp, %4 \n\t" /* create gutter at top */
49
50 /* set ksp of outgoing task in tsk->thread.ksp */
51 "st.as sp, [%3, %1] \n\t"
52
53 "sync \n\t"
54
55 /*
56 * setup _current_task with incoming tsk.
57 * optionally, set r25 to that as well
58 * For SMP extra work to get to &_current_task[cpu]
59 * (open coded SET_CURR_TASK_ON_CPU)
60 */
61#ifndef CONFIG_SMP
62 "st %2, [@_current_task] \n\t"
63#else
64 "lr r24, [identity] \n\t"
65 "lsr r24, r24, 8 \n\t"
66 "bmsk r24, r24, 7 \n\t"
67 "add2 r24, @_current_task, r24 \n\t"
68 "st %2, [r24] \n\t"
69#endif
70#ifdef CONFIG_ARC_CURR_IN_REG
71 "mov r25, %2 \n\t"
72#endif
73
74 /* get ksp of incoming task from tsk->thread.ksp */
75 "ld.as sp, [%2, %1] \n\t"
76
77		/* start loading its CALLEE reg file */
78
79 "add sp, sp, %4 \n\t" /* skip gutter at top */
80
81#ifndef CONFIG_ARC_CURR_IN_REG
82 "ld.ab r25, [sp, 4] \n\t"
83#endif
84 "ld.ab r24, [sp, 4] \n\t"
85 "ld.ab r23, [sp, 4] \n\t"
86 "ld.ab r22, [sp, 4] \n\t"
87 "ld.ab r21, [sp, 4] \n\t"
88 "ld.ab r20, [sp, 4] \n\t"
89 "ld.ab r19, [sp, 4] \n\t"
90 "ld.ab r18, [sp, 4] \n\t"
91 "ld.ab r17, [sp, 4] \n\t"
92 "ld.ab r16, [sp, 4] \n\t"
93 "ld.ab r15, [sp, 4] \n\t"
94 "ld.ab r14, [sp, 4] \n\t"
95 "ld.ab r13, [sp, 4] \n\t"
96
97		/* last (ret value) = prev : although for ARC it is mov r0, r0 */
98 "mov %0, %3 \n\t"
99
100		/* FP/BLINK restore generated by gcc (standard func epilogue) */
101
102 : "=r"(tmp)
103 : "n"((TASK_THREAD + THREAD_KSP) / 4), "r"(next), "r"(prev),
104 "n"(num_words_to_skip * 4)
105 : "blink"
106 );
107
108 return (struct task_struct *)tmp;
109}
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
new file mode 100644
index 000000000000..d8972345e4c2
--- /dev/null
+++ b/arch/arc/kernel/ctx_sw_asm.S
@@ -0,0 +1,58 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Vineetg: Aug 2009
9 * -Moved core context switch macro out of entry.S into this file.
10 * -This is the more "natural" hand written assembler
11 */
12
13#include <asm/entry.h> /* For the SAVE_* macros */
14#include <asm/asm-offsets.h>
15#include <asm/linkage.h>
16
17;################### Low Level Context Switch ##########################
18
19 .section .sched.text,"ax",@progbits
20 .align 4
21 .global __switch_to
22 .type __switch_to, @function
23__switch_to:
24
25 /* Save regs on kernel mode stack of task */
26 st.a blink, [sp, -4]
27 st.a fp, [sp, -4]
28 SAVE_CALLEE_SAVED_KERNEL
29
30 /* Save the now KSP in task->thread.ksp */
31 st.as sp, [r0, (TASK_THREAD + THREAD_KSP)/4]
32
33 /*
34 * Return last task in r0 (return reg)
35 * On ARC, Return reg = First Arg reg = r0.
36 * Since we already have last task in r0,
37 * don't need to do anything special to return it
38 */
39
40 /* hardware memory barrier */
41 sync
42
43 /*
44 * switch to new task, contained in r1
45 * Temp reg r3 is required to get the ptr to store val
46 */
47 SET_CURR_TASK_ON_CPU r1, r3
48
49 /* reload SP with kernel mode stack pointer in task->thread.ksp */
50 ld.as sp, [r1, (TASK_THREAD + THREAD_KSP)/4]
51
52 /* restore the registers */
53 RESTORE_CALLEE_SAVED_KERNEL
54 ld.ab fp, [sp, 4]
55 ld.ab blink, [sp, 4]
56 j [blink]
57
58ARC_EXIT __switch_to
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
new file mode 100644
index 000000000000..bdee3a812052
--- /dev/null
+++ b/arch/arc/kernel/devtree.c
@@ -0,0 +1,123 @@
1/*
2 * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * Based on reduced version of METAG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11
12#include <linux/init.h>
13#include <linux/reboot.h>
14#include <linux/memblock.h>
15#include <linux/of.h>
16#include <linux/of_fdt.h>
17#include <asm/prom.h>
18#include <asm/clk.h>
19#include <asm/mach_desc.h>
20
21/* called from unflatten_device_tree() to bootstrap devicetree itself */
22void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
23{
24 return __va(memblock_alloc(size, align));
25}
26
27/**
28 * setup_machine_fdt - Machine setup when a dtb was passed to the kernel
29 * @dt: virtual address pointer to dt blob
30 *
31 * If a dtb was passed to the kernel, then use it to choose the correct
32 * machine_desc and to setup the system.
33 */
34struct machine_desc * __init setup_machine_fdt(void *dt)
35{
36 struct boot_param_header *devtree = dt;
37 struct machine_desc *mdesc = NULL, *mdesc_best = NULL;
38 unsigned int score, mdesc_score = ~1;
39 unsigned long dt_root;
40 const char *model, *compat;
41 void *clk;
42 char manufacturer[16];
43 unsigned long len;
44
45 /* check device tree validity */
46 if (be32_to_cpu(devtree->magic) != OF_DT_HEADER)
47 return NULL;
48
49 initial_boot_params = devtree;
50 dt_root = of_get_flat_dt_root();
51
52 /*
53 * The kernel could be multi-platform enabled, thus could have many
54	 * "baked-in" machine descriptors. Search through all of them for the best
55 * "compatible" string match.
56 */
57 for_each_machine_desc(mdesc) {
58 score = of_flat_dt_match(dt_root, mdesc->dt_compat);
59 if (score > 0 && score < mdesc_score) {
60 mdesc_best = mdesc;
61 mdesc_score = score;
62 }
63 }
64 if (!mdesc_best) {
65 const char *prop;
66 long size;
67
68 pr_err("\n unrecognized device tree list:\n[ ");
69
70 prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
71 if (prop) {
72 while (size > 0) {
73 printk("'%s' ", prop);
74 size -= strlen(prop) + 1;
75 prop += strlen(prop) + 1;
76 }
77 }
78 printk("]\n\n");
79
80 machine_halt();
81 }
82
83 /* compat = "<manufacturer>,<model>" */
84 compat = mdesc_best->dt_compat[0];
85
86 model = strchr(compat, ',');
87 if (model)
88 model++;
89
90 strlcpy(manufacturer, compat, model ? model - compat : strlen(compat));
91
92 pr_info("Board \"%s\" from %s (Manufacturer)\n", model, manufacturer);
93
94 /* Retrieve various information from the /chosen node */
95 of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
96
97 /* Initialize {size,address}-cells info */
98 of_scan_flat_dt(early_init_dt_scan_root, NULL);
99
100 /* Setup memory, calling early_init_dt_add_memory_arch */
101 of_scan_flat_dt(early_init_dt_scan_memory, NULL);
102
103 clk = of_get_flat_dt_prop(dt_root, "clock-frequency", &len);
104 if (clk)
105 arc_set_core_freq(of_read_ulong(clk, len/4));
106
107 return mdesc_best;
108}
109
110/*
111 * Copy the flattened DT out of .init since unflattening doesn't copy strings
112 * and the normal DT APIs reference them from the original flat DT
113 */
114void __init copy_devtree(void)
115{
116 void *alloc = early_init_dt_alloc_memory_arch(
117 be32_to_cpu(initial_boot_params->totalsize), 64);
118 if (alloc) {
119 memcpy(alloc, initial_boot_params,
120 be32_to_cpu(initial_boot_params->totalsize));
121 initial_boot_params = alloc;
122 }
123}
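Note the sense of the score in setup_machine_fdt() above: of_flat_dt_match() returns the 1-based position, within the board's own "compatible" list, of the first entry a given mdesc matched, so a lower positive score is a closer match and the loop keeps the minimum. An illustrative case (DT contents hypothetical):

	/* board DT:  compatible = "snps,arc-angel4", "snps,arc";               */
	/* mdesc A:   .dt_compat = { "snps,arc", NULL }         -> score 2      */
	/* mdesc B:   .dt_compat = { "snps,arc-angel4", NULL }  -> score 1, wins */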
diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c
new file mode 100644
index 000000000000..2f390289a792
--- /dev/null
+++ b/arch/arc/kernel/disasm.c
@@ -0,0 +1,538 @@
1/*
2 * several functions that help interpret ARC instructions
3 * used for unaligned accesses, kprobes and kgdb
4 *
5 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/types.h>
13#include <linux/kprobes.h>
14#include <linux/slab.h>
15#include <asm/disasm.h>
16#include <asm/uaccess.h>
17
18#if defined(CONFIG_KGDB) || defined(CONFIG_ARC_MISALIGN_ACCESS) || \
19 defined(CONFIG_KPROBES)
20
21/* disasm_instr: Analyses instruction at addr, stores
22 * findings in *state
23 */
24void __kprobes disasm_instr(unsigned long addr, struct disasm_state *state,
25 int userspace, struct pt_regs *regs, struct callee_regs *cregs)
26{
27 int fieldA = 0;
28 int fieldC = 0, fieldCisReg = 0;
29 uint16_t word1 = 0, word0 = 0;
30 int subopcode, is_linked, op_format;
31 uint16_t *ins_ptr;
32 uint16_t ins_buf[4];
33 int bytes_not_copied = 0;
34
35 memset(state, 0, sizeof(struct disasm_state));
36
37 /* This fetches the upper part of the 32 bit instruction
38 * in both Little Endian and Big Endian configurations. */
39 if (userspace) {
40 bytes_not_copied = copy_from_user(ins_buf,
41 (const void __user *) addr, 8);
42 if (bytes_not_copied > 6)
43 goto fault;
44 ins_ptr = ins_buf;
45 } else {
46 ins_ptr = (uint16_t *) addr;
47 }
48
49	word1 = *ins_ptr;	/* use the copied buffer for userspace */
50
51 state->major_opcode = (word1 >> 11) & 0x1F;
52
53 /* Check if the instruction is 32 bit or 16 bit instruction */
54 if (state->major_opcode < 0x0B) {
55 if (bytes_not_copied > 4)
56 goto fault;
57 state->instr_len = 4;
58		word0 = *(ins_ptr + 1);
59 state->words[0] = (word1 << 16) | word0;
60 } else {
61 state->instr_len = 2;
62 state->words[0] = word1;
63 }
64
65 /* Read the second word in case of limm */
66	word1 = *(ins_ptr + state->instr_len / 2);
67	word0 = *(ins_ptr + state->instr_len / 2 + 1);
68 state->words[1] = (word1 << 16) | word0;
69
70 switch (state->major_opcode) {
71 case op_Bcc:
72 state->is_branch = 1;
73
74 /* unconditional branch s25, conditional branch s21 */
75 fieldA = (IS_BIT(state->words[0], 16)) ?
76 FIELD_s25(state->words[0]) :
77 FIELD_s21(state->words[0]);
78
79 state->delay_slot = IS_BIT(state->words[0], 5);
80 state->target = fieldA + (addr & ~0x3);
81 state->flow = direct_jump;
82 break;
83
84 case op_BLcc:
85 if (IS_BIT(state->words[0], 16)) {
86 /* Branch and Link*/
87 /* unconditional branch s25, conditional branch s21 */
88 fieldA = (IS_BIT(state->words[0], 17)) ?
89 (FIELD_s25(state->words[0]) & ~0x3) :
90 FIELD_s21(state->words[0]);
91
92 state->flow = direct_call;
93 } else {
94 /*Branch On Compare */
95 fieldA = FIELD_s9(state->words[0]) & ~0x3;
96 state->flow = direct_jump;
97 }
98
99 state->delay_slot = IS_BIT(state->words[0], 5);
100 state->target = fieldA + (addr & ~0x3);
101 state->is_branch = 1;
102 break;
103
104 case op_LD: /* LD<zz> a,[b,s9] */
105 state->write = 0;
106 state->di = BITS(state->words[0], 11, 11);
107 if (state->di)
108 break;
109 state->x = BITS(state->words[0], 6, 6);
110 state->zz = BITS(state->words[0], 7, 8);
111 state->aa = BITS(state->words[0], 9, 10);
112 state->wb_reg = FIELD_B(state->words[0]);
113 if (state->wb_reg == REG_LIMM) {
114 state->instr_len += 4;
115 state->aa = 0;
116 state->src1 = state->words[1];
117 } else {
118 state->src1 = get_reg(state->wb_reg, regs, cregs);
119 }
120 state->src2 = FIELD_s9(state->words[0]);
121 state->dest = FIELD_A(state->words[0]);
122 state->pref = (state->dest == REG_LIMM);
123 break;
124
125 case op_ST:
126 state->write = 1;
127 state->di = BITS(state->words[0], 5, 5);
128 if (state->di)
129 break;
130 state->aa = BITS(state->words[0], 3, 4);
131 state->zz = BITS(state->words[0], 1, 2);
132 state->src1 = FIELD_C(state->words[0]);
133 if (state->src1 == REG_LIMM) {
134 state->instr_len += 4;
135 state->src1 = state->words[1];
136 } else {
137 state->src1 = get_reg(state->src1, regs, cregs);
138 }
139 state->wb_reg = FIELD_B(state->words[0]);
140 if (state->wb_reg == REG_LIMM) {
141 state->aa = 0;
142 state->instr_len += 4;
143 state->src2 = state->words[1];
144 } else {
145 state->src2 = get_reg(state->wb_reg, regs, cregs);
146 }
147 state->src3 = FIELD_s9(state->words[0]);
148 break;
149
150 case op_MAJOR_4:
151 subopcode = MINOR_OPCODE(state->words[0]);
152 switch (subopcode) {
153 case 32: /* Jcc */
154 case 33: /* Jcc.D */
155 case 34: /* JLcc */
156 case 35: /* JLcc.D */
157 is_linked = 0;
158
159 if (subopcode == 33 || subopcode == 35)
160 state->delay_slot = 1;
161
162 if (subopcode == 34 || subopcode == 35)
163 is_linked = 1;
164
165 fieldCisReg = 0;
166 op_format = BITS(state->words[0], 22, 23);
167 if (op_format == 0 || ((op_format == 3) &&
168 (!IS_BIT(state->words[0], 5)))) {
169 fieldC = FIELD_C(state->words[0]);
170
171 if (fieldC == REG_LIMM) {
172 fieldC = state->words[1];
173 state->instr_len += 4;
174 } else {
175 fieldCisReg = 1;
176 }
177 } else if (op_format == 1 || ((op_format == 3)
178 && (IS_BIT(state->words[0], 5)))) {
179 fieldC = FIELD_C(state->words[0]);
180 } else {
181 /* op_format == 2 */
182 fieldC = FIELD_s12(state->words[0]);
183 }
184
185 if (!fieldCisReg) {
186 state->target = fieldC;
187 state->flow = is_linked ?
188 direct_call : direct_jump;
189 } else {
190 state->target = get_reg(fieldC, regs, cregs);
191 state->flow = is_linked ?
192 indirect_call : indirect_jump;
193 }
194 state->is_branch = 1;
195 break;
196
197 case 40: /* LPcc */
198 if (BITS(state->words[0], 22, 23) == 3) {
199 /* Conditional LPcc u7 */
200 fieldC = FIELD_C(state->words[0]);
201
202 fieldC = fieldC << 1;
203 fieldC += (addr & ~0x03);
204 state->is_branch = 1;
205 state->flow = direct_jump;
206 state->target = fieldC;
207 }
208			/* For unconditional lp, the next pc is the fall-through,
209			 * which has already been set */
210 break;
211
212 case 48 ... 55: /* LD a,[b,c] */
213 state->di = BITS(state->words[0], 15, 15);
214 if (state->di)
215 break;
216 state->x = BITS(state->words[0], 16, 16);
217 state->zz = BITS(state->words[0], 17, 18);
218 state->aa = BITS(state->words[0], 22, 23);
219 state->wb_reg = FIELD_B(state->words[0]);
220 if (state->wb_reg == REG_LIMM) {
221 state->instr_len += 4;
222 state->src1 = state->words[1];
223 } else {
224 state->src1 = get_reg(state->wb_reg, regs,
225 cregs);
226 }
227 state->src2 = FIELD_C(state->words[0]);
228 if (state->src2 == REG_LIMM) {
229 state->instr_len += 4;
230 state->src2 = state->words[1];
231 } else {
232 state->src2 = get_reg(state->src2, regs,
233 cregs);
234 }
235 state->dest = FIELD_A(state->words[0]);
236 if (state->dest == REG_LIMM)
237 state->pref = 1;
238 break;
239
240 case 10: /* MOV */
241 /* still need to check for limm to extract instr len */
242 /* MOV is special case because it only takes 2 args */
243 switch (BITS(state->words[0], 22, 23)) {
244 case 0: /* OP a,b,c */
245 if (FIELD_C(state->words[0]) == REG_LIMM)
246 state->instr_len += 4;
247 break;
248 case 1: /* OP a,b,u6 */
249 break;
250 case 2: /* OP b,b,s12 */
251 break;
252 case 3: /* OP.cc b,b,c/u6 */
253 if ((!IS_BIT(state->words[0], 5)) &&
254 (FIELD_C(state->words[0]) == REG_LIMM))
255 state->instr_len += 4;
256 break;
257 }
258 break;
259
260
261 default:
262 /* Not a Load, Jump or Loop instruction */
263 /* still need to check for limm to extract instr len */
264 switch (BITS(state->words[0], 22, 23)) {
265 case 0: /* OP a,b,c */
266 if ((FIELD_B(state->words[0]) == REG_LIMM) ||
267 (FIELD_C(state->words[0]) == REG_LIMM))
268 state->instr_len += 4;
269 break;
270 case 1: /* OP a,b,u6 */
271 break;
272 case 2: /* OP b,b,s12 */
273 break;
274 case 3: /* OP.cc b,b,c/u6 */
275 if ((!IS_BIT(state->words[0], 5)) &&
276 ((FIELD_B(state->words[0]) == REG_LIMM) ||
277 (FIELD_C(state->words[0]) == REG_LIMM)))
278 state->instr_len += 4;
279 break;
280 }
281 break;
282 }
283 break;
284
285 /* 16 Bit Instructions */
286 case op_LD_ADD: /* LD_S|LDB_S|LDW_S a,[b,c] */
287 state->zz = BITS(state->words[0], 3, 4);
288 state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
289 state->src2 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
290 state->dest = FIELD_S_A(state->words[0]);
291 break;
292
293 case op_ADD_MOV_CMP:
294 /* check for limm, ignore mov_s h,b (== mov_s 0,b) */
295 if ((BITS(state->words[0], 3, 4) < 3) &&
296 (FIELD_S_H(state->words[0]) == REG_LIMM))
297 state->instr_len += 4;
298 break;
299
300 case op_S:
301 subopcode = BITS(state->words[0], 5, 7);
302 switch (subopcode) {
303 case 0: /* j_s */
304 case 1: /* j_s.d */
305 case 2: /* jl_s */
306 case 3: /* jl_s.d */
307 state->target = get_reg(FIELD_S_B(state->words[0]),
308 regs, cregs);
309 state->delay_slot = subopcode & 1;
310 state->flow = (subopcode >= 2) ?
311 direct_call : indirect_jump;
312 break;
313 case 7:
314 switch (BITS(state->words[0], 8, 10)) {
315 case 4: /* jeq_s [blink] */
316 case 5: /* jne_s [blink] */
317 case 6: /* j_s [blink] */
318 case 7: /* j_s.d [blink] */
319 state->delay_slot = (subopcode == 7);
320 state->flow = indirect_jump;
321 state->target = get_reg(31, regs, cregs);
322 default:
323 break;
324 }
325 default:
326 break;
327 }
328 break;
329
330 case op_LD_S: /* LD_S c, [b, u7] */
331 state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
332 state->src2 = FIELD_S_u7(state->words[0]);
333 state->dest = FIELD_S_C(state->words[0]);
334 break;
335
336 case op_LDB_S:
337 case op_STB_S:
338 /* no further handling required as byte accesses should not
339 * cause an unaligned access exception */
340 state->zz = 1;
341 break;
342
343 case op_LDWX_S: /* LDWX_S c, [b, u6] */
344 state->x = 1;
345 /* intentional fall-through */
346
347 case op_LDW_S: /* LDW_S c, [b, u6] */
348 state->zz = 2;
349 state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
350 state->src2 = FIELD_S_u6(state->words[0]);
351 state->dest = FIELD_S_C(state->words[0]);
352 break;
353
354 case op_ST_S: /* ST_S c, [b, u7] */
355 state->write = 1;
356 state->src1 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
357 state->src2 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
358 state->src3 = FIELD_S_u7(state->words[0]);
359 break;
360
361 case op_STW_S: /* STW_S c,[b,u6] */
362 state->write = 1;
363 state->zz = 2;
364 state->src1 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
365 state->src2 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
366 state->src3 = FIELD_S_u6(state->words[0]);
367 break;
368
369 case op_SP: /* LD_S|LDB_S b,[sp,u7], ST_S|STB_S b,[sp,u7] */
370 /* note: we are ignoring possibility of:
371 * ADD_S, SUB_S, PUSH_S, POP_S as these should not
372		 * cause an unaligned exception anyway */
373 state->write = BITS(state->words[0], 6, 6);
374 state->zz = BITS(state->words[0], 5, 5);
375 if (state->zz)
376 break; /* byte accesses should not come here */
377 if (!state->write) {
378 state->src1 = get_reg(28, regs, cregs);
379 state->src2 = FIELD_S_u7(state->words[0]);
380 state->dest = FIELD_S_B(state->words[0]);
381 } else {
382 state->src1 = get_reg(FIELD_S_B(state->words[0]), regs,
383 cregs);
384 state->src2 = get_reg(28, regs, cregs);
385 state->src3 = FIELD_S_u7(state->words[0]);
386 }
387 break;
388
389 case op_GP: /* LD_S|LDB_S|LDW_S r0,[gp,s11/s9/s10] */
390 /* note: ADD_S r0, gp, s11 is ignored */
391 state->zz = BITS(state->words[0], 9, 10);
392 state->src1 = get_reg(26, regs, cregs);
393 state->src2 = state->zz ? FIELD_S_s10(state->words[0]) :
394 FIELD_S_s11(state->words[0]);
395 state->dest = 0;
396 break;
397
398 case op_Pcl: /* LD_S b,[pcl,u10] */
399 state->src1 = regs->ret & ~3;
400 state->src2 = FIELD_S_u10(state->words[0]);
401 state->dest = FIELD_S_B(state->words[0]);
402 break;
403
404 case op_BR_S:
405 state->target = FIELD_S_s8(state->words[0]) + (addr & ~0x03);
406 state->flow = direct_jump;
407 state->is_branch = 1;
408 break;
409
410 case op_B_S:
411 fieldA = (BITS(state->words[0], 9, 10) == 3) ?
412 FIELD_S_s7(state->words[0]) :
413 FIELD_S_s10(state->words[0]);
414 state->target = fieldA + (addr & ~0x03);
415 state->flow = direct_jump;
416 state->is_branch = 1;
417 break;
418
419 case op_BL_S:
420 state->target = FIELD_S_s13(state->words[0]) + (addr & ~0x03);
421 state->flow = direct_call;
422 state->is_branch = 1;
423 break;
424
425 default:
426 break;
427 }
428
429 if (bytes_not_copied <= (8 - state->instr_len))
430 return;
431
432fault: state->fault = 1;
433}
434
435long __kprobes get_reg(int reg, struct pt_regs *regs,
436 struct callee_regs *cregs)
437{
438 long *p;
439
440 if (reg <= 12) {
441 p = &regs->r0;
442 return p[-reg];
443 }
444
445 if (cregs && (reg <= 25)) {
446 p = &cregs->r13;
447 return p[13-reg];
448 }
449
450 if (reg == 26)
451 return regs->r26;
452 if (reg == 27)
453 return regs->fp;
454 if (reg == 28)
455 return regs->sp;
456 if (reg == 31)
457 return regs->blink;
458
459 return 0;
460}
461
462void __kprobes set_reg(int reg, long val, struct pt_regs *regs,
463 struct callee_regs *cregs)
464{
465 long *p;
466
467 switch (reg) {
468 case 0 ... 12:
469 p = &regs->r0;
470 p[-reg] = val;
471 break;
472 case 13 ... 25:
473 if (cregs) {
474 p = &cregs->r13;
475 p[13-reg] = val;
476 }
477 break;
478 case 26:
479 regs->r26 = val;
480 break;
481 case 27:
482 regs->fp = val;
483 break;
484 case 28:
485 regs->sp = val;
486 break;
487 case 31:
488 regs->blink = val;
489 break;
490 default:
491 break;
492 }
493}
494
495/*
496 * Disassembles the insn at @pc and sets @next_pc to next PC (which could be
497 * @pc +2/4/6 (ARCompact ISA allows free intermixing of 16/32 bit insns).
498 *
499 * If @pc is a branch
500 * -@tgt_if_br is set to branch target.
501 * -If branch has delay slot, @next_pc updated with actual next PC.
502 */
503int __kprobes disasm_next_pc(unsigned long pc, struct pt_regs *regs,
504 struct callee_regs *cregs,
505 unsigned long *next_pc, unsigned long *tgt_if_br)
506{
507 struct disasm_state instr;
508
509 memset(&instr, 0, sizeof(struct disasm_state));
510 disasm_instr(pc, &instr, 0, regs, cregs);
511
512 *next_pc = pc + instr.instr_len;
513
514 /* Instruction with possible two targets branch, jump and loop */
515 if (instr.is_branch)
516 *tgt_if_br = instr.target;
517
518 /* For the instructions with delay slots, the fall through is the
519	 * instruction following the instruction in the delay slot.
520 */
521 if (instr.delay_slot) {
522 struct disasm_state instr_d;
523
524 disasm_instr(*next_pc, &instr_d, 0, regs, cregs);
525
526 *next_pc += instr_d.instr_len;
527 }
528
529 /* Zero Overhead Loop - end of the loop */
530 if (!(regs->status32 & STATUS32_L) && (*next_pc == regs->lp_end)
531 && (regs->lp_count > 1)) {
532 *next_pc = regs->lp_start;
533 }
534
535 return instr.is_branch;
536}
537
538#endif /* CONFIG_KGDB || CONFIG_MISALIGN_ACCESS || CONFIG_KPROBES */
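The negative indexing in get_reg()/set_reg() leans on the register file layout: in pt_regs (as in the user_regs_struct earlier in this patch) r12 is declared first and r0 last, so rN lives N words below r0; likewise r13 is declared last in the callee block, hence the p[13 - reg] form. A short sketch of the idiom, assuming that layout:

	long *p = &regs->r0;	/* ... r3, r2, r1, r0 are contiguous, r0 highest */
	long r3 = p[-3];	/* == regs->r3 */

	long *q = &cregs->r13;	/* r25 ... r14, r13 declared in descending order */
	long r20 = q[13 - 20];	/* == cregs->r20, i.e. q[-7] */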
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
new file mode 100644
index 000000000000..ef6800ba2f03
--- /dev/null
+++ b/arch/arc/kernel/entry.S
@@ -0,0 +1,839 @@
1/*
2 * Low Level Interrupts/Traps/Exceptions(non-TLB) Handling for ARC
3 *
4 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * vineetg: May 2011
11 * -Userspace unaligned access emulation
12 *
13 * vineetg: Feb 2011 (ptrace low level code fixes)
14 * -traced syscall return code (r0) was not saved into pt_regs for restoring
15 * into user reg-file when traced task returns to user space.
16 * -syscalls needing arch-wrappers (mainly for passing sp as pt_regs)
17 * were not invoking post-syscall trace hook (jumping directly into
18 * ret_from_system_call)
19 *
20 * vineetg: Nov 2010:
21 * -Vector table jumps (@8 bytes) converted into branches (@4 bytes)
22 * -To maintain the slot size of 8 bytes/vector, added nop, which is
23 * not executed at runtime.
24 *
25 * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
26 * -do_signal()invoked upon TIF_RESTORE_SIGMASK as well
27 * -Wrappers for sys_{,rt_}sigsuspend() no longer needed as they don't
28 * need ptregs anymore
29 *
30 * Vineetg: Oct 2009
31 * -In a rare scenario, Process gets a Priv-V exception and gets scheduled
32 * out. Since we don't do FAKE RTIE for Priv-V, CPU exception state remains
33 * active (AE bit enabled). This causes a double fault for a subsequent valid
34 * exception. Thus FAKE RTIE needed in low level Priv-Violation handler.
35 * Instr Error could also cause similar scenario, so same there as well.
36 *
37 * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
38 *
39 * Vineetg: Aug 28th 2008: Bug #94984
40 * -Zero Overhead Loop Context should be cleared when entering IRQ/Excp/Trap
41 * Normally CPU does this automatically, however when doing FAKE rtie,
42 * we need to explicitly do this. The problem in macros
43 * FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
44 * was being "CLEARED" rather than "SET". Since it is Loop INHIBIT Bit,
45 * setting it and not clearing it clears ZOL context
46 *
47 * Vineetg: May 16th, 2008
48 * - r25 now contains the Current Task when in kernel
49 *
50 * Vineetg: Dec 22, 2007
51 * Minor Surgery of Low Level ISR to make it SMP safe
52 * - MMU_SCRATCH0 Reg used for freeing up r9 in Level 1 ISR
53 * - _current_task is made an array of NR_CPUS
54 * - Access of _current_task wrapped inside a macro so that if hardware
55 * team agrees for a dedicated reg, no other code is touched
56 *
57 * Amit Bhor, Rahul Trivedi, Kanika Nema, Sameer Dhavale : Codito Tech 2004
58 */
59
60/*------------------------------------------------------------------
61 * Function ABI
62 *------------------------------------------------------------------
63 *
64 * Arguments r0 - r7
65 * Caller Saved Registers r0 - r12
66 * Callee Saved Registers r13- r25
67 * Global Pointer (gp) r26
68 * Frame Pointer (fp) r27
69 * Stack Pointer (sp) r28
70 * Interrupt link register (ilink1) r29
71 * Interrupt link register (ilink2) r30
72 * Branch link register (blink) r31
73 *------------------------------------------------------------------
74 */
75
76 .cpu A7
77
78;############################ Vector Table #################################
79
80.macro VECTOR lbl
81#if 1 /* Just in case the build breaks */
82 j \lbl
83#else
84 b \lbl
85 nop
86#endif
87.endm
88
89 .section .vector, "ax",@progbits
90 .align 4
91
92/* Each entry in the vector table must occupy 2 words. Since it is a jump
93 * across sections (.vector to .text) we are guaranteed that 'j somewhere'
94 * will use the 'j limm' form of the instruction as long as somewhere is in
95 * a section other than .vector.
96 */
97
98; ********* Critical System Events **********************
99VECTOR res_service ; 0x0, Restart Vector (0x0)
100VECTOR mem_service ; 0x8, Mem exception (0x1)
101VECTOR instr_service ; 0x10, Instrn Error (0x2)
102
103; ******************** Device ISRs **********************
104#ifdef CONFIG_ARC_IRQ3_LV2
105VECTOR handle_interrupt_level2
106#else
107VECTOR handle_interrupt_level1
108#endif
109
110VECTOR handle_interrupt_level1
111
112#ifdef CONFIG_ARC_IRQ5_LV2
113VECTOR handle_interrupt_level2
114#else
115VECTOR handle_interrupt_level1
116#endif
117
118#ifdef CONFIG_ARC_IRQ6_LV2
119VECTOR handle_interrupt_level2
120#else
121VECTOR handle_interrupt_level1
122#endif
123
124.rept 25
125VECTOR handle_interrupt_level1 ; Other devices
126.endr
127
128/* FOR ARC600: timer = 0x3, uart = 0x8, emac = 0x10 */
129
130; ******************** Exceptions **********************
131VECTOR EV_MachineCheck ; 0x100, Fatal Machine check (0x20)
132VECTOR EV_TLBMissI ; 0x108, Instruction TLB miss (0x21)
133VECTOR EV_TLBMissD ; 0x110, Data TLB miss (0x22)
134VECTOR EV_TLBProtV ; 0x118, Protection Violation (0x23)
135 ; or Misaligned Access
136VECTOR EV_PrivilegeV ; 0x120, Privilege Violation (0x24)
137VECTOR EV_Trap ; 0x128, Trap exception (0x25)
138VECTOR EV_Extension ; 0x130, Extn Instruction Excp (0x26)
139
140.rept 24
141VECTOR reserved ; Reserved Exceptions
142.endr
143
144#include <linux/linkage.h> /* ARC_{ENTRY,EXIT} */
145#include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,TRAP...} */
146#include <asm/errno.h>
147#include <asm/arcregs.h>
148#include <asm/irqflags.h>
149
150;##################### Scratch Mem for IRQ stack switching #############
151
152ARCFP_DATA int1_saved_reg
153 .align 32
154 .type int1_saved_reg, @object
155 .size int1_saved_reg, 4
156int1_saved_reg:
157 .zero 4
158
159/* Each Interrupt level needs its own scratch */
160#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
161
162ARCFP_DATA int2_saved_reg
163 .type int2_saved_reg, @object
164 .size int2_saved_reg, 4
165int2_saved_reg:
166 .zero 4
167
168#endif
169
170; ---------------------------------------------
171 .section .text, "ax",@progbits
172
173res_service: ; processor restart
174 flag 0x1 ; not implemented
175 nop
176 nop
177
178reserved: ; processor restart
179 rtie ; jump to processor initializations
180
181;##################### Interrupt Handling ##############################
182
183#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
184; ---------------------------------------------
185; Level 2 ISR: Can interrupt a Level 1 ISR
186; ---------------------------------------------
187ARC_ENTRY handle_interrupt_level2
188
189 ; TODO-vineetg for SMP this won't work
190 ; free up r9 as scratchpad
191 st r9, [@int2_saved_reg]
192
193 ;Which mode (user/kernel) was the system in when intr occurred
194 lr r9, [status32_l2]
195
196 SWITCH_TO_KERNEL_STK
197 SAVE_ALL_INT2
198
199 ;------------------------------------------------------
200 ; if L2 IRQ interrupted a L1 ISR, disable preemption
201 ;------------------------------------------------------
202
203 ld r9, [sp, PT_status32] ; get status32_l2 (saved in pt_regs)
204 bbit0 r9, STATUS_A1_BIT, 1f ; L1 not active when L2 IRQ, so normal
205
206 ; A1 is set in status32_l2
207 ; bump thread_info->preempt_count (Disable preemption)
208 GET_CURR_THR_INFO_FROM_SP r10
209 ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
210 add r9, r9, 1
211 st r9, [r10, THREAD_INFO_PREEMPT_COUNT]
212
2131:
214 ;------------------------------------------------------
215 ; setup params for Linux common ISR and invoke it
216 ;------------------------------------------------------
217 lr r0, [icause2]
218 and r0, r0, 0x1f
219
220 bl.d @arch_do_IRQ
221 mov r1, sp
222
223 mov r8,0x2
224 sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg
225
226 b ret_from_exception
227
228ARC_EXIT handle_interrupt_level2
229
230#endif
231
232; ---------------------------------------------
233; Level 1 ISR
234; ---------------------------------------------
235ARC_ENTRY handle_interrupt_level1
236
237 /* free up r9 as scratchpad */
238#ifdef CONFIG_SMP
239 sr r9, [ARC_REG_SCRATCH_DATA0]
240#else
241 st r9, [@int1_saved_reg]
242#endif
243
244 ;Which mode (user/kernel) was the system in when intr occurred
245 lr r9, [status32_l1]
246
247 SWITCH_TO_KERNEL_STK
248 SAVE_ALL_INT1
249
250 lr r0, [icause1]
251 and r0, r0, 0x1f
252
253 bl.d @arch_do_IRQ
254 mov r1, sp
255
256 mov r8,0x1
257 sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg
258
259 b ret_from_exception
260ARC_EXIT handle_interrupt_level1
261
262;################### Non TLB Exception Handling #############################
263
264; ---------------------------------------------
265; Instruction Error Exception Handler
266; ---------------------------------------------
267
268ARC_ENTRY instr_service
269
270 EXCPN_PROLOG_FREEUP_REG r9
271
272 lr r9, [erstatus]
273
274 SWITCH_TO_KERNEL_STK
275 SAVE_ALL_SYS
276
277 lr r0, [ecr]
278 lr r1, [efa]
279
280 mov r2, sp
281
282 FAKE_RET_FROM_EXCPN r9
283
284 bl do_insterror_or_kprobe
285 b ret_from_exception
286ARC_EXIT instr_service
287
288; ---------------------------------------------
289; Memory Error Exception Handler
290; ---------------------------------------------
291
292ARC_ENTRY mem_service
293
294 EXCPN_PROLOG_FREEUP_REG r9
295
296 lr r9, [erstatus]
297
298 SWITCH_TO_KERNEL_STK
299 SAVE_ALL_SYS
300
301 lr r0, [ecr]
302 lr r1, [efa]
303 mov r2, sp
304 bl do_memory_error
305 b ret_from_exception
306ARC_EXIT mem_service
307
308; ---------------------------------------------
309; Machine Check Exception Handler
310; ---------------------------------------------
311
312ARC_ENTRY EV_MachineCheck
313
314 EXCPN_PROLOG_FREEUP_REG r9
315 lr r9, [erstatus]
316
317 SWITCH_TO_KERNEL_STK
318 SAVE_ALL_SYS
319
320 lr r0, [ecr]
321 lr r1, [efa]
322 mov r2, sp
323
324 brne r0, 0x200100, 1f
325 bl do_tlb_overlap_fault
326 b ret_from_exception
327
3281:
329 ; DEAD END: can't do much, display Regs and HALT
330 SAVE_CALLEE_SAVED_USER
331
332 GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
333 st sp, [r10, THREAD_CALLEE_REG]
334
335 j do_machine_check_fault
336
337ARC_EXIT EV_MachineCheck
338
339; ---------------------------------------------
340; Protection Violation Exception Handler
341; ---------------------------------------------
342
343ARC_ENTRY EV_TLBProtV
344
345 EXCPN_PROLOG_FREEUP_REG r9
346
347 ;Which mode (user/kernel) was the system in when Exception occurred
348 lr r9, [erstatus]
349
350 SWITCH_TO_KERNEL_STK
351 SAVE_ALL_SYS
352
353 ;---------(3) Save some more regs-----------------
354 ; vineetg: Mar 6th: Random Seg Fault issue #1
355 ; ecr and efa were not saved in case an Intr sneaks in
356 ; after fake rtie
357 ;
358 lr r3, [ecr]
359 lr r4, [efa]
360
361 ; --------(4) Return from CPU Exception Mode ---------
362 ; Fake a rtie, but rtie to next label
363 ; That way, subsequently, do_page_fault ( ) executes in pure kernel
364 ; mode with further Exceptions enabled
365
366 FAKE_RET_FROM_EXCPN r9
367
368 ;------ (5) Type of Protection Violation? ----------
369 ;
370 ; ProtV Hardware Exception is triggered for Access Faults of 2 types
371 ; -Access Violation (WRITE to READ ONLY Page) - for linux COW
372 ; -Unaligned Access (READ/WRITE on odd boundary)
373 ;
374 cmp r3, 0x230400 ; Misaligned data access ?
375 beq 4f
376
377 ;========= (6a) Access Violation Processing ========
378 cmp r3, 0x230100
379 mov r1, 0x0 ; if LD exception ? write = 0
380 mov.ne r1, 0x1 ; else write = 1
381
382 mov r2, r4 ; faulting address
383 mov r0, sp ; pt_regs
384 bl do_page_fault
385 b ret_from_exception
386
387 ;========== (6b) Non aligned access ============
3884:
389 mov r0, r3 ; cause code
390 mov r1, r4 ; faulting address
391 mov r2, sp ; pt_regs
392
393#ifdef CONFIG_ARC_MISALIGN_ACCESS
394 SAVE_CALLEE_SAVED_USER
395 mov r3, sp ; callee_regs
396#endif
397
398 bl do_misaligned_access
399
400#ifdef CONFIG_ARC_MISALIGN_ACCESS
401 DISCARD_CALLEE_SAVED_USER
402#endif
403
404 b ret_from_exception
405
406ARC_EXIT EV_TLBProtV
407
408; ---------------------------------------------
409; Privilege Violation Exception Handler
410; ---------------------------------------------
411ARC_ENTRY EV_PrivilegeV
412
413 EXCPN_PROLOG_FREEUP_REG r9
414
415 lr r9, [erstatus]
416
417 SWITCH_TO_KERNEL_STK
418 SAVE_ALL_SYS
419
420 lr r0, [ecr]
421 lr r1, [efa]
422 mov r2, sp
423
424 FAKE_RET_FROM_EXCPN r9
425
426 bl do_privilege_fault
427 b ret_from_exception
428ARC_EXIT EV_PrivilegeV
429
430; ---------------------------------------------
431; Extension Instruction Exception Handler
432; ---------------------------------------------
433ARC_ENTRY EV_Extension
434
435 EXCPN_PROLOG_FREEUP_REG r9
436 lr r9, [erstatus]
437
438 SWITCH_TO_KERNEL_STK
439 SAVE_ALL_SYS
440
441 lr r0, [ecr]
442 lr r1, [efa]
443 mov r2, sp
444 bl do_extension_fault
445 b ret_from_exception
446ARC_EXIT EV_Extension
447
448;######################### System Call Tracing #########################
449
450tracesys:
451 ; save EFA in case tracer wants the PC of traced task
452 ; using ERET won't work since next-PC has already committed
453 lr r12, [efa]
454 GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
455 st r12, [r11, THREAD_FAULT_ADDR]
456
457 ; PRE Sys Call Ptrace hook
458 mov r0, sp ; pt_regs needed
459 bl @syscall_trace_entry
460
461 ; Tracing code now returns the syscall num (orig or modif)
462 mov r8, r0
463
464 ; Do the Sys Call as we normally would.
465 ; Validate the Sys Call number
466 cmp r8, NR_syscalls
467 mov.hi r0, -ENOSYS
468 bhi tracesys_exit
469
470 ; Restore the sys-call args. Mere invocation of the hook above could have
471 ; clobbered them (since they are in scratch regs). The tracer could also
472 ; have deliberately changed the syscall args: r0-r7
473 ld r0, [sp, PT_r0]
474 ld r1, [sp, PT_r1]
475 ld r2, [sp, PT_r2]
476 ld r3, [sp, PT_r3]
477 ld r4, [sp, PT_r4]
478 ld r5, [sp, PT_r5]
479 ld r6, [sp, PT_r6]
480 ld r7, [sp, PT_r7]
481 ld.as r9, [sys_call_table, r8]
482 jl [r9] ; Entry into Sys Call Handler
483
484tracesys_exit:
485 st r0, [sp, PT_r0] ; sys call return value in pt_regs
486
487 ;POST Sys Call Ptrace Hook
488 bl @syscall_trace_exit
489 b ret_from_exception ; NOT ret_from_system_call as it saves r0 which
490 ; we'd already done before calling the post hook above
491
492;################### Break Point TRAP ##########################
493
494 ; ======= (5b) Trap is due to Break-Point =========
495
496trap_with_param:
497
498 ; gdb's computation of stop_pc needs this info
499 stw orig_r8_IS_BRKPT, [sp, PT_orig_r8]
500
501 mov r0, r12
502 lr r1, [efa]
503 mov r2, sp
504
505 ; Now that we have read EFA, it's safe to do "fake" rtie
506 ; and get out of CPU exception mode
507 FAKE_RET_FROM_EXCPN r11
508
509 ; Save callee regs in case gdb wants to have a look
510 ; SP will grow up by size of CALLEE Reg-File
511 ; NOTE: clobbers r12
512 SAVE_CALLEE_SAVED_USER
513
514 ; save location of saved Callee Regs @ thread_struct->pc
515 GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
516 st sp, [r10, THREAD_CALLEE_REG]
517
518 ; Call the trap handler
519 bl do_non_swi_trap
520
521 ; unwind stack to discard Callee saved Regs
522 DISCARD_CALLEE_SAVED_USER
523
524 b ret_from_exception
525
526;##################### Trap Handling ##############################
527;
528; EV_Trap caused by TRAP_S and TRAP0 instructions.
529;------------------------------------------------------------------
530; (1) System Calls
531; :parameters in r0-r7.
532; :r8 has the system call number
533; (2) Break Points
534;------------------------------------------------------------------
535
536ARC_ENTRY EV_Trap
537
538 ; Need at least 1 reg to code the early exception prolog
539 EXCPN_PROLOG_FREEUP_REG r9
540
541 ;Which mode (user/kernel) was the system in when intr occurred
542 lr r9, [erstatus]
543
544 SWITCH_TO_KERNEL_STK
545 SAVE_ALL_TRAP
546
547 ;------- (4) What caused the Trap --------------
548 lr r12, [ecr]
549 and.f 0, r12, ECR_PARAM_MASK
550 bnz trap_with_param
551
552 ; ======= (5a) Trap is due to System Call ========
553
554 ; Before doing anything, return from CPU Exception Mode
555 FAKE_RET_FROM_EXCPN r11
556
557 ; If syscall tracing ongoing, invoke pre/post hooks
558 GET_CURR_THR_INFO_FLAGS r10
559 btst r10, TIF_SYSCALL_TRACE
560 bnz tracesys ; this never comes back
561
562 ;============ This is normal System Call case ==========
563 ; Sys-call num should not exceed the total system calls avail
564 cmp r8, NR_syscalls
565 mov.hi r0, -ENOSYS
566 bhi ret_from_system_call
567
568 ; Offset into the syscall_table and call handler
569 ld.as r9,[sys_call_table, r8]
570 jl [r9] ; Entry into Sys Call Handler
571
572 ; fall through to ret_from_system_call
573ARC_EXIT EV_Trap
574
575ARC_ENTRY ret_from_system_call
576
577 st r0, [sp, PT_r0] ; sys call return value in pt_regs
578
579 ; fall through yet again to ret_from_exception
580
581;############# Return from Intr/Excp/Trap (Linux Specifics) ##############
582;
583; If ret to user mode do we need to handle signals, schedule() et al.
584
585ARC_ENTRY ret_from_exception
586
587 ; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32
588 ld r8, [sp, PT_status32] ; returning to User/Kernel Mode
589
590#ifdef CONFIG_PREEMPT
591 bbit0 r8, STATUS_U_BIT, resume_kernel_mode
592#else
593 bbit0 r8, STATUS_U_BIT, restore_regs
594#endif
595
596 ; Before returning to User mode check-for-and-complete any pending work
597 ; such as rescheduling/signal-delivery etc.
598resume_user_mode_begin:
599
600 ; Disable IRQs to ensure that the check for pending work itself is atomic
601 ; (and we don't end up missing a NEED_RESCHED/SIGPENDING due to an
602 ; interim IRQ).
603 IRQ_DISABLE r10
604
605 ; Fast Path return to user mode if no pending work
606 GET_CURR_THR_INFO_FLAGS r9
607 and.f 0, r9, _TIF_WORK_MASK
608 bz restore_regs
609
610 ; --- (Slow Path #1) task preemption ---
611 bbit0 r9, TIF_NEED_RESCHED, .Lchk_pend_signals
612 mov blink, resume_user_mode_begin ; tail-call to U mode ret chks
613 b @schedule ; BTST+Bnz causes relo error in link
614
615.Lchk_pend_signals:
616 IRQ_ENABLE r10
617
618 ; --- (Slow Path #2) pending signal ---
619 mov r0, sp ; pt_regs for arg to do_signal()/do_notify_resume()
620
621 bbit0 r9, TIF_SIGPENDING, .Lchk_notify_resume
622
623 ; Normal Trap/IRQ entry only saves Scratch (caller-saved) regs
624 ; in pt_reg since the "C" ABI (kernel code) will automatically
625 ; save/restore callee-saved regs.
626 ;
627 ; However, here we need to explicitly save callee regs because
628 ; (i) If this signal causes coredump - full regfile needed
629 ; (ii) If signal is SIGTRAP/SIGSTOP, task is being traced thus
630 ; tracer might call PEEKUSR(CALLEE reg)
631 ;
632 ; NOTE: SP will grow up by size of CALLEE Reg-File
633 SAVE_CALLEE_SAVED_USER ; clobbers r12
634
635 ; save location of saved Callee Regs @ thread_struct->callee
636 GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
637 st sp, [r10, THREAD_CALLEE_REG]
638
639 bl @do_signal
640
641 ; Ideally we want to discard the Callee reg above, however if this was
642 ; a tracing signal, tracer could have done a POKEUSR(CALLEE reg)
643 RESTORE_CALLEE_SAVED_USER
644
645 b resume_user_mode_begin ; loop back to start of U mode ret
646
647 ; --- (Slow Path #3) notify_resume ---
648.Lchk_notify_resume:
649 btst r9, TIF_NOTIFY_RESUME
650 blnz @do_notify_resume
651 b resume_user_mode_begin ; unconditionally back to U mode ret chks
652 ; for single exit point from this block
653
654#ifdef CONFIG_PREEMPT
655
656resume_kernel_mode:
657
658 ; Can't preempt if preemption disabled
659 GET_CURR_THR_INFO_FROM_SP r10
660 ld r8, [r10, THREAD_INFO_PREEMPT_COUNT]
661 brne r8, 0, restore_regs
662
663 ; check if this task's NEED_RESCHED flag set
664 ld r9, [r10, THREAD_INFO_FLAGS]
665 bbit0 r9, TIF_NEED_RESCHED, restore_regs
666
667 IRQ_DISABLE r9
668
669 ; Invoke PREEMPTION
670 bl preempt_schedule_irq
671
672 ; preempt_schedule_irq() always returns with IRQ disabled
673#endif
674
675 ; fall through
676
677;############# Return from Intr/Excp/Trap (ARC Specifics) ##############
678;
679; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap)
680; IRQ shd definitely not happen between now and rtie
681
682restore_regs :
683
684 ; Disable Interrupts while restoring reg-file back
685 ; XXX can this be optimised out
686 IRQ_DISABLE_SAVE r9, r10 ;@r10 has pristine (pre-disable) copy
687
688#ifdef CONFIG_ARC_CURR_IN_REG
689 ; Restore User R25
690 ; Earlier this used to be only for returning to user mode
691 ; However with 2 levels of IRQ this can also happen even if
692 ; in kernel mode
693 ld r9, [sp, PT_sp]
694 brhs r9, VMALLOC_START, 8f
695 RESTORE_USER_R25
6968:
697#endif
698
699 ; Restore REG File. In case multiple Events outstanding,
700 ; use the same priority as rtie: EXCPN, L2 IRQ, L1 IRQ, None
701 ; Note that we use realtime STATUS32 (not pt_regs->status32) to
702 ; decide that.
703
704 ; if Returning from Exception
705 bbit0 r10, STATUS_AE_BIT, not_exception
706 RESTORE_ALL_SYS
707 rtie
708
709 ; Not Exception so maybe Interrupts (Level 1 or 2)
710
711not_exception:
712
713#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
714
715 bbit0 r10, STATUS_A2_BIT, not_level2_interrupt
716
717 ;------------------------------------------------------------------
718 ; if L2 IRQ interrupted an L1 ISR, we'd disabled preemption earlier
719 ; so that sched doesn't move to a new task, causing L1 to be delayed
720 ; non-deterministically. Now that we've achieved that, let's reset
721 ; things to what they were, before returning from L2 context
722 ;----------------------------------------------------------------
723
724 ldw r9, [sp, PT_orig_r8] ; get orig_r8 to make sure it is
725 brne r9, orig_r8_IS_IRQ2, 149f ; in fact an L2 ISR ret path
726
727 ld r9, [sp, PT_status32] ; get status32_l2 (saved in pt_regs)
728 bbit0 r9, STATUS_A1_BIT, 149f ; L1 not active when L2 IRQ, so normal
729
730 ; A1 is set in status32_l2
731 ; decrement thread_info->preempt_count (re-enable preemption)
732 GET_CURR_THR_INFO_FROM_SP r10
733 ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
734
735 ; paranoid check, given A1 was active when A2 happened, preempt count
736 ; must not be 0 because we would have incremented it.
737 ; If this does happen we simply HALT as it means a BUG !!!
738 cmp r9, 0
739 bnz 2f
740 flag 1
741
7422:
743 sub r9, r9, 1
744 st r9, [r10, THREAD_INFO_PREEMPT_COUNT]
745
746149:
747 ;return from level 2
748 RESTORE_ALL_INT2
749debug_marker_l2:
750 rtie
751
752not_level2_interrupt:
753
754#endif
755
756 bbit0 r10, STATUS_A1_BIT, not_level1_interrupt
757
758 ;return from level 1
759
760 RESTORE_ALL_INT1
761debug_marker_l1:
762 rtie
763
764not_level1_interrupt:
765
766 ;this case is for syscalls or Exceptions (with fake rtie)
767
768 RESTORE_ALL_SYS
769debug_marker_syscall:
770 rtie
771
772ARC_EXIT ret_from_exception
773
774ARC_ENTRY ret_from_fork
775 ; when the forked child comes here from the __switch_to function
776 ; r0 has the last task pointer.
777 ; put last task in scheduler queue
778 bl @schedule_tail
779
780 ; If kernel thread, jump to its entry-point
781 ld r9, [sp, PT_status32]
782 brne r9, 0, 1f
783
784 jl.d [r14]
785 mov r0, r13 ; arg to payload
786
7871:
788 ; special case of kernel_thread entry point returning back due to
789 ; kernel_execve() - pretend return from syscall to ret to userland
790 b ret_from_exception
791ARC_EXIT ret_from_fork
792
793;################### Special Sys Call Wrappers ##########################
794
795; TBD: call do_fork directly from here
796ARC_ENTRY sys_fork_wrapper
797 SAVE_CALLEE_SAVED_USER
798 bl @sys_fork
799 DISCARD_CALLEE_SAVED_USER
800
801 GET_CURR_THR_INFO_FLAGS r10
802 btst r10, TIF_SYSCALL_TRACE
803 bnz tracesys_exit
804
805 b ret_from_system_call
806ARC_EXIT sys_fork_wrapper
807
808ARC_ENTRY sys_vfork_wrapper
809 SAVE_CALLEE_SAVED_USER
810 bl @sys_vfork
811 DISCARD_CALLEE_SAVED_USER
812
813 GET_CURR_THR_INFO_FLAGS r10
814 btst r10, TIF_SYSCALL_TRACE
815 bnz tracesys_exit
816
817 b ret_from_system_call
818ARC_EXIT sys_vfork_wrapper
819
820ARC_ENTRY sys_clone_wrapper
821 SAVE_CALLEE_SAVED_USER
822 bl @sys_clone
823 DISCARD_CALLEE_SAVED_USER
824
825 GET_CURR_THR_INFO_FLAGS r10
826 btst r10, TIF_SYSCALL_TRACE
827 bnz tracesys_exit
828
829 b ret_from_system_call
830ARC_EXIT sys_clone_wrapper
831
832#ifdef CONFIG_ARC_DW2_UNWIND
833; Workaround for bug 94179 (STAR ):
834; Despite -fasynchronous-unwind-tables, linker is not making dwarf2 unwinder
835; section (.debug_frame) as loadable. So we force it here.
836; This also fixes STAR 9000487933 where the prev-workaround (objcopy --setflag)
837; would not work after a clean build due to kernel build system dependencies.
838.section .debug_frame, "wa",@progbits
839#endif
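
For readers tracing the EV_Trap fast path above, the sequence "cmp r8, NR_syscalls; bhi ...; ld.as r9,[sys_call_table, r8]; jl [r9]" corresponds roughly to the following C. This is only an editorial sketch of the control flow, not code from the patch; the function name is invented.

typedef long (*syscall_fn)(long, long, long, long, long, long, long);

/* hypothetical C rendering of the EV_Trap syscall dispatch */
static long sketch_syscall_dispatch(long nr, long a0, long a1, long a2,
				    long a3, long a4, long a5, long a6)
{
	extern syscall_fn sys_call_table[];

	/* bhi: unsigned "higher" test, mirroring the asm */
	if ((unsigned long)nr > NR_syscalls)
		return -ENOSYS;			/* mov.hi r0, -ENOSYS */

	/* ld.as r9,[sys_call_table, r8]; jl [r9] */
	return sys_call_table[nr](a0, a1, a2, a3, a4, a5, a6);
}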
diff --git a/arch/arc/kernel/fpu.c b/arch/arc/kernel/fpu.c
new file mode 100644
index 000000000000..f352e512cbd1
--- /dev/null
+++ b/arch/arc/kernel/fpu.c
@@ -0,0 +1,55 @@
1/*
2 * fpu.c - save/restore of Floating Point Unit Registers on task switch
3 *
4 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/sched.h>
12#include <asm/switch_to.h>
13
14/*
15 * To save/restore FPU regs, simplest scheme would use LR/SR insns.
16 * However since SR serializes the pipeline, an alternate "hack" can be used
17 * which uses the FPU Exchange insn (DEXCL) to r/w FPU regs.
18 *
19 * Store to 64bit dpfp1 reg from a pair of core regs:
20 * dexcl1 0, r1, r0 ; where r1:r0 is the 64 bit val
21 *
22 * Read from dpfp1 into pair of core regs (w/o clobbering dpfp1)
23 * mov_s r3, 0
24 * daddh11 r1, r3, r3 ; get "hi" into r1 (dpfp1 unchanged)
25 * dexcl1 r0, r1, r3 ; get "low" into r0 (dpfp1 low clobbered)
26 * dexcl1 0, r1, r0 ; restore dpfp1 to orig value
27 *
28 * However we can tweak the read, so that read-out of outgoing task's FPU regs
29 * and write of incoming task's regs happen in one shot. So all the work is
30 * done before context switch
31 */
32
33void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
34{
35 unsigned int *saveto = &prev->thread.fpu.aux_dpfp[0].l;
36 unsigned int *readfrom = &next->thread.fpu.aux_dpfp[0].l;
37
38 const unsigned int zero = 0;
39
40 __asm__ __volatile__(
41 "daddh11 %0, %2, %2\n"
42 "dexcl1 %1, %3, %4\n"
43 : "=&r" (*(saveto + 1)), /* early clobber must here */
44 "=&r" (*(saveto))
45 : "r" (zero), "r" (*(readfrom + 1)), "r" (*(readfrom))
46 );
47
48 __asm__ __volatile__(
49 "daddh22 %0, %2, %2\n"
50 "dexcl2 %1, %3, %4\n"
51 : "=&r"(*(saveto + 3)), /* early clobber must here */
52 "=&r"(*(saveto + 2))
53 : "r" (zero), "r" (*(readfrom + 3)), "r" (*(readfrom + 2))
54 );
55}
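
As a counterpoint to the DEXCL trick, the "simplest scheme" the header comment mentions would go through LR/SR on the four DPFP aux registers. A sketch under stated assumptions: the AUX_DPFP_* names are hypothetical placeholders for the real aux register numbers in arcregs.h, the struct name arc_fpu is assumed, and the l/h field layout is the one implied by the code above.

/* Editorial sketch of the serializing LR/SR alternative.
 * AUX_DPFP_{1,2}{L,H} are hypothetical names for the aux addresses.
 */
static void sketch_fpu_save_restore(struct task_struct *prev,
				    struct task_struct *next)
{
	struct arc_fpu *save = &prev->thread.fpu;
	struct arc_fpu *load = &next->thread.fpu;

	save->aux_dpfp[0].l = read_aux_reg(AUX_DPFP_1L);
	save->aux_dpfp[0].h = read_aux_reg(AUX_DPFP_1H);
	save->aux_dpfp[1].l = read_aux_reg(AUX_DPFP_2L);
	save->aux_dpfp[1].h = read_aux_reg(AUX_DPFP_2H);

	write_aux_reg(AUX_DPFP_1L, load->aux_dpfp[0].l);  /* SR serializes */
	write_aux_reg(AUX_DPFP_1H, load->aux_dpfp[0].h);
	write_aux_reg(AUX_DPFP_2L, load->aux_dpfp[1].l);
	write_aux_reg(AUX_DPFP_2H, load->aux_dpfp[1].h);
}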
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
new file mode 100644
index 000000000000..006dec3fc353
--- /dev/null
+++ b/arch/arc/kernel/head.S
@@ -0,0 +1,111 @@
1/*
2 * ARC CPU startup Code
3 *
4 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Vineetg: Dec 2007
11 * -Check if we are running on Simulator or on real hardware
12 * to skip certain things during boot on simulator
13 */
14
15#include <asm/asm-offsets.h>
16#include <asm/entry.h>
17#include <linux/linkage.h>
18#include <asm/arcregs.h>
19
20 .cpu A7
21
22 .section .init.text, "ax",@progbits
23 .type stext, @function
24 .globl stext
25stext:
26 ;-------------------------------------------------------------------
27 ; Don't clobber r0-r4 yet. They might have bootloader provided info
28 ;-------------------------------------------------------------------
29
30#ifdef CONFIG_SMP
31 ; Only Boot (Master) proceeds. Others wait in platform dependent way
32 ; IDENTITY Reg [ 3 2 1 0 ]
33 ; (cpu-id) ^^^ => Zero for UP ARC700
34 ; => #Core-ID if SMP (Master 0)
35 GET_CPU_ID r5
36 cmp r5, 0
37 jnz arc_platform_smp_wait_to_boot
38#endif
39 ; Clear BSS before updating any globals
40 ; XXX: use ZOL here
41 mov r5, __bss_start
42 mov r6, __bss_stop
431:
44 st.ab 0, [r5,4]
45 brlt r5, r6, 1b
46
47#ifdef CONFIG_CMDLINE_UBOOT
48 ; support for bootloader provided cmdline
49 ; If cmdline passed by u-boot, then
50 ; r0 = 1 (because ATAGS parsing, now retired, used to use 0)
51 ; r1 = magic number (board identity)
52 ; r2 = addr of cmdline string (somewhere in memory/flash)
53
54 brne r0, 1, .Lother_bootup_chores ; u-boot didn't pass cmdline
55 breq r2, 0, .Lother_bootup_chores ; or cmdline is NULL
56
57 mov r5, @command_line
581:
59 ldb.ab r6, [r2, 1]
60 breq r6, 0, .Lother_bootup_chores
61 b.d 1b
62 stb.ab r6, [r5, 1]
63#endif
64
65.Lother_bootup_chores:
66
67 ; Identify if running on ISS vs Silicon
68 ; IDENTITY Reg [ 3 2 1 0 ]
69 ; (chip-id) ^^^^^ ==> 0xffff for ISS
70 lr r0, [identity]
71 lsr r3, r0, 16
72 cmp r3, 0xffff
73 mov.z r4, 0
74 mov.nz r4, 1
75 st r4, [@running_on_hw]
76
77 ; setup "current" tsk and optionally cache it in dedicated r25
78 mov r9, @init_task
79 SET_CURR_TASK_ON_CPU r9, r0 ; r9 = tsk, r0 = scratch
80
81 ; setup stack (fp, sp)
82 mov fp, 0
83
84 ; tsk->thread_info is really a PAGE, whose bottom hosts the stack
85 GET_TSK_STACK_BASE r9, sp ; r9 = tsk, sp = stack base(output)
86
87 j start_kernel ; "C" entry point
88
89#ifdef CONFIG_SMP
90;----------------------------------------------------------------
91; First lines of code run by secondary before jumping to 'C'
92;----------------------------------------------------------------
93 .section .init.text, "ax",@progbits
94 .type first_lines_of_secondary, @function
95 .globl first_lines_of_secondary
96
97first_lines_of_secondary:
98
99 ; setup per-cpu idle task as "current" on this CPU
100 ld r0, [@secondary_idle_tsk]
101 SET_CURR_TASK_ON_CPU r0, r1
102
103 ; setup stack (fp, sp)
104 mov fp, 0
105
106 ; set its stack base to tsk->thread_info bottom
107 GET_TSK_STACK_BASE r0, sp
108
109 j start_kernel_secondary
110
111#endif
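
The BSS-clearing loop in stext above (st.ab in a brlt loop) is the hand-rolled equivalent of a memset over the __bss_start..__bss_stop window; as an editorial sketch, in C it would read:

#include <linux/string.h>

extern char __bss_start[], __bss_stop[];	/* from the linker script */

/* Editorial sketch: C equivalent of the stext BSS-clear loop */
static void sketch_clear_bss(void)
{
	memset(__bss_start, 0, __bss_stop - __bss_start);
}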
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
new file mode 100644
index 000000000000..551c10dff481
--- /dev/null
+++ b/arch/arc/kernel/irq.c
@@ -0,0 +1,273 @@
1/*
2 * Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 */
9
10#include <linux/interrupt.h>
11#include <linux/module.h>
12#include <linux/of.h>
13#include <linux/irqdomain.h>
14#include <asm/sections.h>
15#include <asm/irq.h>
16#include <asm/mach_desc.h>
17
18/*
19 * Early Hardware specific Interrupt setup
20 * -Called very early (start_kernel -> setup_arch -> setup_processor)
21 * -Platform Independent (a must for any ARC700)
22 * -Needed for each CPU (hence not foldable into init_IRQ)
23 *
24 * What it does:
25 * -setup Vector Table Base Reg - in case Linux not linked at 0x8000_0000
26 * -Disable all IRQs (on CPU side)
27 * -Optionally, setup the High priority Interrupts as Level 2 IRQs
28 */
29void __init arc_init_IRQ(void)
30{
31 int level_mask = 0;
32
33 write_aux_reg(AUX_INTR_VEC_BASE, _int_vec_base_lds);
34
35 /* Disable all IRQs: enable them as devices request */
36 write_aux_reg(AUX_IENABLE, 0);
37
38 /* setup any high priority Interrupts (Level2 in ARCompact jargon) */
39#ifdef CONFIG_ARC_IRQ3_LV2
40 level_mask |= (1 << 3);
41#endif
42#ifdef CONFIG_ARC_IRQ5_LV2
43 level_mask |= (1 << 5);
44#endif
45#ifdef CONFIG_ARC_IRQ6_LV2
46 level_mask |= (1 << 6);
47#endif
48
49 if (level_mask) {
50 pr_info("Level-2 interrupts bitset %x\n", level_mask);
51 write_aux_reg(AUX_IRQ_LEV, level_mask);
52 }
53}
54
55/*
56 * ARC700 core includes a simple on-chip intc supporting
57 * -per IRQ enable/disable
58 * -2 levels of interrupts (high/low)
59 * -all interrupts being level triggered
60 *
61 * To reduce platform code, we assume all IRQs directly hooked-up into intc.
62 * Platforms with external intc, hence cascaded IRQs, are free to over-ride
63 * below, per IRQ.
64 */
65
66static void arc_mask_irq(struct irq_data *data)
67{
68 arch_mask_irq(data->irq);
69}
70
71static void arc_unmask_irq(struct irq_data *data)
72{
73 arch_unmask_irq(data->irq);
74}
75
76static struct irq_chip onchip_intc = {
77 .name = "ARC In-core Intc",
78 .irq_mask = arc_mask_irq,
79 .irq_unmask = arc_unmask_irq,
80};
81
82static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
83 irq_hw_number_t hw)
84{
85 if (irq == TIMER0_IRQ)
86 irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
87 else
88 irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);
89
90 return 0;
91}
92
93static const struct irq_domain_ops arc_intc_domain_ops = {
94 .xlate = irq_domain_xlate_onecell,
95 .map = arc_intc_domain_map,
96};
97
98static struct irq_domain *root_domain;
99
100void __init init_onchip_IRQ(void)
101{
102 struct device_node *intc = NULL;
103
104 intc = of_find_compatible_node(NULL, NULL, "snps,arc700-intc");
105 if (!intc)
106 panic("DeviceTree Missing incore intc\n");
107
108 root_domain = irq_domain_add_legacy(intc, NR_IRQS, 0, 0,
109 &arc_intc_domain_ops, NULL);
110
111 if (!root_domain)
112 panic("root irq domain not avail\n");
113
114 /* with this we don't need to export root_domain */
115 irq_set_default_host(root_domain);
116}
117
118/*
119 * Late Interrupt system init called from start_kernel for Boot CPU only
120 *
121 * Since slab must already be initialized, platforms can start doing any
122 * needed request_irq( )s
123 */
124void __init init_IRQ(void)
125{
126 init_onchip_IRQ();
127
128 /* Any external intc can be setup here */
129 if (machine_desc->init_irq)
130 machine_desc->init_irq();
131
132#ifdef CONFIG_SMP
133 /* Master CPU can initialize its side of IPI */
134 if (machine_desc->init_smp)
135 machine_desc->init_smp(smp_processor_id());
136#endif
137}
138
139/*
140 * "C" Entry point for any ARC ISR, called from low level vector handler
141 * @irq is the vector number read from ICAUSE reg of on-chip intc
142 */
143void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
144{
145 struct pt_regs *old_regs = set_irq_regs(regs);
146
147 irq_enter();
148 generic_handle_irq(irq);
149 irq_exit();
150 set_irq_regs(old_regs);
151}
152
153int __init get_hw_config_num_irq(void)
154{
155 uint32_t val = read_aux_reg(ARC_REG_VECBASE_BCR);
156
157 switch (val & 0x03) {
158 case 0:
159 return 16;
160 case 1:
161 return 32;
162 case 2:
163 return 8;
164 default:
165 return 0;
166 }
167
168 return 0;
169}
170
171/*
172 * arch_local_irq_enable - Enable interrupts.
173 *
174 * 1. Explicitly called to re-enable interrupts
175 * 2. Implicitly called from spin_unlock_irq, write_unlock_irq etc
176 * which maybe in hard ISR itself
177 *
178 * Semantics of this function change depending on where it is called from:
179 *
180 * -If called from hard-ISR, it must not invert interrupt priorities
181 * e.g. suppose TIMER is high priority (Level 2) IRQ
182 * In the timer hard-ISR, timer_interrupt( ) calls spin_unlock_irq several times.
183 * Here local_irq_enable( ) should not re-enable lower priority interrupts
184 * -If called from soft-ISR, it must re-enable all interrupts
185 * soft ISRs are low priority jobs which can be very slow, thus all IRQs
186 * must be enabled while they run.
187 * Now hardware context wise we may still be in L2 ISR (not done rtie)
188 * still we must re-enable both L1 and L2 IRQs
189 * Another twist is prev scenario with flow being
190 * L1 ISR ==> interrupted by L2 ISR ==> L2 soft ISR
191 * here we must not re-enable L1 as prev L1 Interrupt's h/w context will get
192 * over-written (this is deficiency in ARC700 Interrupt mechanism)
193 */
194
195#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS /* Complex version for 2 IRQ levels */
196
197void arch_local_irq_enable(void)
198{
199
200 unsigned long flags;
201 flags = arch_local_save_flags();
202
203 /* Allow both L1 and L2 at the onset */
204 flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
205
206 /* Called from hard ISR (between irq_enter and irq_exit) */
207 if (in_irq()) {
208
209 /* If in L2 ISR, don't re-enable any further IRQs as this can
210 * cause IRQ priorities to get upside down. e.g. it could allow
211 * L1 be taken while in L2 hard ISR which is wrong not only in
212 * theory, it can also cause the dreaded L1-L2-L1 scenario
213 */
214 if (flags & STATUS_A2_MASK)
215 flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);
216
217 /* Even if in L1 ISR, allow higher prio L2 IRQs */
218 else if (flags & STATUS_A1_MASK)
219 flags &= ~(STATUS_E1_MASK);
220 }
221
222 /* called from soft IRQ, ideally we want to re-enable all levels */
223
224 else if (in_softirq()) {
225
226 /* However if this is a case of L1 interrupted by L2,
227 * re-enabling both may cause a whacky L1-L2-L1 scenario
228 * because ARC700 allows level 1 to interrupt an active L2 ISR
229 * Thus we disable both
230 * However some code, executing in soft ISR wants some IRQs
231 * to be enabled so we re-enable L2 only
232 *
233 * How do we determine if L1 was interrupted by L2:
234 * -A2 is set (means in L2 ISR)
235 * -E1 is set in this ISR's pt_regs->status32 which is
236 * saved copy of status32_l2 when l2 ISR happened
237 */
238 struct pt_regs *pt = get_irq_regs();
239 if ((flags & STATUS_A2_MASK) && pt &&
240 (pt->status32 & STATUS_A1_MASK)) {
241 /*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */
242 flags &= ~(STATUS_E1_MASK);
243 }
244 }
245
246 arch_local_irq_restore(flags);
247}
248
249#else /* ! CONFIG_ARC_COMPACT_IRQ_LEVELS */
250
251/*
252 * Simpler version for only 1 level of interrupt
253 * Here we only Worry about Level 1 Bits
254 */
255void arch_local_irq_enable(void)
256{
257 unsigned long flags;
258
259 /*
260 * The ARC IDE driver tries to re-enable interrupts from hard-isr
261 * context, which is simply wrong
262 */
263 if (in_irq()) {
264 WARN_ONCE(1, "IRQ enabled from hard-isr");
265 return;
266 }
267
268 flags = arch_local_save_flags();
269 flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
270 arch_local_irq_restore(flags);
271}
272#endif
273EXPORT_SYMBOL(arch_local_irq_enable);
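
Because init_onchip_IRQ() registers a legacy domain and sets it as the default host, peripheral drivers need nothing ARC-specific to attach a handler; a plain request_irq() resolves through onchip_intc. A hypothetical example (the IRQ number 8 follows the "uart = 0x8" note in entry.S; the function and device names are invented):

#include <linux/interrupt.h>

static irqreturn_t sketch_uart_isr(int irq, void *dev_id)
{
	/* acknowledge and service the device here */
	return IRQ_HANDLED;
}

static int __init sketch_uart_irq_init(void)
{
	/* lands in onchip_intc via the default legacy domain */
	return request_irq(8, sketch_uart_isr, 0, "sketch-uart", NULL);
}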
diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
new file mode 100644
index 000000000000..2888ba5be47e
--- /dev/null
+++ b/arch/arc/kernel/kgdb.c
@@ -0,0 +1,205 @@
1/*
2 * kgdb support for ARC
3 *
4 * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/kgdb.h>
12#include <asm/disasm.h>
13#include <asm/cacheflush.h>
14
15static void to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
16 struct callee_regs *cregs)
17{
18 int regno;
19
20 for (regno = 0; regno <= 26; regno++)
21 gdb_regs[_R0 + regno] = get_reg(regno, kernel_regs, cregs);
22
23 for (regno = 27; regno < GDB_MAX_REGS; regno++)
24 gdb_regs[regno] = 0;
25
26 gdb_regs[_FP] = kernel_regs->fp;
27 gdb_regs[__SP] = kernel_regs->sp;
28 gdb_regs[_BLINK] = kernel_regs->blink;
29 gdb_regs[_RET] = kernel_regs->ret;
30 gdb_regs[_STATUS32] = kernel_regs->status32;
31 gdb_regs[_LP_COUNT] = kernel_regs->lp_count;
32 gdb_regs[_LP_END] = kernel_regs->lp_end;
33 gdb_regs[_LP_START] = kernel_regs->lp_start;
34 gdb_regs[_BTA] = kernel_regs->bta;
35 gdb_regs[_STOP_PC] = kernel_regs->ret;
36}
37
38static void from_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
39 struct callee_regs *cregs)
40{
41 int regno;
42
43 for (regno = 0; regno <= 26; regno++)
44 set_reg(regno, gdb_regs[regno + _R0], kernel_regs, cregs);
45
46 kernel_regs->fp = gdb_regs[_FP];
47 kernel_regs->sp = gdb_regs[__SP];
48 kernel_regs->blink = gdb_regs[_BLINK];
49 kernel_regs->ret = gdb_regs[_RET];
50 kernel_regs->status32 = gdb_regs[_STATUS32];
51 kernel_regs->lp_count = gdb_regs[_LP_COUNT];
52 kernel_regs->lp_end = gdb_regs[_LP_END];
53 kernel_regs->lp_start = gdb_regs[_LP_START];
54 kernel_regs->bta = gdb_regs[_BTA];
55}
56
57
58void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
59{
60 to_gdb_regs(gdb_regs, kernel_regs, (struct callee_regs *)
61 current->thread.callee_reg);
62}
63
64void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
65{
66 from_gdb_regs(gdb_regs, kernel_regs, (struct callee_regs *)
67 current->thread.callee_reg);
68}
69
70void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
71 struct task_struct *task)
72{
73 if (task)
74 to_gdb_regs(gdb_regs, task_pt_regs(task),
75 (struct callee_regs *) task->thread.callee_reg);
76}
77
78struct single_step_data_t {
79 uint16_t opcode[2];
80 unsigned long address[2];
81 int is_branch;
82 int armed;
83} single_step_data;
84
85static void undo_single_step(struct pt_regs *regs)
86{
87 if (single_step_data.armed) {
88 int i;
89
90 for (i = 0; i < (single_step_data.is_branch ? 2 : 1); i++) {
91 memcpy((void *) single_step_data.address[i],
92 &single_step_data.opcode[i],
93 BREAK_INSTR_SIZE);
94
95 flush_icache_range(single_step_data.address[i],
96 single_step_data.address[i] +
97 BREAK_INSTR_SIZE);
98 }
99 single_step_data.armed = 0;
100 }
101}
102
103static void place_trap(unsigned long address, void *save)
104{
105 memcpy(save, (void *) address, BREAK_INSTR_SIZE);
106 memcpy((void *) address, &arch_kgdb_ops.gdb_bpt_instr,
107 BREAK_INSTR_SIZE);
108 flush_icache_range(address, address + BREAK_INSTR_SIZE);
109}
110
111static void do_single_step(struct pt_regs *regs)
112{
113 single_step_data.is_branch = disasm_next_pc((unsigned long)
114 regs->ret, regs, (struct callee_regs *)
115 current->thread.callee_reg,
116 &single_step_data.address[0],
117 &single_step_data.address[1]);
118
119 place_trap(single_step_data.address[0], &single_step_data.opcode[0]);
120
121 if (single_step_data.is_branch) {
122 place_trap(single_step_data.address[1],
123 &single_step_data.opcode[1]);
124 }
125
126 single_step_data.armed++;
127}
128
129int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
130 char *remcomInBuffer, char *remcomOutBuffer,
131 struct pt_regs *regs)
132{
133 unsigned long addr;
134 char *ptr;
135
136 undo_single_step(regs);
137
138 switch (remcomInBuffer[0]) {
139 case 's':
140 case 'c':
141 ptr = &remcomInBuffer[1];
142 if (kgdb_hex2long(&ptr, &addr))
143 regs->ret = addr;
144
145 case 'D':
146 case 'k':
147 atomic_set(&kgdb_cpu_doing_single_step, -1);
148
149 if (remcomInBuffer[0] == 's') {
150 do_single_step(regs);
151 atomic_set(&kgdb_cpu_doing_single_step,
152 smp_processor_id());
153 }
154
155 return 0;
156 }
157 return -1;
158}
159
160unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
161{
162 return instruction_pointer(regs);
163}
164
165int kgdb_arch_init(void)
166{
167 single_step_data.armed = 0;
168 return 0;
169}
170
171void kgdb_trap(struct pt_regs *regs, int param)
172{
173 /* trap_s 3 is used for breakpoints that overwrite existing
174 * instructions, while trap_s 4 is used for compiled breakpoints.
175 *
176 * with trap_s 3 breakpoints the original instruction needs to be
177 * restored and continuation needs to start at the location of the
178 * breakpoint.
179 *
180 * with trap_s 4 (compiled) breakpoints, continuation needs to
181 * start after the breakpoint.
182 */
183 if (param == 3)
184 instruction_pointer(regs) -= BREAK_INSTR_SIZE;
185
186 kgdb_handle_exception(1, SIGTRAP, 0, regs);
187}
188
189void kgdb_arch_exit(void)
190{
191}
192
193void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
194{
195 instruction_pointer(regs) = ip;
196}
197
198struct kgdb_arch arch_kgdb_ops = {
199 /* breakpoint instruction: TRAP_S 0x3 */
200#ifdef CONFIG_CPU_BIG_ENDIAN
201 .gdb_bpt_instr = {0x78, 0x7e},
202#else
203 .gdb_bpt_instr = {0x7e, 0x78},
204#endif
205};
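
The two gdb_bpt_instr initializers above encode the same 16-bit TRAP_S 0x3 opcode, 0x787e; only the in-memory byte order differs. A standalone sketch of the equivalence:

#include <stdint.h>
#include <string.h>

/* Editorial sketch: reading either byte array as a native u16 on the
 * matching endianness yields the TRAP_S 0x3 opcode 0x787e.
 */
static uint16_t bpt_opcode(const uint8_t bytes[2])
{
	uint16_t insn;

	memcpy(&insn, bytes, sizeof(insn));	/* native byte order */
	return insn;				/* 0x787e */
}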
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
new file mode 100644
index 000000000000..3bfeacb674de
--- /dev/null
+++ b/arch/arc/kernel/kprobes.c
@@ -0,0 +1,525 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/types.h>
10#include <linux/kprobes.h>
11#include <linux/slab.h>
12#include <linux/module.h>
13#include <linux/kprobes.h>
14#include <linux/kdebug.h>
15#include <linux/sched.h>
16#include <linux/uaccess.h>
17#include <asm/cacheflush.h>
18#include <asm/current.h>
19#include <asm/disasm.h>
20
21#define MIN_STACK_SIZE(addr) min((unsigned long)MAX_STACK_SIZE, \
22 (unsigned long)current_thread_info() + THREAD_SIZE - (addr))
23
24DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
25DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
26
27int __kprobes arch_prepare_kprobe(struct kprobe *p)
28{
29 /* Attempt to probe at unaligned address */
30 if ((unsigned long)p->addr & 0x01)
31 return -EINVAL;
32
33 /* Address should not be in exception handling code */
34
35 p->ainsn.is_short = is_short_instr((unsigned long)p->addr);
36 p->opcode = *p->addr;
37
38 return 0;
39}
40
41void __kprobes arch_arm_kprobe(struct kprobe *p)
42{
43 *p->addr = UNIMP_S_INSTRUCTION;
44
45 flush_icache_range((unsigned long)p->addr,
46 (unsigned long)p->addr + sizeof(kprobe_opcode_t));
47}
48
49void __kprobes arch_disarm_kprobe(struct kprobe *p)
50{
51 *p->addr = p->opcode;
52
53 flush_icache_range((unsigned long)p->addr,
54 (unsigned long)p->addr + sizeof(kprobe_opcode_t));
55}
56
57void __kprobes arch_remove_kprobe(struct kprobe *p)
58{
59 arch_disarm_kprobe(p);
60
61 /* Can we remove the kprobe in the middle of kprobe handling? */
62 if (p->ainsn.t1_addr) {
63 *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
64
65 flush_icache_range((unsigned long)p->ainsn.t1_addr,
66 (unsigned long)p->ainsn.t1_addr +
67 sizeof(kprobe_opcode_t));
68
69 p->ainsn.t1_addr = NULL;
70 }
71
72 if (p->ainsn.t2_addr) {
73 *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
74
75 flush_icache_range((unsigned long)p->ainsn.t2_addr,
76 (unsigned long)p->ainsn.t2_addr +
77 sizeof(kprobe_opcode_t));
78
79 p->ainsn.t2_addr = NULL;
80 }
81}
82
83static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
84{
85 kcb->prev_kprobe.kp = kprobe_running();
86 kcb->prev_kprobe.status = kcb->kprobe_status;
87}
88
89static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
90{
91 __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
92 kcb->kprobe_status = kcb->prev_kprobe.status;
93}
94
95static inline void __kprobes set_current_kprobe(struct kprobe *p)
96{
97 __get_cpu_var(current_kprobe) = p;
98}
99
100static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
101 struct pt_regs *regs)
102{
103 /* Remove the trap instructions inserted for single step and
104 * restore the original instructions
105 */
106 if (p->ainsn.t1_addr) {
107 *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
108
109 flush_icache_range((unsigned long)p->ainsn.t1_addr,
110 (unsigned long)p->ainsn.t1_addr +
111 sizeof(kprobe_opcode_t));
112
113 p->ainsn.t1_addr = NULL;
114 }
115
116 if (p->ainsn.t2_addr) {
117 *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
118
119 flush_icache_range((unsigned long)p->ainsn.t2_addr,
120 (unsigned long)p->ainsn.t2_addr +
121 sizeof(kprobe_opcode_t));
122
123 p->ainsn.t2_addr = NULL;
124 }
125
126 return;
127}
128
129static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
130{
131 unsigned long next_pc;
132 unsigned long tgt_if_br = 0;
133 int is_branch;
134 unsigned long bta;
135
136 /* Copy the opcode back to the kprobe location and execute the
137 * instruction. Because of this we will not be able to get into the
138 * same kprobe until this kprobe is done
139 */
140 *(p->addr) = p->opcode;
141
142 flush_icache_range((unsigned long)p->addr,
143 (unsigned long)p->addr + sizeof(kprobe_opcode_t));
144
145 /* Now we insert the trap at the next location after this instruction to
146 * single step. If it is a branch we insert the trap at possible branch
147 * targets
148 */
149
150 bta = regs->bta;
151
152 if (regs->status32 & 0x40) {
153 /* We are in a delay slot with the branch taken */
154
155 next_pc = bta & ~0x01;
156
157 if (!p->ainsn.is_short) {
158 if (bta & 0x01)
159 regs->blink += 2;
160 else {
161 /* Branch not taken */
162 next_pc += 2;
163
164 /* next pc is taken from bta after executing the
165 * delay slot instruction
166 */
167 regs->bta += 2;
168 }
169 }
170
171 is_branch = 0;
172 } else
173 is_branch =
174 disasm_next_pc((unsigned long)p->addr, regs,
175 (struct callee_regs *) current->thread.callee_reg,
176 &next_pc, &tgt_if_br);
177
178 p->ainsn.t1_addr = (kprobe_opcode_t *) next_pc;
179 p->ainsn.t1_opcode = *(p->ainsn.t1_addr);
180 *(p->ainsn.t1_addr) = TRAP_S_2_INSTRUCTION;
181
182 flush_icache_range((unsigned long)p->ainsn.t1_addr,
183 (unsigned long)p->ainsn.t1_addr +
184 sizeof(kprobe_opcode_t));
185
186 if (is_branch) {
187 p->ainsn.t2_addr = (kprobe_opcode_t *) tgt_if_br;
188 p->ainsn.t2_opcode = *(p->ainsn.t2_addr);
189 *(p->ainsn.t2_addr) = TRAP_S_2_INSTRUCTION;
190
191 flush_icache_range((unsigned long)p->ainsn.t2_addr,
192 (unsigned long)p->ainsn.t2_addr +
193 sizeof(kprobe_opcode_t));
194 }
195}
196
197int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
198{
199 struct kprobe *p;
200 struct kprobe_ctlblk *kcb;
201
202 preempt_disable();
203
204 kcb = get_kprobe_ctlblk();
205 p = get_kprobe((unsigned long *)addr);
206
207 if (p) {
208 /*
209 * We have reentered the kprobe_handler, since another kprobe
210 * was hit while within the handler, we save the original
211 * kprobes and single step on the instruction of the new probe
212 * without calling any user handlers to avoid recursive
213 * kprobes.
214 */
215 if (kprobe_running()) {
216 save_previous_kprobe(kcb);
217 set_current_kprobe(p);
218 kprobes_inc_nmissed_count(p);
219 setup_singlestep(p, regs);
220 kcb->kprobe_status = KPROBE_REENTER;
221 return 1;
222 }
223
224 set_current_kprobe(p);
225 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
226
227 /* If we have no pre-handler or it returned 0, we continue with
228 * normal processing. If we have a pre-handler and it returned
229 * non-zero - which is expected from setjmp_pre_handler for
230 * jprobe, we return without single stepping and leave that to
231 * the break-handler which is invoked by a kprobe from
232 * jprobe_return
233 */
234 if (!p->pre_handler || !p->pre_handler(p, regs)) {
235 setup_singlestep(p, regs);
236 kcb->kprobe_status = KPROBE_HIT_SS;
237 }
238
239 return 1;
240 } else if (kprobe_running()) {
241 p = __get_cpu_var(current_kprobe);
242 if (p->break_handler && p->break_handler(p, regs)) {
243 setup_singlestep(p, regs);
244 kcb->kprobe_status = KPROBE_HIT_SS;
245 return 1;
246 }
247 }
248
249 /* no_kprobe: */
250 preempt_enable_no_resched();
251 return 0;
252}
253
254static int __kprobes arc_post_kprobe_handler(unsigned long addr,
255 struct pt_regs *regs)
256{
257 struct kprobe *cur = kprobe_running();
258 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
259
260 if (!cur)
261 return 0;
262
263 resume_execution(cur, addr, regs);
264
265 /* Rearm the kprobe */
266 arch_arm_kprobe(cur);
267
268 /*
269 * When we return from the trap instruction we go to the next instruction.
270 * We restored the actual instruction in resume_execution() and we need to
271 * return to the same address and execute it
272 */
273 regs->ret = addr;
274
275 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
276 kcb->kprobe_status = KPROBE_HIT_SSDONE;
277 cur->post_handler(cur, regs, 0);
278 }
279
280 if (kcb->kprobe_status == KPROBE_REENTER) {
281 restore_previous_kprobe(kcb);
282 goto out;
283 }
284
285 reset_current_kprobe();
286
287out:
288 preempt_enable_no_resched();
289 return 1;
290}
291
292/*
293 * Fault can be for the instruction being single stepped or for the
294 * pre/post handlers in the module.
295 * This is applicable for applications like user probes, where we have the
296 * probe in user space and the handlers in the kernel
297 */
298
299int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
300{
301 struct kprobe *cur = kprobe_running();
302 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
303
304 switch (kcb->kprobe_status) {
305 case KPROBE_HIT_SS:
306 case KPROBE_REENTER:
307 /*
308 * We are here because the instruction being single stepped
309 * caused the fault. We reset the current kprobe and let the
310 * exception handler run as if it were a regular exception. In our
311 * case it doesn't matter because the system will be halted
312 */
313 resume_execution(cur, (unsigned long)cur->addr, regs);
314
315 if (kcb->kprobe_status == KPROBE_REENTER)
316 restore_previous_kprobe(kcb);
317 else
318 reset_current_kprobe();
319
320 preempt_enable_no_resched();
321 break;
322
323 case KPROBE_HIT_ACTIVE:
324 case KPROBE_HIT_SSDONE:
325 /*
326 * We are here because the instructions in the pre/post handler
327 * caused the fault.
328 */
329
330 /* We increment the nmissed count for accounting,
331 * we can also use npre/npostfault count for accounting
332 * these specific fault cases.
333 */
334 kprobes_inc_nmissed_count(cur);
335
336 /*
337 * We come here because instructions in the pre/post
338 * handler caused the page_fault, this could happen
339 * if handler tries to access user space by
340 * copy_from_user(), get_user() etc. Let the
341 * user-specified handler try to fix it first.
342 */
343 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
344 return 1;
345
346 /*
347 * In case the user-specified fault handler returned zero,
348 * try to fix up.
349 */
350 if (fixup_exception(regs))
351 return 1;
352
353 /*
354 * fixup_exception() could not handle it,
355 * Let do_page_fault() fix it.
356 */
357 break;
358
359 default:
360 break;
361 }
362 return 0;
363}
364
365int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
366 unsigned long val, void *data)
367{
368 struct die_args *args = data;
369 unsigned long addr = args->err;
370 int ret = NOTIFY_DONE;
371
372 switch (val) {
373 case DIE_IERR:
374 if (arc_kprobe_handler(addr, args->regs))
375 return NOTIFY_STOP;
376 break;
377
378 case DIE_TRAP:
379 if (arc_post_kprobe_handler(addr, args->regs))
380 return NOTIFY_STOP;
381 break;
382
383 default:
384 break;
385 }
386
387 return ret;
388}
389
390int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
391{
392 struct jprobe *jp = container_of(p, struct jprobe, kp);
393 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
394 unsigned long sp_addr = regs->sp;
395
396 kcb->jprobe_saved_regs = *regs;
397 memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
398 regs->ret = (unsigned long)(jp->entry);
399
400 return 1;
401}
402
403void __kprobes jprobe_return(void)
404{
405 __asm__ __volatile__("unimp_s");
406 return;
407}
408
409int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
410{
411 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
412 unsigned long sp_addr;
413
414 *regs = kcb->jprobe_saved_regs;
415 sp_addr = regs->sp;
416 memcpy((void *)sp_addr, kcb->jprobes_stack, MIN_STACK_SIZE(sp_addr));
417 preempt_enable_no_resched();
418
419 return 1;
420}
421
422static void __used kretprobe_trampoline_holder(void)
423{
424 __asm__ __volatile__(".global kretprobe_trampoline\n"
425 "kretprobe_trampoline:\n" "nop\n");
426}
427
428void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
429 struct pt_regs *regs)
430{
431
432 ri->ret_addr = (kprobe_opcode_t *) regs->blink;
433
434 /* Replace the return addr with trampoline addr */
435 regs->blink = (unsigned long)&kretprobe_trampoline;
436}
437
438static int __kprobes trampoline_probe_handler(struct kprobe *p,
439 struct pt_regs *regs)
440{
441 struct kretprobe_instance *ri = NULL;
442 struct hlist_head *head, empty_rp;
443 struct hlist_node *tmp;
444 unsigned long flags, orig_ret_address = 0;
445 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
446
447 INIT_HLIST_HEAD(&empty_rp);
448 kretprobe_hash_lock(current, &head, &flags);
449
450 /*
451 * It is possible to have multiple instances associated with a given
452 * task either because multiple functions in the call path
453 * have a return probe installed on them, and/or more than one
454 * return probe was registered for a target function.
455 *
456 * We can handle this because:
457 * - instances are always inserted at the head of the list
458 * - when multiple return probes are registered for the same
459 * function, the first instance's ret_addr will point to the
460 * real return address, and all the rest will point to
461 * kretprobe_trampoline
462 */
463 hlist_for_each_entry_safe(ri, tmp, head, hlist) {
464 if (ri->task != current)
465 /* another task is sharing our hash bucket */
466 continue;
467
468 if (ri->rp && ri->rp->handler)
469 ri->rp->handler(ri, regs);
470
471 orig_ret_address = (unsigned long)ri->ret_addr;
472 recycle_rp_inst(ri, &empty_rp);
473
474 if (orig_ret_address != trampoline_address) {
475 /*
476 * This is the real return address. Any other
477 * instances associated with this task are for
478 * other calls deeper on the call stack
479 */
480 break;
481 }
482 }
483
484 kretprobe_assert(ri, orig_ret_address, trampoline_address);
485 regs->ret = orig_ret_address;
486
487 reset_current_kprobe();
488 kretprobe_hash_unlock(current, &flags);
489 preempt_enable_no_resched();
490
491 hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
492 hlist_del(&ri->hlist);
493 kfree(ri);
494 }
495
496 /* By returning a non-zero value, we are telling the kprobe handler
497 * that we don't want the post_handler to run
498 */
499 return 1;
500}
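/*
 * Editor's worked example (annotation, not part of the patch): if two
 * return probes are registered on the same function, entering it pushes
 * two instances. The first push saved the real return address; the second
 * saved &kretprobe_trampoline (blink had already been swapped). Walking
 * from the list head, the loop above thus runs the newest handler first
 * and stops once it recycles the instance holding the real address.
 */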
501
502static struct kprobe trampoline_p = {
503 .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
504 .pre_handler = trampoline_probe_handler
505};
506
507int __init arch_init_kprobes(void)
508{
509 /* Registering the trampoline code for the kret probe */
510 return register_kprobe(&trampoline_p);
511}
512
513int __kprobes arch_trampoline_kprobe(struct kprobe *p)
514{
515 if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
516 return 1;
517
518 return 0;
519}
520
521void trap_is_kprobe(unsigned long cause, unsigned long address,
522 struct pt_regs *regs)
523{
524 notify_die(DIE_TRAP, "kprobe_trap", regs, address, cause, SIGTRAP);
525}
diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
new file mode 100644
index 000000000000..cdd359352c0a
--- /dev/null
+++ b/arch/arc/kernel/module.c
@@ -0,0 +1,145 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/module.h>
10#include <linux/moduleloader.h>
11#include <linux/kernel.h>
12#include <linux/elf.h>
13#include <linux/vmalloc.h>
14#include <linux/slab.h>
15#include <linux/fs.h>
16#include <linux/string.h>
17#include <asm/unwind.h>
18
19static inline void arc_write_me(unsigned short *addr, unsigned long value)
20{
21 *addr = (value & 0xffff0000) >> 16;
22 *(addr + 1) = (value & 0xffff);
23}
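/*
 * Editor's sketch (annotation, not part of the patch): ARCompact code is
 * "middle-endian" - a 32-bit literal in the instruction stream is stored
 * as two 16-bit halves, most significant half first, which is exactly
 * what arc_write_me() implements:
 */
static void __maybe_unused arc_write_me_example(void)
{
	unsigned short insn[2];

	arc_write_me(insn, 0x12345678);
	/* now insn[0] == 0x1234 (high half) and insn[1] == 0x5678 (low half) */
}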
24
25/* ARC specific section quirks - before relocation loop in generic loader
26 *
27 * For dwarf unwinding out of modules, this needs to
28 * 1. Ensure the .debug_frame is allocatable (ARC linker bug: despite
29 *    -fasynchronous-unwind-tables, it doesn't mark the section SHF_ALLOC).
30 * 2. Since we are iterating through the sec hdr tbl anyway, make a note of
31 * the exact section index, for later use.
32 */
33int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
34 char *secstr, struct module *mod)
35{
36#ifdef CONFIG_ARC_DW2_UNWIND
37 int i;
38
39 mod->arch.unw_sec_idx = 0;
40 mod->arch.unw_info = NULL;
41
42 for (i = 1; i < hdr->e_shnum; i++) {
43 if (strcmp(secstr+sechdrs[i].sh_name, ".debug_frame") == 0) {
44 sechdrs[i].sh_flags |= SHF_ALLOC;
45 mod->arch.unw_sec_idx = i;
46 break;
47 }
48 }
49#endif
50 return 0;
51}
52
53void module_arch_cleanup(struct module *mod)
54{
55#ifdef CONFIG_ARC_DW2_UNWIND
56 if (mod->arch.unw_info)
57 unwind_remove_table(mod->arch.unw_info, 0);
58#endif
59}
60
61int apply_relocate_add(Elf32_Shdr *sechdrs,
62 const char *strtab,
63 unsigned int symindex, /* sec index for sym tbl */
64 unsigned int relsec, /* sec index for relo sec */
65 struct module *module)
66{
67 int i, n;
68 Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr;
69 Elf32_Sym *sym_entry, *sym_sec;
70 Elf32_Addr relocation;
71 Elf32_Addr location;
72 Elf32_Addr sec_to_patch;
73 int relo_type;
74
75 sec_to_patch = sechdrs[sechdrs[relsec].sh_info].sh_addr;
76 sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr;
77 n = sechdrs[relsec].sh_size / sizeof(*rel_entry);
78
79 pr_debug("\n========== Module Sym reloc ===========================\n");
80 pr_debug("Section to fixup %x\n", sec_to_patch);
81 pr_debug("=========================================================\n");
82 pr_debug("rela->r_off | rela->addend | sym->st_value | ADDR | VALUE\n");
83 pr_debug("=========================================================\n");
84
85 /* Loop thru entries in relocation section */
86 for (i = 0; i < n; i++) {
87
88 /* This is where to make the change */
89 location = sec_to_patch + rel_entry[i].r_offset;
90
91 /* This is the symbol it is referring to. Note that all
92 undefined symbols have been resolved. */
93 sym_entry = sym_sec + ELF32_R_SYM(rel_entry[i].r_info);
94
95 relocation = sym_entry->st_value + rel_entry[i].r_addend;
96
97 pr_debug("\t%x\t\t%x\t\t%x %x %x [%s]\n",
98 rel_entry[i].r_offset, rel_entry[i].r_addend,
99 sym_entry->st_value, location, relocation,
100 strtab + sym_entry->st_name);
101
102		/* This assumes modules are built with -mlong-calls,
103		 * so any branches/jumps are absolute 32 bit jumps
104		 * and global data access again is absolute 32 bit.
105		 * Both of these are handled by the same relocation type
106 */
107 relo_type = ELF32_R_TYPE(rel_entry[i].r_info);
108
109 if (likely(R_ARC_32_ME == relo_type))
110 arc_write_me((unsigned short *)location, relocation);
111 else if (R_ARC_32 == relo_type)
112 *((Elf32_Addr *) location) = relocation;
113 else
114 goto relo_err;
115
116 }
117 return 0;
118
119relo_err:
120 pr_err("%s: unknown relocation: %u\n",
121 module->name, ELF32_R_TYPE(rel_entry[i].r_info));
122 return -ENOEXEC;
123
124}
125
126/* Just before lift off: after sections have been relocated, we add the
127 * dwarf section to the unwinder table pool.
128 * This couldn't be done in module_frob_arch_sections() because
129 * relocations had not been applied by then
130 */
131int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
132 struct module *mod)
133{
134#ifdef CONFIG_ARC_DW2_UNWIND
135 void *unw;
136 int unwsec = mod->arch.unw_sec_idx;
137
138 if (unwsec) {
139 unw = unwind_add_table(mod, (void *)sechdrs[unwsec].sh_addr,
140 sechdrs[unwsec].sh_size);
141 mod->arch.unw_info = unw;
142 }
143#endif
144 return 0;
145}
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
new file mode 100644
index 000000000000..0a7531d99294
--- /dev/null
+++ b/arch/arc/kernel/process.c
@@ -0,0 +1,235 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Amit Bhor, Kanika Nema: Codito Technologies 2004
9 */
10
11#include <linux/errno.h>
12#include <linux/module.h>
13#include <linux/sched.h>
14#include <linux/mm.h>
15#include <linux/fs.h>
16#include <linux/unistd.h>
17#include <linux/ptrace.h>
18#include <linux/slab.h>
19#include <linux/syscalls.h>
20#include <linux/elf.h>
21#include <linux/tick.h>
22
23SYSCALL_DEFINE1(arc_settls, void *, user_tls_data_ptr)
24{
25 task_thread_info(current)->thr_ptr = (unsigned int)user_tls_data_ptr;
26 return 0;
27}
28
29/*
30 * We return the user space TLS data ptr as the sys-call return code.
31 * Ideally it should be copied to user.
32 * However we can cheat via the fact that some sys-calls do return
33 * absurdly high values.
34 * Since the TLS data ptr is not going to be in the 0xFFFF_xxxx range,
35 * it won't be considered a sys-call error
36 * and it will be loads better than copy-to-user, which is a definite
37 * D-TLB Miss
38 */
39SYSCALL_DEFINE0(arc_gettls)
40{
41 return task_thread_info(current)->thr_ptr;
42}
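/*
 * Hypothetical userland usage sketch (editor's assumption, not part of the
 * patch): since the TLS pointer comes back as the raw return value, a C
 * library can fetch it with a bare syscall, no copy_to_user() round trip:
 *
 *	void *tls = (void *)syscall(__NR_arc_gettls);
 */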
43
44static inline void arch_idle(void)
45{
46 /* sleep, but enable all interrupts before committing */
47 __asm__("sleep 0x3");
48}
49
50void cpu_idle(void)
51{
52 /* Since we SLEEP in idle loop, TIF_POLLING_NRFLAG can't be set */
53
54 /* endless idle loop with no priority at all */
55 while (1) {
56 tick_nohz_idle_enter();
57 rcu_idle_enter();
58
59doze:
60 local_irq_disable();
61 if (!need_resched()) {
62 arch_idle();
63 goto doze;
64 } else {
65 local_irq_enable();
66 }
67
68 rcu_idle_exit();
69 tick_nohz_idle_exit();
70
71 schedule_preempt_disabled();
72 }
73}
74
75asmlinkage void ret_from_fork(void);
76
77/* Layout of Child kernel mode stack as setup at the end of this function is
78 *
79 * | ... |
80 * | ... |
81 * | unused |
82 * | |
83 * ------------------ <==== top of Stack (thread.ksp)
84 * | UNUSED 1 word|
85 * ------------------
86 * | r25 |
87 * ~ ~
88 * | --to-- | (CALLEE Regs of user mode)
89 * | r13 |
90 * ------------------
91 * | fp |
92 * | blink | @ret_from_fork
93 * ------------------
94 * | |
95 * ~ ~
96 * ~ ~
97 * | |
98 * ------------------
99 * | r12 |
100 * ~ ~
101 * | --to-- | (scratch Regs of user mode)
102 * | r0 |
103 * ------------------
104 * | UNUSED 1 word|
105 * ------------------ <===== END of PAGE
106 */
107int copy_thread(unsigned long clone_flags,
108 unsigned long usp, unsigned long arg,
109 struct task_struct *p)
110{
111 struct pt_regs *c_regs; /* child's pt_regs */
112 unsigned long *childksp; /* to unwind out of __switch_to() */
113 struct callee_regs *c_callee; /* child's callee regs */
114	struct callee_regs *parent_callee;	/* parent's callee regs */
115 struct pt_regs *regs = current_pt_regs();
116
117 /* Mark the specific anchors to begin with (see pic above) */
118 c_regs = task_pt_regs(p);
119 childksp = (unsigned long *)c_regs - 2; /* 2 words for FP/BLINK */
120 c_callee = ((struct callee_regs *)childksp) - 1;
121
122 /*
123	 * __switch_to() uses thread.ksp to start unwinding the stack.
124	 * For kernel threads we don't need to create callee regs, but the
125	 * stack layout nevertheless needs to remain the same.
126	 * Also, since __switch_to in any case unwinds callee regs, we use
127 * this to populate kernel thread entry-pt/args into callee regs,
128 * so that ret_from_kernel_thread() becomes simpler.
129 */
130 p->thread.ksp = (unsigned long)c_callee; /* THREAD_KSP */
131
132 /* __switch_to expects FP(0), BLINK(return addr) at top */
133 childksp[0] = 0; /* fp */
134 childksp[1] = (unsigned long)ret_from_fork; /* blink */
135
136 if (unlikely(p->flags & PF_KTHREAD)) {
137 memset(c_regs, 0, sizeof(struct pt_regs));
138
139 c_callee->r13 = arg; /* argument to kernel thread */
140 c_callee->r14 = usp; /* function */
141
142 return 0;
143 }
144
145 /*--------- User Task Only --------------*/
146
147 /* __switch_to expects FP(0), BLINK(return addr) at top of stack */
148 childksp[0] = 0; /* for POP fp */
149 childksp[1] = (unsigned long)ret_from_fork; /* for POP blink */
150
151 /* Copy parents pt regs on child's kernel mode stack */
152 *c_regs = *regs;
153
154 if (usp)
155 c_regs->sp = usp;
156
157 c_regs->r0 = 0; /* fork returns 0 in child */
158
159 parent_callee = ((struct callee_regs *)regs) - 1;
160 *c_callee = *parent_callee;
161
162 if (unlikely(clone_flags & CLONE_SETTLS)) {
163 /*
164		 * set the task's userland TLS data ptr from the 4th arg;
165		 * the clone C-lib call differs from the clone sys-call
166 */
167 task_thread_info(p)->thr_ptr = regs->r3;
168 } else {
169 /* Normal fork case: set parent's TLS ptr in child */
170 task_thread_info(p)->thr_ptr =
171 task_thread_info(current)->thr_ptr;
172 }
173
174 return 0;
175}
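/*
 * Editor's illustration (annotation, not part of the patch): for the
 * PF_KTHREAD case above, __switch_to() pops c_callee off the stack, so
 * the new thread wakes up with r13 = arg and r14 = entry function, and
 * the ret_from_fork/ret_from_kernel_thread glue only has to do the moral
 * equivalent of:
 *
 *	int (*fn)(void *) = (void *)r14;
 *	fn((void *)r13);
 */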
176
177/*
178 * Some archs flush debug and FPU info here
179 */
180void flush_thread(void)
181{
182}
183
184/*
185 * Free any architecture-specific thread data structures, etc.
186 */
187void exit_thread(void)
188{
189}
190
191int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
192{
193 return 0;
194}
195
196/*
197 * API expected by scheduler code: if a thread is sleeping, where is it?
198 * What is this good for? It will always be the scheduler or ret_from_fork,
199 * so we effectively hard-code that.
200 */
201unsigned long thread_saved_pc(struct task_struct *t)
202{
203 struct pt_regs *regs = task_pt_regs(t);
204 unsigned long blink = 0;
205
206 /*
207	 * If the thread being queried is not itself calling this, then it
208	 * implies it is not executing, which in turn implies it is sleeping,
209	 * which in turn implies it got switched OUT by the scheduler.
210	 * In that case, its kernel mode blink can be reliably retrieved as per
211 * the picture above (right above pt_regs).
212 */
213 if (t != current && t->state != TASK_RUNNING)
214 blink = *((unsigned int *)regs - 1);
215
216 return blink;
217}
218
219int elf_check_arch(const struct elf32_hdr *x)
220{
221 unsigned int eflags;
222
223 if (x->e_machine != EM_ARCOMPACT)
224 return 0;
225
226 eflags = x->e_flags;
227 if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_CURRENT) {
228 pr_err("ABI mismatch - you need newer toolchain\n");
229 force_sigsegv(SIGSEGV, current);
230 return 0;
231 }
232
233 return 1;
234}
235EXPORT_SYMBOL(elf_check_arch);
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
new file mode 100644
index 000000000000..c6a81c58d0f3
--- /dev/null
+++ b/arch/arc/kernel/ptrace.c
@@ -0,0 +1,158 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/ptrace.h>
10#include <linux/tracehook.h>
11#include <linux/regset.h>
12#include <linux/unistd.h>
13#include <linux/elf.h>
14
15static struct callee_regs *task_callee_regs(struct task_struct *tsk)
16{
17 struct callee_regs *tmp = (struct callee_regs *)tsk->thread.callee_reg;
18 return tmp;
19}
20
21static int genregs_get(struct task_struct *target,
22 const struct user_regset *regset,
23 unsigned int pos, unsigned int count,
24 void *kbuf, void __user *ubuf)
25{
26 const struct pt_regs *ptregs = task_pt_regs(target);
27 const struct callee_regs *cregs = task_callee_regs(target);
28 int ret = 0;
29 unsigned int stop_pc_val;
30
31#define REG_O_CHUNK(START, END, PTR) \
32 if (!ret) \
33 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, PTR, \
34 offsetof(struct user_regs_struct, START), \
35 offsetof(struct user_regs_struct, END));
36
37#define REG_O_ONE(LOC, PTR) \
38 if (!ret) \
39 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, PTR, \
40 offsetof(struct user_regs_struct, LOC), \
41 offsetof(struct user_regs_struct, LOC) + 4);
42
43 REG_O_CHUNK(scratch, callee, ptregs);
44 REG_O_CHUNK(callee, efa, cregs);
45 REG_O_CHUNK(efa, stop_pc, &target->thread.fault_address);
46
47 if (!ret) {
48 if (in_brkpt_trap(ptregs)) {
49 stop_pc_val = target->thread.fault_address;
50 pr_debug("\t\tstop_pc (brk-pt)\n");
51 } else {
52 stop_pc_val = ptregs->ret;
53 pr_debug("\t\tstop_pc (others)\n");
54 }
55
56 REG_O_ONE(stop_pc, &stop_pc_val);
57 }
58
59 return ret;
60}
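/*
 * Editor's illustration (annotation, not part of the patch): e.g.
 * REG_O_CHUNK(scratch, callee, ptregs) above expands to one copyout of
 * the whole scratch-reg block of user_regs_struct:
 *
 *	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, ptregs,
 *			offsetof(struct user_regs_struct, scratch),
 *			offsetof(struct user_regs_struct, callee));
 */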
61
62static int genregs_set(struct task_struct *target,
63 const struct user_regset *regset,
64 unsigned int pos, unsigned int count,
65 const void *kbuf, const void __user *ubuf)
66{
67 const struct pt_regs *ptregs = task_pt_regs(target);
68 const struct callee_regs *cregs = task_callee_regs(target);
69 int ret = 0;
70
71#define REG_IN_CHUNK(FIRST, NEXT, PTR) \
72 if (!ret) \
73 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
74 (void *)(PTR), \
75 offsetof(struct user_regs_struct, FIRST), \
76 offsetof(struct user_regs_struct, NEXT));
77
78#define REG_IN_ONE(LOC, PTR) \
79 if (!ret) \
80 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
81 (void *)(PTR), \
82 offsetof(struct user_regs_struct, LOC), \
83 offsetof(struct user_regs_struct, LOC) + 4);
84
85#define REG_IGNORE_ONE(LOC) \
86 if (!ret) \
87 ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, \
88 offsetof(struct user_regs_struct, LOC), \
89 offsetof(struct user_regs_struct, LOC) + 4);
90
91	/* TBD: disallow updates to STATUS32, orig_r8 etc */
92 REG_IN_CHUNK(scratch, callee, ptregs); /* pt_regs[bta..orig_r8] */
93 REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */
94 REG_IGNORE_ONE(efa); /* efa update invalid */
95 REG_IN_ONE(stop_pc, &ptregs->ret); /* stop_pc: PC update */
96
97 return ret;
98}
99
100enum arc_getset {
101 REGSET_GENERAL,
102};
103
104static const struct user_regset arc_regsets[] = {
105 [REGSET_GENERAL] = {
106 .core_note_type = NT_PRSTATUS,
107 .n = ELF_NGREG,
108 .size = sizeof(unsigned long),
109 .align = sizeof(unsigned long),
110 .get = genregs_get,
111 .set = genregs_set,
112 }
113};
114
115static const struct user_regset_view user_arc_view = {
116 .name = UTS_MACHINE,
117 .e_machine = EM_ARCOMPACT,
118 .regsets = arc_regsets,
119 .n = ARRAY_SIZE(arc_regsets)
120};
121
122const struct user_regset_view *task_user_regset_view(struct task_struct *task)
123{
124 return &user_arc_view;
125}
126
127void ptrace_disable(struct task_struct *child)
128{
129}
130
131long arch_ptrace(struct task_struct *child, long request,
132 unsigned long addr, unsigned long data)
133{
134 int ret = -EIO;
135
136	pr_debug("REQ=%ld: ADDR=0x%lx, DATA=0x%lx\n", request, addr, data);
137
138 switch (request) {
139 default:
140 ret = ptrace_request(child, request, addr, data);
141 break;
142 }
143
144 return ret;
145}
146
147asmlinkage int syscall_trace_entry(struct pt_regs *regs)
148{
149 if (tracehook_report_syscall_entry(regs))
150 return ULONG_MAX;
151
152 return regs->r8;
153}
154
155asmlinkage void syscall_trace_exit(struct pt_regs *regs)
156{
157 tracehook_report_syscall_exit(regs, 0);
158}
diff --git a/arch/arc/kernel/reset.c b/arch/arc/kernel/reset.c
new file mode 100644
index 000000000000..e227a2b1c943
--- /dev/null
+++ b/arch/arc/kernel/reset.c
@@ -0,0 +1,33 @@
1/*
2 * Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/kernel.h>
10#include <linux/printk.h>
11#include <linux/reboot.h>
12#include <linux/pm.h>
13
14void machine_halt(void)
15{
16 /* Halt the processor */
17 __asm__ __volatile__("flag 1\n");
18}
19
20void machine_restart(char *__unused)
21{
22 /* Soft reset : jump to reset vector */
23 pr_info("Put your restart handler here\n");
24 machine_halt();
25}
26
27void machine_power_off(void)
28{
29 /* FIXME :: power off ??? */
30 machine_halt();
31}
32
33void (*pm_power_off) (void) = NULL;
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
new file mode 100644
index 000000000000..dc0f968dae0a
--- /dev/null
+++ b/arch/arc/kernel/setup.c
@@ -0,0 +1,473 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/seq_file.h>
10#include <linux/fs.h>
11#include <linux/delay.h>
12#include <linux/root_dev.h>
13#include <linux/console.h>
14#include <linux/module.h>
15#include <linux/cpu.h>
16#include <linux/of_fdt.h>
17#include <asm/sections.h>
18#include <asm/arcregs.h>
19#include <asm/tlb.h>
20#include <asm/cache.h>
21#include <asm/setup.h>
22#include <asm/page.h>
23#include <asm/irq.h>
25#include <asm/prom.h>
26#include <asm/unwind.h>
27#include <asm/clk.h>
28#include <asm/mach_desc.h>
29
30#define FIX_PTR(x) __asm__ __volatile__(";" : "+r"(x))
31
32int running_on_hw = 1; /* vs. on ISS */
33
34char __initdata command_line[COMMAND_LINE_SIZE];
35struct machine_desc *machine_desc __initdata;
36
37struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
38
39struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
40
41
42void __init read_arc_build_cfg_regs(void)
43{
44 struct bcr_perip uncached_space;
45 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
46 FIX_PTR(cpu);
47
48 READ_BCR(AUX_IDENTITY, cpu->core);
49
50 cpu->timers = read_aux_reg(ARC_REG_TIMERS_BCR);
51
52 cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
53 if (cpu->vec_base == 0)
54 cpu->vec_base = (unsigned int)_int_vec_base_lds;
55
56 READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
57 cpu->uncached_base = uncached_space.start << 24;
58
59 cpu->extn.mul = read_aux_reg(ARC_REG_MUL_BCR);
60 cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR);
61 cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR);
62 cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR);
63 cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR);
64 READ_BCR(ARC_REG_MAC_BCR, cpu->extn_mac_mul);
65
66 cpu->extn.ext_arith = read_aux_reg(ARC_REG_EXTARITH_BCR);
67 cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR);
68
69	/* Note that we read the CCM BCRs independent of kernel config.
70	 * This is to catch the cases where the user doesn't know that
71	 * CCMs are present in the hardware build
72 */
73 {
74 struct bcr_iccm iccm;
75 struct bcr_dccm dccm;
76 struct bcr_dccm_base dccm_base;
77 unsigned int bcr_32bit_val;
78
79 bcr_32bit_val = read_aux_reg(ARC_REG_ICCM_BCR);
80 if (bcr_32bit_val) {
81 iccm = *((struct bcr_iccm *)&bcr_32bit_val);
82 cpu->iccm.base_addr = iccm.base << 16;
83 cpu->iccm.sz = 0x2000 << (iccm.sz - 1);
84 }
85
86 bcr_32bit_val = read_aux_reg(ARC_REG_DCCM_BCR);
87 if (bcr_32bit_val) {
88 dccm = *((struct bcr_dccm *)&bcr_32bit_val);
89 cpu->dccm.sz = 0x800 << (dccm.sz);
90
91 READ_BCR(ARC_REG_DCCMBASE_BCR, dccm_base);
92 cpu->dccm.base_addr = dccm_base.addr << 8;
93 }
94 }
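	/*
	 * Editor's worked example (annotation, not part of the patch): per
	 * the decode above, an ICCM BCR sz field of 3 means an ICCM of
	 * 0x2000 << 2 == 32KB, and a DCCM BCR sz field of 4 means a DCCM of
	 * 0x800 << 4 == 32KB.
	 */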
95
96 READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
97
98 read_decode_mmu_bcr();
99 read_decode_cache_bcr();
100
101 READ_BCR(ARC_REG_FP_BCR, cpu->fp);
102 READ_BCR(ARC_REG_DPFP_BCR, cpu->dpfp);
103}
104
105static const struct cpuinfo_data arc_cpu_tbl[] = {
106 { {0x10, "ARCTangent A5"}, 0x1F},
107 { {0x20, "ARC 600" }, 0x2F},
108 { {0x30, "ARC 700" }, 0x33},
109 { {0x34, "ARC 700 R4.10"}, 0x34},
110 { {0x00, NULL } }
111};
112
113char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
114{
115 int n = 0;
116 struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
117 struct bcr_identity *core = &cpu->core;
118 const struct cpuinfo_data *tbl;
119 int be = 0;
120#ifdef CONFIG_CPU_BIG_ENDIAN
121 be = 1;
122#endif
123 FIX_PTR(cpu);
124
125 n += scnprintf(buf + n, len - n,
126 "\nARC IDENTITY\t: Family [%#02x]"
127 " Cpu-id [%#02x] Chip-id [%#4x]\n",
128 core->family, core->cpu_id,
129 core->chip_id);
130
131 for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) {
132 if ((core->family >= tbl->info.id) &&
133 (core->family <= tbl->up_range)) {
134 n += scnprintf(buf + n, len - n,
135 "processor\t: %s %s\n",
136 tbl->info.str,
137 be ? "[Big Endian]" : "");
138 break;
139 }
140 }
141
142 if (tbl->info.id == 0)
143 n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n");
144
145 n += scnprintf(buf + n, len - n, "CPU speed\t: %u.%02u Mhz\n",
146 (unsigned int)(arc_get_core_freq() / 1000000),
147 (unsigned int)(arc_get_core_freq() / 10000) % 100);
148
149 n += scnprintf(buf + n, len - n, "Timers\t\t: %s %s\n",
150 (cpu->timers & 0x200) ? "TIMER1" : "",
151 (cpu->timers & 0x100) ? "TIMER0" : "");
152
153 n += scnprintf(buf + n, len - n, "Vect Tbl Base\t: %#x\n",
154 cpu->vec_base);
155
156 n += scnprintf(buf + n, len - n, "UNCACHED Base\t: %#x\n",
157 cpu->uncached_base);
158
159 return buf;
160}
161
162static const struct id_to_str mul_type_nm[] = {
163 { 0x0, "N/A"},
164 { 0x1, "32x32 (spl Result Reg)" },
165 { 0x2, "32x32 (ANY Result Reg)" }
166};
167
168static const struct id_to_str mac_mul_nm[] = {
169 {0x0, "N/A"},
170 {0x1, "N/A"},
171 {0x2, "Dual 16 x 16"},
172 {0x3, "N/A"},
173 {0x4, "32x16"},
174 {0x5, "N/A"},
175 {0x6, "Dual 16x16 and 32x16"}
176};
177
178char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
179{
180 int n = 0;
181 struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
182
183 FIX_PTR(cpu);
184#define IS_AVAIL1(var, str) ((var) ? str : "")
185#define IS_AVAIL2(var, str) ((var == 0x2) ? str : "")
186#define IS_USED(var) ((var) ? "(in-use)" : "(not used)")
187
188 n += scnprintf(buf + n, len - n,
189 "Extn [700-Base]\t: %s %s %s %s %s %s\n",
190 IS_AVAIL2(cpu->extn.norm, "norm,"),
191 IS_AVAIL2(cpu->extn.barrel, "barrel-shift,"),
192 IS_AVAIL1(cpu->extn.swap, "swap,"),
193 IS_AVAIL2(cpu->extn.minmax, "minmax,"),
194 IS_AVAIL1(cpu->extn.crc, "crc,"),
195 IS_AVAIL2(cpu->extn.ext_arith, "ext-arith"));
196
197 n += scnprintf(buf + n, len - n, "Extn [700-MPY]\t: %s",
198 mul_type_nm[cpu->extn.mul].str);
199
200 n += scnprintf(buf + n, len - n, " MAC MPY: %s\n",
201 mac_mul_nm[cpu->extn_mac_mul.type].str);
202
203 if (cpu->core.family == 0x34) {
204 n += scnprintf(buf + n, len - n,
205 "Extn [700-4.10]\t: LLOCK/SCOND %s, SWAPE %s, RTSC %s\n",
206 IS_USED(__CONFIG_ARC_HAS_LLSC_VAL),
207 IS_USED(__CONFIG_ARC_HAS_SWAPE_VAL),
208 IS_USED(__CONFIG_ARC_HAS_RTSC_VAL));
209 }
210
211 n += scnprintf(buf + n, len - n, "Extn [CCM]\t: %s",
212 !(cpu->dccm.sz || cpu->iccm.sz) ? "N/A" : "");
213
214 if (cpu->dccm.sz)
215 n += scnprintf(buf + n, len - n, "DCCM: @ %x, %d KB ",
216 cpu->dccm.base_addr, TO_KB(cpu->dccm.sz));
217
218 if (cpu->iccm.sz)
219 n += scnprintf(buf + n, len - n, "ICCM: @ %x, %d KB",
220 cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
221
222 n += scnprintf(buf + n, len - n, "\nExtn [FPU]\t: %s",
223 !(cpu->fp.ver || cpu->dpfp.ver) ? "N/A" : "");
224
225 if (cpu->fp.ver)
226 n += scnprintf(buf + n, len - n, "SP [v%d] %s",
227 cpu->fp.ver, cpu->fp.fast ? "(fast)" : "");
228
229 if (cpu->dpfp.ver)
230 n += scnprintf(buf + n, len - n, "DP [v%d] %s",
231 cpu->dpfp.ver, cpu->dpfp.fast ? "(fast)" : "");
232
233 n += scnprintf(buf + n, len - n, "\n");
234
235#ifdef _ASM_GENERIC_UNISTD_H
236 n += scnprintf(buf + n, len - n,
237 "OS ABI [v2]\t: asm-generic/{unistd,stat,fcntl}\n");
238#endif
239
240 return buf;
241}
242
243void __init arc_chk_ccms(void)
244{
245#if defined(CONFIG_ARC_HAS_DCCM) || defined(CONFIG_ARC_HAS_ICCM)
246 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
247
248#ifdef CONFIG_ARC_HAS_DCCM
249 /*
250	 * DCCM can be placed arbitrarily in hardware.
251	 * Make sure its placement/size matches what Linux was built with
252 */
253 if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr)
254 panic("Linux built with incorrect DCCM Base address\n");
255
256 if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz)
257 panic("Linux built with incorrect DCCM Size\n");
258#endif
259
260#ifdef CONFIG_ARC_HAS_ICCM
261 if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz)
262 panic("Linux built with incorrect ICCM Size\n");
263#endif
264#endif
265}
266
267/*
268 * Ensure that FP hardware and kernel config match
269 * -If hardware contains DPFP, kernel needs to save/restore FPU state
270 * across context switches
271 * -If hardware lacks DPFP, but kernel is configured to save FPU state,
272 *  then the kernel trying to access non-existent DPFP regs will crash
273 *
274 * We only check for Dbl precision Floating Point, because only DPFP
275 * hardware has dedicated regs which need to be saved/restored on ctx-sw
276 * (Single Precision uses core regs), thus kernel is kind of oblivious to it
277 */
278void __init arc_chk_fpu(void)
279{
280 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
281
282 if (cpu->dpfp.ver) {
283#ifndef CONFIG_ARC_FPU_SAVE_RESTORE
284 pr_warn("DPFP support broken in this kernel...\n");
285#endif
286 } else {
287#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
288 panic("H/w lacks DPFP support, apps won't work\n");
289#endif
290 }
291}
292
293/*
294 * Initialize and setup the processor core
295 * This is called by all the CPUs, thus it must not do anything
296 * special-cased for the boot CPU only
297 */
298
299void __init setup_processor(void)
300{
301 char str[512];
302 int cpu_id = smp_processor_id();
303
304 read_arc_build_cfg_regs();
305 arc_init_IRQ();
306
307 printk(arc_cpu_mumbojumbo(cpu_id, str, sizeof(str)));
308
309 arc_mmu_init();
310 arc_cache_init();
311 arc_chk_ccms();
312
313 printk(arc_extn_mumbojumbo(cpu_id, str, sizeof(str)));
314
315#ifdef CONFIG_SMP
316 printk(arc_platform_smp_cpuinfo());
317#endif
318
319 arc_chk_fpu();
320}
321
322void __init setup_arch(char **cmdline_p)
323{
324#ifdef CONFIG_CMDLINE_UBOOT
325	/* Make sure that a space is inserted before appending */
326 strlcat(command_line, " ", sizeof(command_line));
327#endif
328 /*
329 * Append .config cmdline to base command line, which might already
330 * contain u-boot "bootargs" (handled by head.S, if so configured)
331 */
332 strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line));
333
334 /* Save unparsed command line copy for /proc/cmdline */
335 strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
336 *cmdline_p = command_line;
337
338 machine_desc = setup_machine_fdt(__dtb_start);
339 if (!machine_desc)
340 panic("Embedded DT invalid\n");
341
342 /* To force early parsing of things like mem=xxx */
343 parse_early_param();
344
345 /* Platform/board specific: e.g. early console registration */
346 if (machine_desc->init_early)
347 machine_desc->init_early();
348
349 setup_processor();
350
351#ifdef CONFIG_SMP
352 smp_init_cpus();
353#endif
354
355 setup_arch_memory();
356
357 /* copy flat DT out of .init and then unflatten it */
358 copy_devtree();
359 unflatten_device_tree();
360
361	/* Can be an issue if someone passes the cmd line arg "ro",
362	 * but that is unlikely, so keeping it as it is
363 */
364 root_mountflags &= ~MS_RDONLY;
365
366 console_verbose();
367
368#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
369 conswitchp = &dummy_con;
370#endif
371
372 arc_unwind_init();
373 arc_unwind_setup();
374}
375
376static int __init customize_machine(void)
377{
378 /* Add platform devices */
379 if (machine_desc->init_machine)
380 machine_desc->init_machine();
381
382 return 0;
383}
384arch_initcall(customize_machine);
385
386static int __init init_late_machine(void)
387{
388 if (machine_desc->init_late)
389 machine_desc->init_late();
390
391 return 0;
392}
393late_initcall(init_late_machine);
394/*
395 * Get CPU information for use by the procfs.
396 */
397
398#define cpu_to_ptr(c) ((void *)(0xFFFF0000 | (unsigned int)(c)))
399#define ptr_to_cpu(p) (~0xFFFF0000UL & (unsigned int)(p))
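/*
 * Editor's worked example (annotation, not part of the patch): the macros
 * above round-trip a small integer through an opaque pointer, e.g. for
 * cpu 1:
 *
 *	cpu_to_ptr(1)                   == (void *)0xFFFF0001
 *	ptr_to_cpu((void *)0xFFFF0001)  == 1
 */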
400
401static int show_cpuinfo(struct seq_file *m, void *v)
402{
403 char *str;
404 int cpu_id = ptr_to_cpu(v);
405
406 str = (char *)__get_free_page(GFP_TEMPORARY);
407 if (!str)
408 goto done;
409
410 seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
411
412 seq_printf(m, "Bogo MIPS : \t%lu.%02lu\n",
413 loops_per_jiffy / (500000 / HZ),
414 (loops_per_jiffy / (5000 / HZ)) % 100);
415
416 seq_printf(m, arc_mmu_mumbojumbo(cpu_id, str, PAGE_SIZE));
417
418 seq_printf(m, arc_cache_mumbojumbo(cpu_id, str, PAGE_SIZE));
419
420 seq_printf(m, arc_extn_mumbojumbo(cpu_id, str, PAGE_SIZE));
421
422#ifdef CONFIG_SMP
423 seq_printf(m, arc_platform_smp_cpuinfo());
424#endif
425
426 free_page((unsigned long)str);
427done:
428 seq_printf(m, "\n\n");
429
430 return 0;
431}
432
433static void *c_start(struct seq_file *m, loff_t *pos)
434{
435 /*
436 * Callback returns cpu-id to iterator for show routine, NULL to stop.
437 * However since NULL is also a valid cpu-id (0), we use a round-about
438 * way to pass it w/o having to kmalloc/free a 2 byte string.
439	 * Encode cpu-id as 0xFFFFcccc, which is decoded by the show routine.
440 */
441 return *pos < num_possible_cpus() ? cpu_to_ptr(*pos) : NULL;
442}
443
444static void *c_next(struct seq_file *m, void *v, loff_t *pos)
445{
446 ++*pos;
447 return c_start(m, pos);
448}
449
450static void c_stop(struct seq_file *m, void *v)
451{
452}
453
454const struct seq_operations cpuinfo_op = {
455 .start = c_start,
456 .next = c_next,
457 .stop = c_stop,
458 .show = show_cpuinfo
459};
460
461static DEFINE_PER_CPU(struct cpu, cpu_topology);
462
463static int __init topology_init(void)
464{
465 int cpu;
466
467 for_each_present_cpu(cpu)
468 register_cpu(&per_cpu(cpu_topology, cpu), cpu);
469
470 return 0;
471}
472
473subsys_initcall(topology_init);
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
new file mode 100644
index 000000000000..ee6ef2f60a28
--- /dev/null
+++ b/arch/arc/kernel/signal.c
@@ -0,0 +1,360 @@
1/*
2 * Signal Handling for ARC
3 *
4 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * vineetg: Jan 2010 (Restarting of timer related syscalls)
11 *
12 * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
13 * -do_signal() supports TIF_RESTORE_SIGMASK
14 *  -do_signal() no longer needs oldset, required by OLD sys_sigsuspend
15 *  -sys_rt_sigsuspend() now comes from generic code, so discard arch implementation
16 * -sys_sigsuspend() no longer needs to fudge ptregs, hence that arg removed
17 * -sys_sigsuspend() no longer loops for do_signal(), sets TIF_xxx and leaves
18 * the job to do_signal()
19 *
20 * vineetg: July 2009
21 * -Modified Code to support the uClibc provided userland sigreturn stub
22 *  to avoid the kernel synthesizing it on the user stack at runtime, costing TLB
23 * probes and Cache line flushes.
24 *
25 * vineetg: July 2009
26 * -In stash_usr_regs( ) and restore_usr_regs( ), save/restore of user regs
27 *   is done in a block copy rather than one word at a time.
28 * This saves around 2K of code and improves LMBench lat_sig <catch>
29 *
30 * rajeshwarr: Feb 2009
31 * - Support for Realtime Signals
32 *
33 * vineetg: Aug 11th 2008: Bug #94183
34 * -ViXS were still seeing crashes when using insmod to load drivers.
35 *  It turned out that the code to change Execute permissions for TLB entries
36 *  of user was not guarded for interrupts (mod_tlb_permission).
37 *  This was causing TLB entries to be overwritten on unrelated indexes
38 *
39 * Vineetg: July 15th 2008: Bug #94183
40 * -Exception happens in Delay slot of a JMP, and before user space resumes,
41 *  Signal is delivered (Ctrl + C) => SIGINT.
42 * setup_frame( ) sets up PC,SP,BLINK to enable user space signal handler
43 * to run, but doesn't clear the Delay slot bit from status32. As a result,
44 * on resuming user mode, signal handler branches off to BTA of orig JMP
45 * -FIX: clear the DE bit from status32 in setup_frame( )
46 *
47 * Rahul Trivedi, Kanika Nema: Codito Technologies 2004
48 */
49
50#include <linux/signal.h>
51#include <linux/ptrace.h>
52#include <linux/personality.h>
53#include <linux/uaccess.h>
54#include <linux/syscalls.h>
55#include <linux/tracehook.h>
56#include <asm/ucontext.h>
57
58struct rt_sigframe {
59 struct siginfo info;
60 struct ucontext uc;
61#define MAGIC_SIGALTSTK 0x07302004
62 unsigned int sigret_magic;
63};
64
65static int
66stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
67 sigset_t *set)
68{
69 int err;
70 err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
71 sizeof(sf->uc.uc_mcontext.regs.scratch));
72 err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
73
74 return err;
75}
76
77static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
78{
79 sigset_t set;
80 int err;
81
82 err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
83 if (!err)
84 set_current_blocked(&set);
85
86 err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
87 sizeof(sf->uc.uc_mcontext.regs.scratch));
88
89 return err;
90}
91
92static inline int is_do_ss_needed(unsigned int magic)
93{
94 if (MAGIC_SIGALTSTK == magic)
95 return 1;
96 else
97 return 0;
98}
99
100SYSCALL_DEFINE0(rt_sigreturn)
101{
102 struct rt_sigframe __user *sf;
103 unsigned int magic;
104 int err;
105 struct pt_regs *regs = current_pt_regs();
106
107 /* Always make any pending restarted system calls return -EINTR */
108 current_thread_info()->restart_block.fn = do_no_restart_syscall;
109
110 /* Since we stacked the signal on a word boundary,
111	 * 'sp' should be word aligned here. If it's
112 * not, then the user is trying to mess with us.
113 */
114 if (regs->sp & 3)
115 goto badframe;
116
117 sf = (struct rt_sigframe __force __user *)(regs->sp);
118
119 if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
120 goto badframe;
121
122 err = restore_usr_regs(regs, sf);
123 err |= __get_user(magic, &sf->sigret_magic);
124 if (err)
125 goto badframe;
126
127 if (unlikely(is_do_ss_needed(magic)))
128 if (restore_altstack(&sf->uc.uc_stack))
129 goto badframe;
130
131 /* Don't restart from sigreturn */
132 syscall_wont_restart(regs);
133
134 return regs->r0;
135
136badframe:
137 force_sig(SIGSEGV, current);
138 return 0;
139}
140
141/*
142 * Determine which stack to use..
143 */
144static inline void __user *get_sigframe(struct k_sigaction *ka,
145 struct pt_regs *regs,
146 unsigned long framesize)
147{
148 unsigned long sp = regs->sp;
149 void __user *frame;
150
151 /* This is the X/Open sanctioned signal stack switching */
152 if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
153 sp = current->sas_ss_sp + current->sas_ss_size;
154
155 /* No matter what happens, 'sp' must be word
156 * aligned otherwise nasty things could happen
157 */
158
159 /* ATPCS B01 mandates 8-byte alignment */
160 frame = (void __user *)((sp - framesize) & ~7);
161
162 /* Check that we can actually write to the signal frame */
163 if (!access_ok(VERIFY_WRITE, frame, framesize))
164 frame = NULL;
165
166 return frame;
167}
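/*
 * Editor's worked example (annotation, not part of the patch): the mask in
 * get_sigframe() rounds the frame down to an 8-byte boundary, e.g. with
 * sp == 0x5ffffffc and framesize == 0x98:
 *
 *	(0x5ffffffc - 0x98) & ~7  ==  0x5fffff64 & ~7  ==  0x5fffff60
 */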
168
169/*
170 * translate the signal
171 */
172static inline int map_sig(int sig)
173{
174 struct thread_info *thread = current_thread_info();
175 if (thread->exec_domain && thread->exec_domain->signal_invmap
176 && sig < 32)
177 sig = thread->exec_domain->signal_invmap[sig];
178 return sig;
179}
180
181static int
182setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
183 sigset_t *set, struct pt_regs *regs)
184{
185 struct rt_sigframe __user *sf;
186 unsigned int magic = 0;
187 int err = 0;
188
189 sf = get_sigframe(ka, regs, sizeof(struct rt_sigframe));
190 if (!sf)
191 return 1;
192
193 /*
194 * SA_SIGINFO requires 3 args to signal handler:
195 * #1: sig-no (common to any handler)
196 * #2: struct siginfo
197 * #3: struct ucontext (completely populated)
198 */
199 if (unlikely(ka->sa.sa_flags & SA_SIGINFO)) {
200 err |= copy_siginfo_to_user(&sf->info, info);
201 err |= __put_user(0, &sf->uc.uc_flags);
202 err |= __put_user(NULL, &sf->uc.uc_link);
203 err |= __save_altstack(&sf->uc.uc_stack, regs->sp);
204
205 /* setup args 2 and 3 for user mode handler */
206 regs->r1 = (unsigned long)&sf->info;
207 regs->r2 = (unsigned long)&sf->uc;
208
209 /*
210		 * small optimization to avoid unconditionally calling do_sigaltstack
211 * in sigreturn path, now that we only have rt_sigreturn
212 */
213 magic = MAGIC_SIGALTSTK;
214 }
215
216 /*
217 * w/o SA_SIGINFO, struct ucontext is partially populated (only
218 * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
219 * during signal handler execution. This works for SA_SIGINFO as well
220 * although the semantics are now overloaded (the same reg state can be
221	 * inspected by userland: but is it allowed to fiddle with it?)
222 */
223 err |= stash_usr_regs(sf, regs, set);
224 err |= __put_user(magic, &sf->sigret_magic);
225 if (err)
226 return err;
227
228 /* #1 arg to the user Signal handler */
229 regs->r0 = map_sig(signo);
230
231 /* setup PC of user space signal handler */
232 regs->ret = (unsigned long)ka->sa.sa_handler;
233
234 /*
235	 * handler returns using the sigreturn stub already provided by userspace
236 */
237 BUG_ON(!(ka->sa.sa_flags & SA_RESTORER));
238 regs->blink = (unsigned long)ka->sa.sa_restorer;
239
240 /* User Stack for signal handler will be above the frame just carved */
241 regs->sp = (unsigned long)sf;
242
243 /*
244 * Bug 94183, Clear the DE bit, so that when signal handler
245 * starts to run, it doesn't use BTA
246 */
247 regs->status32 &= ~STATUS_DE_MASK;
248 regs->status32 |= STATUS_L_MASK;
249
250 return err;
251}
252
253static void arc_restart_syscall(struct k_sigaction *ka, struct pt_regs *regs)
254{
255 switch (regs->r0) {
256 case -ERESTART_RESTARTBLOCK:
257 case -ERESTARTNOHAND:
258 /*
259 * ERESTARTNOHAND means that the syscall should
260 * only be restarted if there was no handler for
261 * the signal, and since we only get here if there
262 * is a handler, we don't restart
263 */
264 regs->r0 = -EINTR; /* ERESTART_xxx is internal */
265 break;
266
267 case -ERESTARTSYS:
268 /*
269 * ERESTARTSYS means to restart the syscall if
270 * there is no handler or the handler was
271 * registered with SA_RESTART
272 */
273 if (!(ka->sa.sa_flags & SA_RESTART)) {
274 regs->r0 = -EINTR;
275 break;
276 }
277 /* fallthrough */
278
279 case -ERESTARTNOINTR:
280 /*
281 * ERESTARTNOINTR means that the syscall should
282 * be called again after the signal handler returns.
283		 * Setup reg state just as it was before doing the trap:
284		 * r0 has been clobbered with the sys call ret code, thus it
285 * needs to be reloaded with orig first arg to syscall
286 * in orig_r0. Rest of relevant reg-file:
287 * r8 (syscall num) and (r1 - r7) will be reset to
288 * their orig user space value when we ret from kernel
289 */
290 regs->r0 = regs->orig_r0;
291 regs->ret -= 4;
292 break;
293 }
294}
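/*
 * Editor's note (annotation, not part of the patch): "regs->ret -= 4"
 * rewinds the user PC over the 4-byte trap instruction, so that on return
 * to user mode the very same syscall trap is executed again, e.g.:
 *
 *	before restart: regs->ret == insn after trap, r0 == -ERESTARTNOINTR
 *	after restart:  regs->ret == trap insn,       r0 == orig_r0
 */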
295
296/*
297 * OK, we're invoking a handler
298 */
299static void
300handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
301 struct pt_regs *regs)
302{
303 sigset_t *oldset = sigmask_to_save();
304 int ret;
305
306 /* Set up the stack frame */
307 ret = setup_rt_frame(sig, ka, info, oldset, regs);
308
309 if (ret)
310 force_sigsegv(sig, current);
311 else
312 signal_delivered(sig, info, ka, regs, 0);
313}
314
315void do_signal(struct pt_regs *regs)
316{
317 struct k_sigaction ka;
318 siginfo_t info;
319 int signr;
320 int restart_scall;
321
322 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
323
324 restart_scall = in_syscall(regs) && syscall_restartable(regs);
325
326 if (signr > 0) {
327 if (restart_scall) {
328 arc_restart_syscall(&ka, regs);
329 syscall_wont_restart(regs); /* No more restarts */
330 }
331 handle_signal(signr, &ka, &info, regs);
332 return;
333 }
334
335 if (restart_scall) {
336 /* No handler for syscall: restart it */
337 if (regs->r0 == -ERESTARTNOHAND ||
338 regs->r0 == -ERESTARTSYS || regs->r0 == -ERESTARTNOINTR) {
339 regs->r0 = regs->orig_r0;
340 regs->ret -= 4;
341 } else if (regs->r0 == -ERESTART_RESTARTBLOCK) {
342 regs->r8 = __NR_restart_syscall;
343 regs->ret -= 4;
344 }
345 syscall_wont_restart(regs); /* No more restarts */
346 }
347
348 /* If there's no signal to deliver, restore the saved sigmask back */
349 restore_saved_sigmask();
350}
351
352void do_notify_resume(struct pt_regs *regs)
353{
354 /*
355	 * ASM glue guarantees that this is only called when returning to
356 * user mode
357 */
358 if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
359 tracehook_notify_resume(regs);
360}
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
new file mode 100644
index 000000000000..3af3e06dcf02
--- /dev/null
+++ b/arch/arc/kernel/smp.c
@@ -0,0 +1,332 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * RajeshwarR: Dec 11, 2007
9 * -- Added support for Inter Processor Interrupts
10 *
11 * Vineetg: Nov 1st, 2007
12 * -- Initial Write (Borrowed heavily from ARM)
13 */
14
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/spinlock.h>
18#include <linux/sched.h>
19#include <linux/interrupt.h>
20#include <linux/profile.h>
21#include <linux/errno.h>
22#include <linux/err.h>
23#include <linux/mm.h>
24#include <linux/cpu.h>
25#include <linux/smp.h>
26#include <linux/irq.h>
27#include <linux/delay.h>
28#include <linux/atomic.h>
29#include <linux/percpu.h>
30#include <linux/cpumask.h>
31#include <linux/spinlock_types.h>
32#include <linux/reboot.h>
33#include <asm/processor.h>
34#include <asm/setup.h>
35#include <asm/mach_desc.h>
36
37arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
38arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
39
40struct plat_smp_ops plat_smp_ops;
41
42/* XXX: per cpu ? Only needed once in early secondary boot */
43struct task_struct *secondary_idle_tsk;
44
45/* Called from start_kernel */
46void __init smp_prepare_boot_cpu(void)
47{
48}
49
50/*
51 * Initialise the CPU possible map early - this describes the CPUs
52 * which may be present or become present in the system.
53 */
54void __init smp_init_cpus(void)
55{
56 unsigned int i;
57
58 for (i = 0; i < NR_CPUS; i++)
59 set_cpu_possible(i, true);
60}
61
62/* called from init ( ) => process 1 */
63void __init smp_prepare_cpus(unsigned int max_cpus)
64{
65 int i;
66
67 /*
68 * Initialise the present map, which describes the set of CPUs
69 * actually populated at the present time.
70 */
71 for (i = 0; i < max_cpus; i++)
72 set_cpu_present(i, true);
73}
74
75void __init smp_cpus_done(unsigned int max_cpus)
76{
77
78}
79
80/*
81 * After power-up, a non-Master CPU needs to wait for the Master to kick-start it
82 *
83 * The default implementation halts
84 *
85 * This relies on platform specific support allowing the Master to directly
86 * set this CPU's PC (to @first_lines_of_secondary()) and kick-start it.
87 *
88 * Lacking such h/w assist, platforms can override this function to
89 * - make this function busy-spin on a token, eventually set by the Master
90 *   (from arc_platform_smp_wakeup_cpu())
91 * - once the token is available, jump to @first_lines_of_secondary
92 *   (using inline asm).
93 *
94 * Alert: can NOT use the stack here as it has not been set up for this CPU.
95 *        If it turns out to be elaborate, it's better to code it in assembly
96 *
97 */
98void __attribute__((weak)) arc_platform_smp_wait_to_boot(int cpu)
99{
100 /*
101 * As a hack for debugging - since debugger will single-step over the
102	 * FLAG insn - wrap the halt itself in a self-loop
103 */
104 __asm__ __volatile__(
105 "1: \n"
106 " flag 1 \n"
107 " b 1b \n");
108}
109
110const char *arc_platform_smp_cpuinfo(void)
111{
112 return plat_smp_ops.info;
113}
114
115/*
116 * The very first "C" code executed by secondary
117 * Called from asm stub in head.S
118 * "current"/R25 already setup by low level boot code
119 */
120void __cpuinit start_kernel_secondary(void)
121{
122 struct mm_struct *mm = &init_mm;
123 unsigned int cpu = smp_processor_id();
124
125 /* MMU, Caches, Vector Table, Interrupts etc */
126 setup_processor();
127
128 atomic_inc(&mm->mm_users);
129 atomic_inc(&mm->mm_count);
130 current->active_mm = mm;
131
132 notify_cpu_starting(cpu);
133 set_cpu_online(cpu, true);
134
135 pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
136
137 if (machine_desc->init_smp)
138 machine_desc->init_smp(smp_processor_id());
139
140 arc_local_timer_setup(cpu);
141
142 local_irq_enable();
143 preempt_disable();
144 cpu_idle();
145}
146
147/*
148 * Called from kernel_init( ) -> smp_init( ) - for each CPU
149 *
150 * At this point, Secondary Processor is "HALT"ed:
151 *  -It booted, but was halted in head.S, or
152 *  -It was configured to halt-on-reset,
153 *  so it needs to be woken up.
154 *
155 * Essential requirements being where to run from (PC) and stack (SP)
156*/
157int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
158{
159 unsigned long wait_till;
160
161 secondary_idle_tsk = idle;
162
163	pr_info("Idle Task [%d] %p\n", cpu, idle);
164 pr_info("Trying to bring up CPU%u ...\n", cpu);
165
166 if (plat_smp_ops.cpu_kick)
167 plat_smp_ops.cpu_kick(cpu,
168 (unsigned long)first_lines_of_secondary);
169
170 /* wait for 1 sec after kicking the secondary */
171 wait_till = jiffies + HZ;
172 while (time_before(jiffies, wait_till)) {
173 if (cpu_online(cpu))
174 break;
175 }
176
177 if (!cpu_online(cpu)) {
178		pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
179 return -1;
180 }
181
182 secondary_idle_tsk = NULL;
183
184 return 0;
185}
186
187/*
188 * not supported here
189 */
190int __init setup_profiling_timer(unsigned int multiplier)
191{
192 return -EINVAL;
193}
194
195/*****************************************************************************/
196/* Inter Processor Interrupt Handling */
197/*****************************************************************************/
198
199/*
200 * structures for inter-processor calls:
201 * a collection of single bit IPI messages
202 *
203 */
204
205/*
206 * TODO_rajesh investigate tlb message types.
207 * IPI Timer not needed because each ARC has an individual Interrupting Timer
208 */
209enum ipi_msg_type {
210 IPI_NOP = 0,
211 IPI_RESCHEDULE = 1,
212 IPI_CALL_FUNC,
213 IPI_CALL_FUNC_SINGLE,
214 IPI_CPU_STOP
215};
216
217struct ipi_data {
218 unsigned long bits;
219};
220
221static DEFINE_PER_CPU(struct ipi_data, ipi_data);
222
223static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
224{
225 unsigned long flags;
226 unsigned int cpu;
227
228 local_irq_save(flags);
229
230 for_each_cpu(cpu, callmap) {
231 struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
232 set_bit(msg, &ipi->bits);
233 }
234
235 /* Call the platform specific cross-CPU call function */
236 if (plat_smp_ops.ipi_send)
237 plat_smp_ops.ipi_send((void *)callmap);
238
239 local_irq_restore(flags);
240}
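/*
 * Editor's worked example (annotation, not part of the patch): pending
 * messages are packed as bits in ipi_data.bits, so a RESCHEDULE (bit 1)
 * posted together with a CALL_FUNC (bit 2) leaves ipi->bits == 0x6.
 * The receiving CPU's do_IPI() drains the word atomically with
 * xchg(&ipi->bits, 0) and __do_IPI() dispatches each set bit in turn.
 */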
241
242void smp_send_reschedule(int cpu)
243{
244 ipi_send_msg(cpumask_of(cpu), IPI_RESCHEDULE);
245}
246
247void smp_send_stop(void)
248{
249 struct cpumask targets;
250 cpumask_copy(&targets, cpu_online_mask);
251 cpumask_clear_cpu(smp_processor_id(), &targets);
252 ipi_send_msg(&targets, IPI_CPU_STOP);
253}
254
255void arch_send_call_function_single_ipi(int cpu)
256{
257 ipi_send_msg(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
258}
259
260void arch_send_call_function_ipi_mask(const struct cpumask *mask)
261{
262 ipi_send_msg(mask, IPI_CALL_FUNC);
263}
264
265/*
266 * ipi_cpu_stop - handle IPI from smp_send_stop()
267 */
268static void ipi_cpu_stop(unsigned int cpu)
269{
270 machine_halt();
271}
272
273static inline void __do_IPI(unsigned long *ops, struct ipi_data *ipi, int cpu)
274{
275 unsigned long msg = 0;
276
277 do {
278 msg = find_next_bit(ops, BITS_PER_LONG, msg+1);
279
280 switch (msg) {
281 case IPI_RESCHEDULE:
282 scheduler_ipi();
283 break;
284
285 case IPI_CALL_FUNC:
286 generic_smp_call_function_interrupt();
287 break;
288
289 case IPI_CALL_FUNC_SINGLE:
290 generic_smp_call_function_single_interrupt();
291 break;
292
293 case IPI_CPU_STOP:
294 ipi_cpu_stop(cpu);
295 break;
296 }
297 } while (msg < BITS_PER_LONG);
298
299}
300
301/*
302 * arch-common ISR to handle for inter-processor interrupts
303 * Has hooks for platform specific IPI
304 */
305irqreturn_t do_IPI(int irq, void *dev_id)
306{
307 int cpu = smp_processor_id();
308 struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
309 unsigned long ops;
310
311 if (plat_smp_ops.ipi_clear)
312 plat_smp_ops.ipi_clear(cpu, irq);
313
314 /*
315 * XXX: is this loop really needed
316	 * And do we need to move ipi_clear inside
317 */
318 while ((ops = xchg(&ipi->bits, 0)) != 0)
319 __do_IPI(&ops, ipi, cpu);
320
321 return IRQ_HANDLED;
322}
323
324/*
325 * API called by platform code to hookup arch-common ISR to their IPI IRQ
326 */
327static DEFINE_PER_CPU(int, ipi_dev);
328int smp_ipi_irq_setup(int cpu, int irq)
329{
330	int *dev_id = &per_cpu(ipi_dev, cpu);
331 return request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev_id);
332}
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
new file mode 100644
index 000000000000..a63ff842564b
--- /dev/null
+++ b/arch/arc/kernel/stacktrace.c
@@ -0,0 +1,254 @@
1/*
2 * stacktrace.c : stacktracing APIs needed by rest of kernel
3 * (wrappers over ARC dwarf based unwinder)
4 *
5 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * vineetg: aug 2009
12 * -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk( )
13 * for displaying task's kernel mode call stack in /proc/<pid>/stack
14 * -Iterator based approach to have single copy of unwinding core and APIs
15 * needing unwinding, implement the logic in iterator regarding:
16 * = which frame onwards to start capture
17 * = which frame to stop capturing (wchan)
18 *    = specifics of data structs where trace is saved (CONFIG_STACKTRACE etc)
19 *
20 * vineetg: March 2009
21 * -Implemented correct versions of thread_saved_pc() and get_wchan()
22 *
23 * rajeshwarr: 2008
24 * -Initial implementation
25 */
26
27#include <linux/ptrace.h>
28#include <linux/export.h>
29#include <linux/stacktrace.h>
30#include <linux/kallsyms.h>
31#include <asm/arcregs.h>
32#include <asm/unwind.h>
33#include <asm/switch_to.h>
34
35/*-------------------------------------------------------------------------
36 * Unwinder Iterator
37 *-------------------------------------------------------------------------
38 */
39
40#ifdef CONFIG_ARC_DW2_UNWIND
41
42static void seed_unwind_frame_info(struct task_struct *tsk,
43 struct pt_regs *regs,
44 struct unwind_frame_info *frame_info)
45{
46 if (tsk == NULL && regs == NULL) {
47 unsigned long fp, sp, blink, ret;
48 frame_info->task = current;
49
50 __asm__ __volatile__(
51 "mov %0,r27\n\t"
52 "mov %1,r28\n\t"
53 "mov %2,r31\n\t"
54 "mov %3,r63\n\t"
55 : "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
56 );
57
58 frame_info->regs.r27 = fp;
59 frame_info->regs.r28 = sp;
60 frame_info->regs.r31 = blink;
61 frame_info->regs.r63 = ret;
62 frame_info->call_frame = 0;
63 } else if (regs == NULL) {
64
65 frame_info->task = tsk;
66
67 frame_info->regs.r27 = KSTK_FP(tsk);
68 frame_info->regs.r28 = KSTK_ESP(tsk);
69 frame_info->regs.r31 = KSTK_BLINK(tsk);
70 frame_info->regs.r63 = (unsigned int)__switch_to;
71
72 /* In the prologue of __switch_to, first FP is saved on stack
73 * and then SP is copied to FP. Dwarf assumes cfa as FP based
74 * but we didn't save FP. The value retrieved above is FP's
75 * state in previous frame.
76 * As a work around for this, we unwind from __switch_to start
77 * and adjust SP accordingly. The other limitation is that
78		 * dwarf rules are not generated for the inline assembly in
79		 * the __switch_to macro
80 */
81 frame_info->regs.r27 = 0;
82 frame_info->regs.r28 += 64;
83 frame_info->call_frame = 0;
84
85 } else {
86 frame_info->task = tsk;
87
88 frame_info->regs.r27 = regs->fp;
89 frame_info->regs.r28 = regs->sp;
90 frame_info->regs.r31 = regs->blink;
91 frame_info->regs.r63 = regs->ret;
92 frame_info->call_frame = 0;
93 }
94}
95
96#endif
97
98static noinline unsigned int
99arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
100 int (*consumer_fn) (unsigned int, void *), void *arg)
101{
102#ifdef CONFIG_ARC_DW2_UNWIND
103 int ret = 0;
104 unsigned int address;
105 struct unwind_frame_info frame_info;
106
107 seed_unwind_frame_info(tsk, regs, &frame_info);
108
109 while (1) {
110 address = UNW_PC(&frame_info);
111
112 if (address && __kernel_text_address(address)) {
113 if (consumer_fn(address, arg) == -1)
114 break;
115 }
116
117 ret = arc_unwind(&frame_info);
118
119 if (ret == 0) {
120 frame_info.regs.r63 = frame_info.regs.r31;
121 continue;
122 } else {
123 break;
124 }
125 }
126
127 return address; /* return the last address it saw */
128#else
129	/* On ARC, only the dwarf based unwinder works. fp based backtracing is
130	 * not possible (even with -fno-omit-frame-pointer) because of the way
131	 * the function prologue is set up (callee regs saved and then fp set,
132	 * not the other way around)
133 */
134 pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
135 return 0;
136
137#endif
138}
139
140/*-------------------------------------------------------------------------
141 * callbacks called by unwinder iterator to implement kernel APIs
142 *
143 * The callback can return -1 to force the iterator to stop, which by default
144 * keeps going till the bottom-most frame.
145 *-------------------------------------------------------------------------
146 */
147
148/* Call-back which plugs into unwinding core to dump the stack in
149 * case of panic/OOPs/BUG etc
150 */
151static int __print_sym(unsigned int address, void *unused)
152{
153 __print_symbol(" %s\n", address);
154 return 0;
155}
156
157#ifdef CONFIG_STACKTRACE
158
159/* Call-back which plugs into unwinding core to capture the
160 * traces needed by kernel on /proc/<pid>/stack
161 */
162static int __collect_all(unsigned int address, void *arg)
163{
164 struct stack_trace *trace = arg;
165
166 if (trace->skip > 0)
167 trace->skip--;
168 else
169 trace->entries[trace->nr_entries++] = address;
170
171 if (trace->nr_entries >= trace->max_entries)
172 return -1;
173
174 return 0;
175}
176
177static int __collect_all_but_sched(unsigned int address, void *arg)
178{
179 struct stack_trace *trace = arg;
180
181 if (in_sched_functions(address))
182 return 0;
183
184 if (trace->skip > 0)
185 trace->skip--;
186 else
187 trace->entries[trace->nr_entries++] = address;
188
189 if (trace->nr_entries >= trace->max_entries)
190 return -1;
191
192 return 0;
193}
194
195#endif
196
197static int __get_first_nonsched(unsigned int address, void *unused)
198{
199 if (in_sched_functions(address))
200 return 0;
201
202 return -1;
203}
204
205/*-------------------------------------------------------------------------
206 * APIs expected by various kernel sub-systems
207 *-------------------------------------------------------------------------
208 */
209
210noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs)
211{
212 pr_info("\nStack Trace:\n");
213 arc_unwind_core(tsk, regs, __print_sym, NULL);
214}
215EXPORT_SYMBOL(show_stacktrace);
216
217/* Expected by sched Code */
218void show_stack(struct task_struct *tsk, unsigned long *sp)
219{
220 show_stacktrace(tsk, NULL);
221}
222
223/* Expected by Rest of kernel code */
224void dump_stack(void)
225{
226 show_stacktrace(NULL, NULL);
227}
228EXPORT_SYMBOL(dump_stack);
229
230/* Another API expected by the scheduler; shows up in "ps" as Wait Channel
231 * Of course just returning schedule( ) would be pointless, so unwind
232 * until we get out of scheduler code
233 */
234unsigned int get_wchan(struct task_struct *tsk)
235{
236 return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
237}
238
239#ifdef CONFIG_STACKTRACE
240
241/*
242 * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
243 * A typical use is when /proc/<pid>/stack is queried by userland
244 */
245void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
246{
247 arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
248}
249
250void save_stack_trace(struct stack_trace *trace)
251{
252 arc_unwind_core(current, NULL, __collect_all, trace);
253}
254#endif
diff --git a/arch/arc/kernel/sys.c b/arch/arc/kernel/sys.c
new file mode 100644
index 000000000000..f6bdd07583f3
--- /dev/null
+++ b/arch/arc/kernel/sys.c
@@ -0,0 +1,18 @@
1
2#include <linux/syscalls.h>
3#include <linux/signal.h>
4#include <linux/unistd.h>
5
6#include <asm/syscalls.h>
7
8#define sys_clone sys_clone_wrapper
9#define sys_fork sys_fork_wrapper
10#define sys_vfork sys_vfork_wrapper
11
12#undef __SYSCALL
13#define __SYSCALL(nr, call) [nr] = (call),
14
15void *sys_call_table[NR_syscalls] = {
16 [0 ... NR_syscalls-1] = sys_ni_syscall,
17#include <asm/unistd.h>
18};
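The table above combines two GNU C initializer features: the range designator [0 ... NR_syscalls-1] fills every slot with sys_ni_syscall first, and the per-entry designators generated by __SYSCALL() then override the implemented slots (later designators win). A standalone sketch of the same construction, with made-up names:

#include <stdio.h>

static int ni(void)  { return -1; }	/* stand-in for sys_ni_syscall */
static int foo(void) { return 42; }	/* stand-in for one real syscall */

static int (*demo_table[8])(void) = {
	[0 ... 7] = ni,		/* default every slot */
	[3] = foo,		/* later designator overrides slot 3 */
};

int main(void)
{
	printf("%d %d\n", demo_table[0](), demo_table[3]());	/* -1 42 */
	return 0;
}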
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
new file mode 100644
index 000000000000..f13f72807aa5
--- /dev/null
+++ b/arch/arc/kernel/time.c
@@ -0,0 +1,265 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * vineetg: Jan 2011
9 * -sched_clock( ) no longer jiffies based. Uses the same clocksource
10 * as gtod
11 *
12 * Rajeshwarr/Vineetg: Mar 2008
13 * -Implemented CONFIG_GENERIC_TIME (rather deleted arch specific code)
14 * for arch independent gettimeofday()
15 * -Implemented CONFIG_GENERIC_CLOCKEVENTS as base for hrtimers
16 *
17 * Vineetg: Mar 2008: Forked off from time.c which now is time-jiff.c
18 */
19
20/* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1
21 * Each can be programmed to go from @count to @limit and optionally
22 * interrupt when that happens.
23 * A write to Control Register clears the Interrupt
24 *
25 * We've designated TIMER0 for events (clockevents)
26 * while TIMER1 for free running (clocksource)
27 *
28 * Newer ARC700 cores have a 64bit clk fetching RTSC insn, preferred over TIMER1
29 */
30
31#include <linux/spinlock.h>
32#include <linux/interrupt.h>
33#include <linux/module.h>
34#include <linux/sched.h>
35#include <linux/kernel.h>
36#include <linux/interrupt.h>
37#include <linux/time.h>
38#include <linux/init.h>
39#include <linux/timex.h>
40#include <linux/profile.h>
41#include <linux/clocksource.h>
42#include <linux/clockchips.h>
43#include <asm/irq.h>
44#include <asm/arcregs.h>
45#include <asm/clk.h>
46#include <asm/mach_desc.h>
47
48#define ARC_TIMER_MAX 0xFFFFFFFF
49
50/********** Clock Source Device *********/
51
52#ifdef CONFIG_ARC_HAS_RTSC
53
54int __cpuinit arc_counter_setup(void)
55{
56 /* RTSC insn taps into cpu clk, needs no setup */
57
58 /* For SMP, only allowed if cross-core-sync, hence usable as cs */
59 return 1;
60}
61
62static cycle_t arc_counter_read(struct clocksource *cs)
63{
64 unsigned long flags;
65 union {
66#ifdef CONFIG_CPU_BIG_ENDIAN
67 struct { u32 high, low; };
68#else
69 struct { u32 low, high; };
70#endif
71 cycle_t full;
72 } stamp;
73
74 flags = arch_local_irq_save();
75
76 __asm__ __volatile(
77 " .extCoreRegister tsch, 58, r, cannot_shortcut \n"
78 " rtsc %0, 0 \n"
79 " mov %1, 0 \n"
80 : "=r" (stamp.low), "=r" (stamp.high));
81
82 arch_local_irq_restore(flags);
83
84 return stamp.full;
85}
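The union in arc_counter_read() lets the asm fill the two 32-bit halves by name while the caller consumes one 64-bit cycle_t; the #ifdef swaps the member order so low/high always land on the right halves of the word. A host-side sketch of the idiom (assumes a little-endian build; values are illustrative):

#include <stdint.h>
#include <assert.h>

union stamp {
	struct { uint32_t low, high; };	/* little-endian member order */
	uint64_t full;
};

int main(void)
{
	union stamp s;

	s.low  = 0xdeadbeef;	/* what "rtsc %0, 0" would deposit */
	s.high = 0;		/* what "mov %1, 0" zeroes above */

	assert(s.full == 0xdeadbeefULL);
	return 0;
}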
86
87static struct clocksource arc_counter = {
88 .name = "ARC RTSC",
89 .rating = 300,
90 .read = arc_counter_read,
91 .mask = CLOCKSOURCE_MASK(32),
92 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
93};
94
95#else /* !CONFIG_ARC_HAS_RTSC */
96
97static bool is_usable_as_clocksource(void)
98{
99#ifdef CONFIG_SMP
100 return 0;
101#else
102 return 1;
103#endif
104}
105
106/*
107 * set 32bit TIMER1 to keep counting monotonically and wraparound
108 */
109int __cpuinit arc_counter_setup(void)
110{
111 write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
112 write_aux_reg(ARC_REG_TIMER1_CNT, 0);
113 write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
114
115 return is_usable_as_clocksource();
116}
117
118static cycle_t arc_counter_read(struct clocksource *cs)
119{
120 return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
121}
122
123static struct clocksource arc_counter = {
124 .name = "ARC Timer1",
125 .rating = 300,
126 .read = arc_counter_read,
127 .mask = CLOCKSOURCE_MASK(32),
128 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
129};
130
131#endif
132
133/********** Clock Event Device *********/
134
135/*
136 * Arm the timer to interrupt after @limit cycles
137 * The distinction for oneshot/periodic is done in arc_event_timer_ack() below
138 */
139static void arc_timer_event_setup(unsigned int limit)
140{
141 write_aux_reg(ARC_REG_TIMER0_LIMIT, limit);
142 write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */
143
144 write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
145}
146
147/*
148 * Acknowledge the interrupt (oneshot) and optionally re-arm it (periodic)
149 * -Any write to CTRL Reg will ack the intr (NH bit: Count when not halted)
150 * -Rearming is done by setting the IE bit
151 *
152 * Small optimisation: Normal code would have been
153 * if (irq_reenable)
154 * CTRL_REG = (IE | NH);
155 * else
156 * CTRL_REG = NH;
157 * However since IE is BIT0 we can fold the branch
158 */
159static void arc_timer_event_ack(unsigned int irq_reenable)
160{
161 write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
162}
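The fold relies on two facts: a C comparison yields exactly 0 or 1, and TIMER_CTRL_IE is bit 0, so the boolean itself is the IE bit. A quick equivalence check (IE = bit0 per the comment above; NH chosen as bit1 for illustration only):

#include <assert.h>

#define TIMER_CTRL_IE	(1 << 0)	/* per the comment: IE is BIT0 */
#define TIMER_CTRL_NH	(1 << 1)	/* assumed value, illustration only */

int main(void)
{
	unsigned irq_reenable;

	for (irq_reenable = 0; irq_reenable <= 1; irq_reenable++) {
		unsigned branchy = irq_reenable
				   ? (TIMER_CTRL_IE | TIMER_CTRL_NH)
				   : TIMER_CTRL_NH;

		assert(branchy == (irq_reenable | TIMER_CTRL_NH));
	}
	return 0;
}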
163
164static int arc_clkevent_set_next_event(unsigned long delta,
165 struct clock_event_device *dev)
166{
167 arc_timer_event_setup(delta);
168 return 0;
169}
170
171static void arc_clkevent_set_mode(enum clock_event_mode mode,
172 struct clock_event_device *dev)
173{
174 switch (mode) {
175 case CLOCK_EVT_MODE_PERIODIC:
176 arc_timer_event_setup(arc_get_core_freq() / HZ);
177 break;
178 case CLOCK_EVT_MODE_ONESHOT:
179 break;
180 default:
181 break;
182 }
183
184 return;
185}
186
187static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
188 .name = "ARC Timer0",
189 .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
190 .mode = CLOCK_EVT_MODE_UNUSED,
191 .rating = 300,
192 .irq = TIMER0_IRQ, /* hardwired, no need for resources */
193 .set_next_event = arc_clkevent_set_next_event,
194 .set_mode = arc_clkevent_set_mode,
195};
196
197static irqreturn_t timer_irq_handler(int irq, void *dev_id)
198{
199 struct clock_event_device *clk = &__get_cpu_var(arc_clockevent_device);
200
201 arc_timer_event_ack(clk->mode == CLOCK_EVT_MODE_PERIODIC);
202 clk->event_handler(clk);
203 return IRQ_HANDLED;
204}
205
206static struct irqaction arc_timer_irq = {
207 .name = "Timer0 (clock-evt-dev)",
208 .flags = IRQF_TIMER | IRQF_PERCPU,
209 .handler = timer_irq_handler,
210};
211
212/*
213 * Setup the local event timer for @cpu
214 * N.B. weak so that some exotic ARC SoCs can completely override it
215 */
216void __attribute__((weak)) __cpuinit arc_local_timer_setup(unsigned int cpu)
217{
218 struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);
219
220 clockevents_calc_mult_shift(clk, arc_get_core_freq(), 5);
221
222 clk->max_delta_ns = clockevent_delta2ns(ARC_TIMER_MAX, clk);
223 clk->cpumask = cpumask_of(cpu);
224
225 clockevents_register_device(clk);
226
227 /*
228 * setup the per-cpu timer IRQ handler - for all cpus
229 * For non boot CPU explicitly unmask at intc
230 * setup_irq() -> .. -> irq_startup() already does this on boot-cpu
231 */
232 if (!cpu)
233 setup_irq(TIMER0_IRQ, &arc_timer_irq);
234 else
235 arch_unmask_irq(TIMER0_IRQ);
236}
237
238/*
239 * Called from start_kernel() - boot CPU only
240 *
241 * -Sets up h/w timers as applicable on boot cpu
242 * -Also sets up any global state needed for timer subsystem:
243 * - for "counting" timer, registers a clocksource, usable across CPUs
244 * (provided that underlying counter h/w is synchronized across cores)
245 * - for "event" timer, sets up TIMER0 IRQ (as that is platform agnostic)
246 */
247void __init time_init(void)
248{
249 /*
250 * sets up the timekeeping free-flowing counter which also returns
251 * whether the counter is usable as clocksource
252 */
253 if (arc_counter_setup())
254 /*
255		 * CLK up to 4.29 GHz can be safely represented in 32 bits
256		 * because the max 32 bit number is 4,294,967,295
257 */
258 clocksource_register_hz(&arc_counter, arc_get_core_freq());
259
260 /* sets up the periodic event timer */
261 arc_local_timer_setup(smp_processor_id());
262
263 if (machine_desc->init_time)
264 machine_desc->init_time();
265}
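Two numbers fall out of the 32-bit clocksource math above: any core clock up to 2^32 - 1 Hz (about 4.29 GHz) fits in the hz argument, and TIMER1 wraps every 2^32 / hz seconds, which the clocksource core must paper over. A quick sketch of the wrap interval (80 MHz is an assumed example frequency, not from the patch):

#include <stdio.h>

int main(void)
{
	unsigned long long span = 1ULL << 32;	/* counts until TIMER1 wraps */
	unsigned long hz = 80000000;		/* assumed core clk: 80 MHz */

	/* ~53.7 s at 80 MHz; the clocksource layer handles the wraparound */
	printf("wrap every %.1f s\n", (double)span / hz);
	return 0;
}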
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
new file mode 100644
index 000000000000..7496995371e8
--- /dev/null
+++ b/arch/arc/kernel/traps.c
@@ -0,0 +1,170 @@
1/*
2 * Traps/Non-MMU Exception handling for ARC
3 *
4 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * vineetg: May 2011
11 * -user-space unaligned access emulation
12 *
13 * Rahul Trivedi: Codito Technologies 2004
14 */
15
16#include <linux/sched.h>
17#include <linux/kdebug.h>
18#include <linux/uaccess.h>
19#include <asm/ptrace.h>
20#include <asm/setup.h>
21#include <asm/kprobes.h>
22#include <asm/unaligned.h>
23#include <asm/kgdb.h>
24
25void __init trap_init(void)
26{
27 return;
28}
29
30void die(const char *str, struct pt_regs *regs, unsigned long address,
31 unsigned long cause_reg)
32{
33 show_kernel_fault_diag(str, regs, address, cause_reg);
34
35 /* DEAD END */
36 __asm__("flag 1");
37}
38
39/*
40 * Helper called for bulk of exceptions NOT needing specific handling
41 * -for user faults enqueues requested signal
42 * -for kernel, chk if due to copy_(to|from)_user, otherwise die()
43 */
44static noinline int handle_exception(unsigned long cause, char *str,
45 struct pt_regs *regs, siginfo_t *info)
46{
47 if (user_mode(regs)) {
48 struct task_struct *tsk = current;
49
50 tsk->thread.fault_address = (__force unsigned int)info->si_addr;
51 tsk->thread.cause_code = cause;
52
53 force_sig_info(info->si_signo, info, tsk);
54
55 } else {
56 /* If not due to copy_(to|from)_user, we are doomed */
57 if (fixup_exception(regs))
58 return 0;
59
60 die(str, regs, (unsigned long)info->si_addr, cause);
61 }
62
63 return 1;
64}
65
66#define DO_ERROR_INFO(signr, str, name, sicode) \
67int name(unsigned long cause, unsigned long address, struct pt_regs *regs) \
68{ \
69 siginfo_t info = { \
70 .si_signo = signr, \
71 .si_errno = 0, \
72 .si_code = sicode, \
73 .si_addr = (void __user *)address, \
74 }; \
75 return handle_exception(cause, str, regs, &info);\
76}
77
78/*
79 * Entry points for exceptions NOT needing specific handling
80 */
81DO_ERROR_INFO(SIGILL, "Priv Op/Disabled Extn", do_privilege_fault, ILL_PRVOPC)
82DO_ERROR_INFO(SIGILL, "Invalid Extn Insn", do_extension_fault, ILL_ILLOPC)
83DO_ERROR_INFO(SIGILL, "Illegal Insn (or Seq)", insterror_is_error, ILL_ILLOPC)
84DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", do_memory_error, BUS_ADRERR)
85DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT)
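For reference, mechanically expanding one of the lines above, the SIGBUS entry, through DO_ERROR_INFO() yields the following handler (reflowed here for readability):

int do_memory_error(unsigned long cause, unsigned long address,
		    struct pt_regs *regs)
{
	siginfo_t info = {
		.si_signo = SIGBUS,
		.si_errno = 0,
		.si_code  = BUS_ADRERR,
		.si_addr  = (void __user *)address,
	};
	return handle_exception(cause, "Invalid Mem Access", regs, &info);
}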
86
87#ifdef CONFIG_ARC_MISALIGN_ACCESS
88/*
89 * Entry Point for Misaligned Data access Exception, for emulating in software
90 */
91int do_misaligned_access(unsigned long cause, unsigned long address,
92 struct pt_regs *regs, struct callee_regs *cregs)
93{
94 if (misaligned_fixup(address, regs, cause, cregs) != 0) {
95 siginfo_t info;
96
97 info.si_signo = SIGBUS;
98 info.si_errno = 0;
99 info.si_code = BUS_ADRALN;
100 info.si_addr = (void __user *)address;
101 return handle_exception(cause, "Misaligned Access", regs,
102 &info);
103 }
104 return 0;
105}
106
107#else
108DO_ERROR_INFO(SIGSEGV, "Misaligned Access", do_misaligned_access, SEGV_ACCERR)
109#endif
110
111/*
112 * Entry point for misc errors such as Nested Exceptions
113 * -Duplicate TLB entry is handled separately though
114 */
115void do_machine_check_fault(unsigned long cause, unsigned long address,
116 struct pt_regs *regs)
117{
118 die("Machine Check Exception", regs, address, cause);
119}
120
121
122/*
123 * Entry point for traps induced by ARCompact TRAP_S <n> insn
124 * This is in the same family as the TRAP0/SWI insn (they use the same vector).
125 * The only difference is that the SWI insn takes no operand, while TRAP_S
126 * does, which shows up in the ECR Reg as an 8 bit param.
127 * Thus TRAP_S <n> can be used for specific purposes
128 * -1 used for software breakpointing (gdb)
129 * -2 used by kprobes
130 */
131void do_non_swi_trap(unsigned long cause, unsigned long address,
132 struct pt_regs *regs)
133{
134 unsigned int param = cause & 0xff;
135
136 switch (param) {
137 case 1:
138 trap_is_brkpt(cause, address, regs);
139 break;
140
141 case 2:
142 trap_is_kprobe(param, address, regs);
143 break;
144
145 case 3:
146 case 4:
147 kgdb_trap(regs, param);
148 break;
149
150 default:
151 break;
152 }
153}
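So, per the dispatch above, a debugger plants TRAP_S 1 at breakpoint addresses and kprobes uses TRAP_S 2. A sketch of the user-space side that would take the param == 1 path (assumes an ARCompact toolchain; illustrative only):

/* Raises the breakpoint flavour of the trap; the kernel routes it
 * through do_non_swi_trap() -> trap_is_brkpt() -> SIGTRAP.
 */
static inline void sw_breakpoint(void)
{
	__asm__ __volatile__("trap_s 1");
}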
154
155/*
156 * Entry point for Instruction Error Exception
157 * -For a corner case, ARC kprobes implementation resorts to using
158 * this exception, hence the check
159 */
160void do_insterror_or_kprobe(unsigned long cause,
161 unsigned long address,
162 struct pt_regs *regs)
163{
164 /* Check if this exception is caused by kprobes */
165 if (notify_die(DIE_IERR, "kprobe_ierr", regs, address,
166 cause, SIGILL) == NOTIFY_STOP)
167 return;
168
169 insterror_is_error(cause, address, regs);
170}
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
new file mode 100644
index 000000000000..7c10873c311f
--- /dev/null
+++ b/arch/arc/kernel/troubleshoot.c
@@ -0,0 +1,322 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2.
6 */
7
8#include <linux/ptrace.h>
9#include <linux/module.h>
10#include <linux/mm.h>
11#include <linux/fs.h>
12#include <linux/kdev_t.h>
13#include <linux/fs_struct.h>
14#include <linux/proc_fs.h>
15#include <linux/file.h>
16#include <asm/arcregs.h>
17
18/*
19 * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
20 * -Prints 3 regs per line and a CR.
21 * -So callee regs can continue right after scratch regs, the trailing CR gets special handling
22 */
23static noinline void print_reg_file(long *reg_rev, int start_num)
24{
25 unsigned int i;
26 char buf[512];
27 int n = 0, len = sizeof(buf);
28
29 /* weird loop because pt_regs regs rev r12..r0, r25..r13 */
30 for (i = start_num; i < start_num + 13; i++) {
31 n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t",
32 i, (unsigned long)*reg_rev);
33
34 if (((i + 1) % 3) == 0)
35 n += scnprintf(buf + n, len - n, "\n");
36
37 reg_rev--;
38 }
39
40 if (start_num != 0)
41 n += scnprintf(buf + n, len - n, "\n\n");
42
43 pr_info("%s", buf);
44}
45
46static void show_callee_regs(struct callee_regs *cregs)
47{
48 print_reg_file(&(cregs->r13), 13);
49}
50
51void print_task_path_n_nm(struct task_struct *tsk, char *buf)
52{
53 struct path path;
54 char *path_nm = NULL;
55 struct mm_struct *mm;
56 struct file *exe_file;
57
58 mm = get_task_mm(tsk);
59 if (!mm)
60 goto done;
61
62 exe_file = get_mm_exe_file(mm);
63 mmput(mm);
64
65 if (exe_file) {
66 path = exe_file->f_path;
67 path_get(&exe_file->f_path);
68 fput(exe_file);
69 path_nm = d_path(&path, buf, 255);
70 path_put(&path);
71 }
72
73done:
74 pr_info("%s, TGID %u\n", path_nm, tsk->tgid);
75}
76EXPORT_SYMBOL(print_task_path_n_nm);
77
78static void show_faulting_vma(unsigned long address, char *buf)
79{
80 struct vm_area_struct *vma;
81 struct inode *inode;
82 unsigned long ino = 0;
83 dev_t dev = 0;
84 char *nm = buf;
85
86 vma = find_vma(current->active_mm, address);
87
88	/* guard against the find_vma( ) behaviour, which returns the next VMA
89	 * if the containing VMA is not found
90 */
91 if (vma && (vma->vm_start <= address)) {
92 struct file *file = vma->vm_file;
93 if (file) {
94 struct path *path = &file->f_path;
95 nm = d_path(path, buf, PAGE_SIZE - 1);
96 inode = vma->vm_file->f_path.dentry->d_inode;
97 dev = inode->i_sb->s_dev;
98 ino = inode->i_ino;
99 }
100 pr_info(" @off 0x%lx in [%s]\n"
101 " VMA: 0x%08lx to 0x%08lx\n\n",
102 address - vma->vm_start, nm, vma->vm_start, vma->vm_end);
103 } else
104 pr_info(" @No matching VMA found\n");
105}
106
107static void show_ecr_verbose(struct pt_regs *regs)
108{
109 unsigned int vec, cause_code, cause_reg;
110 unsigned long address;
111
112 cause_reg = current->thread.cause_code;
113 pr_info("\n[ECR]: 0x%08x => ", cause_reg);
114
115 /* For Data fault, this is data address not instruction addr */
116 address = current->thread.fault_address;
117
118 vec = cause_reg >> 16;
119 cause_code = (cause_reg >> 8) & 0xFF;
120
121 /* For DTLB Miss or ProtV, display the memory involved too */
122 if (vec == ECR_V_DTLB_MISS) {
123 pr_cont("Invalid (%s) @ 0x%08lx by insn @ 0x%08lx\n",
124 (cause_code == 0x01) ? "Read From" :
125 ((cause_code == 0x02) ? "Write to" : "EX"),
126 address, regs->ret);
127 } else if (vec == ECR_V_ITLB_MISS) {
128 pr_cont("Insn could not be fetched\n");
129 } else if (vec == ECR_V_MACH_CHK) {
130 pr_cont("%s\n", (cause_code == 0x0) ?
131 "Double Fault" : "Other Fatal Err");
132
133 } else if (vec == ECR_V_PROTV) {
134 if (cause_code == ECR_C_PROTV_INST_FETCH)
135 pr_cont("Execute from Non-exec Page\n");
136 else if (cause_code == ECR_C_PROTV_LOAD)
137 pr_cont("Read from Non-readable Page\n");
138 else if (cause_code == ECR_C_PROTV_STORE)
139 pr_cont("Write to Non-writable Page\n");
140 else if (cause_code == ECR_C_PROTV_XCHG)
141 pr_cont("Data exchange protection violation\n");
142 else if (cause_code == ECR_C_PROTV_MISALIG_DATA)
143 pr_cont("Misaligned r/w from 0x%08lx\n", address);
144 } else if (vec == ECR_V_INSN_ERR) {
145 pr_cont("Illegal Insn\n");
146 } else {
147 pr_cont("Check Programmer's Manual\n");
148 }
149}
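The shifts at the top of this function encode the ECR layout everything here relies on: vector number above bit 16, cause code in bits 15:8, and (for some vectors) a parameter in bits 7:0. A standalone decode of an illustrative value (the sample ECR below is made up, not taken from hardware):

#include <stdio.h>

int main(void)
{
	unsigned int ecr = 0x00050100;			/* assumed sample */
	unsigned int vec = ecr >> 16;			/* 0x05 */
	unsigned int cause_code = (ecr >> 8) & 0xFF;	/* 0x01 */
	unsigned int param = ecr & 0xFF;		/* 0x00 */

	printf("vec=0x%02x cause=0x%02x param=0x%02x\n",
	       vec, cause_code, param);
	return 0;
}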
150
151/************************************************************************
152 * API called by rest of kernel
153 ***********************************************************************/
154
155void show_regs(struct pt_regs *regs)
156{
157 struct task_struct *tsk = current;
158 struct callee_regs *cregs;
159 char *buf;
160
161 buf = (char *)__get_free_page(GFP_TEMPORARY);
162 if (!buf)
163 return;
164
165 print_task_path_n_nm(tsk, buf);
166
167 if (current->thread.cause_code)
168 show_ecr_verbose(regs);
169
170 pr_info("[EFA]: 0x%08lx\n", current->thread.fault_address);
171 pr_info("[ERET]: 0x%08lx (PC of Faulting Instr)\n", regs->ret);
172
173 show_faulting_vma(regs->ret, buf); /* faulting code, not data */
174
175 /* can't use print_vma_addr() yet as it doesn't check for
176 * non-inclusive vma
177 */
178
179 /* print special regs */
180 pr_info("status32: 0x%08lx\n", regs->status32);
181 pr_info(" SP: 0x%08lx\tFP: 0x%08lx\n", regs->sp, regs->fp);
182 pr_info("BTA: 0x%08lx\tBLINK: 0x%08lx\n",
183 regs->bta, regs->blink);
184 pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
185 regs->lp_start, regs->lp_end, regs->lp_count);
186
187 /* print regs->r0 thru regs->r12
188 * Sequential printing was generating horrible code
189 */
190 print_reg_file(&(regs->r0), 0);
191
192 /* If Callee regs were saved, display them too */
193 cregs = (struct callee_regs *)current->thread.callee_reg;
194 if (cregs)
195 show_callee_regs(cregs);
196
197 free_page((unsigned long)buf);
198}
199
200void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
201 unsigned long address, unsigned long cause_reg)
202{
203 current->thread.fault_address = address;
204 current->thread.cause_code = cause_reg;
205
206 /* Caller and Callee regs */
207 show_regs(regs);
208
209 /* Show stack trace if this Fatality happened in kernel mode */
210 if (!user_mode(regs))
211 show_stacktrace(current, regs);
212}
213
214#ifdef CONFIG_DEBUG_FS
215
216#include <linux/module.h>
217#include <linux/fs.h>
218#include <linux/mount.h>
219#include <linux/pagemap.h>
220#include <linux/init.h>
221#include <linux/namei.h>
222#include <linux/debugfs.h>
223
224static struct dentry *test_dentry;
225static struct dentry *test_dir;
226static struct dentry *test_u32_dentry;
227
228static u32 clr_on_read = 1;
229
230#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
231u32 numitlb, numdtlb, num_pte_not_present;
232
233static int fill_display_data(char *kbuf)
234{
235 size_t num = 0;
236 num += sprintf(kbuf + num, "I-TLB Miss %x\n", numitlb);
237 num += sprintf(kbuf + num, "D-TLB Miss %x\n", numdtlb);
238 num += sprintf(kbuf + num, "PTE not present %x\n", num_pte_not_present);
239
240 if (clr_on_read)
241 numitlb = numdtlb = num_pte_not_present = 0;
242
243 return num;
244}
245
246static int tlb_stats_open(struct inode *inode, struct file *file)
247{
248 file->private_data = (void *)__get_free_page(GFP_KERNEL);
249 return 0;
250}
251
252/* called on user read(): display the counters */
253static ssize_t tlb_stats_output(struct file *file, /* file descriptor */
254 char __user *user_buf, /* user buffer */
255 size_t len, /* length of buffer */
256 loff_t *offset) /* offset in the file */
257{
258 size_t num;
259 char *kbuf = (char *)file->private_data;
260
261	/* All of the data can be shoved in one iteration */
262 if (*offset != 0)
263 return 0;
264
265 num = fill_display_data(kbuf);
266
267	/* simple_read_from_buffer() is a helper for copying to user space.
268	   It copies up to @2 (num) bytes from the kernel buffer @4 (kbuf) at
269	   offset @3 (offset) into the user space address starting at @1
270	   (user_buf). @5 (len) is the max size of the user buffer.
271	 */
272 return simple_read_from_buffer(user_buf, num, offset, kbuf, len);
273}
274
275/* called on user write : clears the counters */
276static ssize_t tlb_stats_clear(struct file *file, const char __user *user_buf,
277 size_t length, loff_t *offset)
278{
279 numitlb = numdtlb = num_pte_not_present = 0;
280 return length;
281}
282
283static int tlb_stats_close(struct inode *inode, struct file *file)
284{
285 free_page((unsigned long)(file->private_data));
286 return 0;
287}
288
289static const struct file_operations tlb_stats_file_ops = {
290 .read = tlb_stats_output,
291 .write = tlb_stats_clear,
292 .open = tlb_stats_open,
293 .release = tlb_stats_close
294};
295#endif
296
297static int __init arc_debugfs_init(void)
298{
299 test_dir = debugfs_create_dir("arc", NULL);
300
301#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
302 test_dentry = debugfs_create_file("tlb_stats", 0444, test_dir, NULL,
303 &tlb_stats_file_ops);
304#endif
305
306 test_u32_dentry =
307 debugfs_create_u32("clr_on_read", 0444, test_dir, &clr_on_read);
308
309 return 0;
310}
311
312module_init(arc_debugfs_init);
313
314static void __exit arc_debugfs_exit(void)
315{
316 debugfs_remove(test_u32_dentry);
317 debugfs_remove(test_dentry);
318 debugfs_remove(test_dir);
319}
320module_exit(arc_debugfs_exit);
321
322#endif
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
new file mode 100644
index 000000000000..4cd81633febd
--- /dev/null
+++ b/arch/arc/kernel/unaligned.c
@@ -0,0 +1,245 @@
1/*
2 * Copyright (C) 2011-2012 Synopsys (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * vineetg : May 2011
9 * -Adapted (from .26 to .35)
10 * -original contribution by Tim.yao@amlogic.com
11 *
12 */
13
14#include <linux/types.h>
15#include <linux/ptrace.h>
16#include <linux/uaccess.h>
17#include <asm/disasm.h>
18
19#define __get8_unaligned_check(val, addr, err) \
20 __asm__( \
21 "1: ldb.ab %1, [%2, 1]\n" \
22 "2:\n" \
23 " .section .fixup,\"ax\"\n" \
24 " .align 4\n" \
25 "3: mov %0, 1\n" \
26 " b 2b\n" \
27 " .previous\n" \
28 " .section __ex_table,\"a\"\n" \
29 " .align 4\n" \
30 " .long 1b, 3b\n" \
31 " .previous\n" \
32 : "=r" (err), "=&r" (val), "=r" (addr) \
33 : "0" (err), "2" (addr))
34
35#define get16_unaligned_check(val, addr) \
36 do { \
37 unsigned int err = 0, v, a = addr; \
38 __get8_unaligned_check(v, a, err); \
39 val = v ; \
40 __get8_unaligned_check(v, a, err); \
41 val |= v << 8; \
42 if (err) \
43 goto fault; \
44 } while (0)
45
46#define get32_unaligned_check(val, addr) \
47 do { \
48 unsigned int err = 0, v, a = addr; \
49 __get8_unaligned_check(v, a, err); \
50 val = v << 0; \
51 __get8_unaligned_check(v, a, err); \
52 val |= v << 8; \
53 __get8_unaligned_check(v, a, err); \
54 val |= v << 16; \
55 __get8_unaligned_check(v, a, err); \
56 val |= v << 24; \
57 if (err) \
58 goto fault; \
59 } while (0)
60
61#define put16_unaligned_check(val, addr) \
62 do { \
63 unsigned int err = 0, v = val, a = addr;\
64 \
65 __asm__( \
66 "1: stb.ab %1, [%2, 1]\n" \
67 " lsr %1, %1, 8\n" \
68 "2: stb %1, [%2]\n" \
69 "3:\n" \
70 " .section .fixup,\"ax\"\n" \
71 " .align 4\n" \
72 "4: mov %0, 1\n" \
73 " b 3b\n" \
74 " .previous\n" \
75 " .section __ex_table,\"a\"\n" \
76 " .align 4\n" \
77 " .long 1b, 4b\n" \
78 " .long 2b, 4b\n" \
79 " .previous\n" \
80 : "=r" (err), "=&r" (v), "=&r" (a) \
81 : "0" (err), "1" (v), "2" (a)); \
82 \
83 if (err) \
84 goto fault; \
85 } while (0)
86
87#define put32_unaligned_check(val, addr) \
88 do { \
89 unsigned int err = 0, v = val, a = addr;\
90 __asm__( \
91 \
92 "1: stb.ab %1, [%2, 1]\n" \
93 " lsr %1, %1, 8\n" \
94 "2: stb.ab %1, [%2, 1]\n" \
95 " lsr %1, %1, 8\n" \
96 "3: stb.ab %1, [%2, 1]\n" \
97 " lsr %1, %1, 8\n" \
98 "4: stb %1, [%2]\n" \
99 "5:\n" \
100 " .section .fixup,\"ax\"\n" \
101 " .align 4\n" \
102 "6: mov %0, 1\n" \
103 " b 5b\n" \
104 " .previous\n" \
105 " .section __ex_table,\"a\"\n" \
106 " .align 4\n" \
107 " .long 1b, 6b\n" \
108 " .long 2b, 6b\n" \
109 " .long 3b, 6b\n" \
110 " .long 4b, 6b\n" \
111 " .previous\n" \
112 : "=r" (err), "=&r" (v), "=&r" (a) \
113 : "0" (err), "1" (v), "2" (a)); \
114 \
115 if (err) \
116 goto fault; \
117 } while (0)
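Stripped of the fixup plumbing, the four macros above just synthesize a wide access out of byte accesses, which can never fault on alignment; the asm form is only needed so each byte load/store gets its own __ex_table entry. The plain-C shape of get32/put32 (sketch, little-endian byte order as in the macros):

#include <stdint.h>

/* what get32_unaligned_check() computes, minus the fault handling */
static uint32_t get32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* what put32_unaligned_check() stores: LSB first, shifting down by 8 */
static void put32(uint8_t *p, uint32_t v)
{
	int i;

	for (i = 0; i < 4; i++, v >>= 8)
		p[i] = v & 0xff;
}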
118
119/* sysctl hooks */
120int unaligned_enabled __read_mostly = 1; /* Enabled by default */
121int no_unaligned_warning __read_mostly = 1; /* Only 1 warning by default */
122
123static void fixup_load(struct disasm_state *state, struct pt_regs *regs,
124 struct callee_regs *cregs)
125{
126 int val;
127
128 /* register write back */
129 if ((state->aa == 1) || (state->aa == 2)) {
130 set_reg(state->wb_reg, state->src1 + state->src2, regs, cregs);
131
132 if (state->aa == 2)
133 state->src2 = 0;
134 }
135
136 if (state->zz == 0) {
137 get32_unaligned_check(val, state->src1 + state->src2);
138 } else {
139 get16_unaligned_check(val, state->src1 + state->src2);
140
141 if (state->x)
142 val = (val << 16) >> 16;
143 }
144
145 if (state->pref == 0)
146 set_reg(state->dest, val, regs, cregs);
147
148 return;
149
150fault: state->fault = 1;
151}
152
153static void fixup_store(struct disasm_state *state, struct pt_regs *regs,
154 struct callee_regs *cregs)
155{
156 /* register write back */
157 if ((state->aa == 1) || (state->aa == 2)) {
158 set_reg(state->wb_reg, state->src2 + state->src3, regs, cregs);
159
160		if (state->aa == 2)
161 state->src3 = 0;
162 } else if (state->aa == 3) {
163 if (state->zz == 2) {
164 set_reg(state->wb_reg, state->src2 + (state->src3 << 1),
165 regs, cregs);
166 } else if (!state->zz) {
167 set_reg(state->wb_reg, state->src2 + (state->src3 << 2),
168 regs, cregs);
169 } else {
170 goto fault;
171 }
172 }
173
174 /* write fix-up */
175 if (!state->zz)
176 put32_unaligned_check(state->src1, state->src2 + state->src3);
177 else
178 put16_unaligned_check(state->src1, state->src2 + state->src3);
179
180 return;
181
182fault: state->fault = 1;
183}
184
185/*
186 * Handle an unaligned access
187 * Returns 0 if successfully handled, 1 if some error happened
188 */
189int misaligned_fixup(unsigned long address, struct pt_regs *regs,
190 unsigned long cause, struct callee_regs *cregs)
191{
192 struct disasm_state state;
193 char buf[TASK_COMM_LEN];
194
195 /* handle user mode only and only if enabled by sysadmin */
196 if (!user_mode(regs) || !unaligned_enabled)
197 return 1;
198
199 if (no_unaligned_warning) {
200		pr_warn_once("%s(%d) made unaligned access which was emulated"
201			     " by kernel assist.\n This can degrade application"
202			     " performance significantly.\n To enable further"
203			     " logging of such instances, please\n"
204			     " echo 0 > /proc/sys/kernel/ignore-unaligned-usertrap\n",
205 get_task_comm(buf, current), task_pid_nr(current));
206 } else {
207 /* Add rate limiting if it gets down to it */
208 pr_warn("%s(%d): unaligned access to/from 0x%lx by PC: 0x%lx\n",
209 get_task_comm(buf, current), task_pid_nr(current),
210 address, regs->ret);
211
212 }
213
214 disasm_instr(regs->ret, &state, 1, regs, cregs);
215
216 if (state.fault)
217 goto fault;
218
219	/* ldb/stb should not cause an unaligned exception */
220 if ((state.zz == 1) || (state.di))
221 goto fault;
222
223 if (!state.write)
224 fixup_load(&state, regs, cregs);
225 else
226 fixup_store(&state, regs, cregs);
227
228 if (state.fault)
229 goto fault;
230
231 if (delay_mode(regs)) {
232 regs->ret = regs->bta;
233 regs->status32 &= ~STATUS_DE_MASK;
234 } else {
235 regs->ret += state.instr_len;
236 }
237
238 return 0;
239
240fault:
241 pr_err("Alignment trap: fault in fix-up %08lx at [<%08lx>]\n",
242 state.words[0], address);
243
244 return 1;
245}
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
new file mode 100644
index 000000000000..a8d02223da44
--- /dev/null
+++ b/arch/arc/kernel/unwind.c
@@ -0,0 +1,1329 @@
1/*
2 * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 * Copyright (C) 2002-2006 Novell, Inc.
4 * Jan Beulich <jbeulich@novell.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * A simple API for unwinding kernel stacks. This is used for
11 * debugging and error reporting purposes. The kernel doesn't need
12 * full-blown stack unwinding with all the bells and whistles, so there
13 * is not much point in implementing the full Dwarf2 unwind API.
14 */
15
16#include <linux/sched.h>
17#include <linux/module.h>
18#include <linux/bootmem.h>
19#include <linux/sort.h>
20#include <linux/slab.h>
21#include <linux/stop_machine.h>
22#include <linux/uaccess.h>
23#include <linux/ptrace.h>
24#include <asm/sections.h>
25#include <asm/unaligned.h>
26#include <asm/unwind.h>
27
28extern char __start_unwind[], __end_unwind[];
29/* extern const u8 __start_unwind_hdr[], __end_unwind_hdr[];*/
30
31/* #define UNWIND_DEBUG */
32
33#ifdef UNWIND_DEBUG
34int dbg_unw;
35#define unw_debug(fmt, ...) \
36do { \
37 if (dbg_unw) \
38 pr_info(fmt, ##__VA_ARGS__); \
39} while (0)
40#else
41#define unw_debug(fmt, ...)
42#endif
43
44#define MAX_STACK_DEPTH 8
45
46#define EXTRA_INFO(f) { \
47 BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) \
48 % FIELD_SIZEOF(struct unwind_frame_info, f)) \
49 + offsetof(struct unwind_frame_info, f) \
50 / FIELD_SIZEOF(struct unwind_frame_info, f), \
51 FIELD_SIZEOF(struct unwind_frame_info, f) \
52 }
53#define PTREGS_INFO(f) EXTRA_INFO(regs.f)
54
55static const struct {
56 unsigned offs:BITS_PER_LONG / 2;
57 unsigned width:BITS_PER_LONG / 2;
58} reg_info[] = {
59UNW_REGISTER_INFO};
60
61#undef PTREGS_INFO
62#undef EXTRA_INFO
63
64#ifndef REG_INVALID
65#define REG_INVALID(r) (reg_info[r].width == 0)
66#endif
67
68#define DW_CFA_nop 0x00
69#define DW_CFA_set_loc 0x01
70#define DW_CFA_advance_loc1 0x02
71#define DW_CFA_advance_loc2 0x03
72#define DW_CFA_advance_loc4 0x04
73#define DW_CFA_offset_extended 0x05
74#define DW_CFA_restore_extended 0x06
75#define DW_CFA_undefined 0x07
76#define DW_CFA_same_value 0x08
77#define DW_CFA_register 0x09
78#define DW_CFA_remember_state 0x0a
79#define DW_CFA_restore_state 0x0b
80#define DW_CFA_def_cfa 0x0c
81#define DW_CFA_def_cfa_register 0x0d
82#define DW_CFA_def_cfa_offset 0x0e
83#define DW_CFA_def_cfa_expression 0x0f
84#define DW_CFA_expression 0x10
85#define DW_CFA_offset_extended_sf 0x11
86#define DW_CFA_def_cfa_sf 0x12
87#define DW_CFA_def_cfa_offset_sf 0x13
88#define DW_CFA_val_offset 0x14
89#define DW_CFA_val_offset_sf 0x15
90#define DW_CFA_val_expression 0x16
91#define DW_CFA_lo_user 0x1c
92#define DW_CFA_GNU_window_save 0x2d
93#define DW_CFA_GNU_args_size 0x2e
94#define DW_CFA_GNU_negative_offset_extended 0x2f
95#define DW_CFA_hi_user 0x3f
96
97#define DW_EH_PE_FORM 0x07
98#define DW_EH_PE_native 0x00
99#define DW_EH_PE_leb128 0x01
100#define DW_EH_PE_data2 0x02
101#define DW_EH_PE_data4 0x03
102#define DW_EH_PE_data8 0x04
103#define DW_EH_PE_signed 0x08
104#define DW_EH_PE_ADJUST 0x70
105#define DW_EH_PE_abs 0x00
106#define DW_EH_PE_pcrel 0x10
107#define DW_EH_PE_textrel 0x20
108#define DW_EH_PE_datarel 0x30
109#define DW_EH_PE_funcrel 0x40
110#define DW_EH_PE_aligned 0x50
111#define DW_EH_PE_indirect 0x80
112#define DW_EH_PE_omit 0xff
113
114typedef unsigned long uleb128_t;
115typedef signed long sleb128_t;
116
117static struct unwind_table {
118 struct {
119 unsigned long pc;
120 unsigned long range;
121 } core, init;
122 const void *address;
123 unsigned long size;
124 const unsigned char *header;
125 unsigned long hdrsz;
126 struct unwind_table *link;
127 const char *name;
128} root_table;
129
130struct unwind_item {
131 enum item_location {
132 Nowhere,
133 Memory,
134 Register,
135 Value
136 } where;
137 uleb128_t value;
138};
139
140struct unwind_state {
141 uleb128_t loc, org;
142 const u8 *cieStart, *cieEnd;
143 uleb128_t codeAlign;
144 sleb128_t dataAlign;
145 struct cfa {
146 uleb128_t reg, offs;
147 } cfa;
148 struct unwind_item regs[ARRAY_SIZE(reg_info)];
149 unsigned stackDepth:8;
150 unsigned version:8;
151 const u8 *label;
152 const u8 *stack[MAX_STACK_DEPTH];
153};
154
155static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 };
156
157static struct unwind_table *find_table(unsigned long pc)
158{
159 struct unwind_table *table;
160
161 for (table = &root_table; table; table = table->link)
162 if ((pc >= table->core.pc
163 && pc < table->core.pc + table->core.range)
164 || (pc >= table->init.pc
165 && pc < table->init.pc + table->init.range))
166 break;
167
168 return table;
169}
170
171static unsigned long read_pointer(const u8 **pLoc,
172 const void *end, signed ptrType);
173
174static void init_unwind_table(struct unwind_table *table, const char *name,
175 const void *core_start, unsigned long core_size,
176 const void *init_start, unsigned long init_size,
177 const void *table_start, unsigned long table_size,
178 const u8 *header_start, unsigned long header_size)
179{
180 const u8 *ptr = header_start + 4;
181 const u8 *end = header_start + header_size;
182
183 table->core.pc = (unsigned long)core_start;
184 table->core.range = core_size;
185 table->init.pc = (unsigned long)init_start;
186 table->init.range = init_size;
187 table->address = table_start;
188 table->size = table_size;
189
190 /* See if the linker provided table looks valid. */
191 if (header_size <= 4
192 || header_start[0] != 1
193 || (void *)read_pointer(&ptr, end, header_start[1]) != table_start
194 || header_start[2] == DW_EH_PE_omit
195 || read_pointer(&ptr, end, header_start[2]) <= 0
196 || header_start[3] == DW_EH_PE_omit)
197 header_start = NULL;
198
199 table->hdrsz = header_size;
200 smp_wmb();
201 table->header = header_start;
202 table->link = NULL;
203 table->name = name;
204}
205
206void __init arc_unwind_init(void)
207{
208 init_unwind_table(&root_table, "kernel", _text, _end - _text, NULL, 0,
209 __start_unwind, __end_unwind - __start_unwind,
210 NULL, 0);
211 /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/
212}
213
214static const u32 bad_cie, not_fde;
215static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *);
216static signed fde_pointer_type(const u32 *cie);
217
218struct eh_frame_hdr_table_entry {
219 unsigned long start, fde;
220};
221
222static int cmp_eh_frame_hdr_table_entries(const void *p1, const void *p2)
223{
224 const struct eh_frame_hdr_table_entry *e1 = p1;
225 const struct eh_frame_hdr_table_entry *e2 = p2;
226
227 return (e1->start > e2->start) - (e1->start < e2->start);
228}
229
230static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
231{
232 struct eh_frame_hdr_table_entry *e1 = p1;
233 struct eh_frame_hdr_table_entry *e2 = p2;
234 unsigned long v;
235
236 v = e1->start;
237 e1->start = e2->start;
238 e2->start = v;
239 v = e1->fde;
240 e1->fde = e2->fde;
241 e2->fde = v;
242}
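The comparator above uses the (a > b) - (a < b) idiom rather than returning e1->start - e2->start: subtracting two unsigned longs cannot be narrowed to an int sanely and could wrap, while the two comparisons always produce a clean -1/0/+1. A quick check of the idiom:

#include <assert.h>

static int cmp(unsigned long a, unsigned long b)
{
	return (a > b) - (a < b);	/* -1, 0 or +1, never overflows */
}

int main(void)
{
	assert(cmp(1, 2) == -1);
	assert(cmp(2, 2) ==  0);
	assert(cmp(~0UL, 0) == 1);	/* a - b would have wrapped */
	return 0;
}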
243
244static void __init setup_unwind_table(struct unwind_table *table,
245 void *(*alloc) (unsigned long))
246{
247 const u8 *ptr;
248 unsigned long tableSize = table->size, hdrSize;
249 unsigned n;
250 const u32 *fde;
251 struct {
252 u8 version;
253 u8 eh_frame_ptr_enc;
254 u8 fde_count_enc;
255 u8 table_enc;
256 unsigned long eh_frame_ptr;
257 unsigned int fde_count;
258 struct eh_frame_hdr_table_entry table[];
259 } __attribute__ ((__packed__)) *header;
260
261 if (table->header)
262 return;
263
264 if (table->hdrsz)
265 pr_warn(".eh_frame_hdr for '%s' present but unusable\n",
266 table->name);
267
268 if (tableSize & (sizeof(*fde) - 1))
269 return;
270
271 for (fde = table->address, n = 0;
272 tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde;
273 tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
274 const u32 *cie = cie_for_fde(fde, table);
275 signed ptrType;
276
277 if (cie == &not_fde)
278 continue;
279 if (cie == NULL || cie == &bad_cie)
280 return;
281 ptrType = fde_pointer_type(cie);
282 if (ptrType < 0)
283 return;
284
285 ptr = (const u8 *)(fde + 2);
286 if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
287 ptrType)) {
288 /* FIXME_Rajesh We have 4 instances of null addresses
289 * instead of the initial loc addr
290 * return;
291 */
292 }
293 ++n;
294 }
295
296 if (tableSize || !n)
297 return;
298
299 hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
300 + 2 * n * sizeof(unsigned long);
301 header = alloc(hdrSize);
302 if (!header)
303 return;
304 header->version = 1;
305 header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
306 header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4;
307 header->table_enc = DW_EH_PE_abs | DW_EH_PE_native;
308 put_unaligned((unsigned long)table->address, &header->eh_frame_ptr);
309 BUILD_BUG_ON(offsetof(typeof(*header), fde_count)
310 % __alignof(typeof(header->fde_count)));
311 header->fde_count = n;
312
313 BUILD_BUG_ON(offsetof(typeof(*header), table)
314 % __alignof(typeof(*header->table)));
315 for (fde = table->address, tableSize = table->size, n = 0;
316 tableSize;
317 tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
318 /* const u32 *cie = fde + 1 - fde[1] / sizeof(*fde); */
319 const u32 *cie = (const u32 *)(fde[1]);
320
321 if (fde[1] == 0xffffffff)
322 continue; /* this is a CIE */
323 ptr = (const u8 *)(fde + 2);
324 header->table[n].start = read_pointer(&ptr,
325 (const u8 *)(fde + 1) +
326 *fde,
327 fde_pointer_type(cie));
328 header->table[n].fde = (unsigned long)fde;
329 ++n;
330 }
331 WARN_ON(n != header->fde_count);
332
333 sort(header->table,
334 n,
335 sizeof(*header->table),
336 cmp_eh_frame_hdr_table_entries, swap_eh_frame_hdr_table_entries);
337
338 table->hdrsz = hdrSize;
339 smp_wmb();
340 table->header = (const void *)header;
341}
342
343static void *__init balloc(unsigned long sz)
344{
345 return __alloc_bootmem_nopanic(sz,
346 sizeof(unsigned int),
347 __pa(MAX_DMA_ADDRESS));
348}
349
350void __init arc_unwind_setup(void)
351{
352 setup_unwind_table(&root_table, balloc);
353}
354
355#ifdef CONFIG_MODULES
356
357static struct unwind_table *last_table;
358
359/* Must be called with module_mutex held. */
360void *unwind_add_table(struct module *module, const void *table_start,
361 unsigned long table_size)
362{
363 struct unwind_table *table;
364
365 if (table_size <= 0)
366 return NULL;
367
368 table = kmalloc(sizeof(*table), GFP_KERNEL);
369 if (!table)
370 return NULL;
371
372 init_unwind_table(table, module->name,
373 module->module_core, module->core_size,
374 module->module_init, module->init_size,
375 table_start, table_size,
376 NULL, 0);
377
378#ifdef UNWIND_DEBUG
379 unw_debug("Table added for [%s] %lx %lx\n",
380 module->name, table->core.pc, table->core.range);
381#endif
382 if (last_table)
383 last_table->link = table;
384 else
385 root_table.link = table;
386 last_table = table;
387
388 return table;
389}
390
391struct unlink_table_info {
392 struct unwind_table *table;
393 int init_only;
394};
395
396static int unlink_table(void *arg)
397{
398 struct unlink_table_info *info = arg;
399 struct unwind_table *table = info->table, *prev;
400
401 for (prev = &root_table; prev->link && prev->link != table;
402 prev = prev->link)
403 ;
404
405 if (prev->link) {
406 if (info->init_only) {
407 table->init.pc = 0;
408 table->init.range = 0;
409 info->table = NULL;
410 } else {
411 prev->link = table->link;
412 if (!prev->link)
413 last_table = prev;
414 }
415 } else
416 info->table = NULL;
417
418 return 0;
419}
420
421/* Must be called with module_mutex held. */
422void unwind_remove_table(void *handle, int init_only)
423{
424 struct unwind_table *table = handle;
425 struct unlink_table_info info;
426
427 if (!table || table == &root_table)
428 return;
429
430 if (init_only && table == last_table) {
431 table->init.pc = 0;
432 table->init.range = 0;
433 return;
434 }
435
436 info.table = table;
437 info.init_only = init_only;
438
439 unlink_table(&info); /* XXX: SMP */
440 kfree(table);
441}
442
443#endif /* CONFIG_MODULES */
444
445static uleb128_t get_uleb128(const u8 **pcur, const u8 *end)
446{
447 const u8 *cur = *pcur;
448 uleb128_t value;
449 unsigned shift;
450
451 for (shift = 0, value = 0; cur < end; shift += 7) {
452 if (shift + 7 > 8 * sizeof(value)
453 && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
454 cur = end + 1;
455 break;
456 }
457 value |= (uleb128_t) (*cur & 0x7f) << shift;
458 if (!(*cur++ & 0x80))
459 break;
460 }
461 *pcur = cur;
462
463 return value;
464}
465
466static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
467{
468 const u8 *cur = *pcur;
469 sleb128_t value;
470 unsigned shift;
471
472 for (shift = 0, value = 0; cur < end; shift += 7) {
473 if (shift + 7 > 8 * sizeof(value)
474 && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
475 cur = end + 1;
476 break;
477 }
478 value |= (sleb128_t) (*cur & 0x7f) << shift;
479 if (!(*cur & 0x80)) {
480 value |= -(*cur++ & 0x40) << shift;
481 break;
482 }
483 }
484 *pcur = cur;
485
486 return value;
487}
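To see the decoders at work: LEB128 packs 7 payload bits per byte, least significant group first, with bit 7 as the continuation flag. A worked decode of the classic DWARF-spec example bytes 0xE5 0x8E 0x26, reusing the types and function above:

static void __maybe_unused uleb_demo(void)
{
	const u8 demo[] = { 0xE5, 0x8E, 0x26 };
	const u8 *p = demo;
	uleb128_t v;

	/* 0xE5 -> 0x65 << 0  = 101
	 * 0x8E -> 0x0E << 7  = 1792	(top bit set: continue)
	 * 0x26 -> 0x26 << 14 = 622592	(top bit clear: stop)
	 * 101 + 1792 + 622592 = 624485
	 */
	v = get_uleb128(&p, demo + sizeof(demo));	/* v == 624485 */
	(void)v;
}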
488
489static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
490{
491 const u32 *cie;
492
493 if (!*fde || (*fde & (sizeof(*fde) - 1)))
494 return &bad_cie;
495
496 if (fde[1] == 0xffffffff)
497 return &not_fde; /* this is a CIE */
498
499 if ((fde[1] & (sizeof(*fde) - 1)))
500/* || fde[1] > (unsigned long)(fde + 1) - (unsigned long)table->address) */
501 return NULL; /* this is not a valid FDE */
502
503 /* cie = fde + 1 - fde[1] / sizeof(*fde); */
504 cie = (u32 *) fde[1];
505
506 if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde)
507 || (*cie & (sizeof(*cie) - 1))
508 || (cie[1] != 0xffffffff))
509 return NULL; /* this is not a (valid) CIE */
510 return cie;
511}
512
513static unsigned long read_pointer(const u8 **pLoc, const void *end,
514 signed ptrType)
515{
516 unsigned long value = 0;
517 union {
518 const u8 *p8;
519 const u16 *p16u;
520 const s16 *p16s;
521 const u32 *p32u;
522 const s32 *p32s;
523 const unsigned long *pul;
524 } ptr;
525
526 if (ptrType < 0 || ptrType == DW_EH_PE_omit)
527 return 0;
528 ptr.p8 = *pLoc;
529 switch (ptrType & DW_EH_PE_FORM) {
530 case DW_EH_PE_data2:
531 if (end < (const void *)(ptr.p16u + 1))
532 return 0;
533 if (ptrType & DW_EH_PE_signed)
534 value = get_unaligned((u16 *) ptr.p16s++);
535 else
536 value = get_unaligned((u16 *) ptr.p16u++);
537 break;
538 case DW_EH_PE_data4:
539#ifdef CONFIG_64BIT
540 if (end < (const void *)(ptr.p32u + 1))
541 return 0;
542 if (ptrType & DW_EH_PE_signed)
543 value = get_unaligned(ptr.p32s++);
544 else
545 value = get_unaligned(ptr.p32u++);
546 break;
547 case DW_EH_PE_data8:
548 BUILD_BUG_ON(sizeof(u64) != sizeof(value));
549#else
550 BUILD_BUG_ON(sizeof(u32) != sizeof(value));
551#endif
552 case DW_EH_PE_native:
553 if (end < (const void *)(ptr.pul + 1))
554 return 0;
555 value = get_unaligned((unsigned long *)ptr.pul++);
556 break;
557 case DW_EH_PE_leb128:
558 BUILD_BUG_ON(sizeof(uleb128_t) > sizeof(value));
559 value = ptrType & DW_EH_PE_signed ? get_sleb128(&ptr.p8, end)
560 : get_uleb128(&ptr.p8, end);
561 if ((const void *)ptr.p8 > end)
562 return 0;
563 break;
564 default:
565 return 0;
566 }
567 switch (ptrType & DW_EH_PE_ADJUST) {
568 case DW_EH_PE_abs:
569 break;
570 case DW_EH_PE_pcrel:
571 value += (unsigned long)*pLoc;
572 break;
573 default:
574 return 0;
575 }
576 if ((ptrType & DW_EH_PE_indirect)
577 && __get_user(value, (unsigned long __user *)value))
578 return 0;
579 *pLoc = ptr.p8;
580
581 return value;
582}
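The two switches in read_pointer() mirror the structure of the DW_EH_PE encoding byte itself: the low three bits pick the form, bit 3 signedness, bits 4:6 what the value is relative to, and bit 7 a final indirection. Decoding 0x1b, an encoding commonly emitted into .eh_frame (pcrel signed data4), against the constants defined earlier in this file:

#include <assert.h>

#define DW_EH_PE_FORM	0x07
#define DW_EH_PE_data4	0x03
#define DW_EH_PE_signed	0x08
#define DW_EH_PE_ADJUST	0x70
#define DW_EH_PE_pcrel	0x10
#define DW_EH_PE_indirect 0x80

int main(void)
{
	unsigned char enc = 0x1b;

	assert((enc & DW_EH_PE_FORM) == DW_EH_PE_data4);   /* read an s32 */
	assert(enc & DW_EH_PE_signed);			   /* sign-extend */
	assert((enc & DW_EH_PE_ADJUST) == DW_EH_PE_pcrel); /* add *pLoc */
	assert(!(enc & DW_EH_PE_indirect));		   /* no final deref */
	return 0;
}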
583
584static signed fde_pointer_type(const u32 *cie)
585{
586 const u8 *ptr = (const u8 *)(cie + 2);
587 unsigned version = *ptr;
588
589 if (version != 1)
590 return -1; /* unsupported */
591
592 if (*++ptr) {
593 const char *aug;
594 const u8 *end = (const u8 *)(cie + 1) + *cie;
595 uleb128_t len;
596
597 /* check if augmentation size is first (and thus present) */
598 if (*ptr != 'z')
599 return -1;
600
601 /* check if augmentation string is nul-terminated */
602 aug = (const void *)ptr;
603 ptr = memchr(aug, 0, end - ptr);
604 if (ptr == NULL)
605 return -1;
606
607 ++ptr; /* skip terminator */
608 get_uleb128(&ptr, end); /* skip code alignment */
609 get_sleb128(&ptr, end); /* skip data alignment */
610 /* skip return address column */
611 version <= 1 ? (void) ++ptr : (void)get_uleb128(&ptr, end);
612 len = get_uleb128(&ptr, end); /* augmentation length */
613
614 if (ptr + len < ptr || ptr + len > end)
615 return -1;
616
617 end = ptr + len;
618 while (*++aug) {
619 if (ptr >= end)
620 return -1;
621 switch (*aug) {
622 case 'L':
623 ++ptr;
624 break;
625 case 'P':{
626 signed ptrType = *ptr++;
627
628 if (!read_pointer(&ptr, end, ptrType)
629 || ptr > end)
630 return -1;
631 }
632 break;
633 case 'R':
634 return *ptr;
635 default:
636 return -1;
637 }
638 }
639 }
640 return DW_EH_PE_native | DW_EH_PE_abs;
641}
642
643static int advance_loc(unsigned long delta, struct unwind_state *state)
644{
645 state->loc += delta * state->codeAlign;
646
647 /* FIXME_Rajesh: Probably we are defining for the initial range as well;
648 return delta > 0;
649 */
650 unw_debug("delta %3lu => loc 0x%lx: ", delta, state->loc);
651 return 1;
652}
653
654static void set_rule(uleb128_t reg, enum item_location where, uleb128_t value,
655 struct unwind_state *state)
656{
657 if (reg < ARRAY_SIZE(state->regs)) {
658 state->regs[reg].where = where;
659 state->regs[reg].value = value;
660
661#ifdef UNWIND_DEBUG
662 unw_debug("r%lu: ", reg);
663 switch (where) {
664 case Nowhere:
665 unw_debug("s ");
666 break;
667 case Memory:
668 unw_debug("c(%lu) ", value);
669 break;
670 case Register:
671 unw_debug("r(%lu) ", value);
672 break;
673 case Value:
674 unw_debug("v(%lu) ", value);
675 break;
676 default:
677 break;
678 }
679#endif
680 }
681}
682
683static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
684 signed ptrType, struct unwind_state *state)
685{
686 union {
687 const u8 *p8;
688 const u16 *p16;
689 const u32 *p32;
690 } ptr;
691 int result = 1;
692 u8 opcode;
693
694 if (start != state->cieStart) {
695 state->loc = state->org;
696 result =
697 processCFI(state->cieStart, state->cieEnd, 0, ptrType,
698 state);
699 if (targetLoc == 0 && state->label == NULL)
700 return result;
701 }
702 for (ptr.p8 = start; result && ptr.p8 < end;) {
703 switch (*ptr.p8 >> 6) {
704 uleb128_t value;
705
706 case 0:
707 opcode = *ptr.p8++;
708
709 switch (opcode) {
710 case DW_CFA_nop:
711 unw_debug("cfa nop ");
712 break;
713 case DW_CFA_set_loc:
714 state->loc = read_pointer(&ptr.p8, end,
715 ptrType);
716 if (state->loc == 0)
717 result = 0;
718 unw_debug("cfa_set_loc: 0x%lx ", state->loc);
719 break;
720 case DW_CFA_advance_loc1:
721 unw_debug("\ncfa advance loc1:");
722 result = ptr.p8 < end
723 && advance_loc(*ptr.p8++, state);
724 break;
725 case DW_CFA_advance_loc2:
726 value = *ptr.p8++;
727 value += *ptr.p8++ << 8;
728 unw_debug("\ncfa advance loc2:");
729 result = ptr.p8 <= end + 2
730 /* && advance_loc(*ptr.p16++, state); */
731 && advance_loc(value, state);
732 break;
733 case DW_CFA_advance_loc4:
734 unw_debug("\ncfa advance loc4:");
735 result = ptr.p8 <= end + 4
736 && advance_loc(*ptr.p32++, state);
737 break;
738 case DW_CFA_offset_extended:
739 value = get_uleb128(&ptr.p8, end);
740 unw_debug("cfa_offset_extended: ");
741 set_rule(value, Memory,
742 get_uleb128(&ptr.p8, end), state);
743 break;
744 case DW_CFA_val_offset:
745 value = get_uleb128(&ptr.p8, end);
746 set_rule(value, Value,
747 get_uleb128(&ptr.p8, end), state);
748 break;
749 case DW_CFA_offset_extended_sf:
750 value = get_uleb128(&ptr.p8, end);
751 set_rule(value, Memory,
752 get_sleb128(&ptr.p8, end), state);
753 break;
754 case DW_CFA_val_offset_sf:
755 value = get_uleb128(&ptr.p8, end);
756 set_rule(value, Value,
757 get_sleb128(&ptr.p8, end), state);
758 break;
759 case DW_CFA_restore_extended:
760 unw_debug("cfa_restore_extended: ");
761 case DW_CFA_undefined:
762 unw_debug("cfa_undefined: ");
763 case DW_CFA_same_value:
764 unw_debug("cfa_same_value: ");
765 set_rule(get_uleb128(&ptr.p8, end), Nowhere, 0,
766 state);
767 break;
768 case DW_CFA_register:
769 unw_debug("cfa_register: ");
770 value = get_uleb128(&ptr.p8, end);
771 set_rule(value,
772 Register,
773 get_uleb128(&ptr.p8, end), state);
774 break;
775 case DW_CFA_remember_state:
776 unw_debug("cfa_remember_state: ");
777 if (ptr.p8 == state->label) {
778 state->label = NULL;
779 return 1;
780 }
781 if (state->stackDepth >= MAX_STACK_DEPTH)
782 return 0;
783 state->stack[state->stackDepth++] = ptr.p8;
784 break;
785 case DW_CFA_restore_state:
786 unw_debug("cfa_restore_state: ");
787 if (state->stackDepth) {
788 const uleb128_t loc = state->loc;
789 const u8 *label = state->label;
790
791 state->label =
792 state->stack[state->stackDepth - 1];
793 memcpy(&state->cfa, &badCFA,
794 sizeof(state->cfa));
795 memset(state->regs, 0,
796 sizeof(state->regs));
797 state->stackDepth = 0;
798 result =
799 processCFI(start, end, 0, ptrType,
800 state);
801 state->loc = loc;
802 state->label = label;
803 } else
804 return 0;
805 break;
806 case DW_CFA_def_cfa:
807 state->cfa.reg = get_uleb128(&ptr.p8, end);
808 unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg);
809 /*nobreak*/
810 case DW_CFA_def_cfa_offset:
811 state->cfa.offs = get_uleb128(&ptr.p8, end);
812 unw_debug("cfa_def_cfa_offset: 0x%lx ",
813 state->cfa.offs);
814 break;
815 case DW_CFA_def_cfa_sf:
816 state->cfa.reg = get_uleb128(&ptr.p8, end);
817 /*nobreak */
818 case DW_CFA_def_cfa_offset_sf:
819 state->cfa.offs = get_sleb128(&ptr.p8, end)
820 * state->dataAlign;
821 break;
822 case DW_CFA_def_cfa_register:
823				unw_debug("cfa_def_cfa_register: ");
824 state->cfa.reg = get_uleb128(&ptr.p8, end);
825 break;
826 /*todo case DW_CFA_def_cfa_expression: */
827 /*todo case DW_CFA_expression: */
828 /*todo case DW_CFA_val_expression: */
829 case DW_CFA_GNU_args_size:
830 get_uleb128(&ptr.p8, end);
831 break;
832 case DW_CFA_GNU_negative_offset_extended:
833 value = get_uleb128(&ptr.p8, end);
834 set_rule(value,
835 Memory,
836 (uleb128_t) 0 - get_uleb128(&ptr.p8,
837 end),
838 state);
839 break;
840 case DW_CFA_GNU_window_save:
841 default:
842				unw_debug("UNKNOWN OPCODE 0x%x\n", opcode);
843 result = 0;
844 break;
845 }
846 break;
847 case 1:
848 unw_debug("\ncfa_adv_loc: ");
849 result = advance_loc(*ptr.p8++ & 0x3f, state);
850 break;
851 case 2:
852 unw_debug("cfa_offset: ");
853 value = *ptr.p8++ & 0x3f;
854 set_rule(value, Memory, get_uleb128(&ptr.p8, end),
855 state);
856 break;
857 case 3:
858 unw_debug("cfa_restore: ");
859 set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state);
860 break;
861 }
862
863 if (ptr.p8 > end)
864 result = 0;
865 if (result && targetLoc != 0 && targetLoc < state->loc)
866 return 1;
867 }
868
869 return result && ptr.p8 == end && (targetLoc == 0 || (
870 /*todo While in theory this should apply, gcc in practice omits
871 everything past the function prolog, and hence the location
872 never reaches the end of the function.
873 targetLoc < state->loc && */ state->label == NULL));
874}
875
876/* Unwind to previous frame. Returns 0 if successful, negative
877 * number in case of an error. */
878int arc_unwind(struct unwind_frame_info *frame)
879{
880#define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs])
881 const u32 *fde = NULL, *cie = NULL;
882 const u8 *ptr = NULL, *end = NULL;
883 unsigned long pc = UNW_PC(frame) - frame->call_frame;
884 unsigned long startLoc = 0, endLoc = 0, cfa;
885 unsigned i;
886 signed ptrType = -1;
887 uleb128_t retAddrReg = 0;
888 const struct unwind_table *table;
889 struct unwind_state state;
890 unsigned long *fptr;
891 unsigned long addr;
892
893 unw_debug("\n\nUNWIND FRAME:\n");
894 unw_debug("PC: 0x%lx BLINK: 0x%lx, SP: 0x%lx, FP: 0x%x\n",
895 UNW_PC(frame), UNW_BLINK(frame), UNW_SP(frame),
896 UNW_FP(frame));
897
898 if (UNW_PC(frame) == 0)
899 return -EINVAL;
900
901#ifdef UNWIND_DEBUG
902 {
903 unsigned long *sptr = (unsigned long *)UNW_SP(frame);
904 unw_debug("\nStack Dump:\n");
905 for (i = 0; i < 20; i++, sptr++)
906 unw_debug("0x%p: 0x%lx\n", sptr, *sptr);
907 unw_debug("\n");
908 }
909#endif
910
911 table = find_table(pc);
912 if (table != NULL
913 && !(table->size & (sizeof(*fde) - 1))) {
914 const u8 *hdr = table->header;
915 unsigned long tableSize;
916
917 smp_rmb();
918 if (hdr && hdr[0] == 1) {
919 switch (hdr[3] & DW_EH_PE_FORM) {
920 case DW_EH_PE_native:
921 tableSize = sizeof(unsigned long);
922 break;
923 case DW_EH_PE_data2:
924 tableSize = 2;
925 break;
926 case DW_EH_PE_data4:
927 tableSize = 4;
928 break;
929 case DW_EH_PE_data8:
930 tableSize = 8;
931 break;
932 default:
933 tableSize = 0;
934 break;
935 }
936 ptr = hdr + 4;
937 end = hdr + table->hdrsz;
938 if (tableSize && read_pointer(&ptr, end, hdr[1])
939 == (unsigned long)table->address
940 && (i = read_pointer(&ptr, end, hdr[2])) > 0
941 && i == (end - ptr) / (2 * tableSize)
942 && !((end - ptr) % (2 * tableSize))) {
943 do {
944 const u8 *cur =
945 ptr + (i / 2) * (2 * tableSize);
946
947 startLoc = read_pointer(&cur,
948 cur + tableSize,
949 hdr[3]);
950 if (pc < startLoc)
951 i /= 2;
952 else {
953 ptr = cur - tableSize;
954 i = (i + 1) / 2;
955 }
956 } while (startLoc && i > 1);
957 if (i == 1
958 && (startLoc = read_pointer(&ptr,
959 ptr + tableSize,
960 hdr[3])) != 0
961 && pc >= startLoc)
962 fde = (void *)read_pointer(&ptr,
963 ptr +
964 tableSize,
965 hdr[3]);
966 }
967 }
968
969 if (fde != NULL) {
970 cie = cie_for_fde(fde, table);
971 ptr = (const u8 *)(fde + 2);
972 if (cie != NULL
973 && cie != &bad_cie
974 && cie != &not_fde
975 && (ptrType = fde_pointer_type(cie)) >= 0
976 && read_pointer(&ptr,
977 (const u8 *)(fde + 1) + *fde,
978 ptrType) == startLoc) {
979 if (!(ptrType & DW_EH_PE_indirect))
980 ptrType &=
981 DW_EH_PE_FORM | DW_EH_PE_signed;
982 endLoc =
983 startLoc + read_pointer(&ptr,
984 (const u8 *)(fde +
985 1) +
986 *fde, ptrType);
987 if (pc >= endLoc)
988 fde = NULL;
989 } else
990 fde = NULL;
991 }
992 if (fde == NULL) {
993 for (fde = table->address, tableSize = table->size;
994 cie = NULL, tableSize > sizeof(*fde)
995 && tableSize - sizeof(*fde) >= *fde;
996 tableSize -= sizeof(*fde) + *fde,
997 fde += 1 + *fde / sizeof(*fde)) {
998 cie = cie_for_fde(fde, table);
999 if (cie == &bad_cie) {
1000 cie = NULL;
1001 break;
1002 }
1003 if (cie == NULL
1004 || cie == &not_fde
1005 || (ptrType = fde_pointer_type(cie)) < 0)
1006 continue;
1007 ptr = (const u8 *)(fde + 2);
1008 startLoc = read_pointer(&ptr,
1009 (const u8 *)(fde + 1) +
1010 *fde, ptrType);
1011 if (!startLoc)
1012 continue;
1013 if (!(ptrType & DW_EH_PE_indirect))
1014 ptrType &=
1015 DW_EH_PE_FORM | DW_EH_PE_signed;
1016 endLoc =
1017 startLoc + read_pointer(&ptr,
1018 (const u8 *)(fde +
1019 1) +
1020 *fde, ptrType);
1021 if (pc >= startLoc && pc < endLoc)
1022 break;
1023 }
1024 }
1025 }
1026 if (cie != NULL) {
1027 memset(&state, 0, sizeof(state));
1028 state.cieEnd = ptr; /* keep here temporarily */
1029 ptr = (const u8 *)(cie + 2);
1030 end = (const u8 *)(cie + 1) + *cie;
1031 frame->call_frame = 1;
1032 if ((state.version = *ptr) != 1)
1033 cie = NULL; /* unsupported version */
1034 else if (*++ptr) {
1035 /* check if augmentation size is first (thus present) */
1036 if (*ptr == 'z') {
1037 while (++ptr < end && *ptr) {
1038 switch (*ptr) {
1039 /* chk for ignorable or already handled
1040 * nul-terminated augmentation string */
1041 case 'L':
1042 case 'P':
1043 case 'R':
1044 continue;
1045 case 'S':
1046 frame->call_frame = 0;
1047 continue;
1048 default:
1049 break;
1050 }
1051 break;
1052 }
1053 }
1054 if (ptr >= end || *ptr)
1055 cie = NULL;
1056 }
1057 ++ptr;
1058 }
1059 if (cie != NULL) {
1060		/* get code alignment factor */
1061 state.codeAlign = get_uleb128(&ptr, end);
1062		/* get data alignment factor */
1063 state.dataAlign = get_sleb128(&ptr, end);
1064 if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end)
1065 cie = NULL;
1066 else {
1067 retAddrReg =
1068 state.version <= 1 ? *ptr++ : get_uleb128(&ptr,
1069 end);
1070 unw_debug("CIE Frame Info:\n");
1071 unw_debug("return Address register 0x%lx\n",
1072 retAddrReg);
1073 unw_debug("data Align: %ld\n", state.dataAlign);
1074 unw_debug("code Align: %lu\n", state.codeAlign);
1075 /* skip augmentation */
1076 if (((const char *)(cie + 2))[1] == 'z') {
1077 uleb128_t augSize = get_uleb128(&ptr, end);
1078
1079 ptr += augSize;
1080 }
1081 if (ptr > end || retAddrReg >= ARRAY_SIZE(reg_info)
1082 || REG_INVALID(retAddrReg)
1083 || reg_info[retAddrReg].width !=
1084 sizeof(unsigned long))
1085 cie = NULL;
1086 }
1087 }
1088 if (cie != NULL) {
1089 state.cieStart = ptr;
1090 ptr = state.cieEnd;
1091 state.cieEnd = end;
1092 end = (const u8 *)(fde + 1) + *fde;
1093 /* skip augmentation */
1094 if (((const char *)(cie + 2))[1] == 'z') {
1095 uleb128_t augSize = get_uleb128(&ptr, end);
1096
1097 if ((ptr += augSize) > end)
1098 fde = NULL;
1099 }
1100 }
1101 if (cie == NULL || fde == NULL) {
1102#ifdef CONFIG_FRAME_POINTER
1103 unsigned long top, bottom;
1104
1105 top = STACK_TOP_UNW(frame->task);
1106 bottom = STACK_BOTTOM_UNW(frame->task);
1107#if FRAME_RETADDR_OFFSET < 0
1108 if (UNW_SP(frame) < top && UNW_FP(frame) <= UNW_SP(frame)
1109 && bottom < UNW_FP(frame)
1110#else
1111 if (UNW_SP(frame) > top && UNW_FP(frame) >= UNW_SP(frame)
1112 && bottom > UNW_FP(frame)
1113#endif
1114 && !((UNW_SP(frame) | UNW_FP(frame))
1115 & (sizeof(unsigned long) - 1))) {
1116 unsigned long link;
1117
1118 if (!__get_user(link, (unsigned long *)
1119 (UNW_FP(frame) + FRAME_LINK_OFFSET))
1120#if FRAME_RETADDR_OFFSET < 0
1121 && link > bottom && link < UNW_FP(frame)
1122#else
1123 && link > UNW_FP(frame) && link < bottom
1124#endif
1125 && !(link & (sizeof(link) - 1))
1126 && !__get_user(UNW_PC(frame),
1127 (unsigned long *)(UNW_FP(frame)
1128 + FRAME_RETADDR_OFFSET)))
1129 {
1130 UNW_SP(frame) =
1131 UNW_FP(frame) + FRAME_RETADDR_OFFSET
1132#if FRAME_RETADDR_OFFSET < 0
1133 -
1134#else
1135 +
1136#endif
1137 sizeof(UNW_PC(frame));
1138 UNW_FP(frame) = link;
1139 return 0;
1140 }
1141 }
1142#endif
1143 return -ENXIO;
1144 }
1145 state.org = startLoc;
1146 memcpy(&state.cfa, &badCFA, sizeof(state.cfa));
1147
1148 unw_debug("\nProcess instructions\n");
1149
1150 /* process instructions
1151 * For ARC, we optimize by having blink(retAddrReg) with
1152 * the sameValue in the leaf function, so we should not check
1153 * state.regs[retAddrReg].where == Nowhere
1154 */
1155 if (!processCFI(ptr, end, pc, ptrType, &state)
1156 || state.loc > endLoc
1157/* || state.regs[retAddrReg].where == Nowhere */
1158 || state.cfa.reg >= ARRAY_SIZE(reg_info)
1159 || reg_info[state.cfa.reg].width != sizeof(unsigned long)
1160 || state.cfa.offs % sizeof(unsigned long))
1161 return -EIO;
1162
1163#ifdef UNWIND_DEBUG
1164 unw_debug("\n");
1165
1166 unw_debug("\nRegister State Based on the rules parsed from FDE:\n");
1167 for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
1168
1169 if (REG_INVALID(i))
1170 continue;
1171
1172 switch (state.regs[i].where) {
1173 case Nowhere:
1174 break;
1175 case Memory:
1176 unw_debug(" r%d: c(%lu),", i, state.regs[i].value);
1177 break;
1178 case Register:
1179 unw_debug(" r%d: r(%lu),", i, state.regs[i].value);
1180 break;
1181 case Value:
1182 unw_debug(" r%d: v(%lu),", i, state.regs[i].value);
1183 break;
1184 }
1185 }
1186
1187 unw_debug("\n");
1188#endif
1189
1190 /* update frame */
1191#ifndef CONFIG_AS_CFI_SIGNAL_FRAME
1192 if (frame->call_frame
1193 && !UNW_DEFAULT_RA(state.regs[retAddrReg], state.dataAlign))
1194 frame->call_frame = 0;
1195#endif
1196 cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs;
1197 startLoc = min_t(unsigned long, UNW_SP(frame), cfa);
1198 endLoc = max_t(unsigned long, UNW_SP(frame), cfa);
1199 if (STACK_LIMIT(startLoc) != STACK_LIMIT(endLoc)) {
1200 startLoc = min(STACK_LIMIT(cfa), cfa);
1201 endLoc = max(STACK_LIMIT(cfa), cfa);
1202 }
1203
1204 unw_debug("\nCFA reg: 0x%lx, offset: 0x%lx => 0x%lx\n",
1205 state.cfa.reg, state.cfa.offs, cfa);
1206
1207 for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
1208 if (REG_INVALID(i)) {
1209 if (state.regs[i].where == Nowhere)
1210 continue;
1211 return -EIO;
1212 }
1213 switch (state.regs[i].where) {
1214 default:
1215 break;
1216 case Register:
1217 if (state.regs[i].value >= ARRAY_SIZE(reg_info)
1218 || REG_INVALID(state.regs[i].value)
1219 || reg_info[i].width >
1220 reg_info[state.regs[i].value].width)
1221 return -EIO;
1222 switch (reg_info[state.regs[i].value].width) {
1223 case sizeof(u8):
1224 state.regs[i].value =
1225 FRAME_REG(state.regs[i].value, const u8);
1226 break;
1227 case sizeof(u16):
1228 state.regs[i].value =
1229 FRAME_REG(state.regs[i].value, const u16);
1230 break;
1231 case sizeof(u32):
1232 state.regs[i].value =
1233 FRAME_REG(state.regs[i].value, const u32);
1234 break;
1235#ifdef CONFIG_64BIT
1236 case sizeof(u64):
1237 state.regs[i].value =
1238 FRAME_REG(state.regs[i].value, const u64);
1239 break;
1240#endif
1241 default:
1242 return -EIO;
1243 }
1244 break;
1245 }
1246 }
1247
1248 unw_debug("\nRegister state after evaluation with realtime Stack:\n");
1249 fptr = (unsigned long *)(&frame->regs);
1250 for (i = 0; i < ARRAY_SIZE(state.regs); ++i, fptr++) {
1251
1252 if (REG_INVALID(i))
1253 continue;
1254 switch (state.regs[i].where) {
1255 case Nowhere:
1256 if (reg_info[i].width != sizeof(UNW_SP(frame))
1257 || &FRAME_REG(i, __typeof__(UNW_SP(frame)))
1258 != &UNW_SP(frame))
1259 continue;
1260 UNW_SP(frame) = cfa;
1261 break;
1262 case Register:
1263 switch (reg_info[i].width) {
1264 case sizeof(u8):
1265 FRAME_REG(i, u8) = state.regs[i].value;
1266 break;
1267 case sizeof(u16):
1268 FRAME_REG(i, u16) = state.regs[i].value;
1269 break;
1270 case sizeof(u32):
1271 FRAME_REG(i, u32) = state.regs[i].value;
1272 break;
1273#ifdef CONFIG_64BIT
1274 case sizeof(u64):
1275 FRAME_REG(i, u64) = state.regs[i].value;
1276 break;
1277#endif
1278 default:
1279 return -EIO;
1280 }
1281 break;
1282 case Value:
1283 if (reg_info[i].width != sizeof(unsigned long))
1284 return -EIO;
1285 FRAME_REG(i, unsigned long) = cfa + state.regs[i].value
1286 * state.dataAlign;
1287 break;
1288 case Memory:
1289 addr = cfa + state.regs[i].value * state.dataAlign;
1290
1291 if ((state.regs[i].value * state.dataAlign)
1292 % sizeof(unsigned long)
1293 || addr < startLoc
1294 || addr + sizeof(unsigned long) < addr
1295 || addr + sizeof(unsigned long) > endLoc)
1296 return -EIO;
1297
1298 switch (reg_info[i].width) {
1299 case sizeof(u8):
1300 __get_user(FRAME_REG(i, u8),
1301 (u8 __user *)addr);
1302 break;
1303 case sizeof(u16):
1304 __get_user(FRAME_REG(i, u16),
1305 (u16 __user *)addr);
1306 break;
1307 case sizeof(u32):
1308 __get_user(FRAME_REG(i, u32),
1309 (u32 __user *)addr);
1310 break;
1311#ifdef CONFIG_64BIT
1312 case sizeof(u64):
1313 __get_user(FRAME_REG(i, u64),
1314 (u64 __user *)addr);
1315 break;
1316#endif
1317 default:
1318 return -EIO;
1319 }
1320
1321 break;
1322 }
1323 unw_debug("r%d: 0x%lx ", i, *fptr);
1324 }
1325
1326 return 0;
1327#undef FRAME_REG
1328}
1329EXPORT_SYMBOL(arc_unwind);
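
A usage sketch (not part of the patch): a backtrace printer layered on this API seeds an unwind_frame_info from a register snapshot, then calls arc_unwind() in a loop until it fails or PC reaches zero. Only arc_unwind() and the UNW_PC accessor below come from the code above; the walker itself is illustrative.

/* Illustrative stack walker; assumes "frame" was already seeded
 * from a pt_regs snapshot by the caller.
 */
static void walk_stack_sketch(struct unwind_frame_info *frame)
{
	while (UNW_PC(frame)) {
		pr_info("  pc: %pS\n", (void *)UNW_PC(frame));
		if (arc_unwind(frame) < 0)	/* no CFI and FP fallback failed */
			break;
	}
}
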
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..d3c92f52d444
--- /dev/null
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -0,0 +1,163 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <asm-generic/vmlinux.lds.h>
10#include <asm/cache.h>
11#include <asm/page.h>
12#include <asm/thread_info.h>
13
14OUTPUT_ARCH(arc)
15ENTRY(_stext)
16
17#ifdef CONFIG_CPU_BIG_ENDIAN
18jiffies = jiffies_64 + 4;
19#else
20jiffies = jiffies_64;
21#endif
22
23SECTIONS
24{
25 /*
26 * ICCM starts at 0x8000_0000. So if kernel is relocated to some other
27 * address, make sure peripheral at 0x8z doesn't clash with ICCM
28 * Essentially vector is also in ICCM.
29 */
30
31 . = CONFIG_LINUX_LINK_BASE;
32
33 _int_vec_base_lds = .;
34 .vector : {
35 *(.vector)
36 . = ALIGN(PAGE_SIZE);
37 }
38
39#ifdef CONFIG_ARC_HAS_ICCM
40 .text.arcfp : {
41 *(.text.arcfp)
42 . = ALIGN(CONFIG_ARC_ICCM_SZ * 1024);
43 }
44#endif
45
46 /*
 47 * The reason for having a separate subsection .init.ramfs is to
 48 * prevent objdump from including it in kernel dumps
49 *
50 * Reason for having .init.ramfs above .init is to make sure that the
51 * binary blob is tucked away to one side, reducing the displacement
52 * between .init.text and .text, avoiding any possible relocation
53 * errors because of calls from .init.text to .text
54 * Yes such calls do exist. e.g.
55 * decompress_inflate.c:gunzip( ) -> zlib_inflate_workspace( )
56 */
57
58 __init_begin = .;
59
60 .init.ramfs : { INIT_RAM_FS }
61
62 . = ALIGN(PAGE_SIZE);
63 _stext = .;
64
65 HEAD_TEXT_SECTION
66 INIT_TEXT_SECTION(L1_CACHE_BYTES)
67
68 /* INIT_DATA_SECTION open-coded: special INIT_RAM_FS handling */
69 .init.data : {
70 INIT_DATA
71 INIT_SETUP(L1_CACHE_BYTES)
72 INIT_CALLS
73 CON_INITCALL
74 SECURITY_INITCALL
75 }
76
77 .init.arch.info : {
78 __arch_info_begin = .;
79 *(.arch.info.init)
80 __arch_info_end = .;
81 }
82
83 PERCPU_SECTION(L1_CACHE_BYTES)
84
85 /*
 86 * .exit.text is discarded at runtime, not link time, to deal with
87 * references from .debug_frame
88 * It will be init freed, being inside [__init_start : __init_end]
89 */
90 .exit.text : { EXIT_TEXT }
91 .exit.data : { EXIT_DATA }
92
93 . = ALIGN(PAGE_SIZE);
94 __init_end = .;
95
96 .text : {
97 _text = .;
98 TEXT_TEXT
99 SCHED_TEXT
100 LOCK_TEXT
101 KPROBES_TEXT
102 *(.fixup)
103 *(.gnu.warning)
104 }
105 EXCEPTION_TABLE(L1_CACHE_BYTES)
106 _etext = .;
107
108 _sdata = .;
109 RO_DATA_SECTION(PAGE_SIZE)
110
111 /*
112 * 1. this is .data essentially
113 * 2. THREAD_SIZE for init.task, must be kernel-stk sz aligned
114 */
115 RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
116
117 _edata = .;
118
119 BSS_SECTION(0, 0, 0)
120
121#ifdef CONFIG_ARC_DW2_UNWIND
122 . = ALIGN(PAGE_SIZE);
123 .debug_frame : {
124 __start_unwind = .;
125 *(.debug_frame)
126 __end_unwind = .;
127 }
128#else
129 /DISCARD/ : { *(.debug_frame) }
130#endif
131
132 NOTES
133
134 . = ALIGN(PAGE_SIZE);
135 _end = . ;
136
137 STABS_DEBUG
138 DISCARDS
139
140 .arcextmap 0 : {
141 *(.gnu.linkonce.arcextmap.*)
142 *(.arcextmap.*)
143 }
144
 145 /* open-coded because we need .debug_frame separately for unwinding */
146 .debug_aranges 0 : { *(.debug_aranges) }
147 .debug_pubnames 0 : { *(.debug_pubnames) }
148 .debug_info 0 : { *(.debug_info) }
149 .debug_abbrev 0 : { *(.debug_abbrev) }
150 .debug_line 0 : { *(.debug_line) }
151 .debug_str 0 : { *(.debug_str) }
152 .debug_loc 0 : { *(.debug_loc) }
153 .debug_macinfo 0 : { *(.debug_macinfo) }
154
155#ifdef CONFIG_ARC_HAS_DCCM
156 . = CONFIG_ARC_DCCM_BASE;
157 __arc_dccm_base = .;
158 .data.arcfp : {
159 *(.data.arcfp)
160 }
161 . = ALIGN(CONFIG_ARC_DCCM_SZ * 1024);
162#endif
163}
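
One subtlety above is the jiffies overlay: the 32-bit jiffies symbol is aliased onto the low-order word of jiffies_64, which sits at byte offset 4 on big-endian and offset 0 on little-endian. A host-independent C sketch of that equivalence (illustrative, not from the patch):

#include <stdint.h>

/* Lay jiffies_64 out in a chosen byte order, then read four bytes at
 * the offset the linker script uses and reassemble them in that same
 * byte order. The result is the low 32 bits of the counter either way.
 */
static uint32_t read_jiffies(uint64_t j64, int big_endian)
{
	unsigned char mem[8];
	unsigned int off = big_endian ? 4 : 0;	/* matches the lds above */
	uint32_t v = 0;
	int i;

	for (i = 0; i < 8; i++)		/* store jiffies_64 */
		mem[big_endian ? 7 - i : i] = (unsigned char)(j64 >> (8 * i));

	for (i = 0; i < 4; i++)		/* load the 32-bit overlay */
		v |= (uint32_t)mem[off + i] << (big_endian ? 8 * (3 - i) : 8 * i);

	return v;			/* == (uint32_t)j64 in both cases */
}
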
diff --git a/arch/arc/lib/Makefile b/arch/arc/lib/Makefile
new file mode 100644
index 000000000000..db46e200baba
--- /dev/null
+++ b/arch/arc/lib/Makefile
@@ -0,0 +1,9 @@
1#
2# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3#
4# This program is free software; you can redistribute it and/or modify
5# it under the terms of the GNU General Public License version 2 as
6# published by the Free Software Foundation.
7
8lib-y := strchr-700.o strcmp.o strcpy-700.o strlen.o
9lib-y += memcmp.o memcpy-700.o memset.o
diff --git a/arch/arc/lib/memcmp.S b/arch/arc/lib/memcmp.S
new file mode 100644
index 000000000000..bc813d55b6c3
--- /dev/null
+++ b/arch/arc/lib/memcmp.S
@@ -0,0 +1,124 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <asm/linkage.h>
10
11#ifdef __LITTLE_ENDIAN__
12#define WORD2 r2
13#define SHIFT r3
14#else /* BIG ENDIAN */
15#define WORD2 r3
16#define SHIFT r2
17#endif
18
19ARC_ENTRY memcmp
20 or r12,r0,r1
21 asl_s r12,r12,30
22 sub r3,r2,1
23 brls r2,r12,.Lbytewise
24 ld r4,[r0,0]
25 ld r5,[r1,0]
26 lsr.f lp_count,r3,3
27 lpne .Loop_end
28 ld_s WORD2,[r0,4]
29 ld_s r12,[r1,4]
30 brne r4,r5,.Leven
31 ld.a r4,[r0,8]
32 ld.a r5,[r1,8]
33 brne WORD2,r12,.Lodd
34.Loop_end:
35 asl_s SHIFT,SHIFT,3
36 bhs_s .Last_cmp
37 brne r4,r5,.Leven
38 ld r4,[r0,4]
39 ld r5,[r1,4]
40#ifdef __LITTLE_ENDIAN__
41 nop_s
42 ; one more load latency cycle
43.Last_cmp:
44 xor r0,r4,r5
45 bset r0,r0,SHIFT
46 sub_s r1,r0,1
47 bic_s r1,r1,r0
48 norm r1,r1
49 b.d .Leven_cmp
50 and r1,r1,24
51.Leven:
52 xor r0,r4,r5
53 sub_s r1,r0,1
54 bic_s r1,r1,r0
55 norm r1,r1
56 ; slow track insn
57 and r1,r1,24
58.Leven_cmp:
59 asl r2,r4,r1
60 asl r12,r5,r1
61 lsr_s r2,r2,1
62 lsr_s r12,r12,1
63 j_s.d [blink]
64 sub r0,r2,r12
65 .balign 4
66.Lodd:
67 xor r0,WORD2,r12
68 sub_s r1,r0,1
69 bic_s r1,r1,r0
70 norm r1,r1
71 ; slow track insn
72 and r1,r1,24
73 asl_s r2,r2,r1
74 asl_s r12,r12,r1
75 lsr_s r2,r2,1
76 lsr_s r12,r12,1
77 j_s.d [blink]
78 sub r0,r2,r12
79#else /* BIG ENDIAN */
80.Last_cmp:
81 neg_s SHIFT,SHIFT
82 lsr r4,r4,SHIFT
83 lsr r5,r5,SHIFT
84 ; slow track insn
85.Leven:
86 sub.f r0,r4,r5
87 mov.ne r0,1
88 j_s.d [blink]
89 bset.cs r0,r0,31
90.Lodd:
91 cmp_s WORD2,r12
92
93 mov_s r0,1
94 j_s.d [blink]
95 bset.cs r0,r0,31
96#endif /* ENDIAN */
97 .balign 4
98.Lbytewise:
99 breq r2,0,.Lnil
100 ldb r4,[r0,0]
101 ldb r5,[r1,0]
102 lsr.f lp_count,r3
103 lpne .Lbyte_end
104 ldb_s r3,[r0,1]
105 ldb r12,[r1,1]
106 brne r4,r5,.Lbyte_even
107 ldb.a r4,[r0,2]
108 ldb.a r5,[r1,2]
109 brne r3,r12,.Lbyte_odd
110.Lbyte_end:
111 bcc .Lbyte_even
112 brne r4,r5,.Lbyte_even
113 ldb_s r3,[r0,1]
114 ldb_s r12,[r1,1]
115.Lbyte_odd:
116 j_s.d [blink]
117 sub r0,r3,r12
118.Lbyte_even:
119 j_s.d [blink]
120 sub r0,r4,r5
121.Lnil:
122 j_s.d [blink]
123 mov r0,0
124ARC_EXIT memcmp
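
The xor/norm sequences above locate the least significant differing byte of two words so only that byte decides the result. A C model of the little-endian path (illustrative; the asm uses norm to find the bit position instead of a loop):

#include <stdint.h>

static int word_diff_le(uint32_t a, uint32_t b)
{
	uint32_t x = a ^ b;	/* difference mask */
	unsigned int shift = 0;

	if (!x)
		return 0;
	x &= -x;		/* isolate the least significant set bit */
	while (!(x & 0xffu)) {	/* round its position down to a byte */
		x >>= 8;
		shift += 8;
	}
	return (int)((a >> shift) & 0xffu) - (int)((b >> shift) & 0xffu);
}
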
diff --git a/arch/arc/lib/memcpy-700.S b/arch/arc/lib/memcpy-700.S
new file mode 100644
index 000000000000..b64cc10ac918
--- /dev/null
+++ b/arch/arc/lib/memcpy-700.S
@@ -0,0 +1,66 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <asm/linkage.h>
10
11ARC_ENTRY memcpy
12 or r3,r0,r1
13 asl_s r3,r3,30
14 mov_s r5,r0
15 brls.d r2,r3,.Lcopy_bytewise
16 sub.f r3,r2,1
17 ld_s r12,[r1,0]
18 asr.f lp_count,r3,3
19 bbit0.d r3,2,.Lnox4
20 bmsk_s r2,r2,1
21 st.ab r12,[r5,4]
22 ld.a r12,[r1,4]
23.Lnox4:
24 lppnz .Lendloop
25 ld_s r3,[r1,4]
26 st.ab r12,[r5,4]
27 ld.a r12,[r1,8]
28 st.ab r3,[r5,4]
29.Lendloop:
30 breq r2,0,.Last_store
31 ld r3,[r5,0]
32#ifdef __LITTLE_ENDIAN__
33 add3 r2,-1,r2
34 ; uses long immediate
35 xor_s r12,r12,r3
36 bmsk r12,r12,r2
37 xor_s r12,r12,r3
38#else /* BIG ENDIAN */
39 sub3 r2,31,r2
40 ; uses long immediate
41 xor_s r3,r3,r12
42 bmsk r3,r3,r2
43 xor_s r12,r12,r3
44#endif /* ENDIAN */
45.Last_store:
46 j_s.d [blink]
47 st r12,[r5,0]
48
49 .balign 4
50.Lcopy_bytewise:
51 jcs [blink]
52 ldb_s r12,[r1,0]
53 lsr.f lp_count,r3
54 bhs_s .Lnox1
55 stb.ab r12,[r5,1]
56 ldb.a r12,[r1,1]
57.Lnox1:
58 lppnz .Lendbloop
59 ldb_s r3,[r1,1]
60 stb.ab r12,[r5,1]
61 ldb.a r12,[r1,2]
62 stb.ab r3,[r5,1]
63.Lendbloop:
64 j_s.d [blink]
65 stb r12,[r5,0]
66ARC_EXIT memcpy
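
The xor/bmsk/xor dance after .Lendloop merges the trailing 0-3 source bytes into the destination's existing word so a single word store finishes the copy. A C model of that merge for the little-endian case (illustrative):

#include <stdint.h>

static uint32_t merge_tail_le(uint32_t src, uint32_t dst, unsigned int n_bytes)
{
	/* keep the low n_bytes from src, the rest from dst (n_bytes < 4) */
	uint32_t mask = (1u << (8 * n_bytes)) - 1;

	/* ((src ^ dst) & mask) ^ dst == (src & mask) | (dst & ~mask) */
	return ((src ^ dst) & mask) ^ dst;
}
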
diff --git a/arch/arc/lib/memset.S b/arch/arc/lib/memset.S
new file mode 100644
index 000000000000..9b2d88d2e141
--- /dev/null
+++ b/arch/arc/lib/memset.S
@@ -0,0 +1,59 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <asm/linkage.h>
10
11#define SMALL 7 /* Must be at least 6 to deal with alignment/loop issues. */
12
13ARC_ENTRY memset
14 mov_s r4,r0
15 or r12,r0,r2
16 bmsk.f r12,r12,1
17 extb_s r1,r1
18 asl r3,r1,8
19 beq.d .Laligned
20 or_s r1,r1,r3
21 brls r2,SMALL,.Ltiny
22 add r3,r2,r0
23 stb r1,[r3,-1]
24 bclr_s r3,r3,0
25 stw r1,[r3,-2]
26 bmsk.f r12,r0,1
27 add_s r2,r2,r12
28 sub.ne r2,r2,4
29 stb.ab r1,[r4,1]
30 and r4,r4,-2
31 stw.ab r1,[r4,2]
32 and r4,r4,-4
33.Laligned: ; This code address should be aligned for speed.
34 asl r3,r1,16
35 lsr.f lp_count,r2,2
36 or_s r1,r1,r3
37 lpne .Loop_end
38 st.ab r1,[r4,4]
39.Loop_end:
40 j_s [blink]
41
42 .balign 4
43.Ltiny:
44 mov.f lp_count,r2
45 lpne .Ltiny_end
46 stb.ab r1,[r4,1]
47.Ltiny_end:
48 j_s [blink]
49ARC_EXIT memset
50
51; memzero: @r0 = mem, @r1 = size_t
52; memset: @r0 = mem, @r1 = char, @r2 = size_t
53
54ARC_ENTRY memzero
55 ; adjust bzero args to memset args
56 mov r2, r1
57 mov r1, 0
58	b memset ;tail call so no need to tinker with blink
59ARC_EXIT memzero
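
The extb/asl/or prologue above widens the fill byte into a word-sized pattern before the store loop. Equivalent C (illustrative):

#include <stdint.h>

static uint32_t replicate_byte(uint8_t c)
{
	uint32_t v = c;		/* extb_s r1,r1 */

	v |= v << 8;		/* asl r3,r1,8 ; or_s r1,r1,r3 */
	v |= v << 16;		/* asl r3,r1,16 ; or_s r1,r1,r3 */
	return v;		/* e.g. 0xcc -> 0xcccccccc */
}
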
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
new file mode 100644
index 000000000000..99c10475d477
--- /dev/null
+++ b/arch/arc/lib/strchr-700.S
@@ -0,0 +1,123 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/* ARC700 has a relatively long pipeline and branch prediction, so we want
10 to avoid branches that are hard to predict. On the other hand, the
11 presence of the norm instruction makes it easier to operate on whole
12 words branch-free. */
13
14#include <asm/linkage.h>
15
16ARC_ENTRY strchr
17 extb_s r1,r1
18 asl r5,r1,8
19 bmsk r2,r0,1
20 or r5,r5,r1
21 mov_s r3,0x01010101
22 breq.d r2,r0,.Laligned
23 asl r4,r5,16
24 sub_s r0,r0,r2
25 asl r7,r2,3
26 ld_s r2,[r0]
27#ifdef __LITTLE_ENDIAN__
28 asl r7,r3,r7
29#else
30 lsr r7,r3,r7
31#endif
32 or r5,r5,r4
33 ror r4,r3
34 sub r12,r2,r7
35 bic_s r12,r12,r2
36 and r12,r12,r4
37 brne.d r12,0,.Lfound0_ua
38 xor r6,r2,r5
39 ld.a r2,[r0,4]
40 sub r12,r6,r7
41 bic r12,r12,r6
42 and r7,r12,r4
43 breq r7,0,.Loop ; For speed, we want this branch to be unaligned.
44 b .Lfound_char ; Likewise this one.
45; /* We require this code address to be unaligned for speed... */
46.Laligned:
47 ld_s r2,[r0]
48 or r5,r5,r4
49 ror r4,r3
50; /* ... so that this code address is aligned, for itself and ... */
51.Loop:
52 sub r12,r2,r3
53 bic_s r12,r12,r2
54 and r12,r12,r4
55 brne.d r12,0,.Lfound0
56 xor r6,r2,r5
57 ld.a r2,[r0,4]
58 sub r12,r6,r3
59 bic r12,r12,r6
60 and r7,r12,r4
61 breq r7,0,.Loop /* ... so that this branch is unaligned. */
62 ; Found searched-for character. r0 has already advanced to next word.
63#ifdef __LITTLE_ENDIAN__
64/* We only need the information about the first matching byte
65 (i.e. the least significant matching byte) to be exact,
66 hence there is no problem with carry effects. */
67.Lfound_char:
68 sub r3,r7,1
69 bic r3,r3,r7
70 norm r2,r3
71 sub_s r0,r0,1
72 asr_s r2,r2,3
73 j.d [blink]
74 sub_s r0,r0,r2
75
76 .balign 4
77.Lfound0_ua:
78 mov r3,r7
79.Lfound0:
80 sub r3,r6,r3
81 bic r3,r3,r6
82 and r2,r3,r4
83 or_s r12,r12,r2
84 sub_s r3,r12,1
85 bic_s r3,r3,r12
86 norm r3,r3
87 add_s r0,r0,3
88 asr_s r12,r3,3
89 asl.f 0,r2,r3
90 sub_s r0,r0,r12
91 j_s.d [blink]
92 mov.pl r0,0
93#else /* BIG ENDIAN */
94.Lfound_char:
95 lsr r7,r7,7
96
97 bic r2,r7,r6
98 norm r2,r2
99 sub_s r0,r0,4
100 asr_s r2,r2,3
101 j.d [blink]
102 add_s r0,r0,r2
103
104.Lfound0_ua:
105 mov_s r3,r7
106.Lfound0:
107 asl_s r2,r2,7
108 or r7,r6,r4
109 bic_s r12,r12,r2
110 sub r2,r7,r3
111 or r2,r2,r6
112 bic r12,r2,r12
113 bic.f r3,r4,r12
114 norm r3,r3
115
116 add.pl r3,r3,1
117 asr_s r12,r3,3
118 asl.f 0,r2,r3
119 add_s r0,r0,r12
120 j_s.d [blink]
121 mov.mi r0,0
122#endif /* ENDIAN */
123ARC_EXIT strchr
diff --git a/arch/arc/lib/strcmp.S b/arch/arc/lib/strcmp.S
new file mode 100644
index 000000000000..5dc802b45cf3
--- /dev/null
+++ b/arch/arc/lib/strcmp.S
@@ -0,0 +1,96 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/* This is optimized primarily for the ARC700.
10 It would be possible to speed up the loops by one cycle / word
11 respective one cycle / byte by forcing double source 1 alignment, unrolling
12 by a factor of two, and speculatively loading the second word / byte of
13 source 1; however, that would increase the overhead for loop setup / finish,
14 and strcmp might often terminate early. */
15
16#include <asm/linkage.h>
17
18ARC_ENTRY strcmp
19 or r2,r0,r1
20 bmsk_s r2,r2,1
21 brne r2,0,.Lcharloop
22 mov_s r12,0x01010101
23 ror r5,r12
24.Lwordloop:
25 ld.ab r2,[r0,4]
26 ld.ab r3,[r1,4]
27 nop_s
28 sub r4,r2,r12
29 bic r4,r4,r2
30 and r4,r4,r5
31 brne r4,0,.Lfound0
32 breq r2,r3,.Lwordloop
33#ifdef __LITTLE_ENDIAN__
34 xor r0,r2,r3 ; mask for difference
35 sub_s r1,r0,1
36 bic_s r0,r0,r1 ; mask for least significant difference bit
37 sub r1,r5,r0
38 xor r0,r5,r1 ; mask for least significant difference byte
39 and_s r2,r2,r0
40 and_s r3,r3,r0
41#endif /* LITTLE ENDIAN */
42 cmp_s r2,r3
43 mov_s r0,1
44 j_s.d [blink]
45 bset.lo r0,r0,31
46
47 .balign 4
48#ifdef __LITTLE_ENDIAN__
49.Lfound0:
50 xor r0,r2,r3 ; mask for difference
51 or r0,r0,r4 ; or in zero indicator
52 sub_s r1,r0,1
53 bic_s r0,r0,r1 ; mask for least significant difference bit
54 sub r1,r5,r0
55 xor r0,r5,r1 ; mask for least significant difference byte
56 and_s r2,r2,r0
57 and_s r3,r3,r0
58 sub.f r0,r2,r3
59 mov.hi r0,1
60 j_s.d [blink]
61 bset.lo r0,r0,31
62#else /* BIG ENDIAN */
63 /* The zero-detection above can mis-detect 0x01 bytes as zeroes
 64	   because of carry-propagation from a less significant zero byte.
65 We can compensate for this by checking that bit0 is zero.
66 This compensation is not necessary in the step where we
67 get a low estimate for r2, because in any affected bytes
68 we already have 0x00 or 0x01, which will remain unchanged
69 when bit 7 is cleared. */
70 .balign 4
71.Lfound0:
72 lsr r0,r4,8
73 lsr_s r1,r2
74 bic_s r2,r2,r0 ; get low estimate for r2 and get ...
75 bic_s r0,r0,r1 ; <this is the adjusted mask for zeros>
76 or_s r3,r3,r0 ; ... high estimate r3 so that r2 > r3 will ...
77 cmp_s r3,r2 ; ... be independent of trailing garbage
78 or_s r2,r2,r0 ; likewise for r3 > r2
79 bic_s r3,r3,r0
80 rlc r0,0 ; r0 := r2 > r3 ? 1 : 0
81 cmp_s r2,r3
82 j_s.d [blink]
83 bset.lo r0,r0,31
84#endif /* ENDIAN */
85
86 .balign 4
87.Lcharloop:
88 ldb.ab r2,[r0,1]
89 ldb.ab r3,[r1,1]
90 nop_s
91 breq r2,0,.Lcmpend
92 breq r2,r3,.Lcharloop
93.Lcmpend:
94 j_s.d [blink]
95 sub r0,r2,r3
96ARC_EXIT strcmp
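
The word loop hinges on the classic zero-byte test: with r12 = 0x01010101 and r5 = ror(r12) = 0x80808080, the expression below is nonzero exactly when some byte of the word is NUL. In C (illustrative):

#include <stdint.h>

static int has_zero_byte(uint32_t x)
{
	return ((x - 0x01010101u) & ~x & 0x80808080u) != 0;
}
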
diff --git a/arch/arc/lib/strcpy-700.S b/arch/arc/lib/strcpy-700.S
new file mode 100644
index 000000000000..b7ca4ae81d88
--- /dev/null
+++ b/arch/arc/lib/strcpy-700.S
@@ -0,0 +1,70 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
10 If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
11 it 8 byte aligned. Thus, we can do a little read-ahead, without
12 dereferencing a cache line that we should not touch.
13 Note that short and long instructions have been scheduled to avoid
14 branch stalls.
15 The beq_s to r3z could be made unaligned & long to avoid a stall
 16   there, but it is not likely to be taken often, and it
 17   would also be likely to cost an unaligned mispredict at the next call. */
18
19#include <asm/linkage.h>
20
21ARC_ENTRY strcpy
22 or r2,r0,r1
23 bmsk_s r2,r2,1
24 brne.d r2,0,charloop
25 mov_s r10,r0
26 ld_s r3,[r1,0]
27 mov r8,0x01010101
28 bbit0.d r1,2,loop_start
29 ror r12,r8
30 sub r2,r3,r8
31 bic_s r2,r2,r3
32 tst_s r2,r12
33 bne r3z
34 mov_s r4,r3
35 .balign 4
36loop:
37 ld.a r3,[r1,4]
38 st.ab r4,[r10,4]
39loop_start:
40 ld.a r4,[r1,4]
41 sub r2,r3,r8
42 bic_s r2,r2,r3
43 tst_s r2,r12
44 bne_s r3z
45 st.ab r3,[r10,4]
46 sub r2,r4,r8
47 bic r2,r2,r4
48 tst r2,r12
49 beq loop
50 mov_s r3,r4
51#ifdef __LITTLE_ENDIAN__
52r3z: bmsk.f r1,r3,7
53 lsr_s r3,r3,8
54#else
55r3z: lsr.f r1,r3,24
56 asl_s r3,r3,8
57#endif
58 bne.d r3z
59 stb.ab r1,[r10,1]
60 j_s [blink]
61
62 .balign 4
63charloop:
64 ldb.ab r3,[r1,1]
65
66
67 brne.d r3,0,charloop
68 stb.ab r3,[r10,1]
69 j [blink]
70ARC_EXIT strcpy
diff --git a/arch/arc/lib/strlen.S b/arch/arc/lib/strlen.S
new file mode 100644
index 000000000000..39759e099696
--- /dev/null
+++ b/arch/arc/lib/strlen.S
@@ -0,0 +1,83 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <asm/linkage.h>
10
11ARC_ENTRY strlen
12 or r3,r0,7
13 ld r2,[r3,-7]
14 ld.a r6,[r3,-3]
15 mov r4,0x01010101
16 ; uses long immediate
17#ifdef __LITTLE_ENDIAN__
18 asl_s r1,r0,3
19 btst_s r0,2
20 asl r7,r4,r1
21 ror r5,r4
22 sub r1,r2,r7
23 bic_s r1,r1,r2
24 mov.eq r7,r4
25 sub r12,r6,r7
26 bic r12,r12,r6
27 or.eq r12,r12,r1
28 and r12,r12,r5
29 brne r12,0,.Learly_end
30#else /* BIG ENDIAN */
31 ror r5,r4
32 btst_s r0,2
33 mov_s r1,31
34 sub3 r7,r1,r0
35 sub r1,r2,r4
36 bic_s r1,r1,r2
37 bmsk r1,r1,r7
38 sub r12,r6,r4
39 bic r12,r12,r6
40 bmsk.ne r12,r12,r7
41 or.eq r12,r12,r1
42 and r12,r12,r5
43 brne r12,0,.Learly_end
44#endif /* ENDIAN */
45
46.Loop:
47 ld_s r2,[r3,4]
48 ld.a r6,[r3,8]
49 ; stall for load result
50 sub r1,r2,r4
51 bic_s r1,r1,r2
52 sub r12,r6,r4
53 bic r12,r12,r6
54 or r12,r12,r1
55 and r12,r12,r5
56 breq r12,0,.Loop
57.Lend:
58 and.f r1,r1,r5
59 sub.ne r3,r3,4
60 mov.eq r1,r12
61#ifdef __LITTLE_ENDIAN__
62 sub_s r2,r1,1
63 bic_s r2,r2,r1
64 norm r1,r2
65 sub_s r0,r0,3
66 lsr_s r1,r1,3
67 sub r0,r3,r0
68 j_s.d [blink]
69 sub r0,r0,r1
70#else /* BIG ENDIAN */
71 lsr_s r1,r1,7
72 mov.eq r2,r6
73 bic_s r1,r1,r2
74 norm r1,r1
75 sub r0,r3,r0
76 lsr_s r1,r1,3
77 j_s.d [blink]
78 add r0,r0,r1
79#endif /* ENDIAN */
80.Learly_end:
81 b.d .Lend
82 sub_s.ne r1,r1,r1
83ARC_EXIT strlen
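
Putting the same zero-byte test to work, a C rendering of the aligned little-endian loop above (illustrative; the asm additionally covers the unaligned head, strides two words per iteration, and handles big-endian):

#include <stddef.h>
#include <stdint.h>

static size_t strlen_words_le(const char *s)
{
	const uint32_t *p = (const uint32_t *)s;	/* assumes s is 4-byte aligned */
	uint32_t x, z;

	for (;;) {
		x = *p++;
		z = (x - 0x01010101u) & ~x & 0x80808080u;
		if (z) {
			const char *base = (const char *)(p - 1);
			unsigned int i = 0;

			/* the lowest set 0x80 flag marks the first NUL byte */
			while (!(z & 0x80u)) {
				z >>= 8;
				i++;
			}
			return (size_t)(base - s) + i;
		}
	}
}
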
diff --git a/arch/arc/mm/Makefile b/arch/arc/mm/Makefile
new file mode 100644
index 000000000000..168dc146a8f6
--- /dev/null
+++ b/arch/arc/mm/Makefile
@@ -0,0 +1,10 @@
1#
2# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3#
4# This program is free software; you can redistribute it and/or modify
5# it under the terms of the GNU General Public License version 2 as
6# published by the Free Software Foundation.
7#
8
9obj-y := extable.o ioremap.o dma.o fault.o init.o
10obj-y += tlb.o tlbex.o cache_arc700.o
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
new file mode 100644
index 000000000000..88d617d84234
--- /dev/null
+++ b/arch/arc/mm/cache_arc700.c
@@ -0,0 +1,768 @@
1/*
2 * ARC700 VIPT Cache Management
3 *
4 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
11 * -flush_cache_dup_mm (fork)
12 * -likewise for flush_cache_mm (exit/execve)
13 * -likewise for flush_cache_range,flush_cache_page (munmap, exit, COW-break)
14 *
15 * vineetg: Apr 2011
 16 * -Now that MMU can support larger pg sz (16K), the determination of
17 * aliasing shd not be based on assumption of 8k pg
18 *
19 * vineetg: Mar 2011
20 * -optimised version of flush_icache_range( ) for making I/D coherent
21 * when vaddr is available (agnostic of num of aliases)
22 *
23 * vineetg: Mar 2011
24 * -Added documentation about I-cache aliasing on ARC700 and the way it
25 * was handled up until MMU V2.
26 * -Spotted a three year old bug when killing the 4 aliases, which needs
27 * bottom 2 bits, so we need to do paddr | {0x00, 0x01, 0x02, 0x03}
28 * instead of paddr | {0x00, 0x01, 0x10, 0x11}
29 * (Rajesh you owe me one now)
30 *
31 * vineetg: Dec 2010
32 * -Off-by-one error when computing num_of_lines to flush
33 * This broke signal handling with bionic which uses synthetic sigret stub
34 *
35 * vineetg: Mar 2010
36 * -GCC can't generate ZOL for core cache flush loops.
 37 *  Convert them into iteration-based loops as opposed to while (start < end) types
38 *
39 * Vineetg: July 2009
40 * -In I-cache flush routine we used to chk for aliasing for every line INV.
41 * Instead now we setup routines per cache geometry and invoke them
42 * via function pointers.
43 *
44 * Vineetg: Jan 2009
45 * -Cache Line flush routines used to flush an extra line beyond end addr
46 * because check was while (end >= start) instead of (end > start)
47 * =Some call sites had to work around by doing -1, -4 etc to end param
 48 *   =Some callers didn't care. This was especially bad in case of INV routines
49 * which would discard valid data (cause of the horrible ext2 bug
50 * in ARC IDE driver)
51 *
52 * vineetg: June 11th 2008: Fixed flush_icache_range( )
 53 * -Since ARC700 caches are not coherent (I$ doesn't snoop D$) both need
54 * to be flushed, which it was not doing.
55 * -load_module( ) passes vmalloc addr (Kernel Virtual Addr) to the API,
56 * however ARC cache maintenance OPs require PHY addr. Thus need to do
57 * vmalloc_to_phy.
58 * -Also added optimisation there, that for range > PAGE SIZE we flush the
59 * entire cache in one shot rather than line by line. For e.g. a module
60 * with Code sz 600k, old code flushed 600k worth of cache (line-by-line),
61 * while cache is only 16 or 32k.
62 */
63
64#include <linux/module.h>
65#include <linux/mm.h>
66#include <linux/sched.h>
67#include <linux/cache.h>
68#include <linux/mmu_context.h>
69#include <linux/syscalls.h>
70#include <linux/uaccess.h>
71#include <asm/cacheflush.h>
72#include <asm/cachectl.h>
73#include <asm/setup.h>
74
75
76#ifdef CONFIG_ARC_HAS_ICACHE
77static void __ic_line_inv_no_alias(unsigned long, int);
78static void __ic_line_inv_2_alias(unsigned long, int);
79static void __ic_line_inv_4_alias(unsigned long, int);
80
 81/* Holds the ptr to flush routine, depending on size due to aliasing issues */
82static void (*___flush_icache_rtn) (unsigned long, int);
83#endif
84
85char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
86{
87 int n = 0;
88 unsigned int c = smp_processor_id();
89
90#define PR_CACHE(p, enb, str) \
91{ \
92 if (!(p)->ver) \
93 n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
94 else \
95 n += scnprintf(buf + n, len - n, \
96 str"\t\t: (%uK) VIPT, %dway set-asc, %ub Line %s\n", \
97 TO_KB((p)->sz), (p)->assoc, (p)->line_len, \
98 enb ? "" : "DISABLED (kernel-build)"); \
99}
100
101 PR_CACHE(&cpuinfo_arc700[c].icache, __CONFIG_ARC_HAS_ICACHE, "I-Cache");
102 PR_CACHE(&cpuinfo_arc700[c].dcache, __CONFIG_ARC_HAS_DCACHE, "D-Cache");
103
104 return buf;
105}
106
107/*
 108 * Read the Cache Build Configuration Registers, Decode them and save into
109 * the cpuinfo structure for later use.
110 * No Validation done here, simply read/convert the BCRs
111 */
112void __init read_decode_cache_bcr(void)
113{
114 struct bcr_cache ibcr, dbcr;
115 struct cpuinfo_arc_cache *p_ic, *p_dc;
116 unsigned int cpu = smp_processor_id();
117
118 p_ic = &cpuinfo_arc700[cpu].icache;
119 READ_BCR(ARC_REG_IC_BCR, ibcr);
120
121 if (ibcr.config == 0x3)
122 p_ic->assoc = 2;
123 p_ic->line_len = 8 << ibcr.line_len;
124 p_ic->sz = 0x200 << ibcr.sz;
125 p_ic->ver = ibcr.ver;
126
127 p_dc = &cpuinfo_arc700[cpu].dcache;
128 READ_BCR(ARC_REG_DC_BCR, dbcr);
129
130 if (dbcr.config == 0x2)
131 p_dc->assoc = 4;
132 p_dc->line_len = 16 << dbcr.line_len;
133 p_dc->sz = 0x200 << dbcr.sz;
134 p_dc->ver = dbcr.ver;
135}
136
137/*
 138 * 1. Validate the Cache Geometry (compile time config matches hardware)
 139 * 2. If I-cache suffers from aliasing, set up workarounds (different flush rtn)
 140 *    (aliasing D-cache configurations are not supported YET)
 141 * 3. Enable the Caches, setup default flush mode for D-Cache
 142 * 4. Calculate the SHMLBA used by user space
143 */
144void __init arc_cache_init(void)
145{
146 unsigned int temp;
147 unsigned int cpu = smp_processor_id();
148 struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
149 struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
150 int way_pg_ratio = way_pg_ratio;
151 char str[256];
152
153 printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
154
155 if (!ic->ver)
156 goto chk_dc;
157
158#ifdef CONFIG_ARC_HAS_ICACHE
159 /* 1. Confirm some of I-cache params which Linux assumes */
160 if ((ic->assoc != ARC_ICACHE_WAYS) ||
161 (ic->line_len != ARC_ICACHE_LINE_LEN)) {
162 panic("Cache H/W doesn't match kernel Config");
163 }
164#if (CONFIG_ARC_MMU_VER > 2)
165 if (ic->ver != 3) {
166 if (running_on_hw)
167 panic("Cache ver doesn't match MMU ver\n");
168
169 /* For ISS - suggest the toggles to use */
170 pr_err("Use -prop=icache_version=3,-prop=dcache_version=3\n");
171
172 }
173#endif
174
175 /*
176 * if Cache way size is <= page size then no aliasing exhibited
177 * otherwise ratio determines num of aliases.
178 * e.g. 32K I$, 2 way set assoc, 8k pg size
179 * way-sz = 32k/2 = 16k
180 * way-pg-ratio = 16k/8k = 2, so 2 aliases possible
181 * (meaning 1 line could be in 2 possible locations).
182 */
183 way_pg_ratio = ic->sz / ARC_ICACHE_WAYS / PAGE_SIZE;
184 switch (way_pg_ratio) {
185 case 0:
186 case 1:
187 ___flush_icache_rtn = __ic_line_inv_no_alias;
188 break;
189 case 2:
190 ___flush_icache_rtn = __ic_line_inv_2_alias;
191 break;
192 case 4:
193 ___flush_icache_rtn = __ic_line_inv_4_alias;
194 break;
195 default:
196 panic("Unsupported I-Cache Sz\n");
197 }
198#endif
199
200 /* Enable/disable I-Cache */
201 temp = read_aux_reg(ARC_REG_IC_CTRL);
202
203#ifdef CONFIG_ARC_HAS_ICACHE
204 temp &= ~IC_CTRL_CACHE_DISABLE;
205#else
206 temp |= IC_CTRL_CACHE_DISABLE;
207#endif
208
209 write_aux_reg(ARC_REG_IC_CTRL, temp);
210
211chk_dc:
212 if (!dc->ver)
213 return;
214
215#ifdef CONFIG_ARC_HAS_DCACHE
216 if ((dc->assoc != ARC_DCACHE_WAYS) ||
217 (dc->line_len != ARC_DCACHE_LINE_LEN)) {
218 panic("Cache H/W doesn't match kernel Config");
219 }
220
221 /* check for D-Cache aliasing */
222 if ((dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE)
223 panic("D$ aliasing not handled right now\n");
224#endif
225
 226	/* Set the default Invalidate Mode to "simply discard dirty lines"
 227	 * as this is more frequent than flush before invalidate
 228	 * Of course we toggle this default behaviour when desired
229 */
230 temp = read_aux_reg(ARC_REG_DC_CTRL);
231 temp &= ~DC_CTRL_INV_MODE_FLUSH;
232
233#ifdef CONFIG_ARC_HAS_DCACHE
234 /* Enable D-Cache: Clear Bit 0 */
235 write_aux_reg(ARC_REG_DC_CTRL, temp & ~IC_CTRL_CACHE_DISABLE);
236#else
237 /* Flush D cache */
238 write_aux_reg(ARC_REG_DC_FLSH, 0x1);
239 /* Disable D cache */
240 write_aux_reg(ARC_REG_DC_CTRL, temp | IC_CTRL_CACHE_DISABLE);
241#endif
242
243 return;
244}
245
246#define OP_INV 0x1
247#define OP_FLUSH 0x2
248#define OP_FLUSH_N_INV 0x3
249
250#ifdef CONFIG_ARC_HAS_DCACHE
251
252/***************************************************************
253 * Machine specific helpers for Entire D-Cache or Per Line ops
254 */
255
256static inline void wait_for_flush(void)
257{
258 while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
259 ;
260}
261
262/*
263 * Operation on Entire D-Cache
264 * @cacheop = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
265 * Note that constant propagation ensures all the checks are gone
266 * in generated code
267 */
268static inline void __dc_entire_op(const int cacheop)
269{
270 unsigned long flags, tmp = tmp;
271 int aux;
272
273 local_irq_save(flags);
274
275 if (cacheop == OP_FLUSH_N_INV) {
276 /* Dcache provides 2 cmd: FLUSH or INV
277 * INV inturn has sub-modes: DISCARD or FLUSH-BEFORE
278 * flush-n-inv is achieved by INV cmd but with IM=1
279 * Default INV sub-mode is DISCARD, which needs to be toggled
280 */
281 tmp = read_aux_reg(ARC_REG_DC_CTRL);
282 write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
283 }
284
285 if (cacheop & OP_INV) /* Inv or flush-n-inv use same cmd reg */
286 aux = ARC_REG_DC_IVDC;
287 else
288 aux = ARC_REG_DC_FLSH;
289
290 write_aux_reg(aux, 0x1);
291
292 if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
293 wait_for_flush();
294
295 /* Switch back the DISCARD ONLY Invalidate mode */
296 if (cacheop == OP_FLUSH_N_INV)
297 write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
298
299 local_irq_restore(flags);
300}
301
302/*
303 * Per Line Operation on D-Cache
304 * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
 305 * Its sole purpose is to help gcc generate ZOL
306 */
307static inline void __dc_line_loop(unsigned long start, unsigned long sz,
308 int aux_reg)
309{
310 int num_lines, slack;
311
312 /* Ensure we properly floor/ceil the non-line aligned/sized requests
313 * and have @start - aligned to cache line and integral @num_lines.
 314	 * This however can be avoided for page sized requests since:
315 * -@start will be cache-line aligned already (being page aligned)
316 * -@sz will be integral multiple of line size (being page sized).
317 */
318 if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
319 slack = start & ~DCACHE_LINE_MASK;
320 sz += slack;
321 start -= slack;
322 }
323
324 num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);
325
326 while (num_lines-- > 0) {
327#if (CONFIG_ARC_MMU_VER > 2)
328 /*
329 * Just as for I$, in MMU v3, D$ ops also require
330 * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
331 * But we pass phy addr for both. This works since Linux
332 * doesn't support aliasing configs for D$, yet.
333 * Thus paddr is enough to provide both tag and index.
334 */
335 write_aux_reg(ARC_REG_DC_PTAG, start);
336#endif
337 write_aux_reg(aux_reg, start);
338 start += ARC_DCACHE_LINE_LEN;
339 }
340}
341
342/*
343 * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
344 */
345static inline void __dc_line_op(unsigned long start, unsigned long sz,
346 const int cacheop)
347{
348 unsigned long flags, tmp = tmp;
349 int aux;
350
351 local_irq_save(flags);
352
353 if (cacheop == OP_FLUSH_N_INV) {
354 /*
355 * Dcache provides 2 cmd: FLUSH or INV
356 * INV inturn has sub-modes: DISCARD or FLUSH-BEFORE
357 * flush-n-inv is achieved by INV cmd but with IM=1
358 * Default INV sub-mode is DISCARD, which needs to be toggled
359 */
360 tmp = read_aux_reg(ARC_REG_DC_CTRL);
361 write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
362 }
363
364 if (cacheop & OP_INV) /* Inv / flush-n-inv use same cmd reg */
365 aux = ARC_REG_DC_IVDL;
366 else
367 aux = ARC_REG_DC_FLDL;
368
369 __dc_line_loop(start, sz, aux);
370
371 if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
372 wait_for_flush();
373
374 /* Switch back the DISCARD ONLY Invalidate mode */
375 if (cacheop == OP_FLUSH_N_INV)
376 write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
377
378 local_irq_restore(flags);
379}
380
381#else
382
383#define __dc_entire_op(cacheop)
384#define __dc_line_op(start, sz, cacheop)
385
386#endif /* CONFIG_ARC_HAS_DCACHE */
387
388
389#ifdef CONFIG_ARC_HAS_ICACHE
390
391/*
392 * I-Cache Aliasing in ARC700 VIPT caches
393 *
394 * For fetching code from I$, ARC700 uses vaddr (embedded in program code)
395 * to "index" into SET of cache-line and paddr from MMU to match the TAG
396 * in the WAYS of SET.
397 *
 398 * However the CDU interface (to flush/inv lines from software) only takes
399 * paddr (to have simpler hardware interface). For simpler cases, using paddr
400 * alone suffices.
401 * e.g. 2-way-set-assoc, 16K I$ (8k MMU pg sz, 32b cache line size):
402 * way_sz = cache_sz / num_ways = 16k/2 = 8k
403 * num_sets = way_sz / line_sz = 8k/32 = 256 => 8 bits
404 * Ignoring the bottom 5 bits corresp to the off within a 32b cacheline,
405 * bits req for calc set-index = bits 12:5 (0 based). Since this range fits
406 * inside the bottom 13 bits of paddr, which are same for vaddr and paddr
 407 * (with 8k pg sz), paddr alone can be safely used by CDU to unambiguously
408 * locate a cache-line.
409 *
 410 * However for a different sized cache, say 32k I$, above math yields need
411 * for 14 bits of vaddr to locate a cache line, which can't be provided by
412 * paddr, since the bit 13 (0 based) might differ between the two.
413 *
 414 * This lack of extra bits needed for correct line addressing defines the
415 * classical problem of Cache aliasing with VIPT architectures
416 * num_aliases = 1 << extra_bits
417 * e.g. 2-way-set-assoc, 32K I$ with 8k MMU pg sz => 2 aliases
418 * 2-way-set-assoc, 64K I$ with 8k MMU pg sz => 4 aliases
419 * 2-way-set-assoc, 16K I$ with 8k MMU pg sz => NO aliases
420 *
421 * ------------------
422 * MMU v1/v2 (Fixed Page Size 8k)
423 * ------------------
 424 * The solution was to provide CDU with these additional vaddr bits. These
425 * would be bits [x:13], x would depend on cache-geom.
426 * H/w folks chose [17:13] to be a future safe range, and moreso these 5 bits
427 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
428 * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
429 * represent the offset within cache-line. The adv of using this "clumsy"
430 * interface for additional info was no new reg was needed in CDU.
431 *
432 * 17:13 represented the max num of bits passable, actual bits needed were
433 * fewer, based on the num-of-aliases possible.
434 * -for 2 alias possibility, only bit 13 needed (32K cache)
435 * -for 4 alias possibility, bits 14:13 needed (64K cache)
436 *
437 * Since vaddr was not available for all instances of I$ flush req by core
438 * kernel, the only safe way (non-optimal though) was to kill all possible
 439 * lines which could represent an alias (even if they didn't represent one
440 * in execution).
441 * e.g. for 64K I$, 4 aliases possible, so we did
442 * flush start
443 * flush start | 0x01
444 * flush start | 0x2
445 * flush start | 0x3
446 *
447 * The penalty was invoking the operation itself, since tag match is anyways
448 * paddr based, a line which didn't represent an alias would not match the
 449 * paddr, hence won't be killed
450 *
451 * Note that aliasing concerns are independent of line-sz for a given cache
452 * geometry (size + set_assoc) because the extra bits required by line-sz are
453 * reduced from the set calc.
454 * e.g. 2-way-set-assoc, 32K I$ with 8k MMU pg sz and using math above
455 * 32b line-sz: 9 bits set-index-calc, 5 bits offset-in-line => 1 extra bit
456 * 64b line-sz: 8 bits set-index-calc, 6 bits offset-in-line => 1 extra bit
457 *
458 * ------------------
459 * MMU v3
460 * ------------------
461 * This ver of MMU supports var page sizes (1k-16k) - Linux will support
462 * 8k (default), 16k and 4k.
 463 * However from hardware perspective, smaller page sizes aggravate aliasing
464 * meaning more vaddr bits needed to disambiguate the cache-line-op ;
465 * the existing scheme of piggybacking won't work for certain configurations.
 466 * Two new registers IC_PTAG and DC_PTAG introduced.
467 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
468 */
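
The alias math above in code form (illustrative helper, not part of the patch):

/* Alias count for a given I$ geometry, per the math above. */
static unsigned int num_icache_aliases(unsigned int cache_sz,
				       unsigned int ways,
				       unsigned int page_sz)
{
	unsigned int way_sz = cache_sz / ways;

	/* way size <= page size: vaddr and paddr agree on all the bits
	 * the set-index needs, so no aliasing; otherwise the ratio
	 * counts the possible aliases
	 */
	return (way_sz <= page_sz) ? 1 : way_sz / page_sz;
}
/* e.g. num_icache_aliases(32 << 10, 2, 8 << 10) == 2 */
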
469
470/***********************************************************
471 * Machine specific helpers for per line I-Cache invalidate.
 472 * 3 routines to account for 1, 2, 4 aliases possible
473 */
474
475static void __ic_line_inv_no_alias(unsigned long start, int num_lines)
476{
477 while (num_lines-- > 0) {
478#if (CONFIG_ARC_MMU_VER > 2)
479 write_aux_reg(ARC_REG_IC_PTAG, start);
480#endif
481 write_aux_reg(ARC_REG_IC_IVIL, start);
482 start += ARC_ICACHE_LINE_LEN;
483 }
484}
485
486static void __ic_line_inv_2_alias(unsigned long start, int num_lines)
487{
488 while (num_lines-- > 0) {
489
490#if (CONFIG_ARC_MMU_VER > 2)
491 /*
492 * MMU v3, CDU prog model (for line ops) now uses a new IC_PTAG
493 * reg to pass the "tag" bits and existing IVIL reg only looks
494 * at bits relevant for "index" (details above)
495 * Programming Notes:
496 * -when writing tag to PTAG reg, bit chopping can be avoided,
497 * CDU ignores non-tag bits.
498 * -Ideally "index" must be computed from vaddr, but it is not
499 * avail in these rtns. So to be safe, we kill the lines in all
500 * possible indexes corresp to num of aliases possible for
501 * given cache config.
502 */
503 write_aux_reg(ARC_REG_IC_PTAG, start);
504 write_aux_reg(ARC_REG_IC_IVIL,
505 start & ~(0x1 << PAGE_SHIFT));
506 write_aux_reg(ARC_REG_IC_IVIL, start | (0x1 << PAGE_SHIFT));
507#else
508 write_aux_reg(ARC_REG_IC_IVIL, start);
509 write_aux_reg(ARC_REG_IC_IVIL, start | 0x01);
510#endif
511 start += ARC_ICACHE_LINE_LEN;
512 }
513}
514
515static void __ic_line_inv_4_alias(unsigned long start, int num_lines)
516{
517 while (num_lines-- > 0) {
518
519#if (CONFIG_ARC_MMU_VER > 2)
520 write_aux_reg(ARC_REG_IC_PTAG, start);
521
522 write_aux_reg(ARC_REG_IC_IVIL,
523 start & ~(0x3 << PAGE_SHIFT));
524 write_aux_reg(ARC_REG_IC_IVIL,
525 start & ~(0x2 << PAGE_SHIFT));
526 write_aux_reg(ARC_REG_IC_IVIL,
527 start & ~(0x1 << PAGE_SHIFT));
528 write_aux_reg(ARC_REG_IC_IVIL, start | (0x3 << PAGE_SHIFT));
529#else
530 write_aux_reg(ARC_REG_IC_IVIL, start);
531 write_aux_reg(ARC_REG_IC_IVIL, start | 0x01);
532 write_aux_reg(ARC_REG_IC_IVIL, start | 0x02);
533 write_aux_reg(ARC_REG_IC_IVIL, start | 0x03);
534#endif
535 start += ARC_ICACHE_LINE_LEN;
536 }
537}
538
539static void __ic_line_inv(unsigned long start, unsigned long sz)
540{
541 unsigned long flags;
542 int num_lines, slack;
543
544 /*
545 * Ensure we properly floor/ceil the non-line aligned/sized requests
546 * and have @start - aligned to cache line, and integral @num_lines
547 * However page sized flushes can be compile time optimised.
548 * -@start will be cache-line aligned already (being page aligned)
549 * -@sz will be integral multiple of line size (being page sized).
550 */
551 if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
552 slack = start & ~ICACHE_LINE_MASK;
553 sz += slack;
554 start -= slack;
555 }
556
557 num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
558
559 local_irq_save(flags);
560 (*___flush_icache_rtn) (start, num_lines);
561 local_irq_restore(flags);
562}
563
564/* Unlike routines above, having vaddr for flush op (along with paddr),
565 * prevents the need to speculatively kill the lines in multiple sets
566 * based on ratio of way_sz : pg_sz
567 */
568static void __ic_line_inv_vaddr(unsigned long phy_start,
569 unsigned long vaddr, unsigned long sz)
570{
571 unsigned long flags;
572 int num_lines, slack;
573 unsigned int addr;
574
575 slack = phy_start & ~ICACHE_LINE_MASK;
576 sz += slack;
577 phy_start -= slack;
578 num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
579
580#if (CONFIG_ARC_MMU_VER > 2)
581 vaddr &= ~ICACHE_LINE_MASK;
582 addr = phy_start;
583#else
584 /* bits 17:13 of vaddr go as bits 4:0 of paddr */
585 addr = phy_start | ((vaddr >> 13) & 0x1F);
586#endif
587
588 local_irq_save(flags);
589 while (num_lines-- > 0) {
590#if (CONFIG_ARC_MMU_VER > 2)
591 /* tag comes from phy addr */
592 write_aux_reg(ARC_REG_IC_PTAG, addr);
593
594 /* index bits come from vaddr */
595 write_aux_reg(ARC_REG_IC_IVIL, vaddr);
596 vaddr += ARC_ICACHE_LINE_LEN;
597#else
 598		/* this paddr contains vaddr bits as needed */
599 write_aux_reg(ARC_REG_IC_IVIL, addr);
600#endif
601 addr += ARC_ICACHE_LINE_LEN;
602 }
603 local_irq_restore(flags);
604}
605
606#else
607
608#define __ic_line_inv(start, sz)
609#define __ic_line_inv_vaddr(pstart, vstart, sz)
610
611#endif /* CONFIG_ARC_HAS_ICACHE */
612
613
614/***********************************************************
615 * Exported APIs
616 */
617
618/* TBD: use pg_arch_1 to optimize this */
619void flush_dcache_page(struct page *page)
620{
621 __dc_line_op((unsigned long)page_address(page), PAGE_SIZE, OP_FLUSH);
622}
623EXPORT_SYMBOL(flush_dcache_page);
624
625
626void dma_cache_wback_inv(unsigned long start, unsigned long sz)
627{
628 __dc_line_op(start, sz, OP_FLUSH_N_INV);
629}
630EXPORT_SYMBOL(dma_cache_wback_inv);
631
632void dma_cache_inv(unsigned long start, unsigned long sz)
633{
634 __dc_line_op(start, sz, OP_INV);
635}
636EXPORT_SYMBOL(dma_cache_inv);
637
638void dma_cache_wback(unsigned long start, unsigned long sz)
639{
640 __dc_line_op(start, sz, OP_FLUSH);
641}
642EXPORT_SYMBOL(dma_cache_wback);
643
644/*
645 * This is API for making I/D Caches consistent when modifying code
646 * (loadable modules, kprobes, etc)
647 * This is called on insmod, with kernel virtual address for CODE of
648 * the module. ARC cache maintenance ops require PHY address thus we
649 * need to convert vmalloc addr to PHY addr
650 */
651void flush_icache_range(unsigned long kstart, unsigned long kend)
652{
653 unsigned int tot_sz, off, sz;
654 unsigned long phy, pfn;
655 unsigned long flags;
656
657 /* printk("Kernel Cache Cohenercy: %lx to %lx\n",kstart, kend); */
658
659 /* This is not the right API for user virtual address */
660 if (kstart < TASK_SIZE) {
661 BUG_ON("Flush icache range for user virtual addr space");
662 return;
663 }
664
665 /* Shortcut for bigger flush ranges.
666 * Here we don't care if this was kernel virtual or phy addr
667 */
668 tot_sz = kend - kstart;
669 if (tot_sz > PAGE_SIZE) {
670 flush_cache_all();
671 return;
672 }
673
674 /* Case: Kernel Phy addr (0x8000_0000 onwards) */
675 if (likely(kstart > PAGE_OFFSET)) {
676 __ic_line_inv(kstart, kend - kstart);
677 __dc_line_op(kstart, kend - kstart, OP_FLUSH);
678 return;
679 }
680
681 /*
682 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
683 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
684 * handling of kernel vaddr.
685 *
686 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
687 * it still needs to handle a 2 page scenario, where the range
688 * straddles across 2 virtual pages and hence need for loop
689 */
690 while (tot_sz > 0) {
691 off = kstart % PAGE_SIZE;
692 pfn = vmalloc_to_pfn((void *)kstart);
693 phy = (pfn << PAGE_SHIFT) + off;
694 sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
695 local_irq_save(flags);
696 __dc_line_op(phy, sz, OP_FLUSH);
697 __ic_line_inv(phy, sz);
698 local_irq_restore(flags);
699 kstart += sz;
700 tot_sz -= sz;
701 }
702}
703
704/*
 705 * Optimised ver of flush_icache_range() with specific callers: ptrace/signals
706 * where vaddr is also available. This allows passing both vaddr and paddr
 707 * bits to CDU for cache flush, short-circuiting the current pessimistic algo
708 * which kills all possible aliases.
709 * An added adv of knowing that vaddr is user-vaddr avoids various checks
710 * and handling for k-vaddr, k-paddr as done in orig ver above
711 */
712void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr,
713 int len)
714{
715 __ic_line_inv_vaddr(paddr, u_vaddr, len);
716 __dc_line_op(paddr, len, OP_FLUSH);
717}
718
719/*
720 * XXX: This also needs to be optim using pg_arch_1
721 * This is called when a page-cache page is about to be mapped into a
722 * user process' address space. It offers an opportunity for a
723 * port to ensure d-cache/i-cache coherency if necessary.
724 */
725void flush_icache_page(struct vm_area_struct *vma, struct page *page)
726{
727 if (!(vma->vm_flags & VM_EXEC))
728 return;
729
730 __ic_line_inv((unsigned long)page_address(page), PAGE_SIZE);
731}
732
733void flush_icache_all(void)
734{
735 unsigned long flags;
736
737 local_irq_save(flags);
738
739 write_aux_reg(ARC_REG_IC_IVIC, 1);
740
 741	/* lr will not complete till the icache inv operation is over */
742 read_aux_reg(ARC_REG_IC_CTRL);
743 local_irq_restore(flags);
744}
745
746noinline void flush_cache_all(void)
747{
748 unsigned long flags;
749
750 local_irq_save(flags);
751
752 flush_icache_all();
753 __dc_entire_op(OP_FLUSH_N_INV);
754
755 local_irq_restore(flags);
756
757}
758
759/**********************************************************************
760 * Explicit Cache flush request from user space via syscall
761 * Needed for JITs which generate code on the fly
762 */
763SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
764{
765 /* TBD: optimize this */
766 flush_cache_all();
767 return 0;
768}
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
new file mode 100644
index 000000000000..12cc6485b218
--- /dev/null
+++ b/arch/arc/mm/dma.c
@@ -0,0 +1,94 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/*
10 * DMA Coherent API Notes
11 *
12 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
 13 * implemented by accessing it using a kernel virtual address, with
14 * Cache bit off in the TLB entry.
15 *
16 * The default DMA address == Phy address which is 0x8000_0000 based.
 17 * A platform/device can make it zero based, by overriding
18 * plat_{dma,kernel}_addr_to_{kernel,dma}
19 */
20
21#include <linux/dma-mapping.h>
22#include <linux/dma-debug.h>
23#include <linux/export.h>
24#include <asm/cacheflush.h>
25
26/*
27 * Helpers for Coherent DMA API.
28 */
29void *dma_alloc_noncoherent(struct device *dev, size_t size,
30 dma_addr_t *dma_handle, gfp_t gfp)
31{
32 void *paddr;
33
34 /* This is linear addr (0x8000_0000 based) */
35 paddr = alloc_pages_exact(size, gfp);
36 if (!paddr)
37 return NULL;
38
39 /* This is bus address, platform dependent */
40 *dma_handle = plat_kernel_addr_to_dma(dev, paddr);
41
42 return paddr;
43}
44EXPORT_SYMBOL(dma_alloc_noncoherent);
45
46void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
47 dma_addr_t dma_handle)
48{
49 free_pages_exact((void *)plat_dma_addr_to_kernel(dev, dma_handle),
50 size);
51}
52EXPORT_SYMBOL(dma_free_noncoherent);
53
54void *dma_alloc_coherent(struct device *dev, size_t size,
55 dma_addr_t *dma_handle, gfp_t gfp)
56{
57 void *paddr, *kvaddr;
58
59 /* This is linear addr (0x8000_0000 based) */
60 paddr = alloc_pages_exact(size, gfp);
61 if (!paddr)
62 return NULL;
63
64 /* This is kernel Virtual address (0x7000_0000 based) */
65 kvaddr = ioremap_nocache((unsigned long)paddr, size);
66 if (kvaddr != NULL)
67 memset(kvaddr, 0, size);
68
69 /* This is bus address, platform dependent */
70 *dma_handle = plat_kernel_addr_to_dma(dev, paddr);
71
72 return kvaddr;
73}
74EXPORT_SYMBOL(dma_alloc_coherent);
75
76void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
77 dma_addr_t dma_handle)
78{
79 iounmap((void __force __iomem *)kvaddr);
80
81 free_pages_exact((void *)plat_dma_addr_to_kernel(dev, dma_handle),
82 size);
83}
84EXPORT_SYMBOL(dma_free_coherent);
85
86/*
87 * Helper for streaming DMA...
88 */
89void __arc_dma_cache_sync(unsigned long paddr, size_t size,
90 enum dma_data_direction dir)
91{
92 __inline_dma_cache_sync(paddr, size, dir);
93}
94EXPORT_SYMBOL(__arc_dma_cache_sync);
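
Typical driver-side usage of the coherent API above, as a sketch; the function name and page-sized ring are made up for illustration:

#include <linux/dma-mapping.h>

static int mydev_alloc_ring(struct device *dev)
{
	dma_addr_t bus;
	void *ring;

	ring = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* CPU accesses go through the uncached kvaddr returned above;
	 * the device would be programmed with the bus address in "bus".
	 */

	dma_free_coherent(dev, PAGE_SIZE, ring, bus);
	return 0;
}
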
diff --git a/arch/arc/mm/extable.c b/arch/arc/mm/extable.c
new file mode 100644
index 000000000000..014172ba8432
--- /dev/null
+++ b/arch/arc/mm/extable.c
@@ -0,0 +1,63 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Borrowed heavily from MIPS
9 */
10
11#include <linux/module.h>
12#include <linux/uaccess.h>
13
14int fixup_exception(struct pt_regs *regs)
15{
16 const struct exception_table_entry *fixup;
17
18 fixup = search_exception_tables(instruction_pointer(regs));
19 if (fixup) {
20 regs->ret = fixup->fixup;
21
22 return 1;
23 }
24
25 return 0;
26}
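For reference, each exception-table entry pairs the address of a potentially faulting uaccess instruction with the address of its fixup stub, and the function above simply redirects the return address to that stub. A rough C model of the lookup, with a hand-rolled table standing in for the linker-generated one:

    /* Sketch only: the real table lives in the __ex_table section */
    struct ex_entry { unsigned long insn, fixup; };

    static unsigned long find_fixup(const struct ex_entry *tbl, int n,
                                    unsigned long fault_pc)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (tbl[i].insn == fault_pc)
                            return tbl[i].fixup;  /* resume here instead */
            return 0;                             /* unhandled: kernel oops */
    }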
27
28#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
29
30long arc_copy_from_user_noinline(void *to, const void __user * from,
31 unsigned long n)
32{
33 return __arc_copy_from_user(to, from, n);
34}
35EXPORT_SYMBOL(arc_copy_from_user_noinline);
36
37long arc_copy_to_user_noinline(void __user *to, const void *from,
38 unsigned long n)
39{
40 return __arc_copy_to_user(to, from, n);
41}
42EXPORT_SYMBOL(arc_copy_to_user_noinline);
43
44unsigned long arc_clear_user_noinline(void __user *to,
45 unsigned long n)
46{
47 return __arc_clear_user(to, n);
48}
49EXPORT_SYMBOL(arc_clear_user_noinline);
50
51long arc_strncpy_from_user_noinline (char *dst, const char __user *src,
52 long count)
53{
54 return __arc_strncpy_from_user(dst, src, count);
55}
56EXPORT_SYMBOL(arc_strncpy_from_user_noinline);
57
58long arc_strnlen_user_noinline(const char __user *src, long n)
59{
60 return __arc_strnlen_user(src, n);
61}
62EXPORT_SYMBOL(arc_strnlen_user_noinline);
63#endif
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
new file mode 100644
index 000000000000..af55aab803d2
--- /dev/null
+++ b/arch/arc/mm/fault.c
@@ -0,0 +1,228 @@
1/* Page Fault Handling for ARC (TLB Miss / ProtV)
2 *
3 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/signal.h>
11#include <linux/interrupt.h>
12#include <linux/sched.h>
13#include <linux/errno.h>
14#include <linux/ptrace.h>
15#include <linux/version.h>
16#include <linux/uaccess.h>
17#include <linux/kdebug.h>
18#include <asm/pgalloc.h>
19
20static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
21{
22 /*
23 * Synchronize this task's top level page-table
24 * with the 'reference' page table.
25 */
26 pgd_t *pgd, *pgd_k;
27 pud_t *pud, *pud_k;
28 pmd_t *pmd, *pmd_k;
29
30 pgd = pgd_offset_fast(mm, address);
31 pgd_k = pgd_offset_k(address);
32
33 if (!pgd_present(*pgd_k))
34 goto bad_area;
35
36 pud = pud_offset(pgd, address);
37 pud_k = pud_offset(pgd_k, address);
38 if (!pud_present(*pud_k))
39 goto bad_area;
40
41 pmd = pmd_offset(pud, address);
42 pmd_k = pmd_offset(pud_k, address);
43 if (!pmd_present(*pmd_k))
44 goto bad_area;
45
46 set_pmd(pmd, *pmd_k);
47
48 /* XXX: create the TLB entry here */
49 return 0;
50
51bad_area:
52 return 1;
53}
54
55void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
56 unsigned long cause_code)
57{
58 struct vm_area_struct *vma = NULL;
59 struct task_struct *tsk = current;
60 struct mm_struct *mm = tsk->mm;
61 siginfo_t info;
62 int fault, ret;
63 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
64 (write ? FAULT_FLAG_WRITE : 0);
65
66 /*
67 * We fault-in kernel-space virtual memory on-demand. The
68 * 'reference' page table is init_mm.pgd.
69 *
70 * NOTE! We MUST NOT take any locks for this case. We may
71 * be in an interrupt or a critical region, and should
72 * only copy the information from the master page table,
73 * nothing more.
74 */
75 if (address >= VMALLOC_START && address <= VMALLOC_END) {
76 ret = handle_vmalloc_fault(mm, address);
77 if (unlikely(ret))
78 goto bad_area_nosemaphore;
79 else
80 return;
81 }
82
83 info.si_code = SEGV_MAPERR;
84
85 /*
86 * If we're in an interrupt or have no user
87 * context, we must not take the fault..
88 */
89 if (in_atomic() || !mm)
90 goto no_context;
91
92retry:
93 down_read(&mm->mmap_sem);
94 vma = find_vma(mm, address);
95 if (!vma)
96 goto bad_area;
97 if (vma->vm_start <= address)
98 goto good_area;
99 if (!(vma->vm_flags & VM_GROWSDOWN))
100 goto bad_area;
101 if (expand_stack(vma, address))
102 goto bad_area;
103
104 /*
105 * Ok, we have a good vm_area for this memory access, so
106 * we can handle it..
107 */
108good_area:
109 info.si_code = SEGV_ACCERR;
110
111 /* Handle protection violation, execute on heap or stack */
112
113 if (cause_code == ((ECR_V_PROTV << 16) | ECR_C_PROTV_INST_FETCH))
114 goto bad_area;
115
116 if (write) {
117 if (!(vma->vm_flags & VM_WRITE))
118 goto bad_area;
119 } else {
120 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
121 goto bad_area;
122 }
123
124survive:
125 /*
126 * If for any reason at all we couldn't handle the fault,
127 * make sure we exit gracefully rather than endlessly redo
128 * the fault.
129 */
130 fault = handle_mm_fault(mm, vma, address, flags);
131
132 /* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
133 if (unlikely(fatal_signal_pending(current))) {
134 if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
135 up_read(&mm->mmap_sem);
136 if (user_mode(regs))
137 return;
138 }
139
140 if (likely(!(fault & VM_FAULT_ERROR))) {
141 if (flags & FAULT_FLAG_ALLOW_RETRY) {
142 /* To avoid updating stats twice for retry case */
143 if (fault & VM_FAULT_MAJOR)
144 tsk->maj_flt++;
145 else
146 tsk->min_flt++;
147
148 if (fault & VM_FAULT_RETRY) {
149 flags &= ~FAULT_FLAG_ALLOW_RETRY;
150 flags |= FAULT_FLAG_TRIED;
151 goto retry;
152 }
153 }
154
155 /* Fault Handled Gracefully */
156 up_read(&mm->mmap_sem);
157 return;
158 }
159
160 /* TBD: switch to pagefault_out_of_memory() */
161 if (fault & VM_FAULT_OOM)
162 goto out_of_memory;
163 else if (fault & VM_FAULT_SIGBUS)
164 goto do_sigbus;
165
166 /* no man's land */
167 BUG();
168
169 /*
170 * Something tried to access memory that isn't in our memory map..
171 * Fix it, but check if it's kernel or user first..
172 */
173bad_area:
174 up_read(&mm->mmap_sem);
175
176bad_area_nosemaphore:
177 /* User mode accesses just cause a SIGSEGV */
178 if (user_mode(regs)) {
179 tsk->thread.fault_address = address;
180 tsk->thread.cause_code = cause_code;
181 info.si_signo = SIGSEGV;
182 info.si_errno = 0;
183 /* info.si_code has been set above */
184 info.si_addr = (void __user *)address;
185 force_sig_info(SIGSEGV, &info, tsk);
186 return;
187 }
188
189no_context:
190 /* Are we prepared to handle this kernel fault?
191 *
192 * (The kernel has valid exception-points in the source
193	 * when it accesses user-memory. When it fails in one
194 * of those points, we find it in a table and do a jump
195 * to some fixup code that loads an appropriate error
196 * code)
197 */
198 if (fixup_exception(regs))
199 return;
200
201 die("Oops", regs, address, cause_code);
202
203out_of_memory:
204 if (is_global_init(tsk)) {
205 yield();
206 goto survive;
207 }
208 up_read(&mm->mmap_sem);
209
210 if (user_mode(regs))
211 do_group_exit(SIGKILL); /* This will never return */
212
213 goto no_context;
214
215do_sigbus:
216 up_read(&mm->mmap_sem);
217
218 if (!user_mode(regs))
219 goto no_context;
220
221 tsk->thread.fault_address = address;
222 tsk->thread.cause_code = cause_code;
223 info.si_signo = SIGBUS;
224 info.si_errno = 0;
225 info.si_code = BUS_ADRERR;
226 info.si_addr = (void __user *)address;
227 force_sig_info(SIGBUS, &info, tsk);
228}
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
new file mode 100644
index 000000000000..caf797de23fc
--- /dev/null
+++ b/arch/arc/mm/init.c
@@ -0,0 +1,187 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/kernel.h>
10#include <linux/mm.h>
11#include <linux/bootmem.h>
12#include <linux/memblock.h>
13#ifdef CONFIG_BLK_DEV_INITRD
14#include <linux/initrd.h>
15#endif
16#include <linux/swap.h>
17#include <linux/module.h>
18#include <asm/page.h>
19#include <asm/pgalloc.h>
20#include <asm/sections.h>
21#include <asm/arcregs.h>
22
23pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
24char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE);
25EXPORT_SYMBOL(empty_zero_page);
26
27/* Default tot mem from .config */
28static unsigned long arc_mem_sz = 0x20000000; /* some default */
29
30/* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
31static int __init setup_mem_sz(char *str)
32{
33 arc_mem_sz = memparse(str, NULL) & PAGE_MASK;
34
35 /* early console might not be setup yet - it will show up later */
36 pr_info("\"mem=%s\": mem sz set to %ldM\n", str, TO_MB(arc_mem_sz));
37
38 return 0;
39}
40early_param("mem", setup_mem_sz);
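memparse() understands the usual K/M/G suffixes, so the override above behaves as follows (worked out here purely for illustration):

    /* "mem=64M"  -> memparse("64M", NULL)  == 64 << 20  == 0x04000000
     * "mem=512K" -> memparse("512K", NULL) == 512 << 10 == 0x00080000
     * The subsequent "& PAGE_MASK" drops any sub-page remainder.
     */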
41
42void __init early_init_dt_add_memory_arch(u64 base, u64 size)
43{
44 arc_mem_sz = size & PAGE_MASK;
45 pr_info("Memory size set via devicetree %ldM\n", TO_MB(arc_mem_sz));
46}
47
48/*
49 * First memory setup routine called from setup_arch()
50 * 1. setup swapper's mm @init_mm
51 * 2. Count the pages we have and setup bootmem allocator
52 * 3. zone setup
53 */
54void __init setup_arch_memory(void)
55{
56 unsigned long zones_size[MAX_NR_ZONES] = { 0, 0 };
57 unsigned long end_mem = CONFIG_LINUX_LINK_BASE + arc_mem_sz;
58
59 init_mm.start_code = (unsigned long)_text;
60 init_mm.end_code = (unsigned long)_etext;
61 init_mm.end_data = (unsigned long)_edata;
62 init_mm.brk = (unsigned long)_end;
63
64 /*
65 * We do it here, so that memory is correctly instantiated
66	 * even if "mem=xxx" cmdline override is given and/or
67	 * DT has memory node. Each causes an update to @arc_mem_sz
68	 * and we finally add memory here
69 */
70 memblock_add(CONFIG_LINUX_LINK_BASE, arc_mem_sz);
71
72 /*------------- externs in mm need setting up ---------------*/
73
74 /* first page of system - kernel .vector starts here */
75 min_low_pfn = PFN_DOWN(CONFIG_LINUX_LINK_BASE);
76
77 /* Last usable page of low mem (no HIGHMEM yet for ARC port) */
78 max_low_pfn = max_pfn = PFN_DOWN(end_mem);
79
80 max_mapnr = num_physpages = max_low_pfn - min_low_pfn;
81
82 /*------------- reserve kernel image -----------------------*/
83 memblock_reserve(CONFIG_LINUX_LINK_BASE,
84 __pa(_end) - CONFIG_LINUX_LINK_BASE);
85
86 memblock_dump_all();
87
88 /*-------------- node setup --------------------------------*/
89 memset(zones_size, 0, sizeof(zones_size));
90 zones_size[ZONE_NORMAL] = num_physpages;
91
92 /*
93 * We can't use the helper free_area_init(zones[]) because it uses
94 * PAGE_OFFSET to compute the @min_low_pfn which would be wrong
95 * when our kernel doesn't start at PAGE_OFFSET, i.e.
96 * PAGE_OFFSET != CONFIG_LINUX_LINK_BASE
97 */
98 free_area_init_node(0, /* node-id */
99 zones_size, /* num pages per zone */
100 min_low_pfn, /* first pfn of node */
101 NULL); /* NO holes */
102}
103
104/*
105 * mem_init - initializes memory
106 *
107 * Frees up bootmem
108 * Calculates and displays memory available/used
109 */
110void __init mem_init(void)
111{
112 int codesize, datasize, initsize, reserved_pages, free_pages;
113 int tmp;
114
115 high_memory = (void *)(CONFIG_LINUX_LINK_BASE + arc_mem_sz);
116
117 totalram_pages = free_all_bootmem();
118
119 /* count all reserved pages [kernel code/data/mem_map..] */
120 reserved_pages = 0;
121 for (tmp = 0; tmp < max_mapnr; tmp++)
122 if (PageReserved(mem_map + tmp))
123 reserved_pages++;
124
125 /* XXX: nr_free_pages() is equivalent */
126 free_pages = max_mapnr - reserved_pages;
127
128 /*
129	 * For the purpose of the display below, split up "reserved mem":
130	 * kernel code/data is already shown explicitly above, so only
131	 * show the remaining reservations (mem_map[ ] et al)
132 */
133 reserved_pages -= (((unsigned int)_end - CONFIG_LINUX_LINK_BASE) >>
134 PAGE_SHIFT);
135
136 codesize = _etext - _text;
137 datasize = _end - _etext;
138 initsize = __init_end - __init_begin;
139
140 pr_info("Memory Available: %dM / %ldM (%dK code, %dK data, %dK init, %dK reserv)\n",
141 PAGES_TO_MB(free_pages),
142 TO_MB(arc_mem_sz),
143 TO_KB(codesize), TO_KB(datasize), TO_KB(initsize),
144 PAGES_TO_KB(reserved_pages));
145}
146
147static void __init free_init_pages(const char *what, unsigned long begin,
148 unsigned long end)
149{
150 unsigned long addr;
151
152 pr_info("Freeing %s: %ldk [%lx] to [%lx]\n",
153 what, TO_KB(end - begin), begin, end);
154
155 /* need to check that the page we free is not a partial page */
156 for (addr = begin; addr + PAGE_SIZE <= end; addr += PAGE_SIZE) {
157 ClearPageReserved(virt_to_page(addr));
158 init_page_count(virt_to_page(addr));
159 free_page(addr);
160 totalram_pages++;
161 }
162}
163
164/*
165 * free_initmem: Free all the __init memory.
166 */
167void __init_refok free_initmem(void)
168{
169 free_init_pages("unused kernel memory",
170 (unsigned long)__init_begin,
171 (unsigned long)__init_end);
172}
173
174#ifdef CONFIG_BLK_DEV_INITRD
175void __init free_initrd_mem(unsigned long start, unsigned long end)
176{
177 free_init_pages("initrd memory", start, end);
178}
179#endif
180
181#ifdef CONFIG_OF_FLATTREE
182void __init early_init_dt_setup_initrd_arch(unsigned long start,
183 unsigned long end)
184{
185 pr_err("%s(%lx, %lx)\n", __func__, start, end);
186}
187#endif /* CONFIG_OF_FLATTREE */
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
new file mode 100644
index 000000000000..3e5c92c79936
--- /dev/null
+++ b/arch/arc/mm/ioremap.c
@@ -0,0 +1,91 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/vmalloc.h>
10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/io.h>
13#include <linux/mm.h>
14#include <linux/slab.h>
15#include <asm/cache.h>
16
17void __iomem *ioremap(unsigned long paddr, unsigned long size)
18{
19 unsigned long end;
20
21 /* Don't allow wraparound or zero size */
22 end = paddr + size - 1;
23 if (!size || (end < paddr))
24 return NULL;
25
26 /* If the region is h/w uncached, avoid MMU mappings */
27 if (paddr >= ARC_UNCACHED_ADDR_SPACE)
28 return (void __iomem *)paddr;
29
30 return ioremap_prot(paddr, size, PAGE_KERNEL_NO_CACHE);
31}
32EXPORT_SYMBOL(ioremap);
33
34/*
35 * ioremap with access flags
36 * Cache semantics wise it is the same as ioremap - "forced" uncached.
37 * However unlike vanilla ioremap which bypasses the ARC MMU for addresses in
38 * the ARC hardware uncached region, this one still goes thru the MMU as caller
39 * might need finer access control (R/W/X)
40 */
41void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
42 unsigned long flags)
43{
44 void __iomem *vaddr;
45 struct vm_struct *area;
46 unsigned long off, end;
47 pgprot_t prot = __pgprot(flags);
48
49 /* Don't allow wraparound, zero size */
50 end = paddr + size - 1;
51 if ((!size) || (end < paddr))
52 return NULL;
53
54 /* An early platform driver might end up here */
55 if (!slab_is_available())
56 return NULL;
57
58 /* force uncached */
59 prot = pgprot_noncached(prot);
60
61 /* Mappings have to be page-aligned */
62 off = paddr & ~PAGE_MASK;
63 paddr &= PAGE_MASK;
64 size = PAGE_ALIGN(end + 1) - paddr;
65
66 /*
67 * Ok, go for it..
68 */
69 area = get_vm_area(size, VM_IOREMAP);
70 if (!area)
71 return NULL;
72 area->phys_addr = paddr;
73 vaddr = (void __iomem *)area->addr;
74 if (ioremap_page_range((unsigned long)vaddr,
75 (unsigned long)vaddr + size, paddr, prot)) {
76 vunmap((void __force *)vaddr);
77 return NULL;
78 }
79 return (void __iomem *)(off + (char __iomem *)vaddr);
80}
81EXPORT_SYMBOL(ioremap_prot);
82
83
84void iounmap(const void __iomem *addr)
85{
86 if (addr >= (void __force __iomem *)ARC_UNCACHED_ADDR_SPACE)
87 return;
88
89 vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
90}
91EXPORT_SYMBOL(iounmap);
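A typical consumer maps its registers once at probe time and is oblivious to whether the mapping went through the MMU or fell into the hardware-uncached window. A sketch, with a made-up peripheral base and register offset:

    /* Illustrative only: PERIPH_BASE and REG_CTRL are placeholders */
    void __iomem *regs = ioremap(PERIPH_BASE, SZ_4K);

    if (!regs)
            return -ENOMEM;

    writel(1, regs + REG_CTRL);     /* uncached access, reaches the device */
    iounmap(regs);                  /* no-op if PERIPH_BASE was h/w uncached */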
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
new file mode 100644
index 000000000000..9b9ce23f4ec3
--- /dev/null
+++ b/arch/arc/mm/tlb.c
@@ -0,0 +1,645 @@
1/*
2 * TLB Management (flush/create/diagnostics) for ARC700
3 *
4 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * vineetg: Aug 2011
11 * -Reintroduce duplicate PD fixup - some customer chips still have the issue
12 *
13 * vineetg: May 2011
14 * -No need to flush_cache_page( ) for each call to update_mmu_cache()
15 * some of the LMBench tests improved amazingly
16 * = page-fault thrice as fast (75 usec to 28 usec)
17 * = mmap twice as fast (9.6 msec to 4.6 msec),
18 * = fork (5.3 msec to 3.7 msec)
19 *
20 * vineetg: April 2011 :
21 * -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
22 * helps avoid a shift when preparing PD0 from PTE
23 *
24 * vineetg: April 2011 : Preparing for MMU V3
25 * -MMU v2/v3 BCRs decoded differently
26 * -Remove TLB_SIZE hardcoding as it's variable now: 256 or 512
27 * -tlb_entry_erase( ) can be void
28 * -local_flush_tlb_range( ):
29 * = need not "ceil" @end
30 * = walks MMU only if range spans < 32 entries, as opposed to 256
31 *
32 * Vineetg: Sept 10th 2008
33 * -Changes related to MMU v2 (Rel 4.8)
34 *
35 * Vineetg: Aug 29th 2008
36 * -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
37 * flush Micro-TLBS. If TLB Index Reg is invalid prior to TLBIVUTLB cmd,
38 * it fails. Thus need to load it with ANY valid value before invoking
39 * TLBIVUTLB cmd
40 *
41 * Vineetg: Aug 21st 2008:
42 *  -Reduced the duration of IRQ lockouts in TLB Flush routines
43 *  -Multiple copies of TLB erase code separated into a "single" function
44 * -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
45 * in interrupt-safe region.
46 *
47 * Vineetg: April 23rd Bug #93131
48 * Problem: tlb_flush_kernel_range() doesn't do anything if the range to
49 * flush is more than the size of TLB itself.
50 *
51 * Rahul Trivedi : Codito Technologies 2004
52 */
53
54#include <linux/module.h>
55#include <asm/arcregs.h>
56#include <asm/setup.h>
57#include <asm/mmu_context.h>
58#include <asm/tlb.h>
59
60/* Need for ARC MMU v2
61 *
62 * ARC700 MMU-v1 had a Joint-TLB for Code and Data and is 2 way set-assoc.
63 * For a memcpy operation with 3 players (src/dst/code) such that all 3 pages
64 * map into same set, there would be contention for the 2 ways causing severe
65 * Thrashing.
66 *
67 * Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBS which has
68 * much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways.
69 * Given this, the thrashing problem should never happen because once the 3
70 * J-TLB entries are created (even though 3rd will knock out one of the prev
71 * two), the u-D-TLB and u-I-TLB will have what is required to accomplish memcpy
72 *
73 * Yet we still see the Thrashing because a J-TLB Write causes a flush of u-TLBs.
74 * This is a simple design for keeping them in sync. So what do we do?
75 * The solution James came up with was pretty neat. It utilised the assoc
76 * of uTLBs by not invalidating always but only when absolutely necessary.
77 *
78 * - Existing TLB commands work as before
79 * - New command (TLBWriteNI) for TLB write without clearing uTLBs
80 * - New command (TLBIVUTLB) to invalidate uTLBs.
81 *
82 * The uTLBs need only be invalidated when pages are being removed from the
83 * OS page table. If a 'victim' TLB entry is being overwritten in the main TLB
84 * as a result of a miss, the removed entry is still allowed to exist in the
85 * uTLBs as it is still valid and present in the OS page table. This allows the
86 * full associativity of the uTLBs to hide the limited associativity of the main
87 * TLB.
88 *
89 * During a miss handler, the new "TLBWriteNI" command is used to load
90 * entries without clearing the uTLBs.
91 *
92 * When the OS page table is updated, TLB entries that may be associated with a
93 * removed page are removed (flushed) from the TLB using TLBWrite. In this
94 * circumstance, the uTLBs must also be cleared. This is done by using the
95 * existing TLBWrite command. An explicit IVUTLB is also required for those
96 * corner cases when TLBWrite was not executed at all because the corresp
97 * J-TLB entry got evicted/replaced.
98 */
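The policy that falls out of the above: TLBWriteNI on the refill path (stale-but-valid uTLB copies are harmless), TLBWrite when a translation is revoked, and IVUTLB for the corner case where no write happened at all. Restated as a C sketch using the command names from this file (the helper itself is illustrative, not part of the patch):

    static void commit_tlb_entry(int revoking_translation)
    {
            if (revoking_translation)
                    /* page leaving the page tables: uTLBs must drop it too */
                    write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
            else
                    /* plain refill: leave the highly-associative uTLBs alone */
                    write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
    }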
99
100/* A copy of the ASID from the PID reg is kept in asid_cache */
101int asid_cache = FIRST_ASID;
102
103/* ASID to mm struct mapping. We have one extra entry corresponding to
104 * NO_ASID to save us a compare when clearing the mm entry for old asid
105 * see get_new_mmu_context (asm-arc/mmu_context.h)
106 */
107struct mm_struct *asid_mm_map[NUM_ASID + 1];
108
109/*
110 * Utility Routine to erase a J-TLB entry
111 * The procedure is to look it up in the MMU. If found, ERASE it by
112 * issuing a TlbWrite CMD with PD0 = PD1 = 0
113 */
114
115static void __tlb_entry_erase(void)
116{
117 write_aux_reg(ARC_REG_TLBPD1, 0);
118 write_aux_reg(ARC_REG_TLBPD0, 0);
119 write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
120}
121
122static void tlb_entry_erase(unsigned int vaddr_n_asid)
123{
124 unsigned int idx;
125
126 /* Locate the TLB entry for this vaddr + ASID */
127 write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);
128 write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
129 idx = read_aux_reg(ARC_REG_TLBINDEX);
130
131 /* No error means entry found, zero it out */
132 if (likely(!(idx & TLB_LKUP_ERR))) {
133 __tlb_entry_erase();
134 } else { /* Some sort of Error */
135
136 /* Duplicate entry error */
137 if (idx & 0x1) {
138 /* TODO we need to handle this case too */
139 pr_emerg("unhandled Duplicate flush for %x\n",
140 vaddr_n_asid);
141 }
142 /* else entry not found so nothing to do */
143 }
144}
145
146/****************************************************************************
147 * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs)
148 *
149 * New IVUTLB cmd in MMU v2 explicitly invalidates the uTLB
150 *
151 * utlb_invalidate ( )
152 * -For v2 MMU calls Flush uTLB Cmd
153 * -For v1 MMU does nothing (except for Metal Fix v1 MMU)
154 *    This is because in v1 TLBWrite itself invalidates uTLBs
155 ***************************************************************************/
156
157static void utlb_invalidate(void)
158{
159#if (CONFIG_ARC_MMU_VER >= 2)
160
161#if (CONFIG_ARC_MMU_VER < 3)
162 /* MMU v2 introduced the uTLB Flush command.
163 * There was however an obscure hardware bug, where uTLB flush would
164 * fail when a prior probe for J-TLB (both totally unrelated) would
165	 * return lkup err - because the entry didn't exist in MMU.
166	 * The workaround was to set Index reg with some valid value, prior to
167	 * flush. This was fixed in MMU v3 hence not needed any more
168 */
169 unsigned int idx;
170
171 /* make sure INDEX Reg is valid */
172 idx = read_aux_reg(ARC_REG_TLBINDEX);
173
174 /* If not write some dummy val */
175 if (unlikely(idx & TLB_LKUP_ERR))
176 write_aux_reg(ARC_REG_TLBINDEX, 0xa);
177#endif
178
179 write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
180#endif
181
182}
183
184/*
185 * Unconditionally (without lookup) erase the entire MMU contents
186 */
187
188noinline void local_flush_tlb_all(void)
189{
190 unsigned long flags;
191 unsigned int entry;
192 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
193
194 local_irq_save(flags);
195
196 /* Load PD0 and PD1 with template for a Blank Entry */
197 write_aux_reg(ARC_REG_TLBPD1, 0);
198 write_aux_reg(ARC_REG_TLBPD0, 0);
199
200 for (entry = 0; entry < mmu->num_tlb; entry++) {
201 /* write this entry to the TLB */
202 write_aux_reg(ARC_REG_TLBINDEX, entry);
203 write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
204 }
205
206 utlb_invalidate();
207
208 local_irq_restore(flags);
209}
210
211/*
212 * Flush the entire MM for userland. The fastest way is to move to Next ASID
213 */
214noinline void local_flush_tlb_mm(struct mm_struct *mm)
215{
216 /*
217 * Small optimisation courtesy IA64
218 * flush_mm called during fork,exit,munmap etc, multiple times as well.
219 * Only for fork( ) do we need to move parent to a new MMU ctxt,
220 * all other cases are NOPs, hence this check.
221 */
222 if (atomic_read(&mm->mm_users) == 0)
223 return;
224
225 /*
226 * Workaround for Android weirdism:
227 * A binder VMA could end up in a task such that vma->mm != tsk->mm
228 * old code would cause h/w - s/w ASID to get out of sync
229 */
230 if (current->mm != mm)
231 destroy_context(mm);
232 else
233 get_new_mmu_context(mm);
234}
235
236/*
237 * Flush a Range of TLB entries for userland.
238 * @start is inclusive, while @end is exclusive
239 * Difference between this and Kernel Range Flush is
240 * -Here the fastest way (if range is too large) is to move to next ASID
241 * without doing any explicit Shootdown
242 *  -In case of kernel Flush, entry has to be shot down explicitly
243 */
244void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
245 unsigned long end)
246{
247 unsigned long flags;
248 unsigned int asid;
249
250 /* If range @start to @end is more than 32 TLB entries deep,
251	 * it's better to move to a new ASID rather than searching for
252 * individual entries and then shooting them down
253 *
254 * The calc above is rough, doesn't account for unaligned parts,
255 * since this is heuristics based anyways
256 */
257 if (unlikely((end - start) >= PAGE_SIZE * 32)) {
258 local_flush_tlb_mm(vma->vm_mm);
259 return;
260 }
261
262 /*
263 * @start moved to page start: this alone suffices for checking
264 * loop end condition below, w/o need for aligning @end to end
265 * e.g. 2000 to 4001 will anyhow loop twice
266 */
267 start &= PAGE_MASK;
268
269 local_irq_save(flags);
270 asid = vma->vm_mm->context.asid;
271
272 if (asid != NO_ASID) {
273 while (start < end) {
274 tlb_entry_erase(start | (asid & 0xff));
275 start += PAGE_SIZE;
276 }
277 }
278
279 utlb_invalidate();
280
281 local_irq_restore(flags);
282}
283
284/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
285 * @start, @end interpreted as kvaddr
286 * Interestingly, shared TLB entries can also be flushed using just
287 * @start,@end alone (interpreted as user vaddr), although technically SASID
288 * is also needed. However our smart TLBProbe lookup takes care of that.
289 */
290void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
291{
292 unsigned long flags;
293
294 /* exactly same as above, except for TLB entry not taking ASID */
295
296 if (unlikely((end - start) >= PAGE_SIZE * 32)) {
297 local_flush_tlb_all();
298 return;
299 }
300
301 start &= PAGE_MASK;
302
303 local_irq_save(flags);
304 while (start < end) {
305 tlb_entry_erase(start);
306 start += PAGE_SIZE;
307 }
308
309 utlb_invalidate();
310
311 local_irq_restore(flags);
312}
313
314/*
315 * Delete TLB entry in MMU for a given page (??? address)
316 * NOTE One TLB entry contains translation for single PAGE
317 */
318
319void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
320{
321 unsigned long flags;
322
323 /* Note that it is critical that interrupts are DISABLED between
324	 * checking the ASID and using it to flush the TLB entry
325 */
326 local_irq_save(flags);
327
328 if (vma->vm_mm->context.asid != NO_ASID) {
329 tlb_entry_erase((page & PAGE_MASK) |
330 (vma->vm_mm->context.asid & 0xff));
331 utlb_invalidate();
332 }
333
334 local_irq_restore(flags);
335}
336
337/*
338 * Routine to create a TLB entry
339 */
340void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
341{
342 unsigned long flags;
343 unsigned int idx, asid_or_sasid;
344 unsigned long pd0_flags;
345
346 /*
347 * create_tlb() assumes that current->mm == vma->mm, since
348	 * -the ASID for the TLB entry is fetched from MMU ASID reg (valid for curr)
349 * -completes the lazy write to SASID reg (again valid for curr tsk)
350 *
351 * Removing the assumption involves
352 * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
353 * -Fix the TLB paranoid debug code to not trigger false negatives.
354 * -More importantly it makes this handler inconsistent with fast-path
355 * TLB Refill handler which always deals with "current"
356 *
357 * Lets see the use cases when current->mm != vma->mm and we land here
358 * 1. execve->copy_strings()->__get_user_pages->handle_mm_fault
359 * Here VM wants to pre-install a TLB entry for user stack while
360 * current->mm still points to pre-execve mm (hence the condition).
361 * However the stack vaddr is soon relocated (randomization) and
362 * move_page_tables() tries to undo that TLB entry.
363 * Thus not creating TLB entry is not any worse.
364 *
365 * 2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
366 * breakpoint in debugged task. Not creating a TLB now is not
367 * performance critical.
368 *
369 * Both the cases above are not good enough for code churn.
370 */
371 if (current->active_mm != vma->vm_mm)
372 return;
373
374 local_irq_save(flags);
375
376 tlb_paranoid_check(vma->vm_mm->context.asid, address);
377
378 address &= PAGE_MASK;
379
380 /* update this PTE credentials */
381 pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
382
383 /* Create HW TLB entry Flags (in PD0) from PTE Flags */
384#if (CONFIG_ARC_MMU_VER <= 2)
385 pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0) >> 1);
386#else
387 pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0));
388#endif
389
390 /* ASID for this task */
391 asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
392
393 write_aux_reg(ARC_REG_TLBPD0, address | pd0_flags | asid_or_sasid);
394
395 /* Load remaining info in PD1 (Page Frame Addr and Kx/Kw/Kr Flags) */
396 write_aux_reg(ARC_REG_TLBPD1, (pte_val(*ptep) & PTE_BITS_IN_PD1));
397
398 /* First verify if entry for this vaddr+ASID already exists */
399 write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
400 idx = read_aux_reg(ARC_REG_TLBINDEX);
401
402 /*
403 * If Not already present get a free slot from MMU.
404 * Otherwise, Probe would have located the entry and set INDEX Reg
405 * with existing location. This will cause Write CMD to over-write
406 * existing entry with new PD0 and PD1
407 */
408 if (likely(idx & TLB_LKUP_ERR))
409 write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
410
411 /*
412 * Commit the Entry to MMU
413	 * It doesn't sound safe to use the TLBWriteNI cmd here
414 * which doesn't flush uTLBs. I'd rather be safe than sorry.
415 */
416 write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
417
418 local_irq_restore(flags);
419}
420
421/* arch hook called by core VM at the end of handle_mm_fault( ),
422 * when a new PTE is entered in Page Tables or an existing one
423 * is modified. We aggressively pre-install a TLB entry
424 */
425
426void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddress,
427 pte_t *ptep)
428{
429
430 create_tlb(vma, vaddress, ptep);
431}
432
433/* Read the MMU Build Configuration Registers, decode them and save into
434 * the cpuinfo structure for later use.
435 * No Validation is done here, simply read/convert the BCRs
436 */
437void __init read_decode_mmu_bcr(void)
438{
439 unsigned int tmp;
440 struct bcr_mmu_1_2 *mmu2; /* encoded MMU2 attr */
441 struct bcr_mmu_3 *mmu3; /* encoded MMU3 attr */
442 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
443
444 tmp = read_aux_reg(ARC_REG_MMU_BCR);
445 mmu->ver = (tmp >> 24);
446
447 if (mmu->ver <= 2) {
448 mmu2 = (struct bcr_mmu_1_2 *)&tmp;
449 mmu->pg_sz = PAGE_SIZE;
450 mmu->sets = 1 << mmu2->sets;
451 mmu->ways = 1 << mmu2->ways;
452 mmu->u_dtlb = mmu2->u_dtlb;
453 mmu->u_itlb = mmu2->u_itlb;
454 } else {
455 mmu3 = (struct bcr_mmu_3 *)&tmp;
456 mmu->pg_sz = 512 << mmu3->pg_sz;
457 mmu->sets = 1 << mmu3->sets;
458 mmu->ways = 1 << mmu3->ways;
459 mmu->u_dtlb = mmu3->u_dtlb;
460 mmu->u_itlb = mmu3->u_itlb;
461 }
462
463 mmu->num_tlb = mmu->sets * mmu->ways;
464}
465
466char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
467{
468 int n = 0;
469 struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
470
471 n += scnprintf(buf + n, len - n, "ARC700 MMU [v%x]\t: %dk PAGE, ",
472 p_mmu->ver, TO_KB(p_mmu->pg_sz));
473
474 n += scnprintf(buf + n, len - n,
475 "J-TLB %d (%dx%d), uDTLB %d, uITLB %d, %s\n",
476 p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
477 p_mmu->u_dtlb, p_mmu->u_itlb,
478 __CONFIG_ARC_MMU_SASID_VAL ? "SASID" : "");
479
480 return buf;
481}
482
483void __init arc_mmu_init(void)
484{
485 char str[256];
486 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
487
488 printk(arc_mmu_mumbojumbo(0, str, sizeof(str)));
489
490	/* For efficiency's sake, the kernel is built at compile time for one MMU ver.
491	 * This must match the hardware it is running on.
492	 * Linux built for MMU V2, if run on MMU V1 will break down because V1
493	 * hardware doesn't understand cmds such as WriteNI, or IVUTLB
494	 * On the other hand, Linux built for V1 if run on MMU V2 will do
495	 * unneeded workarounds to prevent memcpy thrashing.
496 * Similarly MMU V3 has new features which won't work on older MMU
497 */
498 if (mmu->ver != CONFIG_ARC_MMU_VER) {
499 panic("MMU ver %d doesn't match kernel built for %d...\n",
500 mmu->ver, CONFIG_ARC_MMU_VER);
501 }
502
503 if (mmu->pg_sz != PAGE_SIZE)
504 panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));
505
506 /*
507 * ASID mgmt data structures are compile time init
508 * asid_cache = FIRST_ASID and asid_mm_map[] all zeroes
509 */
510
511 local_flush_tlb_all();
512
513 /* Enable the MMU */
514 write_aux_reg(ARC_REG_PID, MMU_ENABLE);
515
516 /* In smp we use this reg for interrupt 1 scratch */
517#ifndef CONFIG_SMP
518 /* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
519 write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
520#endif
521}
522
523/*
524 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
525 * The mapping is Column-first.
526 * --------------------- -----------
527 * |way0|way1|way2|way3| |way0|way1|
528 * --------------------- -----------
529 * [set0] | 0 | 1 | 2 | 3 | | 0 | 1 |
530 * [set1] | 4 | 5 | 6 | 7 | | 2 | 3 |
531 * ~ ~ ~ ~
532 * [set127] | 508| 509| 510| 511| | 254| 255|
533 * --------------------- -----------
534 * For normal operations we don't (must not) care how the above works since
535 * MMU cmd getIndex(vaddr) abstracts that out.
536 * However for walking WAYS of a SET, we need to know this
537 */
538#define SET_WAY_TO_IDX(mmu, set, way) ((set) * mmu->ways + (way))
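A quick check of the macro against the 4-way table above:

    /* SET_WAY_TO_IDX(mmu, 1, 2)   == 1 * 4 + 2   == 6
     * SET_WAY_TO_IDX(mmu, 127, 3) == 127 * 4 + 3 == 511  (last entry)
     */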
539
540/* Handling of Duplicate PD (TLB entry) in MMU.
541 * -Could be due to buggy customer tapeouts or obscure kernel bugs
542 * -MMU complains not at the time of duplicate PD installation, but at the
543 * time of lookup matching multiple ways.
544 * -Ideally these should never happen - but if they do - workaround by deleting
545 * the duplicate one.
546 * -Knob to be verbose about it. (TODO: hook them up to debugfs)
547 */
548volatile int dup_pd_verbose = 1;/* Be silent about it or complain (default) */
549
550void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
551 struct pt_regs *regs)
552{
553 int set, way, n;
554 unsigned int pd0[4], pd1[4]; /* assume max 4 ways */
555 unsigned long flags, is_valid;
556 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
557
558 local_irq_save(flags);
559
560 /* re-enable the MMU */
561 write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));
562
563 /* loop thru all sets of TLB */
564 for (set = 0; set < mmu->sets; set++) {
565
566 /* read out all the ways of current set */
567 for (way = 0, is_valid = 0; way < mmu->ways; way++) {
568 write_aux_reg(ARC_REG_TLBINDEX,
569 SET_WAY_TO_IDX(mmu, set, way));
570 write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
571 pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
572 pd1[way] = read_aux_reg(ARC_REG_TLBPD1);
573 is_valid |= pd0[way] & _PAGE_PRESENT;
574 }
575
576 /* If all the WAYS in SET are empty, skip to next SET */
577 if (!is_valid)
578 continue;
579
580 /* Scan the set for duplicate ways: needs a nested loop */
581 for (way = 0; way < mmu->ways; way++) {
582 if (!pd0[way])
583 continue;
584
585 for (n = way + 1; n < mmu->ways; n++) {
586 if ((pd0[way] & PAGE_MASK) ==
587 (pd0[n] & PAGE_MASK)) {
588
589 if (dup_pd_verbose) {
590 pr_info("Duplicate PD's @"
591 "[%d:%d]/[%d:%d]\n",
592 set, way, set, n);
593 pr_info("TLBPD0[%u]: %08x\n",
594 way, pd0[way]);
595 }
596
597 /*
598 * clear entry @way and not @n. This is
599 * critical to our optimised loop
600 */
601 pd0[way] = pd1[way] = 0;
602 write_aux_reg(ARC_REG_TLBINDEX,
603 SET_WAY_TO_IDX(mmu, set, way));
604 __tlb_entry_erase();
605 }
606 }
607 }
608 }
609
610 local_irq_restore(flags);
611}
612
613/***********************************************************************
614 * Diagnostic Routines
615 * -Called from Low Level TLB Handlers if things don't look good
616 **********************************************************************/
617
618#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
619
620/*
621 * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDS
622 * don't match
623 */
624void print_asid_mismatch(int is_fast_path)
625{
626 int pid_sw, pid_hw;
627 pid_sw = current->active_mm->context.asid;
628 pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;
629
630 pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
631 is_fast_path ? "Fast" : "Slow", pid_sw, pid_hw);
632
633 __asm__ __volatile__("flag 1");
634}
635
636void tlb_paranoid_check(unsigned int pid_sw, unsigned long addr)
637{
638 unsigned int pid_hw;
639
640 pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;
641
642 if (addr < 0x70000000 && ((pid_hw != pid_sw) || (pid_sw == NO_ASID)))
643 print_asid_mismatch(0);
644}
645#endif
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
new file mode 100644
index 000000000000..9df765dc7c3a
--- /dev/null
+++ b/arch/arc/mm/tlbex.S
@@ -0,0 +1,408 @@
1/*
2 * TLB Exception Handling for ARC
3 *
4 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Vineetg: April 2011 :
11 * -MMU v1: moved out legacy code into a separate file
12 * -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
13 * helps avoid a shift when preparing PD0 from PTE
14 *
15 * Vineetg: July 2009
16 * -For MMU V2, we need not do heuristics at the time of committing a D-TLB
17 *  entry, so that it doesn't knock out its I-TLB entry
18 * -Some more fine tuning:
19 * bmsk instead of add, asl.cc instead of branch, delay slot utilise etc
20 *
21 * Vineetg: July 2009
22 * -Practically rewrote the I/D TLB Miss handlers
23 *   Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
24 * Hence Leaner by 1.5 K
25 * Used Conditional arithmetic to replace excessive branching
26 * Also used short instructions wherever possible
27 *
28 * Vineetg: Aug 13th 2008
29 * -Passing ECR (Exception Cause REG) to do_page_fault( ) for printing
30 * more information in case of a Fatality
31 *
32 * Vineetg: March 25th Bug #92690
33 * -Added Debug Code to check if sw-ASID == hw-ASID
34 *
35 * Rahul Trivedi, Amit Bhor: Codito Technologies 2004
36 */
37
38 .cpu A7
39
40#include <linux/linkage.h>
41#include <asm/entry.h>
42#include <asm/tlb.h>
43#include <asm/pgtable.h>
44#include <asm/arcregs.h>
45#include <asm/cache.h>
46#include <asm/processor.h>
47#if (CONFIG_ARC_MMU_VER == 1)
48#include <asm/tlb-mmu1.h>
49#endif
50
51;--------------------------------------------------------------------------
52; scratch memory to save the registers (r0-r3) used to code TLB refill Handler
53; For details refer to comments before TLBMISS_FREEUP_REGS below
54;--------------------------------------------------------------------------
55
56ARCFP_DATA ex_saved_reg1
57 .align 1 << L1_CACHE_SHIFT ; IMP: Must be Cache Line aligned
58 .type ex_saved_reg1, @object
59#ifdef CONFIG_SMP
60 .size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
61ex_saved_reg1:
62 .zero (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
63#else
64 .size ex_saved_reg1, 16
65ex_saved_reg1:
66 .zero 16
67#endif
68
69;============================================================================
70; Troubleshooting Stuff
71;============================================================================
72
73; Linux keeps ASID (Address Space ID) in task->active_mm->context.asid
74; When Creating TLB Entries, instead of doing 3 dependent loads from memory,
75; we use the MMU PID Reg to get current ASID.
76; In bizarre scenarios SW and HW ASID can get out-of-sync which is trouble.
77; So we try to detect this in the TLB Miss handler
78
79
80.macro DBG_ASID_MISMATCH
81
82#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
83
84 ; make sure h/w ASID is same as s/w ASID
85
86 GET_CURR_TASK_ON_CPU r3
87 ld r0, [r3, TASK_ACT_MM]
88 ld r0, [r0, MM_CTXT+MM_CTXT_ASID]
89
90 lr r1, [ARC_REG_PID]
91 and r1, r1, 0xFF
92 breq r1, r0, 5f
93
94 ; Error if H/w and S/w ASID don't match, but NOT if in kernel mode
95 lr r0, [erstatus]
96 bbit0 r0, STATUS_U_BIT, 5f
97
98 ; We sure are in troubled waters, Flag the error, but to do so
99 ; need to switch to kernel mode stack to call error routine
100 GET_TSK_STACK_BASE r3, sp
101
102	; Call printk to shout out loud
103 mov r0, 1
104 j print_asid_mismatch
105
1065: ; ASIDs match so proceed normally
107 nop
108
109#endif
110
111.endm
112
113;============================================================================
114;TLB Miss handling Code
115;============================================================================
116
117;-----------------------------------------------------------------------------
118; This macro does the page-table lookup for the faulting address.
119; OUT: r0 = PTE faulted on, r1 = ptr to PTE, r2 = Faulting V-address
120.macro LOAD_FAULT_PTE
121
122 lr r2, [efa]
123
124#ifndef CONFIG_SMP
125 lr r1, [ARC_REG_SCRATCH_DATA0] ; current pgd
126#else
127 GET_CURR_TASK_ON_CPU r1
128 ld r1, [r1, TASK_ACT_MM]
129 ld r1, [r1, MM_PGD]
130#endif
131
132 lsr r0, r2, PGDIR_SHIFT ; Bits for indexing into PGD
133 ld.as r1, [r1, r0] ; PGD entry corresp to faulting addr
134 and.f r1, r1, PAGE_MASK ; Ignoring protection and other flags
135 ; contains Ptr to Page Table
136 bz.d do_slow_path_pf ; if no Page Table, do page fault
137
138 ; Get the PTE entry: The idea is
139 ; (1) x = addr >> PAGE_SHIFT -> masks page-off bits from @fault-addr
140 ; (2) y = x & (PTRS_PER_PTE - 1) -> to get index
141 ; (3) z = pgtbl[y]
142	; To avoid a multiply by 4 (PTE size) at the end, we do the -2, <<2 below
143
144 lsr r0, r2, (PAGE_SHIFT - 2)
145 and r0, r0, ( (PTRS_PER_PTE - 1) << 2)
146 ld.aw r0, [r1, r0] ; get PTE and PTE ptr for fault addr
147#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
148 and.f 0, r0, _PAGE_PRESENT
149 bz 1f
150 ld r2, [num_pte_not_present]
151 add r2, r2, 1
152 st r2, [num_pte_not_present]
1531:
154#endif
155
156.endm
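In C terms the macro performs a classic two-level walk; a simplified model of the same logic (illustrative only, not compiled into the kernel; types flattened to unsigned long):

    static unsigned long load_fault_pte(unsigned long *pgd, unsigned long addr)
    {
            unsigned long gd = pgd[addr >> PGDIR_SHIFT];          /* PGD entry */
            unsigned long *pgtbl = (unsigned long *)(gd & PAGE_MASK);

            if (!pgtbl)
                    return 0;       /* no page table: take the slow path */

            return pgtbl[(addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)];
    }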
157
158;-----------------------------------------------------------------
159; Convert Linux PTE entry into TLB entry
160; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu
161; IN: r0 = PTE, r1 = ptr to PTE
162
163.macro CONV_PTE_TO_TLB
164 and r3, r0, PTE_BITS_IN_PD1 ; Extract permission flags+PFN from PTE
165 sr r3, [ARC_REG_TLBPD1] ; these go in PD1
166
167 and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb
168#if (CONFIG_ARC_MMU_VER <= 2) /* Need not be done with v3 onwards */
169 lsr r2, r2 ; shift PTE flags to match layout in PD0
170#endif
171
172 lr r3,[ARC_REG_TLBPD0] ; MMU prepares PD0 with vaddr and asid
173
174 or r3, r3, r2 ; S | vaddr | {sasid|asid}
175 sr r3,[ARC_REG_TLBPD0] ; rewrite PD0
176.endm
177
178;-----------------------------------------------------------------
179; Commit the TLB entry into MMU
180
181.macro COMMIT_ENTRY_TO_MMU
182
183 /* Get free TLB slot: Set = computed from vaddr, way = random */
184 sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
185
186 /* Commit the Write */
187#if (CONFIG_ARC_MMU_VER >= 2) /* introduced in v2 */
188 sr TLBWriteNI, [ARC_REG_TLBCOMMAND]
189#else
190 sr TLBWrite, [ARC_REG_TLBCOMMAND]
191#endif
192.endm
193
194;-----------------------------------------------------------------
195; ARC700 Exception Handling doesn't auto-switch stack and it only provides
196; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
197;
198; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
199; "global" is used to free-up FIRST core reg to be able to code the rest of
200; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe).
201; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3
202; need to be saved as well by extending the "global" to be 4 words. Hence
203; ".size ex_saved_reg1, 16"
204; [All of this dance is to avoid stack switching for each TLB Miss, since we
205; only need to save only a handful of regs, as opposed to complete reg file]
206;
207; For ARC700 SMP, the "global" obviously can't be used to free up the FIRST
208; core reg as it will not be SMP safe.
209; Thus scratch AUX reg is used (and no longer used to cache task PGD).
210; To save the rest of 3 regs - per cpu, the global is made "per-cpu".
211; Epilogue thus has to locate the "per-cpu" storage for regs.
212; To avoid cache line bouncing the per-cpu global is aligned/sized per
213; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence
214; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
215
216; As simple as that....
217
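In C, the per-cpu save-area addressing used by both macros boils down to the line below (cpu_id stands for whatever GET_CPU_ID yields; the cast is illustrative):

    /* one cache line per CPU; r1-r3 are stashed at offsets 4/8/12 within it */
    char *save_area = (char *)ex_saved_reg1 + (cpu_id << L1_CACHE_SHIFT);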
218.macro TLBMISS_FREEUP_REGS
219#ifdef CONFIG_SMP
220 sr r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with
221 GET_CPU_ID r0 ; get to per cpu scratch mem,
222 lsl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu
223 add r0, @ex_saved_reg1, r0
224#else
225 st r0, [@ex_saved_reg1]
226 mov_s r0, @ex_saved_reg1
227#endif
228 st_s r1, [r0, 4]
229 st_s r2, [r0, 8]
230 st_s r3, [r0, 12]
231
232 ; VERIFY if the ASID in MMU-PID Reg is same as
233 ; one in Linux data structures
234
235 DBG_ASID_MISMATCH
236.endm
237
238;-----------------------------------------------------------------
239.macro TLBMISS_RESTORE_REGS
240#ifdef CONFIG_SMP
241 GET_CPU_ID r0 ; get to per cpu scratch mem
242 lsl r0, r0, L1_CACHE_SHIFT ; each is cache line wide
243 add r0, @ex_saved_reg1, r0
244 ld_s r3, [r0,12]
245 ld_s r2, [r0, 8]
246 ld_s r1, [r0, 4]
247 lr r0, [ARC_REG_SCRATCH_DATA0]
248#else
249 mov_s r0, @ex_saved_reg1
250 ld_s r3, [r0,12]
251 ld_s r2, [r0, 8]
252 ld_s r1, [r0, 4]
253 ld_s r0, [r0]
254#endif
255.endm
256
257ARCFP_CODE ;Fast Path Code, candidate for ICCM
258
259;-----------------------------------------------------------------------------
260; I-TLB Miss Exception Handler
261;-----------------------------------------------------------------------------
262
263ARC_ENTRY EV_TLBMissI
264
265 TLBMISS_FREEUP_REGS
266
267#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
268 ld r0, [@numitlb]
269 add r0, r0, 1
270 st r0, [@numitlb]
271#endif
272
273 ;----------------------------------------------------------------
274 ; Get the PTE corresponding to V-addr accessed
275 LOAD_FAULT_PTE
276
277 ;----------------------------------------------------------------
278 ; VERIFY_PTE: Check if PTE permissions approp for executing code
279 cmp_s r2, VMALLOC_START
280 mov.lo r2, (_PAGE_PRESENT | _PAGE_READ | _PAGE_EXECUTE)
281 mov.hs r2, (_PAGE_PRESENT | _PAGE_K_READ | _PAGE_K_EXECUTE)
282
283 and r3, r0, r2 ; Mask out NON Flag bits from PTE
284 xor.f r3, r3, r2 ; check ( ( pte & flags_test ) == flags_test )
285 bnz do_slow_path_pf
286
287 ; Let Linux VM know that the page was accessed
288 or r0, r0, (_PAGE_PRESENT | _PAGE_ACCESSED) ; set Accessed Bit
289 st_s r0, [r1] ; Write back PTE
290
291 CONV_PTE_TO_TLB
292 COMMIT_ENTRY_TO_MMU
293 TLBMISS_RESTORE_REGS
294 rtie
295
296ARC_EXIT EV_TLBMissI
297
298;-----------------------------------------------------------------------------
299; D-TLB Miss Exception Handler
300;-----------------------------------------------------------------------------
301
302ARC_ENTRY EV_TLBMissD
303
304 TLBMISS_FREEUP_REGS
305
306#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
307 ld r0, [@numdtlb]
308 add r0, r0, 1
309 st r0, [@numdtlb]
310#endif
311
312 ;----------------------------------------------------------------
313 ; Get the PTE corresponding to V-addr accessed
314 ; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE
315 LOAD_FAULT_PTE
316
317 ;----------------------------------------------------------------
318 ; VERIFY_PTE: Chk if PTE permissions approp for data access (R/W/R+W)
319
320 mov_s r2, 0
321 lr r3, [ecr]
322 btst_s r3, ECR_C_BIT_DTLB_LD_MISS ; Read Access
323 or.nz r2, r2, _PAGE_READ ; chk for Read flag in PTE
324 btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; Write Access
325 or.nz r2, r2, _PAGE_WRITE ; chk for Write flag in PTE
326 ; Above laddering takes care of XCHG access
327 ; which is both Read and Write
328
329 ; If kernel mode access, ; make _PAGE_xx flags as _PAGE_K_xx
330 ; For copy_(to|from)_user, despite exception taken in kernel mode,
331 ; this code is not hit, because EFA would still be the user mode
332 ; address (EFA < 0x6000_0000).
333 ; This code is for legit kernel mode faults, vmalloc specifically
334 ; (EFA: 0x7000_0000 to 0x7FFF_FFFF)
335
336 lr r3, [efa]
337 cmp r3, VMALLOC_START - 1 ; If kernel mode access
338 asl.hi r2, r2, 3 ; make _PAGE_xx flags as _PAGE_K_xx
339 or r2, r2, _PAGE_PRESENT ; Common flag for K/U mode
340
341 ; By now, r2 setup with all the Flags we need to check in PTE
342 and r3, r0, r2 ; Mask out NON Flag bits from PTE
343 brne.d r3, r2, do_slow_path_pf ; is ((pte & flags_test) == flags_test)
344
345 ;----------------------------------------------------------------
346 ; UPDATE_PTE: Let Linux VM know that page was accessed/dirty
347 lr r3, [ecr]
348 or r0, r0, (_PAGE_PRESENT | _PAGE_ACCESSED) ; Accessed bit always
349 btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; See if it was a Write Access ?
350 or.nz r0, r0, _PAGE_MODIFIED ; if Write, set Dirty bit as well
351 st_s r0, [r1] ; Write back PTE
352
353 CONV_PTE_TO_TLB
354
355#if (CONFIG_ARC_MMU_VER == 1)
356 ; MMU with 2 way set assoc J-TLB, needs some help in pathetic case of
357 ; memcpy where 3 parties contend for 2 ways, ensuing a livelock.
358 ; But only for old MMU or one with Metal Fix
359 TLB_WRITE_HEURISTICS
360#endif
361
362 COMMIT_ENTRY_TO_MMU
363 TLBMISS_RESTORE_REGS
364 rtie
365
366;-------- Common routine to call Linux Page Fault Handler -----------
367do_slow_path_pf:
368
369 ; Restore the 4-scratch regs saved by fast path miss handler
370 TLBMISS_RESTORE_REGS
371
372 ; Slow path TLB Miss handled as a regular ARC Exception
373 ; (stack switching / save the complete reg-file).
374 ; That requires freeing up r9
375 EXCPN_PROLOG_FREEUP_REG r9
376
377 lr r9, [erstatus]
378
379 SWITCH_TO_KERNEL_STK
380 SAVE_ALL_SYS
381
382	; ------- setup args for Linux Page fault Handler ---------
383 mov_s r0, sp
384 lr r2, [efa]
385 lr r3, [ecr]
386
387 ; Both st and ex imply WRITE access of some sort, hence do_page_fault( )
388 ; invoked with write=1 for DTLB-st/ex Miss and write=0 for ITLB miss or
389 ; DTLB-ld Miss
390 ; DTLB Miss Cause code is ld = 0x01 , st = 0x02, ex = 0x03
391	; Following code uses the fact that st/ex have one bit in common
392
393 btst_s r3, ECR_C_BIT_DTLB_ST_MISS
394 mov.z r1, 0
395 mov.nz r1, 1
396
397 ; We don't want exceptions to be disabled while the fault is handled.
398	; Now that we have saved the context we "return" from the exception, hence
399	; exceptions get re-enabled
400
401 FAKE_RET_FROM_EXCPN r9
402
403 bl do_page_fault
404 b ret_from_exception
405
406ARC_EXIT EV_TLBMissD
407
408ARC_ENTRY EV_TLBMissB ; Bogus entry to measure sz of DTLBMiss hdlr
diff --git a/arch/arc/oprofile/Makefile b/arch/arc/oprofile/Makefile
new file mode 100644
index 000000000000..ce417a6e70b8
--- /dev/null
+++ b/arch/arc/oprofile/Makefile
@@ -0,0 +1,9 @@
1obj-$(CONFIG_OPROFILE) += oprofile.o
2
3DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
4 oprof.o cpu_buffer.o buffer_sync.o \
5 event_buffer.o oprofile_files.o \
6 oprofilefs.o oprofile_stats.o \
7 timer_int.o )
8
9oprofile-y := $(DRIVER_OBJS) common.o
diff --git a/arch/arc/oprofile/common.c b/arch/arc/oprofile/common.c
new file mode 100644
index 000000000000..c80fcad4a5a7
--- /dev/null
+++ b/arch/arc/oprofile/common.c
@@ -0,0 +1,26 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Based on orig code from @author John Levon <levon@movementarian.org>
9 */
10
11#include <linux/oprofile.h>
12#include <linux/perf_event.h>
13
14int __init oprofile_arch_init(struct oprofile_operations *ops)
15{
16 /*
17 * A failure here, forces oprofile core to switch to Timer based PC
18 * sampling, which will happen if say perf is not enabled/available
19 */
20 return oprofile_perf_init(ops);
21}
22
23void oprofile_arch_exit(void)
24{
25 oprofile_perf_exit();
26}
diff --git a/arch/arc/plat-arcfpga/Kconfig b/arch/arc/plat-arcfpga/Kconfig
new file mode 100644
index 000000000000..b41e786cdbc0
--- /dev/null
+++ b/arch/arc/plat-arcfpga/Kconfig
@@ -0,0 +1,84 @@
1#
2# Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3#
4# This program is free software; you can redistribute it and/or modify
5# it under the terms of the GNU General Public License version 2 as
6# published by the Free Software Foundation.
7#
8
9menuconfig ARC_PLAT_FPGA_LEGACY
10 bool "\"Legacy\" ARC FPGA dev Boards"
11 select ISS_SMP_EXTN if SMP
12 help
13 Support for ARC development boards, provided by Synopsys.
14 These are based on FPGA or ISS. e.g.
15 - ARCAngel4
16 - ML509
17 - MetaWare ISS
18
19if ARC_PLAT_FPGA_LEGACY
20
21config ARC_BOARD_ANGEL4
22 bool "ARC Angel4"
23 default y
24 help
25 ARC Angel4 FPGA Ref Platform (Xilinx Virtex Based)
26
27config ARC_BOARD_ML509
28 bool "ML509"
29 help
30 ARC ML509 FPGA Ref Platform (Xilinx Virtex-5 Based)
31
32config ISS_SMP_EXTN
33 bool "ARC SMP Extensions (ISS Models only)"
34 default n
35 depends on SMP
36 select ARC_HAS_COH_RTSC
37 help
38 SMP Extensions to ARC700, in a "simulation only" Model, supported in
39 ARC ISS (Instruction Set Simulator).
40 The SMP extensions include:
41 -IDU (Interrupt Distribution Unit)
42 -XTL (To enable CPU start/stop/set-PC for another CPU)
43 It doesn't provide coherent Caches and/or Atomic Ops (LLOCK/SCOND)
44
45config ARC_SERIAL_BAUD
46 int "UART Baud rate"
47 default "115200"
48 depends on SERIAL_ARC || SERIAL_ARC_CONSOLE
49 help
50 Baud rate for the ARC UART
51
52menuconfig ARC_HAS_BVCI_LAT_UNIT
53 bool "BVCI Bus Latency Unit"
54 depends on ARC_BOARD_ML509 || ARC_BOARD_ANGEL4
55 help
56	  IP to add artificial latency to BVCI Bus Based FPGA builds.
57	  The default latency (even worst case) for FPGA is unrealistic
58 (~10 SDRAM, ~5 SSRAM).
59
60config BVCI_LAT_UNITS
61 hex "Latency Unit(s) Bitmap"
62 default "0x0"
63 depends on ARC_HAS_BVCI_LAT_UNIT
64 help
65 There are multiple Latency Units corresponding to the many
66 interfaces of the system bus arbiter (both CPU side as well as
67 the peripheral side).
68	  To add latency to ALL memory transactions, choose Unit 0, otherwise
69	  for finer-grained, per-interface latency, specify a bitmap (1 bit
70	  per unit) of all units, e.g. units 0, 1 and 12 give 0x1003
71
72 Unit 0 - System Arb and Mem Controller
73 Unit 1 - I$ and System Bus
74 Unit 2 - D$ and System Bus
75 ..
76 Unit 12 - IDE Disk controller and System Bus
77
78config BVCI_LAT_CYCLES
79 int "Latency Value in cycles"
80 range 0 63
81 default "30"
82 depends on ARC_HAS_BVCI_LAT_UNIT
83
84endif
diff --git a/arch/arc/plat-arcfpga/Makefile b/arch/arc/plat-arcfpga/Makefile
new file mode 100644
index 000000000000..a44e22ebc1b7
--- /dev/null
+++ b/arch/arc/plat-arcfpga/Makefile
@@ -0,0 +1,12 @@
1#
2# Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
3#
4# This program is free software; you can redistribute it and/or modify
5# it under the terms of the GNU General Public License version 2 as
6# published by the Free Software Foundation.
7#
8
9KBUILD_CFLAGS += -Iarch/arc/plat-arcfpga/include
10
11obj-y := platform.o irq.o
12obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/arc/plat-arcfpga/include/plat/irq.h b/arch/arc/plat-arcfpga/include/plat/irq.h
new file mode 100644
index 000000000000..41e335670f60
--- /dev/null
+++ b/arch/arc/plat-arcfpga/include/plat/irq.h
@@ -0,0 +1,31 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * vineetg: Feb 2009
9 * -For AA4 board, IRQ assignments to peripherals
10 */
11
12#ifndef __PLAT_IRQ_H
13#define __PLAT_IRQ_H
14
15#define UART0_IRQ 5
16#define UART1_IRQ 10
17#define UART2_IRQ 11
18
19#define VMAC_IRQ 6
20
21#define IDE_IRQ 13
22#define PCI_IRQ 14
23#define PS2_IRQ 15
24
25#ifdef CONFIG_SMP
26#define IDU_INTERRUPT_0 16
27#endif
28
29extern void __init plat_fpga_init_IRQ(void);
30
31#endif
diff --git a/arch/arc/plat-arcfpga/include/plat/memmap.h b/arch/arc/plat-arcfpga/include/plat/memmap.h
new file mode 100644
index 000000000000..1663f3388085
--- /dev/null
+++ b/arch/arc/plat-arcfpga/include/plat/memmap.h
@@ -0,0 +1,31 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * vineetg: Feb 2009
9 * -For AA4 board, System Memory Map for Peripherals etc
10 */
11
12#ifndef __PLAT_MEMMAP_H
13#define __PLAT_MEMMAP_H
14
15#define UART0_BASE 0xC0FC1000
16#define UART1_BASE 0xC0FC1100
17
18#define VMAC_REG_BASEADDR 0xC0FC2000
19
20#define IDE_CONTROLLER_BASE 0xC0FC9000
21
22#define AHB_PCI_HOST_BRG_BASE 0xC0FD0000
23
24#define PGU_BASEADDR 0xC0FC8000
25#define VLCK_ADDR 0xC0FCF028
26
27#define BVCI_LAT_UNIT_BASE 0xC0FED000
28
29#define PS2_BASE_ADDR 0xC0FCC000
30
31#endif
diff --git a/arch/arc/plat-arcfpga/include/plat/smp.h b/arch/arc/plat-arcfpga/include/plat/smp.h
new file mode 100644
index 000000000000..c09eb4cfc77c
--- /dev/null
+++ b/arch/arc/plat-arcfpga/include/plat/smp.h
@@ -0,0 +1,118 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Rajeshwar Ranga: Interrupt Distribution Unit APIs
9 */
10
11#ifndef __PLAT_ARCFPGA_SMP_H
12#define __PLAT_ARCFPGA_SMP_H
13
14#ifdef CONFIG_SMP
15
16#include <linux/types.h>
17#include <asm/arcregs.h>
18
19#define ARC_AUX_IDU_REG_CMD 0x2000
20#define ARC_AUX_IDU_REG_PARAM 0x2001
21
22#define ARC_AUX_XTL_REG_CMD 0x2002
23#define ARC_AUX_XTL_REG_PARAM 0x2003
24
25#define ARC_REG_MP_BCR 0x2021
26
27#define ARC_XTL_CMD_WRITE_PC 0x04
28#define ARC_XTL_CMD_CLEAR_HALT 0x02
29
30/*
31 * Build Configuration Register which identifies the sub-components
32 */
33struct bcr_mp {
34#ifdef CONFIG_CPU_BIG_ENDIAN
35 unsigned int mp_arch:16, pad:5, sdu:1, idu:1, scu:1, ver:8;
36#else
37 unsigned int ver:8, scu:1, idu:1, sdu:1, pad:5, mp_arch:16;
38#endif
39};
40
41/* IDU supports 256 common interrupts */
42#define NR_IDU_IRQS 256
43
44/*
45 * The Aux Regs layout is the same bit-by-bit in both BE/LE modes.
46 * However, when cast as a bitfield-encoded "C" struct, gcc treats it as
47 * memory, generating different code for BE/LE and requiring structure
48 * adjustment (see include/asm/arcregs.h).
49 *
50 * When manually "carving" the value for an Aux reg, though, no special
51 * handling of BE is needed because of the property described above.
52 */
53#define IDU_SET_COMMAND(irq, cmd) \
54do { \
55 uint32_t __val; \
56 __val = (((irq & 0xFF) << 8) | (cmd & 0xFF)); \
57 write_aux_reg(ARC_AUX_IDU_REG_CMD, __val); \
58} while (0)
59
60#define IDU_SET_PARAM(par) write_aux_reg(ARC_AUX_IDU_REG_PARAM, par)
61#define IDU_GET_PARAM() read_aux_reg(ARC_AUX_IDU_REG_PARAM)
62
63/* IDU Commands */
64#define IDU_DISABLE 0x00
65#define IDU_ENABLE 0x01
66#define IDU_IRQ_CLEAR 0x02
67#define IDU_IRQ_ASSERT 0x03
68#define IDU_IRQ_WMODE 0x04
69#define IDU_IRQ_STATUS 0x05
70#define IDU_IRQ_ACK 0x06
71#define IDU_IRQ_PEND 0x07
72#define IDU_IRQ_RMODE 0x08
73#define IDU_IRQ_WBITMASK 0x09
74#define IDU_IRQ_RBITMASK 0x0A
75
76#define idu_enable() IDU_SET_COMMAND(0, IDU_ENABLE)
77#define idu_disable() IDU_SET_COMMAND(0, IDU_DISABLE)
78
79#define idu_irq_assert(irq) IDU_SET_COMMAND((irq), IDU_IRQ_ASSERT)
80#define idu_irq_clear(irq) IDU_SET_COMMAND((irq), IDU_IRQ_CLEAR)
81
82/* IDU Interrupt Mode - Destination Encoding */
83#define IDU_IRQ_MOD_DISABLE 0x00
84#define IDU_IRQ_MOD_ROUND_RECP 0x01
85#define IDU_IRQ_MOD_TCPU_FIRSTRECP 0x02
86#define IDU_IRQ_MOD_TCPU_ALLRECP 0x03
87
88/* IDU Interrupt Mode - Triggering Mode */
89#define IDU_IRQ_MODE_LEVEL_TRIG 0x00
90#define IDU_IRQ_MODE_PULSE_TRIG 0x01
91
92#define IDU_IRQ_MODE_PARAM(dest_mode, trig_mode) \
93 (((trig_mode & 0x01) << 15) | (dest_mode & 0xFF))
94
95struct idu_irq_config {
96 uint8_t irq;
97 uint8_t dest_mode;
98 uint8_t trig_mode;
99};
100
101struct idu_irq_status {
102 uint8_t irq;
103 bool enabled;
104 bool status;
105 bool ack;
106 bool pend;
107 uint8_t next_rr;
108};
109
110extern void idu_irq_set_tgtcpu(uint8_t irq, uint32_t mask);
111extern void idu_irq_set_mode(uint8_t irq, uint8_t dest_mode, uint8_t trig_mode);
112
113extern void iss_model_init_smp(unsigned int cpu);
114extern void iss_model_init_early_smp(void);
115
116#endif /* CONFIG_SMP */
117
118#endif
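The command and mode words above are plain bit-packings; the following standalone sketch mirrors IDU_SET_COMMAND and IDU_IRQ_MODE_PARAM without touching any aux register (idu_cmd_word/idu_mode_param are invented names for illustration):

#include <stdint.h>
#include <stdio.h>

/* IDU_SET_COMMAND payload: common IRQ number in [15:8], command in [7:0] */
static uint32_t idu_cmd_word(uint8_t irq, uint8_t cmd)
{
	return ((uint32_t)(irq & 0xFF) << 8) | (cmd & 0xFF);
}

/* IDU_IRQ_MODE_PARAM: trigger mode in bit 15, destination mode in [7:0] */
static uint32_t idu_mode_param(uint8_t dest_mode, uint8_t trig_mode)
{
	return ((uint32_t)(trig_mode & 0x01) << 15) | (dest_mode & 0xFF);
}

int main(void)
{
	/* IDU_IRQ_ASSERT (0x03) on common IRQ 3 -> 0x0303 */
	printf("assert irq 3: 0x%04x\n", idu_cmd_word(3, 0x03));
	/* pulse trigger + all-recipients destination -> 0x8003 */
	printf("mode param:   0x%04x\n", idu_mode_param(0x03, 0x01));
	return 0;
}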
diff --git a/arch/arc/plat-arcfpga/irq.c b/arch/arc/plat-arcfpga/irq.c
new file mode 100644
index 000000000000..d2215fd889c2
--- /dev/null
+++ b/arch/arc/plat-arcfpga/irq.c
@@ -0,0 +1,25 @@
1/*
2 * ARC FPGA Platform IRQ hookups
3 *
4 * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/interrupt.h>
12#include <plat/irq.h>
13
14void __init plat_fpga_init_IRQ(void)
15{
16 /*
17 * SMP hack: the UART IRQ is hardwired to cpu0 (boot-cpu), but if the
18 * request_irq() comes from any other CPU, the low-level IRQ unmasking
19 * essential for receiving interrupts won't be done on cpu0, locking
20 * up the UART state machine.
21 */
22#ifdef CONFIG_SMP
23 arch_unmask_irq(UART0_IRQ);
24#endif
25}
diff --git a/arch/arc/plat-arcfpga/platform.c b/arch/arc/plat-arcfpga/platform.c
new file mode 100644
index 000000000000..4e20a1a5104d
--- /dev/null
+++ b/arch/arc/plat-arcfpga/platform.c
@@ -0,0 +1,226 @@
1/*
2 * ARC FPGA Platform support code
3 *
4 * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/types.h>
12#include <linux/init.h>
13#include <linux/device.h>
14#include <linux/platform_device.h>
15#include <linux/io.h>
16#include <linux/console.h>
17#include <linux/of_platform.h>
18#include <asm/setup.h>
19#include <asm/clk.h>
20#include <asm/mach_desc.h>
21#include <plat/memmap.h>
22#include <plat/smp.h>
23#include <plat/irq.h>
24
25/*-----------------------BVCI Latency Unit -----------------------------*/
26
27#ifdef CONFIG_ARC_HAS_BVCI_LAT_UNIT
28
29int lat_cycles = CONFIG_BVCI_LAT_CYCLES;
30
31/* BVCI Bus Profiler: Latency Unit */
32static void __init setup_bvci_lat_unit(void)
33{
34#define MAX_BVCI_UNITS 12
35
36 unsigned int i;
37 unsigned int *base = (unsigned int *)BVCI_LAT_UNIT_BASE;
38 const unsigned long units_req = CONFIG_BVCI_LAT_UNITS;
39 const unsigned int REG_UNIT = 21;
40 const unsigned int REG_VAL = 22;
41
42 /*
43 * There are multiple Latency Units corresponding to the many
44 * interfaces of the system bus arbiter (both CPU side as well as
45 * the peripheral side).
46 *
47 * Unit 0 - System Arb and Mem Controller - adds latency to all
48 * memory transactions
49 * Unit 1 - I$ and System Bus
50 * Unit 2 - D$ and System Bus
51 * ..
52 * Unit 12 - IDE Disk controller and System Bus
53 *
54 * The programmers model requires writing to lat_unit reg first
55 * and then the latency value (cycles) to lat_value reg
56 */
57
58 if (CONFIG_BVCI_LAT_UNITS == 0) {
59 writel(0, base + REG_UNIT);
60 writel(lat_cycles, base + REG_VAL);
61 pr_info("BVCI Latency for all Memory Transactions %d cycles\n",
62 lat_cycles);
63 } else {
64 for_each_set_bit(i, &units_req, MAX_BVCI_UNITS) {
65 writel(i + 1, base + REG_UNIT); /* loop is 0 based */
66 writel(lat_cycles, base + REG_VAL);
67 pr_info("BVCI Latency for Unit[%d] = %d cycles\n",
68 (i + 1), lat_cycles);
69 }
70 }
71}
72#else
73static void __init setup_bvci_lat_unit(void)
74{
75}
76#endif
77
78/*----------------------- Platform Devices -----------------------------*/
79
80static unsigned long arc_uart_info[] = {
81 0, /* uart->is_emulated (runtime @running_on_hw) */
82 0, /* uart->port.uartclk */
83 0, /* uart->baud */
84 0
85};
86
87#if defined(CONFIG_SERIAL_ARC_CONSOLE)
88/*
89 * static platform data - but only for early serial
90 * TBD: derive this from a special DT node
91 */
92static struct resource arc_uart0_res[] = {
93 {
94 .start = UART0_BASE,
95 .end = UART0_BASE + 0xFF,
96 .flags = IORESOURCE_MEM,
97 },
98 {
99 .start = UART0_IRQ,
100 .end = UART0_IRQ,
101 .flags = IORESOURCE_IRQ,
102 },
103};
104
105static struct platform_device arc_uart0_dev = {
106 .name = "arc-uart",
107 .id = 0,
108 .num_resources = ARRAY_SIZE(arc_uart0_res),
109 .resource = arc_uart0_res,
110 .dev = {
111 .platform_data = &arc_uart_info,
112 },
113};
114
115static struct platform_device *fpga_early_devs[] __initdata = {
116 &arc_uart0_dev,
117};
118#endif
119
120static void arc_fpga_serial_init(void)
121{
122 /* To let the driver work around an ISS bug: baudh Reg can't be set to 0 */
123 arc_uart_info[0] = !running_on_hw;
124
125 arc_uart_info[1] = arc_get_core_freq();
126
127 arc_uart_info[2] = CONFIG_ARC_SERIAL_BAUD;
128
129#if defined(CONFIG_SERIAL_ARC_CONSOLE)
130 early_platform_add_devices(fpga_early_devs,
131 ARRAY_SIZE(fpga_early_devs));
132
133 /*
134 * ARC console driver registers itself as an early platform driver
135 * of class "earlyprintk".
136 * Install it here, followed by probe of devices.
137 * The installation here doesn't require "earlyprintk" on the command
138 * line; to require it, replace the lines below with
139 * parse_early_param();
140 * early_platform_driver_probe("earlyprintk", 1, 1);
141 * ^^
142 */
143 early_platform_driver_register_all("earlyprintk");
144 early_platform_driver_probe("earlyprintk", 1, 0);
145
146 /*
147 * This is to make sure that the arc uart is the preferred console
148 * despite one/more of the following:
149 * -command line lacked "console=ttyARC0", or
150 * -CONFIG_VT_CONSOLE was enabled (for no reason whatsoever)
151 * Note that this needs to be done after the early console above is
152 * registered, otherwise the early console never gets a chance to run.
153 */
154 add_preferred_console("ttyARC", 0, "115200");
155#endif
156}
157
158static void __init plat_fpga_early_init(void)
159{
160 pr_info("[plat-arcfpga]: registering early dev resources\n");
161
162 setup_bvci_lat_unit();
163
164 arc_fpga_serial_init();
165
166#ifdef CONFIG_SMP
167 iss_model_init_early_smp();
168#endif
169}
170
171static struct of_dev_auxdata plat_auxdata_lookup[] __initdata = {
172#if defined(CONFIG_SERIAL_ARC) || defined(CONFIG_SERIAL_ARC_MODULE)
173 OF_DEV_AUXDATA("snps,arc-uart", UART0_BASE, "arc-uart", arc_uart_info),
174#endif
175 {}
176};
177
178static void __init plat_fpga_populate_dev(void)
179{
180 pr_info("[plat-arcfpga]: registering device resources\n");
181
182 /*
183 * Traverses flattened DeviceTree - registering platform devices
184 * complete with their resources
185 */
186 of_platform_populate(NULL, of_default_bus_match_table,
187 plat_auxdata_lookup, NULL);
188}
189
190/*----------------------- Machine Descriptions ------------------------------
191 *
192 * A machine description is simply a set of platform/board-specific
193 * callbacks. It is not directly related to DeviceTree based dynamic
194 * device creation; however, as part of the early device tree scan, we
195 * also select the right callback set by matching the DT compatible name.
196 */
197
198static const char *aa4_compat[] __initdata = {
199 "snps,arc-angel4",
200 NULL,
201};
202
203MACHINE_START(ANGEL4, "angel4")
204 .dt_compat = aa4_compat,
205 .init_early = plat_fpga_early_init,
206 .init_machine = plat_fpga_populate_dev,
207 .init_irq = plat_fpga_init_IRQ,
208#ifdef CONFIG_SMP
209 .init_smp = iss_model_init_smp,
210#endif
211MACHINE_END
212
213static const char *ml509_compat[] __initdata = {
214 "snps,arc-ml509",
215 NULL,
216};
217
218MACHINE_START(ML509, "ml509")
219 .dt_compat = ml509_compat,
220 .init_early = plat_fpga_early_init,
221 .init_machine = plat_fpga_populate_dev,
222 .init_irq = plat_fpga_init_IRQ,
223#ifdef CONFIG_SMP
224 .init_smp = iss_model_init_smp,
225#endif
226MACHINE_END
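The latency-unit programming model (select a unit via one register, then write the cycle count via the next) is easy to model outside the kernel. In this sketch, mock_writel and the mmio array are stand-ins for writel() and the real MMIO window; the register offsets and the 0-based-bitmap adjustment mirror setup_bvci_lat_unit() above:

#include <stdint.h>
#include <stdio.h>

#define MAX_BVCI_UNITS 12
#define REG_UNIT 21
#define REG_VAL  22

static uint32_t mmio[32];			/* mock MMIO window */
static uint32_t unit_latency[MAX_BVCI_UNITS + 1];

static void mock_writel(uint32_t v, uint32_t *addr)
{
	*addr = v;
	if (addr == &mmio[REG_VAL])		/* value write commits the pair */
		unit_latency[mmio[REG_UNIT]] = v;
}

int main(void)
{
	unsigned long units = 0x803;		/* units 1, 2 and 12 */
	unsigned int i;

	for (i = 0; i < MAX_BVCI_UNITS; i++) {
		if (!(units & (1UL << i)))
			continue;
		mock_writel(i + 1, &mmio[REG_UNIT]);	/* bitmap is 0 based */
		mock_writel(30, &mmio[REG_VAL]);
	}
	for (i = 1; i <= MAX_BVCI_UNITS; i++)
		printf("unit %u -> %u cycles\n", i, unit_latency[i]);
	return 0;
}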
diff --git a/arch/arc/plat-arcfpga/smp.c b/arch/arc/plat-arcfpga/smp.c
new file mode 100644
index 000000000000..91b55349a5f8
--- /dev/null
+++ b/arch/arc/plat-arcfpga/smp.c
@@ -0,0 +1,171 @@
1/*
2 * ARC700 Simulation-only Extensions for SMP
3 *
4 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Vineet Gupta - 2012 : split off arch common and plat specific SMP
11 * Rajeshwar Ranga - 2007 : Interrupt Distribution Unit APIs
12 */
13
14#include <linux/smp.h>
15#include <linux/irq.h>
16#include <plat/irq.h>
17#include <plat/smp.h>
18
19static char smp_cpuinfo_buf[128];
20
21/*
22 *-------------------------------------------------------------------
23 * Platform specific callbacks expected by arch SMP code
24 *-------------------------------------------------------------------
25 */
26
27/*
28 * Master kick-starting another CPU
29 */
30static void iss_model_smp_wakeup_cpu(int cpu, unsigned long pc)
31{
32 /* setup the start PC */
33 write_aux_reg(ARC_AUX_XTL_REG_PARAM, pc);
34
35 /* Trigger WRITE_PC cmd for this cpu */
36 write_aux_reg(ARC_AUX_XTL_REG_CMD,
37 (ARC_XTL_CMD_WRITE_PC | (cpu << 8)));
38
39 /* Take the cpu out of Halt */
40 write_aux_reg(ARC_AUX_XTL_REG_CMD,
41 (ARC_XTL_CMD_CLEAR_HALT | (cpu << 8)));
42
43}
44
45/*
46 * Any SMP-specific init a CPU does when it comes up.
47 * Here we set up the CPU to enable Inter-Processor-Interrupts.
48 * Called for each CPU
49 * -Master : init_IRQ()
50 * -Other(s) : start_kernel_secondary()
51 */
52void iss_model_init_smp(unsigned int cpu)
53{
54 /* Check if CPU is configured for more than 16 interrupts */
55 if (NR_IRQS <= 16 || get_hw_config_num_irq() <= 16)
56 panic("[arcfpga] IRQ system can't support IDU IPI\n");
57
58 idu_disable();
59
60 /****************************************************************
61 * IDU provides a set of Common IRQs, each of which can be dynamically
62 * attached to (1|many|all) CPUs.
63 * The Common IRQs [0-15] are mapped as CPU pvt [16-31]
64 *
65 * Here we use a simple 1:1 mapping:
66 * A CPU 'x' is wired to Common IRQ 'x'.
67 * So an IDU ASSERT on IRQ 'x' will trigger an Interrupt on CPU 'x',
68 * which provides our simple IPI plumbing.
69 *
70 * TBD: Have a dedicated multicast IRQ for sending IPIs to all CPUs
71 * w/o having to do one-at-a-time
72 ******************************************************************/
73
74 /*
75 * Claim an IRQ which would trigger IPI on this CPU.
76 * In IDU parlance it involves setting up a cpu bitmask for the IRQ.
77 * The bitmask here contains only 1 CPU (self).
78 */
79 idu_irq_set_tgtcpu(cpu, 0x1 << cpu);
80
81 /* Set the IRQ destination to use the bitmask above */
82 idu_irq_set_mode(cpu, 7, /* XXX: IDU_IRQ_MOD_TCPU_ALLRECP: ISS bug */
83 IDU_IRQ_MODE_PULSE_TRIG);
84
85 idu_enable();
86
87 /* Attach the arch-common IPI ISR to our IDU IRQ */
88 smp_ipi_irq_setup(cpu, IDU_INTERRUPT_0 + cpu);
89}
90
91static void iss_model_ipi_send(void *arg)
92{
93 struct cpumask *callmap = arg;
94 unsigned int cpu;
95
96 for_each_cpu(cpu, callmap)
97 idu_irq_assert(cpu);
98}
99
100static void iss_model_ipi_clear(int cpu, int irq)
101{
102 idu_irq_clear(IDU_INTERRUPT_0 + cpu);
103}
104
105void iss_model_init_early_smp(void)
106{
107#define IS_AVAIL1(var, str) ((var) ? str : "")
108
109 struct bcr_mp mp;
110
111 READ_BCR(ARC_REG_MP_BCR, mp);
112
113 sprintf(smp_cpuinfo_buf, "Extn [ISS-SMP]: v%d, arch(%d) %s %s %s\n",
114 mp.ver, mp.mp_arch, IS_AVAIL1(mp.scu, "SCU"),
115 IS_AVAIL1(mp.idu, "IDU"), IS_AVAIL1(mp.sdu, "SDU"));
116
117 plat_smp_ops.info = smp_cpuinfo_buf;
118
119 plat_smp_ops.cpu_kick = iss_model_smp_wakeup_cpu;
120 plat_smp_ops.ipi_send = iss_model_ipi_send;
121 plat_smp_ops.ipi_clear = iss_model_ipi_clear;
122}
123
124/*
125 *-------------------------------------------------------------------
126 * Low level Platform IPI Providers
127 *-------------------------------------------------------------------
128 */
129
130/* Set the Mode for the Common IRQ */
131void idu_irq_set_mode(uint8_t irq, uint8_t dest_mode, uint8_t trig_mode)
132{
133 uint32_t par = IDU_IRQ_MODE_PARAM(dest_mode, trig_mode);
134
135 IDU_SET_PARAM(par);
136 IDU_SET_COMMAND(irq, IDU_IRQ_WMODE);
137}
138
139/* Set the target cpu Bitmask for Common IRQ */
140void idu_irq_set_tgtcpu(uint8_t irq, uint32_t mask)
141{
142 IDU_SET_PARAM(mask);
143 IDU_SET_COMMAND(irq, IDU_IRQ_WBITMASK);
144}
145
146/* Get the Interrupt Acknowledged status for IRQ (as CPU Bitmask) */
147bool idu_irq_get_ack(uint8_t irq)
148{
149 uint32_t val;
150
151 IDU_SET_COMMAND(irq, IDU_IRQ_ACK);
152 val = IDU_GET_PARAM();
153
154 return val & (1 << irq);
155}
156
157/*
158 * Get the Interrupt Pending status for IRQ (as CPU Bitmask)
159 * -Pending means CPU has not yet noticed the IRQ (e.g. disabled)
160 * -After the Interrupt has been taken, the IPI explicitly needs to
161 * be cleared, to be acknowledged.
162 */
163bool idu_irq_get_pend(uint8_t irq)
164{
165 uint32_t val;
166
167 IDU_SET_COMMAND(irq, IDU_IRQ_PEND);
168 val = IDU_GET_PARAM();
169
170 return val & (1 << irq);
171}
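The net effect of the wiring in iss_model_init_smp() is that asserting common IRQ x interrupts exactly CPU x. A toy model of that routing (the model_* names are invented and nothing here touches IDU registers):

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

static uint32_t tgt_mask[NR_CPUS];	/* per common-IRQ CPU bitmask */

static void model_irq_set_tgtcpu(int irq, uint32_t mask)
{
	tgt_mask[irq] = mask;
}

static void model_irq_assert(int irq)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (tgt_mask[irq] & (1u << cpu))
			printf("IRQ %d -> interrupt CPU %d\n", irq, cpu);
}

int main(void)
{
	int cpu;

	/* the 1:1 setup done per CPU in iss_model_init_smp() */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		model_irq_set_tgtcpu(cpu, 1u << cpu);

	model_irq_assert(2);		/* IPI reaches CPU 2 only */
	return 0;
}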
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 4d5ea7648574..a2a47d9d6a22 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -18,6 +18,7 @@ config PARISC
18 18 select ARCH_HAVE_NMI_SAFE_CMPXCHG
19 19 select GENERIC_SMP_IDLE_THREAD
20 20 select GENERIC_STRNCPY_FROM_USER
21 select SYSCTL_ARCH_UNALIGN_ALLOW
21 22 select HAVE_MOD_ARCH_SPECIFIC
22 23 select HAVE_VIRT_TO_BUS
23 24 select MODULES_USE_ELF_RELA
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
index c084767c88bc..59811df58c5b 100644
--- a/include/asm-generic/checksum.h
+++ b/include/asm-generic/checksum.h
@@ -38,12 +38,15 @@ extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
38 38 csum_partial_copy((src), (dst), (len), (sum))
39 39 #endif
40 40
41#ifndef ip_fast_csum
41 42 /*
42 43 * This is a version of ip_compute_csum() optimized for IP headers,
43 44 * which always checksum on 4 octet boundaries.
44 45 */
45 46 extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
47#endif
46 48
49#ifndef csum_fold
47 50 /*
48 51 * Fold a partial checksum
49 52 */
@@ -54,6 +57,7 @@ static inline __sum16 csum_fold(__wsum csum)
54 57 sum = (sum & 0xffff) + (sum >> 16);
55 58 return (__force __sum16)~sum;
56 59 }
60#endif
57 61
58 62 #ifndef csum_tcpudp_nofold
59 63 /*
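These hunks use the standard kernel override idiom: an arch header that provides its own ip_fast_csum()/csum_fold() also defines a same-named macro, so the generic declaration compiles out. A self-contained user-space sketch of the idiom for csum_fold (the double fold matches the generic helper's arithmetic):

#include <stdint.h>
#include <stdio.h>

static inline uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* first fold */
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the carry */
	return (uint16_t)~sum;
}
#define csum_fold csum_fold	/* generic #ifndef block now compiles out */

#ifndef csum_fold
#error "not reached: the override above hides the generic fallback"
#endif

int main(void)
{
	printf("fold(0x1fffe) = 0x%04x\n", csum_fold(0x1fffe));	/* 0x0000 */
	return 0;
}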
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 9788568f7978..c184aa8ec8cd 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -7,7 +7,6 @@
7 7 * address space, e.g. all NOMMU machines.
8 8 */
9 9 #include <linux/sched.h>
10#include <linux/mm.h>
11 10 #include <linux/string.h>
12 11
13 12 #include <asm/segment.h>
@@ -32,7 +31,9 @@ static inline void set_fs(mm_segment_t fs)
32 31 }
33 32 #endif
34 33
34#ifndef segment_eq
35 35 #define segment_eq(a, b) ((a).seg == (b).seg)
36#endif
36 37
37 38 #define VERIFY_READ 0
38 39 #define VERIFY_WRITE 1
@@ -168,12 +169,18 @@ static inline __must_check long __copy_to_user(void __user *to,
168 169 -EFAULT; \
169 170 })
170 171
172#ifndef __put_user_fn
173
171 174 static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
172 175 {
173 176 size = __copy_to_user(ptr, x, size);
174 177 return size ? -EFAULT : size;
175 178 }
176 179
180#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
181
182#endif
183
177 184 extern int __put_user_bad(void) __attribute__((noreturn));
178 185
179 186 #define __get_user(x, ptr) \
@@ -224,12 +231,17 @@ extern int __put_user_bad(void) __attribute__((noreturn));
224 231 -EFAULT; \
225 232 })
226 233
234#ifndef __get_user_fn
227 235 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
228 236 {
229 237 size = __copy_from_user(x, ptr, size);
230 238 return size ? -EFAULT : size;
231 239 }
232 240
241#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
242
243#endif
244
233 245 extern int __get_user_bad(void) __attribute__((noreturn));
234 246
235 247 #ifndef __copy_from_user_inatomic
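The uaccess fast paths get the same treatment: the generic __put_user_fn/__get_user_fn become fallbacks an arch can replace by defining the function plus the self-referencing macro. A user-space sketch with a stubbed __copy_from_user (the stub and the literal -14 standing in for -EFAULT are assumptions of the sketch):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* stub: pretend the copy always succeeds; returns bytes NOT copied */
static size_t __copy_from_user(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

static inline int __get_user_fn(size_t size, const void *ptr, void *x)
{
	size = __copy_from_user(x, ptr, size);
	return size ? -14 /* -EFAULT */ : (int)size;
}
#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)

int main(void)
{
	int src = 42, dst = 0;
	int ret = __get_user_fn(sizeof(dst), &src, &dst);

	printf("ret=%d dst=%d\n", ret, dst);	/* ret=0 dst=42 */
	return 0;
}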
diff --git a/init/Kconfig b/init/Kconfig
index 0a5e80fb9ba2..22616cd434bc 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1230,6 +1230,14 @@ config SYSCTL_ARCH_UNALIGN_NO_WARN
1230 1230 Allows arch to define/use @no_unaligned_warning to possibly warn
1231 1231 about unaligned access emulation going on under the hood.
1232 1232
1233config SYSCTL_ARCH_UNALIGN_ALLOW
1234 bool
1235 help
1236 Enable support for /proc/sys/kernel/unaligned-trap
1237 Allows arches to define/use @unaligned_enabled to toggle the
1238 unaligned access emulation at runtime.
1239 See arch/parisc/kernel/unaligned.c for reference.
1240
1233 1241 config KALLSYMS
1234 1242 bool "Load all symbols for debugging/ksymoops" if EXPERT
1235 1243 default y
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index d1b4ee67d2df..afc1dc60f3f8 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -157,6 +157,9 @@ extern int sysctl_tsb_ratio;
157 157
158 158 #ifdef __hppa__
159 159 extern int pwrsw_enabled;
160#endif
161
162#ifdef CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW
160 163 extern int unaligned_enabled;
161 164 #endif
162 165
@@ -555,6 +558,8 @@ static struct ctl_table kern_table[] = {
555 558 .mode = 0644,
556 559 .proc_handler = proc_dointvec,
557 560 },
561#endif
562#ifdef CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW
558 563 {
559 564 .procname = "unaligned-trap",
560 565 .data = &unaligned_enabled,
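With CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW selected (as the parisc hunk above now does), the knob appears as /proc/sys/kernel/unaligned-trap. A minimal sketch of toggling it from user space:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/unaligned-trap", "w");

	if (!f) {
		perror("unaligned-trap");
		return 1;
	}
	fputs("1\n", f);	/* enable unaligned access emulation */
	fclose(f);
	return 0;
}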
diff --git a/lib/checksum.c b/lib/checksum.c
index 12dceb27ff20..129775eb6de6 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -102,6 +102,7 @@ out:
102 102 }
103 103 #endif
104 104
105#ifndef ip_fast_csum
105 106 /*
106 107 * This is a version of ip_compute_csum() optimized for IP headers,
107 108 * which always checksum on 4 octet boundaries.
@@ -111,6 +112,7 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
111 112 return (__force __sum16)~do_csum(iph, ihl*4);
112 113 }
113 114 EXPORT_SYMBOL(ip_fast_csum);
115#endif
114 116
115 117 /*
116 118 * computes the checksum of a memory block at buff, length len,
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index c2206c87fc9f..d5818c98d051 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -94,6 +94,12 @@
94 94 #define CPUINFO_PROC "cpu model"
95 95 #endif
96 96
97#ifdef __arc__
98#define rmb() asm volatile("" ::: "memory")
99#define cpu_relax() rmb()
100#define CPUINFO_PROC "Processor"
101#endif
102
97 103 #include <time.h>
98 104 #include <unistd.h>
99 105 #include <sys/types.h>
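Both __arc__ definitions are pure compiler barriers: an empty asm with a "memory" clobber. The sketch below shows why that suffices for perf's spin loops; the barrier forces the compiler to reload shared state on every iteration instead of caching it in a register:

#include <stdio.h>

#define rmb()		asm volatile("" ::: "memory")
#define cpu_relax()	rmb()

static int flag;			/* imagine another context sets this */

static void wait_for_flag(void)
{
	while (!flag)
		cpu_relax();		/* forces a fresh load of flag */
}

int main(void)
{
	flag = 1;			/* trivially satisfied here */
	wait_for_flag();
	puts("done");
	return 0;
}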