author		Linus Torvalds <torvalds@linux-foundation.org>	2016-06-09 17:28:39 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-06-09 17:28:39 -0400
commit		52e7d46c642ae7762b44cc3203a816ebaa4b6b8b (patch)
tree		4ca2dead9b4cb0a6b5356f0cd730d7485d11e96a
parent		c8ae067f2635be0f8c7e5db1bb74b757d623e05b (diff)
parent		ed6aefed726a305bd36344e230d2a9e9301226fc (diff)
Merge tag 'arc-4.7-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
Pull ARC fixes from Vineet Gupta:

 - Revert of ll-sc backoff retry workaround in atomics/spinlocks as
   hardware is now proven to work just fine

 - Typo fixes (Thanks Andrea Gelmini)

 - Removal of obsolete DT property (Alexey)

 - Other minor fixes

* tag 'arc-4.7-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  Revert "ARCv2: spinlock/rwlock/atomics: Delayed retry of failed SCOND with exponential backoff"
  Revert "ARCv2: spinlock/rwlock: Reset retry delay when starting a new spin-wait cycle"
  Revert "ARCv2: spinlock/rwlock/atomics: reduce 1 instruction in exponential backoff"
  ARC: don't enable DISCONTIGMEM unconditionally
  ARC: [intc-compact] simplify code for 2 priority levels
  arc: Get rid of root core-frequency property
  Fix typos
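For context on the first bullet: the reverted workaround deferred retries of a failed SCOND (store-conditional) behind an exponentially growing busy-wait. The C-style sketch below only illustrates that idea; the kernel's actual implementation was inline assembly guarded by CONFIG_ARC_STAR_9000923308 (removed in the diff below), and load_locked()/store_conditional() here are hypothetical stand-ins for the LLOCK/SCOND instructions:

	/*
	 * Illustrative sketch of "delayed retry with exponential backoff".
	 * load_locked()/store_conditional() are hypothetical helpers, not
	 * kernel APIs; cpu_relax() is the kernel's usual busy-wait hint.
	 */
	static inline void atomic_add_with_backoff(int i, volatile int *counter)
	{
		unsigned int delay = 1;
		int val;

		for (;;) {
			val = load_locked(counter);		/* llock */
			val += i;
			if (store_conditional(counter, val))	/* scond */
				break;				/* store succeeded */

			/* scond failed: wait ~delay iterations, then double it */
			for (unsigned int tmp = delay; tmp != 0; tmp--)
				cpu_relax();
			delay *= 2;
		}
	}

The reverts restore the original behaviour of simply retrying ("bnz 1b") right after a failed SCOND.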
-rw-r--r--	arch/arc/Kconfig	31
-rw-r--r--	arch/arc/Makefile	2
-rw-r--r--	arch/arc/boot/dts/abilis_tb100.dtsi	2
-rw-r--r--	arch/arc/boot/dts/abilis_tb101.dtsi	2
-rw-r--r--	arch/arc/boot/dts/axc001.dtsi	1
-rw-r--r--	arch/arc/boot/dts/axc003.dtsi	1
-rw-r--r--	arch/arc/boot/dts/axc003_idu.dtsi	1
-rw-r--r--	arch/arc/boot/dts/eznps.dts	1
-rw-r--r--	arch/arc/boot/dts/nsim_700.dts	1
-rw-r--r--	arch/arc/boot/dts/nsimosci.dts	1
-rw-r--r--	arch/arc/boot/dts/nsimosci_hs.dts	1
-rw-r--r--	arch/arc/boot/dts/nsimosci_hs_idu.dts	1
-rw-r--r--	arch/arc/boot/dts/skeleton.dtsi	1
-rw-r--r--	arch/arc/boot/dts/skeleton_hs.dtsi	1
-rw-r--r--	arch/arc/boot/dts/skeleton_hs_idu.dtsi	1
-rw-r--r--	arch/arc/boot/dts/vdk_axc003.dtsi	1
-rw-r--r--	arch/arc/boot/dts/vdk_axc003_idu.dtsi	1
-rw-r--r--	arch/arc/include/asm/atomic.h	45
-rw-r--r--	arch/arc/include/asm/entry-compact.h	4
-rw-r--r--	arch/arc/include/asm/mmu_context.h	2
-rw-r--r--	arch/arc/include/asm/pgtable.h	2
-rw-r--r--	arch/arc/include/asm/processor.h	2
-rw-r--r--	arch/arc/include/asm/smp.h	2
-rw-r--r--	arch/arc/include/asm/spinlock.h	292
-rw-r--r--	arch/arc/include/asm/thread_info.h	2
-rw-r--r--	arch/arc/include/asm/uaccess.h	2
-rw-r--r--	arch/arc/include/uapi/asm/swab.h	2
-rw-r--r--	arch/arc/kernel/entry-compact.S	18
-rw-r--r--	arch/arc/kernel/intc-compact.c	6
-rw-r--r--	arch/arc/kernel/perf_event.c	2
-rw-r--r--	arch/arc/kernel/setup.c	2
-rw-r--r--	arch/arc/kernel/signal.c	2
-rw-r--r--	arch/arc/kernel/troubleshoot.c	2
-rw-r--r--	arch/arc/mm/cache.c	6
-rw-r--r--	arch/arc/mm/dma.c	2
35 files changed, 30 insertions, 415 deletions
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 0dcbacfdea4b..0d3e59f56974 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -61,7 +61,7 @@ config RWSEM_GENERIC_SPINLOCK
 	def_bool y
 
 config ARCH_DISCONTIGMEM_ENABLE
-	def_bool y
+	def_bool n
 
 config ARCH_FLATMEM_ENABLE
 	def_bool y
@@ -186,9 +186,6 @@ if SMP
 config ARC_HAS_COH_CACHES
 	def_bool n
 
-config ARC_HAS_REENTRANT_IRQ_LV2
-	def_bool n
-
 config ARC_MCIP
 	bool "ARConnect Multicore IP (MCIP) Support "
 	depends on ISA_ARCV2
@@ -366,25 +363,10 @@ config NODES_SHIFT
 if ISA_ARCOMPACT
 
 config ARC_COMPACT_IRQ_LEVELS
-	bool "ARCompact IRQ Priorities: High(2)/Low(1)"
+	bool "Setup Timer IRQ as high Priority"
 	default n
-	# Timer HAS to be high priority, for any other high priority config
-	select ARC_IRQ3_LV2
 	# if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
-	depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2
-
-if ARC_COMPACT_IRQ_LEVELS
-
-config ARC_IRQ3_LV2
-	bool
-
-config ARC_IRQ5_LV2
-	bool
-
-config ARC_IRQ6_LV2
-	bool
-
-endif	#ARC_COMPACT_IRQ_LEVELS
+	depends on !SMP
 
 config ARC_FPU_SAVE_RESTORE
 	bool "Enable FPU state persistence across context switch"
@@ -407,11 +389,6 @@ config ARC_HAS_LLSC
 	default y
 	depends on !ARC_CANT_LLSC
 
-config ARC_STAR_9000923308
-	bool "Workaround for llock/scond livelock"
-	default n
-	depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
-
 config ARC_HAS_SWAPE
 	bool "Insn: SWAPE (endian-swap)"
 	default y
@@ -471,7 +448,7 @@ config LINUX_LINK_BASE
 
 config HIGHMEM
 	bool "High Memory Support"
-	select DISCONTIGMEM
+	select ARCH_DISCONTIGMEM_ENABLE
 	help
 	  With ARC 2G:2G address split, only upper 2G is directly addressable by
 	  kernel. Enable this to potentially allow access to rest of 2G and PAE
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 02fabef2891c..d4df6be66d58 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -127,7 +127,7 @@ libs-y += arch/arc/lib/ $(LIBGCC)
 
 boot := arch/arc/boot
 
-#default target for make without any arguements.
+#default target for make without any arguments.
 KBUILD_IMAGE := bootpImage
 
 all: $(KBUILD_IMAGE)
diff --git a/arch/arc/boot/dts/abilis_tb100.dtsi b/arch/arc/boot/dts/abilis_tb100.dtsi
index 3942634f805a..02410b211433 100644
--- a/arch/arc/boot/dts/abilis_tb100.dtsi
+++ b/arch/arc/boot/dts/abilis_tb100.dtsi
@@ -23,8 +23,6 @@
 
 
 / {
-	clock-frequency = <500000000>;	/* 500 MHZ */
-
 	soc100 {
 		bus-frequency = <166666666>;
 
diff --git a/arch/arc/boot/dts/abilis_tb101.dtsi b/arch/arc/boot/dts/abilis_tb101.dtsi
index b0467229a5c4..f9e7686044eb 100644
--- a/arch/arc/boot/dts/abilis_tb101.dtsi
+++ b/arch/arc/boot/dts/abilis_tb101.dtsi
@@ -23,8 +23,6 @@
 
 
 / {
-	clock-frequency = <500000000>;	/* 500 MHZ */
-
 	soc100 {
 		bus-frequency = <166666666>;
 
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
index 3e02f152edcb..6ae2c476ad82 100644
--- a/arch/arc/boot/dts/axc001.dtsi
+++ b/arch/arc/boot/dts/axc001.dtsi
@@ -15,7 +15,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <750000000>;	/* 750 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index 378e455a94c4..14df46f141bf 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -14,7 +14,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <90000000>;
 	#address-cells = <1>;
 	#size-cells = <1>;
 
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
index 64c94b2860ab..3d6cfa32bf51 100644
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -14,7 +14,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <90000000>;
 	#address-cells = <1>;
 	#size-cells = <1>;
 
diff --git a/arch/arc/boot/dts/eznps.dts b/arch/arc/boot/dts/eznps.dts
index b89f6c3eb352..1e0d225791c1 100644
--- a/arch/arc/boot/dts/eznps.dts
+++ b/arch/arc/boot/dts/eznps.dts
@@ -18,7 +18,6 @@
 
 / {
 	compatible = "ezchip,arc-nps";
-	clock-frequency = <83333333>;	/* 83.333333 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&intc>;
diff --git a/arch/arc/boot/dts/nsim_700.dts b/arch/arc/boot/dts/nsim_700.dts
index 5d5e373e0ebc..63970513e4ae 100644
--- a/arch/arc/boot/dts/nsim_700.dts
+++ b/arch/arc/boot/dts/nsim_700.dts
@@ -11,7 +11,6 @@
 
 / {
 	compatible = "snps,nsim";
-	clock-frequency = <80000000>;	/* 80 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&core_intc>;
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index b5b060adce8a..763d66c883da 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -11,7 +11,6 @@
 
 / {
 	compatible = "snps,nsimosci";
-	clock-frequency = <20000000>;	/* 20 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&core_intc>;
diff --git a/arch/arc/boot/dts/nsimosci_hs.dts b/arch/arc/boot/dts/nsimosci_hs.dts
index 325e73090a18..4eb97c584b18 100644
--- a/arch/arc/boot/dts/nsimosci_hs.dts
+++ b/arch/arc/boot/dts/nsimosci_hs.dts
@@ -11,7 +11,6 @@
 
 / {
 	compatible = "snps,nsimosci_hs";
-	clock-frequency = <20000000>;	/* 20 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&core_intc>;
diff --git a/arch/arc/boot/dts/nsimosci_hs_idu.dts b/arch/arc/boot/dts/nsimosci_hs_idu.dts
index ee03d7126581..853f897eb2a3 100644
--- a/arch/arc/boot/dts/nsimosci_hs_idu.dts
+++ b/arch/arc/boot/dts/nsimosci_hs_idu.dts
@@ -11,7 +11,6 @@
 
 / {
 	compatible = "snps,nsimosci_hs";
-	clock-frequency = <5000000>;	/* 5 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&core_intc>;
diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi
index 3a10cc633e2b..65808fe0a290 100644
--- a/arch/arc/boot/dts/skeleton.dtsi
+++ b/arch/arc/boot/dts/skeleton.dtsi
@@ -13,7 +13,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <80000000>;	/* 80 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	chosen { };
diff --git a/arch/arc/boot/dts/skeleton_hs.dtsi b/arch/arc/boot/dts/skeleton_hs.dtsi
index 71fd308a9298..2dfe8037dfbb 100644
--- a/arch/arc/boot/dts/skeleton_hs.dtsi
+++ b/arch/arc/boot/dts/skeleton_hs.dtsi
@@ -8,7 +8,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <80000000>;	/* 80 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	chosen { };
diff --git a/arch/arc/boot/dts/skeleton_hs_idu.dtsi b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
index d1cb25a66989..4c11079f3565 100644
--- a/arch/arc/boot/dts/skeleton_hs_idu.dtsi
+++ b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
@@ -8,7 +8,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <80000000>;	/* 80 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	chosen { };
diff --git a/arch/arc/boot/dts/vdk_axc003.dtsi b/arch/arc/boot/dts/vdk_axc003.dtsi
index ad4ee43bd2ac..0fd6ba985b16 100644
--- a/arch/arc/boot/dts/vdk_axc003.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003.dtsi
@@ -14,7 +14,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <50000000>;
 	#address-cells = <1>;
 	#size-cells = <1>;
 
diff --git a/arch/arc/boot/dts/vdk_axc003_idu.dtsi b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
index a3cb6263c581..82214cd7ba0c 100644
--- a/arch/arc/boot/dts/vdk_axc003_idu.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
@@ -15,7 +15,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <50000000>;
 	#address-cells = <1>;
 	#size-cells = <1>;
 
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 5f3dcbbc0cc9..dd683995bc9d 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -25,50 +25,17 @@
 
 #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
 
-#ifdef CONFIG_ARC_STAR_9000923308
-
-#define SCOND_FAIL_RETRY_VAR_DEF					\
-	unsigned int delay = 1, tmp;					\
-
-#define SCOND_FAIL_RETRY_ASM						\
-	"	bz	4f			\n"			\
-	"	; --- scond fail delay ---	\n"			\
-	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */ \
-	"2:	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */ \
-	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */	\
-	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */ \
-	"	b	1b			\n"	/* start over */ \
-	"4: ; --- success ---			\n"			\
-
-#define SCOND_FAIL_RETRY_VARS						\
-	  ,[delay] "+&r" (delay),[tmp]	"=&r"	(tmp)			\
-
-#else	/* !CONFIG_ARC_STAR_9000923308 */
-
-#define SCOND_FAIL_RETRY_VAR_DEF
-
-#define SCOND_FAIL_RETRY_ASM						\
-	"	bnz     1b			\n"			\
-
-#define SCOND_FAIL_RETRY_VARS
-
-#endif
-
 #define ATOMIC_OP(op, c_op, asm_op)					\
 static inline void atomic_##op(int i, atomic_t *v)			\
 {									\
 	unsigned int val;						\
-	SCOND_FAIL_RETRY_VAR_DEF					\
 									\
 	__asm__ __volatile__(						\
 	"1:	llock   %[val], [%[ctr]]		\n"		\
 	"	" #asm_op " %[val], %[val], %[i]	\n"		\
 	"	scond   %[val], [%[ctr]]		\n"		\
-	"						\n"		\
-	SCOND_FAIL_RETRY_ASM						\
-									\
+	"	bnz     1b				\n"		\
 	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
-	  SCOND_FAIL_RETRY_VARS						\
 	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
 	  [i]	"ir"	(i)						\
 	: "cc");							\
@@ -77,8 +44,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
 static inline int atomic_##op##_return(int i, atomic_t *v)		\
 {									\
 	unsigned int val;						\
-	SCOND_FAIL_RETRY_VAR_DEF					\
 									\
 	/*								\
 	 * Explicit full memory barrier needed before/after as		\
@@ -90,11 +56,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	"1:	llock   %[val], [%[ctr]]		\n"		\
 	"	" #asm_op " %[val], %[val], %[i]	\n"		\
 	"	scond   %[val], [%[ctr]]		\n"		\
-	"						\n"		\
-	SCOND_FAIL_RETRY_ASM						\
-									\
+	"	bnz     1b				\n"		\
 	: [val]	"=&r"	(val)						\
-	  SCOND_FAIL_RETRY_VARS						\
 	: [ctr]	"r"	(&v->counter),					\
 	  [i]	"ir"	(i)						\
 	: "cc");							\
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index e0e1faf03c50..14c310f2e0b1 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -76,8 +76,8 @@
  * We need to be a bit more cautious here. What if a kernel bug in
  * L1 ISR, caused SP to go whaco (some small value which looks like
  * USER stk) and then we take L2 ISR.
- * Above brlo alone would treat it as a valid L1-L2 sceanrio
- * instead of shouting alound
+ * Above brlo alone would treat it as a valid L1-L2 scenario
+ * instead of shouting around
  * The only feasible way is to make sure this L2 happened in
  * L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
  * L1 ISR before it switches stack
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 1fd467ef658f..b0b87f2447f5 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -83,7 +83,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 	local_flush_tlb_all();
 
 	/*
-	 * Above checke for rollover of 8 bit ASID in 32 bit container.
+	 * Above check for rollover of 8 bit ASID in 32 bit container.
 	 * If the container itself wrapped around, set it to a non zero
 	 * "generation" to distinguish from no context
 	 */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 034bbdc0ff61..858f98ef7f1b 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -47,7 +47,7 @@
  * Page Tables are purely for Linux VM's consumption and the bits below are
  * suited to that (uniqueness). Hence some are not implemented in the TLB and
  * some have different value in TLB.
- * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
  * seperate PD0 and PD1, which combined forms a translation entry)
  * while for PTE perspective, they are 8 and 9 respectively
  * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index f9048994b22f..16b630fbeb6a 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -78,7 +78,7 @@ struct task_struct;
 #define KSTK_ESP(tsk)   (task_pt_regs(tsk)->sp)
 
 /*
- * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
+ * Where about of Task's sp, fp, blink when it was last seen in kernel mode.
  * Look in process.c for details of kernel stack layout
  */
 #define TSK_K_ESP(tsk)		(tsk->thread.ksp)
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 991380438d6b..89fdd1b0a76e 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
  * (1) These insn were introduced only in 4.10 release. So for older released
  *     support needed.
  *
- * (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be
+ * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
  *     gaurantted by the platform (not something which core handles).
  *     Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
  *     disabling for atomicity.
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index 800e7c430ca5..cded4a9b5438 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -20,11 +20,6 @@
 
 #ifdef CONFIG_ARC_HAS_LLSC
 
-/*
- * A normal LLOCK/SCOND based system, w/o need for livelock workaround
- */
-#ifndef CONFIG_ARC_STAR_9000923308
-
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned int val;
@@ -238,293 +233,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 	smp_mb();
 }
 
-#else	/* CONFIG_ARC_STAR_9000923308 */
-
-/*
- * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
- * coherency transactions in the SCU. The exclusive line state keeps rotating
- * among contenting cores leading to a never ending cycle. So break the cycle
- * by deferring the retry of failed exclusive access (SCOND). The actual delay
- * needed is function of number of contending cores as well as the unrelated
- * coherency traffic from other cores. To keep the code simple, start off with
- * small delay of 1 which would suffice most cases and in case of contention
- * double the delay. Eventually the delay is sufficient such that the coherency
- * pipeline is drained, thus a subsequent exclusive access would succeed.
- */
-
-#define SCOND_FAIL_RETRY_VAR_DEF					\
-	unsigned int delay, tmp;					\
-
-#define SCOND_FAIL_RETRY_ASM						\
-	"	; --- scond fail delay ---	\n"			\
-	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */ \
-	"2:	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */ \
-	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */	\
-	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */ \
-	"	b	1b			\n"	/* start over */ \
-	"					\n"			\
-	"4: ; --- done ---			\n"			\
-
-#define SCOND_FAIL_RETRY_VARS						\
-	  ,[delay] "=&r" (delay), [tmp]	"=&r"	(tmp)			\
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[slock]]	\n"
-	"	breq	%[val], %[LOCKED], 0b	\n"	/* spin while LOCKED */
-	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
-	"	bz	4f			\n"	/* done */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [slock]	"r"	(&(lock->slock)),
-	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[slock]]	\n"
-	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
-	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [slock]	"r"	(&(lock->slock)),
-	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	smp_mb();
-
-	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
-	smp_mb();
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
- */
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	/*
-	 * zero means writer holds the lock exclusively, deny Reader.
-	 * Otherwise grant lock to first/subseq reader
-	 *
-	 *	if (rw->counter > 0) {
-	 *		rw->counter--;
-	 *		ret = 1;
-	 *	}
-	 */
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brls	%[val], %[WR_LOCKED], 0b\n"	/* <= 0: spin while write locked */
-	"	sub	%[val], %[val], 1	\n"	/* reader lock */
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz	4f			\n"	/* done */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
-	"	sub	%[val], %[val], 1	\n"	/* counter-- */
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	/*
-	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
-	 * deny writer. Otherwise if unlocked grant to writer
-	 * Hence the claim that Linux rwlocks are unfair to writers.
-	 * (can be starved for an indefinite time by readers).
-	 *
-	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
-	 *		rw->counter = 0;
-	 *		ret = 1;
-	 *	}
-	 */
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brne	%[val], %[UNLOCKED], 0b	\n"	/* while !UNLOCKED spin */
-	"	mov	%[val], %[WR_LOCKED]	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz	4f			\n"
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
-	"	mov	%[val], %[WR_LOCKED]	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-
-	smp_mb();
-
-	/*
-	 * rw->counter++;
-	 */
-	__asm__ __volatile__(
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	add	%[val], %[val], 1	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bnz	1b			\n"
-	"					\n"
-	: [val]		"=&r"	(val)
-	: [rwlock]	"r"	(&(rw->counter))
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-
-	smp_mb();
-
-	/*
-	 * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
-	 */
-	__asm__ __volatile__(
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	scond	%[UNLOCKED], [%[rwlock]]\n"
-	"	bnz	1b			\n"
-	"					\n"
-	: [val]		"=&r"	(val)
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"r"	(__ARCH_RW_LOCK_UNLOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-#undef SCOND_FAIL_RETRY_VAR_DEF
-#undef SCOND_FAIL_RETRY_ASM
-#undef SCOND_FAIL_RETRY_VARS
-
-#endif	/* CONFIG_ARC_STAR_9000923308 */
-
 #else	/* !CONFIG_ARC_HAS_LLSC */
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
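The spinlock code retained by this hunk is the plain LLOCK/SCOND loop (its beginning is visible in the first hunk's context above). A conceptual C rendering is shown below; it is illustrative only, the kernel keeps using the inline assembly, and the GCC __atomic builtins here merely approximate llock/scond semantics:

	/* Conceptual C analogue of the retained LLOCK/SCOND spin-lock fast path. */
	static inline void spin_lock_sketch(volatile unsigned int *slock)
	{
		const unsigned int locked = 1;	/* stands in for __ARCH_SPIN_LOCK_LOCKED__ */
		unsigned int val;

		for (;;) {
			val = __atomic_load_n(slock, __ATOMIC_RELAXED);	/* llock */
			if (val == locked)
				continue;	/* spin while someone else holds it */
			/*
			 * scond: the store succeeds only if nobody touched the
			 * line meanwhile; on failure just retry ("bnz 1b").
			 */
			if (__atomic_compare_exchange_n(slock, &val, locked, true,
							__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
				break;
		}
	}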
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 3af67455659a..2d79e527fa50 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -103,7 +103,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
 
 /*
  * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
- * SYSCALL_TRACE is anways seperately/unconditionally tested right after a
+ * SYSCALL_TRACE is anyway seperately/unconditionally tested right after a
  * syscall, so all that reamins to be tested is _TIF_WORK_MASK
  */
 
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index d1da6032b715..a78d5670884f 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -32,7 +32,7 @@
 #define __kernel_ok		(segment_eq(get_fs(), KERNEL_DS))
 
 /*
- * Algorthmically, for __user_ok() we want do:
+ * Algorithmically, for __user_ok() we want do:
  *	(start < TASK_SIZE) && (start+len < TASK_SIZE)
  * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
  * emitted directly in code.
diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h
index 095599a73195..71f3918b0fc3 100644
--- a/arch/arc/include/uapi/asm/swab.h
+++ b/arch/arc/include/uapi/asm/swab.h
@@ -74,7 +74,7 @@
 	__tmp ^ __in;						\
 })
 
-#elif (ARC_BSWAP_TYPE == 2)	/* Custom single cycle bwap instruction */
+#elif (ARC_BSWAP_TYPE == 2)	/* Custom single cycle bswap instruction */
 
 #define __arch_swab32(x)					\
 ({								\
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S
index 0cb0abaa0479..98812c1248df 100644
--- a/arch/arc/kernel/entry-compact.S
+++ b/arch/arc/kernel/entry-compact.S
@@ -91,27 +91,13 @@ VECTOR mem_service ; 0x8, Mem exception (0x1)
 VECTOR   instr_service           ; 0x10, Instrn Error   (0x2)
 
 ; ******************** Device ISRs **********************
-#ifdef CONFIG_ARC_IRQ3_LV2
-VECTOR   handle_interrupt_level2
-#else
-VECTOR   handle_interrupt_level1
-#endif
-
-VECTOR   handle_interrupt_level1
-
-#ifdef CONFIG_ARC_IRQ5_LV2
-VECTOR   handle_interrupt_level2
-#else
-VECTOR   handle_interrupt_level1
-#endif
-
-#ifdef CONFIG_ARC_IRQ6_LV2
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
 VECTOR   handle_interrupt_level2
 #else
 VECTOR   handle_interrupt_level1
 #endif
 
-.rept 25
+.rept 28
 VECTOR   handle_interrupt_level1 ; Other devices
 .endr
 
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index c5cceca36118..ce9deb953ca9 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -28,10 +28,8 @@ void arc_init_IRQ(void)
 {
 	int level_mask = 0;
 
-	/* setup any high priority Interrupts (Level2 in ARCompact jargon) */
-	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
-	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
-	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
+	/* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
+	level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;
 
 	/*
 	 * Write to register, even if no LV2 IRQs configured to reset it
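The simplified code keys the whole level mask off a single Kconfig switch. Assuming TIMER0_IRQ is 3 on ARCompact (an assumption; check arch/arc/include/asm/irq.h for the actual value), the net effect with CONFIG_ARC_COMPACT_IRQ_LEVELS=y is roughly:

	int level_mask = 0;
	level_mask |= 1 << 3;	/* IS_ENABLED(...) evaluates to 1; only the
				 * timer IRQ line is promoted to Level 2 */

i.e. the same bit the old CONFIG_ARC_IRQ3_LV2 path set, while the separate IRQ5/IRQ6 options disappear entirely.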
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 6fd48021324b..08f03d9b5b3e 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -108,7 +108,7 @@ static void arc_perf_event_update(struct perf_event *event,
 	int64_t delta = new_raw_count - prev_raw_count;
 
 	/*
-	 * We don't afaraid of hwc->prev_count changing beneath our feet
+	 * We aren't afraid of hwc->prev_count changing beneath our feet
 	 * because there's no way for us to re-enter this function anytime.
 	 */
 	local64_set(&hwc->prev_count, new_raw_count);
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index f63b8bfefb0c..2ee7a4d758a8 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -392,7 +392,7 @@ void __init setup_arch(char **cmdline_p)
 	/*
 	 * If we are here, it is established that @uboot_arg didn't
 	 * point to DT blob. Instead if u-boot says it is cmdline,
-	 * Appent to embedded DT cmdline.
+	 * append to embedded DT cmdline.
 	 * setup_machine_fdt() would have populated @boot_command_line
 	 */
 	if (uboot_tag == 1) {
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 004b7f0bc76c..6cb3736b6b83 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -34,7 +34,7 @@
  *  -ViXS were still seeing crashes when using insmod to load drivers.
  *     It turned out that the code to change Execute permssions for TLB entries
  *     of user was not guarded for interrupts (mod_tlb_permission)
- *     This was cauing TLB entries to be overwritten on unrelated indexes
+ *     This was causing TLB entries to be overwritten on unrelated indexes
  *
  * Vineetg: July 15th 2008: Bug #94183
  *  -Exception happens in Delay slot of a JMP, and before user space resumes,
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index a6f91e88ce36..934150e7ac48 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -276,7 +276,7 @@ static int tlb_stats_open(struct inode *inode, struct file *file)
 	return 0;
 }
 
-/* called on user read(): display the couters */
+/* called on user read(): display the counters */
 static ssize_t tlb_stats_output(struct file *file,	/* file descriptor */
 			char __user *user_buf,	/* user buffer */
 			size_t len,		/* length of buffer */
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 9e5eddbb856f..5a294b2c3cb3 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -215,7 +215,7 @@ slc_chk:
  * ------------------
  * This ver of MMU supports variable page sizes (1k-16k): although Linux will
  * only support 8k (default), 16k and 4k.
- * However from hardware perspective, smaller page sizes aggrevate aliasing
+ * However from hardware perspective, smaller page sizes aggravate aliasing
  * meaning more vaddr bits needed to disambiguate the cache-line-op ;
  * the existing scheme of piggybacking won't work for certain configurations.
  * Two new registers IC_PTAG and DC_PTAG inttoduced.
@@ -302,7 +302,7 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
 
 	/*
 	 * This is technically for MMU v4, using the MMU v3 programming model
-	 * Special work for HS38 aliasing I-cache configuratino with PAE40
+	 * Special work for HS38 aliasing I-cache configuration with PAE40
 	 *   - upper 8 bits of paddr need to be written into PTAG_HI
 	 *   - (and needs to be written before the lower 32 bits)
 	 * Note that PTAG_HI is hoisted outside the line loop
@@ -936,7 +936,7 @@ void arc_cache_init(void)
 			      ic->ver, CONFIG_ARC_MMU_VER);
 
 		/*
-		 * In MMU v4 (HS38x) the alising icache config uses IVIL/PTAG
+		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
 		 * pair to provide vaddr/paddr respectively, just as in MMU v3
 		 */
 		if (is_isa_arcv2() && ic->alias)
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 8c8e36fa5659..73d7e4c75b7d 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -10,7 +10,7 @@
  * DMA Coherent API Notes
  *
  * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
- * implemented by accessintg it using a kernel virtual address, with
+ * implemented by accessing it using a kernel virtual address, with
  * Cache bit off in the TLB entry.
  *
  * The default DMA address == Phy address which is 0x8000_0000 based.