about summary refs log tree commit diff stats
path: root/arch/sh/include
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-08-06 09:00:05 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-08-06 09:00:05 -0400
commit1630e843e104528ddf0208dfc692c70c9bd05a89 (patch)
treeb75d583b719a0b1398162aceccf2f443e7d36858 /arch/sh/include
parent194d6ad32e1eef433c61040385dcfd98e6fe2ef9 (diff)
parente61c10e468a42512f5fad74c00b62af5cc19f65f (diff)
Merge tag 'sh-for-4.8' of git://git.libc.org/linux-sh
Pull arch/sh updates from Rich Felker: "These changes improve device tree support (including builtin DTB), add support for the J-Core J2 processor, an open source synthesizable reimplementation of the SH-2 ISA, resolve a longstanding sigcontext ABI mismatch issue, and fix various bugs including nommu-specific issues and minor regressions introduced in 4.6. The J-Core arch support is included here but to be usable it needs drivers that are waiting on approval/inclusion from their subsystem maintainers" * tag 'sh-for-4.8' of git://git.libc.org/linux-sh: (23 commits) sh: add device tree source for J2 FPGA on Mimas v2 board sh: add defconfig for J-Core J2 sh: use common clock framework with device tree boards sh: system call wire up sh: Delete unnecessary checks before the function call "mempool_destroy" sh: do not perform IPI-based cache flush except on boards that need it sh: add SMP support for J2 sh: SMP support for SH2 entry.S sh: add working futex atomic ops on userspace addresses for smp sh: add J2 atomics using the cas.l instruction sh: add AT_HWCAP flag for J-Core cas.l instruction sh: add support for J-Core J2 processor sh: fix build regression with CONFIG_OF && !CONFIG_OF_FLATTREE sh: allow clocksource drivers to register sched_clock backends sh: make heartbeat driver explicitly non-modular sh: make board-secureedge5410 explicitly non-modular sh: make mm/asids-debugfs explicitly non-modular sh: make time.c explicitly non-modular sh: fix futex/robust_list on nommu models sh: disable aliased page logic on NOMMU models ...
Diffstat (limited to 'arch/sh/include')
-rw-r--r--arch/sh/include/asm/atomic.h8
-rw-r--r--arch/sh/include/asm/barrier.h5
-rw-r--r--arch/sh/include/asm/bitops-cas.h93
-rw-r--r--arch/sh/include/asm/bitops.h2
-rw-r--r--arch/sh/include/asm/cmpxchg-cas.h24
-rw-r--r--arch/sh/include/asm/cmpxchg-xchg.h2
-rw-r--r--arch/sh/include/asm/cmpxchg.h2
-rw-r--r--arch/sh/include/asm/futex-cas.h34
-rw-r--r--arch/sh/include/asm/futex-irq.h86
-rw-r--r--arch/sh/include/asm/futex-llsc.h41
-rw-r--r--arch/sh/include/asm/futex.h97
-rw-r--r--arch/sh/include/asm/processor.h2
-rw-r--r--arch/sh/include/asm/spinlock-cas.h117
-rw-r--r--arch/sh/include/asm/spinlock-llsc.h224
-rw-r--r--arch/sh/include/asm/spinlock.h222
-rw-r--r--arch/sh/include/uapi/asm/cpu-features.h1
-rw-r--r--arch/sh/include/uapi/asm/sigcontext.h3
-rw-r--r--arch/sh/include/uapi/asm/unistd_32.h16
-rw-r--r--arch/sh/include/uapi/asm/unistd_64.h16
19 files changed, 648 insertions, 347 deletions
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index c399e1c55685..8a7bd80c8b33 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -1,6 +1,12 @@
1#ifndef __ASM_SH_ATOMIC_H 1#ifndef __ASM_SH_ATOMIC_H
2#define __ASM_SH_ATOMIC_H 2#define __ASM_SH_ATOMIC_H
3 3
4#if defined(CONFIG_CPU_J2)
5
6#include <asm-generic/atomic.h>
7
8#else
9
4/* 10/*
5 * Atomic operations that C can't guarantee us. Useful for 11 * Atomic operations that C can't guarantee us. Useful for
6 * resource counting etc.. 12 * resource counting etc..
@@ -63,4 +69,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
63 return c; 69 return c;
64} 70}
65 71
72#endif /* CONFIG_CPU_J2 */
73
66#endif /* __ASM_SH_ATOMIC_H */ 74#endif /* __ASM_SH_ATOMIC_H */
diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h
index 8a84e05adb2e..3c30b6e166b6 100644
--- a/arch/sh/include/asm/barrier.h
+++ b/arch/sh/include/asm/barrier.h
@@ -29,6 +29,11 @@
29#define wmb() mb() 29#define wmb() mb()
30#define ctrl_barrier() __icbi(PAGE_OFFSET) 30#define ctrl_barrier() __icbi(PAGE_OFFSET)
31#else 31#else
32#if defined(CONFIG_CPU_J2) && defined(CONFIG_SMP)
33#define __smp_mb() do { int tmp = 0; __asm__ __volatile__ ("cas.l %0,%0,@%1" : "+r"(tmp) : "z"(&tmp) : "memory", "t"); } while(0)
34#define __smp_rmb() __smp_mb()
35#define __smp_wmb() __smp_mb()
36#endif
32#define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop") 37#define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
33#endif 38#endif
34 39
diff --git a/arch/sh/include/asm/bitops-cas.h b/arch/sh/include/asm/bitops-cas.h
new file mode 100644
index 000000000000..88f793c04d3c
--- /dev/null
+++ b/arch/sh/include/asm/bitops-cas.h
@@ -0,0 +1,93 @@
1#ifndef __ASM_SH_BITOPS_CAS_H
2#define __ASM_SH_BITOPS_CAS_H
3
4static inline unsigned __bo_cas(volatile unsigned *p, unsigned old, unsigned new)
5{
6 __asm__ __volatile__("cas.l %1,%0,@r0"
7 : "+r"(new)
8 : "r"(old), "z"(p)
9 : "t", "memory" );
10 return new;
11}
12
13static inline void set_bit(int nr, volatile void *addr)
14{
15 unsigned mask, old;
16 volatile unsigned *a = addr;
17
18 a += nr >> 5;
19 mask = 1U << (nr & 0x1f);
20
21 do old = *a;
22 while (__bo_cas(a, old, old|mask) != old);
23}
24
25static inline void clear_bit(int nr, volatile void *addr)
26{
27 unsigned mask, old;
28 volatile unsigned *a = addr;
29
30 a += nr >> 5;
31 mask = 1U << (nr & 0x1f);
32
33 do old = *a;
34 while (__bo_cas(a, old, old&~mask) != old);
35}
36
37static inline void change_bit(int nr, volatile void *addr)
38{
39 unsigned mask, old;
40 volatile unsigned *a = addr;
41
42 a += nr >> 5;
43 mask = 1U << (nr & 0x1f);
44
45 do old = *a;
46 while (__bo_cas(a, old, old^mask) != old);
47}
48
49static inline int test_and_set_bit(int nr, volatile void *addr)
50{
51 unsigned mask, old;
52 volatile unsigned *a = addr;
53
54 a += nr >> 5;
55 mask = 1U << (nr & 0x1f);
56
57 do old = *a;
58 while (__bo_cas(a, old, old|mask) != old);
59
60 return !!(old & mask);
61}
62
63static inline int test_and_clear_bit(int nr, volatile void *addr)
64{
65 unsigned mask, old;
66 volatile unsigned *a = addr;
67
68 a += nr >> 5;
69 mask = 1U << (nr & 0x1f);
70
71 do old = *a;
72 while (__bo_cas(a, old, old&~mask) != old);
73
74 return !!(old & mask);
75}
76
77static inline int test_and_change_bit(int nr, volatile void *addr)
78{
79 unsigned mask, old;
80 volatile unsigned *a = addr;
81
82 a += nr >> 5;
83 mask = 1U << (nr & 0x1f);
84
85 do old = *a;
86 while (__bo_cas(a, old, old^mask) != old);
87
88 return !!(old & mask);
89}
90
91#include <asm-generic/bitops/non-atomic.h>
92
93#endif /* __ASM_SH_BITOPS_CAS_H */
diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h
index fc8e652cf173..a8699d60a8c4 100644
--- a/arch/sh/include/asm/bitops.h
+++ b/arch/sh/include/asm/bitops.h
@@ -18,6 +18,8 @@
18#include <asm/bitops-op32.h> 18#include <asm/bitops-op32.h>
19#elif defined(CONFIG_CPU_SH4A) 19#elif defined(CONFIG_CPU_SH4A)
20#include <asm/bitops-llsc.h> 20#include <asm/bitops-llsc.h>
21#elif defined(CONFIG_CPU_J2) && defined(CONFIG_SMP)
22#include <asm/bitops-cas.h>
21#else 23#else
22#include <asm-generic/bitops/atomic.h> 24#include <asm-generic/bitops/atomic.h>
23#include <asm-generic/bitops/non-atomic.h> 25#include <asm-generic/bitops/non-atomic.h>
diff --git a/arch/sh/include/asm/cmpxchg-cas.h b/arch/sh/include/asm/cmpxchg-cas.h
new file mode 100644
index 000000000000..d0d86649e8c1
--- /dev/null
+++ b/arch/sh/include/asm/cmpxchg-cas.h
@@ -0,0 +1,24 @@
1#ifndef __ASM_SH_CMPXCHG_CAS_H
2#define __ASM_SH_CMPXCHG_CAS_H
3
4static inline unsigned long
5__cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new)
6{
7 __asm__ __volatile__("cas.l %1,%0,@r0"
8 : "+r"(new)
9 : "r"(old), "z"(m)
10 : "t", "memory" );
11 return new;
12}
13
14static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
15{
16 unsigned long old;
17 do old = *m;
18 while (__cmpxchg_u32(m, old, val) != old);
19 return old;
20}
21
22#include <asm/cmpxchg-xchg.h>
23
24#endif /* __ASM_SH_CMPXCHG_CAS_H */
diff --git a/arch/sh/include/asm/cmpxchg-xchg.h b/arch/sh/include/asm/cmpxchg-xchg.h
index 7219719c23a3..1e881f5db659 100644
--- a/arch/sh/include/asm/cmpxchg-xchg.h
+++ b/arch/sh/include/asm/cmpxchg-xchg.h
@@ -21,7 +21,7 @@ static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size)
21 int off = (unsigned long)ptr % sizeof(u32); 21 int off = (unsigned long)ptr % sizeof(u32);
22 volatile u32 *p = ptr - off; 22 volatile u32 *p = ptr - off;
23#ifdef __BIG_ENDIAN 23#ifdef __BIG_ENDIAN
24 int bitoff = (sizeof(u32) - 1 - off) * BITS_PER_BYTE; 24 int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
25#else 25#else
26 int bitoff = off * BITS_PER_BYTE; 26 int bitoff = off * BITS_PER_BYTE;
27#endif 27#endif
diff --git a/arch/sh/include/asm/cmpxchg.h b/arch/sh/include/asm/cmpxchg.h
index 5225916c1057..3dfe0467a773 100644
--- a/arch/sh/include/asm/cmpxchg.h
+++ b/arch/sh/include/asm/cmpxchg.h
@@ -13,6 +13,8 @@
13#include <asm/cmpxchg-grb.h> 13#include <asm/cmpxchg-grb.h>
14#elif defined(CONFIG_CPU_SH4A) 14#elif defined(CONFIG_CPU_SH4A)
15#include <asm/cmpxchg-llsc.h> 15#include <asm/cmpxchg-llsc.h>
16#elif defined(CONFIG_CPU_J2) && defined(CONFIG_SMP)
17#include <asm/cmpxchg-cas.h>
16#else 18#else
17#include <asm/cmpxchg-irq.h> 19#include <asm/cmpxchg-irq.h>
18#endif 20#endif
diff --git a/arch/sh/include/asm/futex-cas.h b/arch/sh/include/asm/futex-cas.h
new file mode 100644
index 000000000000..267cb7a5f101
--- /dev/null
+++ b/arch/sh/include/asm/futex-cas.h
@@ -0,0 +1,34 @@
1#ifndef __ASM_SH_FUTEX_CAS_H
2#define __ASM_SH_FUTEX_CAS_H
3
4static inline int atomic_futex_op_cmpxchg_inatomic(u32 *uval,
5 u32 __user *uaddr,
6 u32 oldval, u32 newval)
7{
8 int err = 0;
9 __asm__ __volatile__(
10 "1:\n\t"
11 "cas.l %2, %1, @r0\n"
12 "2:\n\t"
13#ifdef CONFIG_MMU
14 ".section .fixup,\"ax\"\n"
15 "3:\n\t"
16 "mov.l 4f, %0\n\t"
17 "jmp @%0\n\t"
18 " mov %3, %0\n\t"
19 ".balign 4\n"
20 "4: .long 2b\n\t"
21 ".previous\n"
22 ".section __ex_table,\"a\"\n\t"
23 ".long 1b, 3b\n\t"
24 ".previous"
25#endif
26 :"+r" (err), "+r" (newval)
27 :"r" (oldval), "i" (-EFAULT), "z" (uaddr)
28 :"t", "memory");
29 if (err) return err;
30 *uval = newval;
31 return 0;
32}
33
34#endif /* __ASM_SH_FUTEX_CAS_H */
diff --git a/arch/sh/include/asm/futex-irq.h b/arch/sh/include/asm/futex-irq.h
index 63d33129ea23..ab01dbee0a82 100644
--- a/arch/sh/include/asm/futex-irq.h
+++ b/arch/sh/include/asm/futex-irq.h
@@ -1,92 +1,6 @@
1#ifndef __ASM_SH_FUTEX_IRQ_H 1#ifndef __ASM_SH_FUTEX_IRQ_H
2#define __ASM_SH_FUTEX_IRQ_H 2#define __ASM_SH_FUTEX_IRQ_H
3 3
4
5static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr,
6 int *oldval)
7{
8 unsigned long flags;
9 int ret;
10
11 local_irq_save(flags);
12
13 ret = get_user(*oldval, uaddr);
14 if (!ret)
15 ret = put_user(oparg, uaddr);
16
17 local_irq_restore(flags);
18
19 return ret;
20}
21
22static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr,
23 int *oldval)
24{
25 unsigned long flags;
26 int ret;
27
28 local_irq_save(flags);
29
30 ret = get_user(*oldval, uaddr);
31 if (!ret)
32 ret = put_user(*oldval + oparg, uaddr);
33
34 local_irq_restore(flags);
35
36 return ret;
37}
38
39static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr,
40 int *oldval)
41{
42 unsigned long flags;
43 int ret;
44
45 local_irq_save(flags);
46
47 ret = get_user(*oldval, uaddr);
48 if (!ret)
49 ret = put_user(*oldval | oparg, uaddr);
50
51 local_irq_restore(flags);
52
53 return ret;
54}
55
56static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr,
57 int *oldval)
58{
59 unsigned long flags;
60 int ret;
61
62 local_irq_save(flags);
63
64 ret = get_user(*oldval, uaddr);
65 if (!ret)
66 ret = put_user(*oldval & oparg, uaddr);
67
68 local_irq_restore(flags);
69
70 return ret;
71}
72
73static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr,
74 int *oldval)
75{
76 unsigned long flags;
77 int ret;
78
79 local_irq_save(flags);
80
81 ret = get_user(*oldval, uaddr);
82 if (!ret)
83 ret = put_user(*oldval ^ oparg, uaddr);
84
85 local_irq_restore(flags);
86
87 return ret;
88}
89
90static inline int atomic_futex_op_cmpxchg_inatomic(u32 *uval, 4static inline int atomic_futex_op_cmpxchg_inatomic(u32 *uval,
91 u32 __user *uaddr, 5 u32 __user *uaddr,
92 u32 oldval, u32 newval) 6 u32 oldval, u32 newval)
diff --git a/arch/sh/include/asm/futex-llsc.h b/arch/sh/include/asm/futex-llsc.h
new file mode 100644
index 000000000000..23591703bec0
--- /dev/null
+++ b/arch/sh/include/asm/futex-llsc.h
@@ -0,0 +1,41 @@
1#ifndef __ASM_SH_FUTEX_LLSC_H
2#define __ASM_SH_FUTEX_LLSC_H
3
4static inline int atomic_futex_op_cmpxchg_inatomic(u32 *uval,
5 u32 __user *uaddr,
6 u32 oldval, u32 newval)
7{
8 int err = 0;
9 __asm__ __volatile__(
10 "synco\n"
11 "1:\n\t"
12 "movli.l @%2, r0\n\t"
13 "mov r0, %1\n\t"
14 "cmp/eq %1, %4\n\t"
15 "bf 2f\n\t"
16 "mov %5, r0\n\t"
17 "movco.l r0, @%2\n\t"
18 "bf 1b\n"
19 "2:\n\t"
20 "synco\n\t"
21#ifdef CONFIG_MMU
22 ".section .fixup,\"ax\"\n"
23 "3:\n\t"
24 "mov.l 4f, %0\n\t"
25 "jmp @%0\n\t"
26 " mov %3, %0\n\t"
27 ".balign 4\n"
28 "4: .long 2b\n\t"
29 ".previous\n"
30 ".section __ex_table,\"a\"\n\t"
31 ".long 1b, 3b\n\t"
32 ".previous"
33#endif
34 :"+r" (err), "=&r" (*uval)
35 :"r" (uaddr), "i" (-EFAULT), "r" (oldval), "r" (newval)
36 :"t", "memory", "r0");
37 if (err) return err;
38 return 0;
39}
40
41#endif /* __ASM_SH_FUTEX_LLSC_H */
diff --git a/arch/sh/include/asm/futex.h b/arch/sh/include/asm/futex.h
index 7be39a646fbd..d0078747d308 100644
--- a/arch/sh/include/asm/futex.h
+++ b/arch/sh/include/asm/futex.h
@@ -7,16 +7,34 @@
7#include <linux/uaccess.h> 7#include <linux/uaccess.h>
8#include <asm/errno.h> 8#include <asm/errno.h>
9 9
10/* XXX: UP variants, fix for SH-4A and SMP.. */ 10#if !defined(CONFIG_SMP)
11#include <asm/futex-irq.h> 11#include <asm/futex-irq.h>
12#elif defined(CONFIG_CPU_J2)
13#include <asm/futex-cas.h>
14#elif defined(CONFIG_CPU_SH4A)
15#include <asm/futex-llsc.h>
16#else
17#error SMP not supported on this configuration.
18#endif
19
20static inline int
21futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
22 u32 oldval, u32 newval)
23{
24 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
25 return -EFAULT;
26
27 return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval);
28}
12 29
13static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) 30static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
14{ 31{
15 int op = (encoded_op >> 28) & 7; 32 int op = (encoded_op >> 28) & 7;
16 int cmp = (encoded_op >> 24) & 15; 33 int cmp = (encoded_op >> 24) & 15;
17 int oparg = (encoded_op << 8) >> 20; 34 u32 oparg = (encoded_op << 8) >> 20;
18 int cmparg = (encoded_op << 20) >> 20; 35 u32 cmparg = (encoded_op << 20) >> 20;
19 int oldval = 0, ret; 36 u32 oldval, newval, prev;
37 int ret;
20 38
21 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) 39 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
22 oparg = 1 << oparg; 40 oparg = 1 << oparg;
@@ -26,26 +44,39 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
26 44
27 pagefault_disable(); 45 pagefault_disable();
28 46
29 switch (op) { 47 do {
30 case FUTEX_OP_SET: 48 if (op == FUTEX_OP_SET)
31 ret = atomic_futex_op_xchg_set(oparg, uaddr, &oldval); 49 ret = oldval = 0;
32 break; 50 else
33 case FUTEX_OP_ADD: 51 ret = get_user(oldval, uaddr);
34 ret = atomic_futex_op_xchg_add(oparg, uaddr, &oldval); 52
35 break; 53 if (ret) break;
36 case FUTEX_OP_OR: 54
37 ret = atomic_futex_op_xchg_or(oparg, uaddr, &oldval); 55 switch (op) {
38 break; 56 case FUTEX_OP_SET:
39 case FUTEX_OP_ANDN: 57 newval = oparg;
40 ret = atomic_futex_op_xchg_and(~oparg, uaddr, &oldval); 58 break;
41 break; 59 case FUTEX_OP_ADD:
42 case FUTEX_OP_XOR: 60 newval = oldval + oparg;
43 ret = atomic_futex_op_xchg_xor(oparg, uaddr, &oldval); 61 break;
44 break; 62 case FUTEX_OP_OR:
45 default: 63 newval = oldval | oparg;
46 ret = -ENOSYS; 64 break;
47 break; 65 case FUTEX_OP_ANDN:
48 } 66 newval = oldval & ~oparg;
67 break;
68 case FUTEX_OP_XOR:
69 newval = oldval ^ oparg;
70 break;
71 default:
72 ret = -ENOSYS;
73 break;
74 }
75
76 if (ret) break;
77
78 ret = futex_atomic_cmpxchg_inatomic(&prev, uaddr, oldval, newval);
79 } while (!ret && prev != oldval);
49 80
50 pagefault_enable(); 81 pagefault_enable();
51 82
@@ -53,10 +84,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
53 switch (cmp) { 84 switch (cmp) {
54 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; 85 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
55 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; 86 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
56 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; 87 case FUTEX_OP_CMP_LT: ret = ((int)oldval < (int)cmparg); break;
57 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; 88 case FUTEX_OP_CMP_GE: ret = ((int)oldval >= (int)cmparg); break;
58 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; 89 case FUTEX_OP_CMP_LE: ret = ((int)oldval <= (int)cmparg); break;
59 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; 90 case FUTEX_OP_CMP_GT: ret = ((int)oldval > (int)cmparg); break;
60 default: ret = -ENOSYS; 91 default: ret = -ENOSYS;
61 } 92 }
62 } 93 }
@@ -64,15 +95,5 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
64 return ret; 95 return ret;
65} 96}
66 97
67static inline int
68futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
69 u32 oldval, u32 newval)
70{
71 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
72 return -EFAULT;
73
74 return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval);
75}
76
77#endif /* __KERNEL__ */ 98#endif /* __KERNEL__ */
78#endif /* __ASM_SH_FUTEX_H */ 99#endif /* __ASM_SH_FUTEX_H */
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index 1506897648aa..f9a09942a32d 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -15,7 +15,7 @@
15 */ 15 */
16enum cpu_type { 16enum cpu_type {
17 /* SH-2 types */ 17 /* SH-2 types */
18 CPU_SH7619, 18 CPU_SH7619, CPU_J2,
19 19
20 /* SH-2A types */ 20 /* SH-2A types */
21 CPU_SH7201, CPU_SH7203, CPU_SH7206, CPU_SH7263, CPU_SH7264, CPU_SH7269, 21 CPU_SH7201, CPU_SH7203, CPU_SH7206, CPU_SH7263, CPU_SH7264, CPU_SH7269,
diff --git a/arch/sh/include/asm/spinlock-cas.h b/arch/sh/include/asm/spinlock-cas.h
new file mode 100644
index 000000000000..c46e8cc7b515
--- /dev/null
+++ b/arch/sh/include/asm/spinlock-cas.h
@@ -0,0 +1,117 @@
1/*
2 * include/asm-sh/spinlock-cas.h
3 *
4 * Copyright (C) 2015 SEI
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#ifndef __ASM_SH_SPINLOCK_CAS_H
11#define __ASM_SH_SPINLOCK_CAS_H
12
13#include <asm/barrier.h>
14#include <asm/processor.h>
15
16static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new)
17{
18 __asm__ __volatile__("cas.l %1,%0,@r0"
19 : "+r"(new)
20 : "r"(old), "z"(p)
21 : "t", "memory" );
22 return new;
23}
24
25/*
26 * Your basic SMP spinlocks, allowing only a single CPU anywhere
27 */
28
29#define arch_spin_is_locked(x) ((x)->lock <= 0)
30#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
31
32static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
33{
34 smp_cond_load_acquire(&lock->lock, VAL > 0);
35}
36
37static inline void arch_spin_lock(arch_spinlock_t *lock)
38{
39 while (!__sl_cas(&lock->lock, 1, 0));
40}
41
42static inline void arch_spin_unlock(arch_spinlock_t *lock)
43{
44 __sl_cas(&lock->lock, 0, 1);
45}
46
47static inline int arch_spin_trylock(arch_spinlock_t *lock)
48{
49 return __sl_cas(&lock->lock, 1, 0);
50}
51
52/*
53 * Read-write spinlocks, allowing multiple readers but only one writer.
54 *
55 * NOTE! it is quite common to have readers in interrupts but no interrupt
56 * writers. For those circumstances we can "mix" irq-safe locks - any writer
57 * needs to get a irq-safe write-lock, but readers can get non-irqsafe
58 * read-locks.
59 */
60
61/**
62 * read_can_lock - would read_trylock() succeed?
63 * @lock: the rwlock in question.
64 */
65#define arch_read_can_lock(x) ((x)->lock > 0)
66
67/**
68 * write_can_lock - would write_trylock() succeed?
69 * @lock: the rwlock in question.
70 */
71#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
72
73static inline void arch_read_lock(arch_rwlock_t *rw)
74{
75 unsigned old;
76 do old = rw->lock;
77 while (!old || __sl_cas(&rw->lock, old, old-1) != old);
78}
79
80static inline void arch_read_unlock(arch_rwlock_t *rw)
81{
82 unsigned old;
83 do old = rw->lock;
84 while (__sl_cas(&rw->lock, old, old+1) != old);
85}
86
87static inline void arch_write_lock(arch_rwlock_t *rw)
88{
89 while (__sl_cas(&rw->lock, RW_LOCK_BIAS, 0) != RW_LOCK_BIAS);
90}
91
92static inline void arch_write_unlock(arch_rwlock_t *rw)
93{
94 __sl_cas(&rw->lock, 0, RW_LOCK_BIAS);
95}
96
97static inline int arch_read_trylock(arch_rwlock_t *rw)
98{
99 unsigned old;
100 do old = rw->lock;
101 while (old && __sl_cas(&rw->lock, old, old-1) != old);
102 return !!old;
103}
104
105static inline int arch_write_trylock(arch_rwlock_t *rw)
106{
107 return __sl_cas(&rw->lock, RW_LOCK_BIAS, 0) == RW_LOCK_BIAS;
108}
109
110#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
111#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
112
113#define arch_spin_relax(lock) cpu_relax()
114#define arch_read_relax(lock) cpu_relax()
115#define arch_write_relax(lock) cpu_relax()
116
117#endif /* __ASM_SH_SPINLOCK_CAS_H */
diff --git a/arch/sh/include/asm/spinlock-llsc.h b/arch/sh/include/asm/spinlock-llsc.h
new file mode 100644
index 000000000000..cec78143fa83
--- /dev/null
+++ b/arch/sh/include/asm/spinlock-llsc.h
@@ -0,0 +1,224 @@
1/*
2 * include/asm-sh/spinlock-llsc.h
3 *
4 * Copyright (C) 2002, 2003 Paul Mundt
5 * Copyright (C) 2006, 2007 Akio Idehara
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#ifndef __ASM_SH_SPINLOCK_LLSC_H
12#define __ASM_SH_SPINLOCK_LLSC_H
13
14#include <asm/barrier.h>
15#include <asm/processor.h>
16
17/*
18 * Your basic SMP spinlocks, allowing only a single CPU anywhere
19 */
20
21#define arch_spin_is_locked(x) ((x)->lock <= 0)
22#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
23
24static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
25{
26 smp_cond_load_acquire(&lock->lock, VAL > 0);
27}
28
29/*
30 * Simple spin lock operations. There are two variants, one clears IRQ's
31 * on the local processor, one does not.
32 *
33 * We make no fairness assumptions. They have a cost.
34 */
35static inline void arch_spin_lock(arch_spinlock_t *lock)
36{
37 unsigned long tmp;
38 unsigned long oldval;
39
40 __asm__ __volatile__ (
41 "1: \n\t"
42 "movli.l @%2, %0 ! arch_spin_lock \n\t"
43 "mov %0, %1 \n\t"
44 "mov #0, %0 \n\t"
45 "movco.l %0, @%2 \n\t"
46 "bf 1b \n\t"
47 "cmp/pl %1 \n\t"
48 "bf 1b \n\t"
49 : "=&z" (tmp), "=&r" (oldval)
50 : "r" (&lock->lock)
51 : "t", "memory"
52 );
53}
54
55static inline void arch_spin_unlock(arch_spinlock_t *lock)
56{
57 unsigned long tmp;
58
59 __asm__ __volatile__ (
60 "mov #1, %0 ! arch_spin_unlock \n\t"
61 "mov.l %0, @%1 \n\t"
62 : "=&z" (tmp)
63 : "r" (&lock->lock)
64 : "t", "memory"
65 );
66}
67
68static inline int arch_spin_trylock(arch_spinlock_t *lock)
69{
70 unsigned long tmp, oldval;
71
72 __asm__ __volatile__ (
73 "1: \n\t"
74 "movli.l @%2, %0 ! arch_spin_trylock \n\t"
75 "mov %0, %1 \n\t"
76 "mov #0, %0 \n\t"
77 "movco.l %0, @%2 \n\t"
78 "bf 1b \n\t"
79 "synco \n\t"
80 : "=&z" (tmp), "=&r" (oldval)
81 : "r" (&lock->lock)
82 : "t", "memory"
83 );
84
85 return oldval;
86}
87
88/*
89 * Read-write spinlocks, allowing multiple readers but only one writer.
90 *
91 * NOTE! it is quite common to have readers in interrupts but no interrupt
92 * writers. For those circumstances we can "mix" irq-safe locks - any writer
93 * needs to get a irq-safe write-lock, but readers can get non-irqsafe
94 * read-locks.
95 */
96
97/**
98 * read_can_lock - would read_trylock() succeed?
99 * @lock: the rwlock in question.
100 */
101#define arch_read_can_lock(x) ((x)->lock > 0)
102
103/**
104 * write_can_lock - would write_trylock() succeed?
105 * @lock: the rwlock in question.
106 */
107#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
108
109static inline void arch_read_lock(arch_rwlock_t *rw)
110{
111 unsigned long tmp;
112
113 __asm__ __volatile__ (
114 "1: \n\t"
115 "movli.l @%1, %0 ! arch_read_lock \n\t"
116 "cmp/pl %0 \n\t"
117 "bf 1b \n\t"
118 "add #-1, %0 \n\t"
119 "movco.l %0, @%1 \n\t"
120 "bf 1b \n\t"
121 : "=&z" (tmp)
122 : "r" (&rw->lock)
123 : "t", "memory"
124 );
125}
126
127static inline void arch_read_unlock(arch_rwlock_t *rw)
128{
129 unsigned long tmp;
130
131 __asm__ __volatile__ (
132 "1: \n\t"
133 "movli.l @%1, %0 ! arch_read_unlock \n\t"
134 "add #1, %0 \n\t"
135 "movco.l %0, @%1 \n\t"
136 "bf 1b \n\t"
137 : "=&z" (tmp)
138 : "r" (&rw->lock)
139 : "t", "memory"
140 );
141}
142
143static inline void arch_write_lock(arch_rwlock_t *rw)
144{
145 unsigned long tmp;
146
147 __asm__ __volatile__ (
148 "1: \n\t"
149 "movli.l @%1, %0 ! arch_write_lock \n\t"
150 "cmp/hs %2, %0 \n\t"
151 "bf 1b \n\t"
152 "sub %2, %0 \n\t"
153 "movco.l %0, @%1 \n\t"
154 "bf 1b \n\t"
155 : "=&z" (tmp)
156 : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
157 : "t", "memory"
158 );
159}
160
161static inline void arch_write_unlock(arch_rwlock_t *rw)
162{
163 __asm__ __volatile__ (
164 "mov.l %1, @%0 ! arch_write_unlock \n\t"
165 :
166 : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
167 : "t", "memory"
168 );
169}
170
171static inline int arch_read_trylock(arch_rwlock_t *rw)
172{
173 unsigned long tmp, oldval;
174
175 __asm__ __volatile__ (
176 "1: \n\t"
177 "movli.l @%2, %0 ! arch_read_trylock \n\t"
178 "mov %0, %1 \n\t"
179 "cmp/pl %0 \n\t"
180 "bf 2f \n\t"
181 "add #-1, %0 \n\t"
182 "movco.l %0, @%2 \n\t"
183 "bf 1b \n\t"
184 "2: \n\t"
185 "synco \n\t"
186 : "=&z" (tmp), "=&r" (oldval)
187 : "r" (&rw->lock)
188 : "t", "memory"
189 );
190
191 return (oldval > 0);
192}
193
194static inline int arch_write_trylock(arch_rwlock_t *rw)
195{
196 unsigned long tmp, oldval;
197
198 __asm__ __volatile__ (
199 "1: \n\t"
200 "movli.l @%2, %0 ! arch_write_trylock \n\t"
201 "mov %0, %1 \n\t"
202 "cmp/hs %3, %0 \n\t"
203 "bf 2f \n\t"
204 "sub %3, %0 \n\t"
205 "2: \n\t"
206 "movco.l %0, @%2 \n\t"
207 "bf 1b \n\t"
208 "synco \n\t"
209 : "=&z" (tmp), "=&r" (oldval)
210 : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
211 : "t", "memory"
212 );
213
214 return (oldval > (RW_LOCK_BIAS - 1));
215}
216
217#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
218#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
219
220#define arch_spin_relax(lock) cpu_relax()
221#define arch_read_relax(lock) cpu_relax()
222#define arch_write_relax(lock) cpu_relax()
223
224#endif /* __ASM_SH_SPINLOCK_LLSC_H */
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index 416834b60ad0..c2c61ea6a8e2 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -11,222 +11,12 @@
11#ifndef __ASM_SH_SPINLOCK_H 11#ifndef __ASM_SH_SPINLOCK_H
12#define __ASM_SH_SPINLOCK_H 12#define __ASM_SH_SPINLOCK_H
13 13
14/* 14#if defined(CONFIG_CPU_SH4A)
15 * The only locking implemented here uses SH-4A opcodes. For others, 15#include <asm/spinlock-llsc.h>
16 * split this out as per atomic-*.h. 16#elif defined(CONFIG_CPU_J2)
17 */ 17#include <asm/spinlock-cas.h>
18#ifndef CONFIG_CPU_SH4A 18#else
19#error "Need movli.l/movco.l for spinlocks" 19#error "The configured cpu type does not support spinlocks"
20#endif 20#endif
21 21
22#include <asm/barrier.h>
23#include <asm/processor.h>
24
25/*
26 * Your basic SMP spinlocks, allowing only a single CPU anywhere
27 */
28
29#define arch_spin_is_locked(x) ((x)->lock <= 0)
30#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
31
32static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
33{
34 smp_cond_load_acquire(&lock->lock, VAL > 0);
35}
36
37/*
38 * Simple spin lock operations. There are two variants, one clears IRQ's
39 * on the local processor, one does not.
40 *
41 * We make no fairness assumptions. They have a cost.
42 */
43static inline void arch_spin_lock(arch_spinlock_t *lock)
44{
45 unsigned long tmp;
46 unsigned long oldval;
47
48 __asm__ __volatile__ (
49 "1: \n\t"
50 "movli.l @%2, %0 ! arch_spin_lock \n\t"
51 "mov %0, %1 \n\t"
52 "mov #0, %0 \n\t"
53 "movco.l %0, @%2 \n\t"
54 "bf 1b \n\t"
55 "cmp/pl %1 \n\t"
56 "bf 1b \n\t"
57 : "=&z" (tmp), "=&r" (oldval)
58 : "r" (&lock->lock)
59 : "t", "memory"
60 );
61}
62
63static inline void arch_spin_unlock(arch_spinlock_t *lock)
64{
65 unsigned long tmp;
66
67 __asm__ __volatile__ (
68 "mov #1, %0 ! arch_spin_unlock \n\t"
69 "mov.l %0, @%1 \n\t"
70 : "=&z" (tmp)
71 : "r" (&lock->lock)
72 : "t", "memory"
73 );
74}
75
76static inline int arch_spin_trylock(arch_spinlock_t *lock)
77{
78 unsigned long tmp, oldval;
79
80 __asm__ __volatile__ (
81 "1: \n\t"
82 "movli.l @%2, %0 ! arch_spin_trylock \n\t"
83 "mov %0, %1 \n\t"
84 "mov #0, %0 \n\t"
85 "movco.l %0, @%2 \n\t"
86 "bf 1b \n\t"
87 "synco \n\t"
88 : "=&z" (tmp), "=&r" (oldval)
89 : "r" (&lock->lock)
90 : "t", "memory"
91 );
92
93 return oldval;
94}
95
96/*
97 * Read-write spinlocks, allowing multiple readers but only one writer.
98 *
99 * NOTE! it is quite common to have readers in interrupts but no interrupt
100 * writers. For those circumstances we can "mix" irq-safe locks - any writer
101 * needs to get a irq-safe write-lock, but readers can get non-irqsafe
102 * read-locks.
103 */
104
105/**
106 * read_can_lock - would read_trylock() succeed?
107 * @lock: the rwlock in question.
108 */
109#define arch_read_can_lock(x) ((x)->lock > 0)
110
111/**
112 * write_can_lock - would write_trylock() succeed?
113 * @lock: the rwlock in question.
114 */
115#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
116
117static inline void arch_read_lock(arch_rwlock_t *rw)
118{
119 unsigned long tmp;
120
121 __asm__ __volatile__ (
122 "1: \n\t"
123 "movli.l @%1, %0 ! arch_read_lock \n\t"
124 "cmp/pl %0 \n\t"
125 "bf 1b \n\t"
126 "add #-1, %0 \n\t"
127 "movco.l %0, @%1 \n\t"
128 "bf 1b \n\t"
129 : "=&z" (tmp)
130 : "r" (&rw->lock)
131 : "t", "memory"
132 );
133}
134
135static inline void arch_read_unlock(arch_rwlock_t *rw)
136{
137 unsigned long tmp;
138
139 __asm__ __volatile__ (
140 "1: \n\t"
141 "movli.l @%1, %0 ! arch_read_unlock \n\t"
142 "add #1, %0 \n\t"
143 "movco.l %0, @%1 \n\t"
144 "bf 1b \n\t"
145 : "=&z" (tmp)
146 : "r" (&rw->lock)
147 : "t", "memory"
148 );
149}
150
151static inline void arch_write_lock(arch_rwlock_t *rw)
152{
153 unsigned long tmp;
154
155 __asm__ __volatile__ (
156 "1: \n\t"
157 "movli.l @%1, %0 ! arch_write_lock \n\t"
158 "cmp/hs %2, %0 \n\t"
159 "bf 1b \n\t"
160 "sub %2, %0 \n\t"
161 "movco.l %0, @%1 \n\t"
162 "bf 1b \n\t"
163 : "=&z" (tmp)
164 : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
165 : "t", "memory"
166 );
167}
168
169static inline void arch_write_unlock(arch_rwlock_t *rw)
170{
171 __asm__ __volatile__ (
172 "mov.l %1, @%0 ! arch_write_unlock \n\t"
173 :
174 : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
175 : "t", "memory"
176 );
177}
178
179static inline int arch_read_trylock(arch_rwlock_t *rw)
180{
181 unsigned long tmp, oldval;
182
183 __asm__ __volatile__ (
184 "1: \n\t"
185 "movli.l @%2, %0 ! arch_read_trylock \n\t"
186 "mov %0, %1 \n\t"
187 "cmp/pl %0 \n\t"
188 "bf 2f \n\t"
189 "add #-1, %0 \n\t"
190 "movco.l %0, @%2 \n\t"
191 "bf 1b \n\t"
192 "2: \n\t"
193 "synco \n\t"
194 : "=&z" (tmp), "=&r" (oldval)
195 : "r" (&rw->lock)
196 : "t", "memory"
197 );
198
199 return (oldval > 0);
200}
201
202static inline int arch_write_trylock(arch_rwlock_t *rw)
203{
204 unsigned long tmp, oldval;
205
206 __asm__ __volatile__ (
207 "1: \n\t"
208 "movli.l @%2, %0 ! arch_write_trylock \n\t"
209 "mov %0, %1 \n\t"
210 "cmp/hs %3, %0 \n\t"
211 "bf 2f \n\t"
212 "sub %3, %0 \n\t"
213 "2: \n\t"
214 "movco.l %0, @%2 \n\t"
215 "bf 1b \n\t"
216 "synco \n\t"
217 : "=&z" (tmp), "=&r" (oldval)
218 : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
219 : "t", "memory"
220 );
221
222 return (oldval > (RW_LOCK_BIAS - 1));
223}
224
225#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
226#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
227
228#define arch_spin_relax(lock) cpu_relax()
229#define arch_read_relax(lock) cpu_relax()
230#define arch_write_relax(lock) cpu_relax()
231
232#endif /* __ASM_SH_SPINLOCK_H */ 22#endif /* __ASM_SH_SPINLOCK_H */
diff --git a/arch/sh/include/uapi/asm/cpu-features.h b/arch/sh/include/uapi/asm/cpu-features.h
index 694abe490edb..2f1bc851042a 100644
--- a/arch/sh/include/uapi/asm/cpu-features.h
+++ b/arch/sh/include/uapi/asm/cpu-features.h
@@ -22,5 +22,6 @@
22#define CPU_HAS_L2_CACHE 0x0080 /* Secondary cache / URAM */ 22#define CPU_HAS_L2_CACHE 0x0080 /* Secondary cache / URAM */
23#define CPU_HAS_OP32 0x0100 /* 32-bit instruction support */ 23#define CPU_HAS_OP32 0x0100 /* 32-bit instruction support */
24#define CPU_HAS_PTEAEX 0x0200 /* PTE ASID Extension support */ 24#define CPU_HAS_PTEAEX 0x0200 /* PTE ASID Extension support */
25#define CPU_HAS_CAS_L 0x0400 /* cas.l atomic compare-and-swap */
25 26
26#endif /* __ASM_SH_CPU_FEATURES_H */ 27#endif /* __ASM_SH_CPU_FEATURES_H */
diff --git a/arch/sh/include/uapi/asm/sigcontext.h b/arch/sh/include/uapi/asm/sigcontext.h
index 8ce1435bc0bf..faa5d0833412 100644
--- a/arch/sh/include/uapi/asm/sigcontext.h
+++ b/arch/sh/include/uapi/asm/sigcontext.h
@@ -25,8 +25,6 @@ struct sigcontext {
25 unsigned long sc_mach; 25 unsigned long sc_mach;
26 unsigned long sc_macl; 26 unsigned long sc_macl;
27 27
28#if defined(__SH4__) || defined(CONFIG_CPU_SH4) || \
29 defined(__SH2A__) || defined(CONFIG_CPU_SH2A)
30 /* FPU registers */ 28 /* FPU registers */
31 unsigned long sc_fpregs[16]; 29 unsigned long sc_fpregs[16];
32 unsigned long sc_xfpregs[16]; 30 unsigned long sc_xfpregs[16];
@@ -34,7 +32,6 @@ struct sigcontext {
34 unsigned int sc_fpul; 32 unsigned int sc_fpul;
35 unsigned int sc_ownedfp; 33 unsigned int sc_ownedfp;
36#endif 34#endif
37#endif
38}; 35};
39 36
40#endif /* __ASM_SH_SIGCONTEXT_H */ 37#endif /* __ASM_SH_SIGCONTEXT_H */
diff --git a/arch/sh/include/uapi/asm/unistd_32.h b/arch/sh/include/uapi/asm/unistd_32.h
index d13a1d623736..c801bde9e6f5 100644
--- a/arch/sh/include/uapi/asm/unistd_32.h
+++ b/arch/sh/include/uapi/asm/unistd_32.h
@@ -380,7 +380,21 @@
380#define __NR_process_vm_writev 366 380#define __NR_process_vm_writev 366
381#define __NR_kcmp 367 381#define __NR_kcmp 367
382#define __NR_finit_module 368 382#define __NR_finit_module 368
383#define __NR_sched_getattr 369
384#define __NR_sched_setattr 370
385#define __NR_renameat2 371
386#define __NR_seccomp 372
387#define __NR_getrandom 373
388#define __NR_memfd_create 374
389#define __NR_bpf 375
390#define __NR_execveat 376
391#define __NR_userfaultfd 377
392#define __NR_membarrier 378
393#define __NR_mlock2 379
394#define __NR_copy_file_range 380
395#define __NR_preadv2 381
396#define __NR_pwritev2 382
383 397
384#define NR_syscalls 369 398#define NR_syscalls 383
385 399
386#endif /* __ASM_SH_UNISTD_32_H */ 400#endif /* __ASM_SH_UNISTD_32_H */
diff --git a/arch/sh/include/uapi/asm/unistd_64.h b/arch/sh/include/uapi/asm/unistd_64.h
index 47ebd5b5ed55..ce0cb3598b62 100644
--- a/arch/sh/include/uapi/asm/unistd_64.h
+++ b/arch/sh/include/uapi/asm/unistd_64.h
@@ -400,7 +400,21 @@
400#define __NR_process_vm_writev 377 400#define __NR_process_vm_writev 377
401#define __NR_kcmp 378 401#define __NR_kcmp 378
402#define __NR_finit_module 379 402#define __NR_finit_module 379
403#define __NR_sched_getattr 380
404#define __NR_sched_setattr 381
405#define __NR_renameat2 382
406#define __NR_seccomp 383
407#define __NR_getrandom 384
408#define __NR_memfd_create 385
409#define __NR_bpf 386
410#define __NR_execveat 387
411#define __NR_userfaultfd 388
412#define __NR_membarrier 389
413#define __NR_mlock2 390
414#define __NR_copy_file_range 391
415#define __NR_preadv2 392
416#define __NR_pwritev2 393
403 417
404#define NR_syscalls 380 418#define NR_syscalls 394
405 419
406#endif /* __ASM_SH_UNISTD_64_H */ 420#endif /* __ASM_SH_UNISTD_64_H */