Diffstat (limited to 'include/asm-sparc64')
 -rw-r--r--  include/asm-sparc64/atomic.h       |  1
 -rw-r--r--  include/asm-sparc64/cache.h        |  1
 -rw-r--r--  include/asm-sparc64/elf.h          |  2
 -rw-r--r--  include/asm-sparc64/futex.h        | 49
 -rw-r--r--  include/asm-sparc64/kprobes.h      | 10
 -rw-r--r--  include/asm-sparc64/mmu_context.h  |  2
 -rw-r--r--  include/asm-sparc64/mutex.h        |  9
 -rw-r--r--  include/asm-sparc64/oplib.h        |  2
 -rw-r--r--  include/asm-sparc64/processor.h    |  5
 -rw-r--r--  include/asm-sparc64/system.h       | 18
 -rw-r--r--  include/asm-sparc64/thread_info.h  |  6
 -rw-r--r--  include/asm-sparc64/unistd.h       | 23
 12 files changed, 54 insertions, 74 deletions
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index 11f5aa5d108c..25256bdc8aae 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -72,6 +72,7 @@ extern int atomic64_sub_ret(int, atomic64_t *);
 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 #define atomic_add_unless(v, a, u) \
 ({ \
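
Note: atomic_xchg() is a thin wrapper over xchg() on the counter field. A minimal usage fragment (illustrative only, not part of this patch):

	atomic_t state = ATOMIC_INIT(0);
	int old;

	/* Publish the new value and fetch the previous one in a single
	 * atomic step; expands to xchg(&state.counter, 1). */
	old = atomic_xchg(&state, 1);
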
diff --git a/include/asm-sparc64/cache.h b/include/asm-sparc64/cache.h
index ade5ec3bfd5a..f7d35a2ae9b8 100644
--- a/include/asm-sparc64/cache.h
+++ b/include/asm-sparc64/cache.h
@@ -9,7 +9,6 @@
 #define L1_CACHE_BYTES 32 /* Two 16-byte sub-blocks per line. */
 
 #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
-#define L1_CACHE_SHIFT_MAX 5 /* largest L1 which this arch supports */
 
 #define SMP_CACHE_BYTES_SHIFT 6
 #define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT) /* L2 cache line size. */
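
Note: the surviving L1_CACHE_ALIGN() macro is ordinary round-up arithmetic. For instance (illustrative fragment):

	size_t sz = L1_CACHE_ALIGN(33);	/* (33 + 31) & ~31 == 64, one full extra 32-byte line */
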
diff --git a/include/asm-sparc64/elf.h b/include/asm-sparc64/elf.h
index 91458118277e..69539a8ab833 100644
--- a/include/asm-sparc64/elf.h
+++ b/include/asm-sparc64/elf.h
@@ -119,7 +119,7 @@ typedef struct {
 #endif
 
 #define ELF_CORE_COPY_TASK_REGS(__tsk, __elf_regs) \
-({ ELF_CORE_COPY_REGS((*(__elf_regs)), (__tsk)->thread_info->kregs); 1; })
+({ ELF_CORE_COPY_REGS((*(__elf_regs)), task_pt_regs(__tsk)); 1; })
 
 /*
  * This is used to ensure we don't load something for the wrong architecture.
diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h
index 9feff4ce1424..6a332a9f099c 100644
--- a/include/asm-sparc64/futex.h
+++ b/include/asm-sparc64/futex.h
@@ -1,53 +1,6 @@
 #ifndef _ASM_FUTEX_H
 #define _ASM_FUTEX_H
 
-#ifdef __KERNEL__
+#include <asm-generic/futex.h>
 
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
-	int oldval = 0, ret;
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
-		return -EFAULT;
-
-	inc_preempt_count();
-
-	switch (op) {
-	case FUTEX_OP_SET:
-	case FUTEX_OP_ADD:
-	case FUTEX_OP_OR:
-	case FUTEX_OP_ANDN:
-	case FUTEX_OP_XOR:
-	default:
-		ret = -ENOSYS;
-	}
-
-	dec_preempt_count();
-
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
-	return ret;
-}
-
-#endif
 #endif
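
Note: the dropped stub duplicated what asm-generic/futex.h already provides. For reference, the encoded_op field layout that the removed decoder assumed can be exercised stand-alone; the sketch below reuses the same shift/mask logic in a plain user-space program (purely illustrative, values picked arbitrarily):

	#include <stdio.h>

	int main(void)
	{
		int encoded_op = (5 << 28) | (3 << 24) | (7 << 12) | 9;

		int op     = (encoded_op >> 28) & 7;	/* operation (SET/ADD/...)   */
		int cmp    = (encoded_op >> 24) & 15;	/* comparison (EQ/NE/LT/...) */
		int oparg  = (encoded_op << 8) >> 20;	/* sign-extended 12-bit arg  */
		int cmparg = (encoded_op << 20) >> 20;	/* sign-extended 12-bit arg  */

		/* prints: op=5 cmp=3 oparg=7 cmparg=9 */
		printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
		return 0;
	}
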
diff --git a/include/asm-sparc64/kprobes.h b/include/asm-sparc64/kprobes.h
index 7ba845320f5c..e4efe652b54b 100644
--- a/include/asm-sparc64/kprobes.h
+++ b/include/asm-sparc64/kprobes.h
@@ -12,6 +12,7 @@ typedef u32 kprobe_opcode_t;
 #define MAX_INSN_SIZE 2
 
 #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
+#define arch_remove_kprobe(p) do {} while (0)
 
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
@@ -38,15 +39,6 @@ struct kprobe_ctlblk {
 	struct prev_kprobe prev_kprobe;
 };
 
-#ifdef CONFIG_KPROBES
 extern int kprobe_exceptions_notify(struct notifier_block *self,
 				    unsigned long val, void *data);
-#else /* !CONFIG_KPROBES */
-static inline int kprobe_exceptions_notify(struct notifier_block *self,
-					   unsigned long val, void *data)
-{
-	return 0;
-}
-#endif
-
 #endif /* _SPARC64_KPROBES_H */
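
Note: with the empty arch_remove_kprobe() macro in place, callers no longer need their own CONFIG_KPROBES guards around teardown. A hypothetical caller sketch (function name invented here):

	/* Hypothetical teardown helper: the macro expands to a no-op
	 * statement on sparc64, so this compiles away entirely. */
	static void example_unregister_cleanup(struct kprobe *p)
	{
		arch_remove_kprobe(p);
	}
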
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 08ba72d7722c..57ee7b306189 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -60,7 +60,7 @@ do { \
 	register unsigned long pgd_cache asm("o4"); \
 	paddr = __pa((__mm)->pgd); \
 	pgd_cache = 0UL; \
-	if ((__tsk)->thread_info->flags & _TIF_32BIT) \
+	if (task_thread_info(__tsk)->flags & _TIF_32BIT) \
 		pgd_cache = get_pgd_cache((__mm)->pgd); \
 	__asm__ __volatile__("wrpr %%g0, 0x494, %%pstate\n\t" \
 			     "mov %3, %%g4\n\t" \
diff --git a/include/asm-sparc64/mutex.h b/include/asm-sparc64/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-sparc64/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
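
Note: the mutex-dec.h flavour chosen here builds the lock fastpath on an atomic decrement (counter value 1 means unlocked), while mutex-xchg.h builds it on atomic_xchg(). A rough, simplified sketch of the decrement idea (not the actual asm-generic code; names invented here):

	/* Sketch: 1 = unlocked, 0 = locked, negative = contended. */
	static inline void example_mutex_lock_fastpath(atomic_t *count,
						       void (*slowpath)(atomic_t *))
	{
		if (atomic_dec_return(count) < 0)	/* was not 1: someone holds it */
			slowpath(count);		/* fall back to the slow path  */
	}
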
diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h
index d02f1e8ae1a6..3c59b2693fb9 100644
--- a/include/asm-sparc64/oplib.h
+++ b/include/asm-sparc64/oplib.h
@@ -163,6 +163,7 @@ enum prom_input_device {
 	PROMDEV_IKBD,   /* input from keyboard */
 	PROMDEV_ITTYA,  /* input from ttya */
 	PROMDEV_ITTYB,  /* input from ttyb */
+	PROMDEV_IRSC,   /* input from rsc */
 	PROMDEV_I_UNK,
 };
 
@@ -174,6 +175,7 @@ enum prom_output_device {
 	PROMDEV_OSCREEN, /* to screen */
 	PROMDEV_OTTYA,   /* to ttya */
 	PROMDEV_OTTYB,   /* to ttyb */
+	PROMDEV_ORSC,    /* to rsc */
 	PROMDEV_O_UNK,
 };
 
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index 3169f3e2237e..cd8d9b4c8658 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -186,8 +186,9 @@ extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 
 extern unsigned long get_wchan(struct task_struct *task);
 
-#define KSTK_EIP(tsk)  ((tsk)->thread_info->kregs->tpc)
-#define KSTK_ESP(tsk)  ((tsk)->thread_info->kregs->u_regs[UREG_FP])
+#define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
+#define KSTK_EIP(tsk)  (task_pt_regs(tsk)->tpc)
+#define KSTK_ESP(tsk)  (task_pt_regs(tsk)->u_regs[UREG_FP])
 
 #define cpu_relax() barrier()
 
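
Note: task_pt_regs() gives the other headers touched here (elf.h, mmu_context.h, system.h) one accessor instead of open-coded ->thread_info->kregs dereferences. A minimal sketch of the pattern (helper name invented for illustration):

	/* Reads a stopped task's user program counter; equivalent to KSTK_EIP(tsk). */
	static inline unsigned long example_task_user_pc(struct task_struct *tsk)
	{
		return task_pt_regs(tsk)->tpc;
	}
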
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index b5417529f6f1..af254e581834 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -193,11 +193,7 @@ do { \
  * not preserve it's value. Hairy, but it lets us remove 2 loads
  * and 2 stores in this critical code path. -DaveM
  */
-#if __GNUC__ >= 3
 #define EXTRA_CLOBBER ,"%l1"
-#else
-#define EXTRA_CLOBBER
-#endif
 #define switch_to(prev, next, last) \
 do { if (test_thread_flag(TIF_PERFCTR)) { \
 	unsigned long __tmp; \
@@ -212,7 +208,7 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
 	/* If you are tempted to conditionalize the following */ \
 	/* so that ASI is only written if it changes, think again. */ \
 	__asm__ __volatile__("wr %%g0, %0, %%asi" \
-	: : "r" (__thread_flag_byte_ptr(next->thread_info)[TI_FLAG_BYTE_CURRENT_DS]));\
+	: : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
 	__asm__ __volatile__( \
 	"mov %%g4, %%g7\n\t" \
 	"wrpr %%g0, 0x95, %%pstate\n\t" \
@@ -242,7 +238,7 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
 	"b,a ret_from_syscall\n\t" \
 	"1:\n\t" \
 	: "=&r" (last) \
-	: "0" (next->thread_info), \
+	: "0" (task_thread_info(next)), \
 	  "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
 	  "i" (TI_CWP), "i" (TI_TASK) \
 	: "cc", \
@@ -257,6 +253,16 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
 	} \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
 {
 	unsigned long tmp1, tmp2;
diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h
index c94d8b3991bd..ac9d068aab4f 100644
--- a/include/asm-sparc64/thread_info.h
+++ b/include/asm-sparc64/thread_info.h
@@ -221,7 +221,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
  *	 nop
  */
 #define TIF_SYSCALL_TRACE	0	/* syscall trace active */
-#define TIF_NOTIFY_RESUME	1	/* resumption notification requested */
+#define TIF_RESTORE_SIGMASK	1	/* restore signal mask in do_signal() */
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_PERFCTR		4	/* performance counters active */
@@ -241,7 +241,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define TIF_POLLING_NRFLAG	14
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
-#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_PERFCTR		(1<<TIF_PERFCTR)
@@ -250,11 +249,12 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define _TIF_32BIT		(1<<TIF_32BIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
+#define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_ABI_PENDING	(1<<TIF_ABI_PENDING)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 
 #define _TIF_USER_WORK_MASK	((0xff << TI_FLAG_WSAVED_SHIFT) | \
-				 (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \
+				 (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | \
 				 _TIF_NEED_RESCHED | _TIF_PERFCTR))
 
 #endif /* __KERNEL__ */
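
Note: TIF_RESTORE_SIGMASK backs the pselect6()/ppoll()/rt_sigsuspend() additions in unistd.h below; the usual pattern is that the syscall saves the old signal mask and sets the flag, and the signal-delivery path restores the mask when it sees it. A simplified consumer-side sketch (not the sparc64 signal code itself):

	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
		clear_thread_flag(TIF_RESTORE_SIGMASK);
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
	}
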
diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h
index 51ec2879b881..84ac2bdb0902 100644
--- a/include/asm-sparc64/unistd.h
+++ b/include/asm-sparc64/unistd.h
@@ -302,11 +302,26 @@
 #define __NR_add_key		281
 #define __NR_request_key	282
 #define __NR_keyctl		283
+#define __NR_openat		284
+#define __NR_mkdirat		285
+#define __NR_mknodat		286
+#define __NR_fchownat		287
+#define __NR_futimesat		288
+#define __NR_newfstatat		289
+#define __NR_unlinkat		290
+#define __NR_renameat		291
+#define __NR_linkat		292
+#define __NR_symlinkat		293
+#define __NR_readlinkat		294
+#define __NR_fchmodat		295
+#define __NR_faccessat		296
+#define __NR_pselect6		297
+#define __NR_ppoll		298
 
-/* WARNING: You MAY NOT add syscall numbers larger than 283, since
+/* WARNING: You MAY NOT add syscall numbers larger than 298, since
  * all of the syscall tables in the Sparc kernel are
- *   sized to have 283 entries (starting at zero).  Therefore
- * find a free slot in the 0-282 range.
+ *   sized to have 298 entries (starting at zero).  Therefore
+ * find a free slot in the 0-298 range.
  */
 
 #define _syscall0(type,name) \
@@ -501,6 +516,8 @@ asmlinkage long sys_rt_sigaction(int sig,
 #define __ARCH_WANT_SYS_OLDUMOUNT
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
+#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
 #endif
 
 /*
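
Note: the new numbers are plain additions to the table, so they can be exercised directly from user space. A stand-alone sketch (illustrative only; it uses glibc's syscall(2) rather than the _syscallN macros defined later in this header, and the path is arbitrary):

	#include <unistd.h>
	#include <fcntl.h>
	#include <stdio.h>

	int main(void)
	{
		/* 284 is __NR_openat on sparc64 after this change. */
		long fd = syscall(284, AT_FDCWD, "/etc/hostname", O_RDONLY);

		if (fd >= 0) {
			printf("openat via syscall number 284 returned fd %ld\n", fd);
			close(fd);
		}
		return 0;
	}
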