Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/atomic_32.h           | 218
-rw-r--r--  arch/x86/include/asm/hardirq.h             |   1
-rw-r--r--  arch/x86/include/asm/hw_irq.h              |   2
-rw-r--r--  arch/x86/include/asm/intel_arch_perfmon.h  |  31
-rw-r--r--  arch/x86/include/asm/perf_counter.h        |  95
-rw-r--r--  arch/x86/include/asm/thread_info.h         |   4
-rw-r--r--  arch/x86/include/asm/unistd_32.h           |   1
-rw-r--r--  arch/x86/include/asm/unistd_64.h           |   3
8 files changed, 322 insertions(+), 33 deletions(-)
diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
index 85b46fba4229..977250ed8b89 100644
--- a/arch/x86/include/asm/atomic_32.h
+++ b/arch/x86/include/asm/atomic_32.h
@@ -247,5 +247,223 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
 
+/* A 64-bit atomic type */
+
+typedef struct {
+	unsigned long long counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(val)	{ (val) }
+
+/**
+ * __atomic64_read - read atomic64 variable
+ * @ptr: pointer of type atomic64_t
+ *
+ * Reads the value of @ptr with no atomicity or memory-barrier
+ * guarantees; the atomic read is atomic64_read() below.
+ */
+#define __atomic64_read(ptr)		((ptr)->counter)
+
+static inline unsigned long long
+cmpxchg8b(unsigned long long *ptr, unsigned long long old, unsigned long long new)
+{
+	asm volatile(
+
+		LOCK_PREFIX "cmpxchg8b (%[ptr])\n"
+
+		: "=A" (old)
+
+		: [ptr] "D" (ptr),
+		        "A" (old),
+		        "b" (ll_low(new)),
+		        "c" (ll_high(new))
+
+		: "memory");
+
+	return old;
+}
+
+static inline unsigned long long
+atomic64_cmpxchg(atomic64_t *ptr, unsigned long long old_val,
+		 unsigned long long new_val)
+{
+	return cmpxchg8b(&ptr->counter, old_val, new_val);
+}
+
+/**
+ * atomic64_set - set atomic64 variable
+ * @ptr: pointer to type atomic64_t
+ * @new_val: value to assign
+ *
+ * Atomically sets the value of @ptr to @new_val.
+ */
+static inline void atomic64_set(atomic64_t *ptr, unsigned long long new_val)
+{
+	unsigned long long old_val;
+
+	do {
+		old_val = atomic_read(ptr);
+	} while (atomic64_cmpxchg(ptr, old_val, new_val) != old_val);
+}
+
+/**
+ * atomic64_read - read atomic64 variable
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically reads the value of @ptr and returns it.
+ */
+static inline unsigned long long atomic64_read(atomic64_t *ptr)
+{
+	unsigned long long curr_val;
+
+	do {
+		curr_val = __atomic64_read(ptr);
+	} while (atomic64_cmpxchg(ptr, curr_val, curr_val) != curr_val);
+
+	return curr_val;
+}
+
+/**
+ * atomic64_add_return - add and return
+ * @delta: integer value to add
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
+ */
+static inline unsigned long long
+atomic64_add_return(unsigned long long delta, atomic64_t *ptr)
+{
+	unsigned long long old_val, new_val;
+
+	do {
+		old_val = atomic_read(ptr);
+		new_val = old_val + delta;
+
+	} while (atomic64_cmpxchg(ptr, old_val, new_val) != old_val);
+
+	return new_val;
+}
+
+static inline long atomic64_sub_return(unsigned long long delta, atomic64_t *ptr)
+{
+	return atomic64_add_return(-delta, ptr);
+}
+
+static inline long atomic64_inc_return(atomic64_t *ptr)
+{
+	return atomic64_add_return(1, ptr);
+}
+
+static inline long atomic64_dec_return(atomic64_t *ptr)
+{
+	return atomic64_sub_return(1, ptr);
+}
+
+/**
+ * atomic64_add - add integer to atomic64 variable
+ * @delta: integer value to add
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically adds @delta to @ptr.
+ */
+static inline void atomic64_add(unsigned long long delta, atomic64_t *ptr)
+{
+	atomic64_add_return(delta, ptr);
+}
+
+/**
+ * atomic64_sub - subtract the atomic64 variable
+ * @delta: integer value to subtract
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically subtracts @delta from @ptr.
+ */
+static inline void atomic64_sub(unsigned long long delta, atomic64_t *ptr)
+{
+	atomic64_add(-delta, ptr);
+}
+
+/**
+ * atomic64_sub_and_test - subtract value from variable and test result
+ * @delta: integer value to subtract
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically subtracts @delta from @ptr and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static inline int
+atomic64_sub_and_test(unsigned long long delta, atomic64_t *ptr)
+{
+	unsigned long long old_val = atomic64_sub_return(delta, ptr);
+
+	return old_val == 0;
+}
+
+/**
+ * atomic64_inc - increment atomic64 variable
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically increments @ptr by 1.
+ */
+static inline void atomic64_inc(atomic64_t *ptr)
+{
+	atomic64_add(1, ptr);
+}
+
+/**
+ * atomic64_dec - decrement atomic64 variable
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically decrements @ptr by 1.
+ */
+static inline void atomic64_dec(atomic64_t *ptr)
+{
+	atomic64_sub(1, ptr);
+}
+
+/**
+ * atomic64_dec_and_test - decrement and test
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically decrements @ptr by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static inline int atomic64_dec_and_test(atomic64_t *ptr)
+{
+	return atomic64_sub_and_test(1, ptr);
+}
+
+/**
+ * atomic64_inc_and_test - increment and test
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically increments @ptr by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static inline int atomic64_inc_and_test(atomic64_t *ptr)
+{
+	return atomic64_sub_and_test(-1, ptr);
+}
+
+/**
+ * atomic64_add_negative - add and test if negative
+ * @delta: integer value to add
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically adds @delta to @ptr and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static inline int
+atomic64_add_negative(unsigned long long delta, atomic64_t *ptr)
+{
+	long long old_val = atomic64_add_return(delta, ptr);
+
+	return old_val < 0;
+}
+
 #include <asm-generic/atomic.h>
 #endif /* _ASM_X86_ATOMIC_32_H */
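
All of the atomic64_* operations added above reduce to one pattern: snapshot the 64-bit counter, compute the new value, and retry the LOCK CMPXCHG8B until no other CPU changed the counter in between. The following stand-alone sketch is not part of the patch; it reproduces that retry loop in user space, with GCC's __sync_val_compare_and_swap builtin standing in for the cmpxchg8b() wrapper, and the my_atomic64_* names are purely illustrative.

/*
 * Sketch only: the same CAS retry loop as atomic64_add_return() above,
 * built on GCC's __sync builtin instead of the kernel's cmpxchg8b() helper.
 * On 32-bit x86 (with -march=i586 or later) the builtin compiles down to
 * the same lock cmpxchg8b instruction the patch uses.
 */
#include <stdio.h>

typedef struct {
        unsigned long long counter;
} my_atomic64_t;                        /* illustrative stand-in for atomic64_t */

static unsigned long long
my_atomic64_add_return(unsigned long long delta, my_atomic64_t *ptr)
{
        unsigned long long old_val, new_val;

        do {
                old_val = ptr->counter;         /* racy snapshot, like __atomic64_read() */
                new_val = old_val + delta;
                /* retry until no other CPU modified the counter in between */
        } while (__sync_val_compare_and_swap(&ptr->counter, old_val, new_val) != old_val);

        return new_val;
}

int main(void)
{
        my_atomic64_t v = { 0 };

        my_atomic64_add_return(3, &v);
        my_atomic64_add_return(4, &v);
        printf("counter = %llu\n", v.counter); /* prints 7 */
        return 0;
}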
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 176f058e7159..46ebed797e4f 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -12,6 +12,7 @@ typedef struct {
 	unsigned int apic_timer_irqs;	/* arch dependent */
 	unsigned int irq_spurious_count;
 #endif
+	unsigned int apic_perf_irqs;
 #ifdef CONFIG_SMP
 	unsigned int irq_resched_count;
 	unsigned int irq_call_count;
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 370e1c83bb49..f39881b6b68b 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -28,6 +28,8 @@
 /* Interrupt handlers registered during init_IRQ */
 extern void apic_timer_interrupt(void);
 extern void error_interrupt(void);
+extern void perf_counter_interrupt(void);
+
 extern void spurious_interrupt(void);
 extern void thermal_interrupt(void);
 extern void reschedule_interrupt(void);
diff --git a/arch/x86/include/asm/intel_arch_perfmon.h b/arch/x86/include/asm/intel_arch_perfmon.h
deleted file mode 100644
index fa0fd068bc2e..000000000000
--- a/arch/x86/include/asm/intel_arch_perfmon.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef _ASM_X86_INTEL_ARCH_PERFMON_H
-#define _ASM_X86_INTEL_ARCH_PERFMON_H
-
-#define MSR_ARCH_PERFMON_PERFCTR0		0xc1
-#define MSR_ARCH_PERFMON_PERFCTR1		0xc2
-
-#define MSR_ARCH_PERFMON_EVENTSEL0		0x186
-#define MSR_ARCH_PERFMON_EVENTSEL1		0x187
-
-#define ARCH_PERFMON_EVENTSEL0_ENABLE		(1 << 22)
-#define ARCH_PERFMON_EVENTSEL_INT		(1 << 20)
-#define ARCH_PERFMON_EVENTSEL_OS		(1 << 17)
-#define ARCH_PERFMON_EVENTSEL_USR		(1 << 16)
-
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL	(0x3c)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK	(0x00 << 8)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX	(0)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
-		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
-
-union cpuid10_eax {
-	struct {
-		unsigned int version_id:8;
-		unsigned int num_counters:8;
-		unsigned int bit_width:8;
-		unsigned int mask_length:8;
-	} split;
-	unsigned int full;
-};
-
-#endif /* _ASM_X86_INTEL_ARCH_PERFMON_H */
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h
new file mode 100644
index 000000000000..2e08ed736647
--- /dev/null
+++ b/arch/x86/include/asm/perf_counter.h
@@ -0,0 +1,95 @@
+#ifndef _ASM_X86_PERF_COUNTER_H
+#define _ASM_X86_PERF_COUNTER_H
+
+/*
+ * Performance counter hw details:
+ */
+
+#define X86_PMC_MAX_GENERIC			8
+#define X86_PMC_MAX_FIXED			3
+
+#define X86_PMC_IDX_GENERIC			0
+#define X86_PMC_IDX_FIXED			32
+#define X86_PMC_IDX_MAX				64
+
+#define MSR_ARCH_PERFMON_PERFCTR0		0xc1
+#define MSR_ARCH_PERFMON_PERFCTR1		0xc2
+
+#define MSR_ARCH_PERFMON_EVENTSEL0		0x186
+#define MSR_ARCH_PERFMON_EVENTSEL1		0x187
+
+#define ARCH_PERFMON_EVENTSEL0_ENABLE		(1 << 22)
+#define ARCH_PERFMON_EVENTSEL_INT		(1 << 20)
+#define ARCH_PERFMON_EVENTSEL_OS		(1 << 17)
+#define ARCH_PERFMON_EVENTSEL_USR		(1 << 16)
+
+/*
+ * Includes eventsel and unit mask as well:
+ */
+#define ARCH_PERFMON_EVENT_MASK			0xffff
+
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL	0x3c
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK	(0x00 << 8)
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX	0
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
+		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
+
+#define ARCH_PERFMON_BRANCH_MISSES_RETIRED	6
+
+/*
+ * Intel "Architectural Performance Monitoring" CPUID
+ * detection/enumeration details:
+ */
+union cpuid10_eax {
+	struct {
+		unsigned int version_id:8;
+		unsigned int num_counters:8;
+		unsigned int bit_width:8;
+		unsigned int mask_length:8;
+	} split;
+	unsigned int full;
+};
+
+union cpuid10_edx {
+	struct {
+		unsigned int num_counters_fixed:4;
+		unsigned int reserved:28;
+	} split;
+	unsigned int full;
+};
+
+
+/*
+ * Fixed-purpose performance counters:
+ */
+
+/*
+ * All 3 fixed-mode PMCs are configured via this single MSR:
+ */
+#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL		0x38d
+
+/*
+ * The counts are available in three separate MSRs:
+ */
+
+/* Instr_Retired.Any: */
+#define MSR_ARCH_PERFMON_FIXED_CTR0		0x309
+#define X86_PMC_IDX_FIXED_INSTRUCTIONS		(X86_PMC_IDX_FIXED + 0)
+
+/* CPU_CLK_Unhalted.Core: */
+#define MSR_ARCH_PERFMON_FIXED_CTR1		0x30a
+#define X86_PMC_IDX_FIXED_CPU_CYCLES		(X86_PMC_IDX_FIXED + 1)
+
+/* CPU_CLK_Unhalted.Ref: */
+#define MSR_ARCH_PERFMON_FIXED_CTR2		0x30b
+#define X86_PMC_IDX_FIXED_BUS_CYCLES		(X86_PMC_IDX_FIXED + 2)
+
+#ifdef CONFIG_PERF_COUNTERS
+extern void init_hw_perf_counters(void);
+extern void perf_counters_lapic_init(int nmi);
+#else
+static inline void init_hw_perf_counters(void)		{ }
+static inline void perf_counters_lapic_init(int nmi)	{ }
+#endif
+
+#endif /* _ASM_X86_PERF_COUNTER_H */
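
The cpuid10_eax/cpuid10_edx unions above mirror the layout of CPUID leaf 0xA, which the kernel-side enumeration reads to discover how many generic and fixed counters the CPU exposes and how wide they are. As an illustration only (it assumes GCC's <cpuid.h> helper and is not part of this patch), the same decoding can be done from user space:

/*
 * Sketch: decode CPUID leaf 0xA with the same bit-field layouts that
 * perf_counter.h defines.  Assumes a GCC/clang toolchain providing
 * __get_cpuid() in <cpuid.h>.
 */
#include <stdio.h>
#include <cpuid.h>

union cpuid10_eax {
        struct {
                unsigned int version_id:8;
                unsigned int num_counters:8;
                unsigned int bit_width:8;
                unsigned int mask_length:8;
        } split;
        unsigned int full;
};

union cpuid10_edx {
        struct {
                unsigned int num_counters_fixed:4;
                unsigned int reserved:28;
        } split;
        unsigned int full;
};

int main(void)
{
        unsigned int ebx, ecx;
        union cpuid10_eax eax;
        union cpuid10_edx edx;

        if (!__get_cpuid(0xa, &eax.full, &ebx, &ecx, &edx.full)) {
                fprintf(stderr, "CPUID leaf 0xA not supported\n");
                return 1;
        }

        printf("arch perfmon version: %u\n", eax.split.version_id);
        printf("generic counters:     %u (%u bits wide)\n",
               eax.split.num_counters, eax.split.bit_width);
        printf("fixed counters:       %u\n", edx.split.num_counters_fixed);
        return 0;
}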
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index df9d5f78385e..ca7310e02446 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -83,6 +83,7 @@ struct thread_info {
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SECCOMP		8	/* secure computing */
 #define TIF_MCE_NOTIFY		10	/* notify userspace of an MCE */
+#define TIF_PERF_COUNTERS	11	/* notify perf counter work */
 #define TIF_NOTSC		16	/* TSC is not accessible in userland */
 #define TIF_IA32		17	/* 32bit process */
 #define TIF_FORK		18	/* ret_from_fork */
@@ -105,6 +106,7 @@ struct thread_info {
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 #define _TIF_MCE_NOTIFY		(1 << TIF_MCE_NOTIFY)
+#define _TIF_PERF_COUNTERS	(1 << TIF_PERF_COUNTERS)
 #define _TIF_NOTSC		(1 << TIF_NOTSC)
 #define _TIF_IA32		(1 << TIF_IA32)
 #define _TIF_FORK		(1 << TIF_FORK)
@@ -136,7 +138,7 @@ struct thread_info {
 
 /* Only used for 64 bit */
 #define _TIF_DO_NOTIFY_MASK \
-	(_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_NOTIFY_RESUME)
+	(_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_PERF_COUNTERS|_TIF_NOTIFY_RESUME)
 
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW \
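
The thread_info change follows the usual TIF pattern: TIF_PERF_COUNTERS names a bit index, _TIF_PERF_COUNTERS is the corresponding mask, and that mask is folded into _TIF_DO_NOTIFY_MASK so the exit-to-user path will act on it. A minimal sketch of how such a flag word is tested is below; handle_exit_work() is hypothetical, since the real consumer lives in the arch signal/notify code outside this diff.

/* Sketch only: how a TIF_* bit and its _TIF_* mask are typically consumed. */
#include <stdio.h>

#define TIF_PERF_COUNTERS	11			/* bit index, as in the patch */
#define _TIF_PERF_COUNTERS	(1 << TIF_PERF_COUNTERS) /* derived mask */

static void handle_exit_work(unsigned int thread_info_flags)
{
        /* exit-to-user work loop tests the combined mask, then each bit */
        if (thread_info_flags & _TIF_PERF_COUNTERS)
                printf("would run the perf counter notification work here\n");
}

int main(void)
{
        handle_exit_work(_TIF_PERF_COUNTERS);
        return 0;
}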
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
index f2bba78430a4..7e47658b0a6f 100644
--- a/arch/x86/include/asm/unistd_32.h
+++ b/arch/x86/include/asm/unistd_32.h
@@ -338,6 +338,7 @@
 #define __NR_dup3		330
 #define __NR_pipe2		331
 #define __NR_inotify_init1	332
+#define __NR_perf_counter_open	333
 
 #ifdef __KERNEL__
 
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index d2e415e6666f..53025feaf88d 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -653,7 +653,8 @@ __SYSCALL(__NR_dup3, sys_dup3)
 __SYSCALL(__NR_pipe2, sys_pipe2)
 #define __NR_inotify_init1			294
 __SYSCALL(__NR_inotify_init1, sys_inotify_init1)
-
+#define __NR_perf_counter_open			295
+__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open)
 
 #ifndef __NO_STUBS
 #define __ARCH_WANT_OLD_READDIR
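
The new __NR_perf_counter_open slots (333 on 32-bit, 295 on 64-bit) only become meaningful together with the generic sys_perf_counter_open implementation, which is not part of this header diff, so its argument layout is not shown here. A small user-space probe, assuming only the numbers defined above (they are specific to the tree this patch targets; later kernels renamed and renumbered the call), can still check whether the slot is wired up on the running kernel:

/*
 * Sketch only: probe for the syscall by number.  All-zero arguments are a
 * probe, not a valid invocation -- an ENOSYS result means the kernel does
 * not implement this slot; anything else means some syscall answers there.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#ifdef __i386__
# define NR_PERF_COUNTER_OPEN	333	/* from unistd_32.h above */
#else
# define NR_PERF_COUNTER_OPEN	295	/* from unistd_64.h above */
#endif

int main(void)
{
        long ret = syscall(NR_PERF_COUNTER_OPEN, 0, 0, 0, 0, 0);

        if (ret < 0 && errno == ENOSYS)
                printf("perf_counter_open: not implemented by this kernel\n");
        else
                printf("syscall slot %d answers (ret=%ld, errno=%d)\n",
                       NR_PERF_COUNTER_OPEN, ret, errno);
        return 0;
}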