Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/cputime.h  |  65
-rw-r--r--  include/linux/bitops.h         |  10
-rw-r--r--  include/linux/bootmem.h        |   2
-rw-r--r--  include/linux/debugobjects.h   |   6
-rw-r--r--  include/linux/jump_label.h     |  27
-rw-r--r--  include/linux/kernel_stat.h    |  36
-rw-r--r--  include/linux/latencytop.h     |   3
-rw-r--r--  include/linux/lockdep.h        |   4
-rw-r--r--  include/linux/memblock.h       | 170
-rw-r--r--  include/linux/mm.h             |  34
-rw-r--r--  include/linux/mmzone.h         |   8
-rw-r--r--  include/linux/perf_event.h     |   8
-rw-r--r--  include/linux/poison.h         |   6
-rw-r--r--  include/linux/sched.h          |  23
-rw-r--r--  include/linux/wait.h           |   4
-rw-r--r--  include/trace/events/sched.h   |  57
16 files changed, 323 insertions(+), 140 deletions(-)
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 12a1764f612b..9a62937c56ca 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -4,71 +4,66 @@
 #include <linux/time.h>
 #include <linux/jiffies.h>
 
-typedef unsigned long cputime_t;
+typedef unsigned long __nocast cputime_t;
 
-#define cputime_zero			(0UL)
 #define cputime_one_jiffy		jiffies_to_cputime(1)
-#define cputime_max			((~0UL >> 1) - 1)
-#define cputime_add(__a, __b)		((__a) + (__b))
-#define cputime_sub(__a, __b)		((__a) - (__b))
-#define cputime_div(__a, __n)		((__a) / (__n))
-#define cputime_halve(__a)		((__a) >> 1)
-#define cputime_eq(__a, __b)		((__a) == (__b))
-#define cputime_gt(__a, __b)		((__a) > (__b))
-#define cputime_ge(__a, __b)		((__a) >= (__b))
-#define cputime_lt(__a, __b)		((__a) < (__b))
-#define cputime_le(__a, __b)		((__a) <= (__b))
-#define cputime_to_jiffies(__ct)	(__ct)
+#define cputime_to_jiffies(__ct)	(__force unsigned long)(__ct)
 #define cputime_to_scaled(__ct)	(__ct)
-#define jiffies_to_cputime(__hz)	(__hz)
+#define jiffies_to_cputime(__hz)	(__force cputime_t)(__hz)
 
-typedef u64 cputime64_t;
+typedef u64 __nocast cputime64_t;
 
-#define cputime64_zero			(0ULL)
-#define cputime64_add(__a, __b)	((__a) + (__b))
-#define cputime64_sub(__a, __b)	((__a) - (__b))
-#define cputime64_to_jiffies64(__ct)	(__ct)
-#define jiffies64_to_cputime64(__jif)	(__jif)
-#define cputime_to_cputime64(__ct)	((u64) __ct)
-#define cputime64_gt(__a, __b)		((__a) > (__b))
+#define cputime64_to_jiffies64(__ct)	(__force u64)(__ct)
+#define jiffies64_to_cputime64(__jif)	(__force cputime64_t)(__jif)
 
-#define nsecs_to_cputime64(__ct)	nsecs_to_jiffies64(__ct)
+#define nsecs_to_cputime64(__ct)	\
+	jiffies64_to_cputime64(nsecs_to_jiffies64(__ct))
 
 
 /*
  * Convert cputime to microseconds and back.
  */
-#define cputime_to_usecs(__ct)		jiffies_to_usecs(__ct)
-#define usecs_to_cputime(__msecs)	usecs_to_jiffies(__msecs)
-#define usecs_to_cputime64(__msecs)	nsecs_to_jiffies64((__msecs) * 1000)
+#define cputime_to_usecs(__ct)		\
+	jiffies_to_usecs(cputime_to_jiffies(__ct))
+#define usecs_to_cputime(__usec)	\
+	jiffies_to_cputime(usecs_to_jiffies(__usec))
+#define usecs_to_cputime64(__usec)	\
+	jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000))
 
 /*
  * Convert cputime to seconds and back.
  */
-#define cputime_to_secs(jif)		((jif) / HZ)
-#define secs_to_cputime(sec)		((sec) * HZ)
+#define cputime_to_secs(jif)		(cputime_to_jiffies(jif) / HZ)
+#define secs_to_cputime(sec)		jiffies_to_cputime((sec) * HZ)
 
 /*
  * Convert cputime to timespec and back.
  */
-#define timespec_to_cputime(__val)	timespec_to_jiffies(__val)
-#define cputime_to_timespec(__ct,__val)	jiffies_to_timespec(__ct,__val)
+#define timespec_to_cputime(__val)	\
+	jiffies_to_cputime(timespec_to_jiffies(__val))
+#define cputime_to_timespec(__ct,__val)	\
+	jiffies_to_timespec(cputime_to_jiffies(__ct),__val)
 
 /*
  * Convert cputime to timeval and back.
  */
-#define timeval_to_cputime(__val)	timeval_to_jiffies(__val)
-#define cputime_to_timeval(__ct,__val)	jiffies_to_timeval(__ct,__val)
+#define timeval_to_cputime(__val)	\
+	jiffies_to_cputime(timeval_to_jiffies(__val))
+#define cputime_to_timeval(__ct,__val)	\
+	jiffies_to_timeval(cputime_to_jiffies(__ct),__val)
 
 /*
  * Convert cputime to clock and back.
  */
-#define cputime_to_clock_t(__ct)	jiffies_to_clock_t(__ct)
-#define clock_t_to_cputime(__x)		clock_t_to_jiffies(__x)
+#define cputime_to_clock_t(__ct)	\
+	jiffies_to_clock_t(cputime_to_jiffies(__ct))
+#define clock_t_to_cputime(__x)		\
+	jiffies_to_cputime(clock_t_to_jiffies(__x))
 
 /*
  * Convert cputime64 to clock.
  */
-#define cputime64_to_clock_t(__ct)	jiffies_64_to_clock_t(__ct)
+#define cputime64_to_clock_t(__ct)	\
+	jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct))
 
 #endif
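
With the __nocast annotation, sparse now treats cputime_t as opaque: mixing it with plain integer arithmetic should draw a warning unless the value is routed through the conversion macros. A minimal caller sketch (the function is illustrative, not part of the patch):

        /*
         * Illustrative only: values enter and leave cputime_t through the
         * jiffies helpers; direct arithmetic like "cputime + 1" is what
         * the __nocast annotation is meant to flag under sparse.
         */
        static void account_example(cputime_t cputime)
        {
                u64 usecs = cputime_to_usecs(cputime);  /* explicit conversion out */
                cputime_t one_ms = usecs_to_cputime(1000); /* explicit conversion in */

                (void)usecs;
                (void)one_ms;
        }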
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a3ef66a2a083..3c1063acb2ab 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -22,8 +22,14 @@ extern unsigned long __sw_hweight64(__u64 w);
 #include <asm/bitops.h>
 
 #define for_each_set_bit(bit, addr, size) \
 	for ((bit) = find_first_bit((addr), (size)); \
 	     (bit) < (size); \
+	     (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_cont(bit, addr, size) \
+	for ((bit) = find_next_bit((addr), (size), (bit)); \
+	     (bit) < (size); \
 	     (bit) = find_next_bit((addr), (size), (bit) + 1))
 
 static __inline__ int get_bitmask_order(unsigned int count)
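
Unlike for_each_set_bit(), the new _cont variant starts scanning at the current value of 'bit' rather than at bit 0, so a caller can resume a partial walk. An illustrative sketch (the helper below is invented):

        /*
         * Count the set bits in the upper half of a bitmap by seeding the
         * loop variable instead of starting from zero.  Sketch only.
         */
        static unsigned int count_upper_half(unsigned long *mask, unsigned int nbits)
        {
                unsigned int bit = nbits / 2;   /* resume point: skip the lower half */
                unsigned int n = 0;

                for_each_set_bit_cont(bit, mask, nbits)
                        n++;
                return n;
        }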
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index ab344a521105..66d3e954eb6c 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -44,7 +44,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat,
 			       unsigned long endpfn);
 extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
 
-unsigned long free_all_memory_core_early(int nodeid);
+extern unsigned long free_low_memory_core_early(int nodeid);
 extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
 extern unsigned long free_all_bootmem(void);
 
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 65970b811e22..0e5f5785d9f2 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -46,6 +46,8 @@ struct debug_obj {
  *			  fails
  * @fixup_free:	fixup function, which is called when the free check
  *			  fails
+ * @fixup_assert_init:	fixup function, which is called when the assert_init
+ *			  check fails
  */
 struct debug_obj_descr {
 	const char		*name;
@@ -54,6 +56,7 @@ struct debug_obj_descr {
 	int (*fixup_activate)	(void *addr, enum debug_obj_state state);
 	int (*fixup_destroy)	(void *addr, enum debug_obj_state state);
 	int (*fixup_free)	(void *addr, enum debug_obj_state state);
+	int (*fixup_assert_init)(void *addr, enum debug_obj_state state);
 };
 
 #ifdef CONFIG_DEBUG_OBJECTS
@@ -64,6 +67,7 @@ extern void debug_object_activate (void *addr, struct debug_obj_descr *descr);
 extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
 extern void debug_object_destroy  (void *addr, struct debug_obj_descr *descr);
 extern void debug_object_free     (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr);
 
 /*
  * Active state:
@@ -89,6 +93,8 @@ static inline void
 debug_object_destroy  (void *addr, struct debug_obj_descr *descr) { }
 static inline void
 debug_object_free     (void *addr, struct debug_obj_descr *descr) { }
+static inline void
+debug_object_assert_init(void *addr, struct debug_obj_descr *descr) { }
 
 static inline void debug_objects_early_init(void) { }
 static inline void debug_objects_mem_init(void) { }
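
The new fixup_assert_init hook gives debugobjects a way to repair objects that debug_object_assert_init() finds untracked, typically statically allocated ones. A hedged sketch of a descriptor wiring it up (the my_obj_* names are invented; only debug_object_init() and ODEBUG_STATE_NOTAVAILABLE come from the real API):

        static struct debug_obj_descr my_obj_debug_descr;

        static int my_obj_fixup_assert_init(void *addr, enum debug_obj_state state)
        {
                switch (state) {
                case ODEBUG_STATE_NOTAVAILABLE:
                        /* e.g. a statically allocated object: register it now */
                        debug_object_init(addr, &my_obj_debug_descr);
                        return 1;       /* fixup done */
                default:
                        return 0;
                }
        }

        static struct debug_obj_descr my_obj_debug_descr = {
                .name                   = "my_obj",
                .fixup_assert_init      = my_obj_fixup_assert_init,
        };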
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 388b0d425b50..5ce8b140428f 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -3,6 +3,7 @@
 
 #include <linux/types.h>
 #include <linux/compiler.h>
+#include <linux/workqueue.h>
 
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
 
@@ -14,6 +15,12 @@ struct jump_label_key {
 #endif
 };
 
+struct jump_label_key_deferred {
+	struct jump_label_key key;
+	unsigned long timeout;
+	struct delayed_work work;
+};
+
 # include <asm/jump_label.h>
 # define HAVE_JUMP_LABEL
 #endif	/* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */
@@ -51,8 +58,11 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry,
 extern int jump_label_text_reserved(void *start, void *end);
 extern void jump_label_inc(struct jump_label_key *key);
 extern void jump_label_dec(struct jump_label_key *key);
+extern void jump_label_dec_deferred(struct jump_label_key_deferred *key);
 extern bool jump_label_enabled(struct jump_label_key *key);
 extern void jump_label_apply_nops(struct module *mod);
+extern void jump_label_rate_limit(struct jump_label_key_deferred *key,
+		unsigned long rl);
 
 #else  /* !HAVE_JUMP_LABEL */
 
@@ -68,6 +78,10 @@ static __always_inline void jump_label_init(void)
 {
 }
 
+struct jump_label_key_deferred {
+	struct jump_label_key key;
+};
+
 static __always_inline bool static_branch(struct jump_label_key *key)
 {
 	if (unlikely(atomic_read(&key->enabled)))
@@ -85,6 +99,11 @@ static inline void jump_label_dec(struct jump_label_key *key)
 	atomic_dec(&key->enabled);
 }
 
+static inline void jump_label_dec_deferred(struct jump_label_key_deferred *key)
+{
+	jump_label_dec(&key->key);
+}
+
 static inline int jump_label_text_reserved(void *start, void *end)
 {
 	return 0;
@@ -102,6 +121,14 @@ static inline int jump_label_apply_nops(struct module *mod)
 {
 	return 0;
 }
+
+static inline void jump_label_rate_limit(struct jump_label_key_deferred *key,
+		unsigned long rl)
+{
+}
 #endif	/* HAVE_JUMP_LABEL */
 
+#define jump_label_key_enabled	((struct jump_label_key){ .enabled = ATOMIC_INIT(1), })
+#define jump_label_key_disabled	((struct jump_label_key){ .enabled = ATOMIC_INIT(0), })
+
 #endif	/* _LINUX_JUMP_LABEL_H */
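
A deferred key batches the disable side: jump_label_dec_deferred() postpones the real jump_label_dec(), and with it the code patching, by the timeout set via jump_label_rate_limit(), so rapid inc/dec cycles do not thrash the text. Illustrative usage (the my_feature_* names are made up):

        static struct jump_label_key_deferred my_feature_events;

        static void my_feature_setup(void)
        {
                /* defer the real jump_label_dec() by about one second */
                jump_label_rate_limit(&my_feature_events, HZ);
        }

        static void my_feature_get(void)
        {
                jump_label_inc(&my_feature_events.key);   /* enable is immediate */
        }

        static void my_feature_put(void)
        {
                jump_label_dec_deferred(&my_feature_events); /* disable is batched */
        }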
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 0cce2db580c3..2fbd9053c2df 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -6,6 +6,7 @@
 #include <linux/percpu.h>
 #include <linux/cpumask.h>
 #include <linux/interrupt.h>
+#include <linux/sched.h>
 #include <asm/irq.h>
 #include <asm/cputime.h>
 
@@ -15,21 +16,25 @@
  * used by rstatd/perfmeter
  */
 
-struct cpu_usage_stat {
-	cputime64_t user;
-	cputime64_t nice;
-	cputime64_t system;
-	cputime64_t softirq;
-	cputime64_t irq;
-	cputime64_t idle;
-	cputime64_t iowait;
-	cputime64_t steal;
-	cputime64_t guest;
-	cputime64_t guest_nice;
+enum cpu_usage_stat {
+	CPUTIME_USER,
+	CPUTIME_NICE,
+	CPUTIME_SYSTEM,
+	CPUTIME_SOFTIRQ,
+	CPUTIME_IRQ,
+	CPUTIME_IDLE,
+	CPUTIME_IOWAIT,
+	CPUTIME_STEAL,
+	CPUTIME_GUEST,
+	CPUTIME_GUEST_NICE,
+	NR_STATS,
+};
+
+struct kernel_cpustat {
+	u64 cpustat[NR_STATS];
 };
 
 struct kernel_stat {
-	struct cpu_usage_stat	cpustat;
 #ifndef CONFIG_GENERIC_HARDIRQS
 	unsigned int irqs[NR_IRQS];
 #endif
@@ -38,10 +43,13 @@ struct kernel_stat {
 };
 
 DECLARE_PER_CPU(struct kernel_stat, kstat);
+DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
 
-#define kstat_cpu(cpu) per_cpu(kstat, cpu)
 /* Must have preemption disabled for this to be meaningful. */
-#define kstat_this_cpu __get_cpu_var(kstat)
+#define kstat_this_cpu (&__get_cpu_var(kstat))
+#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))
+#define kstat_cpu(cpu) per_cpu(kstat, cpu)
+#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
 
 extern unsigned long long nr_context_switches(void);
 
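
Per-cpu time accounting moves from named cputime64_t fields to a u64 array indexed by the cpu_usage_stat enum, split out into its own per-cpu kernel_cpustat. A reader sketch (the function is illustrative, not part of the patch):

        /*
         * Instead of kstat_cpu(cpu).cpustat.idle, times are now plain u64
         * slots selected by enum index.
         */
        static u64 get_idle_time_example(int cpu)
        {
                u64 idle   = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
                u64 iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];

                return idle + iowait;
        }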
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
index b0e99898527c..e23121f9d82a 100644
--- a/include/linux/latencytop.h
+++ b/include/linux/latencytop.h
@@ -10,6 +10,8 @@
 #define _INCLUDE_GUARD_LATENCYTOP_H_
 
 #include <linux/compiler.h>
+struct task_struct;
+
 #ifdef CONFIG_LATENCYTOP
 
 #define LT_SAVECOUNT		32
@@ -23,7 +25,6 @@ struct latency_record {
 };
 
 
-struct task_struct;
 
 extern int latencytop_enabled;
 void __account_scheduler_latency(struct task_struct *task, int usecs, int inter);
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b6a56e37284c..d36619ead3ba 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -343,6 +343,8 @@ extern void lockdep_trace_alloc(gfp_t mask);
 
 #define lockdep_assert_held(l)	WARN_ON(debug_locks && !lockdep_is_held(l))
 
+#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)
+
 #else /* !LOCKDEP */
 
 static inline void lockdep_off(void)
@@ -392,6 +394,8 @@ struct lock_class_key { };
 
 #define lockdep_assert_held(l)	do { } while (0)
 
+#define lockdep_recursing(tsk)	(0)
+
 #endif /* !LOCKDEP */
 
 #ifdef CONFIG_LOCK_STAT
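
lockdep_recursing() exposes the per-task recursion counter so callers can avoid re-entering lockdep-instrumented paths while lockdep itself is running; without CONFIG_LOCKDEP it compiles away to 0. A speculative sketch of the intended use (the surrounding function is invented):

        static bool can_use_instrumented_path(void)
        {
                /* bail out rather than re-enter lockdep from within lockdep */
                return !lockdep_recursing(current);
        }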
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index e6b843e16e81..a6bb10235148 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -2,8 +2,6 @@
 #define _LINUX_MEMBLOCK_H
 #ifdef __KERNEL__
 
-#define MEMBLOCK_ERROR	0
-
 #ifdef CONFIG_HAVE_MEMBLOCK
 /*
  * Logical memory blocks.
@@ -19,81 +17,161 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 
-#include <asm/memblock.h>
-
 #define INIT_MEMBLOCK_REGIONS	128
 
 struct memblock_region {
 	phys_addr_t base;
 	phys_addr_t size;
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+	int nid;
+#endif
 };
 
 struct memblock_type {
 	unsigned long cnt;	/* number of regions */
 	unsigned long max;	/* size of the allocated array */
+	phys_addr_t total_size;	/* size of all regions */
 	struct memblock_region *regions;
 };
 
 struct memblock {
 	phys_addr_t current_limit;
-	phys_addr_t memory_size;	/* Updated by memblock_analyze() */
 	struct memblock_type memory;
 	struct memblock_type reserved;
 };
 
 extern struct memblock memblock;
 extern int memblock_debug;
-extern int memblock_can_resize;
 
 #define memblock_dbg(fmt, ...) \
 	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
 
-u64 memblock_find_in_range(u64 start, u64 end, u64 size, u64 align);
+phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end,
+				phys_addr_t size, phys_addr_t align, int nid);
+phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
+				   phys_addr_t size, phys_addr_t align);
 int memblock_free_reserved_regions(void);
 int memblock_reserve_reserved_regions(void);
 
-extern void memblock_init(void);
-extern void memblock_analyze(void);
-extern long memblock_add(phys_addr_t base, phys_addr_t size);
-extern long memblock_remove(phys_addr_t base, phys_addr_t size);
-extern long memblock_free(phys_addr_t base, phys_addr_t size);
-extern long memblock_reserve(phys_addr_t base, phys_addr_t size);
+void memblock_allow_resize(void);
+int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
+int memblock_add(phys_addr_t base, phys_addr_t size);
+int memblock_remove(phys_addr_t base, phys_addr_t size);
+int memblock_free(phys_addr_t base, phys_addr_t size);
+int memblock_reserve(phys_addr_t base, phys_addr_t size);
+
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
+			  unsigned long *out_end_pfn, int *out_nid);
+
+/**
+ * for_each_mem_pfn_range - early memory pfn range iterator
+ * @i: an integer used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to ulong for start pfn of the range, can be %NULL
+ * @p_end: ptr to ulong for end pfn of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over configured memory ranges.  Available after early_node_map is
+ * populated.
+ */
+#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
+	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
+	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start,
+			   phys_addr_t *out_end, int *out_nid);
+
+/**
+ * for_each_free_mem_range - iterate through free memblock areas
+ * @i: u64 used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over free (memory && !reserved) areas of memblock.  Available as
+ * soon as memblock is initialized.
+ */
+#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid)		\
+	for (i = 0,							\
+	     __next_free_mem_range(&i, nid, p_start, p_end, p_nid);	\
+	     i != (u64)ULLONG_MAX;					\
+	     __next_free_mem_range(&i, nid, p_start, p_end, p_nid))
+
+void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start,
+			       phys_addr_t *out_end, int *out_nid);
 
-/* The numa aware allocator is only available if
- * CONFIG_ARCH_POPULATES_NODE_MAP is set
+/**
+ * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
+ * @i: u64 used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over free (memory && !reserved) areas of memblock in reverse
+ * order.  Available as soon as memblock is initialized.
  */
-extern phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align,
-				      int nid);
-extern phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
-					  int nid);
+#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid)	\
+	for (i = (u64)ULLONG_MAX,					\
+	     __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid);	\
+	     i != (u64)ULLONG_MAX;					\
+	     __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid))
 
-extern phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+int memblock_set_node(phys_addr_t base, phys_addr_t size, int nid);
+
+static inline void memblock_set_region_node(struct memblock_region *r, int nid)
+{
+	r->nid = nid;
+}
+
+static inline int memblock_get_region_node(const struct memblock_region *r)
+{
+	return r->nid;
+}
+#else
+static inline void memblock_set_region_node(struct memblock_region *r, int nid)
+{
+}
+
+static inline int memblock_get_region_node(const struct memblock_region *r)
+{
+	return 0;
+}
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
+phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+
+phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
 
 /* Flags for memblock_alloc_base() amd __memblock_alloc_base() */
 #define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
 #define MEMBLOCK_ALLOC_ACCESSIBLE	0
 
-extern phys_addr_t memblock_alloc_base(phys_addr_t size,
-					phys_addr_t align,
-					phys_addr_t max_addr);
-extern phys_addr_t __memblock_alloc_base(phys_addr_t size,
-					  phys_addr_t align,
-					  phys_addr_t max_addr);
-extern phys_addr_t memblock_phys_mem_size(void);
-extern phys_addr_t memblock_start_of_DRAM(void);
-extern phys_addr_t memblock_end_of_DRAM(void);
-extern void memblock_enforce_memory_limit(phys_addr_t memory_limit);
-extern int memblock_is_memory(phys_addr_t addr);
-extern int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
-extern int memblock_is_reserved(phys_addr_t addr);
-extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
-
-extern void memblock_dump_all(void);
-
-/* Provided by the architecture */
-extern phys_addr_t memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid);
-extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
-				   phys_addr_t addr2, phys_addr_t size2);
+phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
+				phys_addr_t max_addr);
+phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
+				  phys_addr_t max_addr);
+phys_addr_t memblock_phys_mem_size(void);
+phys_addr_t memblock_start_of_DRAM(void);
+phys_addr_t memblock_end_of_DRAM(void);
+void memblock_enforce_memory_limit(phys_addr_t memory_limit);
+int memblock_is_memory(phys_addr_t addr);
+int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
+int memblock_is_reserved(phys_addr_t addr);
+int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
+
+extern void __memblock_dump_all(void);
+
+static inline void memblock_dump_all(void)
+{
+	if (memblock_debug)
+		__memblock_dump_all();
+}
 
 /**
  * memblock_set_current_limit - Set the current allocation limit to allow
@@ -101,7 +179,7 @@ extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
  * accessible during boot
  * @limit: New limit value (physical address)
  */
-extern void memblock_set_current_limit(phys_addr_t limit);
+void memblock_set_current_limit(phys_addr_t limit);
 
 
 /*
@@ -154,9 +232,9 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
 	     region++)
 
 
-#ifdef ARCH_DISCARD_MEMBLOCK
-#define __init_memblock __init
-#define __initdata_memblock __initdata
+#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+#define __init_memblock __meminit
+#define __initdata_memblock __meminitdata
 #else
 #define __init_memblock
 #define __initdata_memblock
@@ -165,7 +243,7 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
 #else
 static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
 {
-	return MEMBLOCK_ERROR;
+	return 0;
}
 
 #endif /* CONFIG_HAVE_MEMBLOCK */
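
The callback-style walkers give way to plain for-loop iterators over free (memory && !reserved) areas. A sketch of walking all free ranges (the printing is an example, not from the patch):

        static void __init dump_free_ranges_example(void)
        {
                phys_addr_t start, end;
                u64 i;

                /* MAX_NUMNODES selects ranges on all nodes */
                for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
                        pr_info("free: [%#018llx-%#018llx)\n",
                                (unsigned long long)start,
                                (unsigned long long)end);
        }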
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 4baadd18f4ad..5d9b4c9813bd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1253,41 +1253,34 @@ static inline void pgtable_page_dtor(struct page *page)
 extern void free_area_init(unsigned long * zones_size);
 extern void free_area_init_node(int nid, unsigned long * zones_size,
 		unsigned long zone_start_pfn, unsigned long *zholes_size);
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 /*
- * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
+ * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
  * zones, allocate the backing mem_map and account for memory holes in a more
  * architecture independent manner. This is a substitute for creating the
  * zone_sizes[] and zholes_size[] arrays and passing them to
  * free_area_init_node()
  *
  * An architecture is expected to register range of page frames backed by
- * physical memory with add_active_range() before calling
+ * physical memory with memblock_add[_node]() before calling
  * free_area_init_nodes() passing in the PFN each zone ends at. At a basic
  * usage, an architecture is expected to do something like
  *
  * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
  * 							 max_highmem_pfn};
  * for_each_valid_physical_page_range()
- * 	add_active_range(node_id, start_pfn, end_pfn)
+ * 	memblock_add_node(base, size, nid)
  * free_area_init_nodes(max_zone_pfns);
  *
- * If the architecture guarantees that there are no holes in the ranges
- * registered with add_active_range(), free_bootmem_active_regions()
- * will call free_bootmem_node() for each registered physical page range.
- * Similarly sparse_memory_present_with_active_regions() calls
- * memory_present() for each range when SPARSEMEM is enabled.
+ * free_bootmem_with_active_regions() calls free_bootmem_node() for each
+ * registered physical page range.  Similarly
+ * sparse_memory_present_with_active_regions() calls memory_present() for
+ * each range when SPARSEMEM is enabled.
  *
  * See mm/page_alloc.c for more information on each function exposed by
- * CONFIG_ARCH_POPULATES_NODE_MAP
+ * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
  */
 extern void free_area_init_nodes(unsigned long *max_zone_pfn);
-extern void add_active_range(unsigned int nid, unsigned long start_pfn,
-					unsigned long end_pfn);
-extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
-					unsigned long end_pfn);
-extern void remove_all_active_ranges(void);
-void sort_node_map(void);
 unsigned long node_map_pfn_alignment(void);
 unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
 						unsigned long end_pfn);
@@ -1300,14 +1293,11 @@ extern void free_bootmem_with_active_regions(int nid,
 						unsigned long max_low_pfn);
 int add_from_early_node_map(struct range *range, int az,
 				   int nr_range, int nid);
-u64 __init find_memory_core_early(int nid, u64 size, u64 align,
-					u64 goal, u64 limit);
-typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
-extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
 extern void sparse_memory_present_with_active_regions(int nid);
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 
-#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
     !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
 static inline int __early_pfn_to_nid(unsigned long pfn)
 {
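
The comment above describes the new registration flow. A minimal boot-time sketch under assumed placeholder sizes (the function and the 128MB figure are illustrative only):

        static void __init example_paging_init(void)
        {
                unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };

                /* register RAM with memblock instead of add_active_range() */
                memblock_add_node(0, 128UL << 20, 0);   /* 128MB on node 0 */

                max_zone_pfns[ZONE_NORMAL] = (128UL << 20) >> PAGE_SHIFT;
                free_area_init_nodes(max_zone_pfns);
        }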
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 188cb2ffe8db..3ac040f19369 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -598,13 +598,13 @@ struct zonelist {
 #endif
 };
 
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 struct node_active_region {
 	unsigned long start_pfn;
 	unsigned long end_pfn;
 	int nid;
 };
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 #ifndef CONFIG_DISCONTIGMEM
 /* The array of struct pages - for discontigmem use pgdat->lmem_map */
@@ -720,7 +720,7 @@ extern int movable_zone;
 
 static inline int zone_movable_is_highmem(void)
 {
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE)
 	return movable_zone == ZONE_HIGHMEM;
 #else
 	return 0;
@@ -938,7 +938,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
 #endif
 
 #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
-	!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
+	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
 static inline unsigned long early_pfn_to_nid(unsigned long pfn)
 {
 	return 0;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index b1f89122bf6a..08855613ceb3 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -54,6 +54,7 @@ enum perf_hw_id {
 	PERF_COUNT_HW_BUS_CYCLES		= 6,
 	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
 	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
+	PERF_COUNT_HW_REF_CPU_CYCLES		= 9,
 
 	PERF_COUNT_HW_MAX,			/* non-ABI */
 };
@@ -890,6 +891,7 @@ struct perf_event_context {
 	int				nr_active;
 	int				is_active;
 	int				nr_stat;
+	int				nr_freq;
 	int				rotate_disable;
 	atomic_t			refcount;
 	struct task_struct		*task;
@@ -1063,12 +1065,12 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 	}
 }
 
-extern struct jump_label_key perf_sched_events;
+extern struct jump_label_key_deferred perf_sched_events;
 
 static inline void perf_event_task_sched_in(struct task_struct *prev,
 					    struct task_struct *task)
 {
-	if (static_branch(&perf_sched_events))
+	if (static_branch(&perf_sched_events.key))
 		__perf_event_task_sched_in(prev, task);
 }
 
@@ -1077,7 +1079,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
 {
 	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
-	if (static_branch(&perf_sched_events))
+	if (static_branch(&perf_sched_events.key))
 		__perf_event_task_sched_out(prev, next);
 }
 
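
perf_sched_events flips on and off as sched-related events come and go, so making it a deferred key keeps bursts of event teardown from patching code repeatedly. An assumed sketch of the release side (the function name is invented):

        static void example_release_sched_ref(void)
        {
                /* the actual jump_label_dec() is batched by the rate limit */
                jump_label_dec_deferred(&perf_sched_events);
        }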
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 79159de0e341..2110a81c5e2a 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -40,12 +40,6 @@
 #define	RED_INACTIVE	0x09F911029D74E35BULL	/* when obj is inactive */
 #define	RED_ACTIVE	0xD84156C5635688C0ULL	/* when obj is active */
 
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-#define MEMBLOCK_INACTIVE	0x3a84fb0144c9e71bULL
-#else
-#define MEMBLOCK_INACTIVE	0x44c9e71bUL
-#endif
-
 #define SLUB_RED_INACTIVE	0xbb
 #define SLUB_RED_ACTIVE		0xcc
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 56fa25a5b1eb..40d84481a1c9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -273,9 +273,11 @@ extern int runqueue_is_locked(int cpu);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern void select_nohz_load_balancer(int stop_tick);
+extern void set_cpu_sd_state_idle(void);
 extern int get_nohz_timer_target(void);
 #else
 static inline void select_nohz_load_balancer(int stop_tick) { }
+static inline void set_cpu_sd_state_idle(void) { }
 #endif
 
 /*
@@ -483,8 +485,8 @@ struct task_cputime {
 
 #define INIT_CPUTIME	\
 	(struct task_cputime) {				\
-		.utime = cputime_zero,			\
-		.stime = cputime_zero,			\
+		.utime = 0,				\
+		.stime = 0,				\
 		.sum_exec_runtime = 0,			\
 	}
 
@@ -901,6 +903,10 @@ struct sched_group_power {
 	 * single CPU.
 	 */
 	unsigned int power, power_orig;
+	/*
+	 * Number of busy cpus in this group.
+	 */
+	atomic_t nr_busy_cpus;
 };
 
 struct sched_group {
@@ -925,6 +931,15 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
 	return to_cpumask(sg->cpumask);
 }
 
+/**
+ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
+ * @group: The group whose first cpu is to be returned.
+ */
+static inline unsigned int group_first_cpu(struct sched_group *group)
+{
+	return cpumask_first(sched_group_cpus(group));
+}
+
 struct sched_domain_attr {
 	int relax_domain_level;
 };
@@ -1315,8 +1330,8 @@ struct task_struct {
 	 * older sibling, respectively.  (p->father can be replaced with
 	 * p->real_parent->pid)
 	 */
-	struct task_struct *real_parent; /* real parent process */
-	struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
+	struct task_struct __rcu *real_parent; /* real parent process */
+	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
 	/*
 	 * children/sibling forms the list of my natural children
 	 */
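
nr_busy_cpus lets NOHZ code ask whether a group is fully idle without scanning every runqueue. An illustrative helper (not from the patch; it assumes the group's power structure is reachable as sg->sgp, a field this hunk does not show):

        static inline bool sched_group_fully_idle(struct sched_group *sg)
        {
                return atomic_read(&sg->sgp->nr_busy_cpus) == 0;
        }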
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 3efc9f3f43a0..a9ce45e8501c 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -77,13 +77,13 @@ struct task_struct;
 #define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
 	{ .flags = word, .bit_nr = bit, }
 
-extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *);
+extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
 
 #define init_waitqueue_head(q)				\
 	do {						\
 		static struct lock_class_key __key;	\
 							\
-		__init_waitqueue_head((q), &__key);	\
+		__init_waitqueue_head((q), #q, &__key);	\
 	} while (0)
 
 #ifdef CONFIG_LOCKDEP
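
Callers are unchanged: the macro now stringizes its argument and passes it as the queue's name, which lockdep/debug output can then report. A usage sketch (the variable names are illustrative):

        static wait_queue_head_t my_wq;

        static void example_init(void)
        {
                /* expands to __init_waitqueue_head(&my_wq, "&my_wq", &__key) */
                init_waitqueue_head(&my_wq);
        }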
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 959ff18b63b6..6ba596b07a72 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -331,6 +331,13 @@ DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
 	     TP_ARGS(tsk, delay));
 
 /*
+ * Tracepoint for accounting blocked time (time the task is in uninterruptible).
+ */
+DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
+	     TP_PROTO(struct task_struct *tsk, u64 delay),
+	     TP_ARGS(tsk, delay));
+
+/*
  * Tracepoint for accounting runtime (time the task is executing
  * on a CPU).
  */
@@ -363,6 +370,56 @@ TRACE_EVENT(sched_stat_runtime,
 			(unsigned long long)__entry->vruntime)
 );
 
+#ifdef CREATE_TRACE_POINTS
+static inline u64 trace_get_sleeptime(struct task_struct *tsk)
+{
+#ifdef CONFIG_SCHEDSTATS
+	u64 block, sleep;
+
+	block = tsk->se.statistics.block_start;
+	sleep = tsk->se.statistics.sleep_start;
+	tsk->se.statistics.block_start = 0;
+	tsk->se.statistics.sleep_start = 0;
+
+	return block ? block : sleep ? sleep : 0;
+#else
+	return 0;
+#endif
+}
+#endif
+
+/*
+ * Tracepoint for accounting sleeptime (time the task is sleeping
+ * or waiting for I/O).
+ */
+TRACE_EVENT(sched_stat_sleeptime,
+
+	TP_PROTO(struct task_struct *tsk, u64 now),
+
+	TP_ARGS(tsk, now),
+
+	TP_STRUCT__entry(
+		__array( char,	comm,	TASK_COMM_LEN	)
+		__field( pid_t,	pid			)
+		__field( u64,	sleeptime		)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid		= tsk->pid;
+		__entry->sleeptime	= trace_get_sleeptime(tsk);
+		__entry->sleeptime	= __entry->sleeptime ?
+				now - __entry->sleeptime : 0;
+	)
+	TP_perf_assign(
+		__perf_count(__entry->sleeptime);
+	),
+
+	TP_printk("comm=%s pid=%d sleeptime=%Lu [ns]",
+			__entry->comm, __entry->pid,
+			(unsigned long long)__entry->sleeptime)
+);
+
 /*
  * Tracepoint for showing priority inheritance modifying a tasks
  * priority.