Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/bitops.h         |  10
-rw-r--r--  include/linux/blkdev.h         |   3
-rw-r--r--  include/linux/bootmem.h        |   2
-rw-r--r--  include/linux/clocksource.h    |  11
-rw-r--r--  include/linux/cpu.h            |   1
-rw-r--r--  include/linux/debugobjects.h   |   6
-rw-r--r--  include/linux/dma_remapping.h  |   2
-rw-r--r--  include/linux/hardirq.h        |  21
-rw-r--r--  include/linux/jump_label.h     |  27
-rw-r--r--  include/linux/kernel_stat.h    |  36
-rw-r--r--  include/linux/kvm.h            |   1
-rw-r--r--  include/linux/latencytop.h     |   3
-rw-r--r--  include/linux/lglock.h         |  36
-rw-r--r--  include/linux/lockdep.h        |   4
-rw-r--r--  include/linux/log2.h           |   1
-rw-r--r--  include/linux/memblock.h       | 170
-rw-r--r--  include/linux/mm.h             |  34
-rw-r--r--  include/linux/mmc/card.h       |   6
-rw-r--r--  include/linux/mmzone.h         |   8
-rw-r--r--  include/linux/perf_event.h     |   8
-rw-r--r--  include/linux/poison.h         |   6
-rw-r--r--  include/linux/rcupdate.h       | 115
-rw-r--r--  include/linux/sched.h          |  31
-rw-r--r--  include/linux/security.h       |   2
-rw-r--r--  include/linux/srcu.h           |  87
-rw-r--r--  include/linux/tick.h           |  11
-rw-r--r--  include/linux/wait.h           |   4
27 files changed, 449 insertions, 197 deletions
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a3ef66a2a083..3c1063acb2ab 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -22,8 +22,14 @@ extern unsigned long __sw_hweight64(__u64 w);
 #include <asm/bitops.h>
 
 #define for_each_set_bit(bit, addr, size) \
 	for ((bit) = find_first_bit((addr), (size));		\
 	     (bit) < (size);					\
+	     (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_cont(bit, addr, size) \
+	for ((bit) = find_next_bit((addr), (size), (bit));	\
+	     (bit) < (size);					\
 	     (bit) = find_next_bit((addr), (size), (bit) + 1))
 
 static __inline__ int get_bitmask_order(unsigned int count)
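The only difference between for_each_set_bit() and the new for_each_set_bit_cont() is the first probe: the latter starts scanning at the caller-supplied value of bit instead of at bit 0, so a walk can be resumed. A minimal sketch, with a made-up helper name that is not part of this patch:

    #include <linux/bitops.h>
    #include <linux/kernel.h>

    /* Resume a bitmap walk at 'start' instead of always rescanning from 0. */
    static void print_set_bits_from(const unsigned long *mask,
                                    unsigned int nbits, unsigned int start)
    {
            unsigned int bit = start;

            for_each_set_bit_cont(bit, mask, nbits)
                    pr_info("bit %u is set\n", bit);
    }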
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c7a6d3b5bc7b..94acd8172b5b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -805,9 +805,6 @@ extern void blk_unprep_request(struct request *);
  */
 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
 					spinlock_t *lock, int node_id);
-extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
-						      request_fn_proc *,
-						      spinlock_t *, int node_id);
 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
 						      request_fn_proc *, spinlock_t *);
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index ab344a521105..66d3e954eb6c 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -44,7 +44,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat,
 				       unsigned long endpfn);
 extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
 
-unsigned long free_all_memory_core_early(int nodeid);
+extern unsigned long free_low_memory_core_early(int nodeid);
 extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
 extern unsigned long free_all_bootmem(void);
 
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index c86c940d1de3..081147da0564 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -71,7 +71,7 @@ struct timecounter {
 
 /**
  * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
- * @tc:		Pointer to cycle counter.
+ * @cc:		Pointer to cycle counter.
  * @cycles:	Cycles
  *
  * XXX - This could use some mult_lxl_ll() asm optimization. Same code
@@ -114,7 +114,7 @@ extern u64 timecounter_read(struct timecounter *tc);
  *                        time base as values returned by
  *                        timecounter_read()
  * @tc:		Pointer to time counter.
- * @cycle:	a value returned by tc->cc->read()
+ * @cycle_tstamp:	a value returned by tc->cc->read()
  *
  * Cycle counts that are converted correctly as long as they
  * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
@@ -156,11 +156,12 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
  * @mult:		cycle to nanosecond multiplier
  * @shift:		cycle to nanosecond divisor (power of two)
  * @max_idle_ns:	max idle time permitted by the clocksource (nsecs)
- * @maxadj		maximum adjustment value to mult (~11%)
+ * @maxadj:		maximum adjustment value to mult (~11%)
 * @flags:		flags describing special properties
 * @archdata:		arch-specific data
 * @suspend:		suspend function for the clocksource, if necessary
 * @resume:		resume function for the clocksource, if necessary
+ * @cycle_last:		most recent cycle counter value seen by ::read()
  */
 struct clocksource {
 	/*
@@ -187,6 +188,7 @@ struct clocksource {
 	void (*suspend)(struct clocksource *cs);
 	void (*resume)(struct clocksource *cs);
 
+	/* private: */
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
 	/* Watchdog related data, used by the framework */
 	struct list_head wd_list;
@@ -261,6 +263,9 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
 
 /**
  * clocksource_cyc2ns - converts clocksource cycles to nanoseconds
+ * @cycles:	cycles
+ * @mult:	cycle to nanosecond multiplier
+ * @shift:	cycle to nanosecond divisor (power of two)
  *
  * Converts cycles to nanoseconds, using the given mult and shift.
  *
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 6cb60fd2ea84..305c263021e7 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -27,6 +27,7 @@ struct cpu {
 
 extern int register_cpu(struct cpu *cpu, int num);
 extern struct sys_device *get_cpu_sysdev(unsigned cpu);
+extern bool cpu_is_hotpluggable(unsigned cpu);
 
 extern int cpu_add_sysdev_attr(struct sysdev_attribute *attr);
 extern void cpu_remove_sysdev_attr(struct sysdev_attribute *attr);
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 65970b811e22..0e5f5785d9f2 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -46,6 +46,8 @@ struct debug_obj {
  *			fails
  * @fixup_free:		fixup function, which is called when the free check
  *			fails
+ * @fixup_assert_init:  fixup function, which is called when the assert_init
+ *			check fails
  */
 struct debug_obj_descr {
 	const char		*name;
@@ -54,6 +56,7 @@ struct debug_obj_descr {
 	int (*fixup_activate)	(void *addr, enum debug_obj_state state);
 	int (*fixup_destroy)	(void *addr, enum debug_obj_state state);
 	int (*fixup_free)	(void *addr, enum debug_obj_state state);
+	int (*fixup_assert_init)(void *addr, enum debug_obj_state state);
 };
 
 #ifdef CONFIG_DEBUG_OBJECTS
@@ -64,6 +67,7 @@ extern void debug_object_activate  (void *addr, struct debug_obj_descr *descr);
 extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
 extern void debug_object_destroy   (void *addr, struct debug_obj_descr *descr);
 extern void debug_object_free      (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr);
 
 /*
  * Active state:
@@ -89,6 +93,8 @@ static inline void
 debug_object_destroy   (void *addr, struct debug_obj_descr *descr) { }
 static inline void
 debug_object_free      (void *addr, struct debug_obj_descr *descr) { }
+static inline void
+debug_object_assert_init(void *addr, struct debug_obj_descr *descr) { }
 
 static inline void debug_objects_early_init(void) { }
 static inline void debug_objects_mem_init(void) { }
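The new debug_object_assert_init() lets code that is about to operate on an object (for example cancel or deactivate it) check that the object was ever initialized, with fixup_assert_init() giving the subsystem a chance to repair things such as static or on-stack objects. A rough sketch of a possible caller; the object type and descriptor names below are illustrative only:

    #include <linux/debugobjects.h>

    struct my_obj {
            int state;
    };

    /* Illustrative descriptor; a real one also sets .name and fixup hooks. */
    static struct debug_obj_descr my_obj_debug_descr;

    static void my_obj_cancel(struct my_obj *obj)
    {
            /* Complain if the caller hands us a never-initialized object. */
            debug_object_assert_init(obj, &my_obj_debug_descr);

            /* ... proceed with cancellation ... */
    }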
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index ef90cbd8e173..57c9a8ae4f2d 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -31,6 +31,7 @@ extern void free_dmar_iommu(struct intel_iommu *iommu);
 extern int iommu_calculate_agaw(struct intel_iommu *iommu);
 extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
 extern int dmar_disabled;
+extern int intel_iommu_enabled;
 #else
 static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
 {
@@ -44,6 +45,7 @@ static inline void free_dmar_iommu(struct intel_iommu *iommu)
 {
 }
 #define	dmar_disabled	(1)
+#define	intel_iommu_enabled (0)
 #endif
 
 
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f743883f769e..bb7f30971858 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -139,20 +139,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
 extern void account_system_vtime(struct task_struct *tsk);
 #endif
 
-#if defined(CONFIG_NO_HZ)
 #if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
-extern void rcu_enter_nohz(void);
-extern void rcu_exit_nohz(void);
-
-static inline void rcu_irq_enter(void)
-{
-	rcu_exit_nohz();
-}
-
-static inline void rcu_irq_exit(void)
-{
-	rcu_enter_nohz();
-}
 
 static inline void rcu_nmi_enter(void)
 {
@@ -163,17 +150,9 @@ static inline void rcu_nmi_exit(void)
 }
 
 #else
-extern void rcu_irq_enter(void);
-extern void rcu_irq_exit(void);
 extern void rcu_nmi_enter(void);
 extern void rcu_nmi_exit(void);
 #endif
-#else
-# define rcu_irq_enter() do { } while (0)
-# define rcu_irq_exit() do { } while (0)
-# define rcu_nmi_enter() do { } while (0)
-# define rcu_nmi_exit() do { } while (0)
-#endif /* #if defined(CONFIG_NO_HZ) */
 
 /*
  * It is safe to do non-atomic ops on ->hardirq_context,
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 388b0d425b50..5ce8b140428f 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -3,6 +3,7 @@
 
 #include <linux/types.h>
 #include <linux/compiler.h>
+#include <linux/workqueue.h>
 
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
 
@@ -14,6 +15,12 @@ struct jump_label_key {
 #endif
 };
 
+struct jump_label_key_deferred {
+	struct jump_label_key key;
+	unsigned long timeout;
+	struct delayed_work work;
+};
+
 # include <asm/jump_label.h>
 # define HAVE_JUMP_LABEL
 #endif	/* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */
@@ -51,8 +58,11 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry,
 extern int jump_label_text_reserved(void *start, void *end);
 extern void jump_label_inc(struct jump_label_key *key);
 extern void jump_label_dec(struct jump_label_key *key);
+extern void jump_label_dec_deferred(struct jump_label_key_deferred *key);
 extern bool jump_label_enabled(struct jump_label_key *key);
 extern void jump_label_apply_nops(struct module *mod);
+extern void jump_label_rate_limit(struct jump_label_key_deferred *key,
+		unsigned long rl);
 
 #else  /* !HAVE_JUMP_LABEL */
 
@@ -68,6 +78,10 @@ static __always_inline void jump_label_init(void)
 {
 }
 
+struct jump_label_key_deferred {
+	struct jump_label_key key;
+};
+
 static __always_inline bool static_branch(struct jump_label_key *key)
 {
 	if (unlikely(atomic_read(&key->enabled)))
@@ -85,6 +99,11 @@ static inline void jump_label_dec(struct jump_label_key *key)
 	atomic_dec(&key->enabled);
 }
 
+static inline void jump_label_dec_deferred(struct jump_label_key_deferred *key)
+{
+	jump_label_dec(&key->key);
+}
+
 static inline int jump_label_text_reserved(void *start, void *end)
 {
 	return 0;
@@ -102,6 +121,14 @@ static inline int jump_label_apply_nops(struct module *mod)
 {
 	return 0;
 }
+
+static inline void jump_label_rate_limit(struct jump_label_key_deferred *key,
+		unsigned long rl)
+{
+}
 #endif	/* HAVE_JUMP_LABEL */
 
+#define jump_label_key_enabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(1), })
+#define jump_label_key_disabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(0), })
+
 #endif	/* _LINUX_JUMP_LABEL_H */
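jump_label_rate_limit() and jump_label_dec_deferred() exist so that a key which flips at high frequency does not patch code on every transition: a decrement is deferred by at least the given timeout via the embedded delayed work (perf's scheduling events, converted in perf_event.h below, are the in-tree user). A hedged sketch with made-up names:

    #include <linux/jump_label.h>
    #include <linux/jiffies.h>
    #include <linux/init.h>

    /* Illustrative key; perf_sched_events is the real deferred key. */
    static struct jump_label_key_deferred my_events;

    static int __init my_events_init(void)
    {
            /* Batch disables: keep the branch enabled for at least HZ. */
            jump_label_rate_limit(&my_events, HZ);
            return 0;
    }

    static void my_events_get(void)
    {
            jump_label_inc(&my_events.key);
    }

    static void my_events_put(void)
    {
            /* Patching back happens only after the timeout expires. */
            jump_label_dec_deferred(&my_events);
    }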
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 0cce2db580c3..2fbd9053c2df 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -6,6 +6,7 @@
 #include <linux/percpu.h>
 #include <linux/cpumask.h>
 #include <linux/interrupt.h>
+#include <linux/sched.h>
 #include <asm/irq.h>
 #include <asm/cputime.h>
 
@@ -15,21 +16,25 @@
  * used by rstatd/perfmeter
  */
 
-struct cpu_usage_stat {
-	cputime64_t user;
-	cputime64_t nice;
-	cputime64_t system;
-	cputime64_t softirq;
-	cputime64_t irq;
-	cputime64_t idle;
-	cputime64_t iowait;
-	cputime64_t steal;
-	cputime64_t guest;
-	cputime64_t guest_nice;
+enum cpu_usage_stat {
+	CPUTIME_USER,
+	CPUTIME_NICE,
+	CPUTIME_SYSTEM,
+	CPUTIME_SOFTIRQ,
+	CPUTIME_IRQ,
+	CPUTIME_IDLE,
+	CPUTIME_IOWAIT,
+	CPUTIME_STEAL,
+	CPUTIME_GUEST,
+	CPUTIME_GUEST_NICE,
+	NR_STATS,
+};
+
+struct kernel_cpustat {
+	u64 cpustat[NR_STATS];
 };
 
 struct kernel_stat {
-	struct cpu_usage_stat	cpustat;
 #ifndef CONFIG_GENERIC_HARDIRQS
        unsigned int irqs[NR_IRQS];
 #endif
@@ -38,10 +43,13 @@ struct kernel_stat {
 };
 
 DECLARE_PER_CPU(struct kernel_stat, kstat);
+DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
 
-#define kstat_cpu(cpu) per_cpu(kstat, cpu)
 /* Must have preemption disabled for this to be meaningful. */
-#define kstat_this_cpu __get_cpu_var(kstat)
+#define kstat_this_cpu (&__get_cpu_var(kstat))
+#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))
+#define kstat_cpu(cpu) per_cpu(kstat, cpu)
+#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
 
 extern unsigned long long nr_context_switches(void);
 
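With cpustat turned into a u64 array indexed by enum cpu_usage_stat and exposed through kcpustat_cpu()/kcpustat_this_cpu, per-field access becomes plain array indexing. A small sketch (illustrative helper, not part of this patch):

    #include <linux/kernel_stat.h>

    /* Sum of idle and iowait time for a CPU, in cputime units. */
    static u64 get_cpu_idle_time(int cpu)
    {
            struct kernel_cpustat *kcs = &kcpustat_cpu(cpu);

            return kcs->cpustat[CPUTIME_IDLE] + kcs->cpustat[CPUTIME_IOWAIT];
    }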
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index c3892fc1d538..68e67e50d028 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -557,6 +557,7 @@ struct kvm_ppc_pvinfo {
 #define KVM_CAP_MAX_VCPUS 66       /* returns max vcpus per vm */
 #define KVM_CAP_PPC_PAPR 68
 #define KVM_CAP_S390_GMAP 71
+#define KVM_CAP_TSC_DEADLINE_TIMER 72
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
index b0e99898527c..e23121f9d82a 100644
--- a/include/linux/latencytop.h
+++ b/include/linux/latencytop.h
@@ -10,6 +10,8 @@
 #define _INCLUDE_GUARD_LATENCYTOP_H_
 
 #include <linux/compiler.h>
+struct task_struct;
+
 #ifdef CONFIG_LATENCYTOP
 
 #define LT_SAVECOUNT		32
@@ -23,7 +25,6 @@ struct latency_record {
 };
 
 
-struct task_struct;
 
 extern int latencytop_enabled;
 void __account_scheduler_latency(struct task_struct *task, int usecs, int inter);
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index f549056fb20b..87f402ccec55 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -22,6 +22,7 @@
 #include <linux/spinlock.h>
 #include <linux/lockdep.h>
 #include <linux/percpu.h>
+#include <linux/cpu.h>
 
 /* can make br locks by using local lock for read side, global lock for write */
 #define br_lock_init(name)	name##_lock_init()
@@ -72,9 +73,31 @@
 
 #define DEFINE_LGLOCK(name) \
 \
+ DEFINE_SPINLOCK(name##_cpu_lock); \
+ cpumask_t name##_cpus __read_mostly; \
  DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
  DEFINE_LGLOCK_LOCKDEP(name); \
 \
+ static int \
+ name##_lg_cpu_callback(struct notifier_block *nb, \
+				unsigned long action, void *hcpu) \
+ { \
+	switch (action & ~CPU_TASKS_FROZEN) { \
+	case CPU_UP_PREPARE: \
+		spin_lock(&name##_cpu_lock); \
+		cpu_set((unsigned long)hcpu, name##_cpus); \
+		spin_unlock(&name##_cpu_lock); \
+		break; \
+	case CPU_UP_CANCELED: case CPU_DEAD: \
+		spin_lock(&name##_cpu_lock); \
+		cpu_clear((unsigned long)hcpu, name##_cpus); \
+		spin_unlock(&name##_cpu_lock); \
+	} \
+	return NOTIFY_OK; \
+ } \
+ static struct notifier_block name##_lg_cpu_notifier = { \
+	.notifier_call = name##_lg_cpu_callback, \
+ }; \
  void name##_lock_init(void) { \
 	int i; \
 	LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
@@ -83,6 +106,11 @@
 		lock = &per_cpu(name##_lock, i); \
 		*lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
 	} \
+	register_hotcpu_notifier(&name##_lg_cpu_notifier); \
+	get_online_cpus(); \
+	for_each_online_cpu(i) \
+		cpu_set(i, name##_cpus); \
+	put_online_cpus(); \
 } \
 EXPORT_SYMBOL(name##_lock_init); \
 \
@@ -124,9 +152,9 @@
 \
 void name##_global_lock_online(void) { \
 	int i; \
-	preempt_disable(); \
+	spin_lock(&name##_cpu_lock); \
 	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
-	for_each_online_cpu(i) { \
+	for_each_cpu(i, &name##_cpus) { \
 		arch_spinlock_t *lock; \
 		lock = &per_cpu(name##_lock, i); \
 		arch_spin_lock(lock); \
@@ -137,12 +165,12 @@
 void name##_global_unlock_online(void) { \
 	int i; \
 	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
-	for_each_online_cpu(i) { \
+	for_each_cpu(i, &name##_cpus) { \
 		arch_spinlock_t *lock; \
 		lock = &per_cpu(name##_lock, i); \
 		arch_spin_unlock(lock); \
 	} \
-	preempt_enable(); \
+	spin_unlock(&name##_cpu_lock); \
 } \
 EXPORT_SYMBOL(name##_global_unlock_online); \
 \
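With this change the "online" write side takes name##_cpu_lock and walks name##_cpus, which the new hotplug notifier keeps in sync, instead of relying on preempt_disable() plus for_each_online_cpu(). Usage is unchanged for callers; a rough sketch with an illustrative lock name (fs/file_table.c's files_lglock is a real user):

    #include <linux/lglock.h>
    #include <linux/init.h>

    DECLARE_LGLOCK(my_lglock);
    DEFINE_LGLOCK(my_lglock);

    static int __init my_lglock_setup(void)
    {
            br_lock_init(my_lglock);   /* now also registers the CPU notifier */
            return 0;
    }

    static void my_read_path(void)
    {
            br_read_lock(my_lglock);   /* per-CPU spinlock, cheap */
            /* ... read mostly-per-CPU state ... */
            br_read_unlock(my_lglock);
    }

    static void my_write_path(void)
    {
            br_write_lock(my_lglock);  /* takes every registered CPU's lock */
            /* ... modify global state ... */
            br_write_unlock(my_lglock);
    }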
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b6a56e37284c..d36619ead3ba 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -343,6 +343,8 @@ extern void lockdep_trace_alloc(gfp_t mask);
 
 #define lockdep_assert_held(l)	WARN_ON(debug_locks && !lockdep_is_held(l))
 
+#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)
+
 #else /* !LOCKDEP */
 
 static inline void lockdep_off(void)
@@ -392,6 +394,8 @@ struct lock_class_key { };
 
 #define lockdep_assert_held(l)			do { } while (0)
 
+#define lockdep_recursing(tsk)			(0)
+
 #endif /* !LOCKDEP */
 
 #ifdef CONFIG_LOCK_STAT
| 397 | #ifdef CONFIG_LOCK_STAT | 401 | #ifdef CONFIG_LOCK_STAT |
diff --git a/include/linux/log2.h b/include/linux/log2.h index 25b808631cd9..fd7ff3d91e6a 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h | |||
| @@ -185,7 +185,6 @@ unsigned long __rounddown_pow_of_two(unsigned long n) | |||
| 185 | #define rounddown_pow_of_two(n) \ | 185 | #define rounddown_pow_of_two(n) \ |
| 186 | ( \ | 186 | ( \ |
| 187 | __builtin_constant_p(n) ? ( \ | 187 | __builtin_constant_p(n) ? ( \ |
| 188 | (n == 1) ? 0 : \ | ||
| 189 | (1UL << ilog2(n))) : \ | 188 | (1UL << ilog2(n))) : \ |
| 190 | __rounddown_pow_of_two(n) \ | 189 | __rounddown_pow_of_two(n) \ |
| 191 | ) | 190 | ) |
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index e6b843e16e81..a6bb10235148 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -2,8 +2,6 @@
 #define _LINUX_MEMBLOCK_H
 #ifdef __KERNEL__
 
-#define MEMBLOCK_ERROR	0
-
 #ifdef CONFIG_HAVE_MEMBLOCK
 /*
  * Logical memory blocks.
@@ -19,81 +17,161 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 
-#include <asm/memblock.h>
-
 #define INIT_MEMBLOCK_REGIONS	128
 
 struct memblock_region {
 	phys_addr_t base;
 	phys_addr_t size;
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+	int nid;
+#endif
 };
 
 struct memblock_type {
 	unsigned long cnt;	/* number of regions */
 	unsigned long max;	/* size of the allocated array */
+	phys_addr_t total_size;	/* size of all regions */
 	struct memblock_region *regions;
 };
 
 struct memblock {
 	phys_addr_t current_limit;
-	phys_addr_t memory_size;	/* Updated by memblock_analyze() */
 	struct memblock_type memory;
 	struct memblock_type reserved;
 };
 
 extern struct memblock memblock;
 extern int memblock_debug;
-extern int memblock_can_resize;
 
 #define memblock_dbg(fmt, ...) \
 	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
 
-u64 memblock_find_in_range(u64 start, u64 end, u64 size, u64 align);
+phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end,
+				phys_addr_t size, phys_addr_t align, int nid);
+phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
+				   phys_addr_t size, phys_addr_t align);
 int memblock_free_reserved_regions(void);
 int memblock_reserve_reserved_regions(void);
 
-extern void memblock_init(void);
-extern void memblock_analyze(void);
-extern long memblock_add(phys_addr_t base, phys_addr_t size);
-extern long memblock_remove(phys_addr_t base, phys_addr_t size);
-extern long memblock_free(phys_addr_t base, phys_addr_t size);
-extern long memblock_reserve(phys_addr_t base, phys_addr_t size);
+void memblock_allow_resize(void);
+int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
+int memblock_add(phys_addr_t base, phys_addr_t size);
+int memblock_remove(phys_addr_t base, phys_addr_t size);
+int memblock_free(phys_addr_t base, phys_addr_t size);
+int memblock_reserve(phys_addr_t base, phys_addr_t size);
+
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
+			  unsigned long *out_end_pfn, int *out_nid);
+
+/**
+ * for_each_mem_pfn_range - early memory pfn range iterator
+ * @i: an integer used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to ulong for start pfn of the range, can be %NULL
+ * @p_end: ptr to ulong for end pfn of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over configured memory ranges.  Available after early_node_map is
+ * populated.
+ */
+#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
+	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
+	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start,
+			   phys_addr_t *out_end, int *out_nid);
+
+/**
+ * for_each_free_mem_range - iterate through free memblock areas
+ * @i: u64 used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over free (memory && !reserved) areas of memblock.  Available as
+ * soon as memblock is initialized.
+ */
+#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid)		\
+	for (i = 0,							\
+	     __next_free_mem_range(&i, nid, p_start, p_end, p_nid);	\
+	     i != (u64)ULLONG_MAX;					\
+	     __next_free_mem_range(&i, nid, p_start, p_end, p_nid))
+
+void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start,
+			       phys_addr_t *out_end, int *out_nid);
 
-/* The numa aware allocator is only available if
- * CONFIG_ARCH_POPULATES_NODE_MAP is set
+/**
+ * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
+ * @i: u64 used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over free (memory && !reserved) areas of memblock in reverse
+ * order.  Available as soon as memblock is initialized.
  */
-extern phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align,
-					int nid);
-extern phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
-					    int nid);
+#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid)	\
+	for (i = (u64)ULLONG_MAX,					\
+	     __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid);	\
+	     i != (u64)ULLONG_MAX;					\
+	     __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid))
 
-extern phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+int memblock_set_node(phys_addr_t base, phys_addr_t size, int nid);
+
+static inline void memblock_set_region_node(struct memblock_region *r, int nid)
+{
+	r->nid = nid;
+}
+
+static inline int memblock_get_region_node(const struct memblock_region *r)
+{
+	return r->nid;
+}
+#else
+static inline void memblock_set_region_node(struct memblock_region *r, int nid)
+{
+}
+
+static inline int memblock_get_region_node(const struct memblock_region *r)
+{
+	return 0;
+}
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
+phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+
+phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
 
 /* Flags for memblock_alloc_base() amd __memblock_alloc_base() */
 #define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
 #define MEMBLOCK_ALLOC_ACCESSIBLE	0
 
-extern phys_addr_t memblock_alloc_base(phys_addr_t size,
-		phys_addr_t align,
-		phys_addr_t max_addr);
-extern phys_addr_t __memblock_alloc_base(phys_addr_t size,
-		phys_addr_t align,
-		phys_addr_t max_addr);
-extern phys_addr_t memblock_phys_mem_size(void);
-extern phys_addr_t memblock_start_of_DRAM(void);
-extern phys_addr_t memblock_end_of_DRAM(void);
-extern void memblock_enforce_memory_limit(phys_addr_t memory_limit);
-extern int memblock_is_memory(phys_addr_t addr);
-extern int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
-extern int memblock_is_reserved(phys_addr_t addr);
-extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
+phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
+				phys_addr_t max_addr);
+phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
+				  phys_addr_t max_addr);
+phys_addr_t memblock_phys_mem_size(void);
+phys_addr_t memblock_start_of_DRAM(void);
+phys_addr_t memblock_end_of_DRAM(void);
+void memblock_enforce_memory_limit(phys_addr_t memory_limit);
+int memblock_is_memory(phys_addr_t addr);
+int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
+int memblock_is_reserved(phys_addr_t addr);
+int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
+
+extern void __memblock_dump_all(void);
 
-extern void memblock_dump_all(void);
-
-/* Provided by the architecture */
-extern phys_addr_t memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid);
-extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
-				   phys_addr_t addr2, phys_addr_t size2);
+static inline void memblock_dump_all(void)
+{
+	if (memblock_debug)
+		__memblock_dump_all();
+}
 
 /**
  * memblock_set_current_limit - Set the current allocation limit to allow
@@ -101,7 +179,7 @@ extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
  * accessible during boot
  * @limit: New limit value (physical address)
  */
-extern void memblock_set_current_limit(phys_addr_t limit);
+void memblock_set_current_limit(phys_addr_t limit);
 
 
 /*
@@ -154,9 +232,9 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
 	     region++)
 
 
-#ifdef ARCH_DISCARD_MEMBLOCK
-#define __init_memblock __init
-#define __initdata_memblock __initdata
+#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+#define __init_memblock __meminit
+#define __initdata_memblock __meminitdata
 #else
 #define __init_memblock
 #define __initdata_memblock
@@ -165,7 +243,7 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
 #else
 static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
 {
-	return MEMBLOCK_ERROR;
+	return 0;
 }
 
 #endif /* CONFIG_HAVE_MEMBLOCK */
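The new for_each_free_mem_range() iterator walks memory && !reserved areas straight out of the memblock arrays, which is what lets the early_node_map helpers be dropped from mm.h below. A sketch of an early-boot user, assuming memblock is already populated (the helper name is illustrative):

    #include <linux/memblock.h>
    #include <linux/kernel.h>

    /* Total free memory below 'limit', walked from the memblock arrays. */
    static phys_addr_t __init free_mem_below(phys_addr_t limit)
    {
            phys_addr_t start, end, total = 0;
            u64 i;

            for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
                    total += min(end, limit) - min(start, limit);

            return total;
    }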
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 4baadd18f4ad..5d9b4c9813bd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1253,41 +1253,34 @@ static inline void pgtable_page_dtor(struct page *page)
 extern void free_area_init(unsigned long * zones_size);
 extern void free_area_init_node(int nid, unsigned long * zones_size,
 		unsigned long zone_start_pfn, unsigned long *zholes_size);
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 /*
- * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
+ * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
  * zones, allocate the backing mem_map and account for memory holes in a more
  * architecture independent manner. This is a substitute for creating the
  * zone_sizes[] and zholes_size[] arrays and passing them to
  * free_area_init_node()
  *
  * An architecture is expected to register range of page frames backed by
- * physical memory with add_active_range() before calling
+ * physical memory with memblock_add[_node]() before calling
  * free_area_init_nodes() passing in the PFN each zone ends at. At a basic
  * usage, an architecture is expected to do something like
  *
  * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
  * 							 max_highmem_pfn};
  * for_each_valid_physical_page_range()
- * 	add_active_range(node_id, start_pfn, end_pfn)
+ * 	memblock_add_node(base, size, nid)
  * free_area_init_nodes(max_zone_pfns);
  *
- * If the architecture guarantees that there are no holes in the ranges
- * registered with add_active_range(), free_bootmem_active_regions()
- * will call free_bootmem_node() for each registered physical page range.
- * Similarly sparse_memory_present_with_active_regions() calls
- * memory_present() for each range when SPARSEMEM is enabled.
+ * free_bootmem_with_active_regions() calls free_bootmem_node() for each
+ * registered physical page range.  Similarly
+ * sparse_memory_present_with_active_regions() calls memory_present() for
+ * each range when SPARSEMEM is enabled.
  *
  * See mm/page_alloc.c for more information on each function exposed by
- * CONFIG_ARCH_POPULATES_NODE_MAP
+ * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
  */
 extern void free_area_init_nodes(unsigned long *max_zone_pfn);
-extern void add_active_range(unsigned int nid, unsigned long start_pfn,
-					unsigned long end_pfn);
-extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
-					unsigned long end_pfn);
-extern void remove_all_active_ranges(void);
-void sort_node_map(void);
 unsigned long node_map_pfn_alignment(void);
 unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
 						unsigned long end_pfn);
@@ -1300,14 +1293,11 @@ extern void free_bootmem_with_active_regions(int nid,
 						unsigned long max_low_pfn);
 int add_from_early_node_map(struct range *range, int az,
 				   int nr_range, int nid);
-u64 __init find_memory_core_early(int nid, u64 size, u64 align,
-					u64 goal, u64 limit);
-typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
-extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
 extern void sparse_memory_present_with_active_regions(int nid);
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 
-#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
     !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
 static inline int __early_pfn_to_nid(unsigned long pfn)
 {
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 415f2db414e1..c8ef9bc54d50 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -218,6 +218,7 @@ struct mmc_card {
 #define MMC_QUIRK_INAND_CMD38	(1<<6)		/* iNAND devices have broken CMD38 */
 #define MMC_QUIRK_BLK_NO_CMD23	(1<<7)		/* Avoid CMD23 for regular multiblock */
 #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8)	/* Avoid sending 512 bytes in */
+#define MMC_QUIRK_LONG_READ_TIME (1<<9)		/* Data read time > CSD says */
 						/* byte mode */
 	unsigned int    poweroff_notify_state;	/* eMMC4.5 notify feature */
 #define MMC_NO_POWER_NOTIFICATION	0
@@ -433,6 +434,11 @@ static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c)
 	return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512;
 }
 
+static inline int mmc_card_long_read_time(const struct mmc_card *c)
+{
+	return c->quirks & MMC_QUIRK_LONG_READ_TIME;
+}
+
 #define mmc_card_name(c)	((c)->cid.prod_name)
 #define mmc_card_id(c)		(dev_name(&(c)->dev))
 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 188cb2ffe8db..3ac040f19369 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -598,13 +598,13 @@ struct zonelist {
 #endif
 };
 
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 struct node_active_region {
 	unsigned long start_pfn;
 	unsigned long end_pfn;
 	int nid;
 };
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 #ifndef CONFIG_DISCONTIGMEM
 /* The array of struct pages - for discontigmem use pgdat->lmem_map */
@@ -720,7 +720,7 @@ extern int movable_zone;
 
 static inline int zone_movable_is_highmem(void)
 {
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE)
 	return movable_zone == ZONE_HIGHMEM;
 #else
 	return 0;
@@ -938,7 +938,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
 #endif
 
 #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
-	!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
+	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
 static inline unsigned long early_pfn_to_nid(unsigned long pfn)
 {
 	return 0;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index b1f89122bf6a..08855613ceb3 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -54,6 +54,7 @@ enum perf_hw_id {
 	PERF_COUNT_HW_BUS_CYCLES		= 6,
 	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
 	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
+	PERF_COUNT_HW_REF_CPU_CYCLES		= 9,
 
 	PERF_COUNT_HW_MAX,			/* non-ABI */
 };
@@ -890,6 +891,7 @@ struct perf_event_context {
 	int				nr_active;
 	int				is_active;
 	int				nr_stat;
+	int				nr_freq;
 	int				rotate_disable;
 	atomic_t			refcount;
 	struct task_struct		*task;
@@ -1063,12 +1065,12 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 	}
 }
 
-extern struct jump_label_key perf_sched_events;
+extern struct jump_label_key_deferred perf_sched_events;
 
 static inline void perf_event_task_sched_in(struct task_struct *prev,
 					    struct task_struct *task)
 {
-	if (static_branch(&perf_sched_events))
+	if (static_branch(&perf_sched_events.key))
 		__perf_event_task_sched_in(prev, task);
 }
 
@@ -1077,7 +1079,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
 {
 	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
-	if (static_branch(&perf_sched_events))
+	if (static_branch(&perf_sched_events.key))
 		__perf_event_task_sched_out(prev, next);
 }
 
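PERF_COUNT_HW_REF_CPU_CYCLES is a new generic hardware event: cycles counted at the constant reference rate rather than at the current core frequency. A small user-space sketch using perf_event_open(); error handling is minimal and the measured workload is left out:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
            struct perf_event_attr attr;
            long long count = 0;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_REF_CPU_CYCLES;
            attr.disabled = 1;

            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0)
                    return 1;

            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            /* ... run the workload to be measured ... */
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("ref-cycles: %lld\n", count);
            close(fd);
            return 0;
    }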
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 79159de0e341..2110a81c5e2a 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -40,12 +40,6 @@
 #define	RED_INACTIVE	0x09F911029D74E35BULL	/* when obj is inactive */
 #define	RED_ACTIVE	0xD84156C5635688C0ULL	/* when obj is active */
 
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-#define MEMBLOCK_INACTIVE	0x3a84fb0144c9e71bULL
-#else
-#define MEMBLOCK_INACTIVE	0x44c9e71bUL
-#endif
-
 #define SLUB_RED_INACTIVE	0xbb
 #define SLUB_RED_ACTIVE		0xcc
 
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 2cf4226ade7e..81c04f4348ec 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
| @@ -51,6 +51,8 @@ extern int rcutorture_runnable; /* for sysctl */ | |||
| 51 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) | 51 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) |
| 52 | extern void rcutorture_record_test_transition(void); | 52 | extern void rcutorture_record_test_transition(void); |
| 53 | extern void rcutorture_record_progress(unsigned long vernum); | 53 | extern void rcutorture_record_progress(unsigned long vernum); |
| 54 | extern void do_trace_rcu_torture_read(char *rcutorturename, | ||
| 55 | struct rcu_head *rhp); | ||
| 54 | #else | 56 | #else |
| 55 | static inline void rcutorture_record_test_transition(void) | 57 | static inline void rcutorture_record_test_transition(void) |
| 56 | { | 58 | { |
| @@ -58,6 +60,12 @@ static inline void rcutorture_record_test_transition(void) | |||
| 58 | static inline void rcutorture_record_progress(unsigned long vernum) | 60 | static inline void rcutorture_record_progress(unsigned long vernum) |
| 59 | { | 61 | { |
| 60 | } | 62 | } |
| 63 | #ifdef CONFIG_RCU_TRACE | ||
| 64 | extern void do_trace_rcu_torture_read(char *rcutorturename, | ||
| 65 | struct rcu_head *rhp); | ||
| 66 | #else | ||
| 67 | #define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0) | ||
| 68 | #endif | ||
| 61 | #endif | 69 | #endif |
| 62 | 70 | ||
| 63 | #define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b)) | 71 | #define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b)) |
| @@ -177,23 +185,10 @@ extern void rcu_sched_qs(int cpu); | |||
| 177 | extern void rcu_bh_qs(int cpu); | 185 | extern void rcu_bh_qs(int cpu); |
| 178 | extern void rcu_check_callbacks(int cpu, int user); | 186 | extern void rcu_check_callbacks(int cpu, int user); |
| 179 | struct notifier_block; | 187 | struct notifier_block; |
| 180 | 188 | extern void rcu_idle_enter(void); | |
| 181 | #ifdef CONFIG_NO_HZ | 189 | extern void rcu_idle_exit(void); |
| 182 | 190 | extern void rcu_irq_enter(void); | |
| 183 | extern void rcu_enter_nohz(void); | 191 | extern void rcu_irq_exit(void); |
| 184 | extern void rcu_exit_nohz(void); | ||
| 185 | |||
| 186 | #else /* #ifdef CONFIG_NO_HZ */ | ||
| 187 | |||
| 188 | static inline void rcu_enter_nohz(void) | ||
| 189 | { | ||
| 190 | } | ||
| 191 | |||
| 192 | static inline void rcu_exit_nohz(void) | ||
| 193 | { | ||
| 194 | } | ||
| 195 | |||
| 196 | #endif /* #else #ifdef CONFIG_NO_HZ */ | ||
| 197 | 192 | ||
| 198 | /* | 193 | /* |
| 199 | * Infrastructure to implement the synchronize_() primitives in | 194 | * Infrastructure to implement the synchronize_() primitives in |
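[Editor's note] The hunk above splits the old NO_HZ-only API in two: rcu_idle_enter()/rcu_idle_exit() mark the idle extended quiescent state, while rcu_irq_enter()/rcu_irq_exit() temporarily bring the CPU back into RCU's view for interrupts taken from that state. A hedged sketch of an interrupt path that needs RCU while the CPU is otherwise idle; my_handle_device_irq() is a placeholder:

    #include <linux/rcupdate.h>

    extern void my_handle_device_irq(void);    /* hypothetical handler body */

    static void irq_from_idle_sketch(void)
    {
            rcu_irq_enter();            /* leave the idle quiescent state   */
            my_handle_device_irq();     /* may now use rcu_read_lock() etc. */
            rcu_irq_exit();             /* return to the idle quiescent state */
    }

In practice the generic irq_enter()/irq_exit() paths are expected to make these calls; the sketch only shows the required pairing.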
| @@ -233,22 +228,30 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head) | |||
| 233 | 228 | ||
| 234 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 229 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 235 | 230 | ||
| 236 | extern struct lockdep_map rcu_lock_map; | 231 | #ifdef CONFIG_PROVE_RCU |
| 237 | # define rcu_read_acquire() \ | 232 | extern int rcu_is_cpu_idle(void); |
| 238 | lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) | 233 | #else /* !CONFIG_PROVE_RCU */ |
| 239 | # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) | 234 | static inline int rcu_is_cpu_idle(void) |
| 235 | { | ||
| 236 | return 0; | ||
| 237 | } | ||
| 238 | #endif /* else !CONFIG_PROVE_RCU */ | ||
| 240 | 239 | ||
| 241 | extern struct lockdep_map rcu_bh_lock_map; | 240 | static inline void rcu_lock_acquire(struct lockdep_map *map) |
| 242 | # define rcu_read_acquire_bh() \ | 241 | { |
| 243 | lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) | 242 | WARN_ON_ONCE(rcu_is_cpu_idle()); |
| 244 | # define rcu_read_release_bh() lock_release(&rcu_bh_lock_map, 1, _THIS_IP_) | 243 | lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_); |
| 244 | } | ||
| 245 | 245 | ||
| 246 | extern struct lockdep_map rcu_sched_lock_map; | 246 | static inline void rcu_lock_release(struct lockdep_map *map) |
| 247 | # define rcu_read_acquire_sched() \ | 247 | { |
| 248 | lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) | 248 | WARN_ON_ONCE(rcu_is_cpu_idle()); |
| 249 | # define rcu_read_release_sched() \ | 249 | lock_release(map, 1, _THIS_IP_); |
| 250 | lock_release(&rcu_sched_lock_map, 1, _THIS_IP_) | 250 | } |
| 251 | 251 | ||
| 252 | extern struct lockdep_map rcu_lock_map; | ||
| 253 | extern struct lockdep_map rcu_bh_lock_map; | ||
| 254 | extern struct lockdep_map rcu_sched_lock_map; | ||
| 252 | extern int debug_lockdep_rcu_enabled(void); | 255 | extern int debug_lockdep_rcu_enabled(void); |
| 253 | 256 | ||
| 254 | /** | 257 | /** |
| @@ -262,11 +265,18 @@ extern int debug_lockdep_rcu_enabled(void); | |||
| 262 | * | 265 | * |
| 263 | * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot | 266 | * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot |
| 264 | * and while lockdep is disabled. | 267 | * and while lockdep is disabled. |
| 268 | * | ||
| 269 | * Note that rcu_read_lock() and the matching rcu_read_unlock() must | ||
| 270 | * occur in the same context, for example, it is illegal to invoke | ||
| 271 | * rcu_read_unlock() in process context if the matching rcu_read_lock() | ||
| 272 | * was invoked from within an irq handler. | ||
| 265 | */ | 273 | */ |
| 266 | static inline int rcu_read_lock_held(void) | 274 | static inline int rcu_read_lock_held(void) |
| 267 | { | 275 | { |
| 268 | if (!debug_lockdep_rcu_enabled()) | 276 | if (!debug_lockdep_rcu_enabled()) |
| 269 | return 1; | 277 | return 1; |
| 278 | if (rcu_is_cpu_idle()) | ||
| 279 | return 0; | ||
| 270 | return lock_is_held(&rcu_lock_map); | 280 | return lock_is_held(&rcu_lock_map); |
| 271 | } | 281 | } |
| 272 | 282 | ||
| @@ -290,6 +300,19 @@ extern int rcu_read_lock_bh_held(void); | |||
| 290 | * | 300 | * |
| 291 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot | 301 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot |
| 292 | * and while lockdep is disabled. | 302 | * and while lockdep is disabled. |
| 303 | * | ||
| 304 | * Note that if the CPU is in the idle loop from an RCU point of | ||
| 305 | * view (i.e., in the section between rcu_idle_enter() and | ||
| 306 | * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU | ||
| 307 | * did an rcu_read_lock(). The reason for this is that RCU ignores CPUs | ||
| 308 | * that are in such a section, considering them to be in an extended | ||
| 309 | * quiescent state, so such a CPU is effectively never in an RCU read-side | ||
| 310 | * critical section regardless of what RCU primitives it invokes. This | ||
| 311 | * state of affairs is required --- we need to keep an RCU-free window in | ||
| 312 | * idle where the CPU may possibly enter into low power mode. This way we | ||
| 313 | * can report an extended quiescent state to other CPUs that started a grace | ||
| 314 | * period. Otherwise we would delay any grace period as long as we run in | ||
| 315 | * the idle task. | ||
| 293 | */ | 316 | */ |
| 294 | #ifdef CONFIG_PREEMPT_COUNT | 317 | #ifdef CONFIG_PREEMPT_COUNT |
| 295 | static inline int rcu_read_lock_sched_held(void) | 318 | static inline int rcu_read_lock_sched_held(void) |
| @@ -298,6 +321,8 @@ static inline int rcu_read_lock_sched_held(void) | |||
| 298 | 321 | ||
| 299 | if (!debug_lockdep_rcu_enabled()) | 322 | if (!debug_lockdep_rcu_enabled()) |
| 300 | return 1; | 323 | return 1; |
| 324 | if (rcu_is_cpu_idle()) | ||
| 325 | return 0; | ||
| 301 | if (debug_locks) | 326 | if (debug_locks) |
| 302 | lockdep_opinion = lock_is_held(&rcu_sched_lock_map); | 327 | lockdep_opinion = lock_is_held(&rcu_sched_lock_map); |
| 303 | return lockdep_opinion || preempt_count() != 0 || irqs_disabled(); | 328 | return lockdep_opinion || preempt_count() != 0 || irqs_disabled(); |
| @@ -311,12 +336,8 @@ static inline int rcu_read_lock_sched_held(void) | |||
| 311 | 336 | ||
| 312 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 337 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
| 313 | 338 | ||
| 314 | # define rcu_read_acquire() do { } while (0) | 339 | # define rcu_lock_acquire(a) do { } while (0) |
| 315 | # define rcu_read_release() do { } while (0) | 340 | # define rcu_lock_release(a) do { } while (0) |
| 316 | # define rcu_read_acquire_bh() do { } while (0) | ||
| 317 | # define rcu_read_release_bh() do { } while (0) | ||
| 318 | # define rcu_read_acquire_sched() do { } while (0) | ||
| 319 | # define rcu_read_release_sched() do { } while (0) | ||
| 320 | 341 | ||
| 321 | static inline int rcu_read_lock_held(void) | 342 | static inline int rcu_read_lock_held(void) |
| 322 | { | 343 | { |
| @@ -637,7 +658,7 @@ static inline void rcu_read_lock(void) | |||
| 637 | { | 658 | { |
| 638 | __rcu_read_lock(); | 659 | __rcu_read_lock(); |
| 639 | __acquire(RCU); | 660 | __acquire(RCU); |
| 640 | rcu_read_acquire(); | 661 | rcu_lock_acquire(&rcu_lock_map); |
| 641 | } | 662 | } |
| 642 | 663 | ||
| 643 | /* | 664 | /* |
| @@ -657,7 +678,7 @@ static inline void rcu_read_lock(void) | |||
| 657 | */ | 678 | */ |
| 658 | static inline void rcu_read_unlock(void) | 679 | static inline void rcu_read_unlock(void) |
| 659 | { | 680 | { |
| 660 | rcu_read_release(); | 681 | rcu_lock_release(&rcu_lock_map); |
| 661 | __release(RCU); | 682 | __release(RCU); |
| 662 | __rcu_read_unlock(); | 683 | __rcu_read_unlock(); |
| 663 | } | 684 | } |
| @@ -673,12 +694,17 @@ static inline void rcu_read_unlock(void) | |||
| 673 | * critical sections in interrupt context can use just rcu_read_lock(), | 694 | * critical sections in interrupt context can use just rcu_read_lock(), |
| 674 | * though this should at least be commented to avoid confusing people | 695 | * though this should at least be commented to avoid confusing people |
| 675 | * reading the code. | 696 | * reading the code. |
| 697 | * | ||
| 698 | * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh() | ||
| 699 | * must occur in the same context, for example, it is illegal to invoke | ||
| 700 | * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh() | ||
| 701 | * was invoked from some other task. | ||
| 676 | */ | 702 | */ |
| 677 | static inline void rcu_read_lock_bh(void) | 703 | static inline void rcu_read_lock_bh(void) |
| 678 | { | 704 | { |
| 679 | local_bh_disable(); | 705 | local_bh_disable(); |
| 680 | __acquire(RCU_BH); | 706 | __acquire(RCU_BH); |
| 681 | rcu_read_acquire_bh(); | 707 | rcu_lock_acquire(&rcu_bh_lock_map); |
| 682 | } | 708 | } |
| 683 | 709 | ||
| 684 | /* | 710 | /* |
| @@ -688,7 +714,7 @@ static inline void rcu_read_lock_bh(void) | |||
| 688 | */ | 714 | */ |
| 689 | static inline void rcu_read_unlock_bh(void) | 715 | static inline void rcu_read_unlock_bh(void) |
| 690 | { | 716 | { |
| 691 | rcu_read_release_bh(); | 717 | rcu_lock_release(&rcu_bh_lock_map); |
| 692 | __release(RCU_BH); | 718 | __release(RCU_BH); |
| 693 | local_bh_enable(); | 719 | local_bh_enable(); |
| 694 | } | 720 | } |
| @@ -700,12 +726,17 @@ static inline void rcu_read_unlock_bh(void) | |||
| 700 | * are being done using call_rcu_sched() or synchronize_rcu_sched(). | 726 | * are being done using call_rcu_sched() or synchronize_rcu_sched(). |
| 701 | * Read-side critical sections can also be introduced by anything that | 727 | * Read-side critical sections can also be introduced by anything that |
| 702 | * disables preemption, including local_irq_disable() and friends. | 728 | * disables preemption, including local_irq_disable() and friends. |
| 729 | * | ||
| 730 | * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched() | ||
| 731 | * must occur in the same context, for example, it is illegal to invoke | ||
| 732 | * rcu_read_unlock_sched() from process context if the matching | ||
| 733 | * rcu_read_lock_sched() was invoked from an NMI handler. | ||
| 703 | */ | 734 | */ |
| 704 | static inline void rcu_read_lock_sched(void) | 735 | static inline void rcu_read_lock_sched(void) |
| 705 | { | 736 | { |
| 706 | preempt_disable(); | 737 | preempt_disable(); |
| 707 | __acquire(RCU_SCHED); | 738 | __acquire(RCU_SCHED); |
| 708 | rcu_read_acquire_sched(); | 739 | rcu_lock_acquire(&rcu_sched_lock_map); |
| 709 | } | 740 | } |
| 710 | 741 | ||
| 711 | /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ | 742 | /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ |
| @@ -722,7 +753,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void) | |||
| 722 | */ | 753 | */ |
| 723 | static inline void rcu_read_unlock_sched(void) | 754 | static inline void rcu_read_unlock_sched(void) |
| 724 | { | 755 | { |
| 725 | rcu_read_release_sched(); | 756 | rcu_lock_release(&rcu_sched_lock_map); |
| 726 | __release(RCU_SCHED); | 757 | __release(RCU_SCHED); |
| 727 | preempt_enable(); | 758 | preempt_enable(); |
| 728 | } | 759 | } |
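[Editor's note] The sched flavor follows the same pattern, now funnelled through rcu_lock_acquire(&rcu_sched_lock_map), and the same-context note above means the lock/unlock pair below must not be split across, say, an NMI handler and process context. A short sketch; the my_entry type and my_table pointer are hypothetical:

    #include <linux/rcupdate.h>

    struct my_entry {
            int val;
    };

    /* Hypothetical pointer updated via call_rcu_sched()/synchronize_sched(). */
    extern struct my_entry __rcu *my_table;

    static int read_entry_val(void)
    {
            struct my_entry *e;
            int val = -1;

            rcu_read_lock_sched();          /* lockdep-annotated via rcu_sched_lock_map */
            e = rcu_dereference_sched(my_table);
            if (e)
                    val = e->val;
            rcu_read_unlock_sched();        /* must run in the same context */
            return val;
    }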
diff --git a/include/linux/sched.h b/include/linux/sched.h index 1c4f3e9b9bc5..cf0eb342bcba 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -273,9 +273,11 @@ extern int runqueue_is_locked(int cpu); | |||
| 273 | 273 | ||
| 274 | #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) | 274 | #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) |
| 275 | extern void select_nohz_load_balancer(int stop_tick); | 275 | extern void select_nohz_load_balancer(int stop_tick); |
| 276 | extern void set_cpu_sd_state_idle(void); | ||
| 276 | extern int get_nohz_timer_target(void); | 277 | extern int get_nohz_timer_target(void); |
| 277 | #else | 278 | #else |
| 278 | static inline void select_nohz_load_balancer(int stop_tick) { } | 279 | static inline void select_nohz_load_balancer(int stop_tick) { } |
| 280 | static inline void set_cpu_sd_state_idle(void) { } | ||
| 279 | #endif | 281 | #endif |
| 280 | 282 | ||
| 281 | /* | 283 | /* |
| @@ -483,8 +485,8 @@ struct task_cputime { | |||
| 483 | 485 | ||
| 484 | #define INIT_CPUTIME \ | 486 | #define INIT_CPUTIME \ |
| 485 | (struct task_cputime) { \ | 487 | (struct task_cputime) { \ |
| 486 | .utime = cputime_zero, \ | 488 | .utime = 0, \ |
| 487 | .stime = cputime_zero, \ | 489 | .stime = 0, \ |
| 488 | .sum_exec_runtime = 0, \ | 490 | .sum_exec_runtime = 0, \ |
| 489 | } | 491 | } |
| 490 | 492 | ||
| @@ -901,6 +903,10 @@ struct sched_group_power { | |||
| 901 | * single CPU. | 903 | * single CPU. |
| 902 | */ | 904 | */ |
| 903 | unsigned int power, power_orig; | 905 | unsigned int power, power_orig; |
| 906 | /* | ||
| 907 | * Number of busy cpus in this group. | ||
| 908 | */ | ||
| 909 | atomic_t nr_busy_cpus; | ||
| 904 | }; | 910 | }; |
| 905 | 911 | ||
| 906 | struct sched_group { | 912 | struct sched_group { |
| @@ -925,6 +931,15 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg) | |||
| 925 | return to_cpumask(sg->cpumask); | 931 | return to_cpumask(sg->cpumask); |
| 926 | } | 932 | } |
| 927 | 933 | ||
| 934 | /** | ||
| 935 | * group_first_cpu - Returns the first cpu in the cpumask of a sched_group. | ||
| 936 | * @group: The group whose first cpu is to be returned. | ||
| 937 | */ | ||
| 938 | static inline unsigned int group_first_cpu(struct sched_group *group) | ||
| 939 | { | ||
| 940 | return cpumask_first(sched_group_cpus(group)); | ||
| 941 | } | ||
| 942 | |||
| 928 | struct sched_domain_attr { | 943 | struct sched_domain_attr { |
| 929 | int relax_domain_level; | 944 | int relax_domain_level; |
| 930 | }; | 945 | }; |
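[Editor's note] group_first_cpu() above is just a cpumask_first() wrapper over the group's span; a typical use is electing the lowest-numbered CPU of a sched_group to act on behalf of the whole group. A hedged sketch, not taken from this diff:

    #include <linux/sched.h>

    /*
     * Sketch only: decide whether this_cpu should do group-wide work by
     * checking whether it is the group's first CPU.  "sg" is assumed to
     * be a valid sched_group reached from the current sched_domain.
     */
    static bool should_act_for_group(struct sched_group *sg, int this_cpu)
    {
            return group_first_cpu(sg) == this_cpu;
    }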
| @@ -1315,8 +1330,8 @@ struct task_struct { | |||
| 1315 | * older sibling, respectively. (p->father can be replaced with | 1330 | * older sibling, respectively. (p->father can be replaced with |
| 1316 | * p->real_parent->pid) | 1331 | * p->real_parent->pid) |
| 1317 | */ | 1332 | */ |
| 1318 | struct task_struct *real_parent; /* real parent process */ | 1333 | struct task_struct __rcu *real_parent; /* real parent process */ |
| 1319 | struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */ | 1334 | struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */ |
| 1320 | /* | 1335 | /* |
| 1321 | * children/sibling forms the list of my natural children | 1336 | * children/sibling forms the list of my natural children |
| 1322 | */ | 1337 | */ |
| @@ -2070,6 +2085,14 @@ extern int sched_setscheduler(struct task_struct *, int, | |||
| 2070 | extern int sched_setscheduler_nocheck(struct task_struct *, int, | 2085 | extern int sched_setscheduler_nocheck(struct task_struct *, int, |
| 2071 | const struct sched_param *); | 2086 | const struct sched_param *); |
| 2072 | extern struct task_struct *idle_task(int cpu); | 2087 | extern struct task_struct *idle_task(int cpu); |
| 2088 | /** | ||
| 2089 | * is_idle_task - is the specified task an idle task? | ||
| 2090 | * @p: the task in question. | ||
| 2091 | */ | ||
| 2092 | static inline bool is_idle_task(struct task_struct *p) | ||
| 2093 | { | ||
| 2094 | return p->pid == 0; | ||
| 2095 | } | ||
| 2073 | extern struct task_struct *curr_task(int cpu); | 2096 | extern struct task_struct *curr_task(int cpu); |
| 2074 | extern void set_curr_task(int cpu, struct task_struct *p); | 2097 | extern void set_curr_task(int cpu, struct task_struct *p); |
| 2075 | 2098 | ||
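[Editor's note] is_idle_task() above keys off PID 0, which every per-CPU idle task shares, so callers can special-case the idle thread without comparing against rq->idle. For instance, a hypothetical debug check that some path never runs in the idle task might look like:

    #include <linux/kernel.h>
    #include <linux/sched.h>

    static void my_callback_sanity_check(void)
    {
            /* Hypothetical assertion: this path must not run from the idle task. */
            WARN_ON_ONCE(is_idle_task(current));
    }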
diff --git a/include/linux/security.h b/include/linux/security.h index 19d8e04e1688..e8c619d39291 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
| @@ -2056,7 +2056,7 @@ static inline int security_old_inode_init_security(struct inode *inode, | |||
| 2056 | char **name, void **value, | 2056 | char **name, void **value, |
| 2057 | size_t *len) | 2057 | size_t *len) |
| 2058 | { | 2058 | { |
| 2059 | return 0; | 2059 | return -EOPNOTSUPP; |
| 2060 | } | 2060 | } |
| 2061 | 2061 | ||
| 2062 | static inline int security_inode_create(struct inode *dir, | 2062 | static inline int security_inode_create(struct inode *dir, |
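[Editor's note] With CONFIG_SECURITY=n the stub above now returns -EOPNOTSUPP instead of 0, so a filesystem can no longer mistake "no LSM xattr was produced" for a successfully filled-in name/value pair. A sketch of the caller pattern this implies, assuming the 3.2-era prototype that also takes the qstr; fs_store_xattr() and the surrounding function are hypothetical:

    #include <linux/security.h>
    #include <linux/slab.h>

    /* Hypothetical helper that would actually write the xattr. */
    extern int fs_store_xattr(struct inode *inode, const char *name,
                              const void *value, size_t len);

    static int fs_init_security_sketch(struct inode *inode, struct inode *dir,
                                       const struct qstr *qstr)
    {
            char *name;
            void *value;
            size_t len;
            int err;

            err = security_old_inode_init_security(inode, dir, qstr,
                                                   &name, &value, &len);
            if (err == -EOPNOTSUPP)
                    return 0;               /* no LSM, or nothing to store */
            if (err)
                    return err;

            err = fs_store_xattr(inode, name, value, len);
            kfree(name);
            kfree(value);
            return err;
    }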
diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 58971e891f48..e1b005918bbb 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #define _LINUX_SRCU_H | 28 | #define _LINUX_SRCU_H |
| 29 | 29 | ||
| 30 | #include <linux/mutex.h> | 30 | #include <linux/mutex.h> |
| 31 | #include <linux/rcupdate.h> | ||
| 31 | 32 | ||
| 32 | struct srcu_struct_array { | 33 | struct srcu_struct_array { |
| 33 | int c[2]; | 34 | int c[2]; |
| @@ -60,18 +61,10 @@ int __init_srcu_struct(struct srcu_struct *sp, const char *name, | |||
| 60 | __init_srcu_struct((sp), #sp, &__srcu_key); \ | 61 | __init_srcu_struct((sp), #sp, &__srcu_key); \ |
| 61 | }) | 62 | }) |
| 62 | 63 | ||
| 63 | # define srcu_read_acquire(sp) \ | ||
| 64 | lock_acquire(&(sp)->dep_map, 0, 0, 2, 1, NULL, _THIS_IP_) | ||
| 65 | # define srcu_read_release(sp) \ | ||
| 66 | lock_release(&(sp)->dep_map, 1, _THIS_IP_) | ||
| 67 | |||
| 68 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 64 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
| 69 | 65 | ||
| 70 | int init_srcu_struct(struct srcu_struct *sp); | 66 | int init_srcu_struct(struct srcu_struct *sp); |
| 71 | 67 | ||
| 72 | # define srcu_read_acquire(sp) do { } while (0) | ||
| 73 | # define srcu_read_release(sp) do { } while (0) | ||
| 74 | |||
| 75 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 68 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
| 76 | 69 | ||
| 77 | void cleanup_srcu_struct(struct srcu_struct *sp); | 70 | void cleanup_srcu_struct(struct srcu_struct *sp); |
| @@ -90,12 +83,32 @@ long srcu_batches_completed(struct srcu_struct *sp); | |||
| 90 | * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, | 83 | * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, |
| 91 | * this assumes we are in an SRCU read-side critical section unless it can | 84 | * this assumes we are in an SRCU read-side critical section unless it can |
| 92 | * prove otherwise. | 85 | * prove otherwise. |
| 86 | * | ||
| 87 | * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot | ||
| 88 | * and while lockdep is disabled. | ||
| 89 | * | ||
| 90 | * Note that if the CPU is in the idle loop from an RCU point of view | ||
| 91 | * (i.e., in the section between rcu_idle_enter() and | ||
| 92 | * rcu_idle_exit()) then srcu_read_lock_held() returns false even if | ||
| 93 | * the CPU did an srcu_read_lock(). The reason for this is that RCU | ||
| 94 | * ignores CPUs that are in such a section, considering them to be in | ||
| 95 | * an extended quiescent state, so such a CPU is effectively never in an | ||
| 96 | * RCU read-side critical section regardless of what RCU primitives it | ||
| 97 | * invokes. This state of affairs is required --- we need to keep an | ||
| 98 | * RCU-free window in idle where the CPU may possibly enter into low | ||
| 99 | * power mode. This way we can report an extended quiescent state to | ||
| 100 | * other CPUs that started a grace period. Otherwise we would delay any | ||
| 101 | * grace period as long as we run in the idle task. | ||
| 93 | */ | 102 | */ |
| 94 | static inline int srcu_read_lock_held(struct srcu_struct *sp) | 103 | static inline int srcu_read_lock_held(struct srcu_struct *sp) |
| 95 | { | 104 | { |
| 96 | if (debug_locks) | 105 | if (rcu_is_cpu_idle()) |
| 97 | return lock_is_held(&sp->dep_map); | 106 | return 0; |
| 98 | return 1; | 107 | |
| 108 | if (!debug_lockdep_rcu_enabled()) | ||
| 109 | return 1; | ||
| 110 | |||
| 111 | return lock_is_held(&sp->dep_map); | ||
| 99 | } | 112 | } |
| 100 | 113 | ||
| 101 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 114 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
| @@ -145,12 +158,17 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp) | |||
| 145 | * one way to indirectly wait on an SRCU grace period is to acquire | 158 | * one way to indirectly wait on an SRCU grace period is to acquire |
| 146 | * a mutex that is held elsewhere while calling synchronize_srcu() or | 159 | * a mutex that is held elsewhere while calling synchronize_srcu() or |
| 147 | * synchronize_srcu_expedited(). | 160 | * synchronize_srcu_expedited(). |
| 161 | * | ||
| 162 | * Note that srcu_read_lock() and the matching srcu_read_unlock() must | ||
| 163 | * occur in the same context, for example, it is illegal to invoke | ||
| 164 | * srcu_read_unlock() in an irq handler if the matching srcu_read_lock() | ||
| 165 | * was invoked in process context. | ||
| 148 | */ | 166 | */ |
| 149 | static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) | 167 | static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) |
| 150 | { | 168 | { |
| 151 | int retval = __srcu_read_lock(sp); | 169 | int retval = __srcu_read_lock(sp); |
| 152 | 170 | ||
| 153 | srcu_read_acquire(sp); | 171 | rcu_lock_acquire(&(sp)->dep_map); |
| 154 | return retval; | 172 | return retval; |
| 155 | } | 173 | } |
| 156 | 174 | ||
| @@ -164,8 +182,51 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) | |||
| 164 | static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) | 182 | static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) |
| 165 | __releases(sp) | 183 | __releases(sp) |
| 166 | { | 184 | { |
| 167 | srcu_read_release(sp); | 185 | rcu_lock_release(&(sp)->dep_map); |
| 186 | __srcu_read_unlock(sp, idx); | ||
| 187 | } | ||
| 188 | |||
| 189 | /** | ||
| 190 | * srcu_read_lock_raw - register a new reader for an SRCU-protected structure. | ||
| 191 | * @sp: srcu_struct in which to register the new reader. | ||
| 192 | * | ||
| 193 | * Enter an SRCU read-side critical section. Similar to srcu_read_lock(), | ||
| 194 | * but avoids the RCU-lockdep checking. This means that it is legal to | ||
| 195 | * use srcu_read_lock_raw() in one context, for example, in an exception | ||
| 196 | * handler, and then have the matching srcu_read_unlock_raw() in another | ||
| 197 | * context, for example in the task that took the exception. | ||
| 198 | * | ||
| 199 | * However, the entire SRCU read-side critical section must reside within a | ||
| 200 | * single task. For example, beware of using srcu_read_lock_raw() in | ||
| 201 | * a device interrupt handler and srcu_read_unlock_raw() in the interrupted | ||
| 202 | * task: This will not work if interrupts are threaded. | ||
| 203 | */ | ||
| 204 | static inline int srcu_read_lock_raw(struct srcu_struct *sp) | ||
| 205 | { | ||
| 206 | unsigned long flags; | ||
| 207 | int ret; | ||
| 208 | |||
| 209 | local_irq_save(flags); | ||
| 210 | ret = __srcu_read_lock(sp); | ||
| 211 | local_irq_restore(flags); | ||
| 212 | return ret; | ||
| 213 | } | ||
| 214 | |||
| 215 | /** | ||
| 216 | * srcu_read_unlock_raw - unregister reader from an SRCU-protected structure. | ||
| 217 | * @sp: srcu_struct in which to unregister the old reader. | ||
| 218 | * @idx: return value from corresponding srcu_read_lock_raw(). | ||
| 219 | * | ||
| 220 | * Exit an SRCU read-side critical section without lockdep-RCU checking. | ||
| 221 | * See srcu_read_lock_raw() for more details. | ||
| 222 | */ | ||
| 223 | static inline void srcu_read_unlock_raw(struct srcu_struct *sp, int idx) | ||
| 224 | { | ||
| 225 | unsigned long flags; | ||
| 226 | |||
| 227 | local_irq_save(flags); | ||
| 168 | __srcu_read_unlock(sp, idx); | 228 | __srcu_read_unlock(sp, idx); |
| 229 | local_irq_restore(flags); | ||
| 169 | } | 230 | } |
| 170 | 231 | ||
| 171 | #endif | 232 | #endif |
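[Editor's note] Because the new _raw variants above skip the lockdep annotation, the index returned by srcu_read_lock_raw() can legally be handed from one context to another within the same task, for example stashed at exception entry and released at exception exit. A hypothetical sketch of that hand-off; my_srcu and my_exception_state are assumptions:

    #include <linux/srcu.h>

    /* Hypothetical SRCU domain and per-task stash for the reader index. */
    extern struct srcu_struct my_srcu;

    struct my_exception_state {
            int srcu_idx;
    };

    static void my_exception_enter(struct my_exception_state *st)
    {
            /* Called from the exception handler; no lockdep checking here. */
            st->srcu_idx = srcu_read_lock_raw(&my_srcu);
    }

    static void my_exception_exit(struct my_exception_state *st)
    {
            /* Called later, in the task that took the exception. */
            srcu_read_unlock_raw(&my_srcu, st->srcu_idx);
    }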
diff --git a/include/linux/tick.h b/include/linux/tick.h index b232ccc0ee29..ab8be90b5cc9 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #define _LINUX_TICK_H | 7 | #define _LINUX_TICK_H |
| 8 | 8 | ||
| 9 | #include <linux/clockchips.h> | 9 | #include <linux/clockchips.h> |
| 10 | #include <linux/irqflags.h> | ||
| 10 | 11 | ||
| 11 | #ifdef CONFIG_GENERIC_CLOCKEVENTS | 12 | #ifdef CONFIG_GENERIC_CLOCKEVENTS |
| 12 | 13 | ||
| @@ -121,14 +122,16 @@ static inline int tick_oneshot_mode_active(void) { return 0; } | |||
| 121 | #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ | 122 | #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ |
| 122 | 123 | ||
| 123 | # ifdef CONFIG_NO_HZ | 124 | # ifdef CONFIG_NO_HZ |
| 124 | extern void tick_nohz_stop_sched_tick(int inidle); | 125 | extern void tick_nohz_idle_enter(void); |
| 125 | extern void tick_nohz_restart_sched_tick(void); | 126 | extern void tick_nohz_idle_exit(void); |
| 127 | extern void tick_nohz_irq_exit(void); | ||
| 126 | extern ktime_t tick_nohz_get_sleep_length(void); | 128 | extern ktime_t tick_nohz_get_sleep_length(void); |
| 127 | extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); | 129 | extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); |
| 128 | extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); | 130 | extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); |
| 129 | # else | 131 | # else |
| 130 | static inline void tick_nohz_stop_sched_tick(int inidle) { } | 132 | static inline void tick_nohz_idle_enter(void) { } |
| 131 | static inline void tick_nohz_restart_sched_tick(void) { } | 133 | static inline void tick_nohz_idle_exit(void) { } |
| 134 | |||
| 132 | static inline ktime_t tick_nohz_get_sleep_length(void) | 135 | static inline ktime_t tick_nohz_get_sleep_length(void) |
| 133 | { | 136 | { |
| 134 | ktime_t len = { .tv64 = NSEC_PER_SEC/HZ }; | 137 | ktime_t len = { .tv64 = NSEC_PER_SEC/HZ }; |
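[Editor's note] tick_nohz_stop_sched_tick(1)/tick_nohz_restart_sched_tick() callers become tick_nohz_idle_enter()/tick_nohz_idle_exit() above, and the RCU idle hooks are now called separately by the idle loop rather than from the tick code. A rough sketch of the expected ordering, with arch_cpu_wait() standing in for the architecture's real low-power wait:

    #include <linux/rcupdate.h>
    #include <linux/sched.h>
    #include <linux/tick.h>

    extern void arch_cpu_wait(void);        /* hypothetical low-power wait */

    static void nohz_idle_sketch(void)
    {
            tick_nohz_idle_enter();         /* stop the periodic tick        */
            rcu_idle_enter();               /* tell RCU this CPU is idle     */

            while (!need_resched())
                    arch_cpu_wait();

            rcu_idle_exit();                /* RCU sees this CPU again       */
            tick_nohz_idle_exit();          /* restart tick, account idle time */
    }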
diff --git a/include/linux/wait.h b/include/linux/wait.h index 3efc9f3f43a0..a9ce45e8501c 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
| @@ -77,13 +77,13 @@ struct task_struct; | |||
| 77 | #define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ | 77 | #define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ |
| 78 | { .flags = word, .bit_nr = bit, } | 78 | { .flags = word, .bit_nr = bit, } |
| 79 | 79 | ||
| 80 | extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *); | 80 | extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *); |
| 81 | 81 | ||
| 82 | #define init_waitqueue_head(q) \ | 82 | #define init_waitqueue_head(q) \ |
| 83 | do { \ | 83 | do { \ |
| 84 | static struct lock_class_key __key; \ | 84 | static struct lock_class_key __key; \ |
| 85 | \ | 85 | \ |
| 86 | __init_waitqueue_head((q), &__key); \ | 86 | __init_waitqueue_head((q), #q, &__key); \ |
| 87 | } while (0) | 87 | } while (0) |
| 88 | 88 | ||
| 89 | #ifdef CONFIG_LOCKDEP | 89 | #ifdef CONFIG_LOCKDEP |
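[Editor's note] __init_waitqueue_head() now also takes a name, and the init_waitqueue_head() wrapper stringifies its argument to supply it, so every dynamically initialized waitqueue gets a human-readable lockdep class name for free. For example (the surrounding structure is hypothetical):

    #include <linux/wait.h>

    struct my_device {
            wait_queue_head_t ready_wait;
    };

    static void my_device_setup(struct my_device *dev)
    {
            /*
             * Expands to __init_waitqueue_head(&dev->ready_wait,
             * "&dev->ready_wait", &__key), so lockdep reports show the
             * waitqueue by name.
             */
            init_waitqueue_head(&dev->ready_wait);
    }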
