Diffstat (limited to 'kernel')

 kernel/bpf/syscall.c     |  6
 kernel/events/core.c     | 15
 kernel/events/uprobes.c  |  8
 kernel/locking/lockdep.c | 79
 kernel/sched/core.c      | 18
 kernel/time/tick-sched.c | 61
 kernel/time/tick-sched.h |  2
 7 files changed, 147 insertions(+), 42 deletions(-)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 2a2efe1bc76c..adc5e4bd74f8 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -137,11 +137,13 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
137 "map_type:\t%u\n" 137 "map_type:\t%u\n"
138 "key_size:\t%u\n" 138 "key_size:\t%u\n"
139 "value_size:\t%u\n" 139 "value_size:\t%u\n"
140 "max_entries:\t%u\n", 140 "max_entries:\t%u\n"
141 "map_flags:\t%#x\n",
141 map->map_type, 142 map->map_type,
142 map->key_size, 143 map->key_size,
143 map->value_size, 144 map->value_size,
144 map->max_entries); 145 map->max_entries,
146 map->map_flags);
145} 147}
146#endif 148#endif
147 149
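The hunk above adds a "map_flags" line to the fdinfo text exposed for a BPF map fd. A minimal userspace sketch of reading it back (hedged: the fd number is an assumption, and nothing below comes from the patch itself):

/* Hypothetical reader: dump the fdinfo text for fd 3, which we assume
 * is a BPF map fd (e.g. just returned by bpf(BPF_MAP_CREATE, ...)).
 * The "map_flags:" line only appears on kernels carrying the hunk above.
 */
#include <stdio.h>

int main(void)
{
        char path[64], line[128];
        int fd = 3;     /* assumption, not derived from the patch */
        FILE *f;

        snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
        f = fopen(path, "r");
        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);    /* map_type, key_size, ..., map_flags */
        fclose(f);
        return 0;
}
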
diff --git a/kernel/events/core.c b/kernel/events/core.c
index de24fbce5277..52bedc5a5aaa 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2417,14 +2417,24 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 		cpuctx->task_ctx = NULL;
 	}
 
-	is_active ^= ctx->is_active; /* changed bits */
-
+	/*
+	 * Always update time if it was set; not only when it changes.
+	 * Otherwise we can 'forget' to update time for any but the last
+	 * context we sched out. For example:
+	 *
+	 *   ctx_sched_out(.event_type = EVENT_FLEXIBLE)
+	 *   ctx_sched_out(.event_type = EVENT_PINNED)
+	 *
+	 * would only update time for the pinned events.
+	 */
 	if (is_active & EVENT_TIME) {
 		/* update (and stop) ctx time */
 		update_context_time(ctx);
 		update_cgrp_time_from_cpuctx(cpuctx);
 	}
 
+	is_active ^= ctx->is_active; /* changed bits */
+
 	if (!ctx->nr_active || !(is_active & EVENT_ALL))
 		return;
 
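Why the reordering matters: `is_active ^= ctx->is_active` leaves only the *changed* bits, so testing EVENT_TIME after the XOR skips the time update whenever EVENT_TIME was set and stays set across a partial sched-out. A self-contained model (flag values are illustrative, not the kernel's):

/* Model of the bug the hunk above fixes: scheduling out only the
 * flexible events leaves EVENT_TIME set, so the changed-bits test
 * (old code) misses it while the current-bits test (new code) does not.
 */
#include <stdio.h>

#define EVENT_PINNED    0x1
#define EVENT_FLEXIBLE  0x2
#define EVENT_TIME      0x4

int main(void)
{
        int is_active = EVENT_PINNED | EVENT_FLEXIBLE | EVENT_TIME;
        int ctx_is_active = EVENT_PINNED | EVENT_TIME; /* FLEXIBLE sched-out */

        int changed = is_active ^ ctx_is_active;       /* EVENT_FLEXIBLE only */

        printf("old check (changed & TIME): %d\n", !!(changed & EVENT_TIME));
        printf("new check (active  & TIME): %d\n", !!(is_active & EVENT_TIME));
        return 0;
}
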
@@ -8532,6 +8542,7 @@ SYSCALL_DEFINE5(perf_event_open,
 					   f_flags);
 	if (IS_ERR(event_file)) {
 		err = PTR_ERR(event_file);
+		event_file = NULL;
 		goto err_context;
 	}
 
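The second hunk nulls `event_file` so the unwind path after `err_context` cannot act on an `ERR_PTR()` value. A simplified, self-contained model of the idiom (the labels and helpers below are hypothetical, not the function's real cleanup chain):

/* Model of the IS_ERR()/goto-cleanup idiom the fix hardens: if 'file'
 * still held the ERR_PTR value, a cleanup path doing 'if (file) ...'
 * would act on a poisoned pointer; nulling it makes cleanup a no-op.
 */
#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR(p)  ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(p) ((long)(p))
#define ERR_PTR(e) ((void *)(long)(e))

static void *create_file(int fail)
{
        static int dummy;
        return fail ? ERR_PTR(-12 /* -ENOMEM */) : (void *)&dummy;
}

static long open_model(int fail)
{
        long err = 0;
        void *file = create_file(fail);

        if (IS_ERR(file)) {
                err = PTR_ERR(file);
                file = NULL;    /* the fix: keep ERR_PTR out of cleanup */
                goto err_cleanup;
        }
        return 0;

err_cleanup:
        if (file)               /* safely skipped when file == NULL */
                printf("releasing %p\n", file);
        return err;
}

int main(void)
{
        printf("ok: %ld, failing: %ld\n", open_model(0), open_model(1));
        return 0;
}
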
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 220fc17b9718..7edc95edfaee 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -321,7 +321,7 @@ retry:
 	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
 
 	ret = __replace_page(vma, vaddr, old_page, new_page);
-	page_cache_release(new_page);
+	put_page(new_page);
 put_old:
 	put_page(old_page);
 
@@ -539,14 +539,14 @@ static int __copy_insn(struct address_space *mapping, struct file *filp,
 	 * see uprobe_register().
 	 */
 	if (mapping->a_ops->readpage)
-		page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp);
+		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
 	else
-		page = shmem_read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT);
+		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 	if (IS_ERR(page))
 		return PTR_ERR(page);
 
 	copy_from_page(page, offset, insn, nbytes);
-	page_cache_release(page);
+	put_page(page);
 
 	return 0;
 }
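Both uprobes hunks are mechanical: PAGE_CACHE_SHIFT and page_cache_release() were long-standing aliases of PAGE_SHIFT and put_page(), so the file-offset-to-page-index math is unchanged. A tiny demonstration (assuming 4 KiB pages):

/* The offset -> page-cache index split used by __copy_insn() above. */
#include <stdio.h>

#define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

int main(void)
{
        unsigned long offset = 0x12345;

        printf("offset 0x%lx -> page index %lu, in-page offset 0x%lx\n",
               offset, offset >> PAGE_SHIFT,
               offset & ((1UL << PAGE_SHIFT) - 1));
        return 0;
}
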
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 53ab2f85d77e..2324ba5310db 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2000,6 +2000,77 @@ static inline int get_first_held_lock(struct task_struct *curr,
 }
 
 /*
+ * Returns the next chain_key iteration
+ */
+static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
+{
+	u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
+
+	printk(" class_idx:%d -> chain_key:%016Lx",
+		class_idx,
+		(unsigned long long)new_chain_key);
+	return new_chain_key;
+}
+
+static void
+print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
+{
+	struct held_lock *hlock;
+	u64 chain_key = 0;
+	int depth = curr->lockdep_depth;
+	int i;
+
+	printk("depth: %u\n", depth + 1);
+	for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
+		hlock = curr->held_locks + i;
+		chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
+
+		print_lock(hlock);
+	}
+
+	print_chain_key_iteration(hlock_next->class_idx, chain_key);
+	print_lock(hlock_next);
+}
+
+static void print_chain_keys_chain(struct lock_chain *chain)
+{
+	int i;
+	u64 chain_key = 0;
+	int class_id;
+
+	printk("depth: %u\n", chain->depth);
+	for (i = 0; i < chain->depth; i++) {
+		class_id = chain_hlocks[chain->base + i];
+		chain_key = print_chain_key_iteration(class_id + 1, chain_key);
+
+		print_lock_name(lock_classes + class_id);
+		printk("\n");
+	}
+}
+
+static void print_collision(struct task_struct *curr,
+			struct held_lock *hlock_next,
+			struct lock_chain *chain)
+{
+	printk("\n");
+	printk("======================\n");
+	printk("[chain_key collision ]\n");
+	print_kernel_ident();
+	printk("----------------------\n");
+	printk("%s/%d: ", current->comm, task_pid_nr(current));
+	printk("Hash chain already cached but the contents don't match!\n");
+
+	printk("Held locks:");
+	print_chain_keys_held_locks(curr, hlock_next);
+
+	printk("Locks in cached chain:");
+	print_chain_keys_chain(chain);
+
+	printk("\nstack backtrace:\n");
+	dump_stack();
+}
+
+/*
  * Checks whether the chain and the current held locks are consistent
  * in depth and also in content. If they are not it most likely means
  * that there was a collision during the calculation of the chain_key.
@@ -2014,14 +2085,18 @@ static int check_no_collision(struct task_struct *curr,
 
 	i = get_first_held_lock(curr, hlock);
 
-	if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1)))
+	if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
+		print_collision(curr, hlock, chain);
 		return 0;
+	}
 
 	for (j = 0; j < chain->depth - 1; j++, i++) {
 		id = curr->held_locks[i].class_idx - 1;
 
-		if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id))
+		if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
+			print_collision(curr, hlock, chain);
 			return 0;
+		}
 	}
 #endif
 	return 1;
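print_collision() replays the chain-key computation for both the held locks and the cached chain so a mismatch can be eyeballed. A userspace model of the replay loop in print_chain_keys_chain() (the mixing function below is a stand-in, not the kernel's iterate_chain_key(); the `+ 1` mirrors the off-by-one between stored ids and class_idx in the diff):

/* Replay a cached chain's class ids and fold them into a chain key,
 * printing each intermediate key as the lockdep report does.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t mix(uint64_t key, int class_idx)
{
        return ((key << 13) | (key >> 51)) ^ (uint64_t)class_idx; /* stand-in */
}

int main(void)
{
        int chain_hlocks[] = { 7, 3, 42 };      /* cached class ids */
        int depth = 3;
        uint64_t chain_key = 0;

        printf("depth: %d\n", depth);
        for (int i = 0; i < depth; i++) {
                chain_key = mix(chain_key, chain_hlocks[i] + 1);
                printf(" class_idx:%d -> chain_key:%016llx\n",
                       chain_hlocks[i] + 1, (unsigned long long)chain_key);
        }
        return 0;
}
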
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d8465eeab8b3..8b489fcac37b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -321,6 +321,24 @@ static inline void init_hrtick(void)
 }
 #endif	/* CONFIG_SCHED_HRTICK */
 
+/*
+ * cmpxchg based fetch_or, macro so it works for different integer types
+ */
+#define fetch_or(ptr, mask)						\
+	({								\
+		typeof(ptr) _ptr = (ptr);				\
+		typeof(mask) _mask = (mask);				\
+		typeof(*_ptr) _old, _val = *_ptr;			\
+									\
+		for (;;) {						\
+			_old = cmpxchg(_ptr, _val, _val | _mask);	\
+			if (_old == _val)				\
+				break;					\
+			_val = _old;					\
+		}							\
+	_old;								\
+})
+
 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
 /*
  * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
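fetch_or() returns the *old* value, which is what lets callers detect a 0 -> nonzero transition. A compilable userspace analogue (GCC's __sync_val_compare_and_swap stands in for the kernel's cmpxchg(); everything else follows the macro above):

/* Userspace analogue of the fetch_or() macro added above. */
#include <stdio.h>

#define fetch_or(ptr, mask)                                             \
({                                                                      \
        typeof(ptr) _ptr = (ptr);                                       \
        typeof(mask) _mask = (mask);                                    \
        typeof(*_ptr) _old, _val = *_ptr;                               \
                                                                        \
        for (;;) {                                                      \
                _old = __sync_val_compare_and_swap(_ptr, _val,          \
                                                   _val | _mask);       \
                if (_old == _val)                                       \
                        break;                                          \
                _val = _old;                                            \
        }                                                               \
        _old;                                                           \
})

int main(void)
{
        unsigned long flags = 0;

        /* The old value is returned: 0 means we were first to set bit 0. */
        printf("prev=%lu (first setter)\n", fetch_or(&flags, 1UL));
        printf("prev=%lu (already set)\n",  fetch_or(&flags, 1UL));
        return 0;
}
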
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 084b79f5917e..58e3310c9b21 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -157,52 +157,50 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 cpumask_var_t tick_nohz_full_mask;
 cpumask_var_t housekeeping_mask;
 bool tick_nohz_full_running;
-static unsigned long tick_dep_mask;
+static atomic_t tick_dep_mask;
 
-static void trace_tick_dependency(unsigned long dep)
+static bool check_tick_dependency(atomic_t *dep)
 {
-	if (dep & TICK_DEP_MASK_POSIX_TIMER) {
+	int val = atomic_read(dep);
+
+	if (val & TICK_DEP_MASK_POSIX_TIMER) {
 		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
-		return;
+		return true;
 	}
 
-	if (dep & TICK_DEP_MASK_PERF_EVENTS) {
+	if (val & TICK_DEP_MASK_PERF_EVENTS) {
 		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
-		return;
+		return true;
 	}
 
-	if (dep & TICK_DEP_MASK_SCHED) {
+	if (val & TICK_DEP_MASK_SCHED) {
 		trace_tick_stop(0, TICK_DEP_MASK_SCHED);
-		return;
+		return true;
 	}
 
-	if (dep & TICK_DEP_MASK_CLOCK_UNSTABLE)
+	if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
 		trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
+		return true;
+	}
+
+	return false;
 }
 
 static bool can_stop_full_tick(struct tick_sched *ts)
 {
 	WARN_ON_ONCE(!irqs_disabled());
 
-	if (tick_dep_mask) {
-		trace_tick_dependency(tick_dep_mask);
+	if (check_tick_dependency(&tick_dep_mask))
 		return false;
-	}
 
-	if (ts->tick_dep_mask) {
-		trace_tick_dependency(ts->tick_dep_mask);
+	if (check_tick_dependency(&ts->tick_dep_mask))
 		return false;
-	}
 
-	if (current->tick_dep_mask) {
-		trace_tick_dependency(current->tick_dep_mask);
+	if (check_tick_dependency(&current->tick_dep_mask))
 		return false;
-	}
 
-	if (current->signal->tick_dep_mask) {
-		trace_tick_dependency(current->signal->tick_dep_mask);
+	if (check_tick_dependency(&current->signal->tick_dep_mask))
 		return false;
-	}
 
 	return true;
 }
@@ -259,12 +257,12 @@ static void tick_nohz_full_kick_all(void)
 	preempt_enable();
 }
 
-static void tick_nohz_dep_set_all(unsigned long *dep,
+static void tick_nohz_dep_set_all(atomic_t *dep,
 				  enum tick_dep_bits bit)
 {
-	unsigned long prev;
+	int prev;
 
-	prev = fetch_or(dep, BIT_MASK(bit));
+	prev = atomic_fetch_or(dep, BIT(bit));
 	if (!prev)
 		tick_nohz_full_kick_all();
 }
@@ -280,7 +278,7 @@ void tick_nohz_dep_set(enum tick_dep_bits bit)
 
 void tick_nohz_dep_clear(enum tick_dep_bits bit)
 {
-	clear_bit(bit, &tick_dep_mask);
+	atomic_andnot(BIT(bit), &tick_dep_mask);
 }
 
 /*
@@ -289,12 +287,12 @@ void tick_nohz_dep_clear(enum tick_dep_bits bit)
 */
 void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
 {
-	unsigned long prev;
+	int prev;
 	struct tick_sched *ts;
 
 	ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 
-	prev = fetch_or(&ts->tick_dep_mask, BIT_MASK(bit));
+	prev = atomic_fetch_or(&ts->tick_dep_mask, BIT(bit));
 	if (!prev) {
 		preempt_disable();
 		/* Perf needs local kick that is NMI safe */
@@ -313,7 +311,7 @@ void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
 {
 	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 
-	clear_bit(bit, &ts->tick_dep_mask);
+	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
 }
 
 /*
@@ -331,7 +329,7 @@ void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
 
 void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
 {
-	clear_bit(bit, &tsk->tick_dep_mask);
+	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
 }
 
 /*
@@ -345,7 +343,7 @@ void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit)
 
 void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
 {
-	clear_bit(bit, &sig->tick_dep_mask);
+	atomic_andnot(BIT(bit), &sig->tick_dep_mask);
 }
 
 /*
@@ -366,7 +364,8 @@ void __tick_nohz_task_switch(void)
 	ts = this_cpu_ptr(&tick_cpu_sched);
 
 	if (ts->tick_stopped) {
-		if (current->tick_dep_mask || current->signal->tick_dep_mask)
+		if (atomic_read(&current->tick_dep_mask) ||
+		    atomic_read(&current->signal->tick_dep_mask))
 			tick_nohz_full_kick();
 	}
 out:
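The conversion preserves the kick-once pattern: atomic_fetch_or() reports the previous mask, so only the first dependency bit triggers tick_nohz_full_kick_all(), while atomic_andnot() clears bits without a kick. A C11 model (not kernel code; <stdatomic.h> stands in for the kernel's atomic_t helpers):

/* Model of tick_nohz_dep_set_all()/..._clear(): only the transition
 * from "no dependencies" to "some dependency" re-enables the tick.
 */
#include <stdatomic.h>
#include <stdio.h>

#define BIT(n) (1 << (n))

static atomic_int tick_dep_mask;

static void kick_all(void) { puts("kick: re-enable the tick"); }

static void dep_set(int bit)
{
        int prev = atomic_fetch_or(&tick_dep_mask, BIT(bit));
        if (!prev)              /* first dependency only */
                kick_all();
}

static void dep_clear(int bit)
{
        atomic_fetch_and(&tick_dep_mask, ~BIT(bit));    /* andnot */
}

int main(void)
{
        dep_set(0);     /* kicks */
        dep_set(1);     /* silent: mask was already nonzero */
        dep_clear(0);
        dep_clear(1);
        return 0;
}
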
diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h
index eb4e32566a83..bf38226e5c17 100644
--- a/kernel/time/tick-sched.h
+++ b/kernel/time/tick-sched.h
@@ -60,7 +60,7 @@ struct tick_sched {
 	u64			next_timer;
 	ktime_t			idle_expires;
 	int			do_timer_last;
-	unsigned long		tick_dep_mask;
+	atomic_t		tick_dep_mask;
 };
 
 extern struct tick_sched *tick_get_tick_sched(int cpu);