Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/core.c            3
-rw-r--r--  kernel/bpf/offload.c        15
-rw-r--r--  kernel/cpu.c                14
-rw-r--r--  kernel/debug/kdb/kdb_io.c    2
-rw-r--r--  kernel/events/core.c         5
-rw-r--r--  kernel/irq/matrix.c          4
-rw-r--r--  kernel/kallsyms.c            8
-rw-r--r--  kernel/locking/lockdep.c     3
-rw-r--r--  kernel/module.c              6
-rw-r--r--  kernel/printk/printk.c       3
-rw-r--r--  kernel/sched/fair.c        102
-rw-r--r--  kernel/sched/wait.c          2
-rw-r--r--  kernel/trace/blktrace.c     30
-rw-r--r--  kernel/trace/bpf_trace.c     8
14 files changed, 135 insertions(+), 70 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index b9f8686a84cf..86b50aa26ee8 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1447,7 +1447,8 @@ int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
 	rcu_read_lock();
 	prog = rcu_dereference(progs)->progs;
 	for (; *prog; prog++)
-		cnt++;
+		if (*prog != &dummy_bpf_prog.prog)
+			cnt++;
 	rcu_read_unlock();
 	return cnt;
 }
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 68ec884440b7..8455b89d1bbf 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -1,3 +1,18 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
 #include <linux/bpf.h>
 #include <linux/bpf_verifier.h>
 #include <linux/bug.h>
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 04892a82f6ac..41376c3ac93b 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -780,8 +780,8 @@ static int takedown_cpu(unsigned int cpu)
 	BUG_ON(cpu_online(cpu));

 	/*
-	 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
-	 * runnable tasks from the cpu, there's only the idle task left now
+	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
+	 * all runnable tasks from the CPU, there's only the idle task left now
 	 * that the migration thread is done doing the stop_machine thing.
 	 *
 	 * Wait for the stop thread to go away.
@@ -1289,11 +1289,6 @@ static struct cpuhp_step cpuhp_bp_states[] = {
 		.teardown.single	= NULL,
 		.cant_stop		= true,
 	},
-	[CPUHP_AP_SMPCFD_DYING] = {
-		.name			= "smpcfd:dying",
-		.startup.single		= NULL,
-		.teardown.single	= smpcfd_dying_cpu,
-	},
 	/*
 	 * Handled on controll processor until the plugged processor manages
 	 * this itself.
@@ -1335,6 +1330,11 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 		.startup.single		= NULL,
 		.teardown.single	= rcutree_dying_cpu,
 	},
+	[CPUHP_AP_SMPCFD_DYING] = {
+		.name			= "smpcfd:dying",
+		.startup.single		= NULL,
+		.teardown.single	= smpcfd_dying_cpu,
+	},
 	/* Entry state on starting. Interrupts enabled from here on. Transient
 	 * state for synchronsization */
 	[CPUHP_AP_ONLINE] = {
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index e74be38245ad..ed5d34925ad0 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -350,7 +350,7 @@ poll_again:
 	}
 	kdb_printf("\n");
 	for (i = 0; i < count; i++) {
-		if (kallsyms_symbol_next(p_tmp, i) < 0)
+		if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
 			break;
 		kdb_printf("%s ", p_tmp);
 		*(p_tmp + len) = '\0';
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 16beab4767e1..5961ef6dfd64 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6639,6 +6639,7 @@ static void perf_event_namespaces_output(struct perf_event *event,
 	struct perf_namespaces_event *namespaces_event = data;
 	struct perf_output_handle handle;
 	struct perf_sample_data sample;
+	u16 header_size = namespaces_event->event_id.header.size;
 	int ret;

 	if (!perf_event_namespaces_match(event))
@@ -6649,7 +6650,7 @@ static void perf_event_namespaces_output(struct perf_event *event,
 	ret = perf_output_begin(&handle, event,
 				namespaces_event->event_id.header.size);
 	if (ret)
-		return;
+		goto out;

 	namespaces_event->event_id.pid = perf_event_pid(event,
 							namespaces_event->task);
@@ -6661,6 +6662,8 @@ static void perf_event_namespaces_output(struct perf_event *event,
 	perf_event__output_id_sample(event, &handle, &sample);

 	perf_output_end(&handle);
+out:
+	namespaces_event->event_id.header.size = header_size;
 }

 static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 7df2480005f8..0ba0dd8863a7 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -384,7 +384,9 @@ unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown)
 {
 	struct cpumap *cm = this_cpu_ptr(m->maps);

-	return (m->global_available - cpudown) ? cm->available : 0;
+	if (!cpudown)
+		return m->global_available;
+	return m->global_available - cm->available;
 }

 /**
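In the hunk above, the old return statement mixed up the boolean cpudown with a vector count; the rewrite returns the global count and, only when the current CPU is about to go down, subtracts that CPU's own available vectors. A minimal standalone sketch of the corrected logic (plain C; the simplified structs and example numbers are illustrative assumptions, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for struct irq_matrix / struct cpumap (assumption). */
struct cpumap { unsigned int available; };
struct irq_matrix { unsigned int global_available; struct cpumap *this_cpu; };

/* Mirrors the corrected irq_matrix_available() logic from the hunk above. */
static unsigned int matrix_available(struct irq_matrix *m, bool cpudown)
{
	struct cpumap *cm = m->this_cpu;

	if (!cpudown)
		return m->global_available;
	return m->global_available - cm->available;
}

int main(void)
{
	struct cpumap cm = { .available = 3 };
	struct irq_matrix m = { .global_available = 10, .this_cpu = &cm };

	/* 10 vectors available system-wide; 7 remain if this CPU goes down. */
	printf("%u %u\n", matrix_available(&m, false), matrix_available(&m, true));
	return 0;
}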
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 531ffa984bc2..d5fa4116688a 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -614,14 +614,14 @@ static void s_stop(struct seq_file *m, void *p)
 
 static int s_show(struct seq_file *m, void *p)
 {
-	unsigned long value;
+	void *value;
 	struct kallsym_iter *iter = m->private;

 	/* Some debugging symbols have no name. Ignore them. */
 	if (!iter->name[0])
 		return 0;

-	value = iter->show_value ? iter->value : 0;
+	value = iter->show_value ? (void *)iter->value : NULL;

 	if (iter->module_name[0]) {
 		char type;
@@ -632,10 +632,10 @@ static int s_show(struct seq_file *m, void *p)
 	 */
 		type = iter->exported ? toupper(iter->type) :
 					tolower(iter->type);
-		seq_printf(m, KALLSYM_FMT " %c %s\t[%s]\n", value,
+		seq_printf(m, "%px %c %s\t[%s]\n", value,
 			   type, iter->name, iter->module_name);
 	} else
-		seq_printf(m, KALLSYM_FMT " %c %s\n", value,
+		seq_printf(m, "%px %c %s\n", value,
 			   iter->type, iter->name);
 	return 0;
 }
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 9776da8db180..670d8d7d8087 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4790,7 +4790,8 @@ void lockdep_invariant_state(bool force)
 	 * Verify the former, enforce the latter.
 	 */
 	WARN_ON_ONCE(!force && current->lockdep_depth);
-	invalidate_xhlock(&xhlock(current->xhlock_idx));
+	if (current->xhlocks)
+		invalidate_xhlock(&xhlock(current->xhlock_idx));
 }

 static int cross_lock(struct lockdep_map *lock)
diff --git a/kernel/module.c b/kernel/module.c
index f0411a271765..dea01ac9cb74 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -4157,7 +4157,7 @@ static int m_show(struct seq_file *m, void *p)
 {
 	struct module *mod = list_entry(p, struct module, list);
 	char buf[MODULE_FLAGS_BUF_SIZE];
-	unsigned long value;
+	void *value;

 	/* We always ignore unformed modules. */
 	if (mod->state == MODULE_STATE_UNFORMED)
@@ -4173,8 +4173,8 @@ static int m_show(struct seq_file *m, void *p)
 		   mod->state == MODULE_STATE_COMING ? "Loading" :
 		   "Live");
 	/* Used by oprofile and other similar tools. */
-	value = m->private ? 0 : (unsigned long)mod->core_layout.base;
-	seq_printf(m, " 0x" KALLSYM_FMT, value);
+	value = m->private ? NULL : mod->core_layout.base;
+	seq_printf(m, " 0x%px", value);

 	/* Taints info */
 	if (mod->taints)
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 5d81206a572d..b9006617710f 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -3141,9 +3141,6 @@ void dump_stack_print_info(const char *log_lvl)
 void show_regs_print_info(const char *log_lvl)
 {
 	dump_stack_print_info(log_lvl);
-
-	printk("%stask: %p task.stack: %p\n",
-	       log_lvl, current, task_stack_page(current));
 }

 #endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4037e19bbca2..2fe3aa853e4d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3413,9 +3413,9 @@ void set_task_rq_fair(struct sched_entity *se,
  * _IFF_ we look at the pure running and runnable sums. Because they
  * represent the very same entity, just at different points in the hierarchy.
  *
- *
- * Per the above update_tg_cfs_util() is trivial (and still 'wrong') and
- * simply copies the running sum over.
+ * Per the above update_tg_cfs_util() is trivial and simply copies the running
+ * sum over (but still wrong, because the group entity and group rq do not have
+ * their PELT windows aligned).
  *
  * However, update_tg_cfs_runnable() is more complex. So we have:
  *
@@ -3424,11 +3424,11 @@ void set_task_rq_fair(struct sched_entity *se,
  * And since, like util, the runnable part should be directly transferable,
  * the following would _appear_ to be the straight forward approach:
  *
- *   grq->avg.load_avg = grq->load.weight * grq->avg.running_avg   (3)
+ *   grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg  (3)
  *
  * And per (1) we have:
  *
- *   ge->avg.running_avg == grq->avg.running_avg
+ *   ge->avg.runnable_avg == grq->avg.runnable_avg
  *
  * Which gives:
  *
@@ -3447,27 +3447,28 @@ void set_task_rq_fair(struct sched_entity *se,
  * to (shortly) return to us. This only works by keeping the weights as
  * integral part of the sum. We therefore cannot decompose as per (3).
  *
- * OK, so what then?
+ * Another reason this doesn't work is that runnable isn't a 0-sum entity.
+ * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the
+ * rq itself is runnable anywhere between 2/3 and 1 depending on how the
+ * runnable section of these tasks overlap (or not). If they were to perfectly
+ * align the rq as a whole would be runnable 2/3 of the time. If however we
+ * always have at least 1 runnable task, the rq as a whole is always runnable.
  *
+ * So we'll have to approximate.. :/
  *
- * Another way to look at things is:
+ * Given the constraint:
  *
- *   grq->avg.load_avg = \Sum se->avg.load_avg
+ *   ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX
  *
- * Therefore, per (2):
+ * We can construct a rule that adds runnable to a rq by assuming minimal
+ * overlap.
  *
- *   grq->avg.load_avg = \Sum se->load.weight * se->avg.runnable_avg
+ * On removal, we'll assume each task is equally runnable; which yields:
  *
- * And the very thing we're propagating is a change in that sum (someone
- * joined/left). So we can easily know the runnable change, which would be, per
- * (2) the already tracked se->load_avg divided by the corresponding
- * se->weight.
+ *   grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
  *
- * Basically (4) but in differential form:
+ * XXX: only do this for the part of runnable > running ?
  *
- *   d(runnable_avg) += se->avg.load_avg / se->load.weight
- *                                                                 (5)
- *   ge->avg.load_avg += ge->load.weight * d(runnable_avg)
  */

 static inline void
@@ -3479,6 +3480,14 @@ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
 	if (!delta)
 		return;

+	/*
+	 * The relation between sum and avg is:
+	 *
+	 *   LOAD_AVG_MAX - 1024 + sa->period_contrib
+	 *
+	 * however, the PELT windows are not aligned between grq and gse.
+	 */
+
 	/* Set new sched_entity's utilization */
 	se->avg.util_avg = gcfs_rq->avg.util_avg;
 	se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX;
@@ -3491,33 +3500,68 @@ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
 static inline void
 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-	long runnable_sum = gcfs_rq->prop_runnable_sum;
-	long runnable_load_avg, load_avg;
-	s64 runnable_load_sum, load_sum;
+	long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+	unsigned long runnable_load_avg, load_avg;
+	u64 runnable_load_sum, load_sum = 0;
+	s64 delta_sum;

 	if (!runnable_sum)
 		return;

 	gcfs_rq->prop_runnable_sum = 0;

+	if (runnable_sum >= 0) {
+		/*
+		 * Add runnable; clip at LOAD_AVG_MAX. Reflects that until
+		 * the CPU is saturated running == runnable.
+		 */
+		runnable_sum += se->avg.load_sum;
+		runnable_sum = min(runnable_sum, (long)LOAD_AVG_MAX);
+	} else {
+		/*
+		 * Estimate the new unweighted runnable_sum of the gcfs_rq by
+		 * assuming all tasks are equally runnable.
+		 */
+		if (scale_load_down(gcfs_rq->load.weight)) {
+			load_sum = div_s64(gcfs_rq->avg.load_sum,
+				scale_load_down(gcfs_rq->load.weight));
+		}
+
+		/* But make sure to not inflate se's runnable */
+		runnable_sum = min(se->avg.load_sum, load_sum);
+	}
+
+	/*
+	 * runnable_sum can't be lower than running_sum
+	 * As running sum is scale with cpu capacity wehreas the runnable sum
+	 * is not we rescale running_sum 1st
+	 */
+	running_sum = se->avg.util_sum /
+		arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
+	runnable_sum = max(runnable_sum, running_sum);
+
 	load_sum = (s64)se_weight(se) * runnable_sum;
 	load_avg = div_s64(load_sum, LOAD_AVG_MAX);

-	add_positive(&se->avg.load_sum, runnable_sum);
-	add_positive(&se->avg.load_avg, load_avg);
+	delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
+	delta_avg = load_avg - se->avg.load_avg;

-	add_positive(&cfs_rq->avg.load_avg, load_avg);
-	add_positive(&cfs_rq->avg.load_sum, load_sum);
+	se->avg.load_sum = runnable_sum;
+	se->avg.load_avg = load_avg;
+	add_positive(&cfs_rq->avg.load_avg, delta_avg);
+	add_positive(&cfs_rq->avg.load_sum, delta_sum);

 	runnable_load_sum = (s64)se_runnable(se) * runnable_sum;
 	runnable_load_avg = div_s64(runnable_load_sum, LOAD_AVG_MAX);
+	delta_sum = runnable_load_sum - se_weight(se) * se->avg.runnable_load_sum;
+	delta_avg = runnable_load_avg - se->avg.runnable_load_avg;

-	add_positive(&se->avg.runnable_load_sum, runnable_sum);
-	add_positive(&se->avg.runnable_load_avg, runnable_load_avg);
+	se->avg.runnable_load_sum = runnable_sum;
+	se->avg.runnable_load_avg = runnable_load_avg;

 	if (se->on_rq) {
-		add_positive(&cfs_rq->avg.runnable_load_avg, runnable_load_avg);
-		add_positive(&cfs_rq->avg.runnable_load_sum, runnable_load_sum);
+		add_positive(&cfs_rq->avg.runnable_load_avg, delta_avg);
+		add_positive(&cfs_rq->avg.runnable_load_sum, delta_sum);
 	}
 }
 
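The rewritten comment block and update_tg_cfs_runnable() above estimate the group entity's runnable_sum rather than propagating it exactly: positive contributions are added and clipped at LOAD_AVG_MAX, negative ones fall back to assuming every task on the group rq is equally runnable, and the result is kept at or above the capacity-rescaled running sum. A minimal standalone sketch of that estimate (plain C; LOAD_AVG_MAX and the field roles follow the hunk, everything else is a simplified assumption, not kernel code):

#include <stdio.h>

#define LOAD_AVG_MAX 47742	/* PELT maximum sum (assumption: same constant as the kernel) */

static long min_l(long a, long b) { return a < b ? a : b; }
static long max_l(long a, long b) { return a > b ? a : b; }

/*
 * Sketch of the estimate in update_tg_cfs_runnable():
 *   delta        - gcfs_rq->prop_runnable_sum
 *   se_load_sum  - current load_sum of the group entity
 *   grq_load_sum - weighted load_sum of the group rq
 *   grq_weight   - scaled-down weight of the group rq
 *   running_sum  - util_sum already rescaled by CPU capacity
 */
static long estimate_runnable_sum(long delta, long se_load_sum,
				  long grq_load_sum, long grq_weight,
				  long running_sum)
{
	long runnable_sum;

	if (delta >= 0) {
		/* Add runnable; clip at LOAD_AVG_MAX (running == runnable until saturation). */
		runnable_sum = min_l(se_load_sum + delta, LOAD_AVG_MAX);
	} else {
		/* Assume all tasks are equally runnable, but don't inflate se's runnable. */
		long unweighted = grq_weight ? grq_load_sum / grq_weight : 0;
		runnable_sum = min_l(se_load_sum, unweighted);
	}

	/* runnable can never be lower than running. */
	return max_l(runnable_sum, running_sum);
}

int main(void)
{
	/* Toy numbers, purely illustrative. */
	printf("%ld\n", estimate_runnable_sum(+10000, 40000, 0, 0, 20000));	/* clipped to 47742 */
	printf("%ld\n", estimate_runnable_sum(-10000, 40000, 60000, 2, 20000));	/* min(40000, 30000) = 30000 */
	return 0;
}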
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 98feab7933c7..929ecb7d6b78 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -27,7 +27,7 @@ void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq
 
 	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
 	spin_lock_irqsave(&wq_head->lock, flags);
-	__add_wait_queue_entry_tail(wq_head, wq_entry);
+	__add_wait_queue(wq_head, wq_entry);
 	spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL(add_wait_queue);
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 206e0e2ace53..987d9a9ae283 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -591,7 +591,7 @@ static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 		return ret;

 	if (copy_to_user(arg, &buts, sizeof(buts))) {
-		blk_trace_remove(q);
+		__blk_trace_remove(q);
 		return -EFAULT;
 	}
 	return 0;
@@ -637,7 +637,7 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
 		return ret;

 	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
-		blk_trace_remove(q);
+		__blk_trace_remove(q);
 		return -EFAULT;
 	}
 
@@ -872,7 +872,7 @@ static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
  *
  **/
 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
-			      u32 what, int error, union kernfs_node_id *cgid)
+			      u32 what, int error)
 {
 	struct blk_trace *bt = q->blk_trace;
 
@@ -880,22 +880,21 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
 		return;

 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-			bio_op(bio), bio->bi_opf, what, error, 0, NULL, cgid);
+			bio_op(bio), bio->bi_opf, what, error, 0, NULL,
+			blk_trace_bio_get_cgid(q, bio));
 }

 static void blk_add_trace_bio_bounce(void *ignore,
 				     struct request_queue *q, struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0,
-			  blk_trace_bio_get_cgid(q, bio));
+	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
 }

 static void blk_add_trace_bio_complete(void *ignore,
 				       struct request_queue *q, struct bio *bio,
 				       int error)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error,
-			  blk_trace_bio_get_cgid(q, bio));
+	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
 }

 static void blk_add_trace_bio_backmerge(void *ignore,
@@ -903,8 +902,7 @@ static void blk_add_trace_bio_backmerge(void *ignore,
 					struct request *rq,
 					struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0,
-			  blk_trace_bio_get_cgid(q, bio));
+	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
 }

 static void blk_add_trace_bio_frontmerge(void *ignore,
@@ -912,15 +910,13 @@ static void blk_add_trace_bio_frontmerge(void *ignore,
 					 struct request *rq,
 					 struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0,
-			  blk_trace_bio_get_cgid(q, bio));
+	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
 }

 static void blk_add_trace_bio_queue(void *ignore,
 				    struct request_queue *q, struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0,
-			  blk_trace_bio_get_cgid(q, bio));
+	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
 }

 static void blk_add_trace_getrq(void *ignore,
@@ -928,8 +924,7 @@ static void blk_add_trace_getrq(void *ignore,
 				struct bio *bio, int rw)
 {
 	if (bio)
-		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0,
-				  blk_trace_bio_get_cgid(q, bio));
+		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
 	else {
 		struct blk_trace *bt = q->blk_trace;
 
@@ -945,8 +940,7 @@ static void blk_add_trace_sleeprq(void *ignore,
 			    struct bio *bio, int rw)
 {
 	if (bio)
-		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0,
-				  blk_trace_bio_get_cgid(q, bio));
+		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
 	else {
 		struct blk_trace *bt = q->blk_trace;
 
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 27d1f4ffa3de..0ce99c379c30 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -759,6 +759,8 @@ const struct bpf_prog_ops perf_event_prog_ops = {
 
 static DEFINE_MUTEX(bpf_event_mutex);

+#define BPF_TRACE_MAX_PROGS 64
+
 int perf_event_attach_bpf_prog(struct perf_event *event,
 			       struct bpf_prog *prog)
 {
@@ -772,6 +774,12 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
 		goto unlock;

 	old_array = event->tp_event->prog_array;
+	if (old_array &&
+	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
+		ret = -E2BIG;
+		goto unlock;
+	}
+
 	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
 	if (ret < 0)
 		goto unlock;