path: root/kernel
author		David S. Miller <davem@davemloft.net>	2018-05-11 20:53:22 -0400
committer	David S. Miller <davem@davemloft.net>	2018-05-11 20:53:22 -0400
commit		b2d6cee117f708d493c020f9f355297321507be7 (patch)
tree		2c6975b47034de78fc899b4191260bb2704efc0f /kernel
parent		b753a9faaf9aef1338c28ebd9ace6d749428788b (diff)
parent		4bc871984f7cb5b2dec3ae64b570cb02f9ce2227 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
The bpf syscall and selftests conflicts were trivial overlapping changes.

The r8169 change involved moving the added mdelay from 'net' into a
different function.

A TLS close bug fix overlapped with the splitting of the TLS state into
separate TX and RX parts. I just expanded the tests in the bug fix from
"ctx->conf == X" into "ctx->tx_conf == X && ctx->rx_conf == X".

Signed-off-by: David S. Miller <davem@davemloft.net>
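For illustration, the shape of that conflict resolution (a hypothetical sketch: the placeholder X and the struct/function names below are invented, and the real hunk lives in net/tls, outside this kernel/ diffstat):

struct tls_ctx_sketch {
	int tx_conf;	/* state of the TX half after the split */
	int rx_conf;	/* state of the RX half after the split */
};

static int tls_ctx_in_state(const struct tls_ctx_sketch *ctx, int x)
{
	/* was: return ctx->conf == x;   -- single combined state field */
	return ctx->tx_conf == x && ctx->rx_conf == x;
}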
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/bpf/syscall.c	19
-rw-r--r--	kernel/compat.c	1
-rw-r--r--	kernel/sched/cpufreq_schedutil.c	16
-rw-r--r--	kernel/time/clocksource.c	63
-rw-r--r--	kernel/trace/ftrace.c	4
-rw-r--r--	kernel/trace/trace_events_filter.c	3
-rw-r--r--	kernel/trace/trace_stack.c	2
7 files changed, 67 insertions, 41 deletions
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 9b87198deea2..c286e75ec087 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -28,6 +28,7 @@
 #include <linux/timekeeping.h>
 #include <linux/ctype.h>
 #include <linux/btf.h>
+#include <linux/nospec.h>
 
 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
 			  (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
@@ -104,12 +105,14 @@ const struct bpf_map_ops bpf_map_offload_ops = {
 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 {
 	const struct bpf_map_ops *ops;
+	u32 type = attr->map_type;
 	struct bpf_map *map;
 	int err;
 
-	if (attr->map_type >= ARRAY_SIZE(bpf_map_types))
+	if (type >= ARRAY_SIZE(bpf_map_types))
 		return ERR_PTR(-EINVAL);
-	ops = bpf_map_types[attr->map_type];
+	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
+	ops = bpf_map_types[type];
 	if (!ops)
 		return ERR_PTR(-EINVAL);
 
@@ -124,7 +127,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 	if (IS_ERR(map))
 		return map;
 	map->ops = ops;
-	map->map_type = attr->map_type;
+	map->map_type = type;
 	return map;
 }
 
@@ -897,11 +900,17 @@ static const struct bpf_prog_ops * const bpf_prog_types[] = {
 
 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
 {
-	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
+	const struct bpf_prog_ops *ops;
+
+	if (type >= ARRAY_SIZE(bpf_prog_types))
+		return -EINVAL;
+	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
+	ops = bpf_prog_types[type];
+	if (!ops)
 		return -EINVAL;
 
 	if (!bpf_prog_is_dev_bound(prog->aux))
-		prog->aux->ops = bpf_prog_types[type];
+		prog->aux->ops = ops;
 	else
 		prog->aux->ops = &bpf_offload_prog_ops;
 	prog->type = type;
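The two syscall.c hunks above follow the standard Spectre-v1 hardening pattern: bounds-check the user-supplied index, then clamp it with array_index_nospec() before the table lookup, so a mispredicted branch cannot be used to read out of bounds speculatively. A minimal sketch of the pattern (hypothetical table and ops names, not the bpf code itself):

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/nospec.h>	/* array_index_nospec() */
#include <linux/types.h>	/* u32 */

struct my_ops;
static const struct my_ops *const my_ops_table[16];

static const struct my_ops *my_lookup_ops(u32 type)
{
	if (type >= ARRAY_SIZE(my_ops_table))
		return NULL;
	/* Clamp the index under speculation as well as architecturally. */
	type = array_index_nospec(type, ARRAY_SIZE(my_ops_table));
	return my_ops_table[type];
}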
diff --git a/kernel/compat.c b/kernel/compat.c
index 6d21894806b4..92d8c98c0f57 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -34,6 +34,7 @@ int compat_get_timex(struct timex *txc, const struct compat_timex __user *utp)
 {
 	struct compat_timex tx32;
 
+	memset(txc, 0, sizeof(struct timex));
 	if (copy_from_user(&tx32, utp, sizeof(struct compat_timex)))
 		return -EFAULT;
 
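The memset follows the usual pattern for compat converters: zero the entire native structure first, then fill in only the fields the 32-bit layout actually carries, so nothing uninitialized from the kernel stack survives into the result. A generic sketch of the pattern (hypothetical struct and field names, not the real timex conversion):

#include <linux/string.h>	/* memset() */

struct foo        { long a; long b; long only_in_64bit; };
struct compat_foo { int a; int b; };

static void foo_from_compat32(struct foo *dst, const struct compat_foo *src)
{
	memset(dst, 0, sizeof(*dst));	/* fields not copied below stay zero */
	dst->a = src->a;
	dst->b = src->b;
}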
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index d2c6083304b4..e13df951aca7 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -305,7 +305,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	 * Do not reduce the frequency if the CPU has not been idle
 	 * recently, as the reduction is likely to be premature then.
 	 */
-	if (busy && next_f < sg_policy->next_freq) {
+	if (busy && next_f < sg_policy->next_freq &&
+	    sg_policy->next_freq != UINT_MAX) {
 		next_f = sg_policy->next_freq;
 
 		/* Reset cached freq as next_freq has changed */
@@ -396,19 +397,6 @@ static void sugov_irq_work(struct irq_work *irq_work)
 
 	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
 
-	/*
-	 * For RT tasks, the schedutil governor shoots the frequency to maximum.
-	 * Special care must be taken to ensure that this kthread doesn't result
-	 * in the same behavior.
-	 *
-	 * This is (mostly) guaranteed by the work_in_progress flag. The flag is
-	 * updated only at the end of the sugov_work() function and before that
-	 * the schedutil governor rejects all other frequency scaling requests.
-	 *
-	 * There is a very rare case though, where the RT thread yields right
-	 * after the work_in_progress flag is cleared. The effects of that are
-	 * neglected for now.
-	 */
 	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
 }
 
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 0e974cface0b..84f37420fcf5 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -119,6 +119,16 @@ static DEFINE_SPINLOCK(watchdog_lock);
 static int watchdog_running;
 static atomic_t watchdog_reset_pending;
 
+static void inline clocksource_watchdog_lock(unsigned long *flags)
+{
+	spin_lock_irqsave(&watchdog_lock, *flags);
+}
+
+static void inline clocksource_watchdog_unlock(unsigned long *flags)
+{
+	spin_unlock_irqrestore(&watchdog_lock, *flags);
+}
+
 static int clocksource_watchdog_kthread(void *data);
 static void __clocksource_change_rating(struct clocksource *cs, int rating);
 
@@ -142,9 +152,19 @@ static void __clocksource_unstable(struct clocksource *cs)
 	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
 	cs->flags |= CLOCK_SOURCE_UNSTABLE;
 
+	/*
+	 * If the clocksource is registered clocksource_watchdog_kthread() will
+	 * re-rate and re-select.
+	 */
+	if (list_empty(&cs->list)) {
+		cs->rating = 0;
+		return;
+	}
+
 	if (cs->mark_unstable)
 		cs->mark_unstable(cs);
 
+	/* kick clocksource_watchdog_kthread() */
 	if (finished_booting)
 		schedule_work(&watchdog_work);
 }
@@ -153,10 +173,8 @@ static void __clocksource_unstable(struct clocksource *cs)
  * clocksource_mark_unstable - mark clocksource unstable via watchdog
  * @cs: clocksource to be marked unstable
  *
- * This function is called instead of clocksource_change_rating from
- * cpu hotplug code to avoid a deadlock between the clocksource mutex
- * and the cpu hotplug mutex. It defers the update of the clocksource
- * to the watchdog thread.
+ * This function is called by the x86 TSC code to mark clocksources as unstable;
+ * it defers demotion and re-selection to a kthread.
  */
 void clocksource_mark_unstable(struct clocksource *cs)
 {
@@ -164,7 +182,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
 
 	spin_lock_irqsave(&watchdog_lock, flags);
 	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
-		if (list_empty(&cs->wd_list))
+		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
 			list_add(&cs->wd_list, &watchdog_list);
 		__clocksource_unstable(cs);
 	}
@@ -319,9 +337,8 @@ static void clocksource_resume_watchdog(void)
 
 static void clocksource_enqueue_watchdog(struct clocksource *cs)
 {
-	unsigned long flags;
+	INIT_LIST_HEAD(&cs->wd_list);
 
-	spin_lock_irqsave(&watchdog_lock, flags);
 	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
 		/* cs is a clocksource to be watched. */
 		list_add(&cs->wd_list, &watchdog_list);
@@ -331,7 +348,6 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
 			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 	}
-	spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
 static void clocksource_select_watchdog(bool fallback)
@@ -373,9 +389,6 @@ static void clocksource_select_watchdog(bool fallback)
 
 static void clocksource_dequeue_watchdog(struct clocksource *cs)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&watchdog_lock, flags);
 	if (cs != watchdog) {
 		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
 			/* cs is a watched clocksource. */
@@ -384,21 +397,19 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
 			clocksource_stop_watchdog();
 		}
 	}
-	spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
 static int __clocksource_watchdog_kthread(void)
 {
 	struct clocksource *cs, *tmp;
 	unsigned long flags;
-	LIST_HEAD(unstable);
 	int select = 0;
 
 	spin_lock_irqsave(&watchdog_lock, flags);
 	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
 		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
 			list_del_init(&cs->wd_list);
-			list_add(&cs->wd_list, &unstable);
+			__clocksource_change_rating(cs, 0);
 			select = 1;
 		}
 		if (cs->flags & CLOCK_SOURCE_RESELECT) {
@@ -410,11 +421,6 @@ static int __clocksource_watchdog_kthread(void)
 	clocksource_stop_watchdog();
 	spin_unlock_irqrestore(&watchdog_lock, flags);
 
-	/* Needs to be done outside of watchdog lock */
-	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
-		list_del_init(&cs->wd_list);
-		__clocksource_change_rating(cs, 0);
-	}
 	return select;
 }
 
@@ -447,6 +453,9 @@ static inline int __clocksource_watchdog_kthread(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 void clocksource_mark_unstable(struct clocksource *cs) { }
 
+static void inline clocksource_watchdog_lock(unsigned long *flags) { }
+static void inline clocksource_watchdog_unlock(unsigned long *flags) { }
+
 #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
 
 /**
@@ -779,14 +788,19 @@ EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
  */
 int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
+	unsigned long flags;
 
 	/* Initialize mult/shift and max_idle_ns */
 	__clocksource_update_freq_scale(cs, scale, freq);
 
 	/* Add clocksource to the clocksource list */
 	mutex_lock(&clocksource_mutex);
+
+	clocksource_watchdog_lock(&flags);
 	clocksource_enqueue(cs);
 	clocksource_enqueue_watchdog(cs);
+	clocksource_watchdog_unlock(&flags);
+
 	clocksource_select();
 	clocksource_select_watchdog(false);
 	mutex_unlock(&clocksource_mutex);
@@ -808,8 +822,13 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
  */
 void clocksource_change_rating(struct clocksource *cs, int rating)
 {
+	unsigned long flags;
+
 	mutex_lock(&clocksource_mutex);
+	clocksource_watchdog_lock(&flags);
 	__clocksource_change_rating(cs, rating);
+	clocksource_watchdog_unlock(&flags);
+
 	clocksource_select();
 	clocksource_select_watchdog(false);
 	mutex_unlock(&clocksource_mutex);
@@ -821,6 +840,8 @@ EXPORT_SYMBOL(clocksource_change_rating);
  */
 static int clocksource_unbind(struct clocksource *cs)
 {
+	unsigned long flags;
+
 	if (clocksource_is_watchdog(cs)) {
 		/* Select and try to install a replacement watchdog. */
 		clocksource_select_watchdog(true);
@@ -834,8 +855,12 @@ static int clocksource_unbind(struct clocksource *cs)
 		if (curr_clocksource == cs)
 			return -EBUSY;
 	}
+
+	clocksource_watchdog_lock(&flags);
 	clocksource_dequeue_watchdog(cs);
 	list_del_init(&cs->list);
+	clocksource_watchdog_unlock(&flags);
+
 	return 0;
 }
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 16bbf062018f..8d83bcf9ef69 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5514,10 +5514,10 @@ static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
 	ftrace_create_filter_files(&global_ops, d_tracer);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	trace_create_file("set_graph_function", 0444, d_tracer,
+	trace_create_file("set_graph_function", 0644, d_tracer,
 				    NULL,
 				    &ftrace_graph_fops);
-	trace_create_file("set_graph_notrace", 0444, d_tracer,
+	trace_create_file("set_graph_notrace", 0644, d_tracer,
 				    NULL,
 				    &ftrace_graph_notrace_fops);
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 1f951b3df60c..7d306b74230f 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -762,6 +762,9 @@ static int regex_match_full(char *str, struct regex *r, int len)
 
 static int regex_match_front(char *str, struct regex *r, int len)
 {
+	if (len < r->len)
+		return 0;
+
 	if (strncmp(str, r->pattern, r->len) == 0)
 		return 1;
 	return 0;
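A hedged reading of why the new check is needed: len here is the size of the event field being filtered, and the field need not be NUL-terminated, so strncmp() over r->len bytes could previously compare past the end of a field shorter than the pattern; with the check, such a pattern simply never front-matches. A small illustration (hypothetical sizes):

	/* Hypothetical: an 8-byte field and a 12-character front-match pattern. */
	char field[8] = "shortstr";	/* exactly fills the field, no NUL inside it */
	/* old: strncmp(field, "shortstring!", 12) reads past the 8-byte field   */
	/* new: len (8) < r->len (12), so regex_match_front() returns 0 up front */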
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 3c7bfc4bf5e9..4237eba4ef20 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -472,7 +472,7 @@ static __init int stack_trace_init(void)
 		NULL, &stack_trace_fops);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-	trace_create_file("stack_trace_filter", 0444, d_tracer,
+	trace_create_file("stack_trace_filter", 0644, d_tracer,
 			&trace_ops, &stack_trace_filter_fops);
 #endif
 