Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/arraymap.c        | 18
-rw-r--r--  kernel/bpf/hashtab.c         | 22
-rw-r--r--  kernel/bpf/stackmap.c        | 20
-rw-r--r--  kernel/bpf/syscall.c         | 26
-rw-r--r--  kernel/cgroup.c              | 13
-rw-r--r--  kernel/events/core.c         | 94
-rw-r--r--  kernel/irq/irqdomain.c       | 44
-rw-r--r--  kernel/module.c              | 53
-rw-r--r--  kernel/power/suspend.c       |  4
-rw-r--r--  kernel/stacktrace.c          | 12
-rw-r--r--  kernel/time/tick-sched.c     |  5
-rw-r--r--  kernel/trace/trace_hwlat.c   |  8
-rw-r--r--  kernel/trace/trace_kprobe.c  |  2
-rw-r--r--  kernel/ucount.c              |  3
14 files changed, 189 insertions, 135 deletions
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 229a5d5df977..3d55d95dcf49 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -11,7 +11,6 @@
  */
 #include <linux/bpf.h>
 #include <linux/err.h>
-#include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/filter.h>
@@ -74,14 +73,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	if (array_size >= U32_MAX - PAGE_SIZE)
 		return ERR_PTR(-ENOMEM);
 
-
 	/* allocate all map elements and zero-initialize them */
-	array = kzalloc(array_size, GFP_USER | __GFP_NOWARN);
-	if (!array) {
-		array = vzalloc(array_size);
-		if (!array)
-			return ERR_PTR(-ENOMEM);
-	}
+	array = bpf_map_area_alloc(array_size);
+	if (!array)
+		return ERR_PTR(-ENOMEM);
 
 	/* copy mandatory map attributes */
 	array->map.map_type = attr->map_type;
@@ -97,7 +92,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
 	if (array_size >= U32_MAX - PAGE_SIZE ||
 	    elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
-		kvfree(array);
+		bpf_map_area_free(array);
 		return ERR_PTR(-ENOMEM);
 	}
 out:
@@ -262,7 +257,7 @@ static void array_map_free(struct bpf_map *map)
 	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
 		bpf_array_free_percpu(array);
 
-	kvfree(array);
+	bpf_map_area_free(array);
 }
 
 static const struct bpf_map_ops array_ops = {
@@ -319,7 +314,8 @@ static void fd_array_map_free(struct bpf_map *map)
 	/* make sure it's empty */
 	for (i = 0; i < array->map.max_entries; i++)
 		BUG_ON(array->ptrs[i] != NULL);
-	kvfree(array);
+
+	bpf_map_area_free(array);
 }
 
 static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3f2bb58952d8..a753bbe7df0a 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -13,7 +13,6 @@
 #include <linux/bpf.h>
 #include <linux/jhash.h>
 #include <linux/filter.h>
-#include <linux/vmalloc.h>
 #include "percpu_freelist.h"
 #include "bpf_lru_list.h"
 
@@ -103,7 +102,7 @@ static void htab_free_elems(struct bpf_htab *htab)
 		free_percpu(pptr);
 	}
 free_elems:
-	vfree(htab->elems);
+	bpf_map_area_free(htab->elems);
 }
 
 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
@@ -125,7 +124,8 @@ static int prealloc_init(struct bpf_htab *htab)
 {
 	int err = -ENOMEM, i;
 
-	htab->elems = vzalloc(htab->elem_size * htab->map.max_entries);
+	htab->elems = bpf_map_area_alloc(htab->elem_size *
+					 htab->map.max_entries);
 	if (!htab->elems)
 		return -ENOMEM;
 
@@ -320,14 +320,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 		goto free_htab;
 
 	err = -ENOMEM;
-	htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket),
-				      GFP_USER | __GFP_NOWARN);
-
-	if (!htab->buckets) {
-		htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket));
-		if (!htab->buckets)
-			goto free_htab;
-	}
+	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
+					   sizeof(struct bucket));
+	if (!htab->buckets)
+		goto free_htab;
 
 	for (i = 0; i < htab->n_buckets; i++) {
 		INIT_HLIST_HEAD(&htab->buckets[i].head);
@@ -354,7 +350,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 free_extra_elems:
 	free_percpu(htab->extra_elems);
 free_buckets:
-	kvfree(htab->buckets);
+	bpf_map_area_free(htab->buckets);
 free_htab:
 	kfree(htab);
 	return ERR_PTR(err);
@@ -1014,7 +1010,7 @@ static void htab_map_free(struct bpf_map *map)
 		prealloc_destroy(htab);
 
 	free_percpu(htab->extra_elems);
-	kvfree(htab->buckets);
+	bpf_map_area_free(htab->buckets);
 	kfree(htab);
 }
 
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 732ae16d12b7..be8519148c25 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -7,7 +7,6 @@
 #include <linux/bpf.h>
 #include <linux/jhash.h>
 #include <linux/filter.h>
-#include <linux/vmalloc.h>
 #include <linux/stacktrace.h>
 #include <linux/perf_event.h>
 #include "percpu_freelist.h"
@@ -32,7 +31,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
 	u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
 	int err;
 
-	smap->elems = vzalloc(elem_size * smap->map.max_entries);
+	smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries);
 	if (!smap->elems)
 		return -ENOMEM;
 
@@ -45,7 +44,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
 	return 0;
 
 free_elems:
-	vfree(smap->elems);
+	bpf_map_area_free(smap->elems);
 	return err;
 }
 
@@ -76,12 +75,9 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	if (cost >= U32_MAX - PAGE_SIZE)
 		return ERR_PTR(-E2BIG);
 
-	smap = kzalloc(cost, GFP_USER | __GFP_NOWARN);
-	if (!smap) {
-		smap = vzalloc(cost);
-		if (!smap)
-			return ERR_PTR(-ENOMEM);
-	}
+	smap = bpf_map_area_alloc(cost);
+	if (!smap)
+		return ERR_PTR(-ENOMEM);
 
 	err = -E2BIG;
 	cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
@@ -112,7 +108,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 put_buffers:
 	put_callchain_buffers();
 free_smap:
-	kvfree(smap);
+	bpf_map_area_free(smap);
 	return ERR_PTR(err);
 }
 
@@ -262,9 +258,9 @@ static void stack_map_free(struct bpf_map *map)
 	/* wait for bpf programs to complete before freeing stack map */
 	synchronize_rcu();
 
-	vfree(smap->elems);
+	bpf_map_area_free(smap->elems);
 	pcpu_freelist_destroy(&smap->freelist);
-	kvfree(smap);
+	bpf_map_area_free(smap);
 	put_callchain_buffers();
 }
 
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 1d6b29e4e2c3..19b6129eab23 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -12,6 +12,8 @@
 #include <linux/bpf.h>
 #include <linux/syscalls.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mmzone.h>
 #include <linux/anon_inodes.h>
 #include <linux/file.h>
 #include <linux/license.h>
@@ -49,6 +51,30 @@ void bpf_register_map_type(struct bpf_map_type_list *tl)
 	list_add(&tl->list_node, &bpf_map_types);
 }
 
+void *bpf_map_area_alloc(size_t size)
+{
+	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
+	 * trigger under memory pressure as we really just want to
+	 * fail instead.
+	 */
+	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
+	void *area;
+
+	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
+		area = kmalloc(size, GFP_USER | flags);
+		if (area != NULL)
+			return area;
+	}
+
+	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
+			 PAGE_KERNEL);
+}
+
+void bpf_map_area_free(void *area)
+{
+	kvfree(area);
+}
+
 int bpf_map_precharge_memlock(u32 pages)
 {
 	struct user_struct *user = get_current_user();
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2ee9ec3051b2..688dd02af985 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -5221,6 +5221,11 @@ err_free_css:
 	return ERR_PTR(err);
 }
 
+/*
+ * The returned cgroup is fully initialized including its control mask, but
+ * it isn't associated with its kernfs_node and doesn't have the control
+ * mask applied.
+ */
 static struct cgroup *cgroup_create(struct cgroup *parent)
 {
 	struct cgroup_root *root = parent->root;
@@ -5288,11 +5293,6 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
 
 	cgroup_propagate_control(cgrp);
 
-	/* @cgrp doesn't have dir yet so the following will only create csses */
-	ret = cgroup_apply_control_enable(cgrp);
-	if (ret)
-		goto out_destroy;
-
 	return cgrp;
 
 out_cancel_ref:
@@ -5300,9 +5300,6 @@ out_cancel_ref:
 out_free_cgrp:
 	kfree(cgrp);
 	return ERR_PTR(ret);
-out_destroy:
-	cgroup_destroy_locked(cgrp);
-	return ERR_PTR(ret);
 }
 
 static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 110b38a58493..e235bb991bdd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1469,7 +1469,6 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
 static void
 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 {
-
 	lockdep_assert_held(&ctx->lock);
 
 	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
@@ -1624,6 +1623,8 @@ static void perf_group_attach(struct perf_event *event)
 {
 	struct perf_event *group_leader = event->group_leader, *pos;
 
+	lockdep_assert_held(&event->ctx->lock);
+
 	/*
 	 * We can have double attach due to group movement in perf_event_open.
 	 */
@@ -1697,6 +1698,8 @@ static void perf_group_detach(struct perf_event *event)
 	struct perf_event *sibling, *tmp;
 	struct list_head *list = NULL;
 
+	lockdep_assert_held(&event->ctx->lock);
+
 	/*
 	 * We can have double detach due to exit/hot-unplug + close.
 	 */
@@ -1895,9 +1898,29 @@ __perf_remove_from_context(struct perf_event *event,
  */
 static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
 {
-	lockdep_assert_held(&event->ctx->mutex);
+	struct perf_event_context *ctx = event->ctx;
+
+	lockdep_assert_held(&ctx->mutex);
 
 	event_function_call(event, __perf_remove_from_context, (void *)flags);
+
+	/*
+	 * The above event_function_call() can NO-OP when it hits
+	 * TASK_TOMBSTONE. In that case we must already have been detached
+	 * from the context (by perf_event_exit_event()) but the grouping
+	 * might still be in-tact.
+	 */
+	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
+	if ((flags & DETACH_GROUP) &&
+	    (event->attach_state & PERF_ATTACH_GROUP)) {
+		/*
+		 * Since in that case we cannot possibly be scheduled, simply
+		 * detach now.
+		 */
+		raw_spin_lock_irq(&ctx->lock);
+		perf_group_detach(event);
+		raw_spin_unlock_irq(&ctx->lock);
+	}
 }
 
 /*
@@ -3464,14 +3487,15 @@ struct perf_read_data {
 	int ret;
 };
 
-static int find_cpu_to_read(struct perf_event *event, int local_cpu)
+static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
 {
-	int event_cpu = event->oncpu;
 	u16 local_pkg, event_pkg;
 
 	if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
-		event_pkg = topology_physical_package_id(event_cpu);
-		local_pkg = topology_physical_package_id(local_cpu);
+		int local_cpu = smp_processor_id();
+
+		event_pkg = topology_physical_package_id(event_cpu);
+		local_pkg = topology_physical_package_id(local_cpu);
 
 		if (event_pkg == local_pkg)
 			return local_cpu;
@@ -3601,7 +3625,7 @@ u64 perf_event_read_local(struct perf_event *event)
 
 static int perf_event_read(struct perf_event *event, bool group)
 {
-	int ret = 0, cpu_to_read, local_cpu;
+	int event_cpu, ret = 0;
 
 	/*
 	 * If event is enabled and currently active on a CPU, update the
@@ -3614,21 +3638,25 @@ static int perf_event_read(struct perf_event *event, bool group)
 			.ret = 0,
 		};
 
-		local_cpu = get_cpu();
-		cpu_to_read = find_cpu_to_read(event, local_cpu);
-		put_cpu();
+		event_cpu = READ_ONCE(event->oncpu);
+		if ((unsigned)event_cpu >= nr_cpu_ids)
+			return 0;
+
+		preempt_disable();
+		event_cpu = __perf_event_read_cpu(event, event_cpu);
 
 		/*
 		 * Purposely ignore the smp_call_function_single() return
 		 * value.
 		 *
-		 * If event->oncpu isn't a valid CPU it means the event got
+		 * If event_cpu isn't a valid CPU it means the event got
 		 * scheduled out and that will have updated the event count.
 		 *
 		 * Therefore, either way, we'll have an up-to-date event count
 		 * after this.
 		 */
-		(void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
+		(void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
+		preempt_enable();
 		ret = data.ret;
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
 		struct perf_event_context *ctx = event->ctx;
@@ -6609,6 +6637,27 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 	char *buf = NULL;
 	char *name;
 
+	if (vma->vm_flags & VM_READ)
+		prot |= PROT_READ;
+	if (vma->vm_flags & VM_WRITE)
+		prot |= PROT_WRITE;
+	if (vma->vm_flags & VM_EXEC)
+		prot |= PROT_EXEC;
+
+	if (vma->vm_flags & VM_MAYSHARE)
+		flags = MAP_SHARED;
+	else
+		flags = MAP_PRIVATE;
+
+	if (vma->vm_flags & VM_DENYWRITE)
+		flags |= MAP_DENYWRITE;
+	if (vma->vm_flags & VM_MAYEXEC)
+		flags |= MAP_EXECUTABLE;
+	if (vma->vm_flags & VM_LOCKED)
+		flags |= MAP_LOCKED;
+	if (vma->vm_flags & VM_HUGETLB)
+		flags |= MAP_HUGETLB;
+
 	if (file) {
 		struct inode *inode;
 		dev_t dev;
@@ -6635,27 +6684,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 		maj = MAJOR(dev);
 		min = MINOR(dev);
 
-		if (vma->vm_flags & VM_READ)
-			prot |= PROT_READ;
-		if (vma->vm_flags & VM_WRITE)
-			prot |= PROT_WRITE;
-		if (vma->vm_flags & VM_EXEC)
-			prot |= PROT_EXEC;
-
-		if (vma->vm_flags & VM_MAYSHARE)
-			flags = MAP_SHARED;
-		else
-			flags = MAP_PRIVATE;
-
-		if (vma->vm_flags & VM_DENYWRITE)
-			flags |= MAP_DENYWRITE;
-		if (vma->vm_flags & VM_MAYEXEC)
-			flags |= MAP_EXECUTABLE;
-		if (vma->vm_flags & VM_LOCKED)
-			flags |= MAP_LOCKED;
-		if (vma->vm_flags & VM_HUGETLB)
-			flags |= MAP_HUGETLB;
-
 		goto got_name;
 	} else {
 		if (vma->vm_ops && vma->vm_ops->name) {
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 8c0a0ae43521..b59e6768c5e9 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1346,6 +1346,30 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain,
 }
 EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
 
+static void __irq_domain_activate_irq(struct irq_data *irq_data)
+{
+	if (irq_data && irq_data->domain) {
+		struct irq_domain *domain = irq_data->domain;
+
+		if (irq_data->parent_data)
+			__irq_domain_activate_irq(irq_data->parent_data);
+		if (domain->ops->activate)
+			domain->ops->activate(domain, irq_data);
+	}
+}
+
+static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
+{
+	if (irq_data && irq_data->domain) {
+		struct irq_domain *domain = irq_data->domain;
+
+		if (domain->ops->deactivate)
+			domain->ops->deactivate(domain, irq_data);
+		if (irq_data->parent_data)
+			__irq_domain_deactivate_irq(irq_data->parent_data);
+	}
+}
+
 /**
  * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
  *			     interrupt
@@ -1356,13 +1380,9 @@ EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
  */
 void irq_domain_activate_irq(struct irq_data *irq_data)
 {
-	if (irq_data && irq_data->domain) {
-		struct irq_domain *domain = irq_data->domain;
-
-		if (irq_data->parent_data)
-			irq_domain_activate_irq(irq_data->parent_data);
-		if (domain->ops->activate)
-			domain->ops->activate(domain, irq_data);
+	if (!irqd_is_activated(irq_data)) {
+		__irq_domain_activate_irq(irq_data);
+		irqd_set_activated(irq_data);
 	}
 }
 
@@ -1376,13 +1396,9 @@ void irq_domain_activate_irq(struct irq_data *irq_data)
  */
 void irq_domain_deactivate_irq(struct irq_data *irq_data)
 {
-	if (irq_data && irq_data->domain) {
-		struct irq_domain *domain = irq_data->domain;
-
-		if (domain->ops->deactivate)
-			domain->ops->deactivate(domain, irq_data);
-		if (irq_data->parent_data)
-			irq_domain_deactivate_irq(irq_data->parent_data);
+	if (irqd_is_activated(irq_data)) {
+		__irq_domain_deactivate_irq(irq_data);
+		irqd_clr_activated(irq_data);
 	}
 }
 
diff --git a/kernel/module.c b/kernel/module.c
index 38d4270925d4..3d8f126208e3 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -389,16 +389,16 @@ extern const struct kernel_symbol __start___ksymtab_gpl[];
 extern const struct kernel_symbol __stop___ksymtab_gpl[];
 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
-extern const unsigned long __start___kcrctab[];
-extern const unsigned long __start___kcrctab_gpl[];
-extern const unsigned long __start___kcrctab_gpl_future[];
+extern const s32 __start___kcrctab[];
+extern const s32 __start___kcrctab_gpl[];
+extern const s32 __start___kcrctab_gpl_future[];
 #ifdef CONFIG_UNUSED_SYMBOLS
 extern const struct kernel_symbol __start___ksymtab_unused[];
 extern const struct kernel_symbol __stop___ksymtab_unused[];
 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
-extern const unsigned long __start___kcrctab_unused[];
-extern const unsigned long __start___kcrctab_unused_gpl[];
+extern const s32 __start___kcrctab_unused[];
+extern const s32 __start___kcrctab_unused_gpl[];
 #endif
 
 #ifndef CONFIG_MODVERSIONS
@@ -497,7 +497,7 @@ struct find_symbol_arg {
 
 	/* Output */
 	struct module *owner;
-	const unsigned long *crc;
+	const s32 *crc;
 	const struct kernel_symbol *sym;
 };
 
@@ -563,7 +563,7 @@ static bool find_symbol_in_section(const struct symsearch *syms,
 * (optional) module which owns it. Needs preempt disabled or module_mutex. */
 const struct kernel_symbol *find_symbol(const char *name,
					struct module **owner,
-					const unsigned long **crc,
+					const s32 **crc,
					bool gplok,
					bool warn)
 {
@@ -1249,23 +1249,17 @@ static int try_to_force_load(struct module *mod, const char *reason)
 }
 
 #ifdef CONFIG_MODVERSIONS
-/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
-static unsigned long maybe_relocated(unsigned long crc,
-				     const struct module *crc_owner)
+
+static u32 resolve_rel_crc(const s32 *crc)
 {
-#ifdef ARCH_RELOCATES_KCRCTAB
-	if (crc_owner == NULL)
-		return crc - (unsigned long)reloc_start;
-#endif
-	return crc;
+	return *(u32 *)((void *)crc + *crc);
 }
 
 static int check_version(Elf_Shdr *sechdrs,
			 unsigned int versindex,
			 const char *symname,
			 struct module *mod,
-			 const unsigned long *crc,
-			 const struct module *crc_owner)
+			 const s32 *crc)
 {
 	unsigned int i, num_versions;
 	struct modversion_info *versions;
@@ -1283,13 +1277,19 @@ static int check_version(Elf_Shdr *sechdrs,
		/ sizeof(struct modversion_info);
 
 	for (i = 0; i < num_versions; i++) {
+		u32 crcval;
+
 		if (strcmp(versions[i].name, symname) != 0)
			continue;
 
-		if (versions[i].crc == maybe_relocated(*crc, crc_owner))
+		if (IS_ENABLED(CONFIG_MODULE_REL_CRCS))
+			crcval = resolve_rel_crc(crc);
+		else
+			crcval = *crc;
+		if (versions[i].crc == crcval)
			return 1;
-		pr_debug("Found checksum %lX vs module %lX\n",
-			 maybe_relocated(*crc, crc_owner), versions[i].crc);
+		pr_debug("Found checksum %X vs module %lX\n",
+			 crcval, versions[i].crc);
		goto bad_version;
 	}
 
@@ -1307,7 +1307,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
 {
-	const unsigned long *crc;
+	const s32 *crc;
 
 	/*
	 * Since this should be found in kernel (which can't be removed), no
@@ -1321,8 +1321,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
 	}
 	preempt_enable();
 	return check_version(sechdrs, versindex,
-			     VMLINUX_SYMBOL_STR(module_layout), mod, crc,
-			     NULL);
+			     VMLINUX_SYMBOL_STR(module_layout), mod, crc);
 }
 
 /* First part is kernel version, which we ignore if module has crcs. */
@@ -1340,8 +1339,7 @@ static inline int check_version(Elf_Shdr *sechdrs,
				 unsigned int versindex,
				 const char *symname,
				 struct module *mod,
-				 const unsigned long *crc,
-				 const struct module *crc_owner)
+				 const s32 *crc)
 {
 	return 1;
 }
@@ -1368,7 +1366,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
 {
 	struct module *owner;
 	const struct kernel_symbol *sym;
-	const unsigned long *crc;
+	const s32 *crc;
 	int err;
 
 	/*
@@ -1383,8 +1381,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
 	if (!sym)
		goto unlock;
 
-	if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
-			   owner)) {
+	if (!check_version(info->sechdrs, info->index.vers, name, mod, crc)) {
		sym = ERR_PTR(-EINVAL);
		goto getname;
 	}
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index f67ceb7768b8..15e6baef5c73 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -46,7 +46,7 @@ static const char * const mem_sleep_labels[] = {
 const char *mem_sleep_states[PM_SUSPEND_MAX];
 
 suspend_state_t mem_sleep_current = PM_SUSPEND_FREEZE;
-suspend_state_t mem_sleep_default = PM_SUSPEND_MAX;
+static suspend_state_t mem_sleep_default = PM_SUSPEND_MEM;
 
 unsigned int pm_suspend_global_flags;
 EXPORT_SYMBOL_GPL(pm_suspend_global_flags);
@@ -168,7 +168,7 @@ void suspend_set_ops(const struct platform_suspend_ops *ops)
 	}
 	if (valid_state(PM_SUSPEND_MEM)) {
 		mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM];
-		if (mem_sleep_default >= PM_SUSPEND_MEM)
+		if (mem_sleep_default == PM_SUSPEND_MEM)
 			mem_sleep_current = PM_SUSPEND_MEM;
 	}
 
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index b6e4c16377c7..9c15a9124e83 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -18,10 +18,8 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
 	if (WARN_ON(!trace->entries))
 		return;
 
-	for (i = 0; i < trace->nr_entries; i++) {
-		printk("%*c", 1 + spaces, ' ');
-		print_ip_sym(trace->entries[i]);
-	}
+	for (i = 0; i < trace->nr_entries; i++)
+		printk("%*c%pS\n", 1 + spaces, ' ', (void *)trace->entries[i]);
 }
 EXPORT_SYMBOL_GPL(print_stack_trace);
 
@@ -29,7 +27,6 @@ int snprint_stack_trace(char *buf, size_t size,
			struct stack_trace *trace, int spaces)
 {
 	int i;
-	unsigned long ip;
 	int generated;
 	int total = 0;
 
@@ -37,9 +34,8 @@ int snprint_stack_trace(char *buf, size_t size,
		return 0;
 
 	for (i = 0; i < trace->nr_entries; i++) {
-		ip = trace->entries[i];
-		generated = snprintf(buf, size, "%*c[<%p>] %pS\n",
-				1 + spaces, ' ', (void *) ip, (void *) ip);
+		generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',
+				     (void *)trace->entries[i]);
 
 		total += generated;
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 74e0388cc88d..fc6f740d0277 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -725,6 +725,11 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
		 */
		if (delta == 0) {
			tick_nohz_restart(ts, now);
+			/*
+			 * Make sure next tick stop doesn't get fooled by past
+			 * clock deadline
+			 */
+			ts->next_tick = 0;
			goto out;
		}
	}
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 775569ec50d0..af344a1bf0d0 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -266,7 +266,7 @@ out:
 static struct cpumask save_cpumask;
 static bool disable_migrate;
 
-static void move_to_next_cpu(void)
+static void move_to_next_cpu(bool initmask)
 {
 	static struct cpumask *current_mask;
 	int next_cpu;
@@ -275,7 +275,7 @@ static void move_to_next_cpu(void)
		return;
 
 	/* Just pick the first CPU on first iteration */
-	if (!current_mask) {
+	if (initmask) {
		current_mask = &save_cpumask;
		get_online_cpus();
		cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
@@ -330,10 +330,12 @@ static void move_to_next_cpu(void)
 static int kthread_fn(void *data)
 {
 	u64 interval;
+	bool initmask = true;
 
 	while (!kthread_should_stop()) {
 
-		move_to_next_cpu();
+		move_to_next_cpu(initmask);
+		initmask = false;
 
		local_irq_disable();
		get_sample();
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index a133ecd741e4..7ad9e53ad174 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1372,7 +1372,7 @@ kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
 	return a1 + a2 + a3 + a4 + a5 + a6;
 }
 
-static struct __init trace_event_file *
+static __init struct trace_event_file *
 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
 {
 	struct trace_event_file *file;
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 4bbd38ec3788..95c6336fc2b3 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -227,11 +227,10 @@ static __init int user_namespace_sysctl_init(void)
	 * properly.
	 */
	user_header = register_sysctl("user", empty);
+	kmemleak_ignore(user_header);
	BUG_ON(!user_header);
	BUG_ON(!setup_userns_sysctls(&init_user_ns));
 #endif
	return 0;
 }
 subsys_initcall(user_namespace_sysctl_init);
-
-