| field | value | date |
|---|---|---|
| author | James Bottomley <James.Bottomley@HansenPartnership.com> | 2016-01-07 18:51:13 -0500 |
| committer | James Bottomley <James.Bottomley@HansenPartnership.com> | 2016-01-07 18:51:13 -0500 |
| commit | abaee091a18c19ccd86feb1c8374585d82e96777 (patch) | |
| tree | 01602bae73e1278c3d98dafe1c269049927c58ce /kernel | |
| parent | a2746fb16e41b7c8f02aa4d2605ecce97abbebbd (diff) | |
| parent | 3f8d6f2a0797e8c650a47e5c1b5c2601a46f4293 (diff) | |
Merge branch 'jejb-scsi' into misc
Diffstat (limited to 'kernel')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/bpf/arraymap.c | 10 |
| -rw-r--r-- | kernel/bpf/hashtab.c | 34 |
| -rw-r--r-- | kernel/bpf/inode.c | 6 |
| -rw-r--r-- | kernel/bpf/syscall.c | 40 |
| -rw-r--r-- | kernel/bpf/verifier.c | 3 |
| -rw-r--r-- | kernel/livepatch/core.c | 6 |
| -rw-r--r-- | kernel/panic.c | 5 |
| -rw-r--r-- | kernel/pid.c | 4 |
| -rw-r--r-- | kernel/sched/core.c | 36 |
| -rw-r--r-- | kernel/sched/cputime.c | 3 |
| -rw-r--r-- | kernel/sched/rt.c | 2 |
| -rw-r--r-- | kernel/sched/sched.h | 3 |
| -rw-r--r-- | kernel/sched/wait.c | 16 |
| -rw-r--r-- | kernel/signal.c | 2 |
| -rw-r--r-- | kernel/trace/ring_buffer.c | 17 |
| -rw-r--r-- | kernel/trace/trace_events.c | 16 |
16 files changed, 147 insertions, 56 deletions
```diff
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 3f4c99e06c6b..b0799bced518 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -28,11 +28,17 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	    attr->value_size == 0)
 		return ERR_PTR(-EINVAL);
 
+	if (attr->value_size >= 1 << (KMALLOC_SHIFT_MAX - 1))
+		/* if value_size is bigger, the user space won't be able to
+		 * access the elements.
+		 */
+		return ERR_PTR(-E2BIG);
+
 	elem_size = round_up(attr->value_size, 8);
 
 	/* check round_up into zero and u32 overflow */
 	if (elem_size == 0 ||
-	    attr->max_entries > (U32_MAX - sizeof(*array)) / elem_size)
+	    attr->max_entries > (U32_MAX - PAGE_SIZE - sizeof(*array)) / elem_size)
 		return ERR_PTR(-ENOMEM);
 
 	array_size = sizeof(*array) + attr->max_entries * elem_size;
@@ -105,7 +111,7 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
 		/* all elements already exist */
 		return -EEXIST;
 
-	memcpy(array->value + array->elem_size * index, value, array->elem_size);
+	memcpy(array->value + array->elem_size * index, value, map->value_size);
 	return 0;
 }
 
```
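The overflow check here uses the division form: instead of multiplying `max_entries * elem_size` (which could wrap in u32) and testing the result, it divides the available budget by `elem_size`. A minimal userspace sketch of that pattern follows; `HDR_SIZE` and `PAGE_SZ` are illustrative stand-ins for `sizeof(*array)` and `PAGE_SIZE`, not kernel values.

```c
#include <stdint.h>

#define HDR_SIZE 64u    /* stand-in for sizeof(*array) */
#define PAGE_SZ  4096u  /* stand-in for PAGE_SIZE */

/* Returns 0 and stores the total in *out if HDR_SIZE + n * elem_size
 * (plus slack for page-rounded accounting) fits in a u32; -1 otherwise. */
static int checked_array_size(uint32_t n, uint32_t elem_size, uint32_t *out)
{
	if (elem_size == 0)
		return -1;              /* round_up can yield 0 */
	/* Division form avoids the multiply that might wrap:
	 *   n * elem_size <= U32_MAX - PAGE_SZ - HDR_SIZE
	 *   <=>  n <= (U32_MAX - PAGE_SZ - HDR_SIZE) / elem_size */
	if (n > (UINT32_MAX - PAGE_SZ - HDR_SIZE) / elem_size)
		return -1;
	*out = HDR_SIZE + n * elem_size;  /* provably no wrap now */
	return 0;
}
```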
```diff
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 19909b22b4f8..34777b3746fa 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -64,12 +64,35 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 		 */
 		goto free_htab;
 
-	err = -ENOMEM;
+	if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) -
+	    MAX_BPF_STACK - sizeof(struct htab_elem))
+		/* if value_size is bigger, the user space won't be able to
+		 * access the elements via bpf syscall. This check also makes
+		 * sure that the elem_size doesn't overflow and it's
+		 * kmalloc-able later in htab_map_update_elem()
+		 */
+		goto free_htab;
+
+	htab->elem_size = sizeof(struct htab_elem) +
+			  round_up(htab->map.key_size, 8) +
+			  htab->map.value_size;
+
 	/* prevent zero size kmalloc and check for u32 overflow */
 	if (htab->n_buckets == 0 ||
 	    htab->n_buckets > U32_MAX / sizeof(struct hlist_head))
 		goto free_htab;
 
+	if ((u64) htab->n_buckets * sizeof(struct hlist_head) +
+	    (u64) htab->elem_size * htab->map.max_entries >=
+	    U32_MAX - PAGE_SIZE)
+		/* make sure page count doesn't overflow */
+		goto free_htab;
+
+	htab->map.pages = round_up(htab->n_buckets * sizeof(struct hlist_head) +
+				   htab->elem_size * htab->map.max_entries,
+				   PAGE_SIZE) >> PAGE_SHIFT;
+
+	err = -ENOMEM;
 	htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct hlist_head),
 				      GFP_USER | __GFP_NOWARN);
 
@@ -85,13 +108,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	raw_spin_lock_init(&htab->lock);
 	htab->count = 0;
 
-	htab->elem_size = sizeof(struct htab_elem) +
-			  round_up(htab->map.key_size, 8) +
-			  htab->map.value_size;
-
-	htab->map.pages = round_up(htab->n_buckets * sizeof(struct hlist_head) +
-				   htab->elem_size * htab->map.max_entries,
-				   PAGE_SIZE) >> PAGE_SHIFT;
 	return &htab->map;
 
 free_htab:
@@ -222,7 +238,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 	WARN_ON_ONCE(!rcu_read_lock_held());
 
 	/* allocate new element outside of lock */
-	l_new = kmalloc(htab->elem_size, GFP_ATOMIC);
+	l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
 	if (!l_new)
 		return -ENOMEM;
 
```
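Note that `elem_size` is computed earlier in the function than before, because the new page-count check consumes it. That check uses the other standard overflow idiom: widen both products to u64 before summing and comparing against the u32 budget. A compact sketch of that form, under made-up sizes:

```c
#include <stdint.h>

#define PAGE_SZ 4096u  /* stand-in for PAGE_SIZE */

/* Widening form of the overflow guard used in htab_map_alloc():
 * do the arithmetic in u64, then compare against the u32 budget,
 * leaving slack for the later page round-up. Illustrative only. */
static int fits_u32_budget(uint32_t n_buckets, uint32_t bucket_size,
			   uint32_t elem_size, uint32_t max_entries)
{
	uint64_t total = (uint64_t)n_buckets * bucket_size +
			 (uint64_t)elem_size * max_entries;
	return total < (uint64_t)UINT32_MAX - PAGE_SZ;
}
```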
```diff
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index be6d726e31c9..5a8a797d50b7 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -34,7 +34,7 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
 		atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
 		break;
 	case BPF_TYPE_MAP:
-		atomic_inc(&((struct bpf_map *)raw)->refcnt);
+		bpf_map_inc(raw, true);
 		break;
 	default:
 		WARN_ON_ONCE(1);
@@ -51,7 +51,7 @@ static void bpf_any_put(void *raw, enum bpf_type type)
 		bpf_prog_put(raw);
 		break;
 	case BPF_TYPE_MAP:
-		bpf_map_put(raw);
+		bpf_map_put_with_uref(raw);
 		break;
 	default:
 		WARN_ON_ONCE(1);
@@ -64,7 +64,7 @@ static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
 	void *raw;
 
 	*type = BPF_TYPE_MAP;
-	raw = bpf_map_get(ufd);
+	raw = bpf_map_get_with_uref(ufd);
 	if (IS_ERR(raw)) {
 		*type = BPF_TYPE_PROG;
 		raw = bpf_prog_get(ufd);
 ```
```diff
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 0d3313d02a7e..3b39550d8485 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -82,6 +82,14 @@ static void bpf_map_free_deferred(struct work_struct *work)
 	map->ops->map_free(map);
 }
 
+static void bpf_map_put_uref(struct bpf_map *map)
+{
+	if (atomic_dec_and_test(&map->usercnt)) {
+		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
+			bpf_fd_array_map_clear(map);
+	}
+}
+
 /* decrement map refcnt and schedule it for freeing via workqueue
  * (unrelying map implementation ops->map_free() might sleep)
  */
@@ -93,17 +101,15 @@ void bpf_map_put(struct bpf_map *map)
 	}
 }
 
-static int bpf_map_release(struct inode *inode, struct file *filp)
+void bpf_map_put_with_uref(struct bpf_map *map)
 {
-	struct bpf_map *map = filp->private_data;
-
-	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
-		/* prog_array stores refcnt-ed bpf_prog pointers
-		 * release them all when user space closes prog_array_fd
-		 */
-		bpf_fd_array_map_clear(map);
-
+	bpf_map_put_uref(map);
 	bpf_map_put(map);
+}
+
+static int bpf_map_release(struct inode *inode, struct file *filp)
+{
+	bpf_map_put_with_uref(filp->private_data);
 	return 0;
 }
 
@@ -142,6 +148,7 @@ static int map_create(union bpf_attr *attr)
 		return PTR_ERR(map);
 
 	atomic_set(&map->refcnt, 1);
+	atomic_set(&map->usercnt, 1);
 
 	err = bpf_map_charge_memlock(map);
 	if (err)
@@ -174,7 +181,14 @@ struct bpf_map *__bpf_map_get(struct fd f)
 	return f.file->private_data;
 }
 
-struct bpf_map *bpf_map_get(u32 ufd)
+void bpf_map_inc(struct bpf_map *map, bool uref)
+{
+	atomic_inc(&map->refcnt);
+	if (uref)
+		atomic_inc(&map->usercnt);
+}
+
+struct bpf_map *bpf_map_get_with_uref(u32 ufd)
 {
 	struct fd f = fdget(ufd);
 	struct bpf_map *map;
@@ -183,7 +197,7 @@ struct bpf_map *bpf_map_get(u32 ufd)
 	if (IS_ERR(map))
 		return map;
 
-	atomic_inc(&map->refcnt);
+	bpf_map_inc(map, true);
 	fdput(f);
 
 	return map;
@@ -226,7 +240,7 @@ static int map_lookup_elem(union bpf_attr *attr)
 		goto free_key;
 
 	err = -ENOMEM;
-	value = kmalloc(map->value_size, GFP_USER);
+	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
 	if (!value)
 		goto free_key;
 
@@ -285,7 +299,7 @@ static int map_update_elem(union bpf_attr *attr)
 		goto free_key;
 
 	err = -ENOMEM;
-	value = kmalloc(map->value_size, GFP_USER);
+	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
 	if (!value)
 		goto free_key;
 
```
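Together with the inode.c hunks above, this introduces a two-counter lifetime scheme: `refcnt` counts every holder (verifier-attached programs included), while `usercnt` counts only user-facing handles, so a prog-array can be emptied when the last user handle drops even if programs inside it still pin the map. A minimal userspace model of the split follows; the types and `map_clear_progs()` are illustrative, and the kernel defers the actual free to a workqueue rather than calling `free()` inline.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct map {
	atomic_int refcnt;   /* all holders: files, programs, ... */
	atomic_int usercnt;  /* user-space handles only */
	bool is_prog_array;
};

static void map_clear_progs(struct map *m) { (void)m; /* drop prog refs in slots */ }

static void map_inc(struct map *m, bool uref)
{
	atomic_fetch_add(&m->refcnt, 1);
	if (uref)
		atomic_fetch_add(&m->usercnt, 1);
}

static void map_put(struct map *m)
{
	if (atomic_fetch_sub(&m->refcnt, 1) == 1)
		free(m);  /* kernel: schedule bpf_map_free_deferred() */
}

static void map_put_with_uref(struct map *m)
{
	/* Last *user* reference gone: empty the prog array, breaking the
	 * map -> prog -> map reference cycle before the final put. */
	if (atomic_fetch_sub(&m->usercnt, 1) == 1 && m->is_prog_array)
		map_clear_progs(m);
	map_put(m);
}
```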
```diff
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index c6073056badf..a7945d10b378 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2021,8 +2021,7 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
 			 * will be used by the valid program until it's unloaded
 			 * and all maps are released in free_bpf_prog_info()
 			 */
-			atomic_inc(&map->refcnt);
-
+			bpf_map_inc(map, false);
 			fdput(f);
 next_insn:
 			insn++;
```
```diff
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 6e5344112419..db545cbcdb89 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -294,6 +294,12 @@ static int klp_write_object_relocations(struct module *pmod,
 
 	for (reloc = obj->relocs; reloc->name; reloc++) {
 		if (!klp_is_module(obj)) {
+
+#if defined(CONFIG_RANDOMIZE_BASE)
+			/* If KASLR has been enabled, adjust old value accordingly */
+			if (kaslr_enabled())
+				reloc->val += kaslr_offset();
+#endif
 			ret = klp_verify_vmlinux_symbol(reloc->name,
 							reloc->val);
 			if (ret)
```
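The idea behind this hunk: a livepatch module records vmlinux symbol addresses as they were at build time, but with KASLR the running kernel is shifted by a per-boot offset, so the recorded address must be rebased before verification. `kaslr_enabled()` and `kaslr_offset()` are real kernel helpers; everything else in this sketch is an illustrative stand-in.

```c
/* Rebase a build-time symbol address to its runtime location.
 * Sketch only: "struct reloc" and boot_offset model the kernel's
 * klp_reloc and kaslr_offset(). */
struct reloc {
	const char *name;
	unsigned long val;  /* address recorded at patch build time */
};

static void rebase_reloc(struct reloc *r, int kaslr_on,
			 unsigned long boot_offset)
{
	if (kaslr_on)
		r->val += boot_offset;  /* build-time addr -> runtime addr */
}
```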
```diff
diff --git a/kernel/panic.c b/kernel/panic.c
index 4579dbb7ed87..4b150bc0c6c1 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -152,8 +152,11 @@ void panic(const char *fmt, ...)
 	 * We may have ended up stopping the CPU holding the lock (in
 	 * smp_send_stop()) while still having some valuable data in the console
 	 * buffer. Try to acquire the lock then release it regardless of the
-	 * result. The release will also print the buffers out.
+	 * result. The release will also print the buffers out. Locks debug
+	 * should be disabled to avoid reporting bad unlock balance when
+	 * panic() is not being callled from OOPS.
 	 */
+	debug_locks_off();
 	console_trylock();
 	console_unlock();
 
```
```diff
diff --git a/kernel/pid.c b/kernel/pid.c
index ca368793808e..78b3d9f80d44 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -467,7 +467,7 @@ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
 	rcu_read_lock();
 	if (type != PIDTYPE_PID)
 		task = task->group_leader;
-	pid = get_pid(task->pids[type].pid);
+	pid = get_pid(rcu_dereference(task->pids[type].pid));
 	rcu_read_unlock();
 	return pid;
 }
@@ -528,7 +528,7 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
 	if (likely(pid_alive(task))) {
 		if (type != PIDTYPE_PID)
 			task = task->group_leader;
-		nr = pid_nr_ns(task->pids[type].pid, ns);
+		nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns);
 	}
 	rcu_read_unlock();
 
```
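Both hunks enforce the same rule: an RCU-protected pointer must be read through `rcu_dereference()` inside the read-side critical section, never by a plain load, so the access is properly marked against compiler reordering and tearing. A sketch of the pattern using userspace RCU (liburcu) for illustration; thread registration and the actual reference-taking are elided, and the types are made up.

```c
#include <urcu.h>  /* liburcu: rcu_read_lock/unlock, rcu_dereference */

struct pid_obj;  /* opaque; only the pointer matters here */
struct pid_link { struct pid_obj *pid; };

struct pid_obj *get_linked_pid(struct pid_link *link)
{
	struct pid_obj *p;

	rcu_read_lock();
	p = rcu_dereference(link->pid);  /* marked load, not plain link->pid */
	/* ...take a counted reference on p while still inside the section... */
	rcu_read_unlock();
	return p;
}
```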
```diff
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4d568ac9319e..7063c6a07440 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1947,13 +1947,38 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 
 #ifdef CONFIG_SMP
 	/*
+	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
+	 * possible to, falsely, observe p->on_cpu == 0.
+	 *
+	 * One must be running (->on_cpu == 1) in order to remove oneself
+	 * from the runqueue.
+	 *
+	 *  [S] ->on_cpu = 1;       [L] ->on_rq
+	 *      UNLOCK rq->lock
+	 *                          RMB
+	 *      LOCK   rq->lock
+	 *  [S] ->on_rq = 0;        [L] ->on_cpu
+	 *
+	 * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
+	 * from the consecutive calls to schedule(); the first switching to our
+	 * task, the second putting it to sleep.
+	 */
+	smp_rmb();
+
+	/*
 	 * If the owning (remote) cpu is still in the middle of schedule() with
 	 * this task as prev, wait until its done referencing the task.
 	 */
 	while (p->on_cpu)
 		cpu_relax();
 	/*
-	 * Pairs with the smp_wmb() in finish_lock_switch().
+	 * Combined with the control dependency above, we have an effective
+	 * smp_load_acquire() without the need for full barriers.
+	 *
+	 * Pairs with the smp_store_release() in finish_lock_switch().
+	 *
+	 * This ensures that tasks getting woken will be fully ordered against
+	 * their previous state and preserve Program Order.
 	 */
 	smp_rmb();
 
@@ -2039,7 +2064,6 @@ out:
  */
 int wake_up_process(struct task_struct *p)
 {
-	WARN_ON(task_is_stopped_or_traced(p));
 	return try_to_wake_up(p, TASK_NORMAL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
@@ -5847,13 +5871,13 @@ static int init_rootdomain(struct root_domain *rd)
 {
 	memset(rd, 0, sizeof(*rd));
 
-	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
 		goto out;
-	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
 		goto free_span;
-	if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
 		goto free_online;
-	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
 		goto free_dlo_mask;
 
 	init_dl_bw(&rd->dl_bw);
```
```diff
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 26a54461bf59..05de80b48586 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -788,6 +788,9 @@ cputime_t task_gtime(struct task_struct *t)
 	unsigned int seq;
 	cputime_t gtime;
 
+	if (!context_tracking_is_enabled())
+		return t->gtime;
+
 	do {
 		seq = read_seqbegin(&t->vtime_seqlock);
 
```
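The fix adds a fast path: when context tracking is off, `gtime` is never updated concurrently by vtime accounting, so the seqlock retry loop is pure overhead and the stored value can be returned directly. A minimal userspace sketch of a seqcount reader with such a fast path; names and the even/odd convention are illustrative.

```c
#include <stdatomic.h>
#include <stdbool.h>

extern bool tracking_enabled;           /* models context_tracking_is_enabled() */
extern atomic_uint seq;                 /* even = stable, odd = writer active */
extern unsigned long long gtime_raw;    /* models t->gtime */

unsigned long long read_gtime(void)
{
	unsigned int s;
	unsigned long long v;

	if (!tracking_enabled)
		return gtime_raw;        /* fast path: field never updated live */

	do {
		s = atomic_load_explicit(&seq, memory_order_acquire);
		v = gtime_raw;           /* kernel adds a live delta here */
	} while ((s & 1) ||
		 atomic_load_explicit(&seq, memory_order_acquire) != s);
	return v;
}
```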
```diff
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index e3cc16312046..8ec86abe0ea1 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -64,7 +64,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 	raw_spin_unlock(&rt_b->rt_runtime_lock);
 }
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
 static void push_irq_work_func(struct irq_work *work);
 #endif
 
```
```diff
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index efd3bfc7e347..b242775bf670 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1073,6 +1073,9 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 	 * We must ensure this doesn't happen until the switch is completely
 	 * finished.
 	 *
+	 * In particular, the load of prev->state in finish_task_switch() must
+	 * happen before this.
+	 *
 	 * Pairs with the control dependency and rmb in try_to_wake_up().
 	 */
 	smp_store_release(&prev->on_cpu, 0);
```
```diff
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 052e02672d12..f10bd873e684 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -583,18 +583,18 @@ EXPORT_SYMBOL(wake_up_atomic_t);
 
 __sched int bit_wait(struct wait_bit_key *word)
 {
-	if (signal_pending_state(current->state, current))
-		return 1;
 	schedule();
+	if (signal_pending(current))
+		return -EINTR;
 	return 0;
 }
 EXPORT_SYMBOL(bit_wait);
 
 __sched int bit_wait_io(struct wait_bit_key *word)
 {
-	if (signal_pending_state(current->state, current))
-		return 1;
 	io_schedule();
+	if (signal_pending(current))
+		return -EINTR;
 	return 0;
 }
 EXPORT_SYMBOL(bit_wait_io);
@@ -602,11 +602,11 @@ EXPORT_SYMBOL(bit_wait_io);
 __sched int bit_wait_timeout(struct wait_bit_key *word)
 {
 	unsigned long now = READ_ONCE(jiffies);
-	if (signal_pending_state(current->state, current))
-		return 1;
 	if (time_after_eq(now, word->timeout))
 		return -EAGAIN;
 	schedule_timeout(word->timeout - now);
+	if (signal_pending(current))
+		return -EINTR;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(bit_wait_timeout);
@@ -614,11 +614,11 @@ EXPORT_SYMBOL_GPL(bit_wait_timeout);
 __sched int bit_wait_io_timeout(struct wait_bit_key *word)
 {
 	unsigned long now = READ_ONCE(jiffies);
-	if (signal_pending_state(current->state, current))
-		return 1;
 	if (time_after_eq(now, word->timeout))
 		return -EAGAIN;
 	io_schedule_timeout(word->timeout - now);
+	if (signal_pending(current))
+		return -EINTR;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
```
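All four hunks apply the same reordering: sleep first, then test for a pending signal, so `-EINTR` is reported only when a signal is what actually ended the wait, instead of a pre-sleep check that can abort a wait which would have succeeded. A stripped-down sketch of the two shapes; the stubs merely stand in for `schedule()` and `signal_pending(current)`, and `EINTR_RC` is illustrative.

```c
#include <stdbool.h>

static bool signal_pending_now;          /* models signal_pending(current) */
static void sleep_here(void) { }         /* models schedule()/io_schedule() */

#define EINTR_RC (-4)                    /* stand-in for -EINTR */

/* Old shape: test before sleeping; a signal that arrives during the
 * sleep is only noticed on the next loop pass, and a normal wakeup
 * racing with a signal is misclassified. */
int bit_wait_old(void)
{
	if (signal_pending_now)
		return 1;
	sleep_here();
	return 0;
}

/* New shape: sleep first, then report -EINTR only if a signal is what
 * we woke up to. */
int bit_wait_new(void)
{
	sleep_here();
	if (signal_pending_now)
		return EINTR_RC;
	return 0;
}
```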
```diff
diff --git a/kernel/signal.c b/kernel/signal.c
index c0b01fe24bbd..f3f1f7a972fd 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3503,7 +3503,7 @@ SYSCALL_DEFINE0(pause)
 
 #endif
 
-int sigsuspend(sigset_t *set)
+static int sigsuspend(sigset_t *set)
 {
 	current->saved_sigmask = current->blocked;
 	set_current_blocked(set);
```
```diff
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 75f1d05ea82d..9c6045a27ba3 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1887,12 +1887,6 @@ rb_event_index(struct ring_buffer_event *event)
 	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
 }
 
-static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
-{
-	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
-	cpu_buffer->reader_page->read = 0;
-}
-
 static void rb_inc_iter(struct ring_buffer_iter *iter)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
@@ -2803,8 +2797,11 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 
 	event = __rb_reserve_next(cpu_buffer, &info);
 
-	if (unlikely(PTR_ERR(event) == -EAGAIN))
+	if (unlikely(PTR_ERR(event) == -EAGAIN)) {
+		if (info.add_timestamp)
+			info.length -= RB_LEN_TIME_EXTEND;
 		goto again;
+	}
 
 	if (!event)
 		goto out_fail;
@@ -3626,7 +3623,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 
 	/* Finally update the reader page to the new head */
 	cpu_buffer->reader_page = reader;
-	rb_reset_reader_page(cpu_buffer);
+	cpu_buffer->reader_page->read = 0;
 
 	if (overwrite != cpu_buffer->last_overrun) {
 		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
@@ -3636,6 +3633,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	goto again;
 
 out:
+	/* Update the read_stamp on the first event */
+	if (reader && reader->read == 0)
+		cpu_buffer->read_stamp = reader->page->time_stamp;
+
 	arch_spin_unlock(&cpu_buffer->lock);
 	local_irq_restore(flags);
 
```
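The `-EAGAIN` hunk fixes a retry bug: the failed attempt may already have grown the requested length to make room for a time-extend word, and without rolling that back the length would be bumped again on every retry. A structural sketch of the loop, with made-up types and stubbed-out reserve helpers:

```c
#include <stdbool.h>
#include <stddef.h>

#define TIME_EXTEND_LEN 8u  /* stand-in for RB_LEN_TIME_EXTEND */

struct reserve_info {
	size_t length;       /* requested event length */
	bool add_timestamp;  /* attempt grew length for a time-extend word */
};

extern void *try_reserve(struct reserve_info *info);  /* may bump info->length */
extern bool failed_eagain(void *event);               /* models PTR_ERR == -EAGAIN */

void *reserve_event(struct reserve_info *info)
{
	void *event;
again:
	event = try_reserve(info);
	if (failed_eagain(event)) {
		if (info->add_timestamp)
			info->length -= TIME_EXTEND_LEN;  /* undo bump before retry */
		goto again;
	}
	return event;
}
```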
```diff
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 6bbc5f652355..4f6ef6912e00 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -582,6 +582,12 @@ static void __ftrace_clear_event_pids(struct trace_array *tr)
 	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
 	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);
 
+	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
+	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);
+
+	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
+	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
+
 	list_for_each_entry(file, &tr->events, list) {
 		clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
 	}
@@ -1729,6 +1735,16 @@ ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
 					 tr, INT_MAX);
 	register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
 					 tr, 0);
+
+	register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
+					     tr, INT_MAX);
+	register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
+					     tr, 0);
+
+	register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
+					 tr, INT_MAX);
+	register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
+					 tr, 0);
 }
 
 /*
```
