Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c             5
-rw-r--r--  kernel/cpuset.c            29
-rw-r--r--  kernel/events/core.c        8
-rw-r--r--  kernel/exit.c               9
-rw-r--r--  kernel/futex.c             28
-rw-r--r--  kernel/hung_task.c         14
-rw-r--r--  kernel/ptrace.c            13
-rw-r--r--  kernel/sched_fair.c        14
-rw-r--r--  kernel/signal.c             2
-rw-r--r--  kernel/sysctl_binary.c      2
-rw-r--r--  kernel/time/clockevents.c   1
-rw-r--r--  kernel/time/clocksource.c  12
12 files changed, 98 insertions(+), 39 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d9d5648f3cdc..a184470cf9b5 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2098,11 +2098,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 			continue;
 		/* get old css_set pointer */
 		task_lock(tsk);
-		if (tsk->flags & PF_EXITING) {
-			/* ignore this task if it's going away */
-			task_unlock(tsk);
-			continue;
-		}
 		oldcg = tsk->cgroups;
 		get_css_set(oldcg);
 		task_unlock(tsk);
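What remains after the removal is the plain snapshot-and-pin idiom: lock the task, read the shared pointer, take a reference, unlock; the pinned object stays valid after the lock drops. A minimal userspace C sketch of that idiom, with a pthread mutex and a bare counter standing in for task_lock() and get_css_set() (every name below is illustrative, not kernel API):

#include <pthread.h>
#include <stdio.h>

struct set { int refcount; };            /* stands in for struct css_set */

struct task {
	pthread_mutex_t lock;            /* stands in for task_lock() */
	struct set *cur;                 /* stands in for tsk->cgroups */
};

/* Snapshot tsk->cur and pin it so it outlives the critical section. */
static struct set *pin_current_set(struct task *tsk)
{
	struct set *old;

	pthread_mutex_lock(&tsk->lock);
	old = tsk->cur;          /* snapshot under the lock */
	old->refcount++;         /* pin before unlocking */
	pthread_mutex_unlock(&tsk->lock);
	return old;              /* usable until the reference is dropped */
}

int main(void)
{
	struct set s = { .refcount = 1 };
	struct task t = { PTHREAD_MUTEX_INITIALIZER, &s };

	printf("refcount now %d\n", pin_current_set(&t)->refcount); /* 2 */
	return 0;
}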
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9fe58c46a426..0b1712dba587 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -123,6 +123,19 @@ static inline struct cpuset *task_cs(struct task_struct *task)
 			    struct cpuset, css);
 }
 
+#ifdef CONFIG_NUMA
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+	return task->mempolicy;
+}
+#else
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+	return false;
+}
+#endif
+
+
 /* bits in struct cpuset flags field */
 typedef enum {
 	CS_CPU_EXCLUSIVE,
@@ -949,7 +962,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 static void cpuset_change_task_nodemask(struct task_struct *tsk,
 					nodemask_t *newmems)
 {
-	bool masks_disjoint = !nodes_intersects(*newmems, tsk->mems_allowed);
+	bool need_loop;
 
 repeat:
 	/*
@@ -962,6 +975,14 @@ repeat:
 		return;
 
 	task_lock(tsk);
+	/*
+	 * Determine if a loop is necessary if another thread is doing
+	 * get_mems_allowed(). If at least one node remains unchanged and
+	 * tsk does not have a mempolicy, then an empty nodemask will not be
+	 * possible when mems_allowed is larger than a word.
+	 */
+	need_loop = task_has_mempolicy(tsk) ||
+			!nodes_intersects(*newmems, tsk->mems_allowed);
 	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
 	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
 
@@ -981,11 +1002,9 @@ repeat:
 
 	/*
 	 * Allocation of memory is very fast, we needn't sleep when waiting
-	 * for the read-side. No wait is necessary, however, if at least one
-	 * node remains unchanged.
+	 * for the read-side.
 	 */
-	while (masks_disjoint &&
-	       ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
+	while (need_loop && ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
 		task_unlock(tsk);
 		if (!task_curr(tsk))
 			yield();
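The need_loop test rests on an invariant of the two-step update: nodes_or() first widens mems_allowed to old|new, and the final narrowing leaves new, so any bit in the intersection of old and new is set in every intermediate word a racing reader can observe. Only when the masks are disjoint (or a mempolicy must be rebound) can a reader of a multi-word mask catch an entirely empty value. A self-contained sketch of that invariant on a toy two-word mask (plain C, not the kernel's nodemask API; names invented):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define WORDS 2

/* Value of one mask word after t writer steps on it:
 * 0 = untouched, 1 = after the OR (widen), 2 = after the narrowing. */
static uint64_t word_at(uint64_t oldw, uint64_t neww, int t)
{
	if (t == 0)
		return oldw;
	if (t == 1)
		return oldw | neww;
	return neww;
}

/* A racing reader may catch each word at a different stage. */
static bool reader_can_see_empty(const uint64_t oldm[WORDS],
				 const uint64_t newm[WORDS])
{
	for (int t0 = 0; t0 < 3; t0++)
		for (int t1 = 0; t1 < 3; t1++)
			if (!word_at(oldm[0], newm[0], t0) &&
			    !word_at(oldm[1], newm[1], t1))
				return true;
	return false;
}

int main(void)
{
	uint64_t a[WORDS] = { 0x1, 0x0 }, b[WORDS] = { 0x0, 0x2 }; /* disjoint */
	uint64_t c[WORDS] = { 0x3, 0x0 }, d[WORDS] = { 0x1, 0x0 }; /* share bit 0 */

	assert(reader_can_see_empty(a, b));   /* writer must wait out readers */
	assert(!reader_can_see_empty(c, d));  /* overlap: never seen empty */
	return 0;
}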
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2f8f3f103cb4..24e3a4b1c89c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3360,9 +3360,13 @@ static void ring_buffer_wakeup(struct perf_event *event)
 
 	rcu_read_lock();
 	rb = rcu_dereference(event->rb);
-	list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
+	if (!rb)
+		goto unlock;
+
+	list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
 		wake_up_all(&event->waitq);
-	}
+
+unlock:
 	rcu_read_unlock();
 }
 
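The added check acknowledges that rcu_dereference() can legitimately return NULL once the event has been detached from its ring buffer; walking rb->event_list without it is the bug. A userspace approximation of the shape, with a C11 acquire load standing in for rcu_dereference() (illustrative only; nothing here models RCU grace periods, and all names are invented):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct waiter { struct waiter *next; int id; };
struct ring { struct waiter *waiters; };

/* Published pointer; a detach path may reset it to NULL at any time. */
static _Atomic(struct ring *) event_rb;

static void wake_all(void)
{
	/* One load, then a NULL check before any dereference. */
	struct ring *rb = atomic_load_explicit(&event_rb, memory_order_acquire);

	if (!rb)
		return;          /* buffer already torn down: nothing to do */

	for (struct waiter *w = rb->waiters; w; w = w->next)
		printf("wake %d\n", w->id);
}

int main(void)
{
	struct waiter w = { NULL, 1 };
	struct ring r = { &w };

	atomic_store(&event_rb, &r);
	wake_all();              /* wakes waiter 1 */
	atomic_store(&event_rb, NULL);
	wake_all();              /* safely does nothing */
	return 0;
}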
diff --git a/kernel/exit.c b/kernel/exit.c
index d0b7d988f873..e6e01b959a0e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1540,8 +1540,15 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
 	}
 
 	/* dead body doesn't have much to contribute */
-	if (p->exit_state == EXIT_DEAD)
+	if (unlikely(p->exit_state == EXIT_DEAD)) {
+		/*
+		 * But do not ignore this task until the tracer does
+		 * wait_task_zombie()->do_notify_parent().
+		 */
+		if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
+			wo->notask_error = 0;
 		return 0;
+	}
 
 	/* slay zombie? */
 	if (p->exit_state == EXIT_ZOMBIE) {
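The effect of the new branch is easiest to see in isolation: for an EXIT_DEAD child that has been reparented to a tracer, the real parent's wait() must keep blocking (wo->notask_error = 0) because a notification is still owed once the tracer reaps it. A toy decision function capturing just that rule (types and names are stand-ins, not the kernel's):

#include <assert.h>
#include <stdbool.h>

enum exit_state { ALIVE, ZOMBIE, DEAD };

struct child {
	enum exit_state state;
	bool reparented_to_tracer;   /* stand-in for ptrace_reparented(p) */
};

/*
 * True when a non-ptrace wait()er should stay blocked rather than fail
 * with ECHILD: the dead body still owes a notification after the tracer
 * reaps it.
 */
static bool keep_waiting(const struct child *p, bool waiter_is_tracer)
{
	if (p->state != DEAD)
		return false;        /* handled by the zombie/alive paths */
	return !waiter_is_tracer && p->reparented_to_tracer;
}

int main(void)
{
	struct child traced = { DEAD, true };
	struct child plain  = { DEAD, false };

	assert(keep_waiting(&traced, false));   /* real parent must wait */
	assert(!keep_waiting(&traced, true));   /* tracer sees nothing here */
	assert(!keep_waiting(&plain, false));
	return 0;
}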
diff --git a/kernel/futex.c b/kernel/futex.c
index ea87f4d2f455..1614be20173d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -314,17 +314,29 @@ again:
 #endif
 
 	lock_page(page_head);
+
+	/*
+	 * If page_head->mapping is NULL, then it cannot be a PageAnon
+	 * page; but it might be the ZERO_PAGE or in the gate area or
+	 * in a special mapping (all cases which we are happy to fail);
+	 * or it may have been a good file page when get_user_pages_fast
+	 * found it, but truncated or holepunched or subjected to
+	 * invalidate_complete_page2 before we got the page lock (also
+	 * cases which we are happy to fail). And we hold a reference,
+	 * so refcount care in invalidate_complete_page's remove_mapping
+	 * prevents drop_caches from setting mapping to NULL beneath us.
+	 *
+	 * The case we do have to guard against is when memory pressure made
+	 * shmem_writepage move it from filecache to swapcache beneath us:
+	 * an unlikely race, but we do need to retry for page_head->mapping.
+	 */
 	if (!page_head->mapping) {
+		int shmem_swizzled = PageSwapCache(page_head);
 		unlock_page(page_head);
 		put_page(page_head);
-		/*
-		 * ZERO_PAGE pages don't have a mapping. Avoid a busy loop
-		 * trying to find one. RW mapping would have COW'd (and thus
-		 * have a mapping) so this page is RO and won't ever change.
-		 */
-		if ((page_head == ZERO_PAGE(address)))
-			return -EFAULT;
-		goto again;
+		if (shmem_swizzled)
+			goto again;
+		return -EFAULT;
 	}
 
 	/*
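The rewritten error path separates a transient failure (shmem moved the page into swapcache: retry) from permanent ones (return -EFAULT), and it samples PageSwapCache() while the page is still held, before the reference is dropped. A userspace sketch of that classify-before-release, retry-on-transient shape (all names invented; only EFAULT comes from errno.h):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct page { void *mapping; bool in_swapcache; };

/* Pretend lookup: may return a page whose mapping was just torn down. */
static struct page *lookup(int attempt)
{
	static struct page p;

	p.mapping = (attempt >= 1) ? &p : NULL;  /* stable on retry */
	p.in_swapcache = (attempt == 0);         /* swizzled only once */
	return &p;
}

static int resolve(void)
{
	for (int attempt = 0; ; attempt++) {
		struct page *page = lookup(attempt);

		if (!page->mapping) {
			/* Sample the reason while we still hold the page. */
			bool swizzled = page->in_swapcache;

			if (swizzled)
				continue;        /* transient: retry lookup */
			return -EFAULT;          /* permanent: give up */
		}
		printf("resolved on attempt %d\n", attempt);
		return 0;
	}
}

int main(void) { return resolve() ? 1 : 0; }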
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 8b1748d0172c..2e48ec0c2e91 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -74,11 +74,17 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 
 	/*
 	 * Ensure the task is not frozen.
-	 * Also, when a freshly created task is scheduled once, changes
-	 * its state to TASK_UNINTERRUPTIBLE without having ever been
-	 * switched out once, it musn't be checked.
+	 * Also, skip vfork and any other user process that freezer should skip.
 	 */
-	if (unlikely(t->flags & PF_FROZEN || !switch_count))
+	if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
+		return;
+
+	/*
+	 * When a freshly created task is scheduled once, changes its state to
+	 * TASK_UNINTERRUPTIBLE without having ever been switched out once, it
+	 * musn't be checked.
+	 */
+	if (unlikely(!switch_count))
 		return;
 
 	if (switch_count != t->last_switch_count) {
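Two details worth noting: testing both freezer-related flags with one mask, flags & (PF_FROZEN | PF_FREEZER_SKIP), is the idiomatic single-branch form of two separate tests, and the never-scheduled case now gets its own check and comment. A compact sketch of the resulting predicate (flag values below are illustrative, not the kernel's):

#include <assert.h>
#include <stdbool.h>

#define PF_FROZEN        0x00010000u    /* values are illustrative, */
#define PF_FREEZER_SKIP  0x40000000u    /* not the kernel's actual bits */

static bool skip_hung_check(unsigned int flags, unsigned long switch_count)
{
	/* One masked test covers both freezer-related reasons to skip. */
	if (flags & (PF_FROZEN | PF_FREEZER_SKIP))
		return true;

	/* Never-scheduled tasks have no switch history to compare yet. */
	if (!switch_count)
		return true;

	return false;
}

int main(void)
{
	assert(skip_hung_check(PF_FREEZER_SKIP, 5));  /* e.g. vfork parent */
	assert(skip_hung_check(0, 0));                /* fresh task */
	assert(!skip_hung_check(0, 5));               /* gets checked */
	return 0;
}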
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 24d04477b257..78ab24a7b0e4 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -96,9 +96,20 @@ void __ptrace_unlink(struct task_struct *child)
 	 */
 	if (!(child->flags & PF_EXITING) &&
 	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
-	     child->signal->group_stop_count))
+	     child->signal->group_stop_count)) {
 		child->jobctl |= JOBCTL_STOP_PENDING;
 
+		/*
+		 * This is only possible if this thread was cloned by the
+		 * traced task running in the stopped group, set the signal
+		 * for the future reports.
+		 * FIXME: we should change ptrace_init_task() to handle this
+		 * case.
+		 */
+		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
+			child->jobctl |= SIGSTOP;
+	}
+
 	/*
 	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
 	 * @child in the butt. Note that @resume should be used iff @child
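JOBCTL_STOP_SIGMASK reserves the low bits of child->jobctl for the number of the pending stop signal, so a zero mask means no signal was ever recorded, and the hunk seeds it with SIGSTOP. A small sketch of that bit-packing (the 0xffff mask width matches the kernel's; the PENDING bit position here is illustrative):

#include <assert.h>
#include <signal.h>

#define JOBCTL_STOP_SIGMASK  0xffffUL       /* low bits hold a signr */
#define JOBCTL_STOP_PENDING  (1UL << 17)    /* illustrative bit position */

/* Mark a group stop pending, defaulting the stop signal to SIGSTOP. */
static unsigned long mark_stop_pending(unsigned long jobctl)
{
	jobctl |= JOBCTL_STOP_PENDING;
	if (!(jobctl & JOBCTL_STOP_SIGMASK))
		jobctl |= SIGSTOP;       /* no signr recorded yet: seed one */
	return jobctl;
}

int main(void)
{
	/* Freshly cloned thread: no stop signal inherited. */
	unsigned long j = mark_stop_pending(0);
	assert((j & JOBCTL_STOP_SIGMASK) == SIGSTOP);

	/* Already carries SIGTSTP: left untouched. */
	j = mark_stop_pending(SIGTSTP);
	assert((j & JOBCTL_STOP_SIGMASK) == SIGTSTP);
	return 0;
}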
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a78ed2736ba7..8a39fa3e3c6c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2352,13 +2352,11 @@ again:
 		if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
 			continue;
 
-		if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) {
-			if (!smt) {
-				smt = 1;
-				goto again;
-			}
+		if (smt && !(sd->flags & SD_SHARE_CPUPOWER))
+			break;
+
+		if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
 			break;
-		}
 
 		sg = sd->groups;
 		do {
@@ -2378,6 +2376,10 @@ next:
 			sg = sg->next;
 		} while (sg != sd->groups);
 	}
+	if (!smt) {
+		smt = 1;
+		goto again;
+	}
 done:
 	rcu_read_unlock();
 
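The restructuring changes control flow rather than policy: instead of jumping back to again: from inside the domain walk as soon as a non-resource-sharing level is reached, the scan now completes one full strict pass and only afterwards retries once with smt = 1 (second hunk). A minimal sketch of that "finish the pass, then relax and rescan" loop shape (toy data, invented names):

#include <stdio.h>

/* Toy stand-in: pick the first entry passing the current pass's test. */
static int find_idle(const int *load, int n)
{
	int smt = 0;
again:
	for (int i = 0; i < n; i++) {
		if (load[i] > (smt ? 2 : 0))
			continue;        /* too busy for this pass */
		return i;
	}
	if (!smt) {
		smt = 1;                 /* relax once, after a complete pass */
		goto again;
	}
	return -1;
}

int main(void)
{
	int load[] = { 3, 2, 4 };

	printf("picked %d\n", find_idle(load, 3)); /* 1, on the relaxed pass */
	return 0;
}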
diff --git a/kernel/signal.c b/kernel/signal.c
index b3f78d09a105..206551563cce 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1994,8 +1994,6 @@ static bool do_signal_stop(int signr)
 		 */
 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
 			sig->group_exit_code = signr;
-		else
-			WARN_ON_ONCE(!current->ptrace);
 
 		sig->group_stop_count = 0;
 
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 6318b511afa1..a650694883a1 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1354,7 +1354,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
 
 	fput(file);
 out_putname:
-	putname(pathname);
+	__putname(pathname);
 out:
 	return result;
 }
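This one-token change is an allocator-pairing fix: pathname in this function comes from __getname(), whose release counterpart is __putname(); plain putname() pairs with getname(), which does extra bookkeeping for names getname() registered, so calling it on a bare buffer is a mismatch. A toy illustration of keeping such pairs symmetric (the names mirror the kernel's, but the bodies are userspace stand-ins):

#include <stdlib.h>
#include <string.h>

#define PATH_MAX 4096

/* Bare buffer allocation, no tracking: pair with raw_putname(). */
static char *raw_getname(void) { return malloc(PATH_MAX); }
static void raw_putname(char *name) { free(name); }

/* Tracked variant: registers the name somewhere (here: a counter). */
static int tracked;

static char *tracked_getname(const char *src)
{
	char *n = raw_getname();

	if (n) {
		strncpy(n, src, PATH_MAX - 1);
		n[PATH_MAX - 1] = '\0';
		tracked++;               /* stand-in for the bookkeeping */
	}
	return n;
}

static void tracked_putname(char *name)
{
	tracked--;                       /* would underflow for raw names */
	raw_putname(name);
}

int main(void)
{
	char *a = tracked_getname("/proc/sys/kernel/ostype");
	tracked_putname(a);              /* tracked pair: fine */

	char *b = raw_getname();
	raw_putname(b);                  /* bare pair: fine */
	/* Mixing them, e.g. tracked_putname(b), is the class of bug fixed here. */
	return tracked == 0 ? 0 : 1;
}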
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index c4eb71c8b2ea..1ecd6ba36d6c 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -387,7 +387,6 @@ void clockevents_exchange_device(struct clock_event_device *old,
 	 * released list and do a notify add later.
 	 */
 	if (old) {
-		old->event_handler = clockevents_handle_noop;
 		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
 		list_del(&old->list);
 		list_add(&old->list, &clockevents_released);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index da2f760e780c..d3ad022136e5 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -647,7 +647,7 @@ static void clocksource_enqueue(struct clocksource *cs)
 
 /**
  * __clocksource_updatefreq_scale - Used update clocksource with new freq
- * @t: clocksource to be registered
+ * @cs: clocksource to be registered
  * @scale: Scale factor multiplied against freq to get clocksource hz
  * @freq: clocksource frequency (cycles per second) divided by scale
  *
@@ -699,7 +699,7 @@ EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
 
 /**
  * __clocksource_register_scale - Used to install new clocksources
- * @t: clocksource to be registered
+ * @cs: clocksource to be registered
  * @scale: Scale factor multiplied against freq to get clocksource hz
  * @freq: clocksource frequency (cycles per second) divided by scale
  *
@@ -727,7 +727,7 @@ EXPORT_SYMBOL_GPL(__clocksource_register_scale);
 
 /**
  * clocksource_register - Used to install new clocksources
- * @t: clocksource to be registered
+ * @cs: clocksource to be registered
  *
  * Returns -EBUSY if registration fails, zero otherwise.
  */
@@ -761,6 +761,8 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
 
 /**
  * clocksource_change_rating - Change the rating of a registered clocksource
+ * @cs: clocksource to be changed
+ * @rating: new rating
  */
 void clocksource_change_rating(struct clocksource *cs, int rating)
 {
@@ -772,6 +774,7 @@ EXPORT_SYMBOL(clocksource_change_rating);
 
 /**
  * clocksource_unregister - remove a registered clocksource
+ * @cs: clocksource to be unregistered
  */
 void clocksource_unregister(struct clocksource *cs)
 {
@@ -787,6 +790,7 @@ EXPORT_SYMBOL(clocksource_unregister);
 /**
  * sysfs_show_current_clocksources - sysfs interface for current clocksource
  * @dev: unused
+ * @attr: unused
  * @buf: char buffer to be filled with clocksource list
  *
  * Provides sysfs interface for listing current clocksource.
@@ -807,6 +811,7 @@ sysfs_show_current_clocksources(struct sys_device *dev,
 /**
  * sysfs_override_clocksource - interface for manually overriding clocksource
  * @dev: unused
+ * @attr: unused
  * @buf: name of override clocksource
  * @count: length of buffer
  *
@@ -842,6 +847,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
 /**
  * sysfs_show_available_clocksources - sysfs interface for listing clocksource
  * @dev: unused
+ * @attr: unused
  * @buf: char buffer to be filled with clocksource list
  *
  * Provides sysfs interface for listing registered clocksources