Diffstat (limited to 'kernel')

 -rw-r--r--  kernel/audit.c            |   6
 -rw-r--r--  kernel/auditsc.c          |   2
 -rw-r--r--  kernel/exit.c             |   1
 -rw-r--r--  kernel/kexec.c            |   7
 -rw-r--r--  kernel/posix-cpu-timers.c | 110
 -rw-r--r--  kernel/posix-timers.c     |   2
 -rw-r--r--  kernel/power/swsusp.c     |   2
 -rw-r--r--  kernel/sched.c            |   1
 -rw-r--r--  kernel/signal.c           |  14

 9 files changed, 75 insertions(+), 70 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index aefa73a8a586..0c56320d38dc 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -133,7 +133,7 @@ struct audit_buffer {
 	struct list_head     list;
 	struct sk_buff       *skb;	/* formatted skb ready to send */
 	struct audit_context *ctx;	/* NULL or associated context */
-	int		     gfp_mask;
+	gfp_t		     gfp_mask;
 };
 
 static void audit_set_pid(struct audit_buffer *ab, pid_t pid)
@@ -647,7 +647,7 @@ static inline void audit_get_stamp(struct audit_context *ctx,
  * will be written at syscall exit. If there is no associated task, tsk
  * should be NULL. */
 
-struct audit_buffer *audit_log_start(struct audit_context *ctx, int gfp_mask,
+struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 				     int type)
 {
 	struct audit_buffer *ab = NULL;
@@ -879,7 +879,7 @@ void audit_log_end(struct audit_buffer *ab)
 /* Log an audit record. This is a convenience function that calls
  * audit_log_start, audit_log_vformat, and audit_log_end. It may be
  * called in any context. */
-void audit_log(struct audit_context *ctx, int gfp_mask, int type,
+void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
 	       const char *fmt, ...)
 {
 	struct audit_buffer *ab;
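
Note on the int -> gfp_t conversions repeated throughout this patch: gfp_t is a typedef over unsigned int carrying an annotation that only sparse sees, so static checking can flag allocation flags being mixed up with ordinary integers while the compiled code is unchanged. A standalone sketch of the idea, assuming the kernel's __CHECKER__/__bitwise convention (illustrative, not a copy of the kernel headers):

#include <stdlib.h>

/* Under sparse, __CHECKER__ is defined and the annotation makes gfp_t a
 * distinct type, so passing a bare int where allocation flags belong is
 * flagged; for a normal compile the annotation expands to nothing.
 */
#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#else
#define __bitwise
#endif

typedef unsigned int __bitwise gfp_t;

static void *demo_alloc(gfp_t flags, size_t size)
{
	(void)flags;			/* a real allocator would honor these */
	return malloc(size);		/* stand-in for kmalloc() */
}

int main(void)
{
	gfp_t flags = (gfp_t)0x10;	/* stand-in for GFP_KERNEL */
	void *p = demo_alloc(flags, 64);

	free(p);
	return 0;
}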
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 88696f639aab..d8a68509e729 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -803,7 +803,7 @@ static void audit_log_task_info(struct audit_buffer *ab)
 	up_read(&mm->mmap_sem);
 }
 
-static void audit_log_exit(struct audit_context *context, unsigned int gfp_mask)
+static void audit_log_exit(struct audit_context *context, gfp_t gfp_mask)
 {
 	int i;
 	struct audit_buffer *ab;
diff --git a/kernel/exit.c b/kernel/exit.c
index 43077732619b..3b25b182d2be 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -843,6 +843,7 @@ fastcall NORET_TYPE void do_exit(long code)
 	group_dead = atomic_dec_and_test(&tsk->signal->live);
 	if (group_dead) {
 		del_timer_sync(&tsk->signal->real_timer);
+		exit_itimers(tsk->signal);
 		acct_process(code);
 	}
 	exit_mm(tsk);
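
This one-line addition pairs with the kernel/signal.c hunk at the end of this patch: POSIX interval timers are now torn down by the last exiting thread itself, while it can still take its own sighand->siglock, instead of later from __exit_signal() when the group is reaped. A rough sketch of the resulting ordering (paraphrased, not a verbatim copy of do_exit()):

/*
 * do_exit() after this patch, paraphrased:
 *
 *	group_dead = atomic_dec_and_test(&tsk->signal->live);
 *	if (group_dead) {
 *		del_timer_sync(&tsk->signal->real_timer);
 *		exit_itimers(tsk->signal);	// new: timers die with the
 *						// last live thread
 *		acct_process(code);
 *	}
 *	exit_mm(tsk);
 *	...
 *
 * __exit_signal() in kernel/signal.c (last hunk below) loses its
 * exit_itimers() call, and the deadlock-avoidance comment with it.
 */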
diff --git a/kernel/kexec.c b/kernel/kexec.c
index cdd4dcd8fb63..36c5d9cd4cc1 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -90,7 +90,7 @@ int kexec_should_crash(struct task_struct *p)
 static int kimage_is_destination_range(struct kimage *image,
 				       unsigned long start, unsigned long end);
 static struct page *kimage_alloc_page(struct kimage *image,
-				       unsigned int gfp_mask,
+				       gfp_t gfp_mask,
 				       unsigned long dest);
 
 static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
@@ -326,8 +326,7 @@ static int kimage_is_destination_range(struct kimage *image,
 	return 0;
 }
 
-static struct page *kimage_alloc_pages(unsigned int gfp_mask,
-					unsigned int order)
+static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
 {
 	struct page *pages;
 
@@ -654,7 +653,7 @@ static kimage_entry_t *kimage_dst_used(struct kimage *image,
 }
 
 static struct page *kimage_alloc_page(struct kimage *image,
-					unsigned int gfp_mask,
+					gfp_t gfp_mask,
 					unsigned long destination)
 {
 	/*
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index b3f3edc475de..bf374fceb39c 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -91,7 +91,7 @@ static inline union cpu_time_count cpu_time_sub(clockid_t which_clock,
  * Update expiry time from increment, and increase overrun count,
  * given the current clock sample.
  */
-static inline void bump_cpu_timer(struct k_itimer *timer,
+static void bump_cpu_timer(struct k_itimer *timer,
 				  union cpu_time_count now)
 {
 	int i;
@@ -110,7 +110,7 @@ static inline void bump_cpu_timer(struct k_itimer *timer,
 		for (i = 0; incr < delta - incr; i++)
 			incr = incr << 1;
 		for (; i >= 0; incr >>= 1, i--) {
-			if (delta <= incr)
+			if (delta < incr)
 				continue;
 			timer->it.cpu.expires.sched += incr;
 			timer->it_overrun += 1 << i;
@@ -128,7 +128,7 @@ static inline void bump_cpu_timer(struct k_itimer *timer,
 		for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
 			incr = cputime_add(incr, incr);
 		for (; i >= 0; incr = cputime_halve(incr), i--) {
-			if (cputime_le(delta, incr))
+			if (cputime_lt(delta, incr))
 				continue;
 			timer->it.cpu.expires.cpu =
 				cputime_add(timer->it.cpu.expires.cpu, incr);
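
Both comparison fixes above address the same boundary: when the remaining delta is exactly one increment, the old test (<= / cputime_le) skipped it, leaving the expiry a full period in the past and the overrun count one short. A userspace model of the sched-clock branch (the algorithm mirrors the hunk; the sample values are illustrative):

#include <stdio.h>

/* Advance *expires past `now` in whole multiples of `incr`, counting the
 * skipped periods, using doubling/halving so delta/incr is never computed
 * via a potentially overflowing incr * 2.  The fix turns `delta <= incr`
 * into `delta < incr`: with the old test, a delta exactly equal to the
 * current increment was never consumed (here: overrun=2, expires=120,
 * i.e. still not in the future).
 */
static unsigned long long bump(unsigned long long *expires,
			       unsigned long long now,
			       unsigned long long incr)
{
	unsigned long long delta = now + incr - *expires;
	unsigned long long overrun = 0;
	int i;

	for (i = 0; incr < delta - incr; i++)
		incr <<= 1;
	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)	/* was: delta <= incr */
			continue;
		*expires += incr;
		overrun += 1ULL << i;
		delta -= incr;
	}
	return overrun;
}

int main(void)
{
	unsigned long long expires = 100;

	/* period 10, expiries due at 100/110/120, clock now at 120 */
	printf("overrun=%llu expires=%llu\n",
	       bump(&expires, 120, 10), expires);	/* overrun=3 expires=130 */
	return 0;
}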
@@ -380,28 +380,31 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
 int posix_cpu_timer_del(struct k_itimer *timer)
 {
 	struct task_struct *p = timer->it.cpu.task;
+	int ret = 0;
 
-	if (timer->it.cpu.firing)
-		return TIMER_RETRY;
-
-	if (unlikely(p == NULL))
-		return 0;
+	if (likely(p != NULL)) {
+		read_lock(&tasklist_lock);
+		if (unlikely(p->signal == NULL)) {
+			/*
+			 * We raced with the reaping of the task.
+			 * The deletion should have cleared us off the list.
+			 */
+			BUG_ON(!list_empty(&timer->it.cpu.entry));
+		} else {
+			spin_lock(&p->sighand->siglock);
+			if (timer->it.cpu.firing)
+				ret = TIMER_RETRY;
+			else
+				list_del(&timer->it.cpu.entry);
+			spin_unlock(&p->sighand->siglock);
+		}
+		read_unlock(&tasklist_lock);
 
-	spin_lock(&p->sighand->siglock);
-	if (!list_empty(&timer->it.cpu.entry)) {
-		/*
-		 * Take us off the task's timer list.  We don't need to
-		 * take tasklist_lock and check for the task being reaped.
-		 * If it was reaped, it already called posix_cpu_timers_exit
-		 * and posix_cpu_timers_exit_group to clear all the timers
-		 * that pointed to it.
-		 */
-		list_del(&timer->it.cpu.entry);
-		put_task_struct(p);
+		if (!ret)
+			put_task_struct(p);
 	}
-	spin_unlock(&p->sighand->siglock);
 
-	return 0;
+	return ret;
 }
 
 /*
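
The rewrite above changes the locking protocol: tasklist_lock is taken before p->sighand is touched, so a concurrently reaped task is detected via p->signal == NULL rather than dereferenced, and a timer caught in mid-fire now reports TIMER_RETRY instead of deleting anything. The generic posix-timers layer spins on that return code; roughly, from the deletion path of this era (paraphrased, cf. the retry_delete label visible in the kernel/posix-timers.c hunk below):

/*
 * retry_delete:
 *	spin_lock_irqsave(&timer->it_lock, flags);
 *	if (timer_delete_hook(timer) == TIMER_RETRY) {
 *		unlock_timer(timer, flags);
 *		goto retry_delete;	// firing in progress; try again
 *	}
 *	...
 */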
@@ -418,8 +421,6 @@ static void cleanup_timers(struct list_head *head,
 	cputime_t ptime = cputime_add(utime, stime);
 
 	list_for_each_entry_safe(timer, next, head, entry) {
-		put_task_struct(timer->task);
-		timer->task = NULL;
 		list_del_init(&timer->entry);
 		if (cputime_lt(timer->expires.cpu, ptime)) {
 			timer->expires.cpu = cputime_zero;
@@ -431,8 +432,6 @@ static void cleanup_timers(struct list_head *head,
 
 	++head;
 	list_for_each_entry_safe(timer, next, head, entry) {
-		put_task_struct(timer->task);
-		timer->task = NULL;
 		list_del_init(&timer->entry);
 		if (cputime_lt(timer->expires.cpu, utime)) {
 			timer->expires.cpu = cputime_zero;
@@ -444,8 +443,6 @@ static void cleanup_timers(struct list_head *head,
 
 	++head;
 	list_for_each_entry_safe(timer, next, head, entry) {
-		put_task_struct(timer->task);
-		timer->task = NULL;
 		list_del_init(&timer->entry);
 		if (timer->expires.sched < sched_time) {
 			timer->expires.sched = 0;
@@ -489,6 +486,9 @@ static void process_timer_rebalance(struct task_struct *p,
 	struct task_struct *t = p;
 	unsigned int nthreads = atomic_read(&p->signal->live);
 
+	if (!nthreads)
+		return;
+
 	switch (clock_idx) {
 	default:
 		BUG();
@@ -497,7 +497,7 @@ static void process_timer_rebalance(struct task_struct *p,
 		left = cputime_div(cputime_sub(expires.cpu, val.cpu),
 				   nthreads);
 		do {
-			if (!unlikely(t->exit_state)) {
+			if (!unlikely(t->flags & PF_EXITING)) {
 				ticks = cputime_add(prof_ticks(t), left);
 				if (cputime_eq(t->it_prof_expires,
 					       cputime_zero) ||
@@ -512,7 +512,7 @@ static void process_timer_rebalance(struct task_struct *p,
 		left = cputime_div(cputime_sub(expires.cpu, val.cpu),
 				   nthreads);
 		do {
-			if (!unlikely(t->exit_state)) {
+			if (!unlikely(t->flags & PF_EXITING)) {
 				ticks = cputime_add(virt_ticks(t), left);
 				if (cputime_eq(t->it_virt_expires,
 					       cputime_zero) ||
@@ -527,7 +527,7 @@ static void process_timer_rebalance(struct task_struct *p,
 		nsleft = expires.sched - val.sched;
 		do_div(nsleft, nthreads);
 		do {
-			if (!unlikely(t->exit_state)) {
+			if (!unlikely(t->flags & PF_EXITING)) {
 				ns = t->sched_time + nsleft;
 				if (t->it_sched_expires == 0 ||
 				    t->it_sched_expires > ns) {
@@ -566,6 +566,9 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 	struct cpu_timer_list *next;
 	unsigned long i;
 
+	if (CPUCLOCK_PERTHREAD(timer->it_clock) && (p->flags & PF_EXITING))
+		return;
+
 	head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
 		p->cpu_timers : p->signal->cpu_timers);
 	head += CPUCLOCK_WHICH(timer->it_clock);
@@ -576,17 +579,15 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 	listpos = head;
 	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
 		list_for_each_entry(next, head, entry) {
-			if (next->expires.sched > nt->expires.sched) {
-				listpos = &next->entry;
+			if (next->expires.sched > nt->expires.sched)
 				break;
-			}
+			listpos = &next->entry;
 		}
 	} else {
 		list_for_each_entry(next, head, entry) {
-			if (cputime_gt(next->expires.cpu, nt->expires.cpu)) {
-				listpos = &next->entry;
+			if (cputime_gt(next->expires.cpu, nt->expires.cpu))
 				break;
-			}
+			listpos = &next->entry;
 		}
 	}
 	list_add(&nt->entry, listpos);
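
The loops above previously set listpos to the first entry expiring later and then broke out; since list_add() inserts after its second argument, the new timer landed on the wrong side of that entry and the list lost its sort order. A userspace model of the fixed search (the list_add() semantics mirror include/linux/list.h; the values are illustrative):

#include <stdio.h>

/* Minimal circular doubly linked list; list_add(n, pos) inserts n
 * immediately AFTER pos, exactly like the kernel primitive.  The fixed
 * loop breaks on the first later-expiring entry and otherwise advances
 * listpos, so the new timer lands after the last earlier-or-equal one.
 */
struct node {
	unsigned long expires;
	struct node *next, *prev;
};

static void list_add(struct node *n, struct node *pos)
{
	n->next = pos->next;
	n->prev = pos;
	pos->next->prev = n;
	pos->next = n;
}

int main(void)
{
	struct node head = { 0, &head, &head };
	struct node a = { 10 }, b = { 30 }, nt = { 20 };
	struct node *listpos = &head, *next;

	list_add(&a, &head);
	list_add(&b, &a);			/* list: 10 30 */

	for (next = head.next; next != &head; next = next->next) {
		if (next->expires > nt.expires)
			break;			/* fixed: break before moving */
		listpos = next;
	}
	list_add(&nt, listpos);			/* 20 lands between 10 and 30 */

	for (next = head.next; next != &head; next = next->next)
		printf("%lu ", next->expires);	/* prints: 10 20 30 */
	printf("\n");
	return 0;
}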
@@ -730,9 +731,15 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 	 * Disarm any old timer after extracting its expiry time.
 	 */
 	BUG_ON(!irqs_disabled());
+
+	ret = 0;
 	spin_lock(&p->sighand->siglock);
 	old_expires = timer->it.cpu.expires;
-	list_del_init(&timer->it.cpu.entry);
+	if (unlikely(timer->it.cpu.firing)) {
+		timer->it.cpu.firing = -1;
+		ret = TIMER_RETRY;
+	} else
+		list_del_init(&timer->it.cpu.entry);
 	spin_unlock(&p->sighand->siglock);
 
 	/*
@@ -780,7 +787,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		}
 	}
 
-	if (unlikely(timer->it.cpu.firing)) {
+	if (unlikely(ret)) {
 		/*
 		 * We are colliding with the timer actually firing.
 		 * Punt after filling in the timer's old value, and
@@ -788,8 +795,6 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		 * it as an overrun (thanks to bump_cpu_timer above).
 		 */
 		read_unlock(&tasklist_lock);
-		timer->it.cpu.firing = -1;
-		ret = TIMER_RETRY;
 		goto out;
 	}
 
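
Setting firing = -1 under the siglock (earlier in this function, rather than here after the locks are dropped) is a handshake with the firing path: run_posix_cpu_timers() collects expired timers on a local list and fires them afterwards, and a negative firing value tells it that a concurrent settime call already accounted for the event. A paraphrase of the consuming loop (an assumption about this file's firing path, which is not shown in the hunks above):

/*
 *	spin_lock(&timer->it_lock);
 *	list_del_init(&timer->it.cpu.entry);
 *	cpu_firing = timer->it.cpu.firing;
 *	timer->it.cpu.firing = 0;
 *	if (likely(cpu_firing >= 0))
 *		cpu_timer_fire(timer);	// deliver the signal
 *	spin_unlock(&timer->it_lock);
 */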
@@ -955,14 +960,16 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 static void check_thread_timers(struct task_struct *tsk,
 				struct list_head *firing)
 {
+	int maxfire;
 	struct list_head *timers = tsk->cpu_timers;
 
+	maxfire = 20;
 	tsk->it_prof_expires = cputime_zero;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_entry(timers->next,
 						      struct cpu_timer_list,
 						      entry);
-		if (cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
+		if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
 			tsk->it_prof_expires = t->expires.cpu;
 			break;
 		}
@@ -971,12 +978,13 @@ static void check_thread_timers(struct task_struct *tsk,
 	}
 
 	++timers;
+	maxfire = 20;
 	tsk->it_virt_expires = cputime_zero;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_entry(timers->next,
 						      struct cpu_timer_list,
 						      entry);
-		if (cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
+		if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
 			tsk->it_virt_expires = t->expires.cpu;
 			break;
 		}
@@ -985,12 +993,13 @@ static void check_thread_timers(struct task_struct *tsk,
 	}
 
 	++timers;
+	maxfire = 20;
 	tsk->it_sched_expires = 0;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_entry(timers->next,
 						      struct cpu_timer_list,
 						      entry);
-		if (tsk->sched_time < t->expires.sched) {
+		if (!--maxfire || tsk->sched_time < t->expires.sched) {
 			tsk->it_sched_expires = t->expires.sched;
 			break;
 		}
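
All six maxfire additions (here and in check_process_timers() below) apply the same bound: one scan moves at most 19 expired timers to the firing list, then treats the 20th list entry as if it had not yet expired, recording its expiry as the next wake-up and deferring the remainder to a later tick instead of stalling in timer-interrupt context. A userspace model of one capped scan (the cap of 20 is from the patch; everything else is illustrative):

#include <stdio.h>

#define NTIMERS 50

int main(void)
{
	unsigned long expires[NTIMERS], now = 1000, next_expiry = 0;
	int i, fired = 0, maxfire = 20;

	for (i = 0; i < NTIMERS; i++)
		expires[i] = i + 1;		/* all 50 already expired */

	for (i = 0; i < NTIMERS; i++) {
		/* pre-decrement: the 20th entry stops the scan */
		if (!--maxfire || now < expires[i]) {
			next_expiry = expires[i];
			break;
		}
		fired++;			/* kernel: move to firing list */
	}
	printf("fired=%d next_expiry=%lu\n", fired, next_expiry);
	return 0;				/* prints: fired=19 next_expiry=20 */
}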
@@ -1007,6 +1016,7 @@ static void check_thread_timers(struct task_struct *tsk,
 static void check_process_timers(struct task_struct *tsk,
 				 struct list_head *firing)
 {
+	int maxfire;
 	struct signal_struct *const sig = tsk->signal;
 	cputime_t utime, stime, ptime, virt_expires, prof_expires;
 	unsigned long long sched_time, sched_expires;
@@ -1039,12 +1049,13 @@ static void check_process_timers(struct task_struct *tsk,
 	} while (t != tsk);
 	ptime = cputime_add(utime, stime);
 
+	maxfire = 20;
 	prof_expires = cputime_zero;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_entry(timers->next,
 						      struct cpu_timer_list,
 						      entry);
-		if (cputime_lt(ptime, t->expires.cpu)) {
+		if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) {
 			prof_expires = t->expires.cpu;
 			break;
 		}
@@ -1053,12 +1064,13 @@ static void check_process_timers(struct task_struct *tsk,
 	}
 
 	++timers;
+	maxfire = 20;
 	virt_expires = cputime_zero;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_entry(timers->next,
 						      struct cpu_timer_list,
 						      entry);
-		if (cputime_lt(utime, t->expires.cpu)) {
+		if (!--maxfire || cputime_lt(utime, t->expires.cpu)) {
 			virt_expires = t->expires.cpu;
 			break;
 		}
@@ -1067,12 +1079,13 @@ static void check_process_timers(struct task_struct *tsk,
 	}
 
 	++timers;
+	maxfire = 20;
 	sched_expires = 0;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_entry(timers->next,
 						      struct cpu_timer_list,
 						      entry);
-		if (sched_time < t->expires.sched) {
+		if (!--maxfire || sched_time < t->expires.sched) {
 			sched_expires = t->expires.sched;
 			break;
 		}
@@ -1155,6 +1168,9 @@ static void check_process_timers(struct task_struct *tsk,
 		unsigned long long sched_left, sched;
 		const unsigned int nthreads = atomic_read(&sig->live);
 
+		if (!nthreads)
+			return;
+
 		prof_left = cputime_sub(prof_expires, utime);
 		prof_left = cputime_sub(prof_left, stime);
 		prof_left = cputime_div(prof_left, nthreads);
@@ -1191,7 +1207,7 @@ static void check_process_timers(struct task_struct *tsk,
 
 			do {
 				t = next_thread(t);
-			} while (unlikely(t->exit_state));
+			} while (unlikely(t->flags & PF_EXITING));
 		} while (t != tsk);
 	}
 }
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index b7b532acd9fc..dda3cda73c77 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -1157,7 +1157,7 @@ retry_delete:
 }
 
 /*
- * This is called by __exit_signal, only when there are no more
+ * This is called by do_exit or de_thread, only when there are no more
  * references to the shared signal_struct.
  */
 void exit_itimers(struct signal_struct *sig)
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 2d5c45676442..10bc5ec496d7 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -1095,7 +1095,7 @@ static inline void eat_page(void *page)
 	*eaten_memory = c;
 }
 
-unsigned long get_usable_page(unsigned gfp_mask)
+unsigned long get_usable_page(gfp_t gfp_mask)
 {
 	unsigned long m;
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 1f31a528fdba..1e5cafdf4e27 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3879,6 +3879,7 @@ EXPORT_SYMBOL(cpu_present_map);
 
 #ifndef CONFIG_SMP
 cpumask_t cpu_online_map = CPU_MASK_ALL;
+EXPORT_SYMBOL_GPL(cpu_online_map);
 cpumask_t cpu_possible_map = CPU_MASK_ALL;
 #endif
 
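
The new export matters only on uniprocessor builds, where cpu_online_map is an actual variable defined here rather than a compile-time constant; without it, a module that walks online CPUs links on SMP kernels but fails to resolve the symbol on UP ones. A hypothetical module fragment that depends on it (the names and message are illustrative):

#include <linux/module.h>
#include <linux/cpumask.h>

static int __init demo_init(void)
{
	int cpu;

	/* for_each_online_cpu() expands to a read of cpu_online_map */
	for_each_online_cpu(cpu)
		printk(KERN_INFO "cpu %d is online\n", cpu);
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");		/* required by the _GPL export */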
diff --git a/kernel/signal.c b/kernel/signal.c
index 50c992643771..f2b96b08fb44 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -397,20 +397,8 @@ void __exit_signal(struct task_struct *tsk)
 	flush_sigqueue(&tsk->pending);
 	if (sig) {
 		/*
-		 * We are cleaning up the signal_struct here.  We delayed
-		 * calling exit_itimers until after flush_sigqueue, just in
-		 * case our thread-local pending queue contained a queued
-		 * timer signal that would have been cleared in
-		 * exit_itimers.  When that called sigqueue_free, it would
-		 * attempt to re-take the tasklist_lock and deadlock.  This
-		 * can never happen if we ensure that all queues the
-		 * timer's signal might be queued on have been flushed
-		 * first.  The shared_pending queue, and our own pending
-		 * queue are the only queues the timer could be on, since
-		 * there are no other threads left in the group and timer
-		 * signals are constrained to threads inside the group.
+		 * We are cleaning up the signal_struct here.
 		 */
-		exit_itimers(sig);
 		exit_thread_group_keys(sig);
 		kmem_cache_free(signal_cachep, sig);
 	}