Diffstat (limited to 'kernel')
 kernel/acct.c         | 115
 kernel/compat.c       |   7
 kernel/exit.c         |   9
 kernel/fork.c         |   1
 kernel/hrtimer.c      |  15
 kernel/kthread.c      |  61
 kernel/module.c       |   2
 kernel/power/Kconfig  |   2
 kernel/power/disk.c   |   2
 kernel/power/main.c   |   4
 kernel/printk.c       |  52
 kernel/sched.c        |   6
 kernel/softirq.c      |   2
 kernel/softlockup.c   |   4
 kernel/stop_machine.c |  17
 kernel/sys.c          |  10
 kernel/sysctl.c       |  11
 kernel/timer.c        |   2
 kernel/workqueue.c    |  30
 19 files changed, 226 insertions(+), 126 deletions(-)
diff --git a/kernel/acct.c b/kernel/acct.c
index 6802020e0ceb..368c4f03fe0e 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -75,7 +75,7 @@ int acct_parm[3] = {4, 2, 30};
 /*
  * External references and all of the globals.
  */
-static void do_acct_process(long, struct file *);
+static void do_acct_process(struct file *);
 
 /*
  * This structure is used so that all the data protected by lock
@@ -196,7 +196,7 @@ static void acct_file_reopen(struct file *file)
 	if (old_acct) {
 		mnt_unpin(old_acct->f_vfsmnt);
 		spin_unlock(&acct_globals.lock);
-		do_acct_process(0, old_acct);
+		do_acct_process(old_acct);
 		filp_close(old_acct, NULL);
 		spin_lock(&acct_globals.lock);
 	}
@@ -419,16 +419,15 @@ static u32 encode_float(u64 value)
 /*
  * do_acct_process does all actual work. Caller holds the reference to file.
  */
-static void do_acct_process(long exitcode, struct file *file)
+static void do_acct_process(struct file *file)
 {
+	struct pacct_struct *pacct = &current->signal->pacct;
 	acct_t ac;
 	mm_segment_t fs;
-	unsigned long vsize;
 	unsigned long flim;
 	u64 elapsed;
 	u64 run_time;
 	struct timespec uptime;
-	unsigned long jiffies;
 
 	/*
 	 * First check to see if there is enough free_space to continue
@@ -469,12 +468,6 @@ static void do_acct_process(long exitcode, struct file *file)
 #endif
 	do_div(elapsed, AHZ);
 	ac.ac_btime = xtime.tv_sec - elapsed;
-	jiffies = cputime_to_jiffies(cputime_add(current->utime,
-						 current->signal->utime));
-	ac.ac_utime = encode_comp_t(jiffies_to_AHZ(jiffies));
-	jiffies = cputime_to_jiffies(cputime_add(current->stime,
-						 current->signal->stime));
-	ac.ac_stime = encode_comp_t(jiffies_to_AHZ(jiffies));
 	/* we really need to bite the bullet and change layout */
 	ac.ac_uid = current->uid;
 	ac.ac_gid = current->gid;
@@ -496,37 +489,18 @@ static void do_acct_process(long exitcode, struct file *file)
 		old_encode_dev(tty_devnum(current->signal->tty)) : 0;
 	read_unlock(&tasklist_lock);
 
-	ac.ac_flag = 0;
-	if (current->flags & PF_FORKNOEXEC)
-		ac.ac_flag |= AFORK;
-	if (current->flags & PF_SUPERPRIV)
-		ac.ac_flag |= ASU;
-	if (current->flags & PF_DUMPCORE)
-		ac.ac_flag |= ACORE;
-	if (current->flags & PF_SIGNALED)
-		ac.ac_flag |= AXSIG;
-
-	vsize = 0;
-	if (current->mm) {
-		struct vm_area_struct *vma;
-		down_read(&current->mm->mmap_sem);
-		vma = current->mm->mmap;
-		while (vma) {
-			vsize += vma->vm_end - vma->vm_start;
-			vma = vma->vm_next;
-		}
-		up_read(&current->mm->mmap_sem);
-	}
-	vsize = vsize / 1024;
-	ac.ac_mem = encode_comp_t(vsize);
+	spin_lock(&current->sighand->siglock);
+	ac.ac_utime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_utime)));
+	ac.ac_stime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_stime)));
+	ac.ac_flag = pacct->ac_flag;
+	ac.ac_mem = encode_comp_t(pacct->ac_mem);
+	ac.ac_minflt = encode_comp_t(pacct->ac_minflt);
+	ac.ac_majflt = encode_comp_t(pacct->ac_majflt);
+	ac.ac_exitcode = pacct->ac_exitcode;
+	spin_unlock(&current->sighand->siglock);
 	ac.ac_io = encode_comp_t(0 /* current->io_usage */);	/* %% */
 	ac.ac_rw = encode_comp_t(ac.ac_io / 1024);
-	ac.ac_minflt = encode_comp_t(current->signal->min_flt +
-				     current->min_flt);
-	ac.ac_majflt = encode_comp_t(current->signal->maj_flt +
-				     current->maj_flt);
 	ac.ac_swaps = encode_comp_t(0);
-	ac.ac_exitcode = exitcode;
 
 	/*
 	 * Kernel segment override to datasegment and write it
@@ -546,12 +520,63 @@ static void do_acct_process(long exitcode, struct file *file)
 }
 
 /**
+ * acct_init_pacct - initialize a new pacct_struct
+ */
+void acct_init_pacct(struct pacct_struct *pacct)
+{
+	memset(pacct, 0, sizeof(struct pacct_struct));
+	pacct->ac_utime = pacct->ac_stime = cputime_zero;
+}
+
+/**
+ * acct_collect - collect accounting information into pacct_struct
+ * @exitcode: task exit code
+ * @group_dead: not 0, if this thread is the last one in the process.
+ */
+void acct_collect(long exitcode, int group_dead)
+{
+	struct pacct_struct *pacct = &current->signal->pacct;
+	unsigned long vsize = 0;
+
+	if (group_dead && current->mm) {
+		struct vm_area_struct *vma;
+		down_read(&current->mm->mmap_sem);
+		vma = current->mm->mmap;
+		while (vma) {
+			vsize += vma->vm_end - vma->vm_start;
+			vma = vma->vm_next;
+		}
+		up_read(&current->mm->mmap_sem);
+	}
+
+	spin_lock_irq(&current->sighand->siglock);
+	if (group_dead)
+		pacct->ac_mem = vsize / 1024;
+	if (thread_group_leader(current)) {
+		pacct->ac_exitcode = exitcode;
+		if (current->flags & PF_FORKNOEXEC)
+			pacct->ac_flag |= AFORK;
+	}
+	if (current->flags & PF_SUPERPRIV)
+		pacct->ac_flag |= ASU;
+	if (current->flags & PF_DUMPCORE)
+		pacct->ac_flag |= ACORE;
+	if (current->flags & PF_SIGNALED)
+		pacct->ac_flag |= AXSIG;
+	pacct->ac_utime = cputime_add(pacct->ac_utime, current->utime);
+	pacct->ac_stime = cputime_add(pacct->ac_stime, current->stime);
+	pacct->ac_minflt += current->min_flt;
+	pacct->ac_majflt += current->maj_flt;
+	spin_unlock_irq(&current->sighand->siglock);
+}
+
+/**
  * acct_process - now just a wrapper around do_acct_process
  * @exitcode: task exit code
  *
  * handles process accounting for an exiting task
  */
-void acct_process(long exitcode)
+void acct_process()
 {
 	struct file *file = NULL;
 
@@ -570,7 +595,7 @@ void acct_process(long exitcode)
 	get_file(file);
 	spin_unlock(&acct_globals.lock);
 
-	do_acct_process(exitcode, file);
+	do_acct_process(file);
 	fput(file);
 }
 
@@ -599,9 +624,7 @@ void acct_update_integrals(struct task_struct *tsk)
  */
 void acct_clear_integrals(struct task_struct *tsk)
 {
-	if (tsk) {
-		tsk->acct_stimexpd = 0;
-		tsk->acct_rss_mem1 = 0;
-		tsk->acct_vm_mem1 = 0;
-	}
+	tsk->acct_stimexpd = 0;
+	tsk->acct_rss_mem1 = 0;
+	tsk->acct_vm_mem1 = 0;
 }
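The ac_utime/ac_stime/ac_mem values above are written in the BSD accounting
comp_t format, which encode_comp_t() produces: a 13-bit mantissa scaled by a
3-bit base-8 exponent. A minimal sketch of that encoding (simplified; the
kernel's version also rounds before shifting, and all names here are local to
the sketch):

	typedef unsigned short comp_t;

	#define MANTSIZE 13				/* mantissa bits */
	#define EXPSIZE  3				/* base-8 exponent bits */
	#define MAXFRACT ((1 << MANTSIZE) - 1)		/* largest raw mantissa */

	static comp_t encode_comp_t_sketch(unsigned long value)
	{
		int exp = 0;

		while (value > MAXFRACT) {	/* shift in 3-bit steps, so the */
			value >>= EXPSIZE;	/* exponent counts powers of 8 */
			exp++;
		}
		return (exp << MANTSIZE) | value;
	}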
diff --git a/kernel/compat.c b/kernel/compat.c
index 2f672332430f..126dee9530aa 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -730,17 +730,10 @@ void
 sigset_from_compat (sigset_t *set, compat_sigset_t *compat)
 {
 	switch (_NSIG_WORDS) {
-#if defined (__COMPAT_ENDIAN_SWAP__)
-	case 4: set->sig[3] = compat->sig[7] | (((long)compat->sig[6]) << 32 );
-	case 3: set->sig[2] = compat->sig[5] | (((long)compat->sig[4]) << 32 );
-	case 2: set->sig[1] = compat->sig[3] | (((long)compat->sig[2]) << 32 );
-	case 1: set->sig[0] = compat->sig[1] | (((long)compat->sig[0]) << 32 );
-#else
 	case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32 );
 	case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32 );
 	case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32 );
 	case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32 );
-#endif
 	}
 }
 
diff --git a/kernel/exit.c b/kernel/exit.c
index a3baf92462bd..e76bd02e930e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -36,6 +36,7 @@
 #include <linux/compat.h>
 #include <linux/pipe_fs_i.h>
 #include <linux/audit.h> /* for audit_free() */
+#include <linux/resource.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -45,8 +46,6 @@
 extern void sem_exit (void);
 extern struct task_struct *child_reaper;
 
-int getrusage(struct task_struct *, int, struct rusage __user *);
-
 static void exit_mm(struct task_struct * tsk);
 
 static void __unhash_process(struct task_struct *p)
@@ -895,11 +894,11 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (group_dead) {
 		hrtimer_cancel(&tsk->signal->real_timer);
 		exit_itimers(tsk->signal);
-		acct_process(code);
 	}
+	acct_collect(code, group_dead);
 	if (unlikely(tsk->robust_list))
 		exit_robust_list(tsk);
-#ifdef CONFIG_COMPAT
+#if defined(CONFIG_FUTEX) && defined(CONFIG_COMPAT)
 	if (unlikely(tsk->compat_robust_list))
 		compat_exit_robust_list(tsk);
 #endif
@@ -907,6 +906,8 @@ fastcall NORET_TYPE void do_exit(long code)
 	audit_free(tsk);
 	exit_mm(tsk);
 
+	if (group_dead)
+		acct_process();
 	exit_sem(tsk);
 	__exit_files(tsk);
 	__exit_fs(tsk);
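Taken together with the acct.c hunks above, the exit path now splits
accounting into a per-thread collect step and a single write step. A
simplified sketch of the resulting order in do_exit() (condensed from the
hunks above; the group_dead computation is an assumption about the
surrounding, unchanged code of this kernel, and the elisions are unchanged
code):

	group_dead = atomic_dec_and_test(&tsk->signal->live);
	...
	acct_collect(code, group_dead);	/* before exit_mm(): the VMA walk needs tsk->mm */
	...
	exit_mm(tsk);

	if (group_dead)
		acct_process();		/* the last thread writes one record per process */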
diff --git a/kernel/fork.c b/kernel/fork.c
index 49adc0e8d47c..dfd10cb370c3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -874,6 +874,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
 		tsk->it_prof_expires =
 			secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
 	}
+	acct_init_pacct(&sig->pacct);
 
 	return 0;
 }
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 18324305724a..55601b3ce60e 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -98,7 +98,6 @@ static DEFINE_PER_CPU(struct hrtimer_base, hrtimer_bases[MAX_HRTIMER_BASES]) =
 
 /**
  * ktime_get_ts - get the monotonic clock in timespec format
- *
  * @ts: pointer to timespec variable
  *
  * The function calculates the monotonic clock from the realtime
@@ -238,7 +237,6 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 # ifndef CONFIG_KTIME_SCALAR
 /**
  * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
- *
  * @kt: addend
  * @nsec: the scalar nsec value to add
  *
@@ -299,7 +297,6 @@ void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 
 /**
  * hrtimer_forward - forward the timer expiry
- *
  * @timer: hrtimer to forward
  * @now: forward past this time
  * @interval: the interval to forward
@@ -411,7 +408,6 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
 
 /**
  * hrtimer_start - (re)start an relative timer on the current CPU
- *
  * @timer: the timer to be added
  * @tim: expiry time
  * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
@@ -460,14 +456,13 @@ EXPORT_SYMBOL_GPL(hrtimer_start);
 
 /**
  * hrtimer_try_to_cancel - try to deactivate a timer
- *
  * @timer: hrtimer to stop
  *
  * Returns:
  *  0 when the timer was not active
  *  1 when the timer was active
  * -1 when the timer is currently excuting the callback function and
- *    can not be stopped
+ *    cannot be stopped
  */
 int hrtimer_try_to_cancel(struct hrtimer *timer)
 {
@@ -489,7 +484,6 @@ EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
 
 /**
  * hrtimer_cancel - cancel a timer and wait for the handler to finish.
- *
  * @timer: the timer to be cancelled
  *
  * Returns:
@@ -510,7 +504,6 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
 
 /**
  * hrtimer_get_remaining - get remaining time for the timer
- *
  * @timer: the timer to read
  */
 ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
@@ -564,7 +557,6 @@ ktime_t hrtimer_get_next_event(void)
 
 /**
  * hrtimer_init - initialize a timer to the given clock
- *
  * @timer: the timer to be initialized
  * @clock_id: the clock to be used
  * @mode: timer mode abs/rel
@@ -576,7 +568,7 @@ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 
 	memset(timer, 0, sizeof(struct hrtimer));
 
-	bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
+	bases = __raw_get_cpu_var(hrtimer_bases);
 
 	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_ABS)
 		clock_id = CLOCK_MONOTONIC;
@@ -588,7 +580,6 @@ EXPORT_SYMBOL_GPL(hrtimer_init);
 
 /**
  * hrtimer_get_res - get the timer resolution for a clock
- *
  * @which_clock: which clock to query
  * @tp: pointer to timespec variable to store the resolution
  *
@@ -599,7 +590,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 {
 	struct hrtimer_base *bases;
 
-	bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
+	bases = __raw_get_cpu_var(hrtimer_bases);
 	*tp = ktime_to_timespec(bases[which_clock].resolution);
 
 	return 0;
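The per_cpu(var, raw_smp_processor_id()) -> __raw_get_cpu_var(var)
conversions here (and in sched.c, softlockup.c and timer.c below) are
behavior-preserving. As a rough model only -- not the arch-specific
implementation, which resolves a per-CPU offset directly instead of indexing
by CPU number -- the new accessor behaves like:

	/* Simplified model; real kernels define this per-architecture. */
	#define __raw_get_cpu_var(var)	per_cpu(var, raw_smp_processor_id())

Like raw_smp_processor_id(), it deliberately skips the preemption-safety
check, so it is only correct where the caller cannot migrate, or where an
occasional access to another CPU's instance is harmless, as in
touch_softlockup_watchdog() below.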
diff --git a/kernel/kthread.c b/kernel/kthread.c
index c5f3c6613b6d..24be714b04c7 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -45,6 +45,13 @@ struct kthread_stop_info
 static DEFINE_MUTEX(kthread_stop_lock);
 static struct kthread_stop_info kthread_stop_info;
 
+/**
+ * kthread_should_stop - should this kthread return now?
+ *
+ * When someone calls kthread_stop on your kthread, it will be woken
+ * and this will return true.  You should then return, and your return
+ * value will be passed through to kthread_stop().
+ */
 int kthread_should_stop(void)
 {
 	return (kthread_stop_info.k == current);
@@ -122,6 +129,25 @@ static void keventd_create_kthread(void *_create)
 	complete(&create->done);
 }
 
+/**
+ * kthread_create - create a kthread.
+ * @threadfn: the function to run until signal_pending(current).
+ * @data: data ptr for @threadfn.
+ * @namefmt: printf-style name for the thread.
+ *
+ * Description: This helper function creates and names a kernel
+ * thread.  The thread will be stopped: use wake_up_process() to start
+ * it.  See also kthread_run(), kthread_create_on_cpu().
+ *
+ * When woken, the thread will run @threadfn() with @data as its
+ * argument. @threadfn can either call do_exit() directly if it is a
+ * standalone thread for which noone will call kthread_stop(), or
+ * return when 'kthread_should_stop()' is true (which means
+ * kthread_stop() has been called).  The return value should be zero
+ * or a negative error number; it will be passed to kthread_stop().
+ *
+ * Returns a task_struct or ERR_PTR(-ENOMEM).
+ */
 struct task_struct *kthread_create(int (*threadfn)(void *data),
 				   void *data,
 				   const char namefmt[],
@@ -156,6 +182,15 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
 }
 EXPORT_SYMBOL(kthread_create);
 
+/**
+ * kthread_bind - bind a just-created kthread to a cpu.
+ * @k: thread created by kthread_create().
+ * @cpu: cpu (might not be online, must be possible) for @k to run on.
+ *
+ * Description: This function is equivalent to set_cpus_allowed(),
+ * except that @cpu doesn't need to be online, and the thread must be
+ * stopped (i.e., just returned from kthread_create().
+ */
 void kthread_bind(struct task_struct *k, unsigned int cpu)
 {
 	BUG_ON(k->state != TASK_INTERRUPTIBLE);
@@ -166,12 +201,36 @@ void kthread_bind(struct task_struct *k, unsigned int cpu)
 }
 EXPORT_SYMBOL(kthread_bind);
 
+/**
+ * kthread_stop - stop a thread created by kthread_create().
+ * @k: thread created by kthread_create().
+ *
+ * Sets kthread_should_stop() for @k to return true, wakes it, and
+ * waits for it to exit.  Your threadfn() must not call do_exit()
+ * itself if you use this function!  This can also be called after
+ * kthread_create() instead of calling wake_up_process(): the thread
+ * will exit without calling threadfn().
+ *
+ * Returns the result of threadfn(), or %-EINTR if wake_up_process()
+ * was never called.
+ */
 int kthread_stop(struct task_struct *k)
 {
 	return kthread_stop_sem(k, NULL);
 }
 EXPORT_SYMBOL(kthread_stop);
 
+/**
+ * kthread_stop_sem - stop a thread created by kthread_create().
+ * @k: thread created by kthread_create().
+ * @s: semaphore that @k waits on while idle.
+ *
+ * Does essentially the same thing as kthread_stop() above, but wakes
+ * @k by calling up(@s).
+ *
+ * Returns the result of threadfn(), or %-EINTR if wake_up_process()
+ * was never called.
+ */
 int kthread_stop_sem(struct task_struct *k, struct semaphore *s)
 {
 	int ret;
@@ -210,5 +269,5 @@ static __init int helper_init(void)
 
 	return 0;
 }
-core_initcall(helper_init);
 
+core_initcall(helper_init);
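The kerneldoc added above describes the full lifecycle. A minimal sketch of a
client following that documented API (hypothetical my_worker names; error
handling condensed):

	#include <linux/kthread.h>
	#include <linux/err.h>
	#include <linux/sched.h>

	static int my_worker(void *data)
	{
		while (!kthread_should_stop()) {
			/* one unit of work here */
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}
		return 0;	/* passed through to kthread_stop() */
	}

	static int my_worker_demo(void)
	{
		struct task_struct *t;

		t = kthread_create(my_worker, NULL, "my_worker/%d", 0);
		if (IS_ERR(t))
			return PTR_ERR(t);
		kthread_bind(t, 0);	/* optional: pin before the first wakeup */
		wake_up_process(t);	/* kthread_create() returns a stopped thread */
		return kthread_stop(t);	/* wakes @t again, collects its return value */
	}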
diff --git a/kernel/module.c b/kernel/module.c
index bbe04862e1b0..d75275de1c28 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1326,7 +1326,7 @@ int is_exported(const char *name, const struct module *mod)
 	if (!mod && lookup_symbol(name, __start___ksymtab, __stop___ksymtab))
 		return 1;
 	else
-		if (lookup_symbol(name, mod->syms, mod->syms + mod->num_syms))
+		if (mod && lookup_symbol(name, mod->syms, mod->syms + mod->num_syms))
 			return 1;
 	else
 		return 0;
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index cdf315e794ff..fc311a4673a2 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -38,7 +38,7 @@ config PM_DEBUG
 
 config PM_TRACE
 	bool "Suspend/resume event tracing"
-	depends on PM && PM_DEBUG && X86
+	depends on PM && PM_DEBUG && X86_32
 	default y
 	---help---
 	This enables some cheesy code to save the last PM event point in the
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 81d4d982f3f0..e13e74067845 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -231,7 +231,7 @@ static int software_resume(void)
 late_initcall(software_resume);
 
 
-static char * pm_disk_modes[] = {
+static const char * const pm_disk_modes[] = {
 	[PM_DISK_FIRMWARE]	= "firmware",
 	[PM_DISK_PLATFORM]	= "platform",
 	[PM_DISK_SHUTDOWN]	= "shutdown",
diff --git a/kernel/power/main.c b/kernel/power/main.c
index cdf0f07af92f..6d295c776794 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -145,7 +145,7 @@ static void suspend_finish(suspend_state_t state)
 
 
 
-static char *pm_states[PM_SUSPEND_MAX] = {
+static const char * const pm_states[PM_SUSPEND_MAX] = {
 	[PM_SUSPEND_STANDBY]	= "standby",
 	[PM_SUSPEND_MEM]	= "mem",
 #ifdef CONFIG_SOFTWARE_SUSPEND
@@ -262,7 +262,7 @@ static ssize_t state_show(struct subsystem * subsys, char * buf)
 static ssize_t state_store(struct subsystem * subsys, const char * buf, size_t n)
 {
 	suspend_state_t state = PM_SUSPEND_STANDBY;
-	char ** s;
+	const char * const *s;
 	char *p;
 	int error;
 	int len;
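The three power/ hunks are one constification: tables of fixed mode strings
become arrays of const pointers to const strings, so they can be placed in
read-only memory, and the iterator in state_store() has to change type to
match. What each const buys, shown on a hypothetical table of the same shape:

	static const char * const modes[] = { "standby", "mem" };

	static void walk_modes(void)
	{
		const char * const *s;	/* iterator type, as in state_store() */

		for (s = modes; s < modes + 2; s++)
			printk("%s\n", *s);

		/* modes[0] = "other";  -- rejected: the pointers are const */
		/* modes[0][0] = 'S';   -- rejected: the strings are const too */
	}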
diff --git a/kernel/printk.c b/kernel/printk.c
index 19a955619294..95b7fe17f124 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -24,6 +24,7 @@
 #include <linux/console.h>
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/interrupt.h>			/* For in_interrupt() */
 #include <linux/config.h>
 #include <linux/delay.h>
@@ -327,7 +328,9 @@ static void __call_console_drivers(unsigned long start, unsigned long end)
 	struct console *con;
 
 	for (con = console_drivers; con; con = con->next) {
-		if ((con->flags & CON_ENABLED) && con->write)
+		if ((con->flags & CON_ENABLED) && con->write &&
+				(cpu_online(smp_processor_id()) ||
+				(con->flags & CON_ANYTIME)))
 			con->write(con, &LOG_BUF(start), end - start);
 	}
 }
@@ -437,6 +440,7 @@ static int printk_time = 1;
 #else
 static int printk_time = 0;
 #endif
+module_param(printk_time, int, S_IRUGO | S_IWUSR);
 
 static int __init printk_time_setup(char *str)
 {
@@ -453,6 +457,18 @@ __attribute__((weak)) unsigned long long printk_clock(void)
 	return sched_clock();
 }
 
+/* Check if we have any console registered that can be called early in boot. */
+static int have_callable_console(void)
+{
+	struct console *con;
+
+	for (con = console_drivers; con; con = con->next)
+		if (con->flags & CON_ANYTIME)
+			return 1;
+
+	return 0;
+}
+
 /**
  * printk - print a kernel message
  * @fmt: format string
@@ -566,27 +582,29 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 		log_level_unknown = 1;
 	}
 
-	if (!cpu_online(smp_processor_id())) {
+	if (!down_trylock(&console_sem)) {
 		/*
-		 * Some console drivers may assume that per-cpu resources have
-		 * been allocated. So don't allow them to be called by this
-		 * CPU until it is officially up. We shouldn't be calling into
-		 * random console drivers on a CPU which doesn't exist yet..
+		 * We own the drivers.  We can drop the spinlock and
+		 * let release_console_sem() print the text, maybe ...
 		 */
+		console_locked = 1;
 		printk_cpu = UINT_MAX;
 		spin_unlock_irqrestore(&logbuf_lock, flags);
-		goto out;
-	}
-	if (!down_trylock(&console_sem)) {
-		console_locked = 1;
+
 		/*
-		 * We own the drivers. We can drop the spinlock and let
-		 * release_console_sem() print the text
+		 * Console drivers may assume that per-cpu resources have
+		 * been allocated. So unless they're explicitly marked as
+		 * being able to cope (CON_ANYTIME) don't call them until
+		 * this CPU is officially up.
 		 */
-		printk_cpu = UINT_MAX;
-		spin_unlock_irqrestore(&logbuf_lock, flags);
-		console_may_schedule = 0;
-		release_console_sem();
+		if (cpu_online(smp_processor_id()) || have_callable_console()) {
+			console_may_schedule = 0;
+			release_console_sem();
+		} else {
+			/* Release by hand to avoid flushing the buffer. */
+			console_locked = 0;
+			up(&console_sem);
+		}
 	} else {
 		/*
 		 * Someone else owns the drivers.  We drop the spinlock, which
@@ -596,7 +614,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 		printk_cpu = UINT_MAX;
 		spin_unlock_irqrestore(&logbuf_lock, flags);
 	}
-out:
+
 	preempt_enable();
 	return printed_len;
 }
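After this change a message logged from a CPU that is not yet online is still
buffered, but it is only flushed through consoles that set CON_ANYTIME, i.e.
whose write handler touches no per-CPU state. A sketch of how such a console
would declare itself (hypothetical driver names; CON_ANYTIME is the flag
have_callable_console() tests):

	static void esafe_write(struct console *con, const char *s, unsigned n)
	{
		/* must use only resources valid before this CPU is up */
	}

	static struct console esafe_console = {
		.name	= "esafe",
		.write	= esafe_write,
		.flags	= CON_ENABLED | CON_ANYTIME,
		.index	= -1,
	};

	/* registered as usual: register_console(&esafe_console); */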
diff --git a/kernel/sched.c b/kernel/sched.c
index 5dbc42694477..f06d059edef5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4152,7 +4152,7 @@ EXPORT_SYMBOL(yield);
  */
 void __sched io_schedule(void)
 {
-	struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+	struct runqueue *rq = &__raw_get_cpu_var(runqueues);
 
 	atomic_inc(&rq->nr_iowait);
 	schedule();
@@ -4163,7 +4163,7 @@ EXPORT_SYMBOL(io_schedule);
 
 long __sched io_schedule_timeout(long timeout)
 {
-	struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+	struct runqueue *rq = &__raw_get_cpu_var(runqueues);
 	long ret;
 
 	atomic_inc(&rq->nr_iowait);
@@ -4756,6 +4756,8 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+		if (!cpu_rq(cpu)->migration_thread)
+			break;
 		/* Unbind it from offline cpu so it can run. Fall thru. */
 		kthread_bind(cpu_rq(cpu)->migration_thread,
 			     any_online_cpu(cpu_online_map));
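The same two-line guard is added to softirq.c, softlockup.c and workqueue.c
below: if CPU_UP_PREPARE failed, CPU_UP_CANCELED is still delivered to every
callback, including those whose per-CPU thread was never created.
Schematically, each callback now takes this defensive shape (my_thread
standing in for the subsystem's per-CPU task pointer):

	case CPU_UP_CANCELED:
		if (!my_thread)		/* CPU_UP_PREPARE never created it */
			break;
		/* Unbind so it can run, then fall through to the teardown. */
		kthread_bind(my_thread, any_online_cpu(cpu_online_map));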
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 336f92d64e2e..9e2f1c6e73d7 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -470,6 +470,8 @@ static int cpu_callback(struct notifier_block *nfb,
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+		if (!per_cpu(ksoftirqd, hotcpu))
+			break;
 		/* Unbind so it can run.  Fall thru. */
 		kthread_bind(per_cpu(ksoftirqd, hotcpu),
 			     any_online_cpu(cpu_online_map));
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 14c7faf02909..b5c3b94e01ce 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -36,7 +36,7 @@ static struct notifier_block panic_block = {
 
 void touch_softlockup_watchdog(void)
 {
-	per_cpu(touch_timestamp, raw_smp_processor_id()) = jiffies;
+	__raw_get_cpu_var(touch_timestamp) = jiffies;
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -127,6 +127,8 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+		if (!per_cpu(watchdog_task, hotcpu))
+			break;
 		/* Unbind so it can run.  Fall thru. */
 		kthread_bind(per_cpu(watchdog_task, hotcpu),
 			     any_online_cpu(cpu_online_map));
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index dcfb5d731466..2c0aacc37c55 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -4,6 +4,7 @@
 #include <linux/cpu.h>
 #include <linux/err.h>
 #include <linux/syscalls.h>
+#include <linux/kthread.h>
 #include <asm/atomic.h>
 #include <asm/semaphore.h>
 #include <asm/uaccess.h>
@@ -25,13 +26,11 @@ static unsigned int stopmachine_num_threads;
 static atomic_t stopmachine_thread_ack;
 static DECLARE_MUTEX(stopmachine_mutex);
 
-static int stopmachine(void *cpu)
+static int stopmachine(void *unused)
 {
 	int irqs_disabled = 0;
 	int prepared = 0;
 
-	set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));
-
 	/* Ack: we are alive */
 	smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
 	atomic_inc(&stopmachine_thread_ack);
@@ -85,7 +84,8 @@ static void stopmachine_set_state(enum stopmachine_state state)
 
 static int stop_machine(void)
 {
-	int i, ret = 0;
+	int ret = 0;
+	unsigned int i;
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 
 	/* One high-prio thread per cpu.  We'll do this one. */
@@ -96,11 +96,16 @@ static int stop_machine(void)
 	stopmachine_state = STOPMACHINE_WAIT;
 
 	for_each_online_cpu(i) {
+		struct task_struct *tsk;
 		if (i == raw_smp_processor_id())
 			continue;
-		ret = kernel_thread(stopmachine, (void *)(long)i,CLONE_KERNEL);
-		if (ret < 0)
+		tsk = kthread_create(stopmachine, NULL, "stopmachine");
+		if (IS_ERR(tsk)) {
+			ret = PTR_ERR(tsk);
 			break;
+		}
+		kthread_bind(tsk, i);
+		wake_up_process(tsk);
 		stopmachine_num_threads++;
 	}
 
diff --git a/kernel/sys.c b/kernel/sys.c
index 90930b28d2ca..2d5179c67cec 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -137,14 +137,15 @@ static int __kprobes notifier_call_chain(struct notifier_block **nl,
 					unsigned long val, void *v)
 {
 	int ret = NOTIFY_DONE;
-	struct notifier_block *nb;
+	struct notifier_block *nb, *next_nb;
 
 	nb = rcu_dereference(*nl);
 	while (nb) {
+		next_nb = rcu_dereference(nb->next);
 		ret = nb->notifier_call(nb, val, v);
 		if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
 			break;
-		nb = rcu_dereference(nb->next);
+		nb = next_nb;
 	}
 	return ret;
 }
@@ -588,7 +589,7 @@ void emergency_restart(void)
 }
 EXPORT_SYMBOL_GPL(emergency_restart);
 
-void kernel_restart_prepare(char *cmd)
+static void kernel_restart_prepare(char *cmd)
 {
 	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
 	system_state = SYSTEM_RESTART;
@@ -622,7 +623,7 @@ EXPORT_SYMBOL_GPL(kernel_restart);
  * Move into place and start executing a preloaded standalone
  * executable.  If nothing was preloaded return an error.
  */
-void kernel_kexec(void)
+static void kernel_kexec(void)
 {
 #ifdef CONFIG_KEXEC
 	struct kimage *image;
@@ -636,7 +637,6 @@ void kernel_kexec(void)
 	machine_kexec(image);
 #endif
 }
-EXPORT_SYMBOL_GPL(kernel_kexec);
 
 void kernel_shutdown_prepare(enum system_states state)
 {
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index eb8bd214e7d7..2c0e65819448 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -143,7 +143,6 @@ static struct ctl_table_header root_table_header =
 
 static ctl_table kern_table[];
 static ctl_table vm_table[];
-static ctl_table proc_table[];
 static ctl_table fs_table[];
 static ctl_table debug_table[];
 static ctl_table dev_table[];
@@ -203,12 +202,6 @@ static ctl_table root_table[] = {
 	},
 #endif
 	{
-		.ctl_name	= CTL_PROC,
-		.procname	= "proc",
-		.mode		= 0555,
-		.child		= proc_table,
-	},
-	{
 		.ctl_name	= CTL_FS,
 		.procname	= "fs",
 		.mode		= 0555,
@@ -927,10 +920,6 @@ static ctl_table vm_table[] = {
 	{ .ctl_name = 0 }
 };
 
-static ctl_table proc_table[] = {
-	{ .ctl_name = 0 }
-};
-
 static ctl_table fs_table[] = {
 	{
 		.ctl_name	= FS_NRINODE,
diff --git a/kernel/timer.c b/kernel/timer.c
index f35b3939e937..eb97371b87d8 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -146,7 +146,7 @@ static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
 void fastcall init_timer(struct timer_list *timer)
 {
 	timer->entry.next = NULL;
-	timer->base = per_cpu(tvec_bases, raw_smp_processor_id());
+	timer->base = __raw_get_cpu_var(tvec_bases);
 }
 EXPORT_SYMBOL(init_timer);
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 740c5abceb07..565cf7a1febd 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -428,22 +428,34 @@ int schedule_delayed_work_on(int cpu,
 	return ret;
 }
 
-int schedule_on_each_cpu(void (*func) (void *info), void *info)
+/**
+ * schedule_on_each_cpu - call a function on each online CPU from keventd
+ * @func: the function to call
+ * @info: a pointer to pass to func()
+ *
+ * Returns zero on success.
+ * Returns -ve errno on failure.
+ *
+ * Appears to be racy against CPU hotplug.
+ *
+ * schedule_on_each_cpu() is very slow.
+ */
+int schedule_on_each_cpu(void (*func)(void *info), void *info)
 {
 	int cpu;
-	struct work_struct *work;
+	struct work_struct *works;
 
-	work = kmalloc(NR_CPUS * sizeof(struct work_struct), GFP_KERNEL);
-
-	if (!work)
+	works = alloc_percpu(struct work_struct);
+	if (!works)
 		return -ENOMEM;
+
 	for_each_online_cpu(cpu) {
-		INIT_WORK(work + cpu, func, info);
+		INIT_WORK(per_cpu_ptr(works, cpu), func, info);
 		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
-			     work + cpu);
+			     per_cpu_ptr(works, cpu));
 	}
 	flush_workqueue(keventd_wq);
-	kfree(work);
+	free_percpu(works);
 	return 0;
 }
 
@@ -578,6 +590,8 @@ static int workqueue_cpu_callback(struct notifier_block *nfb,
 
 	case CPU_UP_CANCELED:
 		list_for_each_entry(wq, &workqueues, list) {
+			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
+				continue;
 			/* Unbind so it can run. */
 			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
 				     any_online_cpu(cpu_online_map));
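The schedule_on_each_cpu() rewrite replaces a kmalloc() of NR_CPUS
work_structs -- sized for the compile-time maximum and indexed by raw CPU
number -- with a per-CPU allocation that only covers possible CPUs. The
allocator pattern in isolation (a minimal sketch, not tied to workqueues;
hit_counter is a made-up type):

	#include <linux/percpu.h>

	struct hit_counter {
		long hits;
	};

	static int percpu_alloc_demo(void)
	{
		int cpu;
		struct hit_counter *ctrs = alloc_percpu(struct hit_counter);

		if (!ctrs)
			return -ENOMEM;
		for_each_online_cpu(cpu)
			per_cpu_ptr(ctrs, cpu)->hits = 0;	/* that CPU's instance */
		free_percpu(ctrs);
		return 0;
	}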