Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c          |  2
-rw-r--r--  kernel/hrtimer.c       | 70
-rw-r--r--  kernel/itimer.c        | 11
-rw-r--r--  kernel/posix-timers.c  | 53
-rw-r--r--  kernel/power/console.c | 12
-rw-r--r--  kernel/power/disk.c    |  6
-rw-r--r--  kernel/power/power.h   |  9
-rw-r--r--  kernel/power/swsusp.c  |  8
-rw-r--r--  kernel/rcutorture.c    | 10
-rw-r--r--  kernel/sched.c         |  8
-rw-r--r--  kernel/sysctl.c        | 12
-rw-r--r--  kernel/time.c          |  2
-rw-r--r--  kernel/user.c          | 32
13 files changed, 123 insertions(+), 112 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 4ae8cfc1c89c..7f0ab5ee948c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -802,7 +802,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
 	init_sigpending(&sig->shared_pending);
 	INIT_LIST_HEAD(&sig->posix_timers);
 
-	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC);
+	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_REL);
 	sig->it_real_incr.tv64 = 0;
 	sig->real_timer.function = it_real_fn;
 	sig->real_timer.data = tsk;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f1c4155b49ac..2b6e1757aedd 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -21,6 +21,12 @@
  * Credits:
  *	based on kernel/timer.c
  *
+ *	Help, testing, suggestions, bugfixes, improvements were
+ *	provided by:
+ *
+ *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
+ *	et. al.
+ *
  * For licencing details see kernel-base/COPYING
  */
 
@@ -66,6 +72,12 @@ EXPORT_SYMBOL_GPL(ktime_get_real);
 
 /*
  * The timer bases:
+ *
+ * Note: If we want to add new timer bases, we have to skip the two
+ * clock ids captured by the cpu-timers. We do this by holding empty
+ * entries rather than doing math adjustment of the clock ids.
+ * This ensures that we capture erroneous accesses to these clock ids
+ * rather than moving them into the range of valid clock id's.
  */
 
 #define MAX_HRTIMER_BASES 2
@@ -483,29 +495,25 @@ ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
 }
 
 /**
- * hrtimer_rebase - rebase an initialized hrtimer to a different base
+ * hrtimer_init - initialize a timer to the given clock
  *
- * @timer:	the timer to be rebased
+ * @timer:	the timer to be initialized
  * @clock_id:	the clock to be used
+ * @mode:	timer mode abs/rel
  */
-void hrtimer_rebase(struct hrtimer *timer, const clockid_t clock_id)
+void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+		  enum hrtimer_mode mode)
 {
 	struct hrtimer_base *bases;
 
+	memset(timer, 0, sizeof(struct hrtimer));
+
 	bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
-	timer->base = &bases[clock_id];
-}
 
-/**
- * hrtimer_init - initialize a timer to the given clock
- *
- * @timer:	the timer to be initialized
- * @clock_id:	the clock to be used
- */
-void hrtimer_init(struct hrtimer *timer, const clockid_t clock_id)
-{
-	memset(timer, 0, sizeof(struct hrtimer));
-	hrtimer_rebase(timer, clock_id);
+	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_ABS)
+		clock_id = CLOCK_MONOTONIC;
+
+	timer->base = &bases[clock_id];
 }
 
 /**
@@ -550,6 +558,7 @@ static inline void run_hrtimer_queue(struct hrtimer_base *base)
 		fn = timer->function;
 		data = timer->data;
 		set_curr_timer(base, timer);
+		timer->state = HRTIMER_RUNNING;
 		__remove_hrtimer(timer, base);
 		spin_unlock_irq(&base->lock);
 
@@ -565,6 +574,10 @@ static inline void run_hrtimer_queue(struct hrtimer_base *base)
 
 		spin_lock_irq(&base->lock);
 
+		/* Another CPU has added back the timer */
+		if (timer->state != HRTIMER_RUNNING)
+			continue;
+
 		if (restart == HRTIMER_RESTART)
 			enqueue_hrtimer(timer, base);
 		else
@@ -638,8 +651,7 @@ schedule_hrtimer_interruptible(struct hrtimer *timer,
 	return schedule_hrtimer(timer, mode);
 }
 
-static long __sched
-nanosleep_restart(struct restart_block *restart, clockid_t clockid)
+static long __sched nanosleep_restart(struct restart_block *restart)
 {
 	struct timespec __user *rmtp;
 	struct timespec tu;
@@ -649,7 +661,7 @@ nanosleep_restart(struct restart_block *restart, clockid_t clockid)
 
 	restart->fn = do_no_restart_syscall;
 
-	hrtimer_init(&timer, clockid);
+	hrtimer_init(&timer, (clockid_t) restart->arg3, HRTIMER_ABS);
 
 	timer.expires.tv64 = ((u64)restart->arg1 << 32) | (u64) restart->arg0;
 
@@ -669,16 +681,6 @@ nanosleep_restart(struct restart_block *restart, clockid_t clockid)
 	return -ERESTART_RESTARTBLOCK;
 }
 
-static long __sched nanosleep_restart_mono(struct restart_block *restart)
-{
-	return nanosleep_restart(restart, CLOCK_MONOTONIC);
-}
-
-static long __sched nanosleep_restart_real(struct restart_block *restart)
-{
-	return nanosleep_restart(restart, CLOCK_REALTIME);
-}
-
 long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
 		       const enum hrtimer_mode mode, const clockid_t clockid)
 {
@@ -687,7 +689,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
 	struct timespec tu;
 	ktime_t rem;
 
-	hrtimer_init(&timer, clockid);
+	hrtimer_init(&timer, clockid, mode);
 
 	timer.expires = timespec_to_ktime(*rqtp);
 
@@ -695,7 +697,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
 	if (rem.tv64 <= 0)
 		return 0;
 
-	/* Absolute timers do not update the rmtp value: */
+	/* Absolute timers do not update the rmtp value and restart: */
 	if (mode == HRTIMER_ABS)
 		return -ERESTARTNOHAND;
 
@@ -705,11 +707,11 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
 		return -EFAULT;
 
 	restart = &current_thread_info()->restart_block;
-	restart->fn = (clockid == CLOCK_MONOTONIC) ?
-		nanosleep_restart_mono : nanosleep_restart_real;
+	restart->fn = nanosleep_restart;
 	restart->arg0 = timer.expires.tv64 & 0xFFFFFFFF;
 	restart->arg1 = timer.expires.tv64 >> 32;
 	restart->arg2 = (unsigned long) rmtp;
+	restart->arg3 = (unsigned long) timer.base->index;
 
 	return -ERESTART_RESTARTBLOCK;
 }
@@ -736,10 +738,8 @@ static void __devinit init_hrtimers_cpu(int cpu)
 	struct hrtimer_base *base = per_cpu(hrtimer_bases, cpu);
 	int i;
 
-	for (i = 0; i < MAX_HRTIMER_BASES; i++) {
+	for (i = 0; i < MAX_HRTIMER_BASES; i++, base++)
 		spin_lock_init(&base->lock);
-		base++;
-	}
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
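
Note: folding hrtimer_rebase() into hrtimer_init() also centralizes the POSIX quirk: a relative CLOCK_REALTIME timer is silently rebased to CLOCK_MONOTONIC, so clock_settime() cannot disturb relative timeouts. A minimal caller-side sketch (illustrative only; the helper and callback names are hypothetical, the API is the post-patch one):

static int my_timeout_fn(void *data)		/* hypothetical callback */
{
	return HRTIMER_NORESTART;		/* one-shot */
}

static void arm_example_timeout(void)		/* hypothetical helper */
{
	struct hrtimer t;

	/* relative + CLOCK_REALTIME: init rebases the timer to CLOCK_MONOTONIC */
	hrtimer_init(&t, CLOCK_REALTIME, HRTIMER_REL);
	t.function = my_timeout_fn;
	t.data = NULL;
	hrtimer_start(&t, ktime_set(1, 0), HRTIMER_REL);
	/* a real caller must cancel or wait before this stack frame dies */
}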
diff --git a/kernel/itimer.c b/kernel/itimer.c
index c2c05c4ff28d..379be2f8c84c 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -49,9 +49,11 @@ int do_getitimer(int which, struct itimerval *value)
 
 	switch (which) {
 	case ITIMER_REAL:
+		spin_lock_irq(&tsk->sighand->siglock);
 		value->it_value = itimer_get_remtime(&tsk->signal->real_timer);
 		value->it_interval =
 			ktime_to_timeval(tsk->signal->it_real_incr);
+		spin_unlock_irq(&tsk->sighand->siglock);
 		break;
 	case ITIMER_VIRTUAL:
 		read_lock(&tasklist_lock);
@@ -150,18 +152,25 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
 
 	switch (which) {
 	case ITIMER_REAL:
+again:
+		spin_lock_irq(&tsk->sighand->siglock);
 		timer = &tsk->signal->real_timer;
-		hrtimer_cancel(timer);
 		if (ovalue) {
 			ovalue->it_value = itimer_get_remtime(timer);
 			ovalue->it_interval
 				= ktime_to_timeval(tsk->signal->it_real_incr);
 		}
+		/* We are sharing ->siglock with it_real_fn() */
+		if (hrtimer_try_to_cancel(timer) < 0) {
+			spin_unlock_irq(&tsk->sighand->siglock);
+			goto again;
+		}
 		tsk->signal->it_real_incr =
 			timeval_to_ktime(value->it_interval);
 		expires = timeval_to_ktime(value->it_value);
 		if (expires.tv64 != 0)
 			hrtimer_start(timer, expires, HRTIMER_REL);
+		spin_unlock_irq(&tsk->sighand->siglock);
 		break;
 	case ITIMER_VIRTUAL:
 		nval = timeval_to_cputime(&value->it_value);
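
Note: the unconditional hrtimer_cancel() is replaced by a retry loop because the timer callback, it_real_fn(), takes ->siglock itself; cancelling synchronously while holding that lock could deadlock against a running callback. The generic shape of the dance (a sketch under that assumption; names hypothetical):

static void reprogram_timer_safely(struct hrtimer *timer, spinlock_t *lock)
{
again:
	spin_lock_irq(lock);
	/*
	 * hrtimer_try_to_cancel(): 0 = timer was not active, 1 = removed,
	 * -1 = callback currently running (and it may need *lock)
	 */
	if (hrtimer_try_to_cancel(timer) < 0) {
		spin_unlock_irq(lock);	/* let the callback finish */
		goto again;
	}
	/* timer is guaranteed inactive here; reprogram under the lock */
	spin_unlock_irq(lock);
}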
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 197208b3aa2a..216f574b5ffb 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -194,9 +194,7 @@ static inline int common_clock_set(const clockid_t which_clock,
 
 static int common_timer_create(struct k_itimer *new_timer)
 {
-	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock);
-	new_timer->it.real.timer.data = new_timer;
-	new_timer->it.real.timer.function = posix_timer_fn;
+	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
 	return 0;
 }
 
@@ -290,7 +288,8 @@ void do_schedule_next_timer(struct siginfo *info)
 		info->si_overrun = timr->it_overrun_last;
 	}
 
-	unlock_timer(timr, flags);
+	if (timr)
+		unlock_timer(timr, flags);
 }
 
 int posix_timer_event(struct k_itimer *timr,int si_private)
@@ -692,6 +691,7 @@ common_timer_set(struct k_itimer *timr, int flags,
 		 struct itimerspec *new_setting, struct itimerspec *old_setting)
 {
 	struct hrtimer *timer = &timr->it.real.timer;
+	enum hrtimer_mode mode;
 
 	if (old_setting)
 		common_timer_get(timr, old_setting);
@@ -713,14 +713,10 @@ common_timer_set(struct k_itimer *timr, int flags,
 	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
 		return 0;
 
-	/* Posix madness. Only absolute CLOCK_REALTIME timers
-	 * are affected by clock sets. So we must reiniatilize
-	 * the timer.
-	 */
-	if (timr->it_clock == CLOCK_REALTIME && (flags & TIMER_ABSTIME))
-		hrtimer_rebase(timer, CLOCK_REALTIME);
-	else
-		hrtimer_rebase(timer, CLOCK_MONOTONIC);
+	mode = flags & TIMER_ABSTIME ? HRTIMER_ABS : HRTIMER_REL;
+	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
+	timr->it.real.timer.data = timr;
+	timr->it.real.timer.function = posix_timer_fn;
 
 	timer->expires = timespec_to_ktime(new_setting->it_value);
 
@@ -728,11 +724,15 @@ common_timer_set(struct k_itimer *timr, int flags,
 	timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);
 
 	/* SIGEV_NONE timers are not queued ! See common_timer_get */
-	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
+	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
+		/* Setup correct expiry time for relative timers */
+		if (mode == HRTIMER_REL)
+			timer->expires = ktime_add(timer->expires,
+						   timer->base->get_time());
 		return 0;
+	}
 
-	hrtimer_start(timer, timer->expires, (flags & TIMER_ABSTIME) ?
-		      HRTIMER_ABS : HRTIMER_REL);
+	hrtimer_start(timer, timer->expires, mode);
 	return 0;
 }
 
738 738
@@ -875,12 +875,6 @@ int do_posix_clock_nosettime(const clockid_t clockid, struct timespec *tp)
 }
 EXPORT_SYMBOL_GPL(do_posix_clock_nosettime);
 
-int do_posix_clock_notimer_create(struct k_itimer *timer)
-{
-	return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(do_posix_clock_notimer_create);
-
 int do_posix_clock_nonanosleep(const clockid_t clock, int flags,
 			       struct timespec *t, struct timespec __user *r)
 {
@@ -947,21 +941,8 @@ sys_clock_getres(const clockid_t which_clock, struct timespec __user *tp)
 static int common_nsleep(const clockid_t which_clock, int flags,
 			 struct timespec *tsave, struct timespec __user *rmtp)
 {
-	int mode = flags & TIMER_ABSTIME ? HRTIMER_ABS : HRTIMER_REL;
-	int clockid = which_clock;
-
-	switch (which_clock) {
-	case CLOCK_REALTIME:
-		/* Posix madness. Only absolute timers on clock realtime
-		   are affected by clock set. */
-		if (mode != HRTIMER_ABS)
-			clockid = CLOCK_MONOTONIC;
-	case CLOCK_MONOTONIC:
-		break;
-	default:
-		return -EINVAL;
-	}
-	return hrtimer_nanosleep(tsave, rmtp, mode, clockid);
+	return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
+				 HRTIMER_ABS : HRTIMER_REL, which_clock);
 }
 
 asmlinkage long
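
Note: in common_timer_set(), a relative SIGEV_NONE timer now has its expiry converted to absolute because such timers are never enqueued, so nothing else records when they were armed. A sketch of why timer_gettime() needs this (an assumption about common_timer_get(), which this diff does not show):

static ktime_t sigev_none_remaining(struct hrtimer *timer)	/* hypothetical */
{
	/* only valid because the expiry was made absolute at set time */
	return ktime_sub(timer->expires, timer->base->get_time());
}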
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 7ff375e7c95f..579d239d129f 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -9,18 +9,11 @@
 #include <linux/console.h>
 #include "power.h"
 
-static int new_loglevel = 10;
-static int orig_loglevel;
 #ifdef SUSPEND_CONSOLE
 static int orig_fgconsole, orig_kmsg;
-#endif
 
 int pm_prepare_console(void)
 {
-	orig_loglevel = console_loglevel;
-	console_loglevel = new_loglevel;
-
-#ifdef SUSPEND_CONSOLE
 	acquire_console_sem();
 
 	orig_fgconsole = fg_console;
@@ -41,18 +34,15 @@ int pm_prepare_console(void)
 	}
 	orig_kmsg = kmsg_redirect;
 	kmsg_redirect = SUSPEND_CONSOLE;
-#endif
 	return 0;
 }
 
 void pm_restore_console(void)
 {
-	console_loglevel = orig_loglevel;
-#ifdef SUSPEND_CONSOLE
 	acquire_console_sem();
 	set_console(orig_fgconsole);
 	release_console_sem();
 	kmsg_redirect = orig_kmsg;
-#endif
 	return;
 }
+#endif
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index e24446f8d8cd..f2b3b0ea512a 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -367,14 +367,14 @@ power_attr(resume);
 
 static ssize_t image_size_show(struct subsystem * subsys, char *buf)
 {
-	return sprintf(buf, "%u\n", image_size);
+	return sprintf(buf, "%lu\n", image_size);
 }
 
 static ssize_t image_size_store(struct subsystem * subsys, const char * buf, size_t n)
 {
-	unsigned int size;
+	unsigned long size;
 
-	if (sscanf(buf, "%u", &size) == 1) {
+	if (sscanf(buf, "%lu", &size) == 1) {
 		image_size = size;
 		return n;
 	}
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 7e8492fd1423..d8f0d1a76bae 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -42,8 +42,13 @@ static struct subsys_attribute _name##_attr = { \
 
 extern struct subsystem power_subsys;
 
+#ifdef SUSPEND_CONSOLE
 extern int pm_prepare_console(void);
 extern void pm_restore_console(void);
+#else
+static int pm_prepare_console(void) { return 0; }
+static void pm_restore_console(void) {}
+#endif
 
 /* References to section boundaries */
 extern const void __nosave_begin, __nosave_end;
@@ -51,8 +56,8 @@ extern const void __nosave_begin, __nosave_end;
51extern unsigned int nr_copy_pages; 56extern unsigned int nr_copy_pages;
52extern struct pbe *pagedir_nosave; 57extern struct pbe *pagedir_nosave;
53 58
54/* Preferred image size in MB (default 500) */ 59/* Preferred image size in bytes (default 500 MB) */
55extern unsigned int image_size; 60extern unsigned long image_size;
56 61
57extern asmlinkage int swsusp_arch_suspend(void); 62extern asmlinkage int swsusp_arch_suspend(void);
58extern asmlinkage int swsusp_arch_resume(void); 63extern asmlinkage int swsusp_arch_resume(void);
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 55a18d26abed..59c91c148e82 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -70,12 +70,12 @@
 #include "power.h"
 
 /*
- * Preferred image size in MB (tunable via /sys/power/image_size).
+ * Preferred image size in bytes (tunable via /sys/power/image_size).
  * When it is set to N, swsusp will do its best to ensure the image
- * size will not exceed N MB, but if that is impossible, it will
+ * size will not exceed N bytes, but if that is impossible, it will
  * try to create the smallest image possible.
  */
-unsigned int image_size = 500;
+unsigned long image_size = 500 * 1024 * 1024;
 
 #ifdef CONFIG_HIGHMEM
 unsigned int count_highmem_pages(void);
@@ -590,7 +590,7 @@ int swsusp_shrink_memory(void)
 		if (!tmp)
 			return -ENOMEM;
 		pages += tmp;
-	} else if (size > (image_size * 1024 * 1024) / PAGE_SIZE) {
+	} else if (size > image_size / PAGE_SIZE) {
 		tmp = shrink_all_memory(SHRINK_BITE);
 		pages += tmp;
 	}
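
Note: image_size switches from megabytes to bytes across power.h, swsusp.c and the /sys/power/image_size interface, which is what lets swsusp_shrink_memory() drop the MB-to-pages scaling. A worked check that the default cap is unchanged (a sketch assuming PAGE_SIZE == 4096; the helper is hypothetical):

static unsigned long image_size_pages(void)	/* hypothetical */
{
	/* 500 MB default, now stored in bytes: 524288000 / 4096 = 128000 pages,
	 * exactly what the old (500 * 1024 * 1024) / PAGE_SIZE computed */
	return image_size / PAGE_SIZE;
}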
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 773219907dd8..7712912dbc84 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -114,16 +114,16 @@ rcu_torture_alloc(void)
 {
 	struct list_head *p;
 
-	spin_lock(&rcu_torture_lock);
+	spin_lock_bh(&rcu_torture_lock);
 	if (list_empty(&rcu_torture_freelist)) {
 		atomic_inc(&n_rcu_torture_alloc_fail);
-		spin_unlock(&rcu_torture_lock);
+		spin_unlock_bh(&rcu_torture_lock);
 		return NULL;
 	}
 	atomic_inc(&n_rcu_torture_alloc);
 	p = rcu_torture_freelist.next;
 	list_del_init(p);
-	spin_unlock(&rcu_torture_lock);
+	spin_unlock_bh(&rcu_torture_lock);
 	return container_of(p, struct rcu_torture, rtort_free);
 }
 
@@ -134,9 +134,9 @@ static void
 rcu_torture_free(struct rcu_torture *p)
 {
 	atomic_inc(&n_rcu_torture_free);
-	spin_lock(&rcu_torture_lock);
+	spin_lock_bh(&rcu_torture_lock);
 	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
-	spin_unlock(&rcu_torture_lock);
+	spin_unlock_bh(&rcu_torture_lock);
 }
 
 static void
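
Note: the freelist lock moves to the _bh variants because rcu_torture_free() also runs from the RCU callback, i.e. softirq context; a plain spin_lock held in process context could be interrupted on the same CPU by a softirq that then spins on the same lock forever. Sketch of the callback-side path (illustrative; the rtort_rcu field name is an assumption about rcutorture's struct):

static void example_rcu_cb(struct rcu_head *head)	/* hypothetical */
{
	struct rcu_torture *p = container_of(head, struct rcu_torture, rtort_rcu);

	/* softirq context: must use the same _bh discipline as the allocator */
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}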
diff --git a/kernel/sched.c b/kernel/sched.c
index 3ee2ae45125f..f77f23f8f479 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4031,7 +4031,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 		goto out_unlock;
 
 	retval = 0;
-	cpus_and(*mask, p->cpus_allowed, cpu_possible_map);
+	cpus_and(*mask, p->cpus_allowed, cpu_online_map);
 
 out_unlock:
 	read_unlock(&tasklist_lock);
@@ -5141,7 +5141,7 @@ static void init_sched_build_groups(struct sched_group groups[], cpumask_t span,
 #define SEARCH_SCOPE		2
 #define MIN_CACHE_SIZE		(64*1024U)
 #define DEFAULT_CACHE_SIZE	(5*1024*1024U)
-#define ITERATIONS		2
+#define ITERATIONS		1
 #define SIZE_THRESH		130
 #define COST_THRESH		130
 
@@ -5480,9 +5480,9 @@ static unsigned long long measure_migration_cost(int cpu1, int cpu2)
 			break;
 		}
 		/*
-		 * Increase the cachesize in 5% steps:
+		 * Increase the cachesize in 10% steps:
 		 */
-		size = size * 20 / 19;
+		size = size * 10 / 9;
 	}
 
 	if (migration_debug)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index cb99a42f8b37..71dd6f62efec 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -878,7 +878,17 @@ static ctl_table vm_table[] = {
 		.maxlen		= sizeof(zone_reclaim_mode),
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec,
-		.strategy	= &zero,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &zero,
+	},
+	{
+		.ctl_name	= VM_ZONE_RECLAIM_INTERVAL,
+		.procname	= "zone_reclaim_interval",
+		.data		= &zone_reclaim_interval,
+		.maxlen		= sizeof(zone_reclaim_interval),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+		.strategy	= &sysctl_jiffies,
 	},
 #endif
 	{ .ctl_name = 0 }
diff --git a/kernel/time.c b/kernel/time.c
index 7477b1d2079e..1f23e683d6aa 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -155,7 +155,7 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
 	static int firsttime = 1;
 	int error = 0;
 
-	if (!timespec_valid(tv))
+	if (tv && !timespec_valid(tv))
 		return -EINVAL;
 
 	error = security_settime(tv, tz);
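
Note: the added tv check matters because settimeofday(2) allows a NULL tv when only the timezone is being updated; the old code handed that NULL straight to timespec_valid(), which dereferences it. A user-space sketch of the call that exercises this path (illustrative only):

#include <sys/time.h>

int main(void)
{
	struct timezone tz = { .tz_minuteswest = -60, .tz_dsttime = 0 };

	return settimeofday(NULL, &tz);	/* tv == NULL is legal: timezone only */
}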
diff --git a/kernel/user.c b/kernel/user.c
index 89e562feb1b1..d9deae43a9ab 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/bitops.h>
 #include <linux/key.h>
+#include <linux/interrupt.h>
 
 /*
  * UID task count cache, to get fast user lookup in "alloc_uid"
@@ -27,6 +28,16 @@
 
 static kmem_cache_t *uid_cachep;
 static struct list_head uidhash_table[UIDHASH_SZ];
+
+/*
+ * The uidhash_lock is mostly taken from process context, but it is
+ * occasionally also taken from softirq/tasklet context, when
+ * task-structs get RCU-freed. Hence all locking must be softirq-safe.
+ * But free_uid() is also called with local interrupts disabled, and running
+ * local_bh_enable() with local interrupts disabled is an error - we'll run
+ * softirq callbacks, and they can unconditionally enable interrupts, and
+ * the caller of free_uid() didn't expect that..
+ */
 static DEFINE_SPINLOCK(uidhash_lock);
 
 struct user_struct root_user = {
@@ -82,15 +93,19 @@ static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *has
 struct user_struct *find_user(uid_t uid)
 {
 	struct user_struct *ret;
+	unsigned long flags;
 
-	spin_lock(&uidhash_lock);
+	spin_lock_irqsave(&uidhash_lock, flags);
 	ret = uid_hash_find(uid, uidhashentry(uid));
-	spin_unlock(&uidhash_lock);
+	spin_unlock_irqrestore(&uidhash_lock, flags);
 	return ret;
 }
 
 void free_uid(struct user_struct *up)
 {
+	unsigned long flags;
+
+	local_irq_save(flags);
 	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
 		uid_hash_remove(up);
 		key_put(up->uid_keyring);
@@ -98,6 +113,7 @@ void free_uid(struct user_struct *up)
 		kmem_cache_free(uid_cachep, up);
 		spin_unlock(&uidhash_lock);
 	}
+	local_irq_restore(flags);
 }
 
 struct user_struct * alloc_uid(uid_t uid)
@@ -105,9 +121,9 @@ struct user_struct * alloc_uid(uid_t uid)
 	struct list_head *hashent = uidhashentry(uid);
 	struct user_struct *up;
 
-	spin_lock(&uidhash_lock);
+	spin_lock_irq(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
-	spin_unlock(&uidhash_lock);
+	spin_unlock_irq(&uidhash_lock);
 
 	if (!up) {
 		struct user_struct *new;
@@ -137,7 +153,7 @@ struct user_struct * alloc_uid(uid_t uid)
 		 * Before adding this, check whether we raced
 		 * on adding the same user already..
 		 */
-		spin_lock(&uidhash_lock);
+		spin_lock_irq(&uidhash_lock);
 		up = uid_hash_find(uid, hashent);
 		if (up) {
 			key_put(new->uid_keyring);
@@ -147,7 +163,7 @@ struct user_struct * alloc_uid(uid_t uid)
 			uid_hash_insert(new, hashent);
 			up = new;
 		}
-		spin_unlock(&uidhash_lock);
+		spin_unlock_irq(&uidhash_lock);
 
 	}
 	return up;
@@ -183,9 +199,9 @@ static int __init uid_cache_init(void)
 		INIT_LIST_HEAD(uidhash_table + n);
 
 	/* Insert the root user immediately (init already runs as root) */
-	spin_lock(&uidhash_lock);
+	spin_lock_irq(&uidhash_lock);
 	uid_hash_insert(&root_user, uidhashentry(0));
-	spin_unlock(&uidhash_lock);
+	spin_unlock_irq(&uidhash_lock);
 
 	return 0;
 }
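
Note: free_uid() brackets atomic_dec_and_lock() with local_irq_save()/local_irq_restore() by hand instead of using the _irqsave or _bh lock variants, because (per the comment added above uidhash_lock) it can be entered with interrupts already disabled, where local_bh_enable() is forbidden. An annotated sketch of the recipe (restating the code above, not new logic):

	unsigned long flags;

	local_irq_save(flags);		/* nests safely even if irqs are already off */
	/*
	 * atomic_dec_and_lock(): decrement the count; only when it hits
	 * zero, take the (plain) spinlock and return 1. The irq-off
	 * bracket is what makes that plain lock softirq-safe here.
	 */
	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);	/* last reference is gone */
		spin_unlock(&uidhash_lock);
	}
	local_irq_restore(flags);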