Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/Makefile           |  3
-rw-r--r-- | kernel/cpu.c              |  2
-rw-r--r-- | kernel/elfcore.c          | 28
-rw-r--r-- | kernel/exit.c             |  5
-rw-r--r-- | kernel/fork.c             | 19
-rw-r--r-- | kernel/module.c           |  3
-rw-r--r-- | kernel/panic.c            | 46
-rw-r--r-- | kernel/params.c           |  6
-rw-r--r-- | kernel/perf_event.c       | 15
-rw-r--r-- | kernel/pid.c              |  2
-rw-r--r-- | kernel/posix-cpu-timers.c | 36
-rw-r--r-- | kernel/power/hibernate.c  |  9
-rw-r--r-- | kernel/power/suspend.c    |  3
-rw-r--r-- | kernel/printk.c           |  3
-rw-r--r-- | kernel/relay.c            |  5
-rw-r--r-- | kernel/sched.c            |  8
-rw-r--r-- | kernel/sched_cpupri.c     |  2
-rw-r--r-- | kernel/sched_rt.c         |  5
-rw-r--r-- | kernel/signal.c           |  2
-rw-r--r-- | kernel/sys.c              |  3
-rw-r--r-- | kernel/tsacct.c           |  1
21 files changed, 144 insertions, 62 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 7b974699f8c2..a987aa1676b5 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -91,6 +91,9 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
 obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
 obj-$(CONFIG_TRACEPOINTS) += tracepoint.o
 obj-$(CONFIG_LATENCYTOP) += latencytop.o
+obj-$(CONFIG_BINFMT_ELF) += elfcore.o
+obj-$(CONFIG_COMPAT_BINFMT_ELF) += elfcore.o
+obj-$(CONFIG_BINFMT_ELF_FDPIC) += elfcore.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace/
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_X86_DS) += trace/
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 677f25376a38..f8cced2692b3 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -338,7 +338,7 @@ int __cpuinit cpu_up(unsigned int cpu)
 	if (!cpu_possible(cpu)) {
 		printk(KERN_ERR "can't online cpu %d because it is not "
 			"configured as may-hotadd at boot time\n", cpu);
-#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
+#if defined(CONFIG_IA64)
 		printk(KERN_ERR "please check additional_cpus= boot "
 				"parameter\n");
 #endif
diff --git a/kernel/elfcore.c b/kernel/elfcore.c
new file mode 100644
index 000000000000..ff915efef66d
--- /dev/null
+++ b/kernel/elfcore.c
@@ -0,0 +1,28 @@
+#include <linux/elf.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+
+#include <asm/elf.h>
+
+
+Elf_Half __weak elf_core_extra_phdrs(void)
+{
+	return 0;
+}
+
+int __weak elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
+				      unsigned long limit)
+{
+	return 1;
+}
+
+int __weak elf_core_write_extra_data(struct file *file, size_t *size,
+				     unsigned long limit)
+{
+	return 1;
+}
+
+size_t __weak elf_core_extra_data_size(void)
+{
+	return 0;
+}
diff --git a/kernel/exit.c b/kernel/exit.c
index 45ed043b8bf5..ce1e48c2d93d 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -952,7 +952,8 @@ NORET_TYPE void do_exit(long code)
 				preempt_count());
 
 	acct_update_integrals(tsk);
-
+	/* sync mm's RSS info before statistics gathering */
+	sync_mm_rss(tsk, tsk->mm);
 	group_dead = atomic_dec_and_test(&tsk->signal->live);
 	if (group_dead) {
 		hrtimer_cancel(&tsk->signal->real_timer);
@@ -1188,7 +1189,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 
 	if (unlikely(wo->wo_flags & WNOWAIT)) {
 		int exit_code = p->exit_code;
-		int why, status;
+		int why;
 
 		get_task_struct(p);
 		read_unlock(&tasklist_lock);
diff --git a/kernel/fork.c b/kernel/fork.c
index 17bbf093356d..b0ec34abc0bb 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -329,15 +329,17 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		if (!tmp)
 			goto fail_nomem;
 		*tmp = *mpnt;
+		INIT_LIST_HEAD(&tmp->anon_vma_chain);
 		pol = mpol_dup(vma_policy(mpnt));
 		retval = PTR_ERR(pol);
 		if (IS_ERR(pol))
 			goto fail_nomem_policy;
 		vma_set_policy(tmp, pol);
+		if (anon_vma_fork(tmp, mpnt))
+			goto fail_nomem_anon_vma_fork;
 		tmp->vm_flags &= ~VM_LOCKED;
 		tmp->vm_mm = mm;
 		tmp->vm_next = NULL;
-		anon_vma_link(tmp);
 		file = tmp->vm_file;
 		if (file) {
 			struct inode *inode = file->f_path.dentry->d_inode;
@@ -392,6 +394,8 @@ out:
 	flush_tlb_mm(oldmm);
 	up_write(&oldmm->mmap_sem);
 	return retval;
+fail_nomem_anon_vma_fork:
+	mpol_put(pol);
 fail_nomem_policy:
 	kmem_cache_free(vm_area_cachep, tmp);
 fail_nomem:
@@ -455,8 +459,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
 		(current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
 	mm->core_state = NULL;
 	mm->nr_ptes = 0;
-	set_mm_counter(mm, file_rss, 0);
-	set_mm_counter(mm, anon_rss, 0);
+	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
 	spin_lock_init(&mm->page_table_lock);
 	mm->free_area_cache = TASK_UNMAPPED_BASE;
 	mm->cached_hole_size = ~0UL;
@@ -825,6 +828,8 @@ void __cleanup_sighand(struct sighand_struct *sighand)
  */
 static void posix_cpu_timers_init_group(struct signal_struct *sig)
 {
+	unsigned long cpu_limit;
+
 	/* Thread group counters. */
 	thread_group_cputime_init(sig);
 
@@ -839,9 +844,9 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
 	sig->cputime_expires.virt_exp = cputime_zero;
 	sig->cputime_expires.sched_exp = 0;
 
-	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
-		sig->cputime_expires.prof_exp =
-			secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
+	cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
+	if (cpu_limit != RLIM_INFINITY) {
+		sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
 		sig->cputimer.running = 1;
 	}
 
@@ -1034,7 +1039,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #endif
 	retval = -EAGAIN;
 	if (atomic_read(&p->real_cred->user->processes) >=
-			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
+			task_rlimit(p, RLIMIT_NPROC)) {
 		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
 		    p->real_cred->user != INIT_USER)
 			goto bad_fork_free;
diff --git a/kernel/module.c b/kernel/module.c
index e5538d5f00ad..c968d3606dca 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1085,6 +1085,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
 		if (sattr->name == NULL)
 			goto out;
 		sect_attrs->nsections++;
+		sysfs_attr_init(&sattr->mattr.attr);
 		sattr->mattr.show = module_sect_show;
 		sattr->mattr.store = NULL;
 		sattr->mattr.attr.name = sattr->name;
@@ -1180,6 +1181,7 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect,
 		if (sect_empty(&sechdrs[i]))
 			continue;
 		if (sechdrs[i].sh_type == SHT_NOTE) {
+			sysfs_bin_attr_init(nattr);
 			nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
 			nattr->attr.mode = S_IRUGO;
 			nattr->size = sechdrs[i].sh_size;
@@ -1252,6 +1254,7 @@ int module_add_modinfo_attrs(struct module *mod)
 		if (!attr->test ||
 		    (attr->test && attr->test(mod))) {
 			memcpy(temp_attr, attr, sizeof(*temp_attr));
+			sysfs_attr_init(&temp_attr->attr);
 			error = sysfs_create_file(&mod->mkobj.kobj,&temp_attr->attr);
 			++temp_attr;
 		}
diff --git a/kernel/panic.c b/kernel/panic.c
index c787333282b8..13d966b4c14a 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -36,15 +36,36 @@ ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
 
 EXPORT_SYMBOL(panic_notifier_list);
 
-static long no_blink(long time)
-{
-	return 0;
-}
-
 /* Returns how long it waited in ms */
 long (*panic_blink)(long time);
 EXPORT_SYMBOL(panic_blink);
 
+static void panic_blink_one_second(void)
+{
+	static long i = 0, end;
+
+	if (panic_blink) {
+		end = i + MSEC_PER_SEC;
+
+		while (i < end) {
+			i += panic_blink(i);
+			mdelay(1);
+			i++;
+		}
+	} else {
+		/*
+		 * When running under a hypervisor a small mdelay may get
+		 * rounded up to the hypervisor timeslice. For example, with
+		 * a 1ms in 10ms hypervisor timeslice we might inflate a
+		 * mdelay(1) loop by 10x.
+		 *
+		 * If we have nothing to blink, spin on 1 second calls to
+		 * mdelay to avoid this.
+		 */
+		mdelay(MSEC_PER_SEC);
+	}
+}
+
 /**
  * panic - halt the system
  * @fmt: The text string to print
@@ -95,9 +116,6 @@ NORET_TYPE void panic(const char * fmt, ...)
 
 	bust_spinlocks(0);
 
-	if (!panic_blink)
-		panic_blink = no_blink;
-
 	if (panic_timeout > 0) {
 		/*
 		 * Delay timeout seconds before rebooting the machine.
@@ -105,11 +123,9 @@ NORET_TYPE void panic(const char * fmt, ...)
 		 */
 		printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout);
 
-		for (i = 0; i < panic_timeout*1000; ) {
+		for (i = 0; i < panic_timeout; i++) {
 			touch_nmi_watchdog();
-			i += panic_blink(i);
-			mdelay(1);
-			i++;
+			panic_blink_one_second();
 		}
 		/*
 		 * This will not be a clean reboot, with everything
@@ -135,11 +151,9 @@ NORET_TYPE void panic(const char * fmt, ...)
 	}
 #endif
 	local_irq_enable();
-	for (i = 0; ; ) {
+	while (1) {
 		touch_softlockup_watchdog();
-		i += panic_blink(i);
-		mdelay(1);
-		i++;
+		panic_blink_one_second();
 	}
 }
 
diff --git a/kernel/params.c b/kernel/params.c
index cf1b69183127..d55a53ec9234 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -24,7 +24,6 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/ctype.h>
-#include <linux/string.h>
 
 #if 0
 #define DEBUGP printk
@@ -517,6 +516,7 @@ static __modinit int add_sysfs_param(struct module_kobject *mk,
 	new->grp.attrs = attrs;
 
 	/* Tack new one on the end. */
+	sysfs_attr_init(&new->attrs[num].mattr.attr);
 	new->attrs[num].param = kp;
 	new->attrs[num].mattr.show = param_attr_show;
 	new->attrs[num].mattr.store = param_attr_store;
@@ -723,7 +723,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
 	return ret;
 }
 
-static struct sysfs_ops module_sysfs_ops = {
+static const struct sysfs_ops module_sysfs_ops = {
 	.show = module_attr_show,
 	.store = module_attr_store,
 };
@@ -737,7 +737,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
 	return 0;
 }
 
-static struct kset_uevent_ops module_uevent_ops = {
+static const struct kset_uevent_ops module_uevent_ops = {
 	.filter = uevent_filter,
 };
 
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index a661e7991865..f40560b86544 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2610,7 +2610,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	if (user_locked > user_lock_limit)
 		extra = user_locked - user_lock_limit;
 
-	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+	lock_limit = rlimit(RLIMIT_MEMLOCK);
 	lock_limit >>= PAGE_SHIFT;
 	locked = vma->vm_mm->locked_vm + extra;
 
@@ -5481,13 +5481,16 @@ void __init perf_event_init(void)
 	register_cpu_notifier(&perf_cpu_nb);
 }
 
-static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
+static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
+					struct sysdev_class_attribute *attr,
+					char *buf)
 {
 	return sprintf(buf, "%d\n", perf_reserved_percpu);
 }
 
 static ssize_t
 perf_set_reserve_percpu(struct sysdev_class *class,
+			struct sysdev_class_attribute *attr,
 			const char *buf,
 			size_t count)
 {
@@ -5516,13 +5519,17 @@ perf_set_reserve_percpu(struct sysdev_class *class,
 	return count;
 }
 
-static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
+static ssize_t perf_show_overcommit(struct sysdev_class *class,
+				    struct sysdev_class_attribute *attr,
+				    char *buf)
 {
 	return sprintf(buf, "%d\n", perf_overcommit);
 }
 
 static ssize_t
-perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
+perf_set_overcommit(struct sysdev_class *class,
+		    struct sysdev_class_attribute *attr,
+		    const char *buf, size_t count)
 {
 	unsigned long val;
 	int err;
diff --git a/kernel/pid.c b/kernel/pid.c
index b08e697cd83f..86b296943e5f 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -376,7 +376,7 @@ struct task_struct *pid_task(struct pid *pid, enum pid_type type)
 EXPORT_SYMBOL(pid_task);
 
 /*
- * Must be called under rcu_read_lock() or with tasklist_lock read-held.
+ * Must be called under rcu_read_lock().
  */
 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
 {
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 438ff4523513..1a22dfd42df9 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -982,6 +982,7 @@ static void check_thread_timers(struct task_struct *tsk,
 	int maxfire;
 	struct list_head *timers = tsk->cpu_timers;
 	struct signal_struct *const sig = tsk->signal;
+	unsigned long soft;
 
 	maxfire = 20;
 	tsk->cputime_expires.prof_exp = cputime_zero;
@@ -1030,9 +1031,10 @@ static void check_thread_timers(struct task_struct *tsk,
 	/*
 	 * Check for the special case thread timers.
 	 */
-	if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
-		unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
-		unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;
+	soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
+	if (soft != RLIM_INFINITY) {
+		unsigned long hard =
+			ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
 
 		if (hard != RLIM_INFINITY &&
 		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
@@ -1043,14 +1045,13 @@ static void check_thread_timers(struct task_struct *tsk,
 			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
 			return;
 		}
-		if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
+		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
 			/*
 			 * At the soft limit, send a SIGXCPU every second.
 			 */
-			if (sig->rlim[RLIMIT_RTTIME].rlim_cur
-			    < sig->rlim[RLIMIT_RTTIME].rlim_max) {
-				sig->rlim[RLIMIT_RTTIME].rlim_cur +=
-					USEC_PER_SEC;
+			if (soft < hard) {
+				soft += USEC_PER_SEC;
+				sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
 			}
 			printk(KERN_INFO
 			       "RT Watchdog Timeout: %s[%d]\n",
@@ -1121,6 +1122,7 @@ static void check_process_timers(struct task_struct *tsk,
 	unsigned long long sum_sched_runtime, sched_expires;
 	struct list_head *timers = sig->cpu_timers;
 	struct task_cputime cputime;
+	unsigned long soft;
 
 	/*
 	 * Don't sample the current process CPU clocks if there are no timers.
@@ -1193,11 +1195,13 @@ static void check_process_timers(struct task_struct *tsk,
 			 SIGPROF);
 	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
 			 SIGVTALRM);
-
-	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
+	soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
+	if (soft != RLIM_INFINITY) {
 		unsigned long psecs = cputime_to_secs(ptime);
+		unsigned long hard =
+			ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
 		cputime_t x;
-		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
+		if (psecs >= hard) {
 			/*
 			 * At the hard limit, we just die.
 			 * No need to calculate anything else now.
@@ -1205,17 +1209,17 @@ static void check_process_timers(struct task_struct *tsk,
 			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
 			return;
 		}
-		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
+		if (psecs >= soft) {
 			/*
 			 * At the soft limit, send a SIGXCPU every second.
 			 */
 			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
-			if (sig->rlim[RLIMIT_CPU].rlim_cur
-			    < sig->rlim[RLIMIT_CPU].rlim_max) {
-				sig->rlim[RLIMIT_CPU].rlim_cur++;
+			if (soft < hard) {
+				soft++;
+				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
 			}
 		}
-		x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
+		x = secs_to_cputime(soft);
 		if (cputime_eq(prof_expires, cputime_zero) ||
 		    cputime_lt(x, prof_expires)) {
 			prof_expires = x;
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index bbfe472d7524..da5288ec2392 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -323,6 +323,7 @@ static int create_image(int platform_mode)
 int hibernation_snapshot(int platform_mode)
 {
 	int error;
+	gfp_t saved_mask;
 
 	error = platform_begin(platform_mode);
 	if (error)
@@ -334,6 +335,7 @@ int hibernation_snapshot(int platform_mode)
 		goto Close;
 
 	suspend_console();
+	saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
 	error = dpm_suspend_start(PMSG_FREEZE);
 	if (error)
 		goto Recover_platform;
@@ -351,6 +353,7 @@ int hibernation_snapshot(int platform_mode)
 
 	dpm_resume_end(in_suspend ?
 		(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
+	set_gfp_allowed_mask(saved_mask);
 	resume_console();
  Close:
 	platform_end(platform_mode);
@@ -445,14 +448,17 @@ static int resume_target_kernel(bool platform_mode)
 int hibernation_restore(int platform_mode)
 {
 	int error;
+	gfp_t saved_mask;
 
 	pm_prepare_console();
 	suspend_console();
+	saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
 	error = dpm_suspend_start(PMSG_QUIESCE);
 	if (!error) {
 		error = resume_target_kernel(platform_mode);
 		dpm_resume_end(PMSG_RECOVER);
 	}
+	set_gfp_allowed_mask(saved_mask);
 	resume_console();
 	pm_restore_console();
 	return error;
@@ -466,6 +472,7 @@ int hibernation_restore(int platform_mode)
 int hibernation_platform_enter(void)
 {
 	int error;
+	gfp_t saved_mask;
 
 	if (!hibernation_ops)
 		return -ENOSYS;
@@ -481,6 +488,7 @@ int hibernation_platform_enter(void)
 
 	entering_platform_hibernation = true;
 	suspend_console();
+	saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
 	error = dpm_suspend_start(PMSG_HIBERNATE);
 	if (error) {
 		if (hibernation_ops->recover)
@@ -518,6 +526,7 @@ int hibernation_platform_enter(void)
  Resume_devices:
 	entering_platform_hibernation = false;
 	dpm_resume_end(PMSG_RESTORE);
+	set_gfp_allowed_mask(saved_mask);
 	resume_console();
 
  Close:
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 6f10dfc2d3e9..44cce10b582d 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -189,6 +189,7 @@ static int suspend_enter(suspend_state_t state)
 int suspend_devices_and_enter(suspend_state_t state)
 {
 	int error;
+	gfp_t saved_mask;
 
 	if (!suspend_ops)
 		return -ENOSYS;
@@ -199,6 +200,7 @@ int suspend_devices_and_enter(suspend_state_t state)
 		goto Close;
 	}
 	suspend_console();
+	saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
 	suspend_test_start();
 	error = dpm_suspend_start(PMSG_SUSPEND);
 	if (error) {
@@ -215,6 +217,7 @@ int suspend_devices_and_enter(suspend_state_t state)
 	suspend_test_start();
 	dpm_resume_end(PMSG_RESUME);
 	suspend_test_finish("resume devices");
+	set_gfp_allowed_mask(saved_mask);
 	resume_console();
  Close:
 	if (suspend_ops->end)
diff --git a/kernel/printk.c b/kernel/printk.c
index 40674122ecf2..75077ad0b537 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -70,8 +70,6 @@ int console_printk[4] = {
 	DEFAULT_CONSOLE_LOGLEVEL,	/* default_console_loglevel */
 };
 
-static int saved_console_loglevel = -1;
-
 /*
  * Low level drivers may need that to know if they can schedule in
  * their unblank() callback or not. So let's export it.
@@ -146,6 +144,7 @@ static char __log_buf[__LOG_BUF_LEN];
 static char *log_buf = __log_buf;
 static int log_buf_len = __LOG_BUF_LEN;
 static unsigned logged_chars; /* Number of chars produced since last read+clear operation */
+static int saved_console_loglevel = -1;
 
 #ifdef CONFIG_KEXEC
 /*
diff --git a/kernel/relay.c b/kernel/relay.c
index c705a41b4ba3..3d97f2821611 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -1215,14 +1215,14 @@ static void relay_page_release(struct splice_pipe_desc *spd, unsigned int i)
 /*
  * subbuf_splice_actor - splice up to one subbuf's worth of data
  */
-static int subbuf_splice_actor(struct file *in,
+static ssize_t subbuf_splice_actor(struct file *in,
 			       loff_t *ppos,
 			       struct pipe_inode_info *pipe,
 			       size_t len,
 			       unsigned int flags,
 			       int *nonpad_ret)
 {
-	unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
+	unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
 	struct rchan_buf *rbuf = in->private_data;
 	unsigned int subbuf_size = rbuf->chan->subbuf_size;
 	uint64_t pos = (uint64_t) *ppos;
@@ -1241,6 +1241,7 @@ static int subbuf_splice_actor(struct file *in,
 		.ops = &relay_pipe_buf_ops,
 		.spd_release = relay_page_release,
 	};
+	ssize_t ret;
 
 	if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
 		return 0;
diff --git a/kernel/sched.c b/kernel/sched.c
index abb36b16b93b..150b6988de49 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4353,7 +4353,7 @@ int can_nice(const struct task_struct *p, const int nice)
 	/* convert nice value [19,-20] to rlimit style value [1,40] */
 	int nice_rlim = 20 - nice;
 
-	return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
+	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
 		capable(CAP_SYS_NICE));
 }
 
@@ -4530,7 +4530,7 @@ recheck:
 
 		if (!lock_task_sighand(p, &flags))
 			return -ESRCH;
-		rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
+		rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
 		unlock_task_sighand(p, &flags);
 
 		/* can't set/change the rt policy */
@@ -7406,11 +7406,13 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
 
 #ifdef CONFIG_SCHED_MC
 static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
+					   struct sysdev_class_attribute *attr,
 					   char *page)
 {
 	return sprintf(page, "%u\n", sched_mc_power_savings);
 }
 static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
+					    struct sysdev_class_attribute *attr,
 					    const char *buf, size_t count)
 {
 	return sched_power_savings_store(buf, count, 0);
@@ -7422,11 +7424,13 @@ static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
 
 #ifdef CONFIG_SCHED_SMT
 static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
+					    struct sysdev_class_attribute *attr,
 					    char *page)
 {
 	return sprintf(page, "%u\n", sched_smt_power_savings);
 }
 static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
+					     struct sysdev_class_attribute *attr,
 					     const char *buf, size_t count)
 {
 	return sched_power_savings_store(buf, count, 1);
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index eeb3506c4834..82095bf2099f 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -47,7 +47,7 @@ static int convert_prio(int prio)
 }
 
 #define for_each_cpupri_active(array, idx)			\
-	for_each_bit(idx, array, CPUPRI_NR_PRIORITIES)
+	for_each_set_bit(idx, array, CPUPRI_NR_PRIORITIES)
 
 /**
  * cpupri_find - find the best (lowest-pri) CPU in the system
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index bf3e38fdbe6d..5a6ed1f0990a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1662,8 +1662,9 @@ static void watchdog(struct rq *rq, struct task_struct *p)
 	if (!p->signal)
 		return;
 
-	soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
-	hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
+	/* max may change after cur was read, this will be fixed next tick */
+	soft = task_rlimit(p, RLIMIT_RTTIME);
+	hard = task_rlimit_max(p, RLIMIT_RTTIME);
 
 	if (soft != RLIM_INFINITY) {
 		unsigned long next;
diff --git a/kernel/signal.c b/kernel/signal.c
index 5bb9baffa4f1..dbd7fe073c55 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -245,7 +245,7 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
 
 	if (override_rlimit ||
 	    atomic_read(&user->sigpending) <=
-			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) {
+			task_rlimit(t, RLIMIT_SIGPENDING)) {
 		q = kmem_cache_alloc(sigqueue_cachep, flags);
 	} else {
 		print_dropped_signal(sig);
diff --git a/kernel/sys.c b/kernel/sys.c
index 877fe4f8e05e..9814e43fb23b 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -571,8 +571,7 @@ static int set_user(struct cred *new)
 	if (!new_user)
 		return -EAGAIN;
 
-	if (atomic_read(&new_user->processes) >=
-			current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
+	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
 	    new_user != INIT_USER) {
 		free_uid(new_user);
 		return -EAGAIN;
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 00d59d048edf..0a67e041edf8 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -21,6 +21,7 @@
 #include <linux/tsacct_kern.h>
 #include <linux/acct.h>
 #include <linux/jiffies.h>
+#include <linux/mm.h>
 
 /*
  * fill in basic accounting fields