Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile        |   1
-rw-r--r--  kernel/cgroup.c        |   9
-rw-r--r--  kernel/cpu_acct.c      | 186
-rw-r--r--  kernel/irq/handle.c    |   8
-rw-r--r--  kernel/marker.c        |  41
-rw-r--r--  kernel/params.c        |  15
-rw-r--r--  kernel/pid.c           |   2
-rw-r--r--  kernel/power/disk.c    |  12
-rw-r--r--  kernel/resource.c      |   2
-rw-r--r--  kernel/sched.c         |  14
-rw-r--r--  kernel/signal.c        |   2
-rw-r--r--  kernel/sysctl.c        |   4
-rw-r--r--  kernel/sysctl_check.c  |   2
-rw-r--r--  kernel/taskstats.c     |  36
14 files changed, 73 insertions(+), 261 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index f60afe742599..dfa96956dae0 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -40,7 +40,6 @@ obj-$(CONFIG_COMPAT) += compat.o
 obj-$(CONFIG_CGROUPS) += cgroup.o
 obj-$(CONFIG_CGROUP_DEBUG) += cgroup_debug.o
 obj-$(CONFIG_CPUSETS) += cpuset.o
-obj-$(CONFIG_CGROUP_CPUACCT) += cpu_acct.o
 obj-$(CONFIG_CGROUP_NS) += ns_cgroup.o
 obj-$(CONFIG_IKCONFIG) += configs.o
 obj-$(CONFIG_STOP_MACHINE) += stop_machine.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 3fe21e19c96e..1a3c23936d43 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1,6 +1,4 @@
 /*
- * kernel/cgroup.c
- *
  * Generic process-grouping system.
  *
  * Based originally on the cpuset system, extracted by Paul Menage
@@ -2200,7 +2198,8 @@ static void cgroup_init_subsys(struct cgroup_subsys *ss)
 {
 	struct cgroup_subsys_state *css;
 	struct list_head *l;
-	printk(KERN_ERR "Initializing cgroup subsys %s\n", ss->name);
+
+	printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);
 
 	/* Create the top cgroup state for this subsystem */
 	ss->root = &rootnode;
@@ -2273,7 +2272,7 @@ int __init cgroup_init_early(void)
 		BUG_ON(!ss->create);
 		BUG_ON(!ss->destroy);
 		if (ss->subsys_id != i) {
-			printk(KERN_ERR "Subsys %s id == %d\n",
+			printk(KERN_ERR "cgroup: Subsys %s id == %d\n",
 			       ss->name, ss->subsys_id);
 			BUG();
 		}
@@ -2605,7 +2604,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
 	dentry = lookup_one_len(nodename, parent->dentry, strlen(nodename));
 	if (IS_ERR(dentry)) {
 		printk(KERN_INFO
-			"Couldn't allocate dentry for %s: %ld\n", nodename,
+			"cgroup: Couldn't allocate dentry for %s: %ld\n", nodename,
 			PTR_ERR(dentry));
 		ret = PTR_ERR(dentry);
 		goto out_release;
diff --git a/kernel/cpu_acct.c b/kernel/cpu_acct.c
deleted file mode 100644
index 731e47e7f164..000000000000
--- a/kernel/cpu_acct.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * kernel/cpu_acct.c - CPU accounting cgroup subsystem
- *
- * Copyright (C) Google Inc, 2006
- *
- * Developed by Paul Menage (menage@google.com) and Balbir Singh
- * (balbir@in.ibm.com)
- *
- */
-
-/*
- * Example cgroup subsystem for reporting total CPU usage of tasks in a
- * cgroup, along with percentage load over a time interval
- */
-
-#include <linux/module.h>
-#include <linux/cgroup.h>
-#include <linux/fs.h>
-#include <linux/rcupdate.h>
-
-#include <asm/div64.h>
-
-struct cpuacct {
-	struct cgroup_subsys_state css;
-	spinlock_t lock;
-	/* total time used by this class */
-	cputime64_t time;
-
-	/* time when next load calculation occurs */
-	u64 next_interval_check;
-
-	/* time used in current period */
-	cputime64_t current_interval_time;
-
-	/* time used in last period */
-	cputime64_t last_interval_time;
-};
-
-struct cgroup_subsys cpuacct_subsys;
-
-static inline struct cpuacct *cgroup_ca(struct cgroup *cont)
-{
-	return container_of(cgroup_subsys_state(cont, cpuacct_subsys_id),
-			    struct cpuacct, css);
-}
-
-static inline struct cpuacct *task_ca(struct task_struct *task)
-{
-	return container_of(task_subsys_state(task, cpuacct_subsys_id),
-			    struct cpuacct, css);
-}
-
-#define INTERVAL (HZ * 10)
-
-static inline u64 next_interval_boundary(u64 now)
-{
-	/* calculate the next interval boundary beyond the
-	 * current time */
-	do_div(now, INTERVAL);
-	return (now + 1) * INTERVAL;
-}
-
-static struct cgroup_subsys_state *cpuacct_create(
-	struct cgroup_subsys *ss, struct cgroup *cont)
-{
-	struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
-
-	if (!ca)
-		return ERR_PTR(-ENOMEM);
-	spin_lock_init(&ca->lock);
-	ca->next_interval_check = next_interval_boundary(get_jiffies_64());
-	return &ca->css;
-}
-
-static void cpuacct_destroy(struct cgroup_subsys *ss,
-			    struct cgroup *cont)
-{
-	kfree(cgroup_ca(cont));
-}
-
-/* Lazily update the load calculation if necessary. Called with ca locked */
-static void cpuusage_update(struct cpuacct *ca)
-{
-	u64 now = get_jiffies_64();
-
-	/* If we're not due for an update, return */
-	if (ca->next_interval_check > now)
-		return;
-
-	if (ca->next_interval_check <= (now - INTERVAL)) {
-		/* If it's been more than an interval since the last
-		 * check, then catch up - the last interval must have
-		 * been zero load */
-		ca->last_interval_time = 0;
-		ca->next_interval_check = next_interval_boundary(now);
-	} else {
-		/* If a steal takes the last interval time negative,
-		 * then we just ignore it */
-		if ((s64)ca->current_interval_time > 0)
-			ca->last_interval_time = ca->current_interval_time;
-		else
-			ca->last_interval_time = 0;
-		ca->next_interval_check += INTERVAL;
-	}
-	ca->current_interval_time = 0;
-}
-
-static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft)
-{
-	struct cpuacct *ca = cgroup_ca(cont);
-	u64 time;
-
-	spin_lock_irq(&ca->lock);
-	cpuusage_update(ca);
-	time = cputime64_to_jiffies64(ca->time);
-	spin_unlock_irq(&ca->lock);
-
-	/* Convert 64-bit jiffies to seconds */
-	time *= 1000;
-	do_div(time, HZ);
-	return time;
-}
-
-static u64 load_read(struct cgroup *cont, struct cftype *cft)
-{
-	struct cpuacct *ca = cgroup_ca(cont);
-	u64 time;
-
-	/* Find the time used in the previous interval */
-	spin_lock_irq(&ca->lock);
-	cpuusage_update(ca);
-	time = cputime64_to_jiffies64(ca->last_interval_time);
-	spin_unlock_irq(&ca->lock);
-
-	/* Convert time to a percentage, to give the load in the
-	 * previous period */
-	time *= 100;
-	do_div(time, INTERVAL);
-
-	return time;
-}
-
-static struct cftype files[] = {
-	{
-		.name = "usage",
-		.read_uint = cpuusage_read,
-	},
-	{
-		.name = "load",
-		.read_uint = load_read,
-	}
-};
-
-static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cont)
-{
-	return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
-}
-
-void cpuacct_charge(struct task_struct *task, cputime_t cputime)
-{
-
-	struct cpuacct *ca;
-	unsigned long flags;
-
-	if (!cpuacct_subsys.active)
-		return;
-	rcu_read_lock();
-	ca = task_ca(task);
-	if (ca) {
-		spin_lock_irqsave(&ca->lock, flags);
-		cpuusage_update(ca);
-		ca->time = cputime64_add(ca->time, cputime);
-		ca->current_interval_time =
-			cputime64_add(ca->current_interval_time, cputime);
-		spin_unlock_irqrestore(&ca->lock, flags);
-	}
-	rcu_read_unlock();
-}
-
-struct cgroup_subsys cpuacct_subsys = {
-	.name = "cpuacct",
-	.create = cpuacct_create,
-	.destroy = cpuacct_destroy,
-	.populate = cpuacct_populate,
-	.subsys_id = cpuacct_subsys_id,
-};
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index e391cbb1f566..dc335ad27525 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -178,9 +178,11 @@ fastcall unsigned int __do_IRQ(unsigned int irq)
 		 */
 		if (desc->chip->ack)
 			desc->chip->ack(irq);
-		action_ret = handle_IRQ_event(irq, desc->action);
-		if (!noirqdebug)
-			note_interrupt(irq, desc, action_ret);
+		if (likely(!(desc->status & IRQ_DISABLED))) {
+			action_ret = handle_IRQ_event(irq, desc->action);
+			if (!noirqdebug)
+				note_interrupt(irq, desc, action_ret);
+		}
 		desc->chip->end(irq);
 		return 1;
 	}
diff --git a/kernel/marker.c b/kernel/marker.c
index ccb48d9a3657..5323cfaedbce 100644
--- a/kernel/marker.c
+++ b/kernel/marker.c
@@ -28,7 +28,7 @@ extern struct marker __start___markers[];
 extern struct marker __stop___markers[];
 
 /*
- * module_mutex nests inside markers_mutex. Markers mutex protects the builtin
+ * markers_mutex nests inside module_mutex. Markers mutex protects the builtin
  * and module markers, the hash table and deferred_sync.
  */
 static DEFINE_MUTEX(markers_mutex);
@@ -257,7 +257,6 @@ static void disable_marker(struct marker *elem)
  * @refcount: number of references left to the given probe_module (out)
  *
  * Updates the probe callback corresponding to a range of markers.
- * Must be called with markers_mutex held.
  */
 void marker_update_probe_range(struct marker *begin,
 		struct marker *end, struct module *probe_module,
@@ -266,6 +265,7 @@ void marker_update_probe_range(struct marker *begin,
 	struct marker *iter;
 	struct marker_entry *mark_entry;
 
+	mutex_lock(&markers_mutex);
 	for (iter = begin; iter < end; iter++) {
 		mark_entry = get_marker(iter->name);
 		if (mark_entry && mark_entry->refcount) {
@@ -281,6 +281,7 @@ void marker_update_probe_range(struct marker *begin,
 			disable_marker(iter);
 		}
 	}
+	mutex_unlock(&markers_mutex);
 }
 
 /*
@@ -293,7 +294,6 @@ static void marker_update_probes(struct module *probe_module)
 {
 	int refcount = 0;
 
-	mutex_lock(&markers_mutex);
 	/* Core kernel markers */
 	marker_update_probe_range(__start___markers,
 		__stop___markers, probe_module, &refcount);
@@ -303,7 +303,6 @@ static void marker_update_probes(struct module *probe_module)
 		synchronize_sched();
 		deferred_sync = 0;
 	}
-	mutex_unlock(&markers_mutex);
 }
 
 /**
@@ -320,7 +319,7 @@ int marker_probe_register(const char *name, const char *format,
 		marker_probe_func *probe, void *private)
 {
 	struct marker_entry *entry;
-	int ret = 0, need_update = 0;
+	int ret = 0;
 
 	mutex_lock(&markers_mutex);
 	entry = get_marker(name);
@@ -335,11 +334,11 @@ int marker_probe_register(const char *name, const char *format,
 	ret = add_marker(name, format, probe, private);
 	if (ret)
 		goto end;
-	need_update = 1;
+	mutex_unlock(&markers_mutex);
+	marker_update_probes(NULL);
+	return ret;
 end:
 	mutex_unlock(&markers_mutex);
-	if (need_update)
-		marker_update_probes(NULL);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(marker_probe_register);
@@ -355,7 +354,6 @@ void *marker_probe_unregister(const char *name)
 	struct module *probe_module;
 	struct marker_entry *entry;
 	void *private;
-	int need_update = 0;
 
 	mutex_lock(&markers_mutex);
 	entry = get_marker(name);
@@ -368,11 +366,11 @@ void *marker_probe_unregister(const char *name)
 	probe_module = __module_text_address((unsigned long)entry->probe);
 	private = remove_marker(name);
 	deferred_sync = 1;
-	need_update = 1;
+	mutex_unlock(&markers_mutex);
+	marker_update_probes(probe_module);
+	return private;
 end:
 	mutex_unlock(&markers_mutex);
-	if (need_update)
-		marker_update_probes(probe_module);
 	return private;
 }
 EXPORT_SYMBOL_GPL(marker_probe_unregister);
@@ -392,7 +390,6 @@ void *marker_probe_unregister_private_data(void *private)
 	struct marker_entry *entry;
 	int found = 0;
 	unsigned int i;
-	int need_update = 0;
 
 	mutex_lock(&markers_mutex);
 	for (i = 0; i < MARKER_TABLE_SIZE; i++) {
@@ -414,11 +411,11 @@ iter_end:
 	probe_module = __module_text_address((unsigned long)entry->probe);
 	private = remove_marker(entry->name);
 	deferred_sync = 1;
-	need_update = 1;
+	mutex_unlock(&markers_mutex);
+	marker_update_probes(probe_module);
+	return private;
 end:
 	mutex_unlock(&markers_mutex);
-	if (need_update)
-		marker_update_probes(probe_module);
 	return private;
 }
 EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data);
@@ -434,7 +431,7 @@ EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data);
 int marker_arm(const char *name)
 {
 	struct marker_entry *entry;
-	int ret = 0, need_update = 0;
+	int ret = 0;
 
 	mutex_lock(&markers_mutex);
 	entry = get_marker(name);
@@ -447,11 +444,9 @@ int marker_arm(const char *name)
 	 */
 	if (entry->refcount++)
 		goto end;
-	need_update = 1;
 end:
 	mutex_unlock(&markers_mutex);
-	if (need_update)
-		marker_update_probes(NULL);
+	marker_update_probes(NULL);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(marker_arm);
@@ -467,7 +462,7 @@ EXPORT_SYMBOL_GPL(marker_arm);
 int marker_disarm(const char *name)
 {
 	struct marker_entry *entry;
-	int ret = 0, need_update = 0;
+	int ret = 0;
 
 	mutex_lock(&markers_mutex);
 	entry = get_marker(name);
@@ -486,11 +481,9 @@ int marker_disarm(const char *name)
 		ret = -EPERM;
 		goto end;
 	}
-	need_update = 1;
 end:
 	mutex_unlock(&markers_mutex);
-	if (need_update)
-		marker_update_probes(NULL);
+	marker_update_probes(NULL);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(marker_disarm);
diff --git a/kernel/params.c b/kernel/params.c
index 16f269e9ddc9..2a4c51487e72 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -592,19 +592,16 @@ static void __init param_sysfs_builtin(void)
 
 	for (i=0; i < __stop___param - __start___param; i++) {
 		char *dot;
-		size_t kplen;
+		size_t max_name_len;
 
 		kp = &__start___param[i];
-		kplen = strlen(kp->name);
+		max_name_len =
+			min_t(size_t, MAX_KBUILD_MODNAME, strlen(kp->name));
 
-		/* We do not handle args without periods. */
-		if (kplen > MAX_KBUILD_MODNAME) {
-			DEBUGP("kernel parameter name is too long: %s\n", kp->name);
-			continue;
-		}
-		dot = memchr(kp->name, '.', kplen);
+		dot = memchr(kp->name, '.', max_name_len);
 		if (!dot) {
-			DEBUGP("couldn't find period in %s\n", kp->name);
+			DEBUGP("couldn't find period in first %d characters "
+			       "of %s\n", MAX_KBUILD_MODNAME, kp->name);
 			continue;
 		}
 		name_len = dot - kp->name;
diff --git a/kernel/pid.c b/kernel/pid.c
index d1db36b94674..f815455431bf 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -537,6 +537,7 @@ err_alloc:
 	return NULL;
 }
 
+#ifdef CONFIG_PID_NS
 static struct pid_namespace *create_pid_namespace(int level)
 {
 	struct pid_namespace *ns;
@@ -621,6 +622,7 @@ void free_pid_ns(struct kref *kref)
 	if (parent != NULL)
 		put_pid_ns(parent);
 }
+#endif /* CONFIG_PID_NS */
 
 void zap_pid_ns_processes(struct pid_namespace *pid_ns)
 {
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 8b15f777010a..05b64790fe83 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -456,7 +456,17 @@ static int software_resume(void)
 	int error;
 	unsigned int flags;
 
-	mutex_lock(&pm_mutex);
+	/*
+	 * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
+	 * is configured into the kernel. Since the regular hibernate
+	 * trigger path is via sysfs which takes a buffer mutex before
+	 * calling hibernate functions (which take pm_mutex) this can
+	 * cause lockdep to complain about a possible ABBA deadlock
+	 * which cannot happen since we're in the boot code here and
+	 * sysfs can't be invoked yet. Therefore, we use a subclass
+	 * here to avoid lockdep complaining.
+	 */
+	mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING);
 	if (!swsusp_resume_device) {
 		if (!strlen(resume_file)) {
 			mutex_unlock(&pm_mutex);
diff --git a/kernel/resource.c b/kernel/resource.c
index a358142ff48f..2eb553d9b517 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -277,7 +277,7 @@ walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
 	int ret = -1;
 	res.start = (u64) start_pfn << PAGE_SHIFT;
 	res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
-	res.flags = IORESOURCE_MEM;
+	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 	orig_end = res.end;
 	while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) {
 		pfn = (unsigned long)(res.start >> PAGE_SHIFT);
diff --git a/kernel/sched.c b/kernel/sched.c
index b18f231a4875..4fb3532dd7e8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -52,7 +52,6 @@
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/percpu.h>
-#include <linux/cpu_acct.h>
 #include <linux/kthread.h>
 #include <linux/seq_file.h>
 #include <linux/sysctl.h>
@@ -3338,13 +3337,9 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
 {
 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
 	cputime64_t tmp;
-	struct rq *rq = this_rq();
 
 	p->utime = cputime_add(p->utime, cputime);
 
-	if (p != rq->idle)
-		cpuacct_charge(p, cputime);
-
 	/* Add user time to cpustat. */
 	tmp = cputime_to_cputime64(cputime);
 	if (TASK_NICE(p) > 0)
@@ -3408,10 +3403,9 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 		cpustat->irq = cputime64_add(cpustat->irq, tmp);
 	else if (softirq_count())
 		cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
-	else if (p != rq->idle) {
+	else if (p != rq->idle)
 		cpustat->system = cputime64_add(cpustat->system, tmp);
-		cpuacct_charge(p, cputime);
-	} else if (atomic_read(&rq->nr_iowait) > 0)
+	else if (atomic_read(&rq->nr_iowait) > 0)
 		cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
 	else
 		cpustat->idle = cputime64_add(cpustat->idle, tmp);
@@ -3447,10 +3441,8 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
 		cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
 	else
 		cpustat->idle = cputime64_add(cpustat->idle, tmp);
-	} else {
+	} else
 		cpustat->steal = cputime64_add(cpustat->steal, tmp);
-		cpuacct_charge(p, -tmp);
-	}
 }
 
 /*
diff --git a/kernel/signal.c b/kernel/signal.c
index 909a0cc6bc70..afa4f781f924 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -55,7 +55,7 @@ static int sig_ignored(struct task_struct *t, int sig)
 	 * signal handler may change by the time it is
 	 * unblocked.
 	 */
-	if (sigismember(&t->blocked, sig))
+	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
 		return 0;
 
 	/* Is it explicitly or implicitly ignored? */
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 3a1744fed2b6..0deed82a6156 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2620,6 +2620,10 @@ static int deprecated_sysctl_warning(struct __sysctl_args *args)
 	int name[CTL_MAXNAME];
 	int i;
 
+	/* Check args->nlen. */
+	if (args->nlen < 0 || args->nlen > CTL_MAXNAME)
+		return -ENOTDIR;
+
 	/* Read in the sysctl name for better debug message logging */
 	for (i = 0; i < args->nlen; i++)
 		if (get_user(name[i], args->name + i))
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index 5a2f2b2bf888..4abc6d2306f4 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -738,7 +738,7 @@ static struct trans_ctl_table trans_net_table[] = {
 	{ NET_ROSE, "rose", trans_net_rose_table },
 	{ NET_IPV6, "ipv6", trans_net_ipv6_table },
 	{ NET_X25, "x25", trans_net_x25_table },
-	{ NET_TR, "tr", trans_net_tr_table },
+	{ NET_TR, "token-ring", trans_net_tr_table },
 	{ NET_DECNET, "decnet", trans_net_decnet_table },
 	/* NET_ECONET not used */
 	{ NET_SCTP, "sctp", trans_net_sctp_table },
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 354e74bc17c1..07e86a828073 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -398,31 +398,31 @@ static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
 
 	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
 	file = fget_light(fd, &fput_needed);
-	if (file) {
-		size = nla_total_size(sizeof(struct cgroupstats));
+	if (!file)
+		return 0;
 
-		rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
-				size);
-		if (rc < 0)
-			goto err;
+	size = nla_total_size(sizeof(struct cgroupstats));
 
-		na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
-				sizeof(struct cgroupstats));
-		stats = nla_data(na);
-		memset(stats, 0, sizeof(*stats));
+	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
+				size);
+	if (rc < 0)
+		goto err;
 
-		rc = cgroupstats_build(stats, file->f_dentry);
-		if (rc < 0)
-			goto err;
+	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
+				sizeof(struct cgroupstats));
+	stats = nla_data(na);
+	memset(stats, 0, sizeof(*stats));
 
-		fput_light(file, fput_needed);
-		return send_reply(rep_skb, info->snd_pid);
+	rc = cgroupstats_build(stats, file->f_dentry);
+	if (rc < 0) {
+		nlmsg_free(rep_skb);
+		goto err;
 	}
 
+	rc = send_reply(rep_skb, info->snd_pid);
+
 err:
-	if (file)
-		fput_light(file, fput_needed);
-	nlmsg_free(rep_skb);
+	fput_light(file, fput_needed);
 	return rc;
 }
 