Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile       |   1
-rw-r--r--  kernel/cgroup.c       |   9
-rw-r--r--  kernel/cpu_acct.c     | 186
-rw-r--r--  kernel/exit.c         |   3
-rw-r--r--  kernel/irq/handle.c   |   8
-rw-r--r--  kernel/marker.c       |  41
-rw-r--r--  kernel/params.c       |  15
-rw-r--r--  kernel/pid.c          |   2
-rw-r--r--  kernel/power/disk.c   |  12
-rw-r--r--  kernel/resource.c     |   2
-rw-r--r--  kernel/sched.c        |  76
-rw-r--r--  kernel/sched_fair.c   |   2
-rw-r--r--  kernel/sys.c          |  20
-rw-r--r--  kernel/sysctl.c       |   4
-rw-r--r--  kernel/sysctl_check.c |   2
-rw-r--r--  kernel/taskstats.c    |  36
-rw-r--r--  kernel/time/ntp.c     |   2

17 files changed, 103 insertions(+), 318 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index f60afe742599..dfa96956dae0 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -40,7 +40,6 @@ obj-$(CONFIG_COMPAT) += compat.o
 obj-$(CONFIG_CGROUPS) += cgroup.o
 obj-$(CONFIG_CGROUP_DEBUG) += cgroup_debug.o
 obj-$(CONFIG_CPUSETS) += cpuset.o
-obj-$(CONFIG_CGROUP_CPUACCT) += cpu_acct.o
 obj-$(CONFIG_CGROUP_NS) += ns_cgroup.o
 obj-$(CONFIG_IKCONFIG) += configs.o
 obj-$(CONFIG_STOP_MACHINE) += stop_machine.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 3fe21e19c96e..1a3c23936d43 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1,6 +1,4 @@
 /*
- * kernel/cgroup.c
- *
  * Generic process-grouping system.
  *
  * Based originally on the cpuset system, extracted by Paul Menage
@@ -2200,7 +2198,8 @@ static void cgroup_init_subsys(struct cgroup_subsys *ss)
 {
 	struct cgroup_subsys_state *css;
 	struct list_head *l;
-	printk(KERN_ERR "Initializing cgroup subsys %s\n", ss->name);
+
+	printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);
 
 	/* Create the top cgroup state for this subsystem */
 	ss->root = &rootnode;
@@ -2273,7 +2272,7 @@ int __init cgroup_init_early(void)
 		BUG_ON(!ss->create);
 		BUG_ON(!ss->destroy);
 		if (ss->subsys_id != i) {
-			printk(KERN_ERR "Subsys %s id == %d\n",
+			printk(KERN_ERR "cgroup: Subsys %s id == %d\n",
 			       ss->name, ss->subsys_id);
 			BUG();
 		}
@@ -2605,7 +2604,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
 	dentry = lookup_one_len(nodename, parent->dentry, strlen(nodename));
 	if (IS_ERR(dentry)) {
 		printk(KERN_INFO
-		       "Couldn't allocate dentry for %s: %ld\n", nodename,
+		       "cgroup: Couldn't allocate dentry for %s: %ld\n", nodename,
 		       PTR_ERR(dentry));
 		ret = PTR_ERR(dentry);
 		goto out_release;
diff --git a/kernel/cpu_acct.c b/kernel/cpu_acct.c
deleted file mode 100644
index 731e47e7f164..000000000000
--- a/kernel/cpu_acct.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * kernel/cpu_acct.c - CPU accounting cgroup subsystem
- *
- * Copyright (C) Google Inc, 2006
- *
- * Developed by Paul Menage (menage@google.com) and Balbir Singh
- * (balbir@in.ibm.com)
- *
- */
-
-/*
- * Example cgroup subsystem for reporting total CPU usage of tasks in a
- * cgroup, along with percentage load over a time interval
- */
-
-#include <linux/module.h>
-#include <linux/cgroup.h>
-#include <linux/fs.h>
-#include <linux/rcupdate.h>
-
-#include <asm/div64.h>
-
-struct cpuacct {
-	struct cgroup_subsys_state css;
-	spinlock_t lock;
-	/* total time used by this class */
-	cputime64_t time;
-
-	/* time when next load calculation occurs */
-	u64 next_interval_check;
-
-	/* time used in current period */
-	cputime64_t current_interval_time;
-
-	/* time used in last period */
-	cputime64_t last_interval_time;
-};
-
-struct cgroup_subsys cpuacct_subsys;
-
-static inline struct cpuacct *cgroup_ca(struct cgroup *cont)
-{
-	return container_of(cgroup_subsys_state(cont, cpuacct_subsys_id),
-			    struct cpuacct, css);
-}
-
-static inline struct cpuacct *task_ca(struct task_struct *task)
-{
-	return container_of(task_subsys_state(task, cpuacct_subsys_id),
-			    struct cpuacct, css);
-}
-
-#define INTERVAL (HZ * 10)
-
-static inline u64 next_interval_boundary(u64 now)
-{
-	/* calculate the next interval boundary beyond the
-	 * current time */
-	do_div(now, INTERVAL);
-	return (now + 1) * INTERVAL;
-}
-
-static struct cgroup_subsys_state *cpuacct_create(
-	struct cgroup_subsys *ss, struct cgroup *cont)
-{
-	struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
-
-	if (!ca)
-		return ERR_PTR(-ENOMEM);
-	spin_lock_init(&ca->lock);
-	ca->next_interval_check = next_interval_boundary(get_jiffies_64());
-	return &ca->css;
-}
-
-static void cpuacct_destroy(struct cgroup_subsys *ss,
-			    struct cgroup *cont)
-{
-	kfree(cgroup_ca(cont));
-}
-
-/* Lazily update the load calculation if necessary. Called with ca locked */
-static void cpuusage_update(struct cpuacct *ca)
-{
-	u64 now = get_jiffies_64();
-
-	/* If we're not due for an update, return */
-	if (ca->next_interval_check > now)
-		return;
-
-	if (ca->next_interval_check <= (now - INTERVAL)) {
-		/* If it's been more than an interval since the last
-		 * check, then catch up - the last interval must have
-		 * been zero load */
-		ca->last_interval_time = 0;
-		ca->next_interval_check = next_interval_boundary(now);
-	} else {
-		/* If a steal takes the last interval time negative,
-		 * then we just ignore it */
-		if ((s64)ca->current_interval_time > 0)
-			ca->last_interval_time = ca->current_interval_time;
-		else
-			ca->last_interval_time = 0;
-		ca->next_interval_check += INTERVAL;
-	}
-	ca->current_interval_time = 0;
-}
-
-static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft)
-{
-	struct cpuacct *ca = cgroup_ca(cont);
-	u64 time;
-
-	spin_lock_irq(&ca->lock);
-	cpuusage_update(ca);
-	time = cputime64_to_jiffies64(ca->time);
-	spin_unlock_irq(&ca->lock);
-
-	/* Convert 64-bit jiffies to seconds */
-	time *= 1000;
-	do_div(time, HZ);
-	return time;
-}
-
-static u64 load_read(struct cgroup *cont, struct cftype *cft)
-{
-	struct cpuacct *ca = cgroup_ca(cont);
-	u64 time;
-
-	/* Find the time used in the previous interval */
-	spin_lock_irq(&ca->lock);
-	cpuusage_update(ca);
-	time = cputime64_to_jiffies64(ca->last_interval_time);
-	spin_unlock_irq(&ca->lock);
-
-	/* Convert time to a percentage, to give the load in the
-	 * previous period */
-	time *= 100;
-	do_div(time, INTERVAL);
-
-	return time;
-}
-
-static struct cftype files[] = {
-	{
-		.name = "usage",
-		.read_uint = cpuusage_read,
-	},
-	{
-		.name = "load",
-		.read_uint = load_read,
-	}
-};
-
-static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cont)
-{
-	return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
-}
-
-void cpuacct_charge(struct task_struct *task, cputime_t cputime)
-{
-
-	struct cpuacct *ca;
-	unsigned long flags;
-
-	if (!cpuacct_subsys.active)
-		return;
-	rcu_read_lock();
-	ca = task_ca(task);
-	if (ca) {
-		spin_lock_irqsave(&ca->lock, flags);
-		cpuusage_update(ca);
-		ca->time = cputime64_add(ca->time, cputime);
-		ca->current_interval_time =
-			cputime64_add(ca->current_interval_time, cputime);
-		spin_unlock_irqrestore(&ca->lock, flags);
-	}
-	rcu_read_unlock();
-}
-
-struct cgroup_subsys cpuacct_subsys = {
-	.name = "cpuacct",
-	.create = cpuacct_create,
-	.destroy = cpuacct_destroy,
-	.populate = cpuacct_populate,
-	.subsys_id = cpuacct_subsys_id,
-};
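For reference, the interval bookkeeping that the removed file implemented boils down to rounding a jiffies value up to the next INTERVAL boundary. A minimal userspace sketch of the same arithmetic, with plain division standing in for the kernel's do_div() and an assumed HZ of 250:

#include <stdio.h>
#include <stdint.h>

#define INTERVAL 2500ULL	/* stands in for HZ * 10 with an assumed HZ of 250 */

/* Round `now` up to the next INTERVAL boundary, as next_interval_boundary()
 * did with do_div(). */
static uint64_t next_interval_boundary(uint64_t now)
{
	return (now / INTERVAL + 1) * INTERVAL;
}

int main(void)
{
	uint64_t now = 6000;

	/* 6000 lies in [5000, 7500), so the next boundary is 7500 */
	printf("next boundary after %llu: %llu\n",
	       (unsigned long long)now,
	       (unsigned long long)next_interval_boundary(now));
	return 0;
}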
diff --git a/kernel/exit.c b/kernel/exit.c
index f1aec27f1df0..cd0f1d4137a7 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1386,8 +1386,7 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
 		int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
 
 		exit_code = p->exit_code;
-		if (unlikely(!exit_code) ||
-		    unlikely(p->state & TASK_TRACED))
+		if (unlikely(!exit_code) || unlikely(p->exit_state))
 			goto bail_ref;
 		return wait_noreap_copyout(p, pid, uid,
 					   why, (exit_code << 8) | 0x7f,
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index e391cbb1f566..dc335ad27525 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -178,9 +178,11 @@ fastcall unsigned int __do_IRQ(unsigned int irq)
 		 */
 		if (desc->chip->ack)
 			desc->chip->ack(irq);
-		action_ret = handle_IRQ_event(irq, desc->action);
-		if (!noirqdebug)
-			note_interrupt(irq, desc, action_ret);
+		if (likely(!(desc->status & IRQ_DISABLED))) {
+			action_ret = handle_IRQ_event(irq, desc->action);
+			if (!noirqdebug)
+				note_interrupt(irq, desc, action_ret);
+		}
 		desc->chip->end(irq);
 		return 1;
 	}
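The hunk above makes __do_IRQ() still acknowledge the controller but skip handler dispatch while IRQ_DISABLED is set in the descriptor status. A rough userspace sketch of that gate, with hypothetical names and an assumed flag value:

#include <stdio.h>

#define IRQ_DISABLED 0x1	/* assumed flag value, for illustration only */

struct irq_desc_sketch {
	unsigned int status;
};

static void do_irq_sketch(struct irq_desc_sketch *desc)
{
	/* always acknowledge the controller ... */
	printf("ack\n");
	/* ... but only run handlers while the line is not disabled */
	if (!(desc->status & IRQ_DISABLED))
		printf("handle_IRQ_event + note_interrupt\n");
	printf("end\n");
}

int main(void)
{
	struct irq_desc_sketch d = { .status = IRQ_DISABLED };

	do_irq_sketch(&d);	/* prints ack, end; the handler is skipped */
	return 0;
}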
diff --git a/kernel/marker.c b/kernel/marker.c
index ccb48d9a3657..5323cfaedbce 100644
--- a/kernel/marker.c
+++ b/kernel/marker.c
@@ -28,7 +28,7 @@ extern struct marker __start___markers[];
 extern struct marker __stop___markers[];
 
 /*
- * module_mutex nests inside markers_mutex. Markers mutex protects the builtin
+ * markers_mutex nests inside module_mutex. Markers mutex protects the builtin
 * and module markers, the hash table and deferred_sync.
 */
 static DEFINE_MUTEX(markers_mutex);
@@ -257,7 +257,6 @@ static void disable_marker(struct marker *elem)
  * @refcount: number of references left to the given probe_module (out)
  *
  * Updates the probe callback corresponding to a range of markers.
- * Must be called with markers_mutex held.
  */
 void marker_update_probe_range(struct marker *begin,
 		struct marker *end, struct module *probe_module,
@@ -266,6 +265,7 @@ void marker_update_probe_range(struct marker *begin,
 	struct marker *iter;
 	struct marker_entry *mark_entry;
 
+	mutex_lock(&markers_mutex);
 	for (iter = begin; iter < end; iter++) {
 		mark_entry = get_marker(iter->name);
 		if (mark_entry && mark_entry->refcount) {
@@ -281,6 +281,7 @@ void marker_update_probe_range(struct marker *begin,
 			disable_marker(iter);
 		}
 	}
+	mutex_unlock(&markers_mutex);
 }
 
 /*
@@ -293,7 +294,6 @@ static void marker_update_probes(struct module *probe_module)
 {
 	int refcount = 0;
 
-	mutex_lock(&markers_mutex);
 	/* Core kernel markers */
 	marker_update_probe_range(__start___markers,
 		__stop___markers, probe_module, &refcount);
@@ -303,7 +303,6 @@ static void marker_update_probes(struct module *probe_module)
 		synchronize_sched();
 		deferred_sync = 0;
 	}
-	mutex_unlock(&markers_mutex);
 }
 
 /**
@@ -320,7 +319,7 @@ int marker_probe_register(const char *name, const char *format,
 		marker_probe_func *probe, void *private)
 {
 	struct marker_entry *entry;
-	int ret = 0, need_update = 0;
+	int ret = 0;
 
 	mutex_lock(&markers_mutex);
 	entry = get_marker(name);
@@ -335,11 +334,11 @@ int marker_probe_register(const char *name, const char *format,
 	ret = add_marker(name, format, probe, private);
 	if (ret)
 		goto end;
-	need_update = 1;
+	mutex_unlock(&markers_mutex);
+	marker_update_probes(NULL);
+	return ret;
 end:
 	mutex_unlock(&markers_mutex);
-	if (need_update)
-		marker_update_probes(NULL);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(marker_probe_register);
@@ -355,7 +354,6 @@ void *marker_probe_unregister(const char *name)
 	struct module *probe_module;
 	struct marker_entry *entry;
 	void *private;
-	int need_update = 0;
 
 	mutex_lock(&markers_mutex);
 	entry = get_marker(name);
@@ -368,11 +366,11 @@ void *marker_probe_unregister(const char *name)
 	probe_module = __module_text_address((unsigned long)entry->probe);
 	private = remove_marker(name);
 	deferred_sync = 1;
-	need_update = 1;
+	mutex_unlock(&markers_mutex);
+	marker_update_probes(probe_module);
+	return private;
 end:
 	mutex_unlock(&markers_mutex);
-	if (need_update)
-		marker_update_probes(probe_module);
 	return private;
 }
 EXPORT_SYMBOL_GPL(marker_probe_unregister);
@@ -392,7 +390,6 @@ void *marker_probe_unregister_private_data(void *private)
 	struct marker_entry *entry;
 	int found = 0;
 	unsigned int i;
-	int need_update = 0;
 
 	mutex_lock(&markers_mutex);
 	for (i = 0; i < MARKER_TABLE_SIZE; i++) {
@@ -414,11 +411,11 @@ iter_end:
 	probe_module = __module_text_address((unsigned long)entry->probe);
 	private = remove_marker(entry->name);
 	deferred_sync = 1;
-	need_update = 1;
+	mutex_unlock(&markers_mutex);
+	marker_update_probes(probe_module);
+	return private;
 end:
 	mutex_unlock(&markers_mutex);
-	if (need_update)
-		marker_update_probes(probe_module);
 	return private;
 }
 EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data);
@@ -434,7 +431,7 @@ EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data);
 int marker_arm(const char *name)
 {
 	struct marker_entry *entry;
-	int ret = 0, need_update = 0;
+	int ret = 0;
 
 	mutex_lock(&markers_mutex);
 	entry = get_marker(name);
@@ -447,11 +444,9 @@ int marker_arm(const char *name)
 	 */
 	if (entry->refcount++)
 		goto end;
-	need_update = 1;
 end:
 	mutex_unlock(&markers_mutex);
-	if (need_update)
-		marker_update_probes(NULL);
+	marker_update_probes(NULL);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(marker_arm);
@@ -467,7 +462,7 @@ EXPORT_SYMBOL_GPL(marker_arm);
 int marker_disarm(const char *name)
 {
 	struct marker_entry *entry;
-	int ret = 0, need_update = 0;
+	int ret = 0;
 
 	mutex_lock(&markers_mutex);
 	entry = get_marker(name);
@@ -486,11 +481,9 @@ int marker_disarm(const char *name)
 		ret = -EPERM;
 		goto end;
 	}
-	need_update = 1;
end:
 	mutex_unlock(&markers_mutex);
-	if (need_update)
-		marker_update_probes(NULL);
+	marker_update_probes(NULL);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(marker_disarm);
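The common thread in these marker.c hunks is a lock-ordering change: markers_mutex is now taken inside marker_update_probe_range() itself, so the register/unregister/arm/disarm paths must drop the mutex before triggering an update instead of holding it across the whole operation. A minimal pthreads sketch of that pattern, with hypothetical names, not the marker API itself:

#include <pthread.h>

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

/* The leaf update now takes the registry lock itself ... */
static void update_range(void)
{
	pthread_mutex_lock(&registry_lock);
	/* walk the registry and arm/disarm entries */
	pthread_mutex_unlock(&registry_lock);
}

/* ... so a caller that also needs the lock for its own bookkeeping must
 * release it before triggering the update, exactly as
 * marker_probe_register() now unlocks markers_mutex before calling
 * marker_update_probes(). */
static int register_entry(void)
{
	pthread_mutex_lock(&registry_lock);
	/* add the entry to the registry */
	pthread_mutex_unlock(&registry_lock);
	update_range();
	return 0;
}

int main(void)
{
	return register_entry();
}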
diff --git a/kernel/params.c b/kernel/params.c
index 16f269e9ddc9..2a4c51487e72 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -592,19 +592,16 @@ static void __init param_sysfs_builtin(void)
 
 	for (i=0; i < __stop___param - __start___param; i++) {
 		char *dot;
-		size_t kplen;
+		size_t max_name_len;
 
 		kp = &__start___param[i];
-		kplen = strlen(kp->name);
+		max_name_len =
+			min_t(size_t, MAX_KBUILD_MODNAME, strlen(kp->name));
 
-		/* We do not handle args without periods. */
-		if (kplen > MAX_KBUILD_MODNAME) {
-			DEBUGP("kernel parameter name is too long: %s\n", kp->name);
-			continue;
-		}
-		dot = memchr(kp->name, '.', kplen);
+		dot = memchr(kp->name, '.', max_name_len);
 		if (!dot) {
-			DEBUGP("couldn't find period in %s\n", kp->name);
+			DEBUGP("couldn't find period in first %d characters "
+			       "of %s\n", MAX_KBUILD_MODNAME, kp->name);
 			continue;
 		}
 		name_len = dot - kp->name;
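Rather than rejecting long parameter names outright, the new code scans only the first MAX_KBUILD_MODNAME bytes for the '.' separator. A runnable userspace sketch of that bounded split; the MAX_KBUILD_MODNAME value here is assumed for illustration:

#include <stdio.h>
#include <string.h>

#define MAX_KBUILD_MODNAME 64	/* assumed value, for illustration only */

int main(void)
{
	const char *name = "usbcore.autosuspend";
	size_t max_name_len = strlen(name);
	const char *dot;

	/* the same clamp as min_t(size_t, MAX_KBUILD_MODNAME, strlen(...)) */
	if (max_name_len > MAX_KBUILD_MODNAME)
		max_name_len = MAX_KBUILD_MODNAME;

	dot = memchr(name, '.', max_name_len);
	if (!dot) {
		printf("no period in the first %d characters of %s\n",
		       MAX_KBUILD_MODNAME, name);
		return 1;
	}
	printf("module: %.*s, parameter: %s\n",
	       (int)(dot - name), name, dot + 1);
	return 0;
}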
diff --git a/kernel/pid.c b/kernel/pid.c
index d1db36b94674..f815455431bf 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -537,6 +537,7 @@ err_alloc:
 	return NULL;
 }
 
+#ifdef CONFIG_PID_NS
 static struct pid_namespace *create_pid_namespace(int level)
 {
 	struct pid_namespace *ns;
@@ -621,6 +622,7 @@ void free_pid_ns(struct kref *kref)
 	if (parent != NULL)
 		put_pid_ns(parent);
 }
+#endif /* CONFIG_PID_NS */
 
 void zap_pid_ns_processes(struct pid_namespace *pid_ns)
 {
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 8b15f777010a..05b64790fe83 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -456,7 +456,17 @@ static int software_resume(void)
 	int error;
 	unsigned int flags;
 
-	mutex_lock(&pm_mutex);
+	/*
+	 * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
+	 * is configured into the kernel. Since the regular hibernate
+	 * trigger path is via sysfs which takes a buffer mutex before
+	 * calling hibernate functions (which take pm_mutex) this can
+	 * cause lockdep to complain about a possible ABBA deadlock
+	 * which cannot happen since we're in the boot code here and
+	 * sysfs can't be invoked yet. Therefore, we use a subclass
+	 * here to avoid lockdep complaining.
+	 */
+	mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING);
 	if (!swsusp_resume_device) {
 		if (!strlen(resume_file)) {
 			mutex_unlock(&pm_mutex);
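mutex_lock_nested() changes only what lockdep is told, not the locking semantics: the same mutex is acquired, but under a separate subclass so the boot-time acquisition is not matched against the sysfs-path acquisition. A sketch of the annotation pattern as a kernel-style fragment, not standalone-buildable:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);

static void boot_time_path(void)
{
	/*
	 * Same lock, but reported to lockdep under subclass 1
	 * (SINGLE_DEPTH_NESTING), so an ABBA report that is impossible
	 * in this context is suppressed. Without CONFIG_DEBUG_LOCK_ALLOC
	 * this compiles down to a plain mutex_lock().
	 */
	mutex_lock_nested(&example_mutex, SINGLE_DEPTH_NESTING);
	/* ... work that is only reachable before sysfs is usable ... */
	mutex_unlock(&example_mutex);
}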
diff --git a/kernel/resource.c b/kernel/resource.c
index a358142ff48f..2eb553d9b517 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -277,7 +277,7 @@ walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
 	int ret = -1;
 	res.start = (u64) start_pfn << PAGE_SHIFT;
 	res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
-	res.flags = IORESOURCE_MEM;
+	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 	orig_end = res.end;
 	while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) {
 		pfn = (unsigned long)(res.start >> PAGE_SHIFT);
diff --git a/kernel/sched.c b/kernel/sched.c
index b18f231a4875..38933cafea8a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -52,7 +52,6 @@
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/percpu.h>
-#include <linux/cpu_acct.h>
 #include <linux/kthread.h>
 #include <linux/seq_file.h>
 #include <linux/sysctl.h>
@@ -217,15 +216,15 @@ static inline struct task_group *task_group(struct task_struct *p)
 }
 
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
-static inline void set_task_cfs_rq(struct task_struct *p)
+static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu)
 {
-	p->se.cfs_rq = task_group(p)->cfs_rq[task_cpu(p)];
-	p->se.parent = task_group(p)->se[task_cpu(p)];
+	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
+	p->se.parent = task_group(p)->se[cpu];
 }
 
 #else
 
-static inline void set_task_cfs_rq(struct task_struct *p) { }
+static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu) { }
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
@@ -456,18 +455,18 @@ static void update_rq_clock(struct rq *rq)
  */
 enum {
 	SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
-	SCHED_FEAT_START_DEBIT = 2,
-	SCHED_FEAT_TREE_AVG = 4,
-	SCHED_FEAT_APPROX_AVG = 8,
-	SCHED_FEAT_WAKEUP_PREEMPT = 16,
+	SCHED_FEAT_WAKEUP_PREEMPT = 2,
+	SCHED_FEAT_START_DEBIT = 4,
+	SCHED_FEAT_TREE_AVG = 8,
+	SCHED_FEAT_APPROX_AVG = 16,
 };
 
 const_debug unsigned int sysctl_sched_features =
 	SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 |
+	SCHED_FEAT_WAKEUP_PREEMPT * 1 |
 	SCHED_FEAT_START_DEBIT * 1 |
 	SCHED_FEAT_TREE_AVG * 0 |
-	SCHED_FEAT_APPROX_AVG * 0 |
-	SCHED_FEAT_WAKEUP_PREEMPT * 1;
+	SCHED_FEAT_APPROX_AVG * 0;
 
 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
 
@@ -1023,10 +1022,16 @@ unsigned long weighted_cpuload(const int cpu)
 
 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 {
+	set_task_cfs_rq(p, cpu);
 #ifdef CONFIG_SMP
+	/*
+	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+	 * successfully executed on another CPU. We must ensure that updates of
+	 * per-task data have been completed by this moment.
+	 */
+	smp_wmb();
 	task_thread_info(p)->cpu = cpu;
 #endif
-	set_task_cfs_rq(p);
 }
 
 #ifdef CONFIG_SMP
@@ -3338,13 +3343,9 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
 {
 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
 	cputime64_t tmp;
-	struct rq *rq = this_rq();
 
 	p->utime = cputime_add(p->utime, cputime);
 
-	if (p != rq->idle)
-		cpuacct_charge(p, cputime);
-
 	/* Add user time to cpustat. */
 	tmp = cputime_to_cputime64(cputime);
 	if (TASK_NICE(p) > 0)
@@ -3395,10 +3396,8 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 	struct rq *rq = this_rq();
 	cputime64_t tmp;
 
-	if (p->flags & PF_VCPU) {
-		account_guest_time(p, cputime);
-		return;
-	}
+	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0))
+		return account_guest_time(p, cputime);
 
 	p->stime = cputime_add(p->stime, cputime);
 
@@ -3408,10 +3407,9 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 		cpustat->irq = cputime64_add(cpustat->irq, tmp);
 	else if (softirq_count())
 		cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
-	else if (p != rq->idle) {
+	else if (p != rq->idle)
 		cpustat->system = cputime64_add(cpustat->system, tmp);
-		cpuacct_charge(p, cputime);
-	} else if (atomic_read(&rq->nr_iowait) > 0)
+	else if (atomic_read(&rq->nr_iowait) > 0)
 		cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
 	else
 		cpustat->idle = cputime64_add(cpustat->idle, tmp);
@@ -3447,10 +3445,8 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
 			cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
 		else
 			cpustat->idle = cputime64_add(cpustat->idle, tmp);
-	} else {
+	} else
 		cpustat->steal = cputime64_add(cpustat->steal, tmp);
-		cpuacct_charge(p, -tmp);
-	}
 }
 
 /*
@@ -5286,23 +5282,9 @@ static void migrate_live_tasks(int src_cpu)
 }
 
 /*
- * activate_idle_task - move idle task to the _front_ of runqueue.
- */
-static void activate_idle_task(struct task_struct *p, struct rq *rq)
-{
-	update_rq_clock(rq);
-
-	if (p->state == TASK_UNINTERRUPTIBLE)
-		rq->nr_uninterruptible--;
-
-	enqueue_task(rq, p, 0);
-	inc_nr_running(p, rq);
-}
-
-/*
  * Schedules idle task to be the next runnable task on current CPU.
- * It does so by boosting its priority to highest possible and adding it to
- * the _front_ of the runqueue. Used by CPU offline code.
+ * It does so by boosting its priority to highest possible.
+ * Used by CPU offline code.
 */
 void sched_idle_next(void)
 {
@@ -5322,8 +5304,8 @@ void sched_idle_next(void)
 
 	__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
 
-	/* Add idle task to the _front_ of its priority queue: */
-	activate_idle_task(p, rq);
+	update_rq_clock(rq);
+	activate_task(rq, p, 0);
 
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
@@ -7097,8 +7079,10 @@ void sched_move_task(struct task_struct *tsk)
 
 	rq = task_rq_lock(tsk, &flags);
 
-	if (tsk->sched_class != &fair_sched_class)
+	if (tsk->sched_class != &fair_sched_class) {
+		set_task_cfs_rq(tsk, task_cpu(tsk));
 		goto done;
+	}
 
 	update_rq_clock(rq);
 
@@ -7111,7 +7095,7 @@ void sched_move_task(struct task_struct *tsk)
 		tsk->sched_class->put_prev_task(rq, tsk);
 	}
 
-	set_task_cfs_rq(tsk);
+	set_task_cfs_rq(tsk, task_cpu(tsk));
 
 	if (on_rq) {
 		if (unlikely(running))
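The smp_wmb() added to __set_task_cpu() is the usual publish barrier: finish all per-task updates first, then expose the new ->cpu value that other CPUs key off. A userspace sketch of the same ordering idiom, using C11 release semantics in place of smp_wmb(); the structure is hypothetical and for illustration only:

#include <stdatomic.h>

struct task_data {
	int cfs_rq_id;		/* per-task state that must be updated first */
	_Atomic int cpu;	/* published last; readers key off this */
};

static void set_task_cpu_sketch(struct task_data *t, int new_cpu, int rq_id)
{
	t->cfs_rq_id = rq_id;	/* analogous to set_task_cfs_rq(p, cpu) */
	/* release ordering plays the role of smp_wmb(): no earlier store
	 * may be reordered past the publication of ->cpu */
	atomic_store_explicit(&t->cpu, new_cpu, memory_order_release);
}

int main(void)
{
	struct task_data t = { 0 };

	set_task_cpu_sketch(&t, 2, 7);
	return atomic_load_explicit(&t.cpu, memory_order_acquire) == 2 ? 0 : 1;
}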
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index d3c03070872d..ee00da284b12 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -43,7 +43,7 @@ unsigned int sysctl_sched_min_granularity = 1000000ULL;
 /*
  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  */
-unsigned int sched_nr_latency = 20;
+static unsigned int sched_nr_latency = 20;
 
 /*
  * After fork, child runs first. (default) If set to 0 then
diff --git a/kernel/sys.c b/kernel/sys.c
index 304b5410d746..d1fe71eb4546 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1750,7 +1750,7 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
 }
 
 asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
-			   struct getcpu_cache __user *cache)
+			   struct getcpu_cache __user *unused)
 {
 	int err = 0;
 	int cpu = raw_smp_processor_id();
@@ -1758,24 +1758,6 @@ asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
 		err |= put_user(cpu, cpup);
 	if (nodep)
 		err |= put_user(cpu_to_node(cpu), nodep);
-	if (cache) {
-		/*
-		 * The cache is not needed for this implementation,
-		 * but make sure user programs pass something
-		 * valid. vsyscall implementations can instead make
-		 * good use of the cache. Only use t0 and t1 because
-		 * these are available in both 32bit and 64bit ABI (no
-		 * need for a compat_getcpu). 32bit has enough
-		 * padding
-		 */
-		unsigned long t0, t1;
-		get_user(t0, &cache->blob[0]);
-		get_user(t1, &cache->blob[1]);
-		t0++;
-		t1++;
-		put_user(t0, &cache->blob[0]);
-		put_user(t1, &cache->blob[1]);
-	}
 	return err ? -EFAULT : 0;
 }
 
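With the cache-touching block removed, the third argument of sys_getcpu() is simply ignored, so callers can keep passing NULL. A small runnable userspace example invoking the raw syscall on Linux:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned cpu, node;

	/* the third (getcpu_cache) argument is now unused by the kernel */
	if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
		printf("running on cpu %u, node %u\n", cpu, node);
	return 0;
}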
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 3a1744fed2b6..0deed82a6156 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2620,6 +2620,10 @@ static int deprecated_sysctl_warning(struct __sysctl_args *args)
 	int name[CTL_MAXNAME];
 	int i;
 
+	/* Check args->nlen. */
+	if (args->nlen < 0 || args->nlen > CTL_MAXNAME)
+		return -ENOTDIR;
+
 	/* Read in the sysctl name for better debug message logging */
 	for (i = 0; i < args->nlen; i++)
 		if (get_user(name[i], args->name + i))
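The added check matters because name[] is a fixed CTL_MAXNAME-sized stack array filled from the user-supplied args->nlen; without the bounds check, a hostile nlen would index past the array. A runnable sketch of the same validate-before-copy pattern, with hypothetical names:

#include <stdio.h>

#define CTL_MAXNAME 10

/* Reject a caller-supplied length before it can index past dst[]. */
static int read_name(int *dst, const int *src, int nlen)
{
	int i;

	if (nlen < 0 || nlen > CTL_MAXNAME)
		return -1;		/* the kernel returns -ENOTDIR here */
	for (i = 0; i < nlen; i++)
		dst[i] = src[i];
	return 0;
}

int main(void)
{
	int name[CTL_MAXNAME];
	int input[12] = { 0 };

	printf("nlen=12 -> %d (rejected)\n", read_name(name, input, 12));
	printf("nlen=3  -> %d (accepted)\n", read_name(name, input, 3));
	return 0;
}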
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index 5a2f2b2bf888..4abc6d2306f4 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -738,7 +738,7 @@ static struct trans_ctl_table trans_net_table[] = {
 	{ NET_ROSE, "rose", trans_net_rose_table },
 	{ NET_IPV6, "ipv6", trans_net_ipv6_table },
 	{ NET_X25, "x25", trans_net_x25_table },
-	{ NET_TR, "tr", trans_net_tr_table },
+	{ NET_TR, "token-ring", trans_net_tr_table },
 	{ NET_DECNET, "decnet", trans_net_decnet_table },
 	/* NET_ECONET not used */
 	{ NET_SCTP, "sctp", trans_net_sctp_table },
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 354e74bc17c1..07e86a828073 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -398,31 +398,31 @@ static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
 
 	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
 	file = fget_light(fd, &fput_needed);
-	if (file) {
-		size = nla_total_size(sizeof(struct cgroupstats));
+	if (!file)
+		return 0;
 
-		rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
-				size);
-		if (rc < 0)
-			goto err;
+	size = nla_total_size(sizeof(struct cgroupstats));
 
-		na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
-				sizeof(struct cgroupstats));
-		stats = nla_data(na);
-		memset(stats, 0, sizeof(*stats));
+	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
+			size);
+	if (rc < 0)
+		goto err;
 
-		rc = cgroupstats_build(stats, file->f_dentry);
-		if (rc < 0)
-			goto err;
+	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
+			sizeof(struct cgroupstats));
+	stats = nla_data(na);
+	memset(stats, 0, sizeof(*stats));
 
-		fput_light(file, fput_needed);
-		return send_reply(rep_skb, info->snd_pid);
+	rc = cgroupstats_build(stats, file->f_dentry);
+	if (rc < 0) {
+		nlmsg_free(rep_skb);
+		goto err;
 	}
 
+	rc = send_reply(rep_skb, info->snd_pid);
+
 err:
-	if (file)
-		fput_light(file, fput_needed);
-	nlmsg_free(rep_skb);
+	fput_light(file, fput_needed);
 	return rc;
 }
 
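The reshaped cgroupstats_user_cmd() bails out early when no file is attached to the fd, frees the reply buffer only on the one failure path where it has been allocated but will not be sent, and drops the file reference at a single exit point. A generic, runnable sketch of that acquire/early-return/common-exit shape, with stand-in resources rather than the netlink API:

#include <stdio.h>
#include <stdlib.h>

static int user_cmd_sketch(int have_file)
{
	char *reply;
	int rc = 0;

	if (!have_file)
		return 0;	/* nothing acquired, nothing to undo */

	reply = malloc(64);
	if (!reply) {
		rc = -1;
		goto err;	/* reply was never allocated: nothing to free */
	}
	/* ... build and send the reply; on a build failure, free it
	 * here before falling through to the common exit ... */
	free(reply);
err:
	/* common exit: release the file reference unconditionally */
	printf("releasing file reference, rc=%d\n", rc);
	return rc;
}

int main(void)
{
	return user_cmd_sketch(1);
}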
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index de6a2d6b3ebb..14a2ecf2b318 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -205,7 +205,7 @@ static void sync_cmos_clock(unsigned long dummy)
 		return;
 
 	getnstimeofday(&now);
-	if (abs(xtime.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
+	if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
 		fail = update_persistent_clock(now);
 
 	next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec;
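The one-liner fixes a timestamp mismatch: the proximity test now uses the same freshly sampled now.tv_nsec that update_persistent_clock() receives, instead of the global xtime. A runnable sketch of the "within half a tick of the 500 ms mark" test; the HZ value is assumed for illustration:

#include <stdio.h>
#include <stdlib.h>

#define NSEC_PER_SEC 1000000000L

int main(void)
{
	long tick_nsec = NSEC_PER_SEC / 250;	/* assumed HZ of 250 */
	long tv_nsec = 500001000L;		/* freshly read, like now.tv_nsec */

	/* CMOS/RTC clocks expect to be set close to the 500 ms boundary */
	if (labs(tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
		printf("within half a tick of 500 ms: update the CMOS clock\n");
	else
		printf("too far from the boundary: retry later\n");
	return 0;
}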