author		Paul Mundt <lethal@linux-sh.org>	2011-01-06 04:27:34 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2011-01-06 04:27:34 -0500
commit		1928e87bcf185f56008d0746f887b691c1cb8c4a (patch)
tree		bc8db9e9cb40b73742d05e2e68189bd647687249 /kernel
parent		ca9c20ce2b383032b71bdae9ec0b468d428ca8d4 (diff)
parent		3c0eee3fe6a3a1c745379547c7e7c904aa64f6d5 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
drivers/video/sh_mobile_lcdcfb.c
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/kthread.c		11
-rw-r--r--	kernel/taskstats.c		57
-rw-r--r--	kernel/trace/ring_buffer.c	9
-rw-r--r--	kernel/user.c			1
-rw-r--r--	kernel/watchdog.c		3
5 files changed, 66 insertions, 15 deletions
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 2dc3786349d1..ca61bbdd44b2 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -265,6 +265,17 @@ int kthreadd(void *unused)
 	return 0;
 }
 
+void __init_kthread_worker(struct kthread_worker *worker,
+				const char *name,
+				struct lock_class_key *key)
+{
+	spin_lock_init(&worker->lock);
+	lockdep_set_class_and_name(&worker->lock, key, name);
+	INIT_LIST_HEAD(&worker->work_list);
+	worker->task = NULL;
+}
+EXPORT_SYMBOL_GPL(__init_kthread_worker);
+
 /**
  * kthread_worker_fn - kthread function to process kthread_worker
  * @worker_ptr: pointer to initialized kthread_worker
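
The point of moving worker initialization out of line is that each call site can now hand in its own static lock_class_key, giving every worker's spinlock a distinct lockdep class. The header-side wrapper is not part of this diff; a sketch of its likely shape (an assumption based on the kthread_worker API of this period, not quoted from the patch):

/* Sketch of the header-side wrapper (assumed; the include/linux/kthread.h
 * half of the change is outside this diff). One static lock_class_key per
 * call site gives each worker's spinlock its own lockdep class, so taking
 * two different workers' locks no longer looks recursive to lockdep. */
#define init_kthread_worker(worker)					\
do {									\
	static struct lock_class_key __key;				\
	__init_kthread_worker((worker), "("#worker")->lock", &__key);	\
} while (0)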
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index c8231fb15708..3308fd7f1b52 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -349,25 +349,47 @@ static int parse(struct nlattr *na, struct cpumask *mask)
 	return ret;
 }
 
+#ifdef CONFIG_IA64
+#define TASKSTATS_NEEDS_PADDING 1
+#endif
+
 static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
 {
 	struct nlattr *na, *ret;
 	int aggr;
 
-	/* If we don't pad, we end up with alignment on a 4 byte boundary.
-	 * This causes lots of runtime warnings on systems requiring 8 byte
-	 * alignment */
-	u32 pids[2] = { pid, 0 };
-	int pid_size = ALIGN(sizeof(pid), sizeof(long));
-
 	aggr = (type == TASKSTATS_TYPE_PID)
 			? TASKSTATS_TYPE_AGGR_PID
 			: TASKSTATS_TYPE_AGGR_TGID;
 
+	/*
+	 * The taskstats structure is internally aligned on 8 byte
+	 * boundaries but the layout of the aggregrate reply, with
+	 * two NLA headers and the pid (each 4 bytes), actually
+	 * force the entire structure to be unaligned. This causes
+	 * the kernel to issue unaligned access warnings on some
+	 * architectures like ia64. Unfortunately, some software out there
+	 * doesn't properly unroll the NLA packet and assumes that the start
+	 * of the taskstats structure will always be 20 bytes from the start
+	 * of the netlink payload. Aligning the start of the taskstats
+	 * structure breaks this software, which we don't want. So, for now
+	 * the alignment only happens on architectures that require it
+	 * and those users will have to update to fixed versions of those
+	 * packages. Space is reserved in the packet only when needed.
+	 * This ifdef should be removed in several years e.g. 2012 once
+	 * we can be confident that fixed versions are installed on most
+	 * systems. We add the padding before the aggregate since the
+	 * aggregate is already a defined type.
+	 */
+#ifdef TASKSTATS_NEEDS_PADDING
+	if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0)
+		goto err;
+#endif
 	na = nla_nest_start(skb, aggr);
 	if (!na)
 		goto err;
-	if (nla_put(skb, type, pid_size, pids) < 0)
+
+	if (nla_put(skb, type, sizeof(pid), &pid) < 0)
 		goto err;
 	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
 	if (!ret)
@@ -456,6 +478,18 @@ out:
 	return rc;
 }
 
+static size_t taskstats_packet_size(void)
+{
+	size_t size;
+
+	size = nla_total_size(sizeof(u32)) +
+		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
+#ifdef TASKSTATS_NEEDS_PADDING
+	size += nla_total_size(0); /* Padding for alignment */
+#endif
+	return size;
+}
+
 static int cmd_attr_pid(struct genl_info *info)
 {
 	struct taskstats *stats;
@@ -464,8 +498,7 @@ static int cmd_attr_pid(struct genl_info *info)
 	u32 pid;
 	int rc;
 
-	size = nla_total_size(sizeof(u32)) +
-		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
+	size = taskstats_packet_size();
 
 	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
 	if (rc < 0)
@@ -494,8 +527,7 @@ static int cmd_attr_tgid(struct genl_info *info)
 	u32 tgid;
 	int rc;
 
-	size = nla_total_size(sizeof(u32)) +
-		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
+	size = taskstats_packet_size();
 
 	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
 	if (rc < 0)
@@ -570,8 +602,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
 	/*
 	 * Size includes space for nested attributes
 	 */
-	size = nla_total_size(sizeof(u32)) +
-		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
+	size = taskstats_packet_size();
 
 	is_thread_group = !!taskstats_tgid_alloc(tsk);
 	if (is_thread_group) {
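
The long comment in mk_reply() is the heart of this change: well-behaved userspace should locate struct taskstats by walking the netlink attribute chain rather than assuming it sits 20 bytes into the payload, and a parser that walks the chain skips the new optional TASKSTATS_TYPE_NULL padding attribute for free. A minimal userspace sketch of that walk (illustrative only; the function name and surrounding receive code are assumptions, not taken from a particular library):

#include <stddef.h>
#include <linux/netlink.h>
#include <linux/taskstats.h>

/* Walk the attributes of a TASKSTATS_CMD_NEW reply payload and return
 * the embedded struct taskstats, or NULL. Tolerates an optional
 * leading padding attribute because it matches on attribute type
 * instead of a fixed offset. */
static struct taskstats *find_taskstats(void *payload, int len)
{
	struct nlattr *na = payload;

	while (len >= NLA_HDRLEN && na->nla_len >= NLA_HDRLEN &&
	       na->nla_len <= len) {
		if (na->nla_type == TASKSTATS_TYPE_AGGR_PID ||
		    na->nla_type == TASKSTATS_TYPE_AGGR_TGID) {
			/* inside the nest: PID/TGID attr, then STATS attr */
			struct nlattr *nested =
				(void *)((char *)na + NLA_HDRLEN);
			int rest = na->nla_len - NLA_HDRLEN;

			while (rest >= NLA_HDRLEN &&
			       nested->nla_len >= NLA_HDRLEN) {
				if (nested->nla_type == TASKSTATS_TYPE_STATS)
					return (void *)((char *)nested +
							NLA_HDRLEN);
				rest -= NLA_ALIGN(nested->nla_len);
				nested = (void *)((char *)nested +
						  NLA_ALIGN(nested->nla_len));
			}
			return NULL;
		}
		len -= NLA_ALIGN(na->nla_len);
		na = (void *)((char *)na + NLA_ALIGN(na->nla_len));
	}
	return NULL;
}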
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 9ed509a015d8..bd1c35a4fbcc 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3853,6 +3853,13 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 
 		/* Need to copy one event at a time */
 		do {
+			/* We need the size of one event, because
+			 * rb_advance_reader only advances by one event,
+			 * whereas rb_event_ts_length may include the size of
+			 * one or two events.
+			 * We have already ensured there's enough space if this
+			 * is a time extend. */
+			size = rb_event_length(event);
 			memcpy(bpage->data + pos, rpage->data + rpos, size);
 
 			len -= size;
@@ -3867,7 +3874,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 			event = rb_reader_event(cpu_buffer);
 			/* Always keep the time extend and data together */
 			size = rb_event_ts_length(event);
-		} while (len > size);
+		} while (len >= size);
 
 		/* update bpage */
 		local_set(&bpage->commit, pos);
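
Two things change in ring_buffer_read_page(): the copy length is now recomputed per event with rb_event_length(), since rb_event_ts_length() may cover a time extend plus its data event while rb_advance_reader() only moves past one; and the loop condition becomes inclusive, because with "len > size" an event that exactly filled the remaining space was silently dropped. A toy model of that boundary case (plain C, not kernel code):

#include <stdio.h>

/* Model of the do/while predicate: after copying an event, the reader
 * continues only if the *next* event fits in the space left. */
static int events_copied(int len, int size, int inclusive)
{
	int copied = 0;

	do {
		copied++;	/* stands in for "memcpy one event" */
		len -= size;
	} while (inclusive ? len >= size : len > size);
	return copied;
}

int main(void)
{
	/* 16 bytes of space, 8-byte events: the second event exactly
	 * fills the remainder. */
	printf("old   (len >  size): %d events\n", events_copied(16, 8, 0));
	printf("fixed (len >= size): %d events\n", events_copied(16, 8, 1));
	return 0;
}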
diff --git a/kernel/user.c b/kernel/user.c
index 2c7d8d5914b1..5c598ca781df 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -158,6 +158,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 	spin_lock_irq(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
 	if (up) {
+		put_user_ns(ns);
 		key_put(new->uid_keyring);
 		key_put(new->session_keyring);
 		kmem_cache_free(uid_cachep, new);
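
The one-line kernel/user.c change plugs a reference leak on the lookup race: alloc_uid() speculatively allocates a user_struct holding a reference on the user namespace, and when a concurrent caller has already inserted the same uid, everything the loser holds must be released, namespace reference included. A minimal userspace model of the pattern (illustrative only, not kernel code):

#include <stdio.h>

struct namespace { int refcount; };

static void get_ns(struct namespace *ns) { ns->refcount++; }
static void put_ns(struct namespace *ns) { ns->refcount--; }

int main(void)
{
	struct namespace ns = { .refcount = 1 };
	int lost_race = 1;	/* another CPU inserted this uid first */

	get_ns(&ns);		/* reference taken for the new user_struct */

	if (lost_race) {
		put_ns(&ns);	/* the fix: drop it when discarding */
		/* keyrings and the allocation are released here too */
	}

	printf("refcount = %d (anything above 1 is a leak)\n", ns.refcount);
	return 0;
}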
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 6e3c41a4024c..5b082156cd21 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -364,7 +364,8 @@ static int watchdog_nmi_enable(int cpu)
 		goto out_save;
 	}
 
-	printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event);
+	printk(KERN_ERR "NMI watchdog disabled for cpu%i: unable to create perf event: %ld\n",
+	       cpu, PTR_ERR(event));
 	return PTR_ERR(event);
 
 	/* success path */
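
The watchdog message changes because on failure perf_event_create_kernel_counter() returns an errno encoded as a pointer, so printing it with %p shows an opaque address-like value rather than the reason; PTR_ERR() recovers the errno. A standalone model of the convention (mirrors the shape of include/linux/err.h, simplified; the -19 errno is just an example):

#include <stdio.h>

/* Simplified from include/linux/err.h: the top 4095 values of the
 * address space encode errnos -MAX_ERRNO..-1. */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *event = ERR_PTR(-19);	/* e.g. -ENODEV */

	if (IS_ERR(event)) {
		printf("with %%p : %p\n", event);		/* opaque */
		printf("with %%ld: %ld\n", PTR_ERR(event));	/* -19 */
	}
	return 0;
}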