Diffstat (limited to 'kernel/taskstats.c')
-rw-r--r--	kernel/taskstats.c	62
1 file changed, 46 insertions(+), 16 deletions(-)
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index c8231fb15708..69691eb4b715 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -89,8 +89,7 @@ static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
 		return -ENOMEM;
 
 	if (!info) {
-		int seq = get_cpu_var(taskstats_seqnum)++;
-		put_cpu_var(taskstats_seqnum);
+		int seq = this_cpu_inc_return(taskstats_seqnum) - 1;
 
 		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
 	} else
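
A note on the "- 1" in the replacement: the old code post-increments, so seq gets the counter's value before the add, while this_cpu_inc_return() yields the value after it. Subtracting one therefore keeps the generated sequence numbers identical while folding the get/put pair into a single preempt-safe operation. A minimal userspace sketch of that arithmetic (plain ints stand in for the per-cpu counter; this is an illustration, not the kernel API):

#include <assert.h>

int main(void)
{
	int a = 5, b = 5;

	int old_seq = a++;		/* like get_cpu_var(...)++: fetch, then add */
	int new_seq = (b += 1) - 1;	/* like this_cpu_inc_return(...) - 1 */

	assert(old_seq == new_seq);	/* both yield 5 */
	assert(a == b);			/* both counters end at 6 */
	return 0;
}
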
@@ -349,25 +348,47 @@ static int parse(struct nlattr *na, struct cpumask *mask)
 	return ret;
 }
 
+#ifdef CONFIG_IA64
+#define TASKSTATS_NEEDS_PADDING 1
+#endif
+
 static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
 {
 	struct nlattr *na, *ret;
 	int aggr;
 
-	/* If we don't pad, we end up with alignment on a 4 byte boundary.
-	 * This causes lots of runtime warnings on systems requiring 8 byte
-	 * alignment */
-	u32 pids[2] = { pid, 0 };
-	int pid_size = ALIGN(sizeof(pid), sizeof(long));
-
 	aggr = (type == TASKSTATS_TYPE_PID)
 			? TASKSTATS_TYPE_AGGR_PID
 			: TASKSTATS_TYPE_AGGR_TGID;
 
+	/*
+	 * The taskstats structure is internally aligned on 8-byte
+	 * boundaries but the layout of the aggregate reply, with
+	 * two NLA headers and the pid (each 4 bytes), actually
+	 * forces the entire structure to be unaligned. This causes
+	 * the kernel to issue unaligned access warnings on some
+	 * architectures like ia64. Unfortunately, some software out there
+	 * doesn't properly unroll the NLA packet and assumes that the start
+	 * of the taskstats structure will always be 20 bytes from the start
+	 * of the netlink payload. Aligning the start of the taskstats
+	 * structure breaks this software, which we don't want. So, for now,
+	 * the alignment only happens on architectures that require it,
+	 * and those users will have to update to fixed versions of those
+	 * packages. Space is reserved in the packet only when needed.
+	 * This ifdef should be removed in several years, e.g. 2012, once
+	 * we can be confident that fixed versions are installed on most
+	 * systems. We add the padding before the aggregate since the
+	 * aggregate is already a defined type.
+	 */
+#ifdef TASKSTATS_NEEDS_PADDING
+	if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0)
+		goto err;
+#endif
 	na = nla_nest_start(skb, aggr);
 	if (!na)
 		goto err;
-	if (nla_put(skb, type, pid_size, pids) < 0)
+
+	if (nla_put(skb, type, sizeof(pid), &pid) < 0)
 		goto err;
 	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
 	if (!ret)
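
The long comment above is the crux of the patch: the 20-byte figure is genlmsghdr (4) + aggregate nest header (4) + pid attribute (4 + 4) + stats attribute header (4), and a well-behaved consumer must walk the attributes rather than hard-code that offset, so the optional leading TASKSTATS_TYPE_NULL pad is harmless to it. A hedged userspace sketch of such a walker, using only the uapi macros from <linux/netlink.h>; the function name find_stats is mine and error handling is minimal:

#include <linux/genetlink.h>
#include <linux/netlink.h>
#include <linux/taskstats.h>

/* Walk the attribute stream; tolerate a leading TASKSTATS_TYPE_NULL pad. */
static struct taskstats *find_stats(struct nlmsghdr *nlh)
{
	char *p = (char *)NLMSG_DATA(nlh) + GENL_HDRLEN;
	int len = NLMSG_PAYLOAD(nlh, GENL_HDRLEN);

	while (len >= NLA_HDRLEN) {
		struct nlattr *na = (struct nlattr *)p;

		if (na->nla_len < NLA_HDRLEN)
			return NULL;	/* malformed attribute */

		if (na->nla_type == TASKSTATS_TYPE_AGGR_PID ||
		    na->nla_type == TASKSTATS_TYPE_AGGR_TGID) {
			/* Descend into the nested aggregate. */
			char *q = p + NLA_HDRLEN;
			int nlen = na->nla_len - NLA_HDRLEN;

			while (nlen >= NLA_HDRLEN) {
				struct nlattr *nested = (struct nlattr *)q;

				/* Note: without the ia64 padding this pointer
				 * may only be 4-byte aligned; memcpy() into a
				 * local struct if that matters to you. */
				if (nested->nla_type == TASKSTATS_TYPE_STATS)
					return (struct taskstats *)(q + NLA_HDRLEN);
				q += NLA_ALIGN(nested->nla_len);
				nlen -= NLA_ALIGN(nested->nla_len);
			}
		}
		p += NLA_ALIGN(na->nla_len);
		len -= NLA_ALIGN(na->nla_len);
	}
	return NULL;	/* no stats attribute in this message */
}
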
@@ -456,6 +477,18 @@ out:
 	return rc;
 }
 
+static size_t taskstats_packet_size(void)
+{
+	size_t size;
+
+	size = nla_total_size(sizeof(u32)) +
+		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
+#ifdef TASKSTATS_NEEDS_PADDING
+	size += nla_total_size(0); /* Padding for alignment */
+#endif
+	return size;
+}
+
 static int cmd_attr_pid(struct genl_info *info)
 {
 	struct taskstats *stats;
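
For reference, the kernel's nla_total_size(payload) is NLA_ALIGN(NLA_HDRLEN + payload): a 4-byte attribute header plus the payload, rounded up to a 4-byte boundary. A small userspace sketch, re-implementing the helper under that assumption, shows what the new function reserves:

#include <stdio.h>
#include <linux/netlink.h>
#include <linux/taskstats.h>

/* Userspace stand-in for the kernel's nla_total_size() helper. */
static int nla_total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	int size = nla_total_size(sizeof(__u32))		/* pid/tgid attr: 8 bytes */
		 + nla_total_size(sizeof(struct taskstats))	/* stats attribute        */
		 + nla_total_size(0);				/* nest header: 4 bytes   */

	printf("base reply: %d bytes\n", size);
	printf("with TASKSTATS_NEEDS_PADDING: %d bytes\n",
	       size + nla_total_size(0));	/* one more empty attribute */
	return 0;
}
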
@@ -464,8 +497,7 @@ static int cmd_attr_pid(struct genl_info *info)
 	u32 pid;
 	int rc;
 
-	size = nla_total_size(sizeof(u32)) +
-		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
+	size = taskstats_packet_size();
 
 	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
 	if (rc < 0)
@@ -494,8 +526,7 @@ static int cmd_attr_tgid(struct genl_info *info)
 	u32 tgid;
 	int rc;
 
-	size = nla_total_size(sizeof(u32)) +
-		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
+	size = taskstats_packet_size();
 
 	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
 	if (rc < 0)
@@ -570,8 +601,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
 	/*
 	 * Size includes space for nested attributes
 	 */
-	size = nla_total_size(sizeof(u32)) +
-		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
+	size = taskstats_packet_size();
 
 	is_thread_group = !!taskstats_tgid_alloc(tsk);
 	if (is_thread_group) {
@@ -581,7 +611,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
 		fill_tgid_exit(tsk);
 	}
 
-	listeners = &__raw_get_cpu_var(listener_array);
+	listeners = __this_cpu_ptr(&listener_array);
 	if (list_empty(&listeners->list))
 		return;
 
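
/*
 * Annotation (mine, not from the patch): both spellings evaluate to a
 * pointer to this CPU's listener_array without a preempt_disable()/
 * preempt_enable() pair around the access; the new form is simply the
 * this_cpu_* API equivalent of the old __raw_get_cpu_var() accessor,
 * matching the taskstats_seqnum conversion in the first hunk.
 */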