diff options
Diffstat (limited to 'kernel/taskstats.c')
-rw-r--r-- | kernel/taskstats.c | 169 |
1 file changed, 85 insertions, 84 deletions
diff --git a/kernel/taskstats.c b/kernel/taskstats.c index d3d28919d4b4..4c3476fa058d 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c | |||
@@ -34,7 +34,7 @@ | |||
34 | 34 | ||
35 | static DEFINE_PER_CPU(__u32, taskstats_seqnum) = { 0 }; | 35 | static DEFINE_PER_CPU(__u32, taskstats_seqnum) = { 0 }; |
36 | static int family_registered; | 36 | static int family_registered; |
37 | kmem_cache_t *taskstats_cache; | 37 | struct kmem_cache *taskstats_cache; |
38 | 38 | ||
39 | static struct genl_family family = { | 39 | static struct genl_family family = { |
40 | .id = GENL_ID_GENERATE, | 40 | .id = GENL_ID_GENERATE, |
@@ -69,7 +69,7 @@ enum actions { | |||
69 | }; | 69 | }; |
70 | 70 | ||
71 | static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp, | 71 | static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp, |
72 | void **replyp, size_t size) | 72 | size_t size) |
73 | { | 73 | { |
74 | struct sk_buff *skb; | 74 | struct sk_buff *skb; |
75 | void *reply; | 75 | void *reply; |
@@ -94,7 +94,6 @@ static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp, | |||
94 | } | 94 | } |
95 | 95 | ||
96 | *skbp = skb; | 96 | *skbp = skb; |
97 | *replyp = reply; | ||
98 | return 0; | 97 | return 0; |
99 | } | 98 | } |
100 | 99 | ||
@@ -119,10 +118,10 @@ static int send_reply(struct sk_buff *skb, pid_t pid) | |||
119 | /* | 118 | /* |
120 | * Send taskstats data in @skb to listeners registered for @cpu's exit data | 119 | * Send taskstats data in @skb to listeners registered for @cpu's exit data |
121 | */ | 120 | */ |
122 | static void send_cpu_listeners(struct sk_buff *skb, unsigned int cpu) | 121 | static void send_cpu_listeners(struct sk_buff *skb, |
122 | struct listener_list *listeners) | ||
123 | { | 123 | { |
124 | struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data); | 124 | struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data); |
125 | struct listener_list *listeners; | ||
126 | struct listener *s, *tmp; | 125 | struct listener *s, *tmp; |
127 | struct sk_buff *skb_next, *skb_cur = skb; | 126 | struct sk_buff *skb_next, *skb_cur = skb; |
128 | void *reply = genlmsg_data(genlhdr); | 127 | void *reply = genlmsg_data(genlhdr); |
@@ -135,7 +134,6 @@ static void send_cpu_listeners(struct sk_buff *skb, unsigned int cpu) | |||
135 | } | 134 | } |
136 | 135 | ||
137 | rc = 0; | 136 | rc = 0; |
138 | listeners = &per_cpu(listener_array, cpu); | ||
139 | down_read(&listeners->sem); | 137 | down_read(&listeners->sem); |
140 | list_for_each_entry(s, &listeners->list, list) { | 138 | list_for_each_entry(s, &listeners->list, list) { |
141 | skb_next = NULL; | 139 | skb_next = NULL; |
@@ -186,6 +184,7 @@ static int fill_pid(pid_t pid, struct task_struct *tsk, | |||
186 | } else | 184 | } else |
187 | get_task_struct(tsk); | 185 | get_task_struct(tsk); |
188 | 186 | ||
187 | memset(stats, 0, sizeof(*stats)); | ||
189 | /* | 188 | /* |
190 | * Each accounting subsystem adds calls to its functions to | 189 | * Each accounting subsystem adds calls to its functions to |
191 | * fill in relevant parts of struct taskstsats as follows | 190 | * fill in relevant parts of struct taskstsats as follows |
@@ -228,6 +227,8 @@ static int fill_tgid(pid_t tgid, struct task_struct *first, | |||
228 | 227 | ||
229 | if (first->signal->stats) | 228 | if (first->signal->stats) |
230 | memcpy(stats, first->signal->stats, sizeof(*stats)); | 229 | memcpy(stats, first->signal->stats, sizeof(*stats)); |
230 | else | ||
231 | memset(stats, 0, sizeof(*stats)); | ||
231 | 232 | ||
232 | tsk = first; | 233 | tsk = first; |
233 | do { | 234 | do { |
@@ -344,14 +345,36 @@ static int parse(struct nlattr *na, cpumask_t *mask) | |||
344 | return ret; | 345 | return ret; |
345 | } | 346 | } |
346 | 347 | ||
348 | static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid) | ||
349 | { | ||
350 | struct nlattr *na, *ret; | ||
351 | int aggr; | ||
352 | |||
353 | aggr = (type == TASKSTATS_TYPE_PID) | ||
354 | ? TASKSTATS_TYPE_AGGR_PID | ||
355 | : TASKSTATS_TYPE_AGGR_TGID; | ||
356 | |||
357 | na = nla_nest_start(skb, aggr); | ||
358 | if (!na) | ||
359 | goto err; | ||
360 | if (nla_put(skb, type, sizeof(pid), &pid) < 0) | ||
361 | goto err; | ||
362 | ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats)); | ||
363 | if (!ret) | ||
364 | goto err; | ||
365 | nla_nest_end(skb, na); | ||
366 | |||
367 | return nla_data(ret); | ||
368 | err: | ||
369 | return NULL; | ||
370 | } | ||
371 | |||
347 | static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) | 372 | static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) |
348 | { | 373 | { |
349 | int rc = 0; | 374 | int rc = 0; |
350 | struct sk_buff *rep_skb; | 375 | struct sk_buff *rep_skb; |
351 | struct taskstats stats; | 376 | struct taskstats *stats; |
352 | void *reply; | ||
353 | size_t size; | 377 | size_t size; |
354 | struct nlattr *na; | ||
355 | cpumask_t mask; | 378 | cpumask_t mask; |
356 | 379 | ||
357 | rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask); | 380 | rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask); |
@@ -372,83 +395,71 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) | |||
372 | size = nla_total_size(sizeof(u32)) + | 395 | size = nla_total_size(sizeof(u32)) + |
373 | nla_total_size(sizeof(struct taskstats)) + nla_total_size(0); | 396 | nla_total_size(sizeof(struct taskstats)) + nla_total_size(0); |
374 | 397 | ||
375 | memset(&stats, 0, sizeof(stats)); | 398 | rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size); |
376 | rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, &reply, size); | ||
377 | if (rc < 0) | 399 | if (rc < 0) |
378 | return rc; | 400 | return rc; |
379 | 401 | ||
402 | rc = -EINVAL; | ||
380 | if (info->attrs[TASKSTATS_CMD_ATTR_PID]) { | 403 | if (info->attrs[TASKSTATS_CMD_ATTR_PID]) { |
381 | u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]); | 404 | u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]); |
382 | rc = fill_pid(pid, NULL, &stats); | 405 | stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid); |
383 | if (rc < 0) | 406 | if (!stats) |
384 | goto err; | 407 | goto err; |
385 | 408 | ||
386 | na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID); | 409 | rc = fill_pid(pid, NULL, stats); |
387 | NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, pid); | 410 | if (rc < 0) |
388 | NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS, | 411 | goto err; |
389 | stats); | ||
390 | } else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) { | 412 | } else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) { |
391 | u32 tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]); | 413 | u32 tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]); |
392 | rc = fill_tgid(tgid, NULL, &stats); | 414 | stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid); |
393 | if (rc < 0) | 415 | if (!stats) |
394 | goto err; | 416 | goto err; |
395 | 417 | ||
396 | na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID); | 418 | rc = fill_tgid(tgid, NULL, stats); |
397 | NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, tgid); | 419 | if (rc < 0) |
398 | NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS, | 420 | goto err; |
399 | stats); | 421 | } else |
400 | } else { | ||
401 | rc = -EINVAL; | ||
402 | goto err; | 422 | goto err; |
403 | } | ||
404 | |||
405 | nla_nest_end(rep_skb, na); | ||
406 | 423 | ||
407 | return send_reply(rep_skb, info->snd_pid); | 424 | return send_reply(rep_skb, info->snd_pid); |
408 | |||
409 | nla_put_failure: | ||
410 | rc = genlmsg_cancel(rep_skb, reply); | ||
411 | err: | 425 | err: |
412 | nlmsg_free(rep_skb); | 426 | nlmsg_free(rep_skb); |
413 | return rc; | 427 | return rc; |
414 | } | 428 | } |
415 | 429 | ||
416 | void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu) | 430 | static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk) |
417 | { | 431 | { |
418 | struct listener_list *listeners; | 432 | struct signal_struct *sig = tsk->signal; |
419 | struct taskstats *tmp; | 433 | struct taskstats *stats; |
420 | /* | ||
421 | * This is the cpu on which the task is exiting currently and will | ||
422 | * be the one for which the exit event is sent, even if the cpu | ||
423 | * on which this function is running changes later. | ||
424 | */ | ||
425 | *mycpu = raw_smp_processor_id(); | ||
426 | 434 | ||
427 | *ptidstats = NULL; | 435 | if (sig->stats || thread_group_empty(tsk)) |
428 | tmp = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL); | 436 | goto ret; |
429 | if (!tmp) | ||
430 | return; | ||
431 | 437 | ||
432 | listeners = &per_cpu(listener_array, *mycpu); | 438 | /* No problem if kmem_cache_zalloc() fails */ |
433 | down_read(&listeners->sem); | 439 | stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL); |
434 | if (!list_empty(&listeners->list)) { | 440 | |
435 | *ptidstats = tmp; | 441 | spin_lock_irq(&tsk->sighand->siglock); |
436 | tmp = NULL; | 442 | if (!sig->stats) { |
443 | sig->stats = stats; | ||
444 | stats = NULL; | ||
437 | } | 445 | } |
438 | up_read(&listeners->sem); | 446 | spin_unlock_irq(&tsk->sighand->siglock); |
439 | kfree(tmp); | 447 | |
448 | if (stats) | ||
449 | kmem_cache_free(taskstats_cache, stats); | ||
450 | ret: | ||
451 | return sig->stats; | ||
440 | } | 452 | } |
441 | 453 | ||
442 | /* Send pid data out on exit */ | 454 | /* Send pid data out on exit */ |
443 | void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats, | 455 | void taskstats_exit(struct task_struct *tsk, int group_dead) |
444 | int group_dead, unsigned int mycpu) | ||
445 | { | 456 | { |
446 | int rc; | 457 | int rc; |
458 | struct listener_list *listeners; | ||
459 | struct taskstats *stats; | ||
447 | struct sk_buff *rep_skb; | 460 | struct sk_buff *rep_skb; |
448 | void *reply; | ||
449 | size_t size; | 461 | size_t size; |
450 | int is_thread_group; | 462 | int is_thread_group; |
451 | struct nlattr *na; | ||
452 | 463 | ||
453 | if (!family_registered) | 464 | if (!family_registered) |
454 | return; | 465 | return; |
@@ -459,7 +470,7 @@ void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats, | |||
459 | size = nla_total_size(sizeof(u32)) + | 470 | size = nla_total_size(sizeof(u32)) + |
460 | nla_total_size(sizeof(struct taskstats)) + nla_total_size(0); | 471 | nla_total_size(sizeof(struct taskstats)) + nla_total_size(0); |
461 | 472 | ||
462 | is_thread_group = (tsk->signal->stats != NULL); | 473 | is_thread_group = !!taskstats_tgid_alloc(tsk); |
463 | if (is_thread_group) { | 474 | if (is_thread_group) { |
464 | /* PID + STATS + TGID + STATS */ | 475 | /* PID + STATS + TGID + STATS */ |
465 | size = 2 * size; | 476 | size = 2 * size; |
@@ -467,49 +478,39 @@ void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats, | |||
467 | fill_tgid_exit(tsk); | 478 | fill_tgid_exit(tsk); |
468 | } | 479 | } |
469 | 480 | ||
470 | if (!tidstats) | 481 | listeners = &__raw_get_cpu_var(listener_array); |
482 | if (list_empty(&listeners->list)) | ||
471 | return; | 483 | return; |
472 | 484 | ||
473 | rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, &reply, size); | 485 | rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size); |
474 | if (rc < 0) | ||
475 | goto ret; | ||
476 | |||
477 | rc = fill_pid(tsk->pid, tsk, tidstats); | ||
478 | if (rc < 0) | 486 | if (rc < 0) |
479 | goto err_skb; | 487 | return; |
480 | 488 | ||
481 | na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID); | 489 | stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, tsk->pid); |
482 | NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, (u32)tsk->pid); | 490 | if (!stats) |
483 | NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS, | 491 | goto err; |
484 | *tidstats); | ||
485 | nla_nest_end(rep_skb, na); | ||
486 | 492 | ||
487 | if (!is_thread_group) | 493 | rc = fill_pid(tsk->pid, tsk, stats); |
488 | goto send; | 494 | if (rc < 0) |
495 | goto err; | ||
489 | 496 | ||
490 | /* | 497 | /* |
491 | * Doesn't matter if tsk is the leader or the last group member leaving | 498 | * Doesn't matter if tsk is the leader or the last group member leaving |
492 | */ | 499 | */ |
493 | if (!group_dead) | 500 | if (!is_thread_group || !group_dead) |
494 | goto send; | 501 | goto send; |
495 | 502 | ||
496 | na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID); | 503 | stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tsk->tgid); |
497 | NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, (u32)tsk->tgid); | 504 | if (!stats) |
498 | /* No locking needed for tsk->signal->stats since group is dead */ | 505 | goto err; |
499 | NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS, | 506 | |
500 | *tsk->signal->stats); | 507 | memcpy(stats, tsk->signal->stats, sizeof(*stats)); |
501 | nla_nest_end(rep_skb, na); | ||
502 | 508 | ||
503 | send: | 509 | send: |
504 | send_cpu_listeners(rep_skb, mycpu); | 510 | send_cpu_listeners(rep_skb, listeners); |
505 | return; | 511 | return; |
506 | 512 | err: | |
507 | nla_put_failure: | ||
508 | genlmsg_cancel(rep_skb, reply); | ||
509 | err_skb: | ||
510 | nlmsg_free(rep_skb); | 513 | nlmsg_free(rep_skb); |
511 | ret: | ||
512 | return; | ||
513 | } | 514 | } |
514 | 515 | ||
515 | static struct genl_ops taskstats_ops = { | 516 | static struct genl_ops taskstats_ops = { |