author     Tejun Heo <tj@kernel.org>      2012-03-05 16:15:14 -0500
committer  Jens Axboe <axboe@kernel.dk>   2012-03-06 15:27:23 -0500
commit     0381411e4b1a52cee134eb73750e5e3cc1155d09 (patch)
tree       32aa1d0b8a2ca8277e60e8b78dd731d193440924 /block/blk-cgroup.c
parent     923adde1be1df57cebd80c563058e503376645e8 (diff)
blkcg: let blkcg core handle policy private data allocation
Currently, blkg's are embedded in each blkcg policy's private data
structure and are thus allocated and freed by the policies. This leads
to duplicated code across policies, hinders implementing the common
parts in blkcg core with strong semantics, and forces duplicate blkg's
for the same cgroup-q association.
This patch introduces struct blkg_policy_data, a separate data
structure chained from the blkg. A policy specifies the amount of
private data it needs in blkio_policy_type->pdata_size, and blkcg core
takes care of allocating it along with the blkg; the private data can
be reached with blkg_to_pdata(), and the owning blkg can be determined
from the pdata with pdata_to_blkg(). The blkio_alloc_group_fn() method
is accordingly updated to blkio_init_group_fn().
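Sketched below is roughly what the new chaining and the two helpers
amount to. struct blkg_policy_data and the helpers live in the
blk-cgroup header rather than in the file diffed below, so the exact
field names and signatures shown here are illustrative, not
authoritative:

/* policy private data, chained from the blkg and sized by pol->pdata_size */
struct blkg_policy_data {
	struct blkio_group *blkg;	/* back-pointer, set up by blkg_alloc() */
	char pdata[];			/* the policy's private area */
};

/* blkg -> policy private data */
static inline void *blkg_to_pdata(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	return blkg ? blkg->pd->pdata : NULL;
}

/* policy private data -> owning blkg */
static inline struct blkio_group *pdata_to_blkg(void *pdata,
						struct blkio_policy_type *pol)
{
	if (pdata) {
		struct blkg_policy_data *pd =
			container_of(pdata, struct blkg_policy_data, pdata);
		return pd->blkg;
	}
	return NULL;
}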
For consistency, tg_of_blkg() and cfqg_of_blkg() are replaced with
blkg_to_tg() and blkg_to_cfqg() respectively, and functions to map in
the reverse direction are added.
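As an illustration of the rename (the real definitions live in
blk-throttle.c and cfq-iosched.c, outside this diff, and the policy
descriptor name below is assumed from the rest of the tree), the
per-policy helpers become thin wrappers around the generic pair:

static inline struct throtl_grp *blkg_to_tg(struct blkio_group *blkg)
{
	return blkg_to_pdata(blkg, &blkio_policy_throtl);
}

static inline struct blkio_group *tg_to_blkg(struct throtl_grp *tg)
{
	return pdata_to_blkg(tg, &blkio_policy_throtl);
}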
Except that policy-specific data now lives in a data structure
separate from the blkg, this patch doesn't introduce any functional
difference.
This will be used to unify blkg's for different policies.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--   block/blk-cgroup.c   86
1 file changed, 67 insertions, 19 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 266c0707d588..14367499cfed 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -422,6 +422,70 @@ void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
 
+/**
+ * blkg_free - free a blkg
+ * @blkg: blkg to free
+ *
+ * Free @blkg which may be partially allocated.
+ */
+static void blkg_free(struct blkio_group *blkg)
+{
+	if (blkg) {
+		free_percpu(blkg->stats_cpu);
+		kfree(blkg->pd);
+		kfree(blkg);
+	}
+}
+
+/**
+ * blkg_alloc - allocate a blkg
+ * @blkcg: block cgroup the new blkg is associated with
+ * @q: request_queue the new blkg is associated with
+ * @pol: policy the new blkg is associated with
+ *
+ * Allocate a new blkg assocating @blkcg and @q for @pol.
+ *
+ * FIXME: Should be called with queue locked but currently isn't due to
+ * percpu stat breakage.
+ */
+static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
+				      struct request_queue *q,
+				      struct blkio_policy_type *pol)
+{
+	struct blkio_group *blkg;
+
+	/* alloc and init base part */
+	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
+	if (!blkg)
+		return NULL;
+
+	spin_lock_init(&blkg->stats_lock);
+	rcu_assign_pointer(blkg->q, q);
+	blkg->blkcg = blkcg;
+	blkg->plid = pol->plid;
+	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
+
+	/* alloc per-policy data */
+	blkg->pd = kzalloc_node(sizeof(*blkg->pd) + pol->pdata_size, GFP_ATOMIC,
+				q->node);
+	if (!blkg->pd) {
+		blkg_free(blkg);
+		return NULL;
+	}
+
+	/* broken, read comment in the callsite */
+	blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
+	if (!blkg->stats_cpu) {
+		blkg_free(blkg);
+		return NULL;
+	}
+
+	/* attach pd to blkg and invoke per-policy init */
+	blkg->pd->blkg = blkg;
+	pol->ops.blkio_init_group_fn(blkg);
+	return blkg;
+}
+
 struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 				       struct request_queue *q,
 				       enum blkio_policy_id plid,
@@ -463,19 +527,7 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 	spin_unlock_irq(q->queue_lock);
 	rcu_read_unlock();
 
-	new_blkg = pol->ops.blkio_alloc_group_fn(q, blkcg);
-	if (new_blkg) {
-		new_blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
-
-		spin_lock_init(&new_blkg->stats_lock);
-		rcu_assign_pointer(new_blkg->q, q);
-		new_blkg->blkcg = blkcg;
-		new_blkg->plid = plid;
-		cgroup_path(blkcg->css.cgroup, new_blkg->path,
-			    sizeof(new_blkg->path));
-	} else {
-		css_put(&blkcg->css);
-	}
+	new_blkg = blkg_alloc(blkcg, q, pol);
 
 	rcu_read_lock();
 	spin_lock_irq(q->queue_lock);
@@ -492,7 +544,7 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 		goto out;
 
 	/* did alloc fail? */
-	if (unlikely(!new_blkg || !new_blkg->stats_cpu)) {
+	if (unlikely(!new_blkg)) {
 		blkg = ERR_PTR(-ENOMEM);
 		goto out;
 	}
@@ -504,11 +556,7 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 	pol->ops.blkio_link_group_fn(q, blkg);
 	spin_unlock(&blkcg->lock);
 out:
-	if (new_blkg) {
-		free_percpu(new_blkg->stats_cpu);
-		kfree(new_blkg);
-		css_put(&blkcg->css);
-	}
+	blkg_free(new_blkg);
 	return blkg;
 }
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
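For context, with this change a policy only has to declare its private
data size and an init callback; a minimal sketch of a registration
follows (the callback names here are placeholders -- the real throttle
descriptor lives in blk-throttle.c, which is not part of this diff):

static struct blkio_policy_type blkio_policy_throtl = {
	.ops = {
		.blkio_init_group_fn	= throtl_init_blkio_group,
		.blkio_link_group_fn	= throtl_link_blkio_group,
	},
	.plid		= BLKIO_POLICY_THROTL,
	.pdata_size	= sizeof(struct throtl_grp),
};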