diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-11-14 02:30:30 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-11-14 02:30:30 -0500 |
commit | 5e30025a319910695f5010dc0fb53a23299da14d (patch) | |
tree | 4292bcf78de221c7de1774ccf5ad0ac5a9315c26 /block | |
parent | 7971e23a66c94f1b9bd2d64a3e86dfbfa8c60121 (diff) | |
parent | 90d3839b90fe379557dae4a44735a6af78f42885 (diff) |
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking changes from Ingo Molnar:
"The biggest changes:
- add lockdep support for seqcount/seqlocks structures, this
unearthed both bugs and required extra annotation.
- move the various kernel locking primitives to the new
kernel/locking/ directory"
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (21 commits)
block: Use u64_stats_init() to initialize seqcounts
locking/lockdep: Mark __lockdep_count_forward_deps() as static
lockdep/proc: Fix lock-time avg computation
locking/doc: Update references to kernel/mutex.c
ipv6: Fix possible ipv6 seqlock deadlock
cpuset: Fix potential deadlock w/ set_mems_allowed
seqcount: Add lockdep functionality to seqcount/seqlock structures
net: Explicitly initialize u64_stats_sync structures for lockdep
locking: Move the percpu-rwsem code to kernel/locking/
locking: Move the lglocks code to kernel/locking/
locking: Move the rwsem code to kernel/locking/
locking: Move the rtmutex code to kernel/locking/
locking: Move the semaphore core to kernel/locking/
locking: Move the spinlock code to kernel/locking/
locking: Move the lockdep code to kernel/locking/
locking: Move the mutex code to kernel/locking/
hung_task debugging: Add tracepoint to report the hang
x86/locking/kconfig: Update paravirt spinlock Kconfig description
lockstat: Report avg wait and hold times
lockdep, x86/alternatives: Drop ancient lockdep fixup message
...
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-cgroup.h | 10 | ||||
-rw-r--r-- | block/blk-throttle.c | 10 | ||||
-rw-r--r-- | block/cfq-iosched.c | 25 |
3 files changed, 45 insertions(+), 0 deletions(-)
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h index ae6969a7ffd4..1610b22edf09 100644 --- a/block/blk-cgroup.h +++ b/block/blk-cgroup.h | |||
@@ -402,6 +402,11 @@ struct request_list *__blk_queue_next_rl(struct request_list *rl, | |||
402 | #define blk_queue_for_each_rl(rl, q) \ | 402 | #define blk_queue_for_each_rl(rl, q) \ |
403 | for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q))) | 403 | for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q))) |
404 | 404 | ||
405 | static inline void blkg_stat_init(struct blkg_stat *stat) | ||
406 | { | ||
407 | u64_stats_init(&stat->syncp); | ||
408 | } | ||
409 | |||
405 | /** | 410 | /** |
406 | * blkg_stat_add - add a value to a blkg_stat | 411 | * blkg_stat_add - add a value to a blkg_stat |
407 | * @stat: target blkg_stat | 412 | * @stat: target blkg_stat |
@@ -458,6 +463,11 @@ static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from) | |||
458 | blkg_stat_add(to, blkg_stat_read(from)); | 463 | blkg_stat_add(to, blkg_stat_read(from)); |
459 | } | 464 | } |
460 | 465 | ||
466 | static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat) | ||
467 | { | ||
468 | u64_stats_init(&rwstat->syncp); | ||
469 | } | ||
470 | |||
461 | /** | 471 | /** |
462 | * blkg_rwstat_add - add a value to a blkg_rwstat | 472 | * blkg_rwstat_add - add a value to a blkg_rwstat |
463 | * @rwstat: target blkg_rwstat | 473 | * @rwstat: target blkg_rwstat |
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 8331aba9426f..06534049afba 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
@@ -256,6 +256,12 @@ static struct throtl_data *sq_to_td(struct throtl_service_queue *sq) | |||
256 | } \ | 256 | } \ |
257 | } while (0) | 257 | } while (0) |
258 | 258 | ||
259 | static void tg_stats_init(struct tg_stats_cpu *tg_stats) | ||
260 | { | ||
261 | blkg_rwstat_init(&tg_stats->service_bytes); | ||
262 | blkg_rwstat_init(&tg_stats->serviced); | ||
263 | } | ||
264 | |||
259 | /* | 265 | /* |
260 | * Worker for allocating per cpu stat for tgs. This is scheduled on the | 266 | * Worker for allocating per cpu stat for tgs. This is scheduled on the |
261 | * system_wq once there are some groups on the alloc_list waiting for | 267 | * system_wq once there are some groups on the alloc_list waiting for |
@@ -269,12 +275,16 @@ static void tg_stats_alloc_fn(struct work_struct *work) | |||
269 | 275 | ||
270 | alloc_stats: | 276 | alloc_stats: |
271 | if (!stats_cpu) { | 277 | if (!stats_cpu) { |
278 | int cpu; | ||
279 | |||
272 | stats_cpu = alloc_percpu(struct tg_stats_cpu); | 280 | stats_cpu = alloc_percpu(struct tg_stats_cpu); |
273 | if (!stats_cpu) { | 281 | if (!stats_cpu) { |
274 | /* allocation failed, try again after some time */ | 282 | /* allocation failed, try again after some time */ |
275 | schedule_delayed_work(dwork, msecs_to_jiffies(10)); | 283 | schedule_delayed_work(dwork, msecs_to_jiffies(10)); |
276 | return; | 284 | return; |
277 | } | 285 | } |
286 | for_each_possible_cpu(cpu) | ||
287 | tg_stats_init(per_cpu_ptr(stats_cpu, cpu)); | ||
278 | } | 288 | } |
279 | 289 | ||
280 | spin_lock_irq(&tg_stats_alloc_lock); | 290 | spin_lock_irq(&tg_stats_alloc_lock); |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 434944cbd761..4d5cec1ad80d 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -1508,6 +1508,29 @@ static void cfq_init_cfqg_base(struct cfq_group *cfqg) | |||
1508 | } | 1508 | } |
1509 | 1509 | ||
1510 | #ifdef CONFIG_CFQ_GROUP_IOSCHED | 1510 | #ifdef CONFIG_CFQ_GROUP_IOSCHED |
1511 | static void cfqg_stats_init(struct cfqg_stats *stats) | ||
1512 | { | ||
1513 | blkg_rwstat_init(&stats->service_bytes); | ||
1514 | blkg_rwstat_init(&stats->serviced); | ||
1515 | blkg_rwstat_init(&stats->merged); | ||
1516 | blkg_rwstat_init(&stats->service_time); | ||
1517 | blkg_rwstat_init(&stats->wait_time); | ||
1518 | blkg_rwstat_init(&stats->queued); | ||
1519 | |||
1520 | blkg_stat_init(&stats->sectors); | ||
1521 | blkg_stat_init(&stats->time); | ||
1522 | |||
1523 | #ifdef CONFIG_DEBUG_BLK_CGROUP | ||
1524 | blkg_stat_init(&stats->unaccounted_time); | ||
1525 | blkg_stat_init(&stats->avg_queue_size_sum); | ||
1526 | blkg_stat_init(&stats->avg_queue_size_samples); | ||
1527 | blkg_stat_init(&stats->dequeue); | ||
1528 | blkg_stat_init(&stats->group_wait_time); | ||
1529 | blkg_stat_init(&stats->idle_time); | ||
1530 | blkg_stat_init(&stats->empty_time); | ||
1531 | #endif | ||
1532 | } | ||
1533 | |||
1511 | static void cfq_pd_init(struct blkcg_gq *blkg) | 1534 | static void cfq_pd_init(struct blkcg_gq *blkg) |
1512 | { | 1535 | { |
1513 | struct cfq_group *cfqg = blkg_to_cfqg(blkg); | 1536 | struct cfq_group *cfqg = blkg_to_cfqg(blkg); |
@@ -1515,6 +1538,8 @@ static void cfq_pd_init(struct blkcg_gq *blkg) | |||
1515 | cfq_init_cfqg_base(cfqg); | 1538 | cfq_init_cfqg_base(cfqg); |
1516 | cfqg->weight = blkg->blkcg->cfq_weight; | 1539 | cfqg->weight = blkg->blkcg->cfq_weight; |
1517 | cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight; | 1540 | cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight; |
1541 | cfqg_stats_init(&cfqg->stats); | ||
1542 | cfqg_stats_init(&cfqg->dead_stats); | ||
1518 | } | 1543 | } |
1519 | 1544 | ||
1520 | static void cfq_pd_offline(struct blkcg_gq *blkg) | 1545 | static void cfq_pd_offline(struct blkcg_gq *blkg) |