path: root/lib
author		Ingo Molnar <mingo@elte.hu>	2010-03-04 05:47:50 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-03-04 05:47:52 -0500
commit		4f16d4e0c9a4b20d9f0db365587b96d6001efd7d (patch)
tree		fa25dcf285b26f1fac2bf267d0d1cd2c4eba90b8 /lib
parent		1e259e0a9982078896f3404240096cbea01daca4 (diff)
parent		6630125419ef37ff8781713c5e9d416f2a4ba357 (diff)
Merge branch 'perf/core' into perf/urgent
Merge reason: Switch from pre-merge topical split to the post-merge urgent track

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig.debug	8
-rw-r--r--	lib/idr.c		9
2 files changed, 14 insertions(+), 3 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 25c3ed594c54..4dc24cc13f5c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -520,6 +520,14 @@ config LOCK_STAT
 
 	  For more details, see Documentation/lockstat.txt
 
+	  This also enables lock events required by "perf lock",
+	  subcommand of perf.
+	  If you want to use "perf lock", you also need to turn on
+	  CONFIG_EVENT_TRACING.
+
+	  CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
+	  (CONFIG_LOCKDEP defines "acquire" and "release" events.)
+
 config DEBUG_LOCKDEP
 	bool "Lock dependency engine debugging"
 	depends on DEBUG_KERNEL && LOCKDEP
diff --git a/lib/idr.c b/lib/idr.c
index ba7d37cf7847..0dc782216d4b 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -140,7 +140,8 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 	id = *starting_id;
  restart:
 	p = idp->top;
-	l = p->layer;
+	l = idp->layers;
+	pa[l--] = NULL;
 	while (1) {
 		/*
 		 * We run around this while until we reach the leaf node...
@@ -154,11 +155,13 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			oid = id;
 			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
 
-			/* did id go over the limit? */
-			if (id >= (1 << (idp->layers * IDR_BITS))) {
+			/* if already at the top layer, we need to grow */
+			if (id >= 1 << (idp->layers * IDR_BITS)) {
 				*starting_id = id;
 				return IDR_NEED_TO_GROW;
 			}
+			p = pa[l];
+			BUG_ON(!p);
 
 			/* If we need to go up one layer, continue the
 			 * loop; otherwise, restart from the top.
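The idr.c hunks adjust sub_alloc() so that the downward walk is seeded from idp->layers (with a NULL sentinel at the top of pa[]) and, once an id is bumped past the end of a full sub-tree, the search resumes from the recorded parent p = pa[l] rather than a stale node pointer. As a minimal sketch of the call path that reaches sub_alloc(), the classic idr_pre_get()/idr_get_new() loop is shown below; example_idr and example_alloc_id are hypothetical names, error handling is abbreviated, and this code is not part of the commit.

/*
 * Hypothetical caller sketch, not part of this commit: the
 * idr_pre_get()/idr_get_new() pattern is what ultimately drives
 * sub_alloc().  When the id space at the current number of layers is
 * exhausted, sub_alloc() returns IDR_NEED_TO_GROW and the idr core
 * adds a layer before retrying; -EAGAIN here only means the
 * preallocated layer cache ran dry and idr_pre_get() must be called
 * again.
 */
#include <linux/idr.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static DEFINE_IDR(example_idr);		/* hypothetical idr instance */

static int example_alloc_id(void *ptr)
{
	int id, ret;

	do {
		if (!idr_pre_get(&example_idr, GFP_KERNEL))
			return -ENOMEM;
		ret = idr_get_new(&example_idr, ptr, &id);
	} while (ret == -EAGAIN);

	return ret ? ret : id;
}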