author     Miguel Ojeda Sandonis <maxextreme@gmail.com>   2006-12-10 05:20:38 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>       2006-12-10 12:57:20 -0500
commit     33859f7f9788da2ac9aa23be4dc8e948112809ca
tree       42862c87cf0577580a952e67dfa6b126e91719ac /kernel/sched.c
parent     62ab616d54371a65f595c199aad1e1755b837d25
[PATCH] kernel/sched.c: whitespace cleanups
[akpm@osdl.org: additional cleanups]
Signed-off-by: Miguel Ojeda Sandonis <maxextreme@gmail.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/sched.c')

 kernel/sched.c | 95
 1 file changed, 55 insertions(+), 40 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 48e35c916326..8a0afb97af71 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -466,7 +466,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
 		seq_printf(seq, "domain%d %s", dcnt++, mask_str);
 		for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES;
 				itype++) {
-			seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu",
+			seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
+				"%lu",
 				sd->lb_cnt[itype],
 				sd->lb_balanced[itype],
 				sd->lb_failed[itype],
@@ -476,11 +477,13 @@ static int show_schedstat(struct seq_file *seq, void *v)
 				sd->lb_nobusyq[itype],
 				sd->lb_nobusyg[itype]);
 		}
-		seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
+		seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu"
+			" %lu %lu %lu\n",
 			sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
 			sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
 			sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
-			sd->ttwu_wake_remote, sd->ttwu_move_affine, sd->ttwu_move_balance);
+			sd->ttwu_wake_remote, sd->ttwu_move_affine,
+			sd->ttwu_move_balance);
 	}
 	preempt_enable();
 #endif
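A note for readers new to the idiom in the two hunks above: splitting a long format string across lines is safe because adjacent string literals in C are concatenated at compile time, so the seq_printf() calls emit byte-identical output before and after the cleanup. A minimal userspace sketch (plain printf() and made-up values stand in for seq_printf() and the schedstat counters):

        #include <stdio.h>

        int main(void)
        {
                unsigned long a = 1, b = 2, c = 3;

                /* The compiler merges the two adjacent literals into
                 * the single format string " %lu %lu %lu\n". */
                printf(" %lu %lu "
                       "%lu\n", a, b, c);
                return 0;
        }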
@@ -1454,7 +1457,9 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 
 	if (this_sd->flags & SD_WAKE_AFFINE) {
 		unsigned long tl = this_load;
-		unsigned long tl_per_task = cpu_avg_load_per_task(this_cpu);
+		unsigned long tl_per_task;
+
+		tl_per_task = cpu_avg_load_per_task(this_cpu);
 
 		/*
 		 * If sync wakeup then subtract the (maximum possible)
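The tl_per_task change swaps an initialized declaration for a declaration plus a separate assignment, purely to keep the line within 80 columns; the generated code is unchanged. A reduced sketch (the cpu_avg_load_per_task() stub and its return value are made up here, since the real helper needs runqueue state):

        #include <stdio.h>

        /* Hypothetical stand-in for the kernel's cpu_avg_load_per_task(). */
        static unsigned long cpu_avg_load_per_task(int cpu)
        {
                (void)cpu;
                return 128UL;   /* made-up load value */
        }

        int main(void)
        {
                unsigned long tl_per_task;

                /* Declaration and assignment split, as in the hunk above. */
                tl_per_task = cpu_avg_load_per_task(0);
                printf("tl_per_task = %lu\n", tl_per_task);
                return 0;
        }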
@@ -2487,18 +2492,21 @@ small_imbalance:
 	pwr_now /= SCHED_LOAD_SCALE;
 
 	/* Amount of load we'd subtract */
-	tmp = busiest_load_per_task*SCHED_LOAD_SCALE/busiest->cpu_power;
+	tmp = busiest_load_per_task * SCHED_LOAD_SCALE /
+		busiest->cpu_power;
 	if (max_load > tmp)
 		pwr_move += busiest->cpu_power *
 			min(busiest_load_per_task, max_load - tmp);
 
 	/* Amount of load we'd add */
-	if (max_load*busiest->cpu_power <
-				busiest_load_per_task*SCHED_LOAD_SCALE)
-		tmp = max_load*busiest->cpu_power/this->cpu_power;
+	if (max_load * busiest->cpu_power <
+				busiest_load_per_task * SCHED_LOAD_SCALE)
+		tmp = max_load * busiest->cpu_power / this->cpu_power;
 	else
-		tmp = busiest_load_per_task*SCHED_LOAD_SCALE/this->cpu_power;
-	pwr_move += this->cpu_power*min(this_load_per_task, this_load + tmp);
+		tmp = busiest_load_per_task * SCHED_LOAD_SCALE /
+			this->cpu_power;
+	pwr_move += this->cpu_power *
+		min(this_load_per_task, this_load + tmp);
 	pwr_move /= SCHED_LOAD_SCALE;
 
 	/* Move if we gain throughput */
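The reflowed arithmetic in small_imbalance is fixed-point math: loads are multiplied by SCHED_LOAD_SCALE before the integer divide by cpu_power, so precision is not thrown away. A standalone sketch of the pattern (the 1 << 10 value for SCHED_LOAD_SCALE is an assumption about kernels of this era, and the other numbers are illustrative):

        #include <stdio.h>

        #define SCHED_LOAD_SCALE (1UL << 10)    /* assumed fixed-point unit */

        int main(void)
        {
                unsigned long busiest_load_per_task = 512;
                unsigned long cpu_power = 2048;

                /* Scale first, divide last: doing the integer division
                 * at the end keeps the intermediate precision. */
                unsigned long tmp = busiest_load_per_task * SCHED_LOAD_SCALE /
                                        cpu_power;

                printf("tmp = %lu\n", tmp);     /* 512 * 1024 / 2048 = 256 */
                return 0;
        }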
@@ -3366,7 +3374,8 @@ void fastcall add_preempt_count(int val)
 	/*
 	 * Spinlock count overflowing soon?
 	 */
-	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10);
+	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
+				PREEMPT_MASK - 10);
 }
 EXPORT_SYMBOL(add_preempt_count);
 
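The overflow check masks out everything but the preemption-depth bits of preempt_count() and warns once they come within ten of saturating. A sketch of the mask-and-compare (the 8-bit field width is an assumption for illustration; the real layout lives in <linux/preempt.h>):

        #include <stdio.h>

        #define PREEMPT_BITS    8                               /* assumed width */
        #define PREEMPT_MASK    ((1UL << PREEMPT_BITS) - 1)     /* 0xff */

        int main(void)
        {
                unsigned long preempt_count = 0xf8;     /* hypothetical depth 248 */

                if ((preempt_count & PREEMPT_MASK) >= PREEMPT_MASK - 10)
                        printf("spinlock count overflowing soon: %lu\n",
                               preempt_count & PREEMPT_MASK);
                return 0;
        }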
@@ -5439,16 +5448,19 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 		if (!(sd->flags & SD_LOAD_BALANCE)) {
 			printk("does not load-balance\n");
 			if (sd->parent)
-				printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent");
+				printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
+						" has parent");
 			break;
 		}
 
 		printk("span %s\n", str);
 
 		if (!cpu_isset(cpu, sd->span))
-			printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
+			printk(KERN_ERR "ERROR: domain->span does not contain "
+					"CPU%d\n", cpu);
 		if (!cpu_isset(cpu, group->cpumask))
-			printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
+			printk(KERN_ERR "ERROR: domain->groups does not contain"
+					" CPU%d\n", cpu);
 
 		printk(KERN_DEBUG);
 		for (i = 0; i < level + 2; i++)
@@ -5463,7 +5475,8 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 
 			if (!group->cpu_power) {
 				printk("\n");
-				printk(KERN_ERR "ERROR: domain->cpu_power not set\n");
+				printk(KERN_ERR "ERROR: domain->cpu_power not "
+						"set\n");
 			}
 
 			if (!cpus_weight(group->cpumask)) {
@@ -5486,15 +5499,17 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 		printk("\n");
 
 		if (!cpus_equal(sd->span, groupmask))
-			printk(KERN_ERR "ERROR: groups don't span domain->span\n");
+			printk(KERN_ERR "ERROR: groups don't span "
+					"domain->span\n");
 
 		level++;
 		sd = sd->parent;
+		if (!sd)
+			continue;
 
-		if (sd) {
-			if (!cpus_subset(groupmask, sd->span))
-				printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
-		}
+		if (!cpus_subset(groupmask, sd->span))
+			printk(KERN_ERR "ERROR: parent span is not a superset "
+				"of domain->span\n");
 
 	} while (sd);
 }
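The de-nesting at the bottom of sched_domain_debug() leans on a do/while detail: continue jumps to the while (sd) test, so a NULL parent still exits the loop exactly as the old if (sd) { ... } guard did, just with one less indent level. A reduced sketch of that control flow (a two-level toy hierarchy replaces the real sched_domain chain):

        #include <stdio.h>

        struct domain { struct domain *parent; };

        int main(void)
        {
                struct domain root = { NULL };
                struct domain leaf = { &root };
                struct domain *sd = &leaf;

                do {
                        printf("checking domain %p\n", (void *)sd);
                        sd = sd->parent;
                        if (!sd)
                                continue;       /* falls through to the while test */
                        printf("  parent present, cross-checking spans\n");
                } while (sd);
                return 0;
        }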
@@ -5812,8 +5827,9 @@ __setup("max_cache_size=", setup_max_cache_size);
  */
 static void touch_cache(void *__cache, unsigned long __size)
 {
-	unsigned long size = __size/sizeof(long), chunk1 = size/3,
-			chunk2 = 2*size/3;
+	unsigned long size = __size / sizeof(long);
+	unsigned long chunk1 = size / 3;
+	unsigned long chunk2 = 2 * size / 3;
 	unsigned long *cache = __cache;
 	int i;
 
@@ -5922,11 +5938,11 @@ measure_cost(int cpu1, int cpu2, void *cache, unsigned int size)
 	 */
 	measure_one(cache, size, cpu1, cpu2);
 	for (i = 0; i < ITERATIONS; i++)
-		cost1 += measure_one(cache, size - i*1024, cpu1, cpu2);
+		cost1 += measure_one(cache, size - i * 1024, cpu1, cpu2);
 
 	measure_one(cache, size, cpu2, cpu1);
 	for (i = 0; i < ITERATIONS; i++)
-		cost1 += measure_one(cache, size - i*1024, cpu2, cpu1);
+		cost1 += measure_one(cache, size - i * 1024, cpu2, cpu1);
 
 	/*
 	 * (We measure the non-migrating [cached] cost on both
@@ -5936,17 +5952,17 @@ measure_cost(int cpu1, int cpu2, void *cache, unsigned int size)
 
 	measure_one(cache, size, cpu1, cpu1);
 	for (i = 0; i < ITERATIONS; i++)
-		cost2 += measure_one(cache, size - i*1024, cpu1, cpu1);
+		cost2 += measure_one(cache, size - i * 1024, cpu1, cpu1);
 
 	measure_one(cache, size, cpu2, cpu2);
 	for (i = 0; i < ITERATIONS; i++)
-		cost2 += measure_one(cache, size - i*1024, cpu2, cpu2);
+		cost2 += measure_one(cache, size - i * 1024, cpu2, cpu2);
 
 	/*
 	 * Get the per-iteration migration cost:
 	 */
-	do_div(cost1, 2*ITERATIONS);
-	do_div(cost2, 2*ITERATIONS);
+	do_div(cost1, 2 * ITERATIONS);
+	do_div(cost2, 2 * ITERATIONS);
 
 	return cost1 - cost2;
 }
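do_div() rather than a plain / is used here because the costs are 64-bit and 32-bit architectures lack native 64-bit division; the macro divides its first argument in place and evaluates to the remainder. A userspace approximation of the call pattern (the stand-in macro below only mimics the semantics of the per-arch kernel version in asm/div64.h, and the ITERATIONS value is assumed; the divisor is 2 * ITERATIONS because the function accumulates two loops of ITERATIONS samples each):

        #include <stdio.h>

        /* Userspace stand-in mimicking the kernel's do_div() semantics:
         * n is divided in place, the expression yields the remainder.
         * (Uses a GCC statement expression, as the kernel does.) */
        #define do_div(n, base) ({                      \
                unsigned int __rem = (n) % (base);      \
                (n) /= (base);                          \
                __rem;                                  \
        })

        #define ITERATIONS 8    /* assumed sample count */

        int main(void)
        {
                unsigned long long cost1 = 123456789ULL;

                do_div(cost1, 2 * ITERATIONS); /* quotient lands in cost1 */
                printf("per-iteration cost: %llu\n", cost1);
                return 0;
        }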
@@ -5984,7 +6000,7 @@ static unsigned long long measure_migration_cost(int cpu1, int cpu2)
 	 */
 	cache = vmalloc(max_size);
 	if (!cache) {
-		printk("could not vmalloc %d bytes for cache!\n", 2*max_size);
+		printk("could not vmalloc %d bytes for cache!\n", 2 * max_size);
 		return 1000000; /* return 1 msec on very small boxen */
 	}
 
@@ -6009,7 +6025,8 @@ static unsigned long long measure_migration_cost(int cpu1, int cpu2)
 		avg_fluct = (avg_fluct + fluct)/2;
 
 		if (migration_debug)
-			printk("-> [%d][%d][%7d] %3ld.%ld [%3ld.%ld] (%ld): (%8Ld %8Ld)\n",
+			printk("-> [%d][%d][%7d] %3ld.%ld [%3ld.%ld] (%ld): "
+				"(%8Ld %8Ld)\n",
 				cpu1, cpu2, size,
 				(long)cost / 1000000,
 				((long)cost / 100000) % 10,
@@ -6104,20 +6121,18 @@ static void calibrate_migration_costs(const cpumask_t *cpu_map)
 			-1
 #endif
 		);
-	if (system_state == SYSTEM_BOOTING) {
-		if (num_online_cpus() > 1) {
-			printk("migration_cost=");
-			for (distance = 0; distance <= max_distance; distance++) {
-				if (distance)
-					printk(",");
-				printk("%ld", (long)migration_cost[distance] / 1000);
-			}
-			printk("\n");
+	if (system_state == SYSTEM_BOOTING && num_online_cpus() > 1) {
+		printk("migration_cost=");
+		for (distance = 0; distance <= max_distance; distance++) {
+			if (distance)
+				printk(",");
+			printk("%ld", (long)migration_cost[distance] / 1000);
 		}
+		printk("\n");
 	}
 	j1 = jiffies;
 	if (migration_debug)
-		printk("migration: %ld seconds\n", (j1-j0)/HZ);
+		printk("migration: %ld seconds\n", (j1-j0) / HZ);
 
 	/*
 	 * Move back to the original CPU. NUMA-Q gets confused
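Merging the nested num_online_cpus() check into the outer condition with && preserves behavior through short-circuit evaluation while dropping one indent level. The loop itself just emits a comma-separated migration_cost= line at boot; a standalone sketch with made-up costs:

        #include <stdio.h>

        int main(void)
        {
                /* Hypothetical per-distance migration costs in nanoseconds. */
                long migration_cost[] = { 1000000, 2500000, 4000000 };
                int max_distance = 2, distance;

                printf("migration_cost=");
                for (distance = 0; distance <= max_distance; distance++) {
                        if (distance)
                                printf(",");
                        printf("%ld", migration_cost[distance] / 1000);
                }
                printf("\n");   /* -> migration_cost=1000,2500,4000 */
                return 0;
        }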