author     Ingo Molnar <mingo@elte.hu>              2006-07-03 03:25:40 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-07-03 18:27:10 -0400
commit     48f24c4da1ee7f3f22289cb85e8b8a73e4df4db5 (patch)
tree       c827ac2ad2bba931753b8213123f059bcf773534 /kernel/sched.c
parent     829035fd709119d9def124a6d40b94d317573e6f (diff)
[PATCH] sched: clean up fallout of recent changes
Clean up some of the impact of recent (and not so recent) scheduler
changes:
- turning macros into nice inline functions
- sanitizing and unifying variable definitions
- whitespace, style consistency, 80-lines, comment correctness, spelling
and curly braces police
Due to the macro hell and variable placement simplifications there's even 26
bytes of .text saved:
text data bss dec hex filename
25510 4153 192 29855 749f sched.o.before
25484 4153 192 29829 7485 sched.o.after
[akpm@osdl.org: build fix]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--   kernel/sched.c   360
1 files changed, 194 insertions, 166 deletions
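
Most of the churn in the patch below comes from the first item in the changelog, turning function-like macros into inline functions. As a minimal illustration of that pattern, here is the minus_1_or_zero() conversion from the diff, shown in isolation (old macro first, then its inline replacement):

        /* Before: a function-like macro. There is no type checking and the
         * argument expression is pasted in textually (evaluated twice here). */
        #define minus_1_or_zero(n) ((n) > 0 ? (n) - 1 : 0)

        /* After: an equivalent inline function. The parameter is typed and
         * evaluated exactly once, and the compiler still inlines the code. */
        static inline unsigned long minus_1_or_zero(unsigned long n)
        {
                return n > 0 ? n - 1 : 0;
        }

The same pattern is applied to task_hot(), EXPIRED_STARVING() and CPU_OFFSET() further down; as the size table above shows, the conversions cost nothing at runtime and .text even shrinks by 26 bytes.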
diff --git a/kernel/sched.c b/kernel/sched.c
index f4778d1aef69..b0326141f841 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -184,9 +184,6 @@ static inline unsigned int task_timeslice(task_t *p)
         return static_prio_timeslice(p->static_prio);
 }
 
-#define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \
-                        < (long long) (sd)->cache_hot_time)
-
 /*
  * These are the runqueue data structures:
  */
@@ -278,8 +275,8 @@ static DEFINE_PER_CPU(struct runqueue, runqueues);
  * The domain tree of any CPU may only be accessed from within
  * preempt-disabled sections.
  */
-#define for_each_domain(cpu, domain) \
-        for (domain = rcu_dereference(cpu_rq(cpu)->sd); domain; domain = domain->parent)
+#define for_each_domain(cpu, __sd) \
+        for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
 
 #define cpu_rq(cpu)    (&per_cpu(runqueues, (cpu)))
 #define this_rq()      (&__get_cpu_var(runqueues))
@@ -1039,6 +1036,7 @@ static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
         req->task = p;
         req->dest_cpu = dest_cpu;
         list_add(&req->list, &rq->migration_queue);
+
         return 1;
 }
 
@@ -1135,7 +1133,7 @@ static inline unsigned long cpu_avg_load_per_task(int cpu)
         runqueue_t *rq = cpu_rq(cpu);
         unsigned long n = rq->nr_running;
 
-        return n ?  rq->raw_weighted_load / n : SCHED_LOAD_SCALE;
+        return n ? rq->raw_weighted_load / n : SCHED_LOAD_SCALE;
 }
 
 /*
@@ -1494,7 +1492,6 @@ int fastcall wake_up_process(task_t *p)
         return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
                               TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
 }
-
 EXPORT_SYMBOL(wake_up_process);
 
 int fastcall wake_up_state(task_t *p, unsigned int state)
@@ -1867,6 +1864,15 @@ unsigned long nr_active(void)
 #ifdef CONFIG_SMP
 
 /*
+ * Is this task likely cache-hot:
+ */
+static inline int
+task_hot(struct task_struct *p, unsigned long long now, struct sched_domain *sd)
+{
+        return (long long)(now - p->last_ran) < (long long)sd->cache_hot_time;
+}
+
+/*
  * double_rq_lock - safely lock two runqueues
  *
  * Note this does not disable interrupts like task_rq_lock,
@@ -2029,6 +2035,7 @@ int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
 }
 
 #define rq_best_prio(rq) min((rq)->curr->prio, (rq)->best_expired_prio)
+
 /*
  * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted
  * load from busiest to this_rq, as part of a balancing operation within
@@ -2041,11 +2048,10 @@ static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
                       struct sched_domain *sd, enum idle_type idle,
                       int *all_pinned)
 {
+        int idx, pulled = 0, pinned = 0, this_best_prio, best_prio,
+            best_prio_seen, skip_for_load;
         prio_array_t *array, *dst_array;
         struct list_head *head, *curr;
-        int idx, pulled = 0, pinned = 0, this_best_prio, busiest_best_prio;
-        int busiest_best_prio_seen;
-        int skip_for_load;   /* skip the task based on weighted load issues */
         long rem_load_move;
         task_t *tmp;
 
@@ -2055,15 +2061,15 @@ static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
         rem_load_move = max_load_move;
         pinned = 1;
         this_best_prio = rq_best_prio(this_rq);
-        busiest_best_prio = rq_best_prio(busiest);
+        best_prio = rq_best_prio(busiest);
         /*
          * Enable handling of the case where there is more than one task
          * with the best priority. If the current running task is one
-         * of those with prio==busiest_best_prio we know it won't be moved
+         * of those with prio==best_prio we know it won't be moved
          * and therefore it's safe to override the skip (based on load) of
          * any task we find with that prio.
          */
-        busiest_best_prio_seen = busiest_best_prio == busiest->curr->prio;
+        best_prio_seen = best_prio == busiest->curr->prio;
 
         /*
          * We first consider expired tasks. Those will likely not be
@@ -2110,10 +2116,11 @@ skip_queue:
          */
         skip_for_load = tmp->load_weight > rem_load_move;
         if (skip_for_load && idx < this_best_prio)
-                skip_for_load = !busiest_best_prio_seen && idx == busiest_best_prio;
+                skip_for_load = !best_prio_seen && idx == best_prio;
         if (skip_for_load ||
             !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
-                busiest_best_prio_seen |= idx == busiest_best_prio;
+
+                best_prio_seen |= idx == best_prio;
                 if (curr != head)
                         goto skip_queue;
                 idx++;
@@ -2156,8 +2163,8 @@ out:
 
 /*
  * find_busiest_group finds and returns the busiest CPU group within the
- * domain. It calculates and returns the amount of weighted load which should be
- * moved to restore balance via the imbalance parameter.
+ * domain. It calculates and returns the amount of weighted load which
+ * should be moved to restore balance via the imbalance parameter.
  */
 static struct sched_group *
 find_busiest_group(struct sched_domain *sd, int this_cpu,
@@ -2279,7 +2286,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
                  * capacity but still has some space to pick up some load
                  * from other group and save more power
                  */
-                if (sum_nr_running <= group_capacity - 1)
+                if (sum_nr_running <= group_capacity - 1) {
                         if (sum_nr_running > leader_nr_running ||
                             (sum_nr_running == leader_nr_running &&
                              first_cpu(group->cpumask) >
@@ -2287,7 +2294,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
                                 group_leader = group;
                                 leader_nr_running = sum_nr_running;
                         }
-
+                }
 group_next:
 #endif
                 group = group->next;
@@ -2342,8 +2349,7 @@ group_next:
          * moved
          */
         if (*imbalance < busiest_load_per_task) {
-                unsigned long pwr_now, pwr_move;
-                unsigned long tmp;
+                unsigned long tmp, pwr_now, pwr_move;
                 unsigned int imbn;
 
 small_imbalance:
@@ -2415,22 +2421,23 @@ ret:
 /*
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
-static runqueue_t *find_busiest_queue(struct sched_group *group,
-        enum idle_type idle, unsigned long imbalance)
+static runqueue_t *
+find_busiest_queue(struct sched_group *group, enum idle_type idle,
+                   unsigned long imbalance)
 {
+        runqueue_t *busiest = NULL, *rq;
         unsigned long max_load = 0;
-        runqueue_t *busiest = NULL, *rqi;
         int i;
 
         for_each_cpu_mask(i, group->cpumask) {
-                rqi = cpu_rq(i);
+                rq = cpu_rq(i);
 
-                if (rqi->nr_running == 1 && rqi->raw_weighted_load > imbalance)
+                if (rq->nr_running == 1 && rq->raw_weighted_load > imbalance)
                         continue;
 
-                if (rqi->raw_weighted_load > max_load) {
-                        max_load = rqi->raw_weighted_load;
-                        busiest = rqi;
+                if (rq->raw_weighted_load > max_load) {
+                        max_load = rq->raw_weighted_load;
+                        busiest = rq;
                 }
         }
 
@@ -2443,7 +2450,11 @@ static runqueue_t *find_busiest_queue(struct sched_group *group,
  */
 #define MAX_PINNED_INTERVAL 512
 
-#define minus_1_or_zero(n) ((n) > 0 ? (n) - 1 : 0)
+static inline unsigned long minus_1_or_zero(unsigned long n)
+{
+        return n > 0 ? n - 1 : 0;
+}
+
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
@@ -2453,12 +2464,10 @@ static runqueue_t *find_busiest_queue(struct sched_group *group,
 static int load_balance(int this_cpu, runqueue_t *this_rq,
                         struct sched_domain *sd, enum idle_type idle)
 {
+        int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
         struct sched_group *group;
-        runqueue_t *busiest;
         unsigned long imbalance;
-        int nr_moved, all_pinned = 0;
-        int active_balance = 0;
-        int sd_idle = 0;
+        runqueue_t *busiest;
 
         if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
             !sched_smt_power_savings)
@@ -2492,8 +2501,8 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
          */
         double_rq_lock(this_rq, busiest);
         nr_moved = move_tasks(this_rq, this_cpu, busiest,
-                        minus_1_or_zero(busiest->nr_running),
-                        imbalance, sd, idle, &all_pinned);
+                              minus_1_or_zero(busiest->nr_running),
+                              imbalance, sd, idle, &all_pinned);
         double_rq_unlock(this_rq, busiest);
 
         /* All tasks on this runqueue were pinned by CPU affinity */
@@ -2566,7 +2575,8 @@ out_one_pinned:
             (sd->balance_interval < sd->max_interval))
                 sd->balance_interval *= 2;
 
-        if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings)
+        if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
+            !sched_smt_power_savings)
                 return -1;
         return 0;
 }
@@ -2578,8 +2588,8 @@ out_one_pinned:
  * Called from schedule when this_rq is about to become idle (NEWLY_IDLE).
  * this_rq is locked.
  */
-static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
-                                struct sched_domain *sd)
+static int
+load_balance_newidle(int this_cpu, runqueue_t *this_rq, struct sched_domain *sd)
 {
         struct sched_group *group;
         runqueue_t *busiest = NULL;
@@ -2628,9 +2638,11 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
 
 out_balanced:
         schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
-        if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings)
+        if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
+            !sched_smt_power_savings)
                 return -1;
         sd->nr_balance_failed = 0;
+
         return 0;
 }
 
@@ -2644,10 +2656,9 @@ static void idle_balance(int this_cpu, runqueue_t *this_rq)
 
         for_each_domain(this_cpu, sd) {
                 if (sd->flags & SD_BALANCE_NEWIDLE) {
-                        if (load_balance_newidle(this_cpu, this_rq, sd)) {
-                                /* We've pulled tasks over so stop searching */
+                        /* If we've pulled tasks over stop searching: */
+                        if (load_balance_newidle(this_cpu, this_rq, sd))
                                 break;
-                        }
                 }
         }
 }
@@ -2666,8 +2677,8 @@ static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu)
         runqueue_t *target_rq;
         int target_cpu = busiest_rq->push_cpu;
 
+        /* Is there any task to move? */
         if (busiest_rq->nr_running <= 1)
-                /* no task to move */
                 return;
 
         target_rq = cpu_rq(target_cpu);
@@ -2685,21 +2696,20 @@ static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu)
         /* Search for an sd spanning us and the target CPU. */
         for_each_domain(target_cpu, sd) {
                 if ((sd->flags & SD_LOAD_BALANCE) &&
-                        cpu_isset(busiest_cpu, sd->span))
+                    cpu_isset(busiest_cpu, sd->span))
                                 break;
         }
 
-        if (unlikely(sd == NULL))
-                goto out;
-
-        schedstat_inc(sd, alb_cnt);
+        if (likely(sd)) {
+                schedstat_inc(sd, alb_cnt);
 
-        if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
-                        RTPRIO_TO_LOAD_WEIGHT(100), sd, SCHED_IDLE, NULL))
-                schedstat_inc(sd, alb_pushed);
-        else
-                schedstat_inc(sd, alb_failed);
-out:
+                if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
+                               RTPRIO_TO_LOAD_WEIGHT(100), sd, SCHED_IDLE,
+                               NULL))
+                        schedstat_inc(sd, alb_pushed);
+                else
+                        schedstat_inc(sd, alb_failed);
+        }
         spin_unlock(&target_rq->lock);
 }
 
@@ -2712,23 +2722,27 @@ out:
  * Balancing parameters are set up in arch_init_sched_domains.
  */
 
-/* Don't have all balancing operations going off at once */
-#define CPU_OFFSET(cpu) (HZ * cpu / NR_CPUS)
+/* Don't have all balancing operations going off at once: */
+static inline unsigned long cpu_offset(int cpu)
+{
+        return jiffies + cpu * HZ / NR_CPUS;
+}
 
-static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
-                           enum idle_type idle)
+static void
+rebalance_tick(int this_cpu, runqueue_t *this_rq, enum idle_type idle)
 {
-        unsigned long old_load, this_load;
-        unsigned long j = jiffies + CPU_OFFSET(this_cpu);
+        unsigned long this_load, interval, j = cpu_offset(this_cpu);
         struct sched_domain *sd;
-        int i;
+        int i, scale;
 
         this_load = this_rq->raw_weighted_load;
-        /* Update our load */
-        for (i = 0; i < 3; i++) {
-                unsigned long new_load = this_load;
-                int scale = 1 << i;
+
+        /* Update our load: */
+        for (i = 0, scale = 1; i < 3; i++, scale <<= 1) {
+                unsigned long old_load, new_load;
+
                 old_load = this_rq->cpu_load[i];
+                new_load = this_load;
                 /*
                  * Round up the averaging division if load is increasing. This
                  * prevents us from getting stuck on 9 if the load is 10, for
@@ -2740,8 +2754,6 @@ static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
         }
 
         for_each_domain(this_cpu, sd) {
-                unsigned long interval;
-
                 if (!(sd->flags & SD_LOAD_BALANCE))
                         continue;
 
@@ -2782,6 +2794,7 @@ static inline void idle_balance(int cpu, runqueue_t *rq)
 static inline int wake_priority_sleeper(runqueue_t *rq)
 {
         int ret = 0;
+
 #ifdef CONFIG_SCHED_SMT
         spin_lock(&rq->lock);
         /*
@@ -2805,25 +2818,26 @@ EXPORT_PER_CPU_SYMBOL(kstat);
  * This is called on clock ticks and on context switches.
  * Bank in p->sched_time the ns elapsed since the last tick or switch.
  */
-static inline void update_cpu_clock(task_t *p, runqueue_t *rq,
-                                    unsigned long long now)
+static inline void
+update_cpu_clock(task_t *p, runqueue_t *rq, unsigned long long now)
 {
-        unsigned long long last = max(p->timestamp, rq->timestamp_last_tick);
-        p->sched_time += now - last;
+        p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick);
 }
 
 /*
  * Return current->sched_time plus any more ns on the sched_clock
  * that have not yet been banked.
  */
-unsigned long long current_sched_time(const task_t *tsk)
+unsigned long long current_sched_time(const task_t *p)
 {
         unsigned long long ns;
         unsigned long flags;
+
         local_irq_save(flags);
-        ns = max(tsk->timestamp, task_rq(tsk)->timestamp_last_tick);
-        ns = tsk->sched_time + (sched_clock() - ns);
+        ns = max(p->timestamp, task_rq(p)->timestamp_last_tick);
+        ns = p->sched_time + sched_clock() - ns;
         local_irq_restore(flags);
+
         return ns;
 }
 
@@ -2837,11 +2851,16 @@ unsigned long long current_sched_time(const task_t *tsk)
  * increasing number of running tasks. We also ignore the interactivity
  * if a better static_prio task has expired:
  */
-#define EXPIRED_STARVING(rq) \
-        ((STARVATION_LIMIT && ((rq)->expired_timestamp && \
-                (jiffies - (rq)->expired_timestamp >= \
-                        STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
-                        ((rq)->curr->static_prio > (rq)->best_expired_prio))
+static inline int expired_starving(runqueue_t *rq)
+{
+        if (rq->curr->static_prio > rq->best_expired_prio)
+                return 1;
+        if (!STARVATION_LIMIT || !rq->expired_timestamp)
+                return 0;
+        if (jiffies - rq->expired_timestamp > STARVATION_LIMIT * rq->nr_running)
+                return 1;
+        return 0;
+}
 
 /*
  * Account user cpu time to a process.
@@ -2925,10 +2944,10 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
  */
 void scheduler_tick(void)
 {
+        unsigned long long now = sched_clock();
         int cpu = smp_processor_id();
         runqueue_t *rq = this_rq();
         task_t *p = current;
-        unsigned long long now = sched_clock();
 
         update_cpu_clock(p, rq, now);
 
@@ -2978,7 +2997,7 @@ void scheduler_tick(void)
 
                 if (!rq->expired_timestamp)
                         rq->expired_timestamp = jiffies;
-                if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
+                if (!TASK_INTERACTIVE(p) || expired_starving(rq)) {
                         enqueue_task(p, rq->expired);
                         if (p->static_prio < rq->best_expired_prio)
                                 rq->best_expired_prio = p->static_prio;
@@ -3137,9 +3156,8 @@ unlock:
 static inline void wake_sleeping_dependent(int this_cpu)
 {
 }
-
-static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq,
-                                    task_t *p)
+static inline int
+dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p)
 {
         return 0;
 }
@@ -3193,14 +3211,14 @@ static inline int interactive_sleep(enum sleep_type sleep_type)
  */
 asmlinkage void __sched schedule(void)
 {
-        long *switch_count;
-        task_t *prev, *next;
-        runqueue_t *rq;
-        prio_array_t *array;
         struct list_head *queue;
         unsigned long long now;
         unsigned long run_time;
         int cpu, idx, new_prio;
+        task_t *prev, *next;
+        prio_array_t *array;
+        long *switch_count;
+        runqueue_t *rq;
 
         /*
          * Test if we are atomic. Since do_exit() needs to call into
@@ -3353,7 +3371,6 @@ switch_tasks:
         if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
                 goto need_resched;
 }
-
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_PREEMPT
@@ -3398,7 +3415,6 @@ need_resched:
         if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
                 goto need_resched;
 }
-
 EXPORT_SYMBOL(preempt_schedule);
 
 /*
@@ -3447,10 +3463,8 @@ need_resched:
 int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
                           void *key)
 {
-        task_t *p = curr->private;
-        return try_to_wake_up(p, mode, sync);
+        return try_to_wake_up(curr->private, mode, sync);
 }
-
 EXPORT_SYMBOL(default_wake_function);
 
 /*
@@ -3468,13 +3482,11 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
         struct list_head *tmp, *next;
 
         list_for_each_safe(tmp, next, &q->task_list) {
-                wait_queue_t *curr;
-                unsigned flags;
-                curr = list_entry(tmp, wait_queue_t, task_list);
-                flags = curr->flags;
+                wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
+                unsigned flags = curr->flags;
+
                 if (curr->func(curr, mode, sync, key) &&
-                    (flags & WQ_FLAG_EXCLUSIVE) &&
-                    !--nr_exclusive)
+                    (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                         break;
         }
 }
@@ -3495,7 +3507,6 @@ void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
         __wake_up_common(q, mode, nr_exclusive, 0, key);
         spin_unlock_irqrestore(&q->lock, flags);
 }
-
 EXPORT_SYMBOL(__wake_up);
 
 /*
@@ -3564,6 +3575,7 @@ EXPORT_SYMBOL(complete_all);
 void fastcall __sched wait_for_completion(struct completion *x)
 {
         might_sleep();
+
         spin_lock_irq(&x->wait.lock);
         if (!x->done) {
                 DECLARE_WAITQUEUE(wait, current);
@@ -3708,7 +3720,6 @@ void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
         schedule();
         SLEEP_ON_TAIL
 }
-
 EXPORT_SYMBOL(interruptible_sleep_on);
 
 long fastcall __sched
@@ -3724,7 +3735,6 @@ interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 
         return timeout;
 }
-
 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
 
 void fastcall __sched sleep_on(wait_queue_head_t *q)
@@ -3737,7 +3747,6 @@ void fastcall __sched sleep_on(wait_queue_head_t *q)
         schedule();
         SLEEP_ON_TAIL
 }
-
 EXPORT_SYMBOL(sleep_on);
 
 long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
@@ -3810,10 +3819,10 @@ void rt_mutex_setprio(task_t *p, int prio)
 
 void set_user_nice(task_t *p, long nice)
 {
+        int old_prio, delta;
         unsigned long flags;
         prio_array_t *array;
         runqueue_t *rq;
-        int old_prio, delta;
 
         if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
                 return;
@@ -3868,6 +3877,7 @@ int can_nice(const task_t *p, const int nice)
 {
         /* convert nice value [19,-20] to rlimit style value [1,40] */
         int nice_rlim = 20 - nice;
+
         return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
                 capable(CAP_SYS_NICE));
 }
@@ -3883,8 +3893,7 @@ int can_nice(const task_t *p, const int nice)
  */
 asmlinkage long sys_nice(int increment)
 {
-        int retval;
-        long nice;
+        long nice, retval;
 
         /*
          * Setpriority might change our priority at the same moment.
@@ -3969,6 +3978,7 @@ static inline task_t *find_process_by_pid(pid_t pid)
 static void __setscheduler(struct task_struct *p, int policy, int prio)
 {
         BUG_ON(p->array);
+
         p->policy = policy;
         p->rt_priority = prio;
         p->normal_prio = normal_prio(p);
@@ -3992,8 +4002,7 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
 int sched_setscheduler(struct task_struct *p, int policy,
                        struct sched_param *param)
 {
-        int retval;
-        int oldprio, oldpolicy = -1;
+        int retval, oldprio, oldpolicy = -1;
         prio_array_t *array;
         unsigned long flags;
         runqueue_t *rq;
@@ -4495,7 +4504,6 @@ void __sched yield(void)
         set_current_state(TASK_RUNNING);
         sys_sched_yield();
 }
-
 EXPORT_SYMBOL(yield);
 
 /*
@@ -4513,7 +4521,6 @@ void __sched io_schedule(void)
         schedule();
         atomic_dec(&rq->nr_iowait);
 }
-
 EXPORT_SYMBOL(io_schedule);
 
 long __sched io_schedule_timeout(long timeout)
@@ -4615,19 +4622,22 @@ out_unlock:
 
 static inline struct task_struct *eldest_child(struct task_struct *p)
 {
-        if (list_empty(&p->children)) return NULL;
+        if (list_empty(&p->children))
+                return NULL;
         return list_entry(p->children.next,struct task_struct,sibling);
 }
 
 static inline struct task_struct *older_sibling(struct task_struct *p)
 {
-        if (p->sibling.prev==&p->parent->children) return NULL;
+        if (p->sibling.prev==&p->parent->children)
+                return NULL;
         return list_entry(p->sibling.prev,struct task_struct,sibling);
 }
 
 static inline struct task_struct *younger_sibling(struct task_struct *p)
 {
-        if (p->sibling.next==&p->parent->children) return NULL;
+        if (p->sibling.next==&p->parent->children)
+                return NULL;
         return list_entry(p->sibling.next,struct task_struct,sibling);
 }
 
@@ -4786,9 +4796,9 @@ cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
 int set_cpus_allowed(task_t *p, cpumask_t new_mask)
 {
         unsigned long flags;
-        int ret = 0;
         migration_req_t req;
         runqueue_t *rq;
+        int ret = 0;
 
         rq = task_rq_lock(p, &flags);
         if (!cpus_intersects(new_mask, cpu_online_map)) {
@@ -4811,9 +4821,9 @@ int set_cpus_allowed(task_t *p, cpumask_t new_mask)
         }
 out:
         task_rq_unlock(rq, &flags);
+
         return ret;
 }
-
 EXPORT_SYMBOL_GPL(set_cpus_allowed);
 
 /*
@@ -4874,8 +4884,8 @@ out:
  */
 static int migration_thread(void *data)
 {
-        runqueue_t *rq;
         int cpu = (long)data;
+        runqueue_t *rq;
 
         rq = cpu_rq(cpu);
         BUG_ON(rq->migration_thread != current);
@@ -4932,7 +4942,7 @@ wait_to_die:
 
 #ifdef CONFIG_HOTPLUG_CPU
 /* Figure out where task on dead CPU should go, use force if neccessary. */
-static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
+static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
         runqueue_t *rq;
         unsigned long flags;
@@ -4942,18 +4952,18 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
 restart:
         /* On same node? */
         mask = node_to_cpumask(cpu_to_node(dead_cpu));
-        cpus_and(mask, mask, tsk->cpus_allowed);
+        cpus_and(mask, mask, p->cpus_allowed);
         dest_cpu = any_online_cpu(mask);
 
         /* On any allowed CPU? */
         if (dest_cpu == NR_CPUS)
-                dest_cpu = any_online_cpu(tsk->cpus_allowed);
+                dest_cpu = any_online_cpu(p->cpus_allowed);
 
         /* No more Mr. Nice Guy. */
         if (dest_cpu == NR_CPUS) {
-                rq = task_rq_lock(tsk, &flags);
-                cpus_setall(tsk->cpus_allowed);
-                dest_cpu = any_online_cpu(tsk->cpus_allowed);
+                rq = task_rq_lock(p, &flags);
+                cpus_setall(p->cpus_allowed);
+                dest_cpu = any_online_cpu(p->cpus_allowed);
                 task_rq_unlock(rq, &flags);
 
                 /*
@@ -4961,12 +4971,12 @@ restart:
                  * kernel threads (both mm NULL), since they never
                  * leave kernel.
                  */
-                if (tsk->mm && printk_ratelimit())
+                if (p->mm && printk_ratelimit())
                         printk(KERN_INFO "process %d (%s) no "
                                "longer affine to cpu%d\n",
-                               tsk->pid, tsk->comm, dead_cpu);
+                               p->pid, p->comm, dead_cpu);
         }
-        if (!__migrate_task(tsk, dead_cpu, dest_cpu))
+        if (!__migrate_task(p, dead_cpu, dest_cpu))
                 goto restart;
 }
 
@@ -4993,48 +5003,51 @@ static void migrate_nr_uninterruptible(runqueue_t *rq_src)
 /* Run through task list and migrate tasks from the dead cpu. */
 static void migrate_live_tasks(int src_cpu)
 {
-        struct task_struct *tsk, *t;
+        struct task_struct *p, *t;
 
         write_lock_irq(&tasklist_lock);
 
-        do_each_thread(t, tsk) {
-                if (tsk == current)
+        do_each_thread(t, p) {
+                if (p == current)
                         continue;
 
-                if (task_cpu(tsk) == src_cpu)
-                        move_task_off_dead_cpu(src_cpu, tsk);
-        } while_each_thread(t, tsk);
+                if (task_cpu(p) == src_cpu)
+                        move_task_off_dead_cpu(src_cpu, p);
+        } while_each_thread(t, p);
 
         write_unlock_irq(&tasklist_lock);
 }
 
 /* Schedules idle task to be the next runnable task on current CPU.
  * It does so by boosting its priority to highest possible and adding it to
- * the _front_ of runqueue. Used by CPU offline code.
+ * the _front_ of the runqueue. Used by CPU offline code.
  */
 void sched_idle_next(void)
 {
-        int cpu = smp_processor_id();
-        runqueue_t *rq = this_rq();
+        int this_cpu = smp_processor_id();
+        runqueue_t *rq = cpu_rq(this_cpu);
         struct task_struct *p = rq->idle;
         unsigned long flags;
 
         /* cpu has to be offline */
-        BUG_ON(cpu_online(cpu));
+        BUG_ON(cpu_online(this_cpu));
 
-        /* Strictly not necessary since rest of the CPUs are stopped by now
-         * and interrupts disabled on current cpu.
+        /*
+         * Strictly not necessary since rest of the CPUs are stopped by now
+         * and interrupts disabled on the current cpu.
          */
         spin_lock_irqsave(&rq->lock, flags);
 
         __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
-        /* Add idle task to _front_ of it's priority queue */
+
+        /* Add idle task to the _front_ of its priority queue: */
         __activate_idle_task(p, rq);
 
         spin_unlock_irqrestore(&rq->lock, flags);
 }
 
-/* Ensures that the idle task is using init_mm right before its cpu goes
+/*
+ * Ensures that the idle task is using init_mm right before its cpu goes
  * offline.
  */
 void idle_task_exit(void)
@@ -5048,17 +5061,17 @@ void idle_task_exit(void)
         mmdrop(mm);
 }
 
-static void migrate_dead(unsigned int dead_cpu, task_t *tsk)
+static void migrate_dead(unsigned int dead_cpu, task_t *p)
 {
         struct runqueue *rq = cpu_rq(dead_cpu);
 
         /* Must be exiting, otherwise would be on tasklist. */
-        BUG_ON(tsk->exit_state != EXIT_ZOMBIE && tsk->exit_state != EXIT_DEAD);
+        BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD);
 
         /* Cannot have done final schedule yet: would have vanished. */
-        BUG_ON(tsk->flags & PF_DEAD);
+        BUG_ON(p->flags & PF_DEAD);
 
-        get_task_struct(tsk);
+        get_task_struct(p);
 
         /*
          * Drop lock around migration; if someone else moves it,
@@ -5066,21 +5079,22 @@ static void migrate_dead(unsigned int dead_cpu, task_t *tsk)
          * fine.
          */
         spin_unlock_irq(&rq->lock);
-        move_task_off_dead_cpu(dead_cpu, tsk);
+        move_task_off_dead_cpu(dead_cpu, p);
         spin_lock_irq(&rq->lock);
 
-        put_task_struct(tsk);
+        put_task_struct(p);
 }
 
 /* release_task() removes task from tasklist, so we won't find dead tasks. */
 static void migrate_dead_tasks(unsigned int dead_cpu)
 {
-        unsigned arr, i;
         struct runqueue *rq = cpu_rq(dead_cpu);
+        unsigned int arr, i;
 
         for (arr = 0; arr < 2; arr++) {
                 for (i = 0; i < MAX_PRIO; i++) {
                         struct list_head *list = &rq->arrays[arr].queue[i];
+
                         while (!list_empty(list))
                                 migrate_dead(dead_cpu,
                                              list_entry(list->next, task_t,
@@ -5094,12 +5108,11 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
  * migration_call - callback that gets triggered when a CPU is added.
  * Here we can start up the necessary migration thread for the new CPU.
  */
-static int __cpuinit migration_call(struct notifier_block *nfb,
-                                    unsigned long action,
-                                    void *hcpu)
+static int __cpuinit
+migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
-        int cpu = (long)hcpu;
         struct task_struct *p;
+        int cpu = (long)hcpu;
         struct runqueue *rq;
         unsigned long flags;
 
@@ -5116,10 +5129,12 @@ static int __cpuinit migration_call(struct notifier_block *nfb,
                 task_rq_unlock(rq, &flags);
                 cpu_rq(cpu)->migration_thread = p;
                 break;
+
         case CPU_ONLINE:
                 /* Strictly unneccessary, as first user will wake it. */
                 wake_up_process(cpu_rq(cpu)->migration_thread);
                 break;
+
 #ifdef CONFIG_HOTPLUG_CPU
         case CPU_UP_CANCELED:
                 if (!cpu_rq(cpu)->migration_thread)
@@ -5130,6 +5145,7 @@ static int __cpuinit migration_call(struct notifier_block *nfb,
                 kthread_stop(cpu_rq(cpu)->migration_thread);
                 cpu_rq(cpu)->migration_thread = NULL;
                 break;
+
         case CPU_DEAD:
                 migrate_live_tasks(cpu);
                 rq = cpu_rq(cpu);
@@ -5174,10 +5190,12 @@ static struct notifier_block __cpuinitdata migration_notifier = {
 int __init migration_init(void)
 {
         void *cpu = (void *)(long)smp_processor_id();
-        /* Start one for boot CPU. */
+
+        /* Start one for the boot CPU: */
         migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
         migration_call(&migration_notifier, CPU_ONLINE, cpu);
         register_cpu_notifier(&migration_notifier);
+
         return 0;
 }
 #endif
@@ -5273,7 +5291,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
         } while (sd);
 }
 #else
-#define sched_domain_debug(sd, cpu) {}
+# define sched_domain_debug(sd, cpu) do { } while (0)
 #endif
 
 static int sd_degenerate(struct sched_domain *sd)
@@ -5299,8 +5317,8 @@ static int sd_degenerate(struct sched_domain *sd)
         return 1;
 }
 
-static int sd_parent_degenerate(struct sched_domain *sd,
-                                struct sched_domain *parent)
+static int
+sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 {
         unsigned long cflags = sd->flags, pflags = parent->flags;
 
@@ -5595,8 +5613,8 @@ static void touch_cache(void *__cache, unsigned long __size)
 /*
  * Measure the cache-cost of one task migration. Returns in units of nsec.
  */
-static unsigned long long measure_one(void *cache, unsigned long size,
-                                      int source, int target)
+static unsigned long long
+measure_one(void *cache, unsigned long size, int source, int target)
 {
         cpumask_t mask, saved_mask;
         unsigned long long t0, t1, t2, t3, cost;
@@ -5946,9 +5964,9 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
  */
 static cpumask_t sched_domain_node_span(int node)
 {
-        int i;
-        cpumask_t span, nodemask;
         DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
+        cpumask_t span, nodemask;
+        int i;
 
         cpus_clear(span);
         bitmap_zero(used_nodes, MAX_NUMNODES);
@@ -5959,6 +5977,7 @@ static cpumask_t sched_domain_node_span(int node)
 
         for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
                 int next_node = find_next_best_node(node, used_nodes);
+
                 nodemask = node_to_cpumask(next_node);
                 cpus_or(span, span, nodemask);
         }
@@ -5968,19 +5987,23 @@ static cpumask_t sched_domain_node_span(int node)
 #endif
 
 int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
+
 /*
- * At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we
- * can switch it on easily if needed.
+ * SMT sched-domains:
  */
 #ifdef CONFIG_SCHED_SMT
 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
 static struct sched_group sched_group_cpus[NR_CPUS];
+
 static int cpu_to_cpu_group(int cpu)
 {
         return cpu;
 }
 #endif
 
+/*
+ * multi-core sched-domains:
+ */
 #ifdef CONFIG_SCHED_MC
 static DEFINE_PER_CPU(struct sched_domain, core_domains);
 static struct sched_group *sched_group_core_bycpu[NR_CPUS];
@@ -6000,9 +6023,10 @@ static int cpu_to_core_group(int cpu)
 
 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
 static struct sched_group *sched_group_phys_bycpu[NR_CPUS];
+
 static int cpu_to_phys_group(int cpu)
 {
-#if defined(CONFIG_SCHED_MC)
+#ifdef CONFIG_SCHED_MC
         cpumask_t mask = cpu_coregroup_map(cpu);
         return first_cpu(mask);
 #elif defined(CONFIG_SCHED_SMT)
@@ -6548,6 +6572,7 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
 int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
 {
         int err = 0;
+
 #ifdef CONFIG_SCHED_SMT
         if (smt_capable())
                 err = sysfs_create_file(&cls->kset.kobj,
@@ -6567,7 +6592,8 @@ static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page)
 {
         return sprintf(page, "%u\n", sched_mc_power_savings);
 }
-static ssize_t sched_mc_power_savings_store(struct sys_device *dev, const char *buf, size_t count)
+static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
+                                            const char *buf, size_t count)
 {
         return sched_power_savings_store(buf, count, 0);
 }
@@ -6580,7 +6606,8 @@ static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page)
 {
         return sprintf(page, "%u\n", sched_smt_power_savings);
 }
-static ssize_t sched_smt_power_savings_store(struct sys_device *dev, const char *buf, size_t count)
+static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
+                                             const char *buf, size_t count)
 {
         return sched_power_savings_store(buf, count, 1);
 }
@@ -6642,6 +6669,7 @@ int in_sched_functions(unsigned long addr)
 {
         /* Linker adds these: start and end of __sched functions */
         extern char __sched_text_start[], __sched_text_end[];
+
         return in_lock_functions(addr) ||
                 (addr >= (unsigned long)__sched_text_start
                 && addr < (unsigned long)__sched_text_end);
@@ -6649,11 +6677,11 @@ int in_sched_functions(unsigned long addr)
 
 void __init sched_init(void)
 {
-        runqueue_t *rq;
         int i, j, k;
 
         for_each_possible_cpu(i) {
                 prio_array_t *array;
+                runqueue_t *rq;
 
                 rq = cpu_rq(i);
                 spin_lock_init(&rq->lock);
@@ -6704,7 +6732,7 @@ void __init sched_init(void)
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
 void __might_sleep(char *file, int line)
 {
-#if defined(in_atomic)
+#ifdef in_atomic
         static unsigned long prev_jiffy;        /* ratelimiting */
 
         if ((in_atomic() || irqs_disabled()) &&