path: root/kernel
author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-09-10 07:36:25 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-09-15 10:01:04 -0400
commit	aaee1203ca52b9db799433c33c9bffc33cdf8909 (patch)
tree	27da699a8034090f8be9e24d1b7cedb8f6cdf804 /kernel
parent	f5f08f39ee4c5fd0a757d25d9e04d696676b3df7 (diff)
sched: Move sched_balance_self() into sched_fair.c
Move the sched_balance_self() code into sched_fair.c

This facilitates the merger of sched_balance_self() and
sched_fair::select_task_rq().

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
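For context (not part of this commit): sched_balance_self() is the slow-path CPU picker used when a task forks or execs, which is why moving it next to the fair class's select_task_rq() hook is a natural precursor to merging the two. A minimal sketch of what its call sites look like in this era of kernel/sched.c follows; the exact shapes are recalled from memory and should be treated as illustrative, not as code from this diff.

/*
 * Illustrative call sites only -- approximately the 2.6.31-era scheduler,
 * not part of this patch.
 */

/* fork path: pick a CPU for the new task before it is first queued */
#ifdef CONFIG_SMP
	cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
#endif

/* exec path: the task's footprint is minimal, so moving it now is cheap */
void sched_exec(void)
{
	int new_cpu, this_cpu = get_cpu();

	new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
	put_cpu();
	if (new_cpu != this_cpu)
		sched_migrate_task(current, new_cpu);
}

Both call sites pass a balance flag (SD_BALANCE_FORK or SD_BALANCE_EXEC), which is exactly the 'flag' argument the function uses to walk the sched_domain hierarchy in the code moved below.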
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	146
-rw-r--r--	kernel/sched_fair.c	145
2 files changed, 145 insertions(+), 146 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index b56d1505d058..60400a22401f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2269,152 +2269,6 @@ void kick_process(struct task_struct *p)
 	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(kick_process);
-
-/*
- * find_idlest_group finds and returns the least busy CPU group within the
- * domain.
- */
-static struct sched_group *
-find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
-{
-	struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
-	unsigned long min_load = ULONG_MAX, this_load = 0;
-	int load_idx = sd->forkexec_idx;
-	int imbalance = 100 + (sd->imbalance_pct-100)/2;
-
-	do {
-		unsigned long load, avg_load;
-		int local_group;
-		int i;
-
-		/* Skip over this group if it has no CPUs allowed */
-		if (!cpumask_intersects(sched_group_cpus(group),
-					&p->cpus_allowed))
-			continue;
-
-		local_group = cpumask_test_cpu(this_cpu,
-					       sched_group_cpus(group));
-
-		/* Tally up the load of all CPUs in the group */
-		avg_load = 0;
-
-		for_each_cpu(i, sched_group_cpus(group)) {
-			/* Bias balancing toward cpus of our domain */
-			if (local_group)
-				load = source_load(i, load_idx);
-			else
-				load = target_load(i, load_idx);
-
-			avg_load += load;
-		}
-
-		/* Adjust by relative CPU power of the group */
-		avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
-
-		if (local_group) {
-			this_load = avg_load;
-			this = group;
-		} else if (avg_load < min_load) {
-			min_load = avg_load;
-			idlest = group;
-		}
-	} while (group = group->next, group != sd->groups);
-
-	if (!idlest || 100*this_load < imbalance*min_load)
-		return NULL;
-	return idlest;
-}
-
-/*
- * find_idlest_cpu - find the idlest cpu among the cpus in group.
- */
-static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
-{
-	unsigned long load, min_load = ULONG_MAX;
-	int idlest = -1;
-	int i;
-
-	/* Traverse only the allowed CPUs */
-	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
-		load = weighted_cpuload(i);
-
-		if (load < min_load || (load == min_load && i == this_cpu)) {
-			min_load = load;
-			idlest = i;
-		}
-	}
-
-	return idlest;
-}
-
-/*
- * sched_balance_self: balance the current task (running on cpu) in domains
- * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
- * SD_BALANCE_EXEC.
- *
- * Balance, ie. select the least loaded group.
- *
- * Returns the target CPU number, or the same CPU if no balancing is needed.
- *
- * preempt must be disabled.
- */
-static int sched_balance_self(int cpu, int flag)
-{
-	struct task_struct *t = current;
-	struct sched_domain *tmp, *sd = NULL;
-
-	for_each_domain(cpu, tmp) {
-		/*
-		 * If power savings logic is enabled for a domain, stop there.
-		 */
-		if (tmp->flags & SD_POWERSAVINGS_BALANCE)
-			break;
-		if (tmp->flags & flag)
-			sd = tmp;
-	}
-
-	if (sd)
-		update_shares(sd);
-
-	while (sd) {
-		struct sched_group *group;
-		int new_cpu, weight;
-
-		if (!(sd->flags & flag)) {
-			sd = sd->child;
-			continue;
-		}
-
-		group = find_idlest_group(sd, t, cpu);
-		if (!group) {
-			sd = sd->child;
-			continue;
-		}
-
-		new_cpu = find_idlest_cpu(group, t, cpu);
-		if (new_cpu == -1 || new_cpu == cpu) {
-			/* Now try balancing at a lower domain level of cpu */
-			sd = sd->child;
-			continue;
-		}
-
-		/* Now try balancing at a lower domain level of new_cpu */
-		cpu = new_cpu;
-		weight = cpumask_weight(sched_domain_span(sd));
-		sd = NULL;
-		for_each_domain(cpu, tmp) {
-			if (weight <= cpumask_weight(sched_domain_span(tmp)))
-				break;
-			if (tmp->flags & flag)
-				sd = tmp;
-		}
-		/* while loop will break here if sd == NULL */
-	}
-
-	return cpu;
-}
-
 #endif /* CONFIG_SMP */
 
 /**
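A note on the threshold in find_idlest_group() above: with an imbalance_pct of 125 (an assumption for illustration; the real value is set per sched_domain), imbalance works out to 100 + (125-100)/2 = 112, so the function returns NULL and keeps the task local unless the idlest remote group carries roughly 11% less load than the local group. A small standalone sketch of that arithmetic, using made-up load figures:

#include <stdio.h>

/*
 * Standalone illustration of find_idlest_group()'s keep-local test.
 * imbalance_pct = 125 and the load values are assumptions, not kernel data.
 */
int main(void)
{
	unsigned int imbalance_pct = 125;
	unsigned int imbalance = 100 + (imbalance_pct - 100) / 2;	/* 112 */
	unsigned long this_load = 1000;		/* local group's scaled load */
	unsigned long min_load  = 900;		/* idlest remote group's load */

	/*
	 * Same comparison as the kernel code: stay with the local group
	 * unless the remote group is clearly less loaded.
	 */
	if (100 * this_load < imbalance * min_load)
		printf("keep local group (100*%lu < %u*%lu)\n",
		       this_load, imbalance, min_load);
	else
		printf("migrate toward idlest group\n");

	return 0;
}

With these numbers, 100*1000 = 100000 is below 112*900 = 100800, so a 10% lighter remote group is not enough and the task stays put; drop min_load to 850 and the comparison flips, so the idlest group is handed back to the caller.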
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 4f6356e70ad6..a82d71d3afed 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1360,6 +1360,151 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 out:
 	return wake_idle(new_cpu, p);
 }
+
+/*
+ * find_idlest_group finds and returns the least busy CPU group within the
+ * domain.
+ */
+static struct sched_group *
+find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
+{
+	struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
+	unsigned long min_load = ULONG_MAX, this_load = 0;
+	int load_idx = sd->forkexec_idx;
+	int imbalance = 100 + (sd->imbalance_pct-100)/2;
+
+	do {
+		unsigned long load, avg_load;
+		int local_group;
+		int i;
+
+		/* Skip over this group if it has no CPUs allowed */
+		if (!cpumask_intersects(sched_group_cpus(group),
+					&p->cpus_allowed))
+			continue;
+
+		local_group = cpumask_test_cpu(this_cpu,
+					       sched_group_cpus(group));
+
+		/* Tally up the load of all CPUs in the group */
+		avg_load = 0;
+
+		for_each_cpu(i, sched_group_cpus(group)) {
+			/* Bias balancing toward cpus of our domain */
+			if (local_group)
+				load = source_load(i, load_idx);
+			else
+				load = target_load(i, load_idx);
+
+			avg_load += load;
+		}
+
+		/* Adjust by relative CPU power of the group */
+		avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
+
+		if (local_group) {
+			this_load = avg_load;
+			this = group;
+		} else if (avg_load < min_load) {
+			min_load = avg_load;
+			idlest = group;
+		}
+	} while (group = group->next, group != sd->groups);
+
+	if (!idlest || 100*this_load < imbalance*min_load)
+		return NULL;
+	return idlest;
+}
+
+/*
+ * find_idlest_cpu - find the idlest cpu among the cpus in group.
+ */
+static int
+find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
+{
+	unsigned long load, min_load = ULONG_MAX;
+	int idlest = -1;
+	int i;
+
+	/* Traverse only the allowed CPUs */
+	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
+		load = weighted_cpuload(i);
+
+		if (load < min_load || (load == min_load && i == this_cpu)) {
+			min_load = load;
+			idlest = i;
+		}
+	}
+
+	return idlest;
+}
+
+/*
+ * sched_balance_self: balance the current task (running on cpu) in domains
+ * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
+ * SD_BALANCE_EXEC.
+ *
+ * Balance, ie. select the least loaded group.
+ *
+ * Returns the target CPU number, or the same CPU if no balancing is needed.
+ *
+ * preempt must be disabled.
+ */
+static int sched_balance_self(int cpu, int flag)
+{
+	struct task_struct *t = current;
+	struct sched_domain *tmp, *sd = NULL;
+
+	for_each_domain(cpu, tmp) {
+		/*
+		 * If power savings logic is enabled for a domain, stop there.
+		 */
+		if (tmp->flags & SD_POWERSAVINGS_BALANCE)
+			break;
+		if (tmp->flags & flag)
+			sd = tmp;
+	}
+
+	if (sd)
+		update_shares(sd);
+
+	while (sd) {
+		struct sched_group *group;
+		int new_cpu, weight;
+
+		if (!(sd->flags & flag)) {
+			sd = sd->child;
+			continue;
+		}
+
+		group = find_idlest_group(sd, t, cpu);
+		if (!group) {
+			sd = sd->child;
+			continue;
+		}
+
+		new_cpu = find_idlest_cpu(group, t, cpu);
+		if (new_cpu == -1 || new_cpu == cpu) {
+			/* Now try balancing at a lower domain level of cpu */
+			sd = sd->child;
+			continue;
+		}
+
+		/* Now try balancing at a lower domain level of new_cpu */
+		cpu = new_cpu;
+		weight = cpumask_weight(sched_domain_span(sd));
+		sd = NULL;
+		for_each_domain(cpu, tmp) {
+			if (weight <= cpumask_weight(sched_domain_span(tmp)))
+				break;
+			if (tmp->flags & flag)
+				sd = tmp;
+		}
+		/* while loop will break here if sd == NULL */
+	}
+
+	return cpu;
+}
 #endif /* CONFIG_SMP */
 
 /*
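The commit message points at the follow-up step: once the slow path lives in sched_fair.c, the fair class can answer fork/exec placement through the same per-class hook it already uses for wakeups. The shape below is purely a reading aid, not code from this or any later commit; the sd_flag parameter, the dispatch, and the placeholder return are all assumptions.

/*
 * Sketch only: a unified fair-class CPU-selection entry point that receives
 * the balance flag, so fork/exec placement can reuse find_idlest_group() and
 * find_idlest_cpu() while wakeups keep the existing affine fast path.
 * Names and signature here are illustrative assumptions.
 */
static int select_task_rq_fair(struct task_struct *p, int sd_flag, int sync)
{
	if (sd_flag & (SD_BALANCE_FORK | SD_BALANCE_EXEC))
		return sched_balance_self(task_cpu(p), sd_flag);	/* slow path */

	return task_cpu(p);	/* placeholder for the existing wakeup logic */
}

This commit itself only performs the move; the actual unification is left to later work, so treat the sketch above strictly as an illustration of the direction the message describes.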