diff options
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- | kernel/sched_fair.c | 145 |
1 file changed, 145 insertions, 0 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 4f6356e70ad6..a82d71d3afed 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -1360,6 +1360,151 @@ static int select_task_rq_fair(struct task_struct *p, int sync) | |||
1360 | out: | 1360 | out: |
1361 | return wake_idle(new_cpu, p); | 1361 | return wake_idle(new_cpu, p); |
1362 | } | 1362 | } |
1363 | |||
/*
 * find_idlest_group finds and returns the least busy CPU group within the
 * domain.
 *
 * Returns NULL when there is no eligible remote group, or when the local
 * group (the one containing @this_cpu) is already loaded within the
 * domain's imbalance threshold of the best remote group — in which case
 * the caller should not migrate away from the local group.
 */
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
{
	struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
	unsigned long min_load = ULONG_MAX, this_load = 0;
	/* Load index selected for fork/exec placement decisions. */
	int load_idx = sd->forkexec_idx;
	/* Half of the domain's configured imbalance margin, in percent. */
	int imbalance = 100 + (sd->imbalance_pct-100)/2;

	do {
		unsigned long load, avg_load;
		int local_group;
		int i;

		/*
		 * Skip over this group if it has no CPUs allowed.
		 * NB: "continue" in this do-while jumps to the condition
		 * below, which advances to the next group.
		 */
		if (!cpumask_intersects(sched_group_cpus(group),
					&p->cpus_allowed))
			continue;

		local_group = cpumask_test_cpu(this_cpu,
					       sched_group_cpus(group));

		/* Tally up the load of all CPUs in the group */
		avg_load = 0;

		for_each_cpu(i, sched_group_cpus(group)) {
			/* Bias balancing toward cpus of our domain */
			if (local_group)
				load = source_load(i, load_idx);
			else
				load = target_load(i, load_idx);

			avg_load += load;
		}

		/* Adjust by relative CPU power of the group */
		avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;

		if (local_group) {
			/* Remember the local group's load separately. */
			this_load = avg_load;
			this = group;
		} else if (avg_load < min_load) {
			/* Track the least-loaded remote group seen so far. */
			min_load = avg_load;
			idlest = group;
		}
	} while (group = group->next, group != sd->groups);

	/*
	 * Only report a remote group if its load beats the local group's
	 * by more than the imbalance margin (compared in percent terms).
	 */
	if (!idlest || 100*this_load < imbalance*min_load)
		return NULL;
	return idlest;
}
1418 | |||
1419 | /* | ||
1420 | * find_idlest_cpu - find the idlest cpu among the cpus in group. | ||
1421 | */ | ||
1422 | static int | ||
1423 | find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) | ||
1424 | { | ||
1425 | unsigned long load, min_load = ULONG_MAX; | ||
1426 | int idlest = -1; | ||
1427 | int i; | ||
1428 | |||
1429 | /* Traverse only the allowed CPUs */ | ||
1430 | for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) { | ||
1431 | load = weighted_cpuload(i); | ||
1432 | |||
1433 | if (load < min_load || (load == min_load && i == this_cpu)) { | ||
1434 | min_load = load; | ||
1435 | idlest = i; | ||
1436 | } | ||
1437 | } | ||
1438 | |||
1439 | return idlest; | ||
1440 | } | ||
1441 | |||
/*
 * sched_balance_self: balance the current task (running on cpu) in domains
 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
 * SD_BALANCE_EXEC.
 *
 * Balance, ie. select the least loaded group.
 *
 * Returns the target CPU number, or the same CPU if no balancing is needed.
 *
 * preempt must be disabled.
 */
static int sched_balance_self(int cpu, int flag)
{
	struct task_struct *t = current;
	struct sched_domain *tmp, *sd = NULL;

	/*
	 * Find the widest domain with 'flag' set, but do not climb past a
	 * power-savings domain.
	 */
	for_each_domain(cpu, tmp) {
		/*
		 * If power savings logic is enabled for a domain, stop there.
		 */
		if (tmp->flags & SD_POWERSAVINGS_BALANCE)
			break;
		if (tmp->flags & flag)
			sd = tmp;
	}

	/*
	 * Refresh the domain's group-share accounting before comparing
	 * loads (presumably so the load figures below are current —
	 * update_shares() is defined elsewhere; confirm against caller).
	 */
	if (sd)
		update_shares(sd);

	/* Descend the domain hierarchy, re-placing 'cpu' at each level. */
	while (sd) {
		struct sched_group *group;
		int new_cpu, weight;

		if (!(sd->flags & flag)) {
			sd = sd->child;
			continue;
		}

		group = find_idlest_group(sd, t, cpu);
		if (!group) {
			/* Local group is good enough at this level. */
			sd = sd->child;
			continue;
		}

		new_cpu = find_idlest_cpu(group, t, cpu);
		if (new_cpu == -1 || new_cpu == cpu) {
			/* Now try balancing at a lower domain level of cpu */
			sd = sd->child;
			continue;
		}

		/* Now try balancing at a lower domain level of new_cpu */
		cpu = new_cpu;
		weight = cpumask_weight(sched_domain_span(sd));
		sd = NULL;
		/*
		 * Restart the walk from new_cpu's domain list, keeping only
		 * domains strictly narrower than the one just used (so the
		 * descent makes progress and terminates).
		 */
		for_each_domain(cpu, tmp) {
			if (weight <= cpumask_weight(sched_domain_span(tmp)))
				break;
			if (tmp->flags & flag)
				sd = tmp;
		}
		/* while loop will break here if sd == NULL */
	}

	return cpu;
}
1363 | #endif /* CONFIG_SMP */ | 1508 | #endif /* CONFIG_SMP */ |
1364 | 1509 | ||
1365 | /* | 1510 | /* |