author     Paul Mackerras <paulus@samba.org>   2006-03-28 21:24:50 -0500
committer  Paul Mackerras <paulus@samba.org>   2006-03-28 21:24:50 -0500
commit     bac30d1a78d0f11c613968fc8b351a91ed465386
tree       e52f3c876522a2f6047a6ec1c27df2e8a79486b8  /kernel/sched.c
parent     e8222502ee6157e2713da9e0792c21f4ad458d50
parent     ca9ba4471c1203bb6e759b76e83167fec54fe590
Merge ../linux-2.6
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  154
1 file changed, 106 insertions, 48 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 78acdefeccca..a9ecac398bb9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -145,7 +145,8 @@
         (v1) * (v2_max) / (v1_max)
 
 #define DELTA(p) \
-        (SCALE(TASK_NICE(p), 40, MAX_BONUS) + INTERACTIVE_DELTA)
+        (SCALE(TASK_NICE(p) + 20, 40, MAX_BONUS) - 20 * MAX_BONUS / 40 + \
+                INTERACTIVE_DELTA)
 
 #define TASK_INTERACTIVE(p) \
         ((p)->prio <= (p)->static_prio - DELTA(p))
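The reworked DELTA() shifts TASK_NICE(p) into the 0..40 range before applying SCALE() and subtracts the offset afterwards, so the integer division behaves like a floor across the whole nice range instead of truncating toward zero for negative nice values. A minimal userspace sketch of the arithmetic, assuming MAX_BONUS evaluates to 10 and INTERACTIVE_DELTA to 2 (their usual values in 2.6-era sched.c):

#include <stdio.h>

/*
 * Userspace model of the old and new DELTA() macros.  MAX_BONUS and
 * INTERACTIVE_DELTA are assumed values here; SCALE() is copied from the
 * context lines above.
 */
#define MAX_BONUS               10
#define INTERACTIVE_DELTA       2
#define SCALE(v1, v1_max, v2_max)       ((v1) * (v2_max) / (v1_max))

#define DELTA_OLD(nice) (SCALE((nice), 40, MAX_BONUS) + INTERACTIVE_DELTA)
#define DELTA_NEW(nice) (SCALE((nice) + 20, 40, MAX_BONUS) - 20 * MAX_BONUS / 40 + \
                                INTERACTIVE_DELTA)

int main(void)
{
        int nice;

        /* C integer division truncates toward zero, so the old macro mapped
         * nice -1..-3 to the same bonus as nice 0; the new form is monotonic
         * across the whole -20..19 range. */
        for (nice = -20; nice <= 19; nice++)
                printf("nice %3d: old DELTA %2d, new DELTA %2d\n",
                       nice, DELTA_OLD(nice), DELTA_NEW(nice));
        return 0;
}

Under these assumptions, nice -1 through -3 now yield a DELTA() one smaller than before, so TASK_INTERACTIVE() becomes marginally easier to satisfy for mildly negative nice values.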
@@ -1624,7 +1625,7 @@ unsigned long nr_uninterruptible(void)
 {
         unsigned long i, sum = 0;
 
-        for_each_cpu(i)
+        for_each_possible_cpu(i)
                 sum += cpu_rq(i)->nr_uninterruptible;
 
         /*
@@ -1641,7 +1642,7 @@ unsigned long long nr_context_switches(void)
 {
         unsigned long long i, sum = 0;
 
-        for_each_cpu(i)
+        for_each_possible_cpu(i)
                 sum += cpu_rq(i)->nr_switches;
 
         return sum;
@@ -1651,7 +1652,7 @@ unsigned long nr_iowait(void)
 {
         unsigned long i, sum = 0;
 
-        for_each_cpu(i)
+        for_each_possible_cpu(i)
                 sum += atomic_read(&cpu_rq(i)->nr_iowait);
 
         return sum;
@@ -2878,13 +2879,11 @@ asmlinkage void __sched schedule(void)
          * schedule() atomically, we ignore that path for now.
          * Otherwise, whine if we are scheduling when we should not be.
          */
-        if (likely(!current->exit_state)) {
-                if (unlikely(in_atomic())) {
-                        printk(KERN_ERR "BUG: scheduling while atomic: "
-                                "%s/0x%08x/%d\n",
-                                current->comm, preempt_count(), current->pid);
-                        dump_stack();
-                }
+        if (unlikely(in_atomic() && !current->exit_state)) {
+                printk(KERN_ERR "BUG: scheduling while atomic: "
+                        "%s/0x%08x/%d\n",
+                        current->comm, preempt_count(), current->pid);
+                dump_stack();
         }
         profile_hit(SCHED_PROFILING, __builtin_return_address(0));
 
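The restructured check in schedule() folds the two nested tests into a single condition under one unlikely() hint, so the whole "scheduling while atomic" debug path is treated as the cold branch, and in_atomic() is evaluated first so the exit_state test is only reached in the already-rare atomic case. A small userspace sketch of the equivalent restructuring; the two flags are made-up stand-ins for in_atomic() and current->exit_state:

#include <stdio.h>

#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)

static int in_atomic_flag;
static int exit_state_flag;

static void old_check(void)
{
        if (likely(!exit_state_flag)) {         /* common case hinted here */
                if (unlikely(in_atomic_flag))   /* rare case hinted separately */
                        puts("BUG: scheduling while atomic");
        }
}

static void new_check(void)
{
        /*
         * One test, one cold branch: the whole debug path is hinted as
         * unlikely, and exit_state is only examined once in_atomic() has
         * already come back true.
         */
        if (unlikely(in_atomic_flag && !exit_state_flag))
                puts("BUG: scheduling while atomic");
}

int main(void)
{
        for (in_atomic_flag = 0; in_atomic_flag <= 1; in_atomic_flag++)
                for (exit_state_flag = 0; exit_state_flag <= 1; exit_state_flag++) {
                        old_check();
                        new_check();    /* fires in exactly the same cases */
                }
        return 0;
}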
@@ -5575,11 +5574,31 @@ static int cpu_to_cpu_group(int cpu)
 }
 #endif
 
+#ifdef CONFIG_SCHED_MC
+static DEFINE_PER_CPU(struct sched_domain, core_domains);
+static struct sched_group sched_group_core[NR_CPUS];
+#endif
+
+#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
+static int cpu_to_core_group(int cpu)
+{
+        return first_cpu(cpu_sibling_map[cpu]);
+}
+#elif defined(CONFIG_SCHED_MC)
+static int cpu_to_core_group(int cpu)
+{
+        return cpu;
+}
+#endif
+
 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
 static struct sched_group sched_group_phys[NR_CPUS];
 static int cpu_to_phys_group(int cpu)
 {
-#ifdef CONFIG_SCHED_SMT
+#if defined(CONFIG_SCHED_MC)
+        cpumask_t mask = cpu_coregroup_map(cpu);
+        return first_cpu(mask);
+#elif defined(CONFIG_SCHED_SMT)
         return first_cpu(cpu_sibling_map[cpu]);
 #else
         return cpu;
@@ -5602,6 +5621,32 @@ static int cpu_to_allnodes_group(int cpu)
 {
         return cpu_to_node(cpu);
 }
+static void init_numa_sched_groups_power(struct sched_group *group_head)
+{
+        struct sched_group *sg = group_head;
+        int j;
+
+        if (!sg)
+                return;
+next_sg:
+        for_each_cpu_mask(j, sg->cpumask) {
+                struct sched_domain *sd;
+
+                sd = &per_cpu(phys_domains, j);
+                if (j != first_cpu(sd->groups->cpumask)) {
+                        /*
+                         * Only add "power" once for each
+                         * physical package.
+                         */
+                        continue;
+                }
+
+                sg->cpu_power += sd->groups->cpu_power;
+        }
+        sg = sg->next;
+        if (sg != group_head)
+                goto next_sg;
+}
 #endif
 
 /*
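The new init_numa_sched_groups_power() helper walks a node's sched_group ring (the ->next pointers form a circular list whose last element points back at group_head) and, for each group, adds in the cpu_power of every distinct physical package in that group's cpumask. A toy model of the circular-list walk behind the next_sg: goto loop, with the per-package dedup collapsed into a single pkg_power value; names and numbers here are illustrative, not kernel API:

#include <stdio.h>

struct toy_group {
        int cpu_power;
        struct toy_group *next;
};

static void accumulate_power(struct toy_group *group_head, int pkg_power)
{
        struct toy_group *sg = group_head;

        if (!sg)
                return;
        do {                            /* plays the role of the label + goto loop */
                sg->cpu_power += pkg_power;
                sg = sg->next;
        } while (sg != group_head);     /* stop once we are back at the head */
}

int main(void)
{
        struct toy_group a = { 0, NULL }, b = { 0, NULL }, c = { 0, NULL };

        a.next = &b;
        b.next = &c;
        c.next = &a;                    /* close the ring, as sched groups do */

        accumulate_power(&a, 128);      /* 128 stands in for one package's power */
        printf("%d %d %d\n", a.cpu_power, b.cpu_power, c.cpu_power);
        return 0;
}

The do/while expresses the same "visit every group exactly once around the ring" traversal that the kernel writes with a label and goto.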
@@ -5677,6 +5722,17 @@ void build_sched_domains(const cpumask_t *cpu_map)
                 sd->parent = p;
                 sd->groups = &sched_group_phys[group];
 
+#ifdef CONFIG_SCHED_MC
+                p = sd;
+                sd = &per_cpu(core_domains, i);
+                group = cpu_to_core_group(i);
+                *sd = SD_MC_INIT;
+                sd->span = cpu_coregroup_map(i);
+                cpus_and(sd->span, sd->span, *cpu_map);
+                sd->parent = p;
+                sd->groups = &sched_group_core[group];
+#endif
+
 #ifdef CONFIG_SCHED_SMT
                 p = sd;
                 sd = &per_cpu(cpu_domains, i);
@@ -5702,6 +5758,19 @@ void build_sched_domains(const cpumask_t *cpu_map)
         }
 #endif
 
+#ifdef CONFIG_SCHED_MC
+        /* Set up multi-core groups */
+        for_each_cpu_mask(i, *cpu_map) {
+                cpumask_t this_core_map = cpu_coregroup_map(i);
+                cpus_and(this_core_map, this_core_map, *cpu_map);
+                if (i != first_cpu(this_core_map))
+                        continue;
+                init_sched_build_groups(sched_group_core, this_core_map,
+                                                &cpu_to_core_group);
+        }
+#endif
+
+
         /* Set up physical groups */
         for (i = 0; i < MAX_NUMNODES; i++) {
                 cpumask_t nodemask = node_to_cpumask(i);
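The multi-core group setup builds each group exactly once: every CPU computes its core map restricted to cpu_map, but only the lowest-numbered CPU in that map calls init_sched_build_groups(), so a core shared by several CPUs is not set up repeatedly. A toy illustration of that dedup-by-representative pattern; the core_of[] table is invented for the example:

#include <stdio.h>

#define NR_TOY_CPUS 8

/* Which core each toy CPU belongs to (two CPUs per core here). */
static const int core_of[NR_TOY_CPUS] = { 0, 0, 1, 1, 2, 2, 3, 3 };

static int first_cpu_of_core(int core)
{
        int cpu;

        for (cpu = 0; cpu < NR_TOY_CPUS; cpu++)
                if (core_of[cpu] == core)
                        return cpu;
        return -1;
}

int main(void)
{
        int cpu;

        for (cpu = 0; cpu < NR_TOY_CPUS; cpu++) {
                if (cpu != first_cpu_of_core(core_of[cpu]))
                        continue;       /* a lower-numbered sibling covers it */
                printf("building core group %d (represented by cpu %d)\n",
                       core_of[cpu], cpu);
        }
        return 0;
}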
@@ -5798,51 +5867,38 @@ void build_sched_domains(const cpumask_t *cpu_map)
                 power = SCHED_LOAD_SCALE;
                 sd->groups->cpu_power = power;
 #endif
+#ifdef CONFIG_SCHED_MC
+                sd = &per_cpu(core_domains, i);
+                power = SCHED_LOAD_SCALE + (cpus_weight(sd->groups->cpumask)-1)
+                                            * SCHED_LOAD_SCALE / 10;
+                sd->groups->cpu_power = power;
 
                 sd = &per_cpu(phys_domains, i);
+
+                /*
+                 * This has to be < 2 * SCHED_LOAD_SCALE
+                 * Lets keep it SCHED_LOAD_SCALE, so that
+                 * while calculating NUMA group's cpu_power
+                 * we can simply do
+                 *  numa_group->cpu_power += phys_group->cpu_power;
+                 *
+                 * See "only add power once for each physical pkg"
+                 * comment below
+                 */
+                sd->groups->cpu_power = SCHED_LOAD_SCALE;
+#else
+                sd = &per_cpu(phys_domains, i);
                 power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
                         (cpus_weight(sd->groups->cpumask)-1) / 10;
                 sd->groups->cpu_power = power;
-
-#ifdef CONFIG_NUMA
-                sd = &per_cpu(allnodes_domains, i);
-                if (sd->groups) {
-                        power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
-                                (cpus_weight(sd->groups->cpumask)-1) / 10;
-                        sd->groups->cpu_power = power;
-                }
 #endif
         }
 
 #ifdef CONFIG_NUMA
-        for (i = 0; i < MAX_NUMNODES; i++) {
-                struct sched_group *sg = sched_group_nodes[i];
-                int j;
-
-                if (sg == NULL)
-                        continue;
-next_sg:
-                for_each_cpu_mask(j, sg->cpumask) {
-                        struct sched_domain *sd;
-                        int power;
-
-                        sd = &per_cpu(phys_domains, j);
-                        if (j != first_cpu(sd->groups->cpumask)) {
-                                /*
-                                 * Only add "power" once for each
-                                 * physical package.
-                                 */
-                                continue;
-                        }
-                        power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
-                                (cpus_weight(sd->groups->cpumask)-1) / 10;
+        for (i = 0; i < MAX_NUMNODES; i++)
+                init_numa_sched_groups_power(sched_group_nodes[i]);
 
-                        sg->cpu_power += power;
-                }
-                sg = sg->next;
-                if (sg != sched_group_nodes[i])
-                        goto next_sg;
-        }
+        init_numa_sched_groups_power(sched_group_allnodes);
 #endif
 
         /* Attach the domains */
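The group power formula used at the SMT and MC levels credits each CPU in the group beyond the first with roughly 10% of a full CPU, while with CONFIG_SCHED_MC the physical level is pinned to exactly SCHED_LOAD_SCALE so the NUMA code can simply sum it per package, as the new in-tree comment explains. A worked example of the arithmetic, with 128 used as a stand-in for SCHED_LOAD_SCALE (its value in mainline at the time):

#include <stdio.h>

#define TOY_LOAD_SCALE 128

static int group_power(int nr_cpus)
{
        /* SCHED_LOAD_SCALE + (weight - 1) * SCHED_LOAD_SCALE / 10 */
        return TOY_LOAD_SCALE + (nr_cpus - 1) * TOY_LOAD_SCALE / 10;
}

int main(void)
{
        printf("1 cpu:  %d\n", group_power(1));         /* 128            */
        printf("2 cpus: %d\n", group_power(2));         /* 128 + 12 = 140 */
        printf("4 cpus: %d\n", group_power(4));         /* 128 + 38 = 166 */
        return 0;
}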
@@ -5850,6 +5906,8 @@ next_sg:
                 struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
                 sd = &per_cpu(cpu_domains, i);
+#elif defined(CONFIG_SCHED_MC)
+                sd = &per_cpu(core_domains, i);
 #else
                 sd = &per_cpu(phys_domains, i);
 #endif
@@ -6022,7 +6080,7 @@ void __init sched_init(void)
         runqueue_t *rq;
         int i, j, k;
 
-        for_each_cpu(i) {
+        for_each_possible_cpu(i) {
                 prio_array_t *array;
 
                 rq = cpu_rq(i);