author | Ingo Molnar <mingo@elte.hu> | 2009-01-18 12:15:49 -0500
committer | Ingo Molnar <mingo@elte.hu> | 2009-01-18 12:15:49 -0500
commit | af37501c792107c2bde1524bdae38d9a247b841a (patch)
tree | b50ee90d29e72956b8b7d8d19677fe5996755d49 /kernel
parent | d859e29fe34cb833071b20aef860ee94fbad9bb2 (diff)
parent | 99937d6455cea95405ac681c86a857d0fcd530bd (diff)
Merge branch 'core/percpu' into perfcounters/core
Conflicts:
arch/x86/include/asm/pda.h
We merge tip/core/percpu into tip/perfcounters/core because of a
semantic and contextual conflict: the former eliminates the PDA,
while the latter extends it with the apic_perf_irqs field.
Resolve the conflict by moving the new field to the irq_cpustat
structure on 64-bit too.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
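In sketch form, the resolution keeps the per-CPU performance-counter interrupt count in the per-CPU irq statistics structure rather than in the PDA that core/percpu removes. The fields around apic_perf_irqs below are illustrative assumptions, not the verbatim x86 header:

```c
/* Sketch of the resolved layout -- everything except apic_perf_irqs is
 * an assumed placeholder, not the verbatim arch/x86 definition. */
typedef struct {
        unsigned int __softirq_pending;
        unsigned int apic_timer_irqs;   /* pre-existing per-CPU IRQ stats */
        unsigned int apic_perf_irqs;    /* formerly a PDA field; now kept
                                         * here on 64-bit as well */
} irq_cpustat_t;
```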
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/Makefile | 6
-rw-r--r-- | kernel/async.c | 21
-rw-r--r-- | kernel/irq/chip.c | 5
-rw-r--r-- | kernel/irq/handle.c | 57
-rw-r--r-- | kernel/irq/internals.h | 7
-rw-r--r-- | kernel/irq/manage.c | 12
-rw-r--r-- | kernel/irq/migration.c | 12
-rw-r--r-- | kernel/irq/numa_migrate.c | 19
-rw-r--r-- | kernel/irq/proc.c | 4
-rw-r--r-- | kernel/rcutorture.c | 113
-rw-r--r-- | kernel/sched.c | 13
-rw-r--r-- | kernel/sched_debug.c | 21
-rw-r--r-- | kernel/sched_rt.c | 36
-rw-r--r-- | kernel/softirq.c | 5
-rw-r--r-- | kernel/up.c | 21
15 files changed, 238 insertions(+), 114 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 8b2628c7914b..e4115926c536 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -40,7 +40,11 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
-obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
+ifeq ($(CONFIG_USE_GENERIC_SMP_HELPERS),y)
+obj-y += smp.o
+else
+obj-y += up.o
+endif
 obj-$(CONFIG_SMP) += spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
diff --git a/kernel/async.c b/kernel/async.c
index f286e9f2b736..608b32b42812 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -90,12 +90,12 @@ extern int initcall_debug;
 static async_cookie_t __lowest_in_progress(struct list_head *running)
 {
         struct async_entry *entry;
-        if (!list_empty(&async_pending)) {
-                entry = list_first_entry(&async_pending,
+        if (!list_empty(running)) {
+                entry = list_first_entry(running,
                         struct async_entry, list);
                 return entry->cookie;
-        } else if (!list_empty(running)) {
-                entry = list_first_entry(running,
+        } else if (!list_empty(&async_pending)) {
+                entry = list_first_entry(&async_pending,
                         struct async_entry, list);
                 return entry->cookie;
         } else {
@@ -104,6 +104,17 @@ static async_cookie_t __lowest_in_progress(struct list_head *running)
         }
 
 }
+
+static async_cookie_t lowest_in_progress(struct list_head *running)
+{
+        unsigned long flags;
+        async_cookie_t ret;
+
+        spin_lock_irqsave(&async_lock, flags);
+        ret = __lowest_in_progress(running);
+        spin_unlock_irqrestore(&async_lock, flags);
+        return ret;
+}
 /*
  * pick the first pending entry and run it
  */
@@ -229,7 +240,7 @@ void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *r
                 starttime = ktime_get();
         }
 
-        wait_event(async_done, __lowest_in_progress(running) >= cookie);
+        wait_event(async_done, lowest_in_progress(running) >= cookie);
 
         if (initcall_debug && system_state == SYSTEM_BOOTING) {
                 endtime = ktime_get();
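Two fixes ride in the async.c hunks above: __lowest_in_progress() had its list checks in the wrong order (entries migrate from async_pending to the running list as they execute, so the running list holds the lowest cookies and must be inspected first), and wait_event() now samples the lists through a new locked wrapper instead of calling the unlocked helper while the lists may be changing. A userspace analogue of that wrapper pattern, with a pthread mutex standing in for the kernel's async_lock spinlock (all names here are illustrative):

```c
#include <pthread.h>

typedef unsigned long long cookie_t;

static pthread_mutex_t async_lock = PTHREAD_MUTEX_INITIALIZER;
static cookie_t head_cookie;            /* stand-in for the real list heads */

/* Caller must hold async_lock, hence the double-underscore prefix. */
static cookie_t __lowest_in_progress(void)
{
        return head_cookie;
}

/* Lock-taking wrapper, safe to call from a waiting context. */
static cookie_t lowest_in_progress(void)
{
        cookie_t ret;

        pthread_mutex_lock(&async_lock);
        ret = __lowest_in_progress();
        pthread_mutex_unlock(&async_lock);
        return ret;
}
```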
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index f63c706d25e1..c248eba98b43 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -46,7 +46,10 @@ void dynamic_irq_init(unsigned int irq)
         desc->irq_count = 0;
         desc->irqs_unhandled = 0;
 #ifdef CONFIG_SMP
-        cpumask_setall(&desc->affinity);
+        cpumask_setall(desc->affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+        cpumask_clear(desc->pending_mask);
+#endif
 #endif
         spin_unlock_irqrestore(&desc->lock, flags);
 }
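This hunk is the first of many `&desc->affinity` → `desc->affinity` conversions in this merge: the affinity and pending masks stop being fixed-size members embedded in struct irq_desc and are reached through pointers instead, so kernels built with a large NR_CPUS can size them to the machine's real nr_cpu_ids. A before/after sketch with stand-in types (the real definitions live in the genirq headers):

```c
/* Stand-in type; the kernel's struct cpumask is NR_CPUS bits wide. */
struct cpumask { unsigned long bits[8]; };

struct irq_desc_before {
        struct cpumask affinity;        /* embedded: callers wrote &desc->affinity */
};

struct irq_desc_after {
        struct cpumask *affinity;       /* referenced: callers write desc->affinity,
                                         * storage sized to nr_cpu_ids at runtime */
};
```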
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index c20db0be9173..375d68cd5bf0 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -17,6 +17,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/rculist.h>
 #include <linux/hash.h>
+#include <linux/bootmem.h>
 
 #include "internals.h"
 
@@ -57,6 +58,7 @@ int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
 #ifdef CONFIG_SPARSE_IRQ
+
 static struct irq_desc irq_desc_init = {
         .irq = -1,
         .status = IRQ_DISABLED,
@@ -64,9 +66,6 @@ static struct irq_desc irq_desc_init = {
         .handle_irq = handle_bad_irq,
         .depth = 1,
         .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-        .affinity = CPU_MASK_ALL
-#endif
 };
 
 void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
@@ -101,6 +100,10 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
                 printk(KERN_ERR "can not alloc kstat_irqs\n");
                 BUG_ON(1);
         }
+        if (!init_alloc_desc_masks(desc, cpu, false)) {
+                printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+                BUG_ON(1);
+        }
         arch_init_chip_data(desc, cpu);
 }
 
@@ -109,7 +112,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
  */
 DEFINE_SPINLOCK(sparse_irq_lock);
 
-struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+struct irq_desc **irq_desc_ptrs __read_mostly;
 
 static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
         [0 ... NR_IRQS_LEGACY-1] = {
@@ -119,14 +122,10 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
                 .handle_irq = handle_bad_irq,
                 .depth = 1,
                 .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-                .affinity = CPU_MASK_ALL
-#endif
         }
 };
 
-/* FIXME: use bootmem alloc ...*/
-static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+static unsigned int *kstat_irqs_legacy;
 
 int __init early_irq_init(void)
 {
@@ -134,18 +133,30 @@ int __init early_irq_init(void)
         int legacy_count;
         int i;
 
+        /* initialize nr_irqs based on nr_cpu_ids */
+        arch_probe_nr_irqs();
+        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
+
         desc = irq_desc_legacy;
         legacy_count = ARRAY_SIZE(irq_desc_legacy);
 
+        /* allocate irq_desc_ptrs array based on nr_irqs */
+        irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+
+        /* allocate based on nr_cpu_ids */
+        /* FIXME: invert kstat_irgs, and it'd be a per_cpu_alloc'd thing */
+        kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
+                                          sizeof(int));
+
         for (i = 0; i < legacy_count; i++) {
                 desc[i].irq = i;
-                desc[i].kstat_irqs = kstat_irqs_legacy[i];
+                desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
                 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-
+                init_alloc_desc_masks(&desc[i], 0, true);
                 irq_desc_ptrs[i] = desc + i;
         }
 
-        for (i = legacy_count; i < NR_IRQS; i++)
+        for (i = legacy_count; i < nr_irqs; i++)
                 irq_desc_ptrs[i] = NULL;
 
         return arch_early_irq_init();
@@ -153,7 +164,10 @@ int __init early_irq_init(void)
 
 struct irq_desc *irq_to_desc(unsigned int irq)
 {
-        return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+        if (irq_desc_ptrs && irq < nr_irqs)
+                return irq_desc_ptrs[irq];
+
+        return NULL;
 }
 
 struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
@@ -162,10 +176,9 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
         unsigned long flags;
         int node;
 
-        if (irq >= NR_IRQS) {
-                printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
-                        irq, NR_IRQS);
-                WARN_ON(1);
+        if (irq >= nr_irqs) {
+                WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
+                        irq, nr_irqs);
                 return NULL;
         }
 
@@ -207,9 +220,6 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
                 .handle_irq = handle_bad_irq,
                 .depth = 1,
                 .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-#ifdef CONFIG_SMP
-                .affinity = CPU_MASK_ALL
-#endif
         }
 };
 
@@ -219,12 +229,15 @@ int __init early_irq_init(void)
         int count;
         int i;
 
+        printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
         desc = irq_desc;
         count = ARRAY_SIZE(irq_desc);
 
-        for (i = 0; i < count; i++)
+        for (i = 0; i < count; i++) {
                 desc[i].irq = i;
-
+                init_alloc_desc_masks(&desc[i], 0, true);
+        }
         return arch_early_irq_init();
 }
 
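Two sizing changes run through the handle.c diff: irq_desc_ptrs becomes a boot-time allocation sized by the runtime nr_irqs (probed via the new arch_probe_nr_irqs()), and the legacy statistics table drops from a static `[NR_IRQS_LEGACY][NR_CPUS]` matrix to one flat allocation sized by nr_cpu_ids, with row i starting at `i * nr_cpu_ids`. A minimal userspace illustration of that flattening, with calloc() standing in for alloc_bootmem():

```c
#include <stdlib.h>

/* One flat block replaces the 2-D array; row i of the old matrix is
 * the slice [i * ncpus, (i + 1) * ncpus) of the new allocation. */
static unsigned int *alloc_kstat_legacy(size_t nirqs_legacy, size_t ncpus)
{
        return calloc(nirqs_legacy * ncpus, sizeof(unsigned int));
}

static unsigned int *kstat_row(unsigned int *base, size_t i, size_t ncpus)
{
        return base + i * ncpus;        /* mirrors desc[i].kstat_irqs above */
}
```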
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index e6d0a43cc125..40416a81a0f5 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -16,7 +16,14 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 extern struct lock_class_key irq_desc_lock_class;
 extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
 extern spinlock_t sparse_irq_lock;
+
+#ifdef CONFIG_SPARSE_IRQ
+/* irq_desc_ptrs allocated at boot time */
+extern struct irq_desc **irq_desc_ptrs;
+#else
+/* irq_desc_ptrs is a fixed size array */
 extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+#endif
 
 #ifdef CONFIG_PROC_FS
 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index cd0cd8dcb345..b98739af4558 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -98,14 +98,14 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
         if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-                cpumask_copy(&desc->affinity, cpumask);
+                cpumask_copy(desc->affinity, cpumask);
                 desc->chip->set_affinity(irq, cpumask);
         } else {
                 desc->status |= IRQ_MOVE_PENDING;
-                cpumask_copy(&desc->pending_mask, cpumask);
+                cpumask_copy(desc->pending_mask, cpumask);
         }
 #else
-        cpumask_copy(&desc->affinity, cpumask);
+        cpumask_copy(desc->affinity, cpumask);
         desc->chip->set_affinity(irq, cpumask);
 #endif
         desc->status |= IRQ_AFFINITY_SET;
@@ -127,16 +127,16 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
          * one of the targets is online.
          */
         if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-                if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+                if (cpumask_any_and(desc->affinity, cpu_online_mask)
                     < nr_cpu_ids)
                         goto set_affinity;
                 else
                         desc->status &= ~IRQ_AFFINITY_SET;
         }
 
-        cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+        cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
 set_affinity:
-        desc->chip->set_affinity(irq, &desc->affinity);
+        desc->chip->set_affinity(irq, desc->affinity);
 
         return 0;
 }
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index bd72329e630c..e05ad9be43b7 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -18,7 +18,7 @@ void move_masked_irq(int irq)
 
         desc->status &= ~IRQ_MOVE_PENDING;
 
-        if (unlikely(cpumask_empty(&desc->pending_mask)))
+        if (unlikely(cpumask_empty(desc->pending_mask)))
                 return;
 
         if (!desc->chip->set_affinity)
@@ -38,13 +38,13 @@ void move_masked_irq(int irq)
          * For correct operation this depends on the caller
          * masking the irqs.
          */
-        if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask)
+        if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
                    < nr_cpu_ids)) {
-                cpumask_and(&desc->affinity,
-                            &desc->pending_mask, cpu_online_mask);
-                desc->chip->set_affinity(irq, &desc->affinity);
+                cpumask_and(desc->affinity,
+                            desc->pending_mask, cpu_online_mask);
+                desc->chip->set_affinity(irq, desc->affinity);
         }
-        cpumask_clear(&desc->pending_mask);
+        cpumask_clear(desc->pending_mask);
 }
 
 void move_native_irq(int irq)
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index ecf765c6a77a..666260e4c065 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -38,15 +38,22 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
         old_desc->kstat_irqs = NULL;
 }
 
-static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
+static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
                  struct irq_desc *desc, int cpu)
 {
         memcpy(desc, old_desc, sizeof(struct irq_desc));
+        if (!init_alloc_desc_masks(desc, cpu, false)) {
+                printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
+                                "for migration.\n", irq);
+                return false;
+        }
         spin_lock_init(&desc->lock);
         desc->cpu = cpu;
         lockdep_set_class(&desc->lock, &irq_desc_lock_class);
         init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
+        init_copy_desc_masks(old_desc, desc);
         arch_init_copy_chip_data(old_desc, desc, cpu);
+        return true;
 }
 
 static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -76,12 +83,18 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
         node = cpu_to_node(cpu);
         desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
         if (!desc) {
-                printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq);
+                printk(KERN_ERR "irq %d: can not get new irq_desc "
+                                "for migration.\n", irq);
+                /* still use old one */
+                desc = old_desc;
+                goto out_unlock;
+        }
+        if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) {
                 /* still use old one */
+                kfree(desc);
                 desc = old_desc;
                 goto out_unlock;
         }
-        init_copy_one_irq_desc(irq, old_desc, desc, cpu);
 
         irq_desc_ptrs[irq] = desc;
 
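Because init_copy_one_irq_desc() can now fail (the per-descriptor cpumask allocation may not succeed), the migration path gains a second bail-out that frees the half-initialized copy and keeps using the old descriptor. The shape of that fallback, as a hedged userspace skeleton with malloc/free standing in for kzalloc_node/kfree:

```c
#include <stdbool.h>
#include <stdlib.h>

struct desc { int data; };

/* Placeholder for init_copy_one_irq_desc(); may report failure. */
static bool init_copy(struct desc *old_desc, struct desc *copy)
{
        copy->data = old_desc->data;
        return true;
}

static struct desc *move_desc(struct desc *old_desc)
{
        struct desc *desc = malloc(sizeof(*desc));

        if (!desc)
                return old_desc;        /* still use old one */
        if (!init_copy(old_desc, desc)) {
                free(desc);             /* discard the half-built copy */
                return old_desc;        /* still use old one */
        }
        return desc;
}
```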
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index aae3f742bcec..692363dd591f 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -20,11 +20,11 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
         struct irq_desc *desc = irq_to_desc((long)m->private);
-        const struct cpumask *mask = &desc->affinity;
+        const struct cpumask *mask = desc->affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
         if (desc->status & IRQ_MOVE_PENDING)
-                mask = &desc->pending_mask;
+                mask = desc->pending_mask;
 #endif
         seq_cpumask(m, mask);
         seq_putc(m, '\n');
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 1cff28db56b6..7c4142a79f0a 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -136,29 +136,47 @@ static int stutter_pause_test = 0;
 #endif
 int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
 
-#define FULLSTOP_SHUTDOWN 1     /* Bail due to system shutdown/panic. */
-#define FULLSTOP_CLEANUP  2     /* Orderly shutdown. */
-static int fullstop;            /* stop generating callbacks at test end. */
-DEFINE_MUTEX(fullstop_mutex);   /* protect fullstop transitions and */
-                                /* spawning of kthreads. */
+/* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */
+
+#define FULLSTOP_DONTSTOP 0     /* Normal operation. */
+#define FULLSTOP_SHUTDOWN 1     /* System shutdown with rcutorture running. */
+#define FULLSTOP_RMMOD    2     /* Normal rmmod of rcutorture. */
+static int fullstop = FULLSTOP_RMMOD;
+DEFINE_MUTEX(fullstop_mutex);   /* Protect fullstop transitions and spawning */
+                                /* of kthreads. */
 
 /*
- * Detect and respond to a signal-based shutdown.
+ * Detect and respond to a system shutdown.
  */
 static int
 rcutorture_shutdown_notify(struct notifier_block *unused1,
                            unsigned long unused2, void *unused3)
 {
-        if (fullstop)
-                return NOTIFY_DONE;
         mutex_lock(&fullstop_mutex);
-        if (!fullstop)
+        if (fullstop == FULLSTOP_DONTSTOP)
                 fullstop = FULLSTOP_SHUTDOWN;
+        else
+                printk(KERN_WARNING /* but going down anyway, so... */
+                       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
         mutex_unlock(&fullstop_mutex);
         return NOTIFY_DONE;
 }
 
 /*
+ * Absorb kthreads into a kernel function that won't return, so that
+ * they won't ever access module text or data again.
+ */
+static void rcutorture_shutdown_absorb(char *title)
+{
+        if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
+                printk(KERN_NOTICE
+                       "rcutorture thread %s parking due to system shutdown\n",
+                       title);
+                schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
+        }
+}
+
+/*
  * Allocate an element from the rcu_tortures pool.
  */
 static struct rcu_torture *
@@ -219,13 +237,14 @@ rcu_random(struct rcu_random_state *rrsp)
 }
 
 static void
-rcu_stutter_wait(void)
+rcu_stutter_wait(char *title)
 {
-        while ((stutter_pause_test || !rcutorture_runnable) && !fullstop) {
+        while (stutter_pause_test || !rcutorture_runnable) {
                 if (rcutorture_runnable)
                         schedule_timeout_interruptible(1);
                 else
                         schedule_timeout_interruptible(round_jiffies_relative(HZ));
+                rcutorture_shutdown_absorb(title);
         }
 }
 
@@ -287,7 +306,7 @@ rcu_torture_cb(struct rcu_head *p)
         int i;
         struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
 
-        if (fullstop) {
+        if (fullstop != FULLSTOP_DONTSTOP) {
                 /* Test is ending, just drop callbacks on the floor. */
                 /* The next initialization will pick up the pieces. */
                 return;
@@ -619,10 +638,11 @@ rcu_torture_writer(void *arg)
                 }
                 rcu_torture_current_version++;
                 oldbatch = cur_ops->completed();
-                rcu_stutter_wait();
-        } while (!kthread_should_stop() && !fullstop);
+                rcu_stutter_wait("rcu_torture_writer");
+        } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
         VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
-        while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
+        rcutorture_shutdown_absorb("rcu_torture_writer");
+        while (!kthread_should_stop())
                 schedule_timeout_uninterruptible(1);
         return 0;
 }
@@ -643,11 +663,12 @@ rcu_torture_fakewriter(void *arg)
                 schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
                 udelay(rcu_random(&rand) & 0x3ff);
                 cur_ops->sync();
-                rcu_stutter_wait();
-        } while (!kthread_should_stop() && !fullstop);
+                rcu_stutter_wait("rcu_torture_fakewriter");
+        } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
 
         VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
-        while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
+        rcutorture_shutdown_absorb("rcu_torture_fakewriter");
+        while (!kthread_should_stop())
                 schedule_timeout_uninterruptible(1);
         return 0;
 }
@@ -752,12 +773,13 @@ rcu_torture_reader(void *arg)
                 preempt_enable();
                 cur_ops->readunlock(idx);
                 schedule();
-                rcu_stutter_wait();
-        } while (!kthread_should_stop() && !fullstop);
+                rcu_stutter_wait("rcu_torture_reader");
+        } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
         VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
+        rcutorture_shutdown_absorb("rcu_torture_reader");
         if (irqreader && cur_ops->irqcapable)
                 del_timer_sync(&t);
-        while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
+        while (!kthread_should_stop())
                 schedule_timeout_uninterruptible(1);
         return 0;
 }
@@ -854,7 +876,8 @@ rcu_torture_stats(void *arg)
         do {
                 schedule_timeout_interruptible(stat_interval * HZ);
                 rcu_torture_stats_print();
-        } while (!kthread_should_stop() && !fullstop);
+                rcutorture_shutdown_absorb("rcu_torture_stats");
+        } while (!kthread_should_stop());
         VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
         return 0;
 }
@@ -866,52 +889,49 @@ static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
  */
 static void rcu_torture_shuffle_tasks(void)
 {
-        cpumask_var_t tmp_mask;
+        cpumask_t tmp_mask;
         int i;
 
-        if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
-                BUG();
-
-        cpumask_setall(tmp_mask);
+        cpus_setall(tmp_mask);
         get_online_cpus();
 
         /* No point in shuffling if there is only one online CPU (ex: UP) */
-        if (num_online_cpus() == 1)
-                goto out;
+        if (num_online_cpus() == 1) {
+                put_online_cpus();
+                return;
+        }
 
         if (rcu_idle_cpu != -1)
-                cpumask_clear_cpu(rcu_idle_cpu, tmp_mask);
+                cpu_clear(rcu_idle_cpu, tmp_mask);
 
-        set_cpus_allowed_ptr(current, tmp_mask);
+        set_cpus_allowed_ptr(current, &tmp_mask);
 
         if (reader_tasks) {
                 for (i = 0; i < nrealreaders; i++)
                         if (reader_tasks[i])
                                 set_cpus_allowed_ptr(reader_tasks[i],
-                                                     tmp_mask);
+                                                     &tmp_mask);
         }
 
         if (fakewriter_tasks) {
                 for (i = 0; i < nfakewriters; i++)
                         if (fakewriter_tasks[i])
                                 set_cpus_allowed_ptr(fakewriter_tasks[i],
-                                                     tmp_mask);
+                                                     &tmp_mask);
         }
 
         if (writer_task)
-                set_cpus_allowed_ptr(writer_task, tmp_mask);
+                set_cpus_allowed_ptr(writer_task, &tmp_mask);
 
         if (stats_task)
-                set_cpus_allowed_ptr(stats_task, tmp_mask);
+                set_cpus_allowed_ptr(stats_task, &tmp_mask);
 
         if (rcu_idle_cpu == -1)
                 rcu_idle_cpu = num_online_cpus() - 1;
         else
                 rcu_idle_cpu--;
 
-out:
         put_online_cpus();
-        free_cpumask_var(tmp_mask);
 }
 
 /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
@@ -925,7 +945,8 @@ rcu_torture_shuffle(void *arg)
         do {
                 schedule_timeout_interruptible(shuffle_interval * HZ);
                 rcu_torture_shuffle_tasks();
-        } while (!kthread_should_stop() && !fullstop);
+                rcutorture_shutdown_absorb("rcu_torture_shuffle");
+        } while (!kthread_should_stop());
         VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
         return 0;
 }
@@ -940,10 +961,11 @@ rcu_torture_stutter(void *arg)
         do {
                 schedule_timeout_interruptible(stutter * HZ);
                 stutter_pause_test = 1;
-                if (!kthread_should_stop() && !fullstop)
+                if (!kthread_should_stop())
                         schedule_timeout_interruptible(stutter * HZ);
                 stutter_pause_test = 0;
-        } while (!kthread_should_stop() && !fullstop);
+                rcutorture_shutdown_absorb("rcu_torture_stutter");
+        } while (!kthread_should_stop());
         VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
         return 0;
 }
@@ -970,15 +992,16 @@ rcu_torture_cleanup(void)
         int i;
 
         mutex_lock(&fullstop_mutex);
-        if (!fullstop) {
-                /* If being signaled, let it happen, then exit. */
+        if (fullstop == FULLSTOP_SHUTDOWN) {
+                printk(KERN_WARNING /* but going down anyway, so... */
+                       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
                 mutex_unlock(&fullstop_mutex);
-                schedule_timeout_interruptible(10 * HZ);
+                schedule_timeout_uninterruptible(10);
                 if (cur_ops->cb_barrier != NULL)
                         cur_ops->cb_barrier();
                 return;
         }
-        fullstop = FULLSTOP_CLEANUP;
+        fullstop = FULLSTOP_RMMOD;
         mutex_unlock(&fullstop_mutex);
         unregister_reboot_notifier(&rcutorture_nb);
         if (stutter_task) {
@@ -1078,7 +1101,7 @@ rcu_torture_init(void)
         else
                 nrealreaders = 2 * num_online_cpus();
         rcu_torture_print_module_parms("Start of test");
-        fullstop = 0;
+        fullstop = FULLSTOP_DONTSTOP;
 
         /* Set up the freelist. */
 
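The rcutorture rework turns fullstop from a boolean into a three-state flag so the two teardown paths can detect each other: FULLSTOP_DONTSTOP while the test runs, FULLSTOP_RMMOD during normal module removal, and FULLSTOP_SHUTDOWN when the reboot notifier fires first. All transitions happen under fullstop_mutex, and any kthread that observes FULLSTOP_SHUTDOWN parks itself forever in rcutorture_shutdown_absorb() rather than touching soon-to-vanish module memory. A condensed userspace sketch of the notifier-side transition (a pthread mutex stands in for fullstop_mutex):

```c
#include <pthread.h>
#include <stdio.h>

enum { FULLSTOP_DONTSTOP, FULLSTOP_SHUTDOWN, FULLSTOP_RMMOD };

static int fullstop = FULLSTOP_RMMOD;  /* "module not active" at rest */
static pthread_mutex_t fullstop_mutex = PTHREAD_MUTEX_INITIALIZER;

static void shutdown_notify(void)      /* reboot-notifier path */
{
        pthread_mutex_lock(&fullstop_mutex);
        if (fullstop == FULLSTOP_DONTSTOP)
                fullstop = FULLSTOP_SHUTDOWN;
        else    /* rmmod already underway: the illegal overlap */
                fprintf(stderr, "concurrent rmmod and shutdown!\n");
        pthread_mutex_unlock(&fullstop_mutex);
}
```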
diff --git a/kernel/sched.c b/kernel/sched.c
index 43fd21233b93..ce9fecab5f02 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -125,6 +125,9 @@ DEFINE_TRACE(sched_switch);
 DEFINE_TRACE(sched_migrate_task);
 
 #ifdef CONFIG_SMP
+
+static void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
 /*
  * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
  * Since cpu_power is a 'constant', we can use a reciprocal divide.
@@ -7352,10 +7355,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
  * groups, so roll our own. Now each node has its own list of groups which
  * gets dynamically allocated.
  */
-static DEFINE_PER_CPU(struct sched_domain, node_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
 static struct sched_group ***sched_group_nodes_bycpu;
 
-static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
 
 static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
@@ -7630,7 +7633,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 #ifdef CONFIG_NUMA
         if (cpumask_weight(cpu_map) >
                         SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
-                sd = &per_cpu(allnodes_domains, i);
+                sd = &per_cpu(allnodes_domains, i).sd;
                 SD_INIT(sd, ALLNODES);
                 set_domain_attribute(sd, attr);
                 cpumask_copy(sched_domain_span(sd), cpu_map);
@@ -7640,7 +7643,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
         } else
                 p = NULL;
 
-        sd = &per_cpu(node_domains, i);
+        sd = &per_cpu(node_domains, i).sd;
         SD_INIT(sd, NODE);
         set_domain_attribute(sd, attr);
         sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
@@ -7758,7 +7761,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
         for_each_cpu(j, nodemask) {
                 struct sched_domain *sd;
 
-                sd = &per_cpu(node_domains, j);
+                sd = &per_cpu(node_domains, j).sd;
                 sd->groups = sg;
         }
         sg->__cpu_power = 0;
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 4293cfa9681d..16eeba4e4169 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -145,6 +145,19 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
         read_unlock_irqrestore(&tasklist_lock, flags);
 }
 
+#if defined(CONFIG_CGROUP_SCHED) && \
+        (defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED))
+static void task_group_path(struct task_group *tg, char *buf, int buflen)
+{
+        /* may be NULL if the underlying cgroup isn't fully-created yet */
+        if (!tg->css.cgroup) {
+                buf[0] = '\0';
+                return;
+        }
+        cgroup_path(tg->css.cgroup, buf, buflen);
+}
+#endif
+
 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 {
         s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
@@ -154,10 +167,10 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
         unsigned long flags;
 
 #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
-        char path[128] = "";
+        char path[128];
         struct task_group *tg = cfs_rq->tg;
 
-        cgroup_path(tg->css.cgroup, path, sizeof(path));
+        task_group_path(tg, path, sizeof(path));
 
         SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
 #elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
@@ -208,10 +221,10 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 {
 #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED)
-        char path[128] = "";
+        char path[128];
         struct task_group *tg = rt_rq->tg;
 
-        cgroup_path(tg->css.cgroup, path, sizeof(path));
+        task_group_path(tg, path, sizeof(path));
 
         SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, path);
 #else
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 954e1a81b796..da932f4c8524 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -960,16 +960,17 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
-static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
+static inline int pick_optimal_cpu(int this_cpu,
+                                   const struct cpumask *mask)
 {
         int first;
 
         /* "this_cpu" is cheaper to preempt than a remote processor */
-        if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
+        if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
                 return this_cpu;
 
-        first = first_cpu(*mask);
-        if (first != NR_CPUS)
+        first = cpumask_first(mask);
+        if (first < nr_cpu_ids)
                 return first;
 
         return -1;
@@ -981,6 +982,7 @@ static int find_lowest_rq(struct task_struct *task)
         struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
         int this_cpu = smp_processor_id();
         int cpu = task_cpu(task);
+        cpumask_var_t domain_mask;
 
         if (task->rt.nr_cpus_allowed == 1)
                 return -1; /* No other targets possible */
@@ -1013,19 +1015,25 @@ static int find_lowest_rq(struct task_struct *task)
         if (this_cpu == cpu)
                 this_cpu = -1; /* Skip this_cpu opt if the same */
 
-        for_each_domain(cpu, sd) {
-                if (sd->flags & SD_WAKE_AFFINE) {
-                        cpumask_t domain_mask;
-                        int best_cpu;
+        if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
+                for_each_domain(cpu, sd) {
+                        if (sd->flags & SD_WAKE_AFFINE) {
+                                int best_cpu;
 
-                        cpumask_and(&domain_mask, sched_domain_span(sd),
-                                    lowest_mask);
+                                cpumask_and(domain_mask,
+                                            sched_domain_span(sd),
+                                            lowest_mask);
 
-                        best_cpu = pick_optimal_cpu(this_cpu,
-                                                    &domain_mask);
-                        if (best_cpu != -1)
-                                return best_cpu;
+                                best_cpu = pick_optimal_cpu(this_cpu,
+                                                            domain_mask);
+
+                                if (best_cpu != -1) {
+                                        free_cpumask_var(domain_mask);
+                                        return best_cpu;
+                                }
+                        }
                 }
+                free_cpumask_var(domain_mask);
         }
 
         /*
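An on-stack cpumask_t costs NR_CPUS bits per frame (512 bytes at NR_CPUS=4096), so find_lowest_rq() now takes domain_mask from the allocator with GFP_ATOMIC, and every exit from the loop, including the early `return best_cpu`, must free it. The resource discipline in miniature, with malloc/free standing in for alloc_cpumask_var/free_cpumask_var:

```c
#include <stdlib.h>

static int pick_from(unsigned long *mask)
{
        (void)mask;                     /* placeholder search */
        return -1;
}

static int find_lowest(void)
{
        unsigned long *mask = malloc(64);       /* atomic alloc may fail */
        int best;

        if (!mask)
                return -1;              /* just skip the optimization */

        best = pick_from(mask);
        if (best != -1) {
                free(mask);             /* early return still frees */
                return best;
        }
        free(mask);
        return -1;
}
```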
diff --git a/kernel/softirq.c b/kernel/softirq.c
index bdbe9de9cd8d..0365b4899a3d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -795,6 +795,11 @@ int __init __weak early_irq_init(void)
         return 0;
 }
 
+int __init __weak arch_probe_nr_irqs(void)
+{
+        return 0;
+}
+
 int __init __weak arch_early_irq_init(void)
 {
         return 0;
diff --git a/kernel/up.c b/kernel/up.c
new file mode 100644
index 000000000000..1ff27a28bb7d
--- /dev/null
+++ b/kernel/up.c
@@ -0,0 +1,21 @@
+/*
+ * Uniprocessor-only support functions. The counterpart to kernel/smp.c
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+                             int wait)
+{
+        WARN_ON(cpu != 0);
+
+        local_irq_disable();
+        (func)(info);
+        local_irq_enable();
+
+        return 0;
+}
+EXPORT_SYMBOL(smp_call_function_single);
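The new kernel/up.c gives uniprocessor builds the same smp_call_function_single() entry point that SMP builds get from kernel/smp.c: the "cross-CPU" call just runs the function locally with interrupts disabled, approximating the IPI-handler context callers expect. A hypothetical caller, for illustration only:

```c
/* Hypothetical use of the UP stub above: bump() runs synchronously on
 * "CPU 0" with interrupts disabled, then hits holds the result. */
static void bump(void *info)
{
        ++*(int *)info;
}

static void example(void)
{
        int hits = 0;

        smp_call_function_single(0, bump, &hits, 1);
        /* hits == 1 here */
}
```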