| author | Ingo Molnar <mingo@elte.hu> | 2009-03-08 11:48:51 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-03-08 11:48:51 -0400 |
| commit | dba58e39ced7af63f2748d12bbb2b4ac83c72391 (patch) | |
| tree | ee15a5e7667b51d0d0f7e8cb39064652f7c84c28 /kernel | |
| parent | 9de36825b321fe9fe9cf73260554251af579f4ca (diff) | |
| parent | 78ff7fae04554b49d29226ed12536268c2500d1f (diff) | |
Merge branches 'tracing/doc', 'tracing/ftrace', 'tracing/printk' and 'tracing/textedit' into tracing/core
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/exit.c | 5 |
| -rw-r--r-- | kernel/fork.c | 5 |
| -rw-r--r-- | kernel/irq/chip.c | 5 |
| -rw-r--r-- | kernel/irq/handle.c | 57 |
| -rw-r--r-- | kernel/irq/internals.h | 7 |
| -rw-r--r-- | kernel/irq/manage.c | 12 |
| -rw-r--r-- | kernel/irq/migration.c | 12 |
| -rw-r--r-- | kernel/irq/numa_migrate.c | 19 |
| -rw-r--r-- | kernel/irq/proc.c | 4 |
| -rw-r--r-- | kernel/kexec.c | 2 |
| -rw-r--r-- | kernel/kprobes.c | 15 |
| -rw-r--r-- | kernel/module.c | 64 |
| -rw-r--r-- | kernel/panic.c | 8 |
| -rw-r--r-- | kernel/sched.c | 13 |
| -rw-r--r-- | kernel/sched_rt.c | 32 |
| -rw-r--r-- | kernel/softirq.c | 5 |
| -rw-r--r-- | kernel/stop_machine.c | 2 |
| -rw-r--r-- | kernel/trace/blktrace.c | 2 |
18 files changed, 183 insertions(+), 86 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index efd30ccf3858..167e1e3ad7c6 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
| @@ -980,12 +980,9 @@ static void check_stack_usage(void) | |||
| 980 | { | 980 | { |
| 981 | static DEFINE_SPINLOCK(low_water_lock); | 981 | static DEFINE_SPINLOCK(low_water_lock); |
| 982 | static int lowest_to_date = THREAD_SIZE; | 982 | static int lowest_to_date = THREAD_SIZE; |
| 983 | unsigned long *n = end_of_stack(current); | ||
| 984 | unsigned long free; | 983 | unsigned long free; |
| 985 | 984 | ||
| 986 | while (*n == 0) | 985 | free = stack_not_used(current); |
| 987 | n++; | ||
| 988 | free = (unsigned long)n - (unsigned long)end_of_stack(current); | ||
| 989 | 986 | ||
| 990 | if (free >= lowest_to_date) | 987 | if (free >= lowest_to_date) |
| 991 | return; | 988 | return; |
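
The hunk above replaces an open-coded walk of the thread stack with the new stack_not_used() helper. A rough sketch of that helper, assuming the implementation added to <linux/sched.h> in this series (details may differ): it skips the STACK_END_MAGIC word that fork now writes (see the kernel/fork.c hunk below) and counts the bytes that were never touched.

```c
/* Sketch only -- the real helper lives in <linux/sched.h>. */
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do {			/* skip the STACK_END_MAGIC sentinel */
		n++;
	} while (!*n);

	return (unsigned long)n - (unsigned long)end_of_stack(p);
}
```
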
diff --git a/kernel/fork.c b/kernel/fork.c
index a66fbde20715..8de303bdd4e5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
| @@ -61,6 +61,7 @@ | |||
| 61 | #include <linux/proc_fs.h> | 61 | #include <linux/proc_fs.h> |
| 62 | #include <linux/blkdev.h> | 62 | #include <linux/blkdev.h> |
| 63 | #include <trace/sched.h> | 63 | #include <trace/sched.h> |
| 64 | #include <linux/magic.h> | ||
| 64 | 65 | ||
| 65 | #include <asm/pgtable.h> | 66 | #include <asm/pgtable.h> |
| 66 | #include <asm/pgalloc.h> | 67 | #include <asm/pgalloc.h> |
| @@ -212,6 +213,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) | |||
| 212 | { | 213 | { |
| 213 | struct task_struct *tsk; | 214 | struct task_struct *tsk; |
| 214 | struct thread_info *ti; | 215 | struct thread_info *ti; |
| 216 | unsigned long *stackend; | ||
| 217 | |||
| 215 | int err; | 218 | int err; |
| 216 | 219 | ||
| 217 | prepare_to_copy(orig); | 220 | prepare_to_copy(orig); |
| @@ -237,6 +240,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) | |||
| 237 | goto out; | 240 | goto out; |
| 238 | 241 | ||
| 239 | setup_thread_stack(tsk, orig); | 242 | setup_thread_stack(tsk, orig); |
| 243 | stackend = end_of_stack(tsk); | ||
| 244 | *stackend = STACK_END_MAGIC; /* for overflow detection */ | ||
| 240 | 245 | ||
| 241 | #ifdef CONFIG_CC_STACKPROTECTOR | 246 | #ifdef CONFIG_CC_STACKPROTECTOR |
| 242 | tsk->stack_canary = get_random_int(); | 247 | tsk->stack_canary = get_random_int(); |
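
With the hunk above, every new task stack gets a STACK_END_MAGIC sentinel at its far end. An illustrative consumer, assuming a check of the kind arch fault handlers perform; report_stack_overflow() is a made-up name:

```c
#include <linux/kernel.h>
#include <linux/magic.h>	/* STACK_END_MAGIC */
#include <linux/sched.h>

/* Illustrative only: report a clobbered stack-end sentinel. */
static void report_stack_overflow(struct task_struct *tsk)
{
	unsigned long *stackend = end_of_stack(tsk);

	if (*stackend != STACK_END_MAGIC)
		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
}
```
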
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 7de11bd64dfe..122fef4b0bd3 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
| @@ -46,7 +46,10 @@ void dynamic_irq_init(unsigned int irq) | |||
| 46 | desc->irq_count = 0; | 46 | desc->irq_count = 0; |
| 47 | desc->irqs_unhandled = 0; | 47 | desc->irqs_unhandled = 0; |
| 48 | #ifdef CONFIG_SMP | 48 | #ifdef CONFIG_SMP |
| 49 | cpumask_setall(&desc->affinity); | 49 | cpumask_setall(desc->affinity); |
| 50 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
| 51 | cpumask_clear(desc->pending_mask); | ||
| 52 | #endif | ||
| 50 | #endif | 53 | #endif |
| 51 | spin_unlock_irqrestore(&desc->lock, flags); | 54 | spin_unlock_irqrestore(&desc->lock, flags); |
| 52 | } | 55 | } |
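
This hunk, and the kernel/irq changes that follow, track a change of desc->affinity and desc->pending_mask from embedded cpumask_t fields to cpumask_var_t, so callers pass the mask itself rather than its address. A simplified sketch of the assumed field change (the real struct irq_desc in <linux/irq.h> has many more members):

```c
struct irq_desc {
	/* ... */
#ifdef CONFIG_SMP
	cpumask_var_t	affinity;	/* was: cpumask_t affinity; */
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_var_t	pending_mask;	/* was: cpumask_t pending_mask; */
#endif
#endif
	/* ... */
};
```
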
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 4709a7c870d7..412370ab9a34 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/rculist.h> | 18 | #include <linux/rculist.h> |
| 19 | #include <linux/hash.h> | 19 | #include <linux/hash.h> |
| 20 | #include <trace/irq.h> | 20 | #include <trace/irq.h> |
| 21 | #include <linux/bootmem.h> | ||
| 21 | 22 | ||
| 22 | #include "internals.h" | 23 | #include "internals.h" |
| 23 | 24 | ||
| @@ -70,6 +71,7 @@ int nr_irqs = NR_IRQS; | |||
| 70 | EXPORT_SYMBOL_GPL(nr_irqs); | 71 | EXPORT_SYMBOL_GPL(nr_irqs); |
| 71 | 72 | ||
| 72 | #ifdef CONFIG_SPARSE_IRQ | 73 | #ifdef CONFIG_SPARSE_IRQ |
| 74 | |||
| 73 | static struct irq_desc irq_desc_init = { | 75 | static struct irq_desc irq_desc_init = { |
| 74 | .irq = -1, | 76 | .irq = -1, |
| 75 | .status = IRQ_DISABLED, | 77 | .status = IRQ_DISABLED, |
| @@ -77,9 +79,6 @@ static struct irq_desc irq_desc_init = { | |||
| 77 | .handle_irq = handle_bad_irq, | 79 | .handle_irq = handle_bad_irq, |
| 78 | .depth = 1, | 80 | .depth = 1, |
| 79 | .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), | 81 | .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), |
| 80 | #ifdef CONFIG_SMP | ||
| 81 | .affinity = CPU_MASK_ALL | ||
| 82 | #endif | ||
| 83 | }; | 82 | }; |
| 84 | 83 | ||
| 85 | void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr) | 84 | void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr) |
| @@ -114,6 +113,10 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu) | |||
| 114 | printk(KERN_ERR "can not alloc kstat_irqs\n"); | 113 | printk(KERN_ERR "can not alloc kstat_irqs\n"); |
| 115 | BUG_ON(1); | 114 | BUG_ON(1); |
| 116 | } | 115 | } |
| 116 | if (!init_alloc_desc_masks(desc, cpu, false)) { | ||
| 117 | printk(KERN_ERR "can not alloc irq_desc cpumasks\n"); | ||
| 118 | BUG_ON(1); | ||
| 119 | } | ||
| 117 | arch_init_chip_data(desc, cpu); | 120 | arch_init_chip_data(desc, cpu); |
| 118 | } | 121 | } |
| 119 | 122 | ||
| @@ -122,7 +125,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu) | |||
| 122 | */ | 125 | */ |
| 123 | DEFINE_SPINLOCK(sparse_irq_lock); | 126 | DEFINE_SPINLOCK(sparse_irq_lock); |
| 124 | 127 | ||
| 125 | struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly; | 128 | struct irq_desc **irq_desc_ptrs __read_mostly; |
| 126 | 129 | ||
| 127 | static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { | 130 | static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { |
| 128 | [0 ... NR_IRQS_LEGACY-1] = { | 131 | [0 ... NR_IRQS_LEGACY-1] = { |
| @@ -132,14 +135,10 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm | |||
| 132 | .handle_irq = handle_bad_irq, | 135 | .handle_irq = handle_bad_irq, |
| 133 | .depth = 1, | 136 | .depth = 1, |
| 134 | .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), | 137 | .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), |
| 135 | #ifdef CONFIG_SMP | ||
| 136 | .affinity = CPU_MASK_ALL | ||
| 137 | #endif | ||
| 138 | } | 138 | } |
| 139 | }; | 139 | }; |
| 140 | 140 | ||
| 141 | /* FIXME: use bootmem alloc ...*/ | 141 | static unsigned int *kstat_irqs_legacy; |
| 142 | static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS]; | ||
| 143 | 142 | ||
| 144 | int __init early_irq_init(void) | 143 | int __init early_irq_init(void) |
| 145 | { | 144 | { |
| @@ -149,18 +148,30 @@ int __init early_irq_init(void) | |||
| 149 | 148 | ||
| 150 | init_irq_default_affinity(); | 149 | init_irq_default_affinity(); |
| 151 | 150 | ||
| 151 | /* initialize nr_irqs based on nr_cpu_ids */ | ||
| 152 | arch_probe_nr_irqs(); | ||
| 153 | printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs); | ||
| 154 | |||
| 152 | desc = irq_desc_legacy; | 155 | desc = irq_desc_legacy; |
| 153 | legacy_count = ARRAY_SIZE(irq_desc_legacy); | 156 | legacy_count = ARRAY_SIZE(irq_desc_legacy); |
| 154 | 157 | ||
| 158 | /* allocate irq_desc_ptrs array based on nr_irqs */ | ||
| 159 | irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *)); | ||
| 160 | |||
| 161 | /* allocate based on nr_cpu_ids */ | ||
| 162 | /* FIXME: invert kstat_irgs, and it'd be a per_cpu_alloc'd thing */ | ||
| 163 | kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids * | ||
| 164 | sizeof(int)); | ||
| 165 | |||
| 155 | for (i = 0; i < legacy_count; i++) { | 166 | for (i = 0; i < legacy_count; i++) { |
| 156 | desc[i].irq = i; | 167 | desc[i].irq = i; |
| 157 | desc[i].kstat_irqs = kstat_irqs_legacy[i]; | 168 | desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids; |
| 158 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); | 169 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); |
| 159 | 170 | init_alloc_desc_masks(&desc[i], 0, true); | |
| 160 | irq_desc_ptrs[i] = desc + i; | 171 | irq_desc_ptrs[i] = desc + i; |
| 161 | } | 172 | } |
| 162 | 173 | ||
| 163 | for (i = legacy_count; i < NR_IRQS; i++) | 174 | for (i = legacy_count; i < nr_irqs; i++) |
| 164 | irq_desc_ptrs[i] = NULL; | 175 | irq_desc_ptrs[i] = NULL; |
| 165 | 176 | ||
| 166 | return arch_early_irq_init(); | 177 | return arch_early_irq_init(); |
| @@ -168,7 +179,10 @@ int __init early_irq_init(void) | |||
| 168 | 179 | ||
| 169 | struct irq_desc *irq_to_desc(unsigned int irq) | 180 | struct irq_desc *irq_to_desc(unsigned int irq) |
| 170 | { | 181 | { |
| 171 | return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL; | 182 | if (irq_desc_ptrs && irq < nr_irqs) |
| 183 | return irq_desc_ptrs[irq]; | ||
| 184 | |||
| 185 | return NULL; | ||
| 172 | } | 186 | } |
| 173 | 187 | ||
| 174 | struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu) | 188 | struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu) |
| @@ -177,10 +191,9 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu) | |||
| 177 | unsigned long flags; | 191 | unsigned long flags; |
| 178 | int node; | 192 | int node; |
| 179 | 193 | ||
| 180 | if (irq >= NR_IRQS) { | 194 | if (irq >= nr_irqs) { |
| 181 | printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n", | 195 | WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n", |
| 182 | irq, NR_IRQS); | 196 | irq, nr_irqs); |
| 183 | WARN_ON(1); | ||
| 184 | return NULL; | 197 | return NULL; |
| 185 | } | 198 | } |
| 186 | 199 | ||
| @@ -222,9 +235,6 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { | |||
| 222 | .handle_irq = handle_bad_irq, | 235 | .handle_irq = handle_bad_irq, |
| 223 | .depth = 1, | 236 | .depth = 1, |
| 224 | .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), | 237 | .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), |
| 225 | #ifdef CONFIG_SMP | ||
| 226 | .affinity = CPU_MASK_ALL | ||
| 227 | #endif | ||
| 228 | } | 238 | } |
| 229 | }; | 239 | }; |
| 230 | 240 | ||
| @@ -236,12 +246,15 @@ int __init early_irq_init(void) | |||
| 236 | 246 | ||
| 237 | init_irq_default_affinity(); | 247 | init_irq_default_affinity(); |
| 238 | 248 | ||
| 249 | printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS); | ||
| 250 | |||
| 239 | desc = irq_desc; | 251 | desc = irq_desc; |
| 240 | count = ARRAY_SIZE(irq_desc); | 252 | count = ARRAY_SIZE(irq_desc); |
| 241 | 253 | ||
| 242 | for (i = 0; i < count; i++) | 254 | for (i = 0; i < count; i++) { |
| 243 | desc[i].irq = i; | 255 | desc[i].irq = i; |
| 244 | 256 | init_alloc_desc_masks(&desc[i], 0, true); | |
| 257 | } | ||
| 245 | return arch_early_irq_init(); | 258 | return arch_early_irq_init(); |
| 246 | } | 259 | } |
| 247 | 260 | ||
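
early_irq_init() and init_one_irq_desc() above now call init_alloc_desc_masks(), which is introduced elsewhere in this series. A hedged sketch of what it is assumed to do, inferred from the call sites here: allocate the descriptor's cpumasks from bootmem during early boot, and from node-local GFP_ATOMIC memory afterwards, returning false on failure.

```c
/* Sketch only; the real helper is defined in <linux/irq.h>. */
static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
					 bool boot)
{
	if (boot) {
		alloc_bootmem_cpumask_var(&desc->affinity);
		cpumask_setall(desc->affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
		alloc_bootmem_cpumask_var(&desc->pending_mask);
		cpumask_clear(desc->pending_mask);
#endif
		return true;
	}

	if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC,
				    cpu_to_node(cpu)))
		return false;
	cpumask_setall(desc->affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC,
				    cpu_to_node(cpu))) {
		free_cpumask_var(desc->affinity);
		return false;
	}
	cpumask_clear(desc->pending_mask);
#endif
	return true;
}
```
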
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index e6d0a43cc125..40416a81a0f5 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
| @@ -16,7 +16,14 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |||
| 16 | extern struct lock_class_key irq_desc_lock_class; | 16 | extern struct lock_class_key irq_desc_lock_class; |
| 17 | extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr); | 17 | extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr); |
| 18 | extern spinlock_t sparse_irq_lock; | 18 | extern spinlock_t sparse_irq_lock; |
| 19 | |||
| 20 | #ifdef CONFIG_SPARSE_IRQ | ||
| 21 | /* irq_desc_ptrs allocated at boot time */ | ||
| 22 | extern struct irq_desc **irq_desc_ptrs; | ||
| 23 | #else | ||
| 24 | /* irq_desc_ptrs is a fixed size array */ | ||
| 19 | extern struct irq_desc *irq_desc_ptrs[NR_IRQS]; | 25 | extern struct irq_desc *irq_desc_ptrs[NR_IRQS]; |
| 26 | #endif | ||
| 20 | 27 | ||
| 21 | #ifdef CONFIG_PROC_FS | 28 | #ifdef CONFIG_PROC_FS |
| 22 | extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); | 29 | extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 291f03664552..a3a5dc9ef346 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
| @@ -90,14 +90,14 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |||
| 90 | 90 | ||
| 91 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 91 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| 92 | if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { | 92 | if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { |
| 93 | cpumask_copy(&desc->affinity, cpumask); | 93 | cpumask_copy(desc->affinity, cpumask); |
| 94 | desc->chip->set_affinity(irq, cpumask); | 94 | desc->chip->set_affinity(irq, cpumask); |
| 95 | } else { | 95 | } else { |
| 96 | desc->status |= IRQ_MOVE_PENDING; | 96 | desc->status |= IRQ_MOVE_PENDING; |
| 97 | cpumask_copy(&desc->pending_mask, cpumask); | 97 | cpumask_copy(desc->pending_mask, cpumask); |
| 98 | } | 98 | } |
| 99 | #else | 99 | #else |
| 100 | cpumask_copy(&desc->affinity, cpumask); | 100 | cpumask_copy(desc->affinity, cpumask); |
| 101 | desc->chip->set_affinity(irq, cpumask); | 101 | desc->chip->set_affinity(irq, cpumask); |
| 102 | #endif | 102 | #endif |
| 103 | desc->status |= IRQ_AFFINITY_SET; | 103 | desc->status |= IRQ_AFFINITY_SET; |
| @@ -119,16 +119,16 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc) | |||
| 119 | * one of the targets is online. | 119 | * one of the targets is online. |
| 120 | */ | 120 | */ |
| 121 | if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { | 121 | if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { |
| 122 | if (cpumask_any_and(&desc->affinity, cpu_online_mask) | 122 | if (cpumask_any_and(desc->affinity, cpu_online_mask) |
| 123 | < nr_cpu_ids) | 123 | < nr_cpu_ids) |
| 124 | goto set_affinity; | 124 | goto set_affinity; |
| 125 | else | 125 | else |
| 126 | desc->status &= ~IRQ_AFFINITY_SET; | 126 | desc->status &= ~IRQ_AFFINITY_SET; |
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity); | 129 | cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity); |
| 130 | set_affinity: | 130 | set_affinity: |
| 131 | desc->chip->set_affinity(irq, &desc->affinity); | 131 | desc->chip->set_affinity(irq, desc->affinity); |
| 132 | 132 | ||
| 133 | return 0; | 133 | return 0; |
| 134 | } | 134 | } |
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index bd72329e630c..e05ad9be43b7 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
| @@ -18,7 +18,7 @@ void move_masked_irq(int irq) | |||
| 18 | 18 | ||
| 19 | desc->status &= ~IRQ_MOVE_PENDING; | 19 | desc->status &= ~IRQ_MOVE_PENDING; |
| 20 | 20 | ||
| 21 | if (unlikely(cpumask_empty(&desc->pending_mask))) | 21 | if (unlikely(cpumask_empty(desc->pending_mask))) |
| 22 | return; | 22 | return; |
| 23 | 23 | ||
| 24 | if (!desc->chip->set_affinity) | 24 | if (!desc->chip->set_affinity) |
| @@ -38,13 +38,13 @@ void move_masked_irq(int irq) | |||
| 38 | * For correct operation this depends on the caller | 38 | * For correct operation this depends on the caller |
| 39 | * masking the irqs. | 39 | * masking the irqs. |
| 40 | */ | 40 | */ |
| 41 | if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask) | 41 | if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) |
| 42 | < nr_cpu_ids)) { | 42 | < nr_cpu_ids)) { |
| 43 | cpumask_and(&desc->affinity, | 43 | cpumask_and(desc->affinity, |
| 44 | &desc->pending_mask, cpu_online_mask); | 44 | desc->pending_mask, cpu_online_mask); |
| 45 | desc->chip->set_affinity(irq, &desc->affinity); | 45 | desc->chip->set_affinity(irq, desc->affinity); |
| 46 | } | 46 | } |
| 47 | cpumask_clear(&desc->pending_mask); | 47 | cpumask_clear(desc->pending_mask); |
| 48 | } | 48 | } |
| 49 | 49 | ||
| 50 | void move_native_irq(int irq) | 50 | void move_native_irq(int irq) |
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index acd88356ac76..7f9b80434e32 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
| @@ -38,15 +38,22 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc) | |||
| 38 | old_desc->kstat_irqs = NULL; | 38 | old_desc->kstat_irqs = NULL; |
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, | 41 | static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, |
| 42 | struct irq_desc *desc, int cpu) | 42 | struct irq_desc *desc, int cpu) |
| 43 | { | 43 | { |
| 44 | memcpy(desc, old_desc, sizeof(struct irq_desc)); | 44 | memcpy(desc, old_desc, sizeof(struct irq_desc)); |
| 45 | if (!init_alloc_desc_masks(desc, cpu, false)) { | ||
| 46 | printk(KERN_ERR "irq %d: can not get new irq_desc cpumask " | ||
| 47 | "for migration.\n", irq); | ||
| 48 | return false; | ||
| 49 | } | ||
| 45 | spin_lock_init(&desc->lock); | 50 | spin_lock_init(&desc->lock); |
| 46 | desc->cpu = cpu; | 51 | desc->cpu = cpu; |
| 47 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); | 52 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); |
| 48 | init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids); | 53 | init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids); |
| 54 | init_copy_desc_masks(old_desc, desc); | ||
| 49 | arch_init_copy_chip_data(old_desc, desc, cpu); | 55 | arch_init_copy_chip_data(old_desc, desc, cpu); |
| 56 | return true; | ||
| 50 | } | 57 | } |
| 51 | 58 | ||
| 52 | static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc) | 59 | static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc) |
| @@ -76,12 +83,18 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, | |||
| 76 | node = cpu_to_node(cpu); | 83 | node = cpu_to_node(cpu); |
| 77 | desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); | 84 | desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); |
| 78 | if (!desc) { | 85 | if (!desc) { |
| 79 | printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq); | 86 | printk(KERN_ERR "irq %d: can not get new irq_desc " |
| 87 | "for migration.\n", irq); | ||
| 88 | /* still use old one */ | ||
| 89 | desc = old_desc; | ||
| 90 | goto out_unlock; | ||
| 91 | } | ||
| 92 | if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) { | ||
| 80 | /* still use old one */ | 93 | /* still use old one */ |
| 94 | kfree(desc); | ||
| 81 | desc = old_desc; | 95 | desc = old_desc; |
| 82 | goto out_unlock; | 96 | goto out_unlock; |
| 83 | } | 97 | } |
| 84 | init_copy_one_irq_desc(irq, old_desc, desc, cpu); | ||
| 85 | 98 | ||
| 86 | irq_desc_ptrs[irq] = desc; | 99 | irq_desc_ptrs[irq] = desc; |
| 87 | spin_unlock_irqrestore(&sparse_irq_lock, flags); | 100 | spin_unlock_irqrestore(&sparse_irq_lock, flags); |
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index aae3f742bcec..692363dd591f 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
| @@ -20,11 +20,11 @@ static struct proc_dir_entry *root_irq_dir; | |||
| 20 | static int irq_affinity_proc_show(struct seq_file *m, void *v) | 20 | static int irq_affinity_proc_show(struct seq_file *m, void *v) |
| 21 | { | 21 | { |
| 22 | struct irq_desc *desc = irq_to_desc((long)m->private); | 22 | struct irq_desc *desc = irq_to_desc((long)m->private); |
| 23 | const struct cpumask *mask = &desc->affinity; | 23 | const struct cpumask *mask = desc->affinity; |
| 24 | 24 | ||
| 25 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 25 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| 26 | if (desc->status & IRQ_MOVE_PENDING) | 26 | if (desc->status & IRQ_MOVE_PENDING) |
| 27 | mask = &desc->pending_mask; | 27 | mask = desc->pending_mask; |
| 28 | #endif | 28 | #endif |
| 29 | seq_cpumask(m, mask); | 29 | seq_cpumask(m, mask); |
| 30 | seq_putc(m, '\n'); | 30 | seq_putc(m, '\n'); |
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 483899578259..c7fd6692939d 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
| @@ -1130,7 +1130,7 @@ void crash_save_cpu(struct pt_regs *regs, int cpu) | |||
| 1130 | return; | 1130 | return; |
| 1131 | memset(&prstatus, 0, sizeof(prstatus)); | 1131 | memset(&prstatus, 0, sizeof(prstatus)); |
| 1132 | prstatus.pr_pid = current->pid; | 1132 | prstatus.pr_pid = current->pid; |
| 1133 | elf_core_copy_regs(&prstatus.pr_reg, regs); | 1133 | elf_core_copy_kernel_regs(&prstatus.pr_reg, regs); |
| 1134 | buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS, | 1134 | buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS, |
| 1135 | &prstatus, sizeof(prstatus)); | 1135 | &prstatus, sizeof(prstatus)); |
| 1136 | final_note(buf); | 1136 | final_note(buf); |
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 7ba8cd9845cb..479d4d5672f9 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
| @@ -43,6 +43,7 @@ | |||
| 43 | #include <linux/seq_file.h> | 43 | #include <linux/seq_file.h> |
| 44 | #include <linux/debugfs.h> | 44 | #include <linux/debugfs.h> |
| 45 | #include <linux/kdebug.h> | 45 | #include <linux/kdebug.h> |
| 46 | #include <linux/memory.h> | ||
| 46 | 47 | ||
| 47 | #include <asm-generic/sections.h> | 48 | #include <asm-generic/sections.h> |
| 48 | #include <asm/cacheflush.h> | 49 | #include <asm/cacheflush.h> |
| @@ -699,9 +700,10 @@ int __kprobes register_kprobe(struct kprobe *p) | |||
| 699 | goto out; | 700 | goto out; |
| 700 | } | 701 | } |
| 701 | 702 | ||
| 703 | mutex_lock(&text_mutex); | ||
| 702 | ret = arch_prepare_kprobe(p); | 704 | ret = arch_prepare_kprobe(p); |
| 703 | if (ret) | 705 | if (ret) |
| 704 | goto out; | 706 | goto out_unlock_text; |
| 705 | 707 | ||
| 706 | INIT_HLIST_NODE(&p->hlist); | 708 | INIT_HLIST_NODE(&p->hlist); |
| 707 | hlist_add_head_rcu(&p->hlist, | 709 | hlist_add_head_rcu(&p->hlist, |
| @@ -710,6 +712,8 @@ int __kprobes register_kprobe(struct kprobe *p) | |||
| 710 | if (kprobe_enabled) | 712 | if (kprobe_enabled) |
| 711 | arch_arm_kprobe(p); | 713 | arch_arm_kprobe(p); |
| 712 | 714 | ||
| 715 | out_unlock_text: | ||
| 716 | mutex_unlock(&text_mutex); | ||
| 713 | out: | 717 | out: |
| 714 | mutex_unlock(&kprobe_mutex); | 718 | mutex_unlock(&kprobe_mutex); |
| 715 | 719 | ||
| @@ -746,8 +750,11 @@ valid_p: | |||
| 746 | * enabled and not gone - otherwise, the breakpoint would | 750 | * enabled and not gone - otherwise, the breakpoint would |
| 747 | * already have been removed. We save on flushing icache. | 751 | * already have been removed. We save on flushing icache. |
| 748 | */ | 752 | */ |
| 749 | if (kprobe_enabled && !kprobe_gone(old_p)) | 753 | if (kprobe_enabled && !kprobe_gone(old_p)) { |
| 754 | mutex_lock(&text_mutex); | ||
| 750 | arch_disarm_kprobe(p); | 755 | arch_disarm_kprobe(p); |
| 756 | mutex_unlock(&text_mutex); | ||
| 757 | } | ||
| 751 | hlist_del_rcu(&old_p->hlist); | 758 | hlist_del_rcu(&old_p->hlist); |
| 752 | } else { | 759 | } else { |
| 753 | if (p->break_handler && !kprobe_gone(p)) | 760 | if (p->break_handler && !kprobe_gone(p)) |
| @@ -1280,12 +1287,14 @@ static void __kprobes enable_all_kprobes(void) | |||
| 1280 | if (kprobe_enabled) | 1287 | if (kprobe_enabled) |
| 1281 | goto already_enabled; | 1288 | goto already_enabled; |
| 1282 | 1289 | ||
| 1290 | mutex_lock(&text_mutex); | ||
| 1283 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { | 1291 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
| 1284 | head = &kprobe_table[i]; | 1292 | head = &kprobe_table[i]; |
| 1285 | hlist_for_each_entry_rcu(p, node, head, hlist) | 1293 | hlist_for_each_entry_rcu(p, node, head, hlist) |
| 1286 | if (!kprobe_gone(p)) | 1294 | if (!kprobe_gone(p)) |
| 1287 | arch_arm_kprobe(p); | 1295 | arch_arm_kprobe(p); |
| 1288 | } | 1296 | } |
| 1297 | mutex_unlock(&text_mutex); | ||
| 1289 | 1298 | ||
| 1290 | kprobe_enabled = true; | 1299 | kprobe_enabled = true; |
| 1291 | printk(KERN_INFO "Kprobes globally enabled\n"); | 1300 | printk(KERN_INFO "Kprobes globally enabled\n"); |
| @@ -1310,6 +1319,7 @@ static void __kprobes disable_all_kprobes(void) | |||
| 1310 | 1319 | ||
| 1311 | kprobe_enabled = false; | 1320 | kprobe_enabled = false; |
| 1312 | printk(KERN_INFO "Kprobes globally disabled\n"); | 1321 | printk(KERN_INFO "Kprobes globally disabled\n"); |
| 1322 | mutex_lock(&text_mutex); | ||
| 1313 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { | 1323 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
| 1314 | head = &kprobe_table[i]; | 1324 | head = &kprobe_table[i]; |
| 1315 | hlist_for_each_entry_rcu(p, node, head, hlist) { | 1325 | hlist_for_each_entry_rcu(p, node, head, hlist) { |
| @@ -1318,6 +1328,7 @@ static void __kprobes disable_all_kprobes(void) | |||
| 1318 | } | 1328 | } |
| 1319 | } | 1329 | } |
| 1320 | 1330 | ||
| 1331 | mutex_unlock(&text_mutex); | ||
| 1321 | mutex_unlock(&kprobe_mutex); | 1332 | mutex_unlock(&kprobe_mutex); |
| 1322 | /* Allow all currently running kprobes to complete */ | 1333 | /* Allow all currently running kprobes to complete */ |
| 1323 | synchronize_sched(); | 1334 | synchronize_sched(); |
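
The kprobes hunks adopt the convention that any writer of live kernel text holds text_mutex (declared in <linux/memory.h>) for the duration of the modification, serializing kprobe arming against other text editors. A minimal sketch of the pattern; arm_probe_example() is a hypothetical wrapper, not an existing kernel function:

```c
#include <linux/kprobes.h>
#include <linux/memory.h>	/* text_mutex */
#include <linux/mutex.h>

/* Hypothetical wrapper illustrating the locking convention. */
static void arm_probe_example(struct kprobe *p)
{
	mutex_lock(&text_mutex);
	arch_arm_kprobe(p);	/* writes the breakpoint into .text */
	mutex_unlock(&text_mutex);
}
```
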
diff --git a/kernel/module.c b/kernel/module.c
index 22d7379709da..90a6d63d9211 100644
--- a/kernel/module.c
+++ b/kernel/module.c
| @@ -51,6 +51,7 @@ | |||
| 51 | #include <linux/tracepoint.h> | 51 | #include <linux/tracepoint.h> |
| 52 | #include <linux/ftrace.h> | 52 | #include <linux/ftrace.h> |
| 53 | #include <linux/async.h> | 53 | #include <linux/async.h> |
| 54 | #include <linux/percpu.h> | ||
| 54 | 55 | ||
| 55 | #if 0 | 56 | #if 0 |
| 56 | #define DEBUGP printk | 57 | #define DEBUGP printk |
| @@ -366,6 +367,34 @@ static struct module *find_module(const char *name) | |||
| 366 | } | 367 | } |
| 367 | 368 | ||
| 368 | #ifdef CONFIG_SMP | 369 | #ifdef CONFIG_SMP |
| 370 | |||
| 371 | #ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA | ||
| 372 | |||
| 373 | static void *percpu_modalloc(unsigned long size, unsigned long align, | ||
| 374 | const char *name) | ||
| 375 | { | ||
| 376 | void *ptr; | ||
| 377 | |||
| 378 | if (align > PAGE_SIZE) { | ||
| 379 | printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n", | ||
| 380 | name, align, PAGE_SIZE); | ||
| 381 | align = PAGE_SIZE; | ||
| 382 | } | ||
| 383 | |||
| 384 | ptr = __alloc_percpu(size, align); | ||
| 385 | if (!ptr) | ||
| 386 | printk(KERN_WARNING | ||
| 387 | "Could not allocate %lu bytes percpu data\n", size); | ||
| 388 | return ptr; | ||
| 389 | } | ||
| 390 | |||
| 391 | static void percpu_modfree(void *freeme) | ||
| 392 | { | ||
| 393 | free_percpu(freeme); | ||
| 394 | } | ||
| 395 | |||
| 396 | #else /* ... !CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */ | ||
| 397 | |||
| 369 | /* Number of blocks used and allocated. */ | 398 | /* Number of blocks used and allocated. */ |
| 370 | static unsigned int pcpu_num_used, pcpu_num_allocated; | 399 | static unsigned int pcpu_num_used, pcpu_num_allocated; |
| 371 | /* Size of each block. -ve means used. */ | 400 | /* Size of each block. -ve means used. */ |
| @@ -480,21 +509,6 @@ static void percpu_modfree(void *freeme) | |||
| 480 | } | 509 | } |
| 481 | } | 510 | } |
| 482 | 511 | ||
| 483 | static unsigned int find_pcpusec(Elf_Ehdr *hdr, | ||
| 484 | Elf_Shdr *sechdrs, | ||
| 485 | const char *secstrings) | ||
| 486 | { | ||
| 487 | return find_sec(hdr, sechdrs, secstrings, ".data.percpu"); | ||
| 488 | } | ||
| 489 | |||
| 490 | static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size) | ||
| 491 | { | ||
| 492 | int cpu; | ||
| 493 | |||
| 494 | for_each_possible_cpu(cpu) | ||
| 495 | memcpy(pcpudest + per_cpu_offset(cpu), from, size); | ||
| 496 | } | ||
| 497 | |||
| 498 | static int percpu_modinit(void) | 512 | static int percpu_modinit(void) |
| 499 | { | 513 | { |
| 500 | pcpu_num_used = 2; | 514 | pcpu_num_used = 2; |
| @@ -513,7 +527,26 @@ static int percpu_modinit(void) | |||
| 513 | return 0; | 527 | return 0; |
| 514 | } | 528 | } |
| 515 | __initcall(percpu_modinit); | 529 | __initcall(percpu_modinit); |
| 530 | |||
| 531 | #endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */ | ||
| 532 | |||
| 533 | static unsigned int find_pcpusec(Elf_Ehdr *hdr, | ||
| 534 | Elf_Shdr *sechdrs, | ||
| 535 | const char *secstrings) | ||
| 536 | { | ||
| 537 | return find_sec(hdr, sechdrs, secstrings, ".data.percpu"); | ||
| 538 | } | ||
| 539 | |||
| 540 | static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size) | ||
| 541 | { | ||
| 542 | int cpu; | ||
| 543 | |||
| 544 | for_each_possible_cpu(cpu) | ||
| 545 | memcpy(pcpudest + per_cpu_offset(cpu), from, size); | ||
| 546 | } | ||
| 547 | |||
| 516 | #else /* ... !CONFIG_SMP */ | 548 | #else /* ... !CONFIG_SMP */ |
| 549 | |||
| 517 | static inline void *percpu_modalloc(unsigned long size, unsigned long align, | 550 | static inline void *percpu_modalloc(unsigned long size, unsigned long align, |
| 518 | const char *name) | 551 | const char *name) |
| 519 | { | 552 | { |
| @@ -535,6 +568,7 @@ static inline void percpu_modcopy(void *pcpudst, const void *src, | |||
| 535 | /* pcpusec should be 0, and size of that section should be 0. */ | 568 | /* pcpusec should be 0, and size of that section should be 0. */ |
| 536 | BUG_ON(size != 0); | 569 | BUG_ON(size != 0); |
| 537 | } | 570 | } |
| 571 | |||
| 538 | #endif /* CONFIG_SMP */ | 572 | #endif /* CONFIG_SMP */ |
| 539 | 573 | ||
| 540 | #define MODINFO_ATTR(field) \ | 574 | #define MODINFO_ATTR(field) \ |
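
Under CONFIG_HAVE_DYNAMIC_PER_CPU_AREA the new percpu_modalloc() above is a thin wrapper around the generic per-cpu allocator. A usage sketch of that allocator as it stands at this point, with size and alignment given in bytes; pcpu_counters and the init/exit functions are illustrative names only:

```c
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/percpu.h>

static unsigned long *pcpu_counters;	/* illustrative variable */

static int pcpu_counters_init(void)
{
	int cpu;

	pcpu_counters = __alloc_percpu(sizeof(unsigned long),
				       __alignof__(unsigned long));
	if (!pcpu_counters)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(pcpu_counters, cpu) = 0;

	return 0;
}

static void pcpu_counters_exit(void)
{
	free_percpu(pcpu_counters);
}
```
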
diff --git a/kernel/panic.c b/kernel/panic.c
index 2a2ff36ff44d..32fe4eff1b89 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
| @@ -74,6 +74,9 @@ NORET_TYPE void panic(const char * fmt, ...) | |||
| 74 | vsnprintf(buf, sizeof(buf), fmt, args); | 74 | vsnprintf(buf, sizeof(buf), fmt, args); |
| 75 | va_end(args); | 75 | va_end(args); |
| 76 | printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf); | 76 | printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf); |
| 77 | #ifdef CONFIG_DEBUG_BUGVERBOSE | ||
| 78 | dump_stack(); | ||
| 79 | #endif | ||
| 77 | bust_spinlocks(0); | 80 | bust_spinlocks(0); |
| 78 | 81 | ||
| 79 | /* | 82 | /* |
| @@ -355,15 +358,18 @@ EXPORT_SYMBOL(warn_slowpath); | |||
| 355 | #endif | 358 | #endif |
| 356 | 359 | ||
| 357 | #ifdef CONFIG_CC_STACKPROTECTOR | 360 | #ifdef CONFIG_CC_STACKPROTECTOR |
| 361 | |||
| 358 | /* | 362 | /* |
| 359 | * Called when gcc's -fstack-protector feature is used, and | 363 | * Called when gcc's -fstack-protector feature is used, and |
| 360 | * gcc detects corruption of the on-stack canary value | 364 | * gcc detects corruption of the on-stack canary value |
| 361 | */ | 365 | */ |
| 362 | void __stack_chk_fail(void) | 366 | void __stack_chk_fail(void) |
| 363 | { | 367 | { |
| 364 | panic("stack-protector: Kernel stack is corrupted"); | 368 | panic("stack-protector: Kernel stack is corrupted in: %p\n", |
| 369 | __builtin_return_address(0)); | ||
| 365 | } | 370 | } |
| 366 | EXPORT_SYMBOL(__stack_chk_fail); | 371 | EXPORT_SYMBOL(__stack_chk_fail); |
| 372 | |||
| 367 | #endif | 373 | #endif |
| 368 | 374 | ||
| 369 | core_param(panic, panic_timeout, int, 0644); | 375 | core_param(panic, panic_timeout, int, 0644); |
diff --git a/kernel/sched.c b/kernel/sched.c
index e1f676e20119..7299083e69e7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
| @@ -6009,12 +6009,7 @@ void sched_show_task(struct task_struct *p) | |||
| 6009 | printk(KERN_CONT " %016lx ", thread_saved_pc(p)); | 6009 | printk(KERN_CONT " %016lx ", thread_saved_pc(p)); |
| 6010 | #endif | 6010 | #endif |
| 6011 | #ifdef CONFIG_DEBUG_STACK_USAGE | 6011 | #ifdef CONFIG_DEBUG_STACK_USAGE |
| 6012 | { | 6012 | free = stack_not_used(p); |
| 6013 | unsigned long *n = end_of_stack(p); | ||
| 6014 | while (!*n) | ||
| 6015 | n++; | ||
| 6016 | free = (unsigned long)n - (unsigned long)end_of_stack(p); | ||
| 6017 | } | ||
| 6018 | #endif | 6013 | #endif |
| 6019 | printk(KERN_CONT "%5lu %5d %6d\n", free, | 6014 | printk(KERN_CONT "%5lu %5d %6d\n", free, |
| 6020 | task_pid_nr(p), task_pid_nr(p->real_parent)); | 6015 | task_pid_nr(p), task_pid_nr(p->real_parent)); |
| @@ -9555,7 +9550,7 @@ cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) | |||
| 9555 | 9550 | ||
| 9556 | static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) | 9551 | static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) |
| 9557 | { | 9552 | { |
| 9558 | u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); | 9553 | u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); |
| 9559 | u64 data; | 9554 | u64 data; |
| 9560 | 9555 | ||
| 9561 | #ifndef CONFIG_64BIT | 9556 | #ifndef CONFIG_64BIT |
| @@ -9574,7 +9569,7 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) | |||
| 9574 | 9569 | ||
| 9575 | static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) | 9570 | static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) |
| 9576 | { | 9571 | { |
| 9577 | u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); | 9572 | u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); |
| 9578 | 9573 | ||
| 9579 | #ifndef CONFIG_64BIT | 9574 | #ifndef CONFIG_64BIT |
| 9580 | /* | 9575 | /* |
| @@ -9670,7 +9665,7 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime) | |||
| 9670 | ca = task_ca(tsk); | 9665 | ca = task_ca(tsk); |
| 9671 | 9666 | ||
| 9672 | for (; ca; ca = ca->parent) { | 9667 | for (; ca; ca = ca->parent) { |
| 9673 | u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); | 9668 | u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); |
| 9674 | *cpuusage += cputime; | 9669 | *cpuusage += cputime; |
| 9675 | } | 9670 | } |
| 9676 | } | 9671 | } |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index bac1061cea2f..da932f4c8524 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
| @@ -960,12 +960,13 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu) | |||
| 960 | 960 | ||
| 961 | static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask); | 961 | static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask); |
| 962 | 962 | ||
| 963 | static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask) | 963 | static inline int pick_optimal_cpu(int this_cpu, |
| 964 | const struct cpumask *mask) | ||
| 964 | { | 965 | { |
| 965 | int first; | 966 | int first; |
| 966 | 967 | ||
| 967 | /* "this_cpu" is cheaper to preempt than a remote processor */ | 968 | /* "this_cpu" is cheaper to preempt than a remote processor */ |
| 968 | if ((this_cpu != -1) && cpu_isset(this_cpu, *mask)) | 969 | if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask)) |
| 969 | return this_cpu; | 970 | return this_cpu; |
| 970 | 971 | ||
| 971 | first = cpumask_first(mask); | 972 | first = cpumask_first(mask); |
| @@ -981,6 +982,7 @@ static int find_lowest_rq(struct task_struct *task) | |||
| 981 | struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask); | 982 | struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask); |
| 982 | int this_cpu = smp_processor_id(); | 983 | int this_cpu = smp_processor_id(); |
| 983 | int cpu = task_cpu(task); | 984 | int cpu = task_cpu(task); |
| 985 | cpumask_var_t domain_mask; | ||
| 984 | 986 | ||
| 985 | if (task->rt.nr_cpus_allowed == 1) | 987 | if (task->rt.nr_cpus_allowed == 1) |
| 986 | return -1; /* No other targets possible */ | 988 | return -1; /* No other targets possible */ |
| @@ -1013,19 +1015,25 @@ static int find_lowest_rq(struct task_struct *task) | |||
| 1013 | if (this_cpu == cpu) | 1015 | if (this_cpu == cpu) |
| 1014 | this_cpu = -1; /* Skip this_cpu opt if the same */ | 1016 | this_cpu = -1; /* Skip this_cpu opt if the same */ |
| 1015 | 1017 | ||
| 1016 | for_each_domain(cpu, sd) { | 1018 | if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) { |
| 1017 | if (sd->flags & SD_WAKE_AFFINE) { | 1019 | for_each_domain(cpu, sd) { |
| 1018 | cpumask_t domain_mask; | 1020 | if (sd->flags & SD_WAKE_AFFINE) { |
| 1019 | int best_cpu; | 1021 | int best_cpu; |
| 1020 | 1022 | ||
| 1021 | cpumask_and(&domain_mask, sched_domain_span(sd), | 1023 | cpumask_and(domain_mask, |
| 1022 | lowest_mask); | 1024 | sched_domain_span(sd), |
| 1025 | lowest_mask); | ||
| 1023 | 1026 | ||
| 1024 | best_cpu = pick_optimal_cpu(this_cpu, | 1027 | best_cpu = pick_optimal_cpu(this_cpu, |
| 1025 | &domain_mask); | 1028 | domain_mask); |
| 1026 | if (best_cpu != -1) | 1029 | |
| 1027 | return best_cpu; | 1030 | if (best_cpu != -1) { |
| 1031 | free_cpumask_var(domain_mask); | ||
| 1032 | return best_cpu; | ||
| 1033 | } | ||
| 1034 | } | ||
| 1028 | } | 1035 | } |
| 1036 | free_cpumask_var(domain_mask); | ||
| 1029 | } | 1037 | } |
| 1030 | 1038 | ||
| 1031 | /* | 1039 | /* |
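
The sched_rt.c hunk removes a cpumask_t from the stack (512 bytes with NR_CPUS=4096) in favour of a GFP_ATOMIC cpumask_var_t that must be freed on every exit path. The general shape of the conversion, with pick_first_online() as an illustrative helper rather than kernel code:

```c
#include <linux/cpumask.h>
#include <linux/gfp.h>

static int pick_first_online(const struct cpumask *candidates)
{
	cpumask_var_t tmp;
	int cpu = -1;

	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
		return -1;

	cpumask_and(tmp, candidates, cpu_online_mask);
	if (!cpumask_empty(tmp))
		cpu = cpumask_first(tmp);

	free_cpumask_var(tmp);	/* freed on every exit path */
	return cpu;
}
```
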
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 6edfc2c11d99..98dd68eea9e6 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
| @@ -806,6 +806,11 @@ int __init __weak early_irq_init(void) | |||
| 806 | return 0; | 806 | return 0; |
| 807 | } | 807 | } |
| 808 | 808 | ||
| 809 | int __init __weak arch_probe_nr_irqs(void) | ||
| 810 | { | ||
| 811 | return 0; | ||
| 812 | } | ||
| 813 | |||
| 809 | int __init __weak arch_early_irq_init(void) | 814 | int __init __weak arch_early_irq_init(void) |
| 810 | { | 815 | { |
| 811 | return 0; | 816 | return 0; |
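
softirq.c gains a weak stub for arch_probe_nr_irqs(), the hook early_irq_init() now calls before sizing irq_desc_ptrs; an architecture that wants nr_irqs smaller than NR_IRQS overrides it. A hypothetical override, with a made-up sizing formula:

```c
/* Hypothetical architecture override; the formula is illustrative only. */
int __init arch_probe_nr_irqs(void)
{
	nr_irqs = min_t(int, NR_IRQS, NR_IRQS_LEGACY + 8 * nr_cpu_ids);
	return 0;
}
```
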
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 0cd415ee62a2..74541ca49536 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
| @@ -170,7 +170,7 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) | |||
| 170 | * doesn't hit this CPU until we're ready. */ | 170 | * doesn't hit this CPU until we're ready. */ |
| 171 | get_cpu(); | 171 | get_cpu(); |
| 172 | for_each_online_cpu(i) { | 172 | for_each_online_cpu(i) { |
| 173 | sm_work = percpu_ptr(stop_machine_work, i); | 173 | sm_work = per_cpu_ptr(stop_machine_work, i); |
| 174 | INIT_WORK(sm_work, stop_cpu); | 174 | INIT_WORK(sm_work, stop_cpu); |
| 175 | queue_work_on(i, stop_machine_wq, sm_work); | 175 | queue_work_on(i, stop_machine_wq, sm_work); |
| 176 | } | 176 | } |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index e39679a72a3b..d24a10b8411a 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
| @@ -423,7 +423,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, | |||
| 423 | if (!bt->sequence) | 423 | if (!bt->sequence) |
| 424 | goto err; | 424 | goto err; |
| 425 | 425 | ||
| 426 | bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG); | 426 | bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char)); |
| 427 | if (!bt->msg_data) | 427 | if (!bt->msg_data) |
| 428 | goto err; | 428 | goto err; |
| 429 | 429 | ||
