path: root/kernel
author		Ingo Molnar <mingo@elte.hu>	2008-12-08 07:52:00 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-08 07:52:00 -0500
commit		4d117c5c6b00254e51c61ff5b506ccaba21a5a03 (patch)
tree		ecb9d3ee4bfd9610c459c1eb0c0357c2905f3ae4 /kernel
parent		6c415b9234a8c71f290e5d4fddc467f103f32719 (diff)
parent		43714539eab42b2fa3653ea7bd667b36c2291b11 (diff)

Merge branch 'sched/urgent' into sched/core
Diffstat (limited to 'kernel')
 kernel/cpu.c                   |  2 +-
 kernel/cpuset.c                |  2 +-
 kernel/irq/internals.h         |  2 ++
 kernel/irq/manage.c            | 68 ++++++++++++++++++++++++++++++++---------
 kernel/irq/migration.c         | 11 -------
 kernel/irq/proc.c              |  2 +-
 kernel/lockdep.c               |  4 ++--
 kernel/profile.c               |  4 ++--
 kernel/ptrace.c                |  4 ++--
 kernel/sched.c                 |  2 +-
 kernel/trace/ring_buffer.c     |  2 +-
 kernel/trace/trace_mmiotrace.c | 16 +++++-------
 kernel/trace/trace_stack.c     | 24 +++++++++++------
 13 files changed, 88 insertions(+), 55 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5a732c5ef08b..8ea32e8d68b0 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -462,7 +462,7 @@ out:
  * It must be called by the arch code on the new cpu, before the new cpu
  * enables interrupts and before the "boot" cpu returns from __cpu_up().
  */
-void notify_cpu_starting(unsigned int cpu)
+void __cpuinit notify_cpu_starting(unsigned int cpu)
 {
 	unsigned long val = CPU_STARTING;
 
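The change above tags notify_cpu_starting() with __cpuinit so it can be discarded together with the rest of the CPU bring-up code when hotplug support is compiled out. A minimal sketch of the same annotation pattern, assuming 2.6.28-era semantics (the macro was removed from later kernels) and a hypothetical my_cpu_callback:

    /* Sketch: __cpuinit places the function in a CPU-init section that
     * the kernel can free when CONFIG_HOTPLUG_CPU is not set. */
    static int __cpuinit my_cpu_callback(struct notifier_block *nb,
                                         unsigned long action, void *hcpu)
    {
            unsigned int cpu = (unsigned long)hcpu;

            if (action == CPU_ONLINE)
                    printk(KERN_INFO "cpu %u is up\n", cpu);
            return NOTIFY_OK;
    }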
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index da7ff6137f37..96c0ba13b8cd 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -585,7 +585,7 @@ static int generate_sched_domains(cpumask_t **domains,
 	int i, j, k;		/* indices for partition finding loops */
 	cpumask_t *doms;	/* resulting partition; i.e. sched domains */
 	struct sched_domain_attr *dattr;  /* attributes for custom domains */
-	int ndoms;		/* number of sched domains in result */
+	int ndoms = 0;		/* number of sched domains in result */
 	int nslot;		/* next empty doms[] cpumask_t slot */
 
 	doms = NULL;
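Initializing ndoms to 0 matters because generate_sched_domains() has early-exit paths that return the count before any partition has been computed; an uninitialized value handed back to the scheduler would be garbage. A reduced sketch of the failure mode (hypothetical names, assumes <linux/slab.h>, not the kernel code itself):

    static int count_domains(void)
    {
            int ndoms = 0;	/* read on the error path below */
            cpumask_t *doms = kmalloc(sizeof(*doms), GFP_KERNEL);

            if (doms == NULL)
                    return ndoms;	/* without "= 0" this is uninitialized */
            ndoms = 1;
            kfree(doms);
            return ndoms;
    }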
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index c9767e641980..64c1c7253dae 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -25,6 +25,8 @@ static inline void unregister_handler_proc(unsigned int irq,
 					   struct irqaction *action) { }
 #endif
 
+extern int irq_select_affinity_usr(unsigned int irq);
+
 /*
  * Debugging printout:
  */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c498a1b8c621..801addda3c43 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -82,24 +82,27 @@ int irq_can_set_affinity(unsigned int irq)
 int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
 
 	if (!desc->chip->set_affinity)
 		return -EINVAL;
 
+	spin_lock_irqsave(&desc->lock, flags);
+
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&desc->lock, flags);
 		desc->affinity = cpumask;
 		desc->chip->set_affinity(irq, cpumask);
-		spin_unlock_irqrestore(&desc->lock, flags);
-	} else
-		set_pending_irq(irq, cpumask);
+	} else {
+		desc->status |= IRQ_MOVE_PENDING;
+		desc->pending_mask = cpumask;
+	}
 #else
 	desc->affinity = cpumask;
 	desc->chip->set_affinity(irq, cpumask);
 #endif
+	desc->status |= IRQ_AFFINITY_SET;
+	spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
 }
 
@@ -107,24 +110,59 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 /*
  * Generic version of the affinity autoselector.
  */
-int irq_select_affinity(unsigned int irq)
+int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
 {
 	cpumask_t mask;
-	struct irq_desc *desc;
 
 	if (!irq_can_set_affinity(irq))
 		return 0;
 
 	cpus_and(mask, cpu_online_map, irq_default_affinity);
 
-	desc = irq_to_desc(irq);
+	/*
+	 * Preserve an userspace affinity setup, but make sure that
+	 * one of the targets is online.
+	 */
+	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
+		if (cpus_intersects(desc->affinity, cpu_online_map))
+			mask = desc->affinity;
+		else
+			desc->status &= ~IRQ_AFFINITY_SET;
+	}
+
 	desc->affinity = mask;
 	desc->chip->set_affinity(irq, mask);
 
 	return 0;
 }
+#else
+static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
+{
+	return irq_select_affinity(irq);
+}
 #endif
 
+/*
+ * Called when affinity is set via /proc/irq
+ */
+int irq_select_affinity_usr(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	ret = do_irq_select_affinity(irq, desc);
+	spin_unlock_irqrestore(&desc->lock, flags);
+
+	return ret;
+}
+
+#else
+static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
+{
+	return 0;
+}
 #endif
 
 /**
@@ -327,7 +365,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		 * IRQF_TRIGGER_* but the PIC does not support multiple
 		 * flow-types?
 		 */
-		pr_warning("No set_type function for IRQ %d (%s)\n", irq,
+		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
 				chip ? (chip->name ? : "unknown") : "unknown");
 		return 0;
 	}
@@ -445,8 +483,12 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 		/* Undo nested disables: */
 		desc->depth = 1;
 
+		/* Exclude IRQ from balancing if requested */
+		if (new->flags & IRQF_NOBALANCING)
+			desc->status |= IRQ_NO_BALANCING;
+
 		/* Set default affinity mask once everything is setup */
-		irq_select_affinity(irq);
+		do_irq_select_affinity(irq, desc);
 
 	} else if ((new->flags & IRQF_TRIGGER_MASK)
 			&& (new->flags & IRQF_TRIGGER_MASK)
@@ -459,10 +501,6 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 
 	*p = new;
 
-	/* Exclude IRQ from balancing */
-	if (new->flags & IRQF_NOBALANCING)
-		desc->status |= IRQ_NO_BALANCING;
-
 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
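Taken together, the manage.c changes put all of irq_set_affinity() under a single desc->lock section, fold the old set_pending_irq() helper inline (hence its removal from migration.c below), and record userspace-driven settings with IRQ_AFFINITY_SET so that do_irq_select_affinity() can preserve them, e.g. across a free/request_irq() cycle. A condensed sketch of the resulting locking pattern, assuming a desc-like structure and a hypothetical can_move_in_process_context() predicate:

    spin_lock_irqsave(&desc->lock, flags);
    if (can_move_in_process_context(desc)) {
            desc->affinity = cpumask;		/* retarget immediately */
            desc->chip->set_affinity(irq, cpumask);
    } else {
            desc->status |= IRQ_MOVE_PENDING;	/* finished later, from  */
            desc->pending_mask = cpumask;		/* the masked-IRQ path   */
    }
    desc->status |= IRQ_AFFINITY_SET;		/* remember user's choice */
    spin_unlock_irqrestore(&desc->lock, flags);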
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 90b920d3f52b..9db681d95814 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -1,17 +1,6 @@
 
 #include <linux/irq.h>
 
-void set_pending_irq(unsigned int irq, cpumask_t mask)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	unsigned long flags;
-
-	spin_lock_irqsave(&desc->lock, flags);
-	desc->status |= IRQ_MOVE_PENDING;
-	desc->pending_mask = mask;
-	spin_unlock_irqrestore(&desc->lock, flags);
-}
-
 void move_masked_irq(int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 4d161c70ba55..d257e7d6a8a4 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -62,7 +62,7 @@ static ssize_t irq_affinity_proc_write(struct file *file,
 	if (!cpus_intersects(new_value, cpu_online_map))
 		/* Special case for empty set - allow the architecture
 		   code to set default SMP affinity. */
-		return irq_select_affinity(irq) ? -EINVAL : count;
+		return irq_select_affinity_usr(irq) ? -EINVAL : count;
 
 	irq_set_affinity(irq, new_value);
 
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 06e157119d2b..46a404173db2 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3276,10 +3276,10 @@ void __init lockdep_info(void)
 {
 	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
 
 	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
 	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
 	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
 	printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
 	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
 	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
 	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
diff --git a/kernel/profile.c b/kernel/profile.c
index 5b7d1ac7124c..dc41827fbfee 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -351,7 +351,7 @@ out:
 	put_cpu();
 }
 
-static int __devinit profile_cpu_callback(struct notifier_block *info,
+static int __cpuinit profile_cpu_callback(struct notifier_block *info,
 					unsigned long action, void *__cpu)
 {
 	int node, cpu = (unsigned long)__cpu;
@@ -596,7 +596,7 @@ out_cleanup:
 #define create_hash_tables()			({ 0; })
 #endif
 
-int create_proc_profile(void)
+int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
 {
 	struct proc_dir_entry *entry;
 
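Both profile.c hunks are section-annotation fixes: profile_cpu_callback() is CPU-hotplug code, so __cpuinit (not __devinit) is the right section, and create_proc_profile() takes __ref because its reference to the __cpuinit callback through hotcpu_notifier() trips modpost even though the reference is safe. A minimal sketch of that pattern, with a hypothetical demo callback:

    static int __cpuinit demo_cpu_callback(struct notifier_block *nb,
                                           unsigned long action, void *hcpu)
    {
            return NOTIFY_OK;
    }

    /* __ref: referencing __cpuinit code from regular text is deliberate
     * here, so the section-mismatch warning is suppressed. */
    int __ref demo_register(void)
    {
            hotcpu_notifier(demo_cpu_callback, 0);
            return 0;
    }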
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 1e68e4c39e2c..4c8bcd7dd8e0 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -612,7 +612,7 @@ int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
 	return (copied == sizeof(data)) ? 0 : -EIO;
 }
 
-#if defined CONFIG_COMPAT && defined __ARCH_WANT_COMPAT_SYS_PTRACE
+#if defined CONFIG_COMPAT
 #include <linux/compat.h>
 
 int compat_ptrace_request(struct task_struct *child, compat_long_t request,
@@ -709,4 +709,4 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
 	unlock_kernel();
 	return ret;
 }
-#endif	/* CONFIG_COMPAT && __ARCH_WANT_COMPAT_SYS_PTRACE */
+#endif	/* CONFIG_COMPAT */
diff --git a/kernel/sched.c b/kernel/sched.c
index 4c7388ef5be7..152828239ef0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1443,7 +1443,7 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 static unsigned long cpu_avg_load_per_task(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	unsigned long nr_running = rq->nr_running;
+	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
 
 	if (nr_running)
 		rq->avg_load_per_task = rq->load.weight / nr_running;
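ACCESS_ONCE() forces a single load of rq->nr_running; without it the compiler may reload the field after the zero check, and a concurrent decrement to zero would turn the division into a divide-by-zero. The idiom in miniature (variable names hypothetical):

    unsigned long nr = ACCESS_ONCE(rq->nr_running);	/* one load, reused */

    if (nr)
            avg = rq->load.weight / nr;	/* nr cannot change under us */
    else
            avg = 0;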
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f780e9552f91..668bbb5ef2bd 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1215,7 +1215,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 
  out:
 	if (resched)
-		preempt_enable_notrace();
+		preempt_enable_no_resched_notrace();
 	else
 		preempt_enable_notrace();
 	return NULL;
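The reserve path saves need_resched() and disables preemption with the notrace variant, so the failure path must re-enable with the matching no-resched call when resched was pending; using plain preempt_enable_notrace() on both branches could reschedule from inside the tracer. The intended pairing, sketched:

    resched = need_resched();
    preempt_disable_notrace();

    /* ... attempt the reservation; on the failure path: */
    if (resched)
            preempt_enable_no_resched_notrace();	/* caller reschedules */
    else
            preempt_enable_notrace();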
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index f28484618ff0..e62cbf78eab6 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -18,12 +18,14 @@ struct header_iter {
 
 static struct trace_array *mmio_trace_array;
 static bool overrun_detected;
+static unsigned long prev_overruns;
 
 static void mmio_reset_data(struct trace_array *tr)
 {
 	int cpu;
 
 	overrun_detected = false;
+	prev_overruns = 0;
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
@@ -128,16 +130,12 @@ static void mmio_close(struct trace_iterator *iter)
 
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
-	int cpu;
 	unsigned long cnt = 0;
-/* FIXME: */
-#if 0
-	for_each_online_cpu(cpu) {
-		cnt += iter->overrun[cpu];
-		iter->overrun[cpu] = 0;
-	}
-#endif
-	(void)cpu;
+	unsigned long over = ring_buffer_overruns(iter->tr->buffer);
+
+	if (over > prev_overruns)
+		cnt = over - prev_overruns;
+	prev_overruns = over;
 	return cnt;
 }
 
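count_overruns() now derives its result from ring_buffer_overruns(), which reports a monotonically growing total for the whole buffer, replacing the dead per-cpu code under #if 0. Reporting deltas against a remembered previous total looks like this in isolation (total_overruns() is a hypothetical getter):

    static unsigned long prev;			/* total reported so far */

    static unsigned long overruns_delta(void)
    {
            unsigned long now = total_overruns();	/* monotonic total */
            unsigned long delta = (now > prev) ? now - prev : 0;

            prev = now;	/* next call reports only new overruns */
            return delta;
    }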
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index be682b62fe58..3bdb44bde4b7 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -184,11 +184,16 @@ static struct file_operations stack_max_size_fops = {
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	long i = (long)m->private;
+	long i;
 
 	(*pos)++;
 
-	i++;
+	if (v == SEQ_START_TOKEN)
+		i = 0;
+	else {
+		i = *(long *)v;
+		i++;
+	}
 
 	if (i >= max_stack_trace.nr_entries ||
 	    stack_dump_trace[i] == ULONG_MAX)
@@ -201,12 +206,15 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
-	void *t = &m->private;
+	void *t = SEQ_START_TOKEN;
 	loff_t l = 0;
 
 	local_irq_disable();
 	__raw_spin_lock(&max_stack_lock);
 
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
+
 	for (; t && l < *pos; t = t_next(m, t, &l))
 		;
 
@@ -235,10 +243,10 @@ static int trace_lookup_stack(struct seq_file *m, long i)
 
 static int t_show(struct seq_file *m, void *v)
 {
-	long i = *(long *)v;
+	long i;
 	int size;
 
-	if (i < 0) {
+	if (v == SEQ_START_TOKEN) {
 		seq_printf(m, "        Depth    Size   Location"
 			   "    (%d entries)\n"
 			   "        -----    ----   --------\n",
@@ -246,6 +254,8 @@ static int t_show(struct seq_file *m, void *v)
 		return 0;
 	}
 
+	i = *(long *)v;
+
 	if (i >= max_stack_trace.nr_entries ||
 	    stack_dump_trace[i] == ULONG_MAX)
 		return 0;
@@ -275,10 +285,6 @@ static int stack_trace_open(struct inode *inode, struct file *file)
 	int ret;
 
 	ret = seq_open(file, &stack_trace_seq_ops);
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-		m->private = (void *)-1;
-	}
 
 	return ret;
 }
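The trace_stack.c changes convert the iterator to the standard seq_file SEQ_START_TOKEN protocol: t_start() hands back the token for *pos == 0, t_show() prints the header row when it sees it, and t_next() maps the token to index 0, removing the old (void *)-1 sentinel stored in m->private. The skeleton of that protocol, reduced to its essentials (nth_item() and show_item() are hypothetical):

    static void *demo_start(struct seq_file *m, loff_t *pos)
    {
            /* header first, then the real records */
            return (*pos == 0) ? SEQ_START_TOKEN : nth_item(*pos - 1);
    }

    static int demo_show(struct seq_file *m, void *v)
    {
            if (v == SEQ_START_TOKEN) {
                    seq_puts(m, "Depth Size Location\n");	/* header row */
                    return 0;
            }
            show_item(m, v);	/* normal record */
            return 0;
    }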