author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2009-08-26 23:12:40 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2009-08-26 23:12:40 -0400
commit     f480fe3916de2e2cbb6e384cb685f0f1d8272188 (patch)
tree       141c60a2a263f1c01907e692d4aef1e6b699d8c7 /kernel
parent     4f0dbc2781b9dc457159b676289f874ab2dc3560 (diff)
parent     f415c413f458837bd0c27086b79aca889f9435e4 (diff)
Merge commit 'origin/master' into next
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_counter.c         17
-rw-r--r--  kernel/time/clockevents.c     16
-rw-r--r--  kernel/time/tick-broadcast.c   7
-rw-r--r--  kernel/time/timer_list.c       2
-rw-r--r--  kernel/trace/ftrace.c         17
-rw-r--r--  kernel/trace/trace.c          12
6 files changed, 43 insertions, 28 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 534e20d14d63..f274e1959885 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1503,10 +1503,21 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
  */
 static void __perf_counter_read(void *info)
 {
+        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
         struct perf_counter *counter = info;
         struct perf_counter_context *ctx = counter->ctx;
         unsigned long flags;
 
+        /*
+         * If this is a task context, we need to check whether it is
+         * the current task context of this cpu. If not it has been
+         * scheduled out before the smp call arrived. In that case
+         * counter->count would have been updated to a recent sample
+         * when the counter was scheduled out.
+         */
+        if (ctx->task && cpuctx->task_ctx != ctx)
+                return;
+
         local_irq_save(flags);
         if (ctx->is_active)
                 update_context_time(ctx);
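
The guard added above handles a race inherent to reading a counter on another CPU: by the time the cross-CPU call arrives, the task owning the context may already have been scheduled out, and the cached count is then the freshest value available. A minimal sketch of that check, using hypothetical names (demo_ctx, demo_cpu_state, demo_read_on_cpu) rather than the kernel's perf structures:

/* Sketch only: illustrates the "bail out if the context was scheduled out"
 * check, not the actual perf_counter data structures. */
struct demo_ctx;                                /* hypothetical context type */

struct demo_cpu_state {
        struct demo_ctx *task_ctx;              /* context currently live on this CPU */
};

/* Runs on the target CPU (e.g. via an smp cross-call). */
static void demo_read_on_cpu(struct demo_cpu_state *cpu, struct demo_ctx *ctx,
                             int is_task_ctx)
{
        /*
         * A task context can be scheduled out between the request and the
         * arrival of the cross-CPU call; its count was saved at that point,
         * so there is nothing left to read.
         */
        if (is_task_ctx && cpu->task_ctx != ctx)
                return;

        /* ... read the live hardware counter here ... */
}
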
@@ -1780,7 +1791,7 @@ static int perf_counter_read_group(struct perf_counter *counter,
         size += err;
 
         list_for_each_entry(sub, &leader->sibling_list, list_entry) {
-                err = perf_counter_read_entry(counter, read_format,
+                err = perf_counter_read_entry(sub, read_format,
                                 buf + size);
                 if (err < 0)
                         return err;
@@ -2008,6 +2019,10 @@ int perf_counter_task_disable(void)
         return 0;
 }
 
+#ifndef PERF_COUNTER_INDEX_OFFSET
+# define PERF_COUNTER_INDEX_OFFSET 0
+#endif
+
 static int perf_counter_index(struct perf_counter *counter)
 {
         if (counter->state != PERF_COUNTER_STATE_ACTIVE)
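
The new PERF_COUNTER_INDEX_OFFSET block is the usual overridable-default idiom: a header included earlier (for example an architecture's perf header) may already define the macro, and the #ifndef supplies 0 for everyone else. A standalone sketch of the idiom with a made-up macro and function (DEMO_INDEX_OFFSET, demo_index); build it as-is for the default, or with -DDEMO_INDEX_OFFSET=1 to override:

#include <stdio.h>

/* Generic default; an "architecture" (here: the compiler command line)
 * may have defined its own value already. */
#ifndef DEMO_INDEX_OFFSET
# define DEMO_INDEX_OFFSET 0
#endif

static int demo_index(int hw_idx)
{
        /* hypothetical index calculation biased by the per-arch offset */
        return hw_idx + 1 - DEMO_INDEX_OFFSET;
}

int main(void)
{
        printf("index for hardware slot 3: %d\n", demo_index(3));
        return 0;
}
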
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index a6dcd67b041d..620b58abdc32 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -137,11 +137,12 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
  */
 int clockevents_register_notifier(struct notifier_block *nb)
 {
+        unsigned long flags;
         int ret;
 
-        spin_lock(&clockevents_lock);
+        spin_lock_irqsave(&clockevents_lock, flags);
         ret = raw_notifier_chain_register(&clockevents_chain, nb);
-        spin_unlock(&clockevents_lock);
+        spin_unlock_irqrestore(&clockevents_lock, flags);
 
         return ret;
 }
@@ -178,16 +179,18 @@ static void clockevents_notify_released(void)
  */
 void clockevents_register_device(struct clock_event_device *dev)
 {
+        unsigned long flags;
+
         BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
         BUG_ON(!dev->cpumask);
 
-        spin_lock(&clockevents_lock);
+        spin_lock_irqsave(&clockevents_lock, flags);
 
         list_add(&dev->list, &clockevent_devices);
         clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
         clockevents_notify_released();
 
-        spin_unlock(&clockevents_lock);
+        spin_unlock_irqrestore(&clockevents_lock, flags);
 }
 EXPORT_SYMBOL_GPL(clockevents_register_device);
 
@@ -235,8 +238,9 @@ void clockevents_exchange_device(struct clock_event_device *old,
 void clockevents_notify(unsigned long reason, void *arg)
 {
         struct list_head *node, *tmp;
+        unsigned long flags;
 
-        spin_lock(&clockevents_lock);
+        spin_lock_irqsave(&clockevents_lock, flags);
         clockevents_do_notify(reason, arg);
 
         switch (reason) {
@@ -251,7 +255,7 @@ void clockevents_notify(unsigned long reason, void *arg)
         default:
                 break;
         }
-        spin_unlock(&clockevents_lock);
+        spin_unlock_irqrestore(&clockevents_lock, flags);
 }
 EXPORT_SYMBOL_GPL(clockevents_notify);
 #endif
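
Every clockevents_lock site above moves from spin_lock() to spin_lock_irqsave(). The irqsave variants are the safe choice when a lock may be taken from paths that already run with interrupts disabled: they save the caller's interrupt state instead of unconditionally re-enabling interrupts on unlock, and they keep an interrupt on the same CPU from spinning on a lock that its interrupted code still holds. A minimal sketch of the pattern with a hypothetical lock and data; this shows the shape of the code, not a buildable module:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);      /* hypothetical lock */
static int demo_shared;                 /* data it protects */

static void demo_update(int value)
{
        unsigned long flags;

        /* Disable local interrupts and remember whether they were on. */
        spin_lock_irqsave(&demo_lock, flags);
        demo_shared = value;
        /* Restore whatever interrupt state the caller had. */
        spin_unlock_irqrestore(&demo_lock, flags);
}
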
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 877dbedc3118..c2ec25087a35 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -205,11 +205,11 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
  * Powerstate information: The system enters/leaves a state, where
  * affected devices might stop
  */
-static void tick_do_broadcast_on_off(void *why)
+static void tick_do_broadcast_on_off(unsigned long *reason)
 {
         struct clock_event_device *bc, *dev;
         struct tick_device *td;
-        unsigned long flags, *reason = why;
+        unsigned long flags;
         int cpu, bc_stopped;
 
         spin_lock_irqsave(&tick_broadcast_lock, flags);
@@ -276,8 +276,7 @@ void tick_broadcast_on_off(unsigned long reason, int *oncpu)
                 printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
                        "offline CPU #%d\n", *oncpu);
         else
-                smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
-                                         &reason, 1);
+                tick_do_broadcast_on_off(&reason);
 }
 
 /*
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index a999b92a1277..fddd69d16e03 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -286,7 +286,7 @@ static int __init init_timer_list_procfs(void)
 {
         struct proc_dir_entry *pe;
 
-        pe = proc_create("timer_list", 0644, NULL, &timer_list_fops);
+        pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
         if (!pe)
                 return -ENOMEM;
         return 0;
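
The timer_list change narrows the procfs mode from 0644 to 0444; the file only implements reads, so the advertised permissions should say so. A sketch of creating a read-only procfs entry the same way, using made-up names (demo_entry, demo_show, demo_fops) and the file_operations-based proc_create() of this kernel generation; the init function is shown without the matching module boilerplate:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
        seq_printf(m, "read-only contents\n");
        return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
        return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_fops = {
        .open    = demo_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

static int __init demo_init(void)
{
        /* 0444: world-readable, writable by nobody, matching the fops. */
        if (!proc_create("demo_entry", 0444, NULL, &demo_fops))
                return -ENOMEM;
        return 0;
}
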
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1e1d23c26308..25edd5cc5935 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2278,7 +2278,11 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
         read++;
         cnt--;
 
-        if (!(iter->flags & ~FTRACE_ITER_CONT)) {
+        /*
+         * If the parser haven't finished with the last write,
+         * continue reading the user input without skipping spaces.
+         */
+        if (!(iter->flags & FTRACE_ITER_CONT)) {
                 /* skip white space */
                 while (cnt && isspace(ch)) {
                         ret = get_user(ch, ubuf++);
@@ -2288,8 +2292,9 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
                         cnt--;
                 }
 
+                /* only spaces were written */
                 if (isspace(ch)) {
-                        file->f_pos += read;
+                        *ppos += read;
                         ret = read;
                         goto out;
                 }
@@ -2319,12 +2324,12 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
                 if (ret)
                         goto out;
                 iter->buffer_idx = 0;
-        } else
+        } else {
                 iter->flags |= FTRACE_ITER_CONT;
+                iter->buffer[iter->buffer_idx++] = ch;
+        }
 
-
-        file->f_pos += read;
-
+        *ppos += read;
         ret = read;
  out:
         mutex_unlock(&ftrace_regex_lock);
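
Two of the ftrace hunks replace direct file->f_pos updates with updates through *ppos. A write handler receives its offset via the ppos argument, and advancing that value rather than reaching into struct file keeps the handler correct for pwrite() and leaves the f_pos bookkeeping to the VFS. A sketch of the convention with a hypothetical handler (demo_write), trimmed down to the offset handling:

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t demo_write(struct file *file, const char __user *ubuf,
                          size_t cnt, loff_t *ppos)
{
        char kbuf[64];

        if (cnt >= sizeof(kbuf))
                cnt = sizeof(kbuf) - 1;
        if (copy_from_user(kbuf, ubuf, cnt))
                return -EFAULT;
        kbuf[cnt] = '\0';

        /* ... parse kbuf here ... */

        *ppos += cnt;           /* advance the offset the VFS handed in */
        return cnt;             /* report how much was consumed */
}
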
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c22b40f8f576..8c358395d338 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3896,17 +3896,9 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
         if (ret < 0)
                 return ret;
 
-        switch (val) {
-        case 0:
-                trace_flags &= ~(1 << index);
-                break;
-        case 1:
-                trace_flags |= 1 << index;
-                break;
-
-        default:
+        if (val != 0 && val != 1)
                 return -EINVAL;
-        }
+        set_tracer_flags(1 << index, val);
 
         *ppos += cnt;
 
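
The trace.c hunk folds the open-coded switch into a single set_tracer_flags(1 << index, val) call, keeping only the 0/1 validation at the call site. The underlying pattern is simply "set or clear a bit according to a value"; here is a standalone sketch with made-up names (demo_flags, set_demo_flags) rather than the tracer's internals:

#include <stdio.h>

static unsigned long demo_flags;                /* stand-in for trace_flags */

static void set_demo_flags(unsigned long mask, int enabled)
{
        if (enabled)
                demo_flags |= mask;             /* val == 1: set the bit   */
        else
                demo_flags &= ~mask;            /* val == 0: clear the bit */
}

int main(void)
{
        set_demo_flags(1UL << 3, 1);
        set_demo_flags(1UL << 3, 0);
        set_demo_flags(1UL << 1, 1);
        printf("flags: %#lx\n", demo_flags);    /* prints flags: 0x2 */
        return 0;
}
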