commit     29e2035bddecce3eb584a8304528b50da8370a24
tree       13155df7d90a8e287b83a1cd6c0d02c3018212ab /kernel
parent     868489660dabc0c28087cca3dbc1adbbc398c6fe
parent     37d0892c5a94e208cf863e3b7bac014edee4346d
author     Ingo Molnar <mingo@elte.hu>  2009-09-04 03:28:52 -0400
committer  Ingo Molnar <mingo@elte.hu>  2009-09-04 03:29:05 -0400
Merge branch 'linus' into core/rcu
Merge reason: Avoid fuzz in init/main.c and update from rc6 to rc8.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')

 -rw-r--r--  kernel/fork.c                | 21
 -rw-r--r--  kernel/irq/manage.c          | 10
 -rw-r--r--  kernel/module.c              | 10
 -rw-r--r--  kernel/perf_counter.c        | 17
 -rw-r--r--  kernel/sysctl.c              |  7
 -rw-r--r--  kernel/time/clockevents.c    | 16
 -rw-r--r--  kernel/time/tick-broadcast.c |  7
 -rw-r--r--  kernel/time/timer_list.c     |  2
 -rw-r--r--  kernel/trace/ftrace.c        | 17
 -rw-r--r--  kernel/trace/trace.c         | 12

 10 files changed, 68 insertions(+), 51 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 642e8b5edf00..637520ca0386 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -426,7 +426,6 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
 	init_rwsem(&mm->mmap_sem);
 	INIT_LIST_HEAD(&mm->mmlist);
 	mm->flags = (current->mm) ? current->mm->flags : default_dump_filter;
-	mm->oom_adj = (current->mm) ? current->mm->oom_adj : 0;
 	mm->core_state = NULL;
 	mm->nr_ptes = 0;
 	set_mm_counter(mm, file_rss, 0);
@@ -816,11 +815,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 {
 	struct signal_struct *sig;
 
-	if (clone_flags & CLONE_THREAD) {
-		atomic_inc(&current->signal->count);
-		atomic_inc(&current->signal->live);
+	if (clone_flags & CLONE_THREAD)
 		return 0;
-	}
 
 	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
 	tsk->signal = sig;
@@ -878,16 +874,6 @@ void __cleanup_signal(struct signal_struct *sig)
 	kmem_cache_free(signal_cachep, sig);
 }
 
-static void cleanup_signal(struct task_struct *tsk)
-{
-	struct signal_struct *sig = tsk->signal;
-
-	atomic_dec(&sig->live);
-
-	if (atomic_dec_and_test(&sig->count))
-		__cleanup_signal(sig);
-}
-
 static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
 	unsigned long new_flags = p->flags;
@@ -1237,6 +1223,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	}
 
 	if (clone_flags & CLONE_THREAD) {
+		atomic_inc(&current->signal->count);
+		atomic_inc(&current->signal->live);
 		p->group_leader = current->group_leader;
 		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
 	}
@@ -1280,7 +1268,8 @@ bad_fork_cleanup_mm:
 	if (p->mm)
 		mmput(p->mm);
 bad_fork_cleanup_signal:
-	cleanup_signal(p);
+	if (!(clone_flags & CLONE_THREAD))
+		__cleanup_signal(p->signal);
 bad_fork_cleanup_sighand:
 	__cleanup_sighand(p->sighand);
 bad_fork_cleanup_fs:
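
The fork.c change moves the signal->count/signal->live increments out of copy_signal() and into the CLONE_THREAD branch of copy_process(), so the bad_fork_cleanup_signal error path can decide what to free from clone_flags alone. A minimal userspace sketch of that pairing, with invented names (an analogy, not the kernel code):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct shared_sig {
	atomic_int count;	/* references to the structure itself */
	atomic_int live;	/* live threads attached to it */
};

/* CLONE_THREAD-style attach: take both references at the single place
 * that knows the struct is being shared. */
static void attach_thread(struct shared_sig *sig)
{
	atomic_fetch_add(&sig->count, 1);
	atomic_fetch_add(&sig->live, 1);
}

/* Error path: only a freshly allocated, never-shared struct is freed;
 * the shared case is fully described by the "shared" condition alone. */
static void cleanup_on_error(struct shared_sig *sig, int shared)
{
	if (!shared)
		free(sig);
}

int main(void)
{
	struct shared_sig *sig = calloc(1, sizeof(*sig));

	attach_thread(sig);
	printf("count=%d live=%d\n",
	       atomic_load(&sig->count), atomic_load(&sig->live));
	cleanup_on_error(sig, 1);	/* shared: not freed here */
	free(sig);
	return 0;
}

The point is that reference acquisition and the error-path test now key off the same condition, so neither can drift out of sync with the other.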
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index d222515a5a06..0ec9ed831737 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -607,7 +607,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	 */
 		get_task_struct(t);
 		new->thread = t;
-		wake_up_process(t);
 	}
 
 	/*
@@ -690,6 +689,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 				(int)(new->flags & IRQF_TRIGGER_MASK));
 	}
 
+	new->irq = irq;
 	*old_ptr = new;
 
 	/* Reset broken irq detection when installing new handler */
@@ -707,7 +707,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 	spin_unlock_irqrestore(&desc->lock, flags);
 
-	new->irq = irq;
+	/*
+	 * Strictly no need to wake it up, but hung_task complains
+	 * when no hard interrupt wakes the thread up.
+	 */
+	if (new->thread)
+		wake_up_process(new->thread);
+
 	register_irq_proc(irq, desc);
 	new->dir = NULL;
 	register_handler_proc(irq, new);
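
The manage.c hunks delay wake_up_process() until new->irq is assigned and the action is linked in, so the handler thread can never observe a half-initialized irqaction. The same publish-then-wake discipline in portable pthreads form (hypothetical names; compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

struct action {
	pthread_mutex_t lock;
	pthread_cond_t  ready;
	int irq;	/* must be valid before the worker runs */
	int published;
};

static void *worker(void *arg)
{
	struct action *a = arg;

	pthread_mutex_lock(&a->lock);
	while (!a->published)	/* sleep until fully set up */
		pthread_cond_wait(&a->ready, &a->lock);
	printf("worker sees irq=%d\n", a->irq);	/* never a stale value */
	pthread_mutex_unlock(&a->lock);
	return NULL;
}

int main(void)
{
	struct action a = { PTHREAD_MUTEX_INITIALIZER,
			    PTHREAD_COND_INITIALIZER, 0, 0 };
	pthread_t t;

	pthread_create(&t, NULL, worker, &a);

	pthread_mutex_lock(&a.lock);
	a.irq = 42;			/* publish every field first ... */
	a.published = 1;
	pthread_cond_signal(&a.ready);	/* ... then wake the consumer */
	pthread_mutex_unlock(&a.lock);

	pthread_join(t, NULL);
	return 0;
}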
diff --git a/kernel/module.c b/kernel/module.c
index fd1411403558..2d537186191f 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -909,16 +909,18 @@ void __symbol_put(const char *symbol)
 }
 EXPORT_SYMBOL(__symbol_put);
 
+/* Note this assumes addr is a function, which it currently always is. */
 void symbol_put_addr(void *addr)
 {
 	struct module *modaddr;
+	unsigned long a = (unsigned long)dereference_function_descriptor(addr);
 
-	if (core_kernel_text((unsigned long)addr))
+	if (core_kernel_text(a))
 		return;
 
 	/* module_text_address is safe here: we're supposed to have reference
 	 * to module from symbol_get, so it can't go away. */
-	modaddr = __module_text_address((unsigned long)addr);
+	modaddr = __module_text_address(a);
 	BUG_ON(!modaddr);
 	module_put(modaddr);
 }
@@ -1272,6 +1274,10 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect,
 	struct module_notes_attrs *notes_attrs;
 	struct bin_attribute *nattr;
 
+	/* failed to create section attributes, so can't create notes */
+	if (!mod->sect_attrs)
+		return;
+
 	/* Count notes sections and allocate structures. */
 	notes = 0;
 	for (i = 0; i < nsect; i++)
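
The first module.c hunk matters on ABIs such as ia64, where a C function pointer refers to a function descriptor rather than to the code itself, so address-range checks like core_kernel_text() need the dereferenced entry address. A toy illustration of the descriptor idea (the struct layout here is invented, not the kernel's):

#include <stdio.h>

struct func_desc {
	unsigned long entry;	/* actual code address */
	unsigned long gp;	/* global-pointer/TOC value */
};

/* On such ABIs, "dereferencing" the pointer yields the code address. */
static unsigned long deref_function_descriptor(void *ptr)
{
	return ((struct func_desc *)ptr)->entry;
}

int main(void)
{
	struct func_desc d = { 0x40001000UL, 0x60000000UL };

	/* Range checks must use the code address, not the address of
	 * the descriptor that the function pointer actually holds. */
	printf("code address: %#lx\n", deref_function_descriptor(&d));
	return 0;
}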
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 534e20d14d63..f274e1959885 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1503,10 +1503,21 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
  */
 static void __perf_counter_read(void *info)
 {
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	struct perf_counter *counter = info;
 	struct perf_counter_context *ctx = counter->ctx;
 	unsigned long flags;
 
+	/*
+	 * If this is a task context, we need to check whether it is
+	 * the current task context of this cpu. If not it has been
+	 * scheduled out before the smp call arrived. In that case
+	 * counter->count would have been updated to a recent sample
+	 * when the counter was scheduled out.
+	 */
+	if (ctx->task && cpuctx->task_ctx != ctx)
+		return;
+
 	local_irq_save(flags);
 	if (ctx->is_active)
 		update_context_time(ctx);
@@ -1780,7 +1791,7 @@ static int perf_counter_read_group(struct perf_counter *counter,
 	size += err;
 
 	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
-		err = perf_counter_read_entry(counter, read_format,
+		err = perf_counter_read_entry(sub, read_format,
 				buf + size);
 		if (err < 0)
 			return err;
@@ -2008,6 +2019,10 @@ int perf_counter_task_disable(void)
 	return 0;
 }
 
+#ifndef PERF_COUNTER_INDEX_OFFSET
+# define PERF_COUNTER_INDEX_OFFSET 0
+#endif
+
 static int perf_counter_index(struct perf_counter *counter)
 {
 	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
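
The middle perf_counter.c hunk fixes a copy-paste bug: the sibling loop read the group leader ("counter") on every iteration instead of the current element ("sub"). A compilable miniature of the same mistake and its fix:

#include <stdio.h>

struct counter { const char *name; long value; };

static long read_entry(const struct counter *c)
{
	return c->value;
}

int main(void)
{
	struct counter leader = { "leader", 1 };
	struct counter sibs[] = { { "sib0", 10 }, { "sib1", 20 } };
	long buggy = read_entry(&leader), fixed = read_entry(&leader);

	for (unsigned i = 0; i < 2; i++) {
		buggy += read_entry(&leader);	/* old code: reread the leader */
		fixed += read_entry(&sibs[i]);	/* fix: read the loop element */
	}
	printf("buggy=%ld fixed=%ld\n", buggy, fixed);	/* 3 vs 31 */
	return 0;
}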
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 98e02328c67d..58be76017fd0 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -49,6 +49,7 @@
 #include <linux/acpi.h>
 #include <linux/reboot.h>
 #include <linux/ftrace.h>
+#include <linux/security.h>
 #include <linux/slow-work.h>
 #include <linux/perf_counter.h>
 
@@ -1306,10 +1307,10 @@ static struct ctl_table vm_table[] = {
 	{
 		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "mmap_min_addr",
-		.data		= &mmap_min_addr,
+		.data		= &dac_mmap_min_addr,
 		.maxlen		= sizeof(unsigned long),
 		.mode		= 0644,
-		.proc_handler	= &proc_doulongvec_minmax,
+		.proc_handler	= &mmap_min_addr_handler,
 	},
 #ifdef CONFIG_NUMA
 	{
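
The sysctl now stores the DAC component in dac_mmap_min_addr and routes writes through mmap_min_addr_handler, so a combined effective value can be recomputed when the knob changes. The handler itself is defined outside this diffstat; the sketch below only shows the assumed shape, with invented names and an invented floor value:

#include <stdio.h>

/* All names and the 4096 floor are invented for illustration. */
static unsigned long dac_min;		/* what the sysctl cell stores */
static unsigned long effective_min;	/* cached value the rest of the code reads */

/* Accept a validated write, then re-derive the cached combination. */
static void min_addr_write(unsigned long val)
{
	dac_min = val;
	effective_min = dac_min > 4096 ? dac_min : 4096;
}

int main(void)
{
	min_addr_write(65536);
	printf("effective minimum: %lu\n", effective_min);
	return 0;
}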
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index a6dcd67b041d..620b58abdc32 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -137,11 +137,12 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
  */
 int clockevents_register_notifier(struct notifier_block *nb)
 {
+	unsigned long flags;
 	int ret;
 
-	spin_lock(&clockevents_lock);
+	spin_lock_irqsave(&clockevents_lock, flags);
 	ret = raw_notifier_chain_register(&clockevents_chain, nb);
-	spin_unlock(&clockevents_lock);
+	spin_unlock_irqrestore(&clockevents_lock, flags);
 
 	return ret;
 }
@@ -178,16 +179,18 @@ static void clockevents_notify_released(void)
  */
 void clockevents_register_device(struct clock_event_device *dev)
 {
+	unsigned long flags;
+
 	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
 	BUG_ON(!dev->cpumask);
 
-	spin_lock(&clockevents_lock);
+	spin_lock_irqsave(&clockevents_lock, flags);
 
 	list_add(&dev->list, &clockevent_devices);
 	clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
 	clockevents_notify_released();
 
-	spin_unlock(&clockevents_lock);
+	spin_unlock_irqrestore(&clockevents_lock, flags);
 }
 EXPORT_SYMBOL_GPL(clockevents_register_device);
 
@@ -235,8 +238,9 @@ void clockevents_exchange_device(struct clock_event_device *old,
 void clockevents_notify(unsigned long reason, void *arg)
 {
 	struct list_head *node, *tmp;
+	unsigned long flags;
 
-	spin_lock(&clockevents_lock);
+	spin_lock_irqsave(&clockevents_lock, flags);
 	clockevents_do_notify(reason, arg);
 
 	switch (reason) {
@@ -251,7 +255,7 @@ void clockevents_notify(unsigned long reason, void *arg)
 	default:
 		break;
 	}
-	spin_unlock(&clockevents_lock);
+	spin_unlock_irqrestore(&clockevents_lock, flags);
 }
 EXPORT_SYMBOL_GPL(clockevents_notify);
 #endif
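
All three clockevents.c hunks switch clockevents_lock to the irqsave/irqrestore variants, which is needed once the lock can be taken from paths that already run with interrupts disabled: the unlock must put back whatever interrupt state the lock found, rather than unconditionally re-enabling. A userspace model of that save/restore discipline (all names invented):

#include <stdio.h>

static int irq_enabled = 1;	/* stand-in for the CPU's interrupt flag */

static unsigned long lock_irqsave(void)
{
	unsigned long flags = irq_enabled;	/* remember the caller's state */

	irq_enabled = 0;	/* "disable interrupts", then take the lock */
	return flags;
}

static void unlock_irqrestore(unsigned long flags)
{
	irq_enabled = flags;	/* restore what we found, never blindly enable */
}

int main(void)
{
	unsigned long flags;

	irq_enabled = 0;	/* caller has already disabled interrupts */
	flags = lock_irqsave();
	unlock_irqrestore(flags);
	printf("irq_enabled=%d (still 0, as the caller expects)\n", irq_enabled);
	return 0;
}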
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 877dbedc3118..c2ec25087a35 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -205,11 +205,11 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
  * Powerstate information: The system enters/leaves a state, where
  * affected devices might stop
  */
-static void tick_do_broadcast_on_off(void *why)
+static void tick_do_broadcast_on_off(unsigned long *reason)
 {
 	struct clock_event_device *bc, *dev;
 	struct tick_device *td;
-	unsigned long flags, *reason = why;
+	unsigned long flags;
 	int cpu, bc_stopped;
 
 	spin_lock_irqsave(&tick_broadcast_lock, flags);
@@ -276,8 +276,7 @@ void tick_broadcast_on_off(unsigned long reason, int *oncpu)
 		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
 		       "offline CPU #%d\n", *oncpu);
 	else
-		smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
-					 &reason, 1);
+		tick_do_broadcast_on_off(&reason);
 }
 
 /*
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index a999b92a1277..fddd69d16e03 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -286,7 +286,7 @@ static int __init init_timer_list_procfs(void)
 {
 	struct proc_dir_entry *pe;
 
-	pe = proc_create("timer_list", 0644, NULL, &timer_list_fops);
+	pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
 	if (!pe)
 		return -ENOMEM;
 	return 0;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1e1d23c26308..25edd5cc5935 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2278,7 +2278,11 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 	read++;
 	cnt--;
 
-	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
+	/*
+	 * If the parser hasn't finished with the last write,
+	 * continue reading the user input without skipping spaces.
+	 */
+	if (!(iter->flags & FTRACE_ITER_CONT)) {
 		/* skip white space */
 		while (cnt && isspace(ch)) {
 			ret = get_user(ch, ubuf++);
@@ -2288,8 +2292,9 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 			cnt--;
 		}
 
+		/* only spaces were written */
 		if (isspace(ch)) {
-			file->f_pos += read;
+			*ppos += read;
 			ret = read;
 			goto out;
 		}
@@ -2319,12 +2324,12 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 		if (ret)
 			goto out;
 		iter->buffer_idx = 0;
-	} else
+	} else {
 		iter->flags |= FTRACE_ITER_CONT;
+		iter->buffer[iter->buffer_idx++] = ch;
+	}
 
-
-	file->f_pos += read;
-
+	*ppos += read;
 	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);
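
The key ftrace.c fix is the flag test: !(iter->flags & ~FTRACE_ITER_CONT) asks "is no flag other than FTRACE_ITER_CONT set?", which stays true even while a continuation is pending, whereas the intended question is "is the continuation flag clear?". A compilable demonstration of how the two tests diverge:

#include <stdio.h>

#define ITER_CONT  0x1	/* "parser still mid-token" flag */
#define ITER_OTHER 0x2	/* some unrelated mode flag */

int main(void)
{
	unsigned flags;

	/* Continuation pending: the old test still entered the
	 * "skip spaces" path, losing the partial token. */
	flags = ITER_CONT;
	printf("old=%d new=%d\n", !(flags & ~ITER_CONT), !(flags & ITER_CONT));
	/* prints old=1 new=0: only the fixed test honors the flag */

	/* No continuation, but another flag set: the old test would
	 * refuse to skip spaces for an unrelated reason. */
	flags = ITER_OTHER;
	printf("old=%d new=%d\n", !(flags & ~ITER_CONT), !(flags & ITER_CONT));
	/* prints old=0 new=1 */
	return 0;
}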
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c22b40f8f576..8c358395d338 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3896,17 +3896,9 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	if (ret < 0)
 		return ret;
 
-	switch (val) {
-	case 0:
-		trace_flags &= ~(1 << index);
-		break;
-	case 1:
-		trace_flags |= 1 << index;
-		break;
-
-	default:
+	if (val != 0 && val != 1)
 		return -EINVAL;
-	}
+	set_tracer_flags(1 << index, val);
 
 	*ppos += cnt;
 
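
The trace.c hunk replaces the open-coded switch with a range check plus set_tracer_flags(), so every flag flip goes through one helper that can also perform side effects. A sketch of that centralization (the helper body here is assumed, modeled on what the call site implies):

#include <stdio.h>

static unsigned trace_flags;

/* Assumed shape of the helper: the one place that flips the bit and
 * could also notify interested code of the change. */
static void set_tracer_flags_sketch(unsigned mask, int enabled)
{
	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;
}

int main(void)
{
	unsigned long val = 1;	/* parsed from user input */
	int index = 3;

	if (val != 0 && val != 1)	/* same range check as the hunk */
		return 1;		/* stands in for -EINVAL */
	set_tracer_flags_sketch(1u << index, val);
	printf("trace_flags = %#x\n", trace_flags);	/* 0x8 */
	return 0;
}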