author     Ingo Molnar <mingo@elte.hu>   2009-05-12 06:17:30 -0400
committer  Ingo Molnar <mingo@elte.hu>   2009-05-12 06:17:36 -0400
commit     6cda3eb62ef42aa5acd649bf99c8db544e0f4051
tree       93f74ca002f5756c8e157611174f9540b5cf41c0   /kernel
parent     b9c61b70075c87a8612624736faf4a2de5b1ed30
parent     cec6be6d1069d697beb490bbb40a290d5ff554a2
Merge branch 'x86/apic' into irq/numa
Merge reason: both topics modify the APIC code but were able to do it in
parallel so far. An upcoming patch generates a conflict so
merge them to avoid the conflict.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/auditfilter.c        4
-rw-r--r--   kernel/irq/handle.c         2
-rw-r--r--   kernel/kprobes.c           31
-rw-r--r--   kernel/panic.c             13
-rw-r--r--   kernel/posix-cpu-timers.c   8
-rw-r--r--   kernel/sched.c              2
-rw-r--r--   kernel/sysctl.c             5
-rw-r--r--   kernel/time/tick-common.c  12
-rw-r--r--   kernel/trace/trace.c        1
9 files changed, 55 insertions, 23 deletions
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index a6fe71fd5d1b..713098ee5a02 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1028,7 +1028,7 @@ static void audit_update_watch(struct audit_parent *parent,
 
                 if (audit_enabled) {
                         struct audit_buffer *ab;
-                        ab = audit_log_start(NULL, GFP_KERNEL,
+                        ab = audit_log_start(NULL, GFP_NOFS,
                                 AUDIT_CONFIG_CHANGE);
                         audit_log_format(ab, "auid=%u ses=%u",
                                 audit_get_loginuid(current),
@@ -1067,7 +1067,7 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
                 e = container_of(r, struct audit_entry, rule);
                 if (audit_enabled) {
                         struct audit_buffer *ab;
-                        ab = audit_log_start(NULL, GFP_KERNEL,
+                        ab = audit_log_start(NULL, GFP_NOFS,
                                 AUDIT_CONFIG_CHANGE);
                         audit_log_format(ab, "auid=%u ses=%u",
                                 audit_get_loginuid(current),
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index a6368db2618b..a3c671e0f165 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -360,8 +360,6 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
         irqreturn_t ret, retval = IRQ_NONE;
         unsigned int status = 0;
 
-        WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
-
         if (!(action->flags & IRQF_DISABLED))
                 local_irq_enable_in_hardirq();
 
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index a5e74ddee0e2..c0fa54b276d9 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -319,6 +319,22 @@ struct kprobe __kprobes *get_kprobe(void *addr)
         return NULL;
 }
 
+/* Arm a kprobe with text_mutex */
+static void __kprobes arm_kprobe(struct kprobe *kp)
+{
+        mutex_lock(&text_mutex);
+        arch_arm_kprobe(kp);
+        mutex_unlock(&text_mutex);
+}
+
+/* Disarm a kprobe with text_mutex */
+static void __kprobes disarm_kprobe(struct kprobe *kp)
+{
+        mutex_lock(&text_mutex);
+        arch_disarm_kprobe(kp);
+        mutex_unlock(&text_mutex);
+}
+
 /*
  * Aggregate handlers for multiple kprobes support - these handlers
  * take care of invoking the individual kprobe handlers on p->list
@@ -538,7 +554,7 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
                 ap->flags &= ~KPROBE_FLAG_DISABLED;
                 if (!kprobes_all_disarmed)
                         /* Arm the breakpoint again. */
-                        arch_arm_kprobe(ap);
+                        arm_kprobe(ap);
         }
         return 0;
 }
@@ -789,11 +805,8 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
                  * enabled and not gone - otherwise, the breakpoint would
                  * already have been removed. We save on flushing icache.
                  */
-                if (!kprobes_all_disarmed && !kprobe_disabled(old_p)) {
-                        mutex_lock(&text_mutex);
-                        arch_disarm_kprobe(p);
-                        mutex_unlock(&text_mutex);
-                }
+                if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
+                        disarm_kprobe(p);
                 hlist_del_rcu(&old_p->hlist);
         } else {
                 if (p->break_handler && !kprobe_gone(p))
@@ -810,7 +823,7 @@ noclean:
                 if (!kprobe_disabled(old_p)) {
                         try_to_disable_aggr_kprobe(old_p);
                         if (!kprobes_all_disarmed && kprobe_disabled(old_p))
-                                arch_disarm_kprobe(old_p);
+                                disarm_kprobe(old_p);
                 }
         }
         return 0;
@@ -1364,7 +1377,7 @@ int __kprobes disable_kprobe(struct kprobe *kp)
                 try_to_disable_aggr_kprobe(p);
 
         if (!kprobes_all_disarmed && kprobe_disabled(p))
-                arch_disarm_kprobe(p);
+                disarm_kprobe(p);
 out:
         mutex_unlock(&kprobe_mutex);
         return ret;
@@ -1393,7 +1406,7 @@ int __kprobes enable_kprobe(struct kprobe *kp)
         }
 
         if (!kprobes_all_disarmed && kprobe_disabled(p))
-                arch_arm_kprobe(p);
+                arm_kprobe(p);
 
         p->flags &= ~KPROBE_FLAG_DISABLED;
         if (p != kp)
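
For context: the new arm_kprobe()/disarm_kprobe() helpers sit below the public kprobes API, so existing users are unchanged; registering a probe is still what ultimately arms the breakpoint, and with this change the arming and disarming paths above all go through text_mutex. A minimal, illustrative module exercising that path might look like the sketch below (the probed symbol, names, and handler are assumptions for illustration, not part of this commit):

/* Illustrative sketch only -- not part of this commit.  Registering a
 * kprobe is what eventually arms the breakpoint; the helpers added
 * above serialize that arming/disarming under text_mutex. */
#include <linux/module.h>
#include <linux/kprobes.h>

static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        pr_info("kprobe hit at %p\n", p->addr);
        return 0;
}

static struct kprobe my_probe = {
        .symbol_name = "do_fork",       /* probed symbol is just an example */
        .pre_handler = my_pre_handler,
};

static int __init my_probe_init(void)
{
        return register_kprobe(&my_probe);      /* arms the breakpoint */
}

static void __exit my_probe_exit(void)
{
        unregister_kprobe(&my_probe);           /* disarms it again */
}

module_init(my_probe_init);
module_exit(my_probe_exit);
MODULE_LICENSE("GPL");
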
diff --git a/kernel/panic.c b/kernel/panic.c
index 3dcaa1661357..874ecf1307ae 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -340,7 +340,7 @@ void oops_exit(void)
 }
 
 #ifdef WANT_WARN_ON_SLOWPATH
-void warn_slowpath(const char *file, int line, const char *fmt, ...)
+void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
 {
         va_list args;
         char function[KSYM_SYMBOL_LEN];
@@ -356,7 +356,7 @@ void warn_slowpath(const char *file, int line, const char *fmt, ...)
         if (board)
                 printk(KERN_WARNING "Hardware name: %s\n", board);
 
-        if (fmt) {
+        if (*fmt) {
                 va_start(args, fmt);
                 vprintk(fmt, args);
                 va_end(args);
@@ -367,7 +367,14 @@ void warn_slowpath(const char *file, int line, const char *fmt, ...)
         print_oops_end_marker();
         add_taint(TAINT_WARN);
 }
-EXPORT_SYMBOL(warn_slowpath);
+EXPORT_SYMBOL(warn_slowpath_fmt);
+
+void warn_slowpath_null(const char *file, int line)
+{
+        static const char *empty = "";
+        warn_slowpath_fmt(file, line, empty);
+}
+EXPORT_SYMBOL(warn_slowpath_null);
 #endif
 
 #ifdef CONFIG_CC_STACKPROTECTOR
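
For context: the rename plus the new helper let warning sites with no message avoid passing a format string at all; warn_slowpath_null() hands warn_slowpath_fmt() an empty string, which the `if (*fmt)` test above then skips. Roughly, the generic bug.h macros are expected to dispatch along the lines of this simplified sketch (illustrative, not the verbatim header):

/* Simplified sketch of how the generic WARN macros can map onto the two
 * helpers above; details are illustrative, not copied verbatim. */
#define __WARN()                warn_slowpath_null(__FILE__, __LINE__)
#define __WARN_printf(arg...)   warn_slowpath_fmt(__FILE__, __LINE__, arg)

#define WARN_ON(condition) ({                           \
        int __ret_warn_on = !!(condition);              \
        if (unlikely(__ret_warn_on))                    \
                __WARN();       /* no message */        \
        unlikely(__ret_warn_on);                        \
})

#define WARN(condition, format...) ({                   \
        int __ret_warn_on = !!(condition);              \
        if (unlikely(__ret_warn_on))                    \
                __WARN_printf(format);                  \
        unlikely(__ret_warn_on);                        \
})
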
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index c9dcf98b4463..bece7c0b67b2 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1420,19 +1420,19 @@ void run_posix_cpu_timers(struct task_struct *tsk)
          * timer call will interfere.
          */
         list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
-                int firing;
+                int cpu_firing;
+
                 spin_lock(&timer->it_lock);
                 list_del_init(&timer->it.cpu.entry);
-                firing = timer->it.cpu.firing;
+                cpu_firing = timer->it.cpu.firing;
                 timer->it.cpu.firing = 0;
                 /*
                  * The firing flag is -1 if we collided with a reset
                  * of the timer, which already reported this
                  * almost-firing as an overrun. So don't generate an event.
                  */
-                if (likely(firing >= 0)) {
+                if (likely(cpu_firing >= 0))
                         cpu_timer_fire(timer);
-                }
                 spin_unlock(&timer->it_lock);
         }
 }
diff --git a/kernel/sched.c b/kernel/sched.c
index b902e587a3a0..26efa475bdc1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4732,7 +4732,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
 
         if (user_tick)
                 account_user_time(p, one_jiffy, one_jiffy_scaled);
-        else if (p != rq->idle)
+        else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
                 account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
                                     one_jiffy_scaled);
         else
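
For context: the extra irq_count() test catches ticks that hit the idle task while it is already inside another interrupt. On an otherwise idle CPU the tick's own hardirq entry contributes exactly HARDIRQ_OFFSET to irq_count(); any larger value means the tick preempted a hardirq or a softirq, whose time is better charged as system time than as idle. A small sketch of that test as a standalone helper (the helper name is made up for illustration):

/* Illustrative helper, not in the kernel: true when the timer tick
 * interrupted something other than the bare idle loop.  irq_count()
 * is the hardirq/softirq nesting portion of preempt_count(); the
 * tick's own hardirq entry accounts for exactly HARDIRQ_OFFSET. */
static inline int tick_preempted_irq_context(void)
{
        return irq_count() != HARDIRQ_OFFSET;
}

Read that way, the changed condition charges such nested-interrupt ticks on the idle task to system time instead of idle time.
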
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index e3d2c7dd59b9..ea78fa101ad6 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -103,6 +103,9 @@ static unsigned long one_ul = 1;
 static int one_hundred = 100;
 static int one_thousand = 1000;
 
+/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
+static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
+
 /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
 static int maxolduid = 65535;
 static int minolduid;
@@ -1006,7 +1009,7 @@ static struct ctl_table vm_table[] = {
                 .mode = 0644,
                 .proc_handler = &dirty_bytes_handler,
                 .strategy = &sysctl_intvec,
-                .extra1 = &one_ul,
+                .extra1 = &dirty_bytes_min,
         },
         {
                 .procname = "dirty_writeback_centisecs",
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 21a5ca849514..83c4417b6a3c 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -93,7 +93,17 @@ void tick_handle_periodic(struct clock_event_device *dev)
         for (;;) {
                 if (!clockevents_program_event(dev, next, ktime_get()))
                         return;
-                tick_periodic(cpu);
+                /*
+                 * Have to be careful here. If we're in oneshot mode,
+                 * before we call tick_periodic() in a loop, we need
+                 * to be sure we're using a real hardware clocksource.
+                 * Otherwise we could get trapped in an infinite loop,
+                 * as tick_periodic() increments jiffies, which then
+                 * will increment time, possibly causing the loop to
+                 * trigger again and again.
+                 */
+                if (timekeeping_valid_for_hres())
+                        tick_periodic(cpu);
                 next = ktime_add(next, tick_period);
         }
 }
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1ce5dc6372b8..a884c09006c4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3448,6 +3448,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                 if (!ref)
                         break;
 
+                ref->ref = 1;
                 ref->buffer = info->tr->buffer;
                 ref->page = ring_buffer_alloc_read_page(ref->buffer);
                 if (!ref->page) {