diff options
Diffstat (limited to 'init')
 init/Kconfig |  9 +++++----
 init/main.c  | 46 ++++++++++++++++++++++++++++------------------
 2 files changed, 36 insertions(+), 19 deletions(-)
diff --git a/init/Kconfig b/init/Kconfig
index 86b00c53fade..f291f086caa1 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -354,7 +354,7 @@ config RT_GROUP_SCHED
 	  setting below. If enabled, it will also make it impossible to
 	  schedule realtime tasks for non-root users until you allocate
 	  realtime bandwidth for them.
-	  See Documentation/sched-rt-group.txt for more information.
+	  See Documentation/scheduler/sched-rt-group.txt for more information.
 
 choice
 	depends on GROUP_SCHED
@@ -771,8 +771,7 @@ config SLAB
 	help
 	  The regular slab allocator that is established and known to work
 	  well in all environments. It organizes cache hot objects in
-	  per cpu and per node queues. SLAB is the default choice for
-	  a slab allocator.
+	  per cpu and per node queues.
 
 config SLUB
 	bool "SLUB (Unqueued Allocator)"
@@ -781,7 +780,8 @@ config SLUB
 	   instead of managing queues of cached objects (SLAB approach).
 	   Per cpu caching is realized using slabs of objects instead
 	   of queues of objects. SLUB can use memory efficiently
-	   and has enhanced diagnostics.
+	   and has enhanced diagnostics. SLUB is the default choice for
+	   a slab allocator.
 
 config SLOB
 	depends on EMBEDDED
@@ -808,6 +808,7 @@ config TRACEPOINTS
 
 config MARKERS
 	bool "Activate markers"
+	depends on TRACEPOINTS
 	help
 	  Place an empty function call at each marker site. Can be
 	  dynamically changed for a probe function.
diff --git a/init/main.c b/init/main.c
index 7e117a231af1..9d761aa53296 100644
--- a/init/main.c
+++ b/init/main.c
@@ -63,6 +63,7 @@
 #include <linux/signal.h>
 #include <linux/idr.h>
 #include <linux/ftrace.h>
+#include <trace/boot.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -539,6 +540,15 @@ void __init __weak thread_info_cache_init(void)
 {
 }
 
+void __init __weak arch_early_irq_init(void)
+{
+}
+
+void __init __weak early_irq_init(void)
+{
+	arch_early_irq_init();
+}
+
 asmlinkage void __init start_kernel(void)
 {
 	char * command_line;
@@ -603,6 +613,8 @@ asmlinkage void __init start_kernel(void)
 	sort_main_extable();
 	trap_init();
 	rcu_init();
+	/* init some links before init_ISA_irqs() */
+	early_irq_init();
 	init_IRQ();
 	pidhash_init();
 	init_timers();
@@ -703,31 +715,35 @@ core_param(initcall_debug, initcall_debug, bool, 0644);
 int do_one_initcall(initcall_t fn)
 {
 	int count = preempt_count();
-	ktime_t delta;
+	ktime_t calltime, delta, rettime;
 	char msgbuf[64];
-	struct boot_trace it;
+	struct boot_trace_call call;
+	struct boot_trace_ret ret;
 
 	if (initcall_debug) {
-		it.caller = task_pid_nr(current);
-		printk("calling  %pF @ %i\n", fn, it.caller);
-		it.calltime = ktime_get();
+		call.caller = task_pid_nr(current);
+		printk("calling  %pF @ %i\n", fn, call.caller);
+		calltime = ktime_get();
+		trace_boot_call(&call, fn);
+		enable_boot_trace();
 	}
 
-	it.result = fn();
+	ret.result = fn();
 
 	if (initcall_debug) {
-		it.rettime = ktime_get();
-		delta = ktime_sub(it.rettime, it.calltime);
-		it.duration = (unsigned long long) delta.tv64 >> 10;
+		disable_boot_trace();
+		rettime = ktime_get();
+		delta = ktime_sub(rettime, calltime);
+		ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+		trace_boot_ret(&ret, fn);
 		printk("initcall %pF returned %d after %Ld usecs\n", fn,
-			it.result, it.duration);
-		trace_boot(&it, fn);
+			ret.result, ret.duration);
 	}
 
 	msgbuf[0] = 0;
 
-	if (it.result && it.result != -ENODEV && initcall_debug)
-		sprintf(msgbuf, "error code %d ", it.result);
+	if (ret.result && ret.result != -ENODEV && initcall_debug)
+		sprintf(msgbuf, "error code %d ", ret.result);
 
 	if (preempt_count() != count) {
 		strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
@@ -741,7 +757,7 @@ int do_one_initcall(initcall_t fn)
 		printk("initcall %pF returned with %s\n", fn, msgbuf);
 	}
 
-	return it.result;
+	return ret.result;
 }
 
 
@@ -882,7 +898,7 @@ static int __init kernel_init(void * unused)
 	 * we're essentially up and running. Get rid of the
 	 * initmem segments and start the user-mode stuff..
 	 */
-	stop_boot_trace();
+
 	init_post();
 	return 0;
 }