Diffstat (limited to 'init')
-rw-r--r--  init/Kconfig         | 149
-rw-r--r--  init/do_mounts_md.c  |   2
-rw-r--r--  init/do_mounts_rd.c  |   2
-rw-r--r--  init/initramfs.c     |  53
-rw-r--r--  init/main.c          |  65
5 files changed, 233 insertions(+), 38 deletions(-)
diff --git a/init/Kconfig b/init/Kconfig
index 8a8e2d00c40e..13627191a60d 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -299,6 +299,13 @@ config CGROUP_NS
 	  for instance virtual servers and checkpoint/restart
 	  jobs.
 
+config CGROUP_FREEZER
+	bool "control group freezer subsystem"
+	depends on CGROUPS
+	help
+	  Provides a way to freeze and unfreeze all tasks in a
+	  cgroup.
+
 config CGROUP_DEVICE
 	bool "Device controller for cgroups"
 	depends on CGROUPS && EXPERIMENTAL
@@ -347,7 +354,7 @@ config RT_GROUP_SCHED
 	  setting below. If enabled, it will also make it impossible to
 	  schedule realtime tasks for non-root users until you allocate
 	  realtime bandwidth for them.
-	  See Documentation/sched-rt-group.txt for more information.
+	  See Documentation/scheduler/sched-rt-group.txt for more information.
 
 choice
 	depends on GROUP_SCHED
@@ -394,16 +401,20 @@ config CGROUP_MEM_RES_CTLR
 	depends on CGROUPS && RESOURCE_COUNTERS
 	select MM_OWNER
 	help
-	  Provides a memory resource controller that manages both page cache and
-	  RSS memory.
+	  Provides a memory resource controller that manages both anonymous
+	  memory and page cache. (See Documentation/controllers/memory.txt)
 
 	  Note that setting this option increases fixed memory overhead
-	  associated with each page of memory in the system by 4/8 bytes
-	  and also increases cache misses because struct page on many 64bit
-	  systems will not fit into a single cache line anymore.
+	  associated with each page of memory in the system. By this,
+	  20(40)bytes/PAGE_SIZE on 32(64)bit system will be occupied by memory
+	  usage tracking struct at boot. Total amount of this is printed out
+	  at boot.
 
 	  Only enable when you're ok with these trade offs and really
-	  sure you need the memory resource controller.
+	  sure you need the memory resource controller. Even when you enable
+	  this, you can set "cgroup_disable=memory" at your boot option to
+	  disable memory resource controller and you can avoid overheads.
+	  (and lose benefits of memory resource contoller)
 
 	  This config option also selects MM_OWNER config option, which
 	  could in turn add some fork/exit overhead.
@@ -577,6 +588,13 @@ config KALLSYMS_ALL
 
 	  Say N.
 
+config KALLSYMS_STRIP_GENERATED
+	bool "Strip machine generated symbols from kallsyms"
+	depends on KALLSYMS_ALL
+	default y
+	help
+	  Say N if you want kallsyms to retain even machine generated symbols.
+
 config KALLSYMS_EXTRA_PASS
 	bool "Do an extra kallsyms pass"
 	depends on KALLSYMS
@@ -713,6 +731,14 @@ config SHMEM
 	  option replaces shmem and tmpfs with the much simpler ramfs code,
 	  which may be appropriate on small systems without swap.
 
+config AIO
+	bool "Enable AIO support" if EMBEDDED
+	default y
+	help
+	  This option enables POSIX asynchronous I/O which may by used
+	  by some high performance threaded applications. Disabling
+	  this option saves about 7k.
+
 config VM_EVENT_COUNTERS
 	default y
 	bool "Enable VM event counters for /proc/vmstat" if EMBEDDED
@@ -722,6 +748,15 @@ config VM_EVENT_COUNTERS
 	  on EMBEDDED systems. /proc/vmstat will only show page counts
 	  if VM event counters are disabled.
 
+config PCI_QUIRKS
+	default y
+	bool "Enable PCI quirk workarounds" if EMBEDDED
+	depends on PCI
+	help
+	  This enables workarounds for various PCI chipset
+	  bugs/quirks. Disable this only if your target machine is
+	  unaffected by PCI quirks.
+
 config SLUB_DEBUG
 	default y
 	bool "Enable SLUB debugging support" if EMBEDDED
@@ -743,8 +778,7 @@ config SLAB
 	help
 	  The regular slab allocator that is established and known to work
 	  well in all environments. It organizes cache hot objects in
-	  per cpu and per node queues. SLAB is the default choice for
-	  a slab allocator.
+	  per cpu and per node queues.
 
 config SLUB
 	bool "SLUB (Unqueued Allocator)"
@@ -753,7 +787,8 @@ config SLUB
 	  instead of managing queues of cached objects (SLAB approach).
 	  Per cpu caching is realized using slabs of objects instead
 	  of queues of objects. SLUB can use memory efficiently
-	  and has enhanced diagnostics.
+	  and has enhanced diagnostics. SLUB is the default choice for
+	  a slab allocator.
 
 config SLOB
 	depends on EMBEDDED
@@ -771,8 +806,16 @@ config PROFILING
 	  Say Y here to enable the extended profiling support mechanisms used
 	  by profilers such as OProfile.
 
+#
+# Place an empty function call at each tracepoint site. Can be
+# dynamically changed for a probe function.
+#
+config TRACEPOINTS
+	bool
+
 config MARKERS
 	bool "Activate markers"
+	depends on TRACEPOINTS
 	help
 	  Place an empty function call at each marker site. Can be
 	  dynamically changed for a probe function.
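
[Note: the TRACEPOINTS and MARKERS help texts above describe the same basic mechanism: a call site that is an empty function by default and can be switched to a probe at run time. A minimal user-space sketch of that idea follows; it is illustrative only, not the kernel's tracepoint machinery, and all names in it are invented.]

#include <stdio.h>

static void nop_probe(int value) { (void)value; }

/* the "tracepoint": call sites go through this pointer, a no-op by default */
static void (*trace_sample)(int) = nop_probe;

static void my_probe(int value)
{
	printf("probe hit: value=%d\n", value);
}

int main(void)
{
	trace_sample(1);		/* disabled: calls the empty stub */
	trace_sample = my_probe;	/* dynamically attach a probe */
	trace_sample(2);		/* enabled: the probe fires */
	return 0;
}
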
@@ -893,10 +936,90 @@ source "block/Kconfig"
 config PREEMPT_NOTIFIERS
 	bool
 
+choice
+	prompt "RCU Implementation"
+	default CLASSIC_RCU
+
 config CLASSIC_RCU
-	def_bool !PREEMPT_RCU
+	bool "Classic RCU"
 	help
 	  This option selects the classic RCU implementation that is
 	  designed for best read-side performance on non-realtime
-	  systems. Classic RCU is the default. Note that the
-	  PREEMPT_RCU symbol is used to select/deselect this option.
+	  systems.
+
+	  Select this option if you are unsure.
+
+config TREE_RCU
+	bool "Tree-based hierarchical RCU"
+	help
+	  This option selects the RCU implementation that is
+	  designed for very large SMP system with hundreds or
+	  thousands of CPUs.
+
+config PREEMPT_RCU
+	bool "Preemptible RCU"
+	depends on PREEMPT
+	help
+	  This option reduces the latency of the kernel by making certain
+	  RCU sections preemptible. Normally RCU code is non-preemptible, if
+	  this option is selected then read-only RCU sections become
+	  preemptible. This helps latency, but may expose bugs due to
+	  now-naive assumptions about each RCU read-side critical section
+	  remaining on a given CPU through its execution.
+
+endchoice
+
+config RCU_TRACE
+	bool "Enable tracing for RCU"
+	depends on TREE_RCU || PREEMPT_RCU
+	help
+	  This option provides tracing in RCU which presents stats
+	  in debugfs for debugging RCU implementation.
+
+	  Say Y here if you want to enable RCU tracing
+	  Say N if you are unsure.
+
+config RCU_FANOUT
+	int "Tree-based hierarchical RCU fanout value"
+	range 2 64 if 64BIT
+	range 2 32 if !64BIT
+	depends on TREE_RCU
+	default 64 if 64BIT
+	default 32 if !64BIT
+	help
+	  This option controls the fanout of hierarchical implementations
+	  of RCU, allowing RCU to work efficiently on machines with
+	  large numbers of CPUs. This value must be at least the cube
+	  root of NR_CPUS, which allows NR_CPUS up to 32,768 for 32-bit
+	  systems and up to 262,144 for 64-bit systems.
+
+	  Select a specific number if testing RCU itself.
+	  Take the default if unsure.
+
+config RCU_FANOUT_EXACT
+	bool "Disable tree-based hierarchical RCU auto-balancing"
+	depends on TREE_RCU
+	default n
+	help
+	  This option forces use of the exact RCU_FANOUT value specified,
+	  regardless of imbalances in the hierarchy. This is useful for
+	  testing RCU itself, and might one day be useful on systems with
+	  strong NUMA behavior.
+
+	  Without RCU_FANOUT_EXACT, the code will balance the hierarchy.
+
+	  Say N if unsure.
+
+config TREE_RCU_TRACE
+	def_bool RCU_TRACE && TREE_RCU
+	select DEBUG_FS
+	help
+	  This option provides tracing for the TREE_RCU implementation,
+	  permitting Makefile to trivially select kernel/rcutree_trace.c.
+
+config PREEMPT_RCU_TRACE
+	def_bool RCU_TRACE && PREEMPT_RCU
+	select DEBUG_FS
+	help
+	  This option provides tracing for the PREEMPT_RCU implementation,
+	  permitting Makefile to trivially select kernel/rcupreempt_trace.c.
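
[Note: a quick check of the limits quoted in the RCU_FANOUT help above, plain arithmetic rather than part of the patch. The cube-root constraint corresponds to a hierarchy of at most three levels, so a fanout of F covers up to F*F*F CPUs, which matches the stated NR_CPUS limits.]

#include <stdio.h>

int main(void)
{
	printf("fanout 32: %d CPUs\n", 32 * 32 * 32);	/* 32768 */
	printf("fanout 64: %d CPUs\n", 64 * 64 * 64);	/* 262144 */
	return 0;
}
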
diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
index 48b3fadd83ed..d6da5cdd3c38 100644
--- a/init/do_mounts_md.c
+++ b/init/do_mounts_md.c
@@ -1,4 +1,4 @@
-
+#include <linux/delay.h>
 #include <linux/raid/md.h>
 
 #include "do_mounts.h"
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index fedef93b586f..a7c748fa977a 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -71,7 +71,7 @@ identify_ramdisk_image(int fd, int start_block)
 	sys_read(fd, buf, size);
 
 	/*
-	 * If it matches the gzip magic numbers, return -1
+	 * If it matches the gzip magic numbers, return 0
 	 */
 	if (buf[0] == 037 && ((buf[1] == 0213) || (buf[1] == 0236))) {
 		printk(KERN_NOTICE
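
[Note: for reference on the magic-number test in the context above, 037 and 0213 are octal for 0x1f and 0x8b, the standard gzip magic bytes per RFC 1952. A tiny stand-alone check, not part of the patch:]

#include <stdio.h>

int main(void)
{
	/* octal literals, as used in identify_ramdisk_image() */
	printf("037 = 0x%02x, 0213 = 0x%02x, 0236 = 0x%02x\n",
	       037, 0213, 0236);	/* prints 0x1f, 0x8b, 0x9e */
	return 0;
}
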
diff --git a/init/initramfs.c b/init/initramfs.c
index 644fc01ad5f0..4f5ba75aaa7c 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -6,6 +6,7 @@
 #include <linux/delay.h>
 #include <linux/string.h>
 #include <linux/syscalls.h>
+#include <linux/utime.h>
 
 static __initdata char *message;
 static void __init error(char *x)
@@ -72,6 +73,49 @@ static void __init free_hash(void)
 	}
 }
 
+static long __init do_utime(char __user *filename, time_t mtime)
+{
+	struct timespec t[2];
+
+	t[0].tv_sec = mtime;
+	t[0].tv_nsec = 0;
+	t[1].tv_sec = mtime;
+	t[1].tv_nsec = 0;
+
+	return do_utimes(AT_FDCWD, filename, t, AT_SYMLINK_NOFOLLOW);
+}
+
+static __initdata LIST_HEAD(dir_list);
+struct dir_entry {
+	struct list_head list;
+	char *name;
+	time_t mtime;
+};
+
+static void __init dir_add(const char *name, time_t mtime)
+{
+	struct dir_entry *de = kmalloc(sizeof(struct dir_entry), GFP_KERNEL);
+	if (!de)
+		panic("can't allocate dir_entry buffer");
+	INIT_LIST_HEAD(&de->list);
+	de->name = kstrdup(name, GFP_KERNEL);
+	de->mtime = mtime;
+	list_add(&de->list, &dir_list);
+}
+
+static void __init dir_utime(void)
+{
+	struct dir_entry *de, *tmp;
+	list_for_each_entry_safe(de, tmp, &dir_list, list) {
+		list_del(&de->list);
+		do_utime(de->name, de->mtime);
+		kfree(de->name);
+		kfree(de);
+	}
+}
+
+static __initdata time_t mtime;
+
 /* cpio header parsing */
 
 static __initdata unsigned long ino, major, minor, nlink;
@@ -97,6 +141,7 @@ static void __init parse_header(char *s)
 	uid = parsed[2];
 	gid = parsed[3];
 	nlink = parsed[4];
+	mtime = parsed[5];
 	body_len = parsed[6];
 	major = parsed[7];
 	minor = parsed[8];
@@ -130,6 +175,7 @@ static inline void __init eat(unsigned n)
 	count -= n;
 }
 
+static __initdata char *vcollected;
 static __initdata char *collected;
 static __initdata int remains;
 static __initdata char *collect;
@@ -271,6 +317,7 @@ static int __init do_name(void)
 		if (wfd >= 0) {
 			sys_fchown(wfd, uid, gid);
 			sys_fchmod(wfd, mode);
+			vcollected = kstrdup(collected, GFP_KERNEL);
 			state = CopyFile;
 		}
 	}
@@ -278,12 +325,14 @@ static int __init do_name(void)
 		sys_mkdir(collected, mode);
 		sys_chown(collected, uid, gid);
 		sys_chmod(collected, mode);
+		dir_add(collected, mtime);
 	} else if (S_ISBLK(mode) || S_ISCHR(mode) ||
 		   S_ISFIFO(mode) || S_ISSOCK(mode)) {
 		if (maybe_link() == 0) {
 			sys_mknod(collected, mode, rdev);
 			sys_chown(collected, uid, gid);
 			sys_chmod(collected, mode);
+			do_utime(collected, mtime);
 		}
 	}
 	return 0;
@@ -294,6 +343,8 @@ static int __init do_copy(void)
 	if (count >= body_len) {
 		sys_write(wfd, victim, body_len);
 		sys_close(wfd);
+		do_utime(vcollected, mtime);
+		kfree(vcollected);
 		eat(body_len);
 		state = SkipIt;
 		return 0;
@@ -311,6 +362,7 @@ static int __init do_symlink(void)
 	clean_path(collected, 0);
 	sys_symlink(collected + N_ALIGN(name_len), collected);
 	sys_lchown(collected, uid, gid);
+	do_utime(collected, mtime);
 	state = SkipIt;
 	next_state = Reset;
 	return 0;
@@ -466,6 +518,7 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len, int check_only)
 		buf += inptr;
 		len -= inptr;
 	}
+	dir_utime();
 	kfree(window);
 	kfree(name_buf);
 	kfree(symlink_buf);
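
[Note: the initramfs additions above restore cpio timestamps in two steps: files, symlinks and device nodes get their mtime immediately, while directories are only recorded (dir_add) and stamped once at the end (dir_utime), since extracting entries into a directory would update its mtime again. A rough user-space sketch of the same deferral idea follows; names are invented and it assumes the POSIX utimensat() call, so it is not the kernel code.]

#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <time.h>

struct dir_stamp {			/* invented; mirrors struct dir_entry */
	char *name;
	time_t mtime;
	struct dir_stamp *next;
};

static struct dir_stamp *pending;

static void remember_dir(const char *name, time_t mtime)
{
	struct dir_stamp *d = malloc(sizeof(*d));

	if (!d)
		return;			/* the kernel version panics instead */
	d->name = strdup(name);
	d->mtime = mtime;
	d->next = pending;
	pending = d;
}

static void restore_dir_mtimes(void)	/* call once, after extraction */
{
	while (pending) {
		struct dir_stamp *d = pending;
		struct timespec t[2] = {
			{ .tv_sec = d->mtime }, { .tv_sec = d->mtime }
		};

		utimensat(AT_FDCWD, d->name, t, AT_SYMLINK_NOFOLLOW);
		pending = d->next;
		free(d->name);
		free(d);
	}
}

int main(void)
{
	remember_dir("some_extracted_dir", 1225000000);	/* hypothetical path */
	/* ... extract files into the directory here ... */
	restore_dir_mtimes();
	return 0;
}
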
diff --git a/init/main.c b/init/main.c
index 6aaff34a38c0..07da4dea50c3 100644
--- a/init/main.c
+++ b/init/main.c
@@ -28,6 +28,7 @@
 #include <linux/gfp.h>
 #include <linux/percpu.h>
 #include <linux/kmod.h>
+#include <linux/vmalloc.h>
 #include <linux/kernel_stat.h>
 #include <linux/start_kernel.h>
 #include <linux/security.h>
@@ -52,6 +53,7 @@
 #include <linux/key.h>
 #include <linux/unwind.h>
 #include <linux/buffer_head.h>
+#include <linux/page_cgroup.h>
 #include <linux/debug_locks.h>
 #include <linux/debugobjects.h>
 #include <linux/lockdep.h>
@@ -61,6 +63,8 @@
 #include <linux/sched.h>
 #include <linux/signal.h>
 #include <linux/idr.h>
+#include <linux/ftrace.h>
+#include <trace/boot.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -537,6 +541,15 @@ void __init __weak thread_info_cache_init(void)
 {
 }
 
+void __init __weak arch_early_irq_init(void)
+{
+}
+
+void __init __weak early_irq_init(void)
+{
+	arch_early_irq_init();
+}
+
 asmlinkage void __init start_kernel(void)
 {
 	char * command_line;
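
[Note: the two stubs added above rely on __weak: the generic kernel supplies empty defaults, and an architecture that provides its own strong definitions overrides them at link time. A stand-alone illustration of that linkage rule, using GCC/Clang attribute syntax and an invented function name, not kernel code:]

#include <stdio.h>

/* Weak default: if another object file in the link provides a strong
 * definition of setup_hook(), the linker picks that one instead. */
void __attribute__((weak)) setup_hook(void)
{
	puts("generic no-op default");
}

int main(void)
{
	setup_hook();
	return 0;
}
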
@@ -607,6 +620,8 @@ asmlinkage void __init start_kernel(void)
 	sort_main_extable();
 	trap_init();
 	rcu_init();
+	/* init some links before init_ISA_irqs() */
+	early_irq_init();
 	init_IRQ();
 	pidhash_init();
 	init_timers();
@@ -649,8 +664,10 @@ asmlinkage void __init start_kernel(void)
 		initrd_start = 0;
 	}
 #endif
+	vmalloc_init();
 	vfs_caches_init_early();
 	cpuset_init_early();
+	page_cgroup_init();
 	mem_init();
 	enable_debug_pagealloc();
 	cpu_hotplug_init();
@@ -671,10 +688,10 @@ asmlinkage void __init start_kernel(void)
 	efi_enter_virtual_mode();
 #endif
 	thread_info_cache_init();
+	cred_init();
 	fork_init(num_physpages);
 	proc_caches_init();
 	buffer_init();
-	unnamed_dev_init();
 	key_init();
 	security_init();
 	vfs_caches_init(num_physpages);
@@ -694,46 +711,47 @@ asmlinkage void __init start_kernel(void)
 
 	acpi_early_init(); /* before LAPIC and SMP init */
 
+	ftrace_init();
+
 	/* Do the rest non-__init'ed, we're now alive */
 	rest_init();
 }
 
 static int initcall_debug;
-
-static int __init initcall_debug_setup(char *str)
-{
-	initcall_debug = 1;
-	return 1;
-}
-__setup("initcall_debug", initcall_debug_setup);
+core_param(initcall_debug, initcall_debug, bool, 0644);
 
 int do_one_initcall(initcall_t fn)
 {
 	int count = preempt_count();
-	ktime_t t0, t1, delta;
+	ktime_t calltime, delta, rettime;
 	char msgbuf[64];
-	int result;
+	struct boot_trace_call call;
+	struct boot_trace_ret ret;
 
 	if (initcall_debug) {
-		printk("calling %pF @ %i\n", fn, task_pid_nr(current));
-		t0 = ktime_get();
+		call.caller = task_pid_nr(current);
+		printk("calling %pF @ %i\n", fn, call.caller);
+		calltime = ktime_get();
+		trace_boot_call(&call, fn);
+		enable_boot_trace();
 	}
 
-	result = fn();
+	ret.result = fn();
 
 	if (initcall_debug) {
-		t1 = ktime_get();
-		delta = ktime_sub(t1, t0);
-
-		printk("initcall %pF returned %d after %Ld msecs\n",
-			fn, result,
-			(unsigned long long) delta.tv64 >> 20);
+		disable_boot_trace();
+		rettime = ktime_get();
+		delta = ktime_sub(rettime, calltime);
+		ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+		trace_boot_ret(&ret, fn);
+		printk("initcall %pF returned %d after %Ld usecs\n", fn,
+			ret.result, ret.duration);
 	}
 
 	msgbuf[0] = 0;
 
-	if (result && result != -ENODEV && initcall_debug)
-		sprintf(msgbuf, "error code %d ", result);
+	if (ret.result && ret.result != -ENODEV && initcall_debug)
+		sprintf(msgbuf, "error code %d ", ret.result);
 
 	if (preempt_count() != count) {
 		strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
@@ -747,7 +765,7 @@ int do_one_initcall(initcall_t fn)
 		printk("initcall %pF returned with %s\n", fn, msgbuf);
 	}
 
-	return result;
+	return ret.result;
 }
 
 
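
[Note: one detail of the do_one_initcall() change above: the duration is converted from nanoseconds with a right shift, and '>> 10' divides by 1024 rather than 1000, under-reporting by roughly 2.3 percent. That is close enough for a boot-time log line and presumably avoids a 64-bit division; the old code did the same for msecs with '>> 20'. A small stand-alone check:]

#include <stdio.h>

int main(void)
{
	unsigned long long ns = 5000000ULL;	/* a 5 ms initcall, say */

	printf("shift:  %llu \"usecs\"\n", ns >> 10);	/* 4882 */
	printf("divide: %llu usecs\n", ns / 1000);	/* 5000 */
	return 0;
}
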
@@ -774,7 +792,6 @@ static void __init do_initcalls(void)
 static void __init do_basic_setup(void)
 {
 	rcu_init_sched(); /* needed by module_init stage. */
-	/* drivers will send hotplug events */
 	init_workqueues();
 	usermodehelper_init();
 	driver_init();
@@ -862,6 +879,7 @@ static int __init kernel_init(void * unused)
 	smp_prepare_cpus(setup_max_cpus);
 
 	do_pre_smp_initcalls();
+	start_boot_trace();
 
 	smp_init();
 	sched_init_smp();
@@ -888,6 +906,7 @@ static int __init kernel_init(void * unused)
 	 * we're essentially up and running. Get rid of the
 	 * initmem segments and start the user-mode stuff..
 	 */
+
 	init_post();
 	return 0;
 }