author     Takashi Iwai <tiwai@suse.de>   2008-12-20 17:39:47 -0500
committer  Takashi Iwai <tiwai@suse.de>   2008-12-20 17:39:47 -0500
commit     55fa518867978e1f5fd8353098f80d125ac734d7 (patch)
tree       3502b331c1f9ec4cac25dc8ba30b6a0a324e350c /init
parent     bb1f24bf00a85f666b56a09b7cdbfd221af16c2c (diff)
parent     eea0579fc85e64e9f05361d5aacf496fe7a151aa (diff)
Merge branch 'topic/pcsp-fix' into topic/misc
Diffstat (limited to 'init')
-rw-r--r--   init/Kconfig          65
-rw-r--r--   init/do_mounts.c       4
-rw-r--r--   init/do_mounts_md.c   42
-rw-r--r--   init/do_mounts_rd.c    2
-rw-r--r--   init/initramfs.c      53
-rw-r--r--   init/main.c           48
6 files changed, 160 insertions, 54 deletions
diff --git a/init/Kconfig b/init/Kconfig
index c11da38837e5..f763762d544a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -299,6 +299,13 @@ config CGROUP_NS
           for instance virtual servers and checkpoint/restart
           jobs.
 
+config CGROUP_FREEZER
+        bool "control group freezer subsystem"
+        depends on CGROUPS
+        help
+          Provides a way to freeze and unfreeze all tasks in a
+          cgroup.
+
 config CGROUP_DEVICE
         bool "Device controller for cgroups"
         depends on CGROUPS && EXPERIMENTAL
@@ -347,7 +354,7 @@ config RT_GROUP_SCHED
           setting below. If enabled, it will also make it impossible to
           schedule realtime tasks for non-root users until you allocate
           realtime bandwidth for them.
-          See Documentation/sched-rt-group.txt for more information.
+          See Documentation/scheduler/sched-rt-group.txt for more information.
 
 choice
         depends on GROUP_SCHED
@@ -394,16 +401,20 @@ config CGROUP_MEM_RES_CTLR
         depends on CGROUPS && RESOURCE_COUNTERS
         select MM_OWNER
         help
-          Provides a memory resource controller that manages both page cache and
-          RSS memory.
+          Provides a memory resource controller that manages both anonymous
+          memory and page cache. (See Documentation/controllers/memory.txt)
 
           Note that setting this option increases fixed memory overhead
-          associated with each page of memory in the system by 4/8 bytes
-          and also increases cache misses because struct page on many 64bit
-          systems will not fit into a single cache line anymore.
+          associated with each page of memory in the system. By this,
+          20(40)bytes/PAGE_SIZE on 32(64)bit system will be occupied by memory
+          usage tracking struct at boot. Total amount of this is printed out
+          at boot.
 
           Only enable when you're ok with these trade offs and really
-          sure you need the memory resource controller.
+          sure you need the memory resource controller. Even when you enable
+          this, you can set "cgroup_disable=memory" at your boot option to
+          disable memory resource controller and you can avoid overheads.
+          (and lose benefits of memory resource contoller)
 
           This config option also selects MM_OWNER config option, which
           could in turn add some fork/exit overhead.
@@ -713,6 +724,14 @@ config SHMEM
           option replaces shmem and tmpfs with the much simpler ramfs code,
           which may be appropriate on small systems without swap.
 
+config AIO
+        bool "Enable AIO support" if EMBEDDED
+        default y
+        help
+          This option enables POSIX asynchronous I/O which may by used
+          by some high performance threaded applications. Disabling
+          this option saves about 7k.
+
 config VM_EVENT_COUNTERS
         default y
         bool "Enable VM event counters for /proc/vmstat" if EMBEDDED
@@ -722,6 +741,15 @@ config VM_EVENT_COUNTERS
           on EMBEDDED systems. /proc/vmstat will only show page counts
           if VM event counters are disabled.
 
+config PCI_QUIRKS
+        default y
+        bool "Enable PCI quirk workarounds" if EMBEDDED
+        depends on PCI
+        help
+          This enables workarounds for various PCI chipset
+          bugs/quirks. Disable this only if your target machine is
+          unaffected by PCI quirks.
+
 config SLUB_DEBUG
         default y
         bool "Enable SLUB debugging support" if EMBEDDED
@@ -743,8 +771,7 @@ config SLAB
         help
           The regular slab allocator that is established and known to work
           well in all environments. It organizes cache hot objects in
-          per cpu and per node queues. SLAB is the default choice for
-          a slab allocator.
+          per cpu and per node queues.
 
 config SLUB
         bool "SLUB (Unqueued Allocator)"
@@ -753,7 +780,8 @@ config SLUB
           instead of managing queues of cached objects (SLAB approach).
           Per cpu caching is realized using slabs of objects instead
           of queues of objects. SLUB can use memory efficiently
-          and has enhanced diagnostics.
+          and has enhanced diagnostics. SLUB is the default choice for
+          a slab allocator.
 
 config SLOB
         depends on EMBEDDED
@@ -771,6 +799,13 @@ config PROFILING
           Say Y here to enable the extended profiling support mechanisms used
           by profilers such as OProfile.
 
+#
+# Place an empty function call at each tracepoint site. Can be
+# dynamically changed for a probe function.
+#
+config TRACEPOINTS
+        bool
+
 config MARKERS
         bool "Activate markers"
         help
@@ -779,16 +814,6 @@ config MARKERS
 
 source "arch/Kconfig"
 
-config PROC_PAGE_MONITOR
-        default y
-        depends on PROC_FS && MMU
-        bool "Enable /proc page monitoring" if EMBEDDED
-        help
-          Various /proc files exist to monitor process memory utilization:
-          /proc/pid/smaps, /proc/pid/clear_refs, /proc/pid/pagemap,
-          /proc/kpagecount, and /proc/kpageflags. Disabling these
-          interfaces will reduce the size of the kernel by approximately 4kb.
-
 endmenu # General setup
 
 config HAVE_GENERIC_DMA_COHERENT
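
For a rough sense of the CGROUP_MEM_RES_CTLR overhead described in the help text above: assuming the common 4 KiB PAGE_SIZE, 1 GiB of RAM is 262,144 pages, so the per-page tracking structures cost about 262,144 x 20 bytes = 5 MiB on a 32-bit system and about 10 MiB at 40 bytes per page on 64-bit, i.e. roughly 0.5-1% of RAM. The exact total is printed at boot, and booting with cgroup_disable=memory avoids the cost entirely.
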
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 3715feb8446d..d055b1914c3d 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -263,6 +263,10 @@ retry:
                 printk("Please append a correct \"root=\" boot option; here are the available partitions:\n");
 
                 printk_all_partitions();
+#ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT
+                printk("DEBUG_BLOCK_EXT_DEVT is enabled, you need to specify "
+                       "explicit textual name for \"root=\" boot option.\n");
+#endif
                 panic("VFS: Unable to mount root fs on %s", b);
         }
 
diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
index 693d24694a6c..d6da5cdd3c38 100644
--- a/init/do_mounts_md.c
+++ b/init/do_mounts_md.c
@@ -1,4 +1,4 @@
-
+#include <linux/delay.h>
 #include <linux/raid/md.h>
 
 #include "do_mounts.h"
@@ -12,7 +12,12 @@
  * The code for that is here.
  */
 
-static int __initdata raid_noautodetect, raid_autopart;
+#ifdef CONFIG_MD_AUTODETECT
+static int __initdata raid_noautodetect;
+#else
+static int __initdata raid_noautodetect=1;
+#endif
+static int __initdata raid_autopart;
 
 static struct {
         int minor;
@@ -252,6 +257,8 @@ static int __init raid_setup(char *str)
 
                 if (!strncmp(str, "noautodetect", wlen))
                         raid_noautodetect = 1;
+                if (!strncmp(str, "autodetect", wlen))
+                        raid_noautodetect = 0;
                 if (strncmp(str, "partitionable", wlen)==0)
                         raid_autopart = 1;
                 if (strncmp(str, "part", wlen)==0)
@@ -264,17 +271,32 @@ static int __init raid_setup(char *str)
 __setup("raid=", raid_setup);
 __setup("md=", md_setup);
 
+static void autodetect_raid(void)
+{
+        int fd;
+
+        /*
+         * Since we don't want to detect and use half a raid array, we need to
+         * wait for the known devices to complete their probing
+         */
+        printk(KERN_INFO "md: Waiting for all devices to be available before autodetect\n");
+        printk(KERN_INFO "md: If you don't use raid, use raid=noautodetect\n");
+        while (driver_probe_done() < 0)
+                msleep(100);
+        fd = sys_open("/dev/md0", 0, 0);
+        if (fd >= 0) {
+                sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
+                sys_close(fd);
+        }
+}
+
 void __init md_run_setup(void)
 {
         create_dev("/dev/md0", MKDEV(MD_MAJOR, 0));
+
         if (raid_noautodetect)
-                printk(KERN_INFO "md: Skipping autodetection of RAID arrays. (raid=noautodetect)\n");
-        else {
-                int fd = sys_open("/dev/md0", 0, 0);
-                if (fd >= 0) {
-                        sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
-                        sys_close(fd);
-                }
-        }
+                printk(KERN_INFO "md: Skipping autodetection of RAID arrays. (raid=autodetect will force)\n");
+        else
+                autodetect_raid();
         md_setup_drive();
 }
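
The do_mounts_md.c change does two things: RAID autodetection now defaults off when CONFIG_MD_AUTODETECT is not set, and the autodetect path waits for driver_probe_done() before issuing RAID_AUTORUN so it never assembles a half-probed array. As a usage sketch, booting with raid=noautodetect skips the /dev/md0 scan, while raid=autodetect clears raid_noautodetect and forces the scan even in a kernel built without CONFIG_MD_AUTODETECT.
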
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index fedef93b586f..a7c748fa977a 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -71,7 +71,7 @@ identify_ramdisk_image(int fd, int start_block)
         sys_read(fd, buf, size);
 
         /*
-         * If it matches the gzip magic numbers, return -1
+         * If it matches the gzip magic numbers, return 0
          */
         if (buf[0] == 037 && ((buf[1] == 0213) || (buf[1] == 0236))) {
                 printk(KERN_NOTICE
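
For reference, the octal constants tested just below the corrected comment are the gzip magic bytes: 037/0213 is 0x1f/0x8b (the standard gzip header) and 037/0236 is 0x1f/0x9e (the old gzip format).
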
diff --git a/init/initramfs.c b/init/initramfs.c
index 644fc01ad5f0..4f5ba75aaa7c 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -6,6 +6,7 @@
 #include <linux/delay.h>
 #include <linux/string.h>
 #include <linux/syscalls.h>
+#include <linux/utime.h>
 
 static __initdata char *message;
 static void __init error(char *x)
@@ -72,6 +73,49 @@ static void __init free_hash(void)
         }
 }
 
+static long __init do_utime(char __user *filename, time_t mtime)
+{
+        struct timespec t[2];
+
+        t[0].tv_sec = mtime;
+        t[0].tv_nsec = 0;
+        t[1].tv_sec = mtime;
+        t[1].tv_nsec = 0;
+
+        return do_utimes(AT_FDCWD, filename, t, AT_SYMLINK_NOFOLLOW);
+}
+
+static __initdata LIST_HEAD(dir_list);
+struct dir_entry {
+        struct list_head list;
+        char *name;
+        time_t mtime;
+};
+
+static void __init dir_add(const char *name, time_t mtime)
+{
+        struct dir_entry *de = kmalloc(sizeof(struct dir_entry), GFP_KERNEL);
+        if (!de)
+                panic("can't allocate dir_entry buffer");
+        INIT_LIST_HEAD(&de->list);
+        de->name = kstrdup(name, GFP_KERNEL);
+        de->mtime = mtime;
+        list_add(&de->list, &dir_list);
+}
+
+static void __init dir_utime(void)
+{
+        struct dir_entry *de, *tmp;
+        list_for_each_entry_safe(de, tmp, &dir_list, list) {
+                list_del(&de->list);
+                do_utime(de->name, de->mtime);
+                kfree(de->name);
+                kfree(de);
+        }
+}
+
+static __initdata time_t mtime;
+
 /* cpio header parsing */
 
 static __initdata unsigned long ino, major, minor, nlink;
@@ -97,6 +141,7 @@ static void __init parse_header(char *s)
         uid = parsed[2];
         gid = parsed[3];
         nlink = parsed[4];
+        mtime = parsed[5];
         body_len = parsed[6];
         major = parsed[7];
         minor = parsed[8];
@@ -130,6 +175,7 @@ static inline void __init eat(unsigned n)
         count -= n;
 }
 
+static __initdata char *vcollected;
 static __initdata char *collected;
 static __initdata int remains;
 static __initdata char *collect;
@@ -271,6 +317,7 @@ static int __init do_name(void)
                 if (wfd >= 0) {
                         sys_fchown(wfd, uid, gid);
                         sys_fchmod(wfd, mode);
+                        vcollected = kstrdup(collected, GFP_KERNEL);
                         state = CopyFile;
                 }
         }
@@ -278,12 +325,14 @@ static int __init do_name(void)
                 sys_mkdir(collected, mode);
                 sys_chown(collected, uid, gid);
                 sys_chmod(collected, mode);
+                dir_add(collected, mtime);
         } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
                    S_ISFIFO(mode) || S_ISSOCK(mode)) {
                 if (maybe_link() == 0) {
                         sys_mknod(collected, mode, rdev);
                         sys_chown(collected, uid, gid);
                         sys_chmod(collected, mode);
+                        do_utime(collected, mtime);
                 }
         }
         return 0;
@@ -294,6 +343,8 @@ static int __init do_copy(void)
         if (count >= body_len) {
                 sys_write(wfd, victim, body_len);
                 sys_close(wfd);
+                do_utime(vcollected, mtime);
+                kfree(vcollected);
                 eat(body_len);
                 state = SkipIt;
                 return 0;
@@ -311,6 +362,7 @@ static int __init do_symlink(void)
         clean_path(collected, 0);
         sys_symlink(collected + N_ALIGN(name_len), collected);
         sys_lchown(collected, uid, gid);
+        do_utime(collected, mtime);
         state = SkipIt;
         next_state = Reset;
         return 0;
@@ -466,6 +518,7 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len, int check_only)
                 buf += inptr;
                 len -= inptr;
         }
+        dir_utime();
         kfree(window);
         kfree(name_buf);
         kfree(symlink_buf);
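
The initramfs changes record each cpio entry's mtime and restore it with do_utime(); directories are queued on dir_list and only touched at the end by dir_utime(), because creating entries inside a directory would bump its mtime again. Below is a minimal userspace sketch of that ordering; it uses the standard utimensat() call rather than the kernel-internal do_utimes(), and the path and timestamp are illustrative only.

#include <fcntl.h>      /* AT_FDCWD, AT_SYMLINK_NOFOLLOW */
#include <stdio.h>
#include <sys/stat.h>   /* mkdir(), utimensat() */
#include <time.h>

/* Set a path's atime and mtime to a saved value, as do_utime() does. */
static int restore_mtime(const char *path, time_t mtime)
{
        struct timespec t[2] = {
                { .tv_sec = mtime, .tv_nsec = 0 },      /* atime */
                { .tv_sec = mtime, .tv_nsec = 0 },      /* mtime */
        };
        return utimensat(AT_FDCWD, path, t, AT_SYMLINK_NOFOLLOW);
}

int main(void)
{
        time_t archived_mtime = 1229812787;     /* stand-in for the mtime from a cpio header */

        mkdir("demo", 0755);
        /* ... create the directory's contents here; every file created
         * inside "demo" updates the directory's mtime again ... */

        /* Only after the contents exist is the recorded mtime put back. */
        if (restore_mtime("demo", archived_mtime) != 0)
                perror("utimensat");
        return 0;
}
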
diff --git a/init/main.c b/init/main.c
index 3820323c4c84..7e117a231af1 100644
--- a/init/main.c
+++ b/init/main.c
@@ -27,6 +27,7 @@
 #include <linux/gfp.h>
 #include <linux/percpu.h>
 #include <linux/kmod.h>
+#include <linux/vmalloc.h>
 #include <linux/kernel_stat.h>
 #include <linux/start_kernel.h>
 #include <linux/security.h>
@@ -51,6 +52,7 @@
 #include <linux/key.h>
 #include <linux/unwind.h>
 #include <linux/buffer_head.h>
+#include <linux/page_cgroup.h>
 #include <linux/debug_locks.h>
 #include <linux/debugobjects.h>
 #include <linux/lockdep.h>
@@ -60,6 +62,7 @@
 #include <linux/sched.h>
 #include <linux/signal.h>
 #include <linux/idr.h>
+#include <linux/ftrace.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -642,8 +645,10 @@ asmlinkage void __init start_kernel(void)
                 initrd_start = 0;
         }
 #endif
+        vmalloc_init();
         vfs_caches_init_early();
         cpuset_init_early();
+        page_cgroup_init();
         mem_init();
         enable_debug_pagealloc();
         cpu_hotplug_init();
@@ -667,7 +672,6 @@ asmlinkage void __init start_kernel(void)
         fork_init(num_physpages);
         proc_caches_init();
         buffer_init();
-        unnamed_dev_init();
         key_init();
         security_init();
         vfs_caches_init(num_physpages);
@@ -687,46 +691,43 @@ asmlinkage void __init start_kernel(void)
 
         acpi_early_init(); /* before LAPIC and SMP init */
 
+        ftrace_init();
+
         /* Do the rest non-__init'ed, we're now alive */
         rest_init();
 }
 
 static int initcall_debug;
-
-static int __init initcall_debug_setup(char *str)
-{
-        initcall_debug = 1;
-        return 1;
-}
-__setup("initcall_debug", initcall_debug_setup);
+core_param(initcall_debug, initcall_debug, bool, 0644);
 
 int do_one_initcall(initcall_t fn)
 {
         int count = preempt_count();
-        ktime_t t0, t1, delta;
+        ktime_t delta;
         char msgbuf[64];
-        int result;
+        struct boot_trace it;
 
         if (initcall_debug) {
-                printk("calling %pF\n", fn);
-                t0 = ktime_get();
+                it.caller = task_pid_nr(current);
+                printk("calling %pF @ %i\n", fn, it.caller);
+                it.calltime = ktime_get();
         }
 
-        result = fn();
+        it.result = fn();
 
         if (initcall_debug) {
-                t1 = ktime_get();
-                delta = ktime_sub(t1, t0);
-
-                printk("initcall %pF returned %d after %Ld msecs\n",
-                        fn, result,
-                        (unsigned long long) delta.tv64 >> 20);
+                it.rettime = ktime_get();
+                delta = ktime_sub(it.rettime, it.calltime);
+                it.duration = (unsigned long long) delta.tv64 >> 10;
+                printk("initcall %pF returned %d after %Ld usecs\n", fn,
+                        it.result, it.duration);
+                trace_boot(&it, fn);
         }
 
         msgbuf[0] = 0;
 
-        if (result && result != -ENODEV && initcall_debug)
-                sprintf(msgbuf, "error code %d ", result);
+        if (it.result && it.result != -ENODEV && initcall_debug)
+                sprintf(msgbuf, "error code %d ", it.result);
 
         if (preempt_count() != count) {
                 strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
@@ -740,7 +741,7 @@ int do_one_initcall(initcall_t fn)
                 printk("initcall %pF returned with %s\n", fn, msgbuf);
         }
 
-        return result;
+        return it.result;
 }
 
 
@@ -767,7 +768,6 @@ static void __init do_initcalls(void)
 static void __init do_basic_setup(void)
 {
         rcu_init_sched(); /* needed by module_init stage. */
-        /* drivers will send hotplug events */
         init_workqueues();
         usermodehelper_init();
         driver_init();
@@ -855,6 +855,7 @@ static int __init kernel_init(void * unused)
         smp_prepare_cpus(setup_max_cpus);
 
         do_pre_smp_initcalls();
+        start_boot_trace();
 
         smp_init();
         sched_init_smp();
@@ -881,6 +882,7 @@ static int __init kernel_init(void * unused)
          * we're essentially up and running. Get rid of the
          * initmem segments and start the user-mode stuff..
          */
+        stop_boot_trace();
         init_post();
         return 0;
 }
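
One note on the init/main.c timing change: the initcall duration is now delta.tv64 >> 10, i.e. nanoseconds divided by 1024, which approximates microseconds (the old >> 20 approximated milliseconds), and each measurement is also handed to the boot tracer via trace_boot(). Replacing the __setup() handler with core_param(initcall_debug, initcall_debug, bool, 0644) keeps initcall_debug working as a boot parameter and, since the mode is writable, should also expose it for run-time toggling under /sys/module/kernel/parameters/.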