Diffstat (limited to 'init')
 init/Kconfig            | 143
 init/do_mounts.c        |   1
 init/do_mounts_initrd.c |   4
 init/do_mounts_rd.c     |   1
 init/initramfs.c        |  15
 init/main.c             |  60
 6 files changed, 135 insertions(+), 89 deletions(-)
diff --git a/init/Kconfig b/init/Kconfig
index 313506d8be6e..5cff9a980c39 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -404,6 +404,22 @@ config RCU_FANOUT_EXACT
 
 	  Say N if unsure.
 
+config RCU_FAST_NO_HZ
+	bool "Accelerate last non-dyntick-idle CPU's grace periods"
+	depends on TREE_RCU && NO_HZ && SMP
+	default n
+	help
+	  This option causes RCU to attempt to accelerate grace periods
+	  in order to allow the final CPU to enter dynticks-idle state
+	  more quickly. On the other hand, this option increases the
+	  overhead of the dynticks-idle checking, particularly on systems
+	  with large numbers of CPUs.
+
+	  Say Y if energy efficiency is critically important, particularly
+	  if you have relatively few CPUs.
+
+	  Say N if you are unsure.
+
 config TREE_RCU_TRACE
 	def_bool RCU_TRACE && ( TREE_RCU || TREE_PREEMPT_RCU )
 	select DEBUG_FS
@@ -453,59 +469,9 @@ config LOG_BUF_SHIFT
 config HAVE_UNSTABLE_SCHED_CLOCK
 	bool
 
-config GROUP_SCHED
-	bool "Group CPU scheduler"
-	depends on EXPERIMENTAL
-	default n
-	help
-	  This feature lets CPU scheduler recognize task groups and control CPU
-	  bandwidth allocation to such task groups.
-	  In order to create a group from arbitrary set of processes, use
-	  CONFIG_CGROUPS. (See Control Group support.)
-
-config FAIR_GROUP_SCHED
-	bool "Group scheduling for SCHED_OTHER"
-	depends on GROUP_SCHED
-	default GROUP_SCHED
-
-config RT_GROUP_SCHED
-	bool "Group scheduling for SCHED_RR/FIFO"
-	depends on EXPERIMENTAL
-	depends on GROUP_SCHED
-	default n
-	help
-	  This feature lets you explicitly allocate real CPU bandwidth
-	  to users or control groups (depending on the "Basis for grouping tasks"
-	  setting below. If enabled, it will also make it impossible to
-	  schedule realtime tasks for non-root users until you allocate
-	  realtime bandwidth for them.
-	  See Documentation/scheduler/sched-rt-group.txt for more information.
-
-choice
-	depends on GROUP_SCHED
-	prompt "Basis for grouping tasks"
-	default USER_SCHED
-
-config USER_SCHED
-	bool "user id"
-	help
-	  This option will choose userid as the basis for grouping
-	  tasks, thus providing equal CPU bandwidth to each user.
-
-config CGROUP_SCHED
-	bool "Control groups"
-	depends on CGROUPS
-	help
-	  This option allows you to create arbitrary task groups
-	  using the "cgroup" pseudo filesystem and control
-	  the cpu bandwidth allocated to each such task group.
-	  Refer to Documentation/cgroups/cgroups.txt for more
-	  information on "cgroup" pseudo filesystem.
-
-endchoice
-
 menuconfig CGROUPS
 	boolean "Control Group support"
+	depends on EVENTFD
 	help
 	  This option adds support for grouping sets of processes together, for
 	  use with process control subsystems such as Cpusets, CFS, memory
@@ -624,6 +590,62 @@ config CGROUP_MEM_RES_CTLR_SWAP
 	  Now, memory usage of swap_cgroup is 2 bytes per entry. If swap page
 	  size is 4096bytes, 512k per 1Gbytes of swap.
 
+menuconfig CGROUP_SCHED
+	bool "Group CPU scheduler"
+	depends on EXPERIMENTAL && CGROUPS
+	default n
+	help
+	  This feature lets CPU scheduler recognize task groups and control CPU
+	  bandwidth allocation to such task groups. It uses cgroups to group
+	  tasks.
+
+if CGROUP_SCHED
+config FAIR_GROUP_SCHED
+	bool "Group scheduling for SCHED_OTHER"
+	depends on CGROUP_SCHED
+	default CGROUP_SCHED
+
+config RT_GROUP_SCHED
+	bool "Group scheduling for SCHED_RR/FIFO"
+	depends on EXPERIMENTAL
+	depends on CGROUP_SCHED
+	default n
+	help
+	  This feature lets you explicitly allocate real CPU bandwidth
+	  to task groups. If enabled, it will also make it impossible to
+	  schedule realtime tasks for non-root users until you allocate
+	  realtime bandwidth for them.
+	  See Documentation/scheduler/sched-rt-group.txt for more information.
+
+endif #CGROUP_SCHED
+
+config BLK_CGROUP
+	tristate "Block IO controller"
+	depends on CGROUPS && BLOCK
+	default n
+	---help---
+	  Generic block IO controller cgroup interface. This is the common
+	  cgroup interface which should be used by various IO controlling
+	  policies.
+
+	  Currently, CFQ IO scheduler uses it to recognize task groups and
+	  control disk bandwidth allocation (proportional time slice allocation)
+	  to such task groups.
+
+	  This option only enables generic Block IO controller infrastructure.
+	  One needs to also enable actual IO controlling logic in CFQ for it
+	  to take effect. (CONFIG_CFQ_GROUP_IOSCHED=y).
+
+	  See Documentation/cgroups/blkio-controller.txt for more information.
+
+config DEBUG_BLK_CGROUP
+	bool "Enable Block IO controller debugging"
+	depends on BLK_CGROUP
+	default n
+	---help---
+	  Enable some debugging help. Currently it exports additional stat
+	  files in a cgroup which can be useful for debugging.
+
 endif # CGROUPS
 
 config MM_OWNER
@@ -984,19 +1006,6 @@ config PERF_EVENTS
 
 	  Say Y if unsure.
 
-config EVENT_PROFILE
-	bool "Tracepoint profiling sources"
-	depends on PERF_EVENTS && EVENT_TRACING
-	default y
-	help
-	  Allow the use of tracepoints as software performance events.
-
-	  When this is enabled, you can create perf events based on
-	  tracepoints using PERF_TYPE_TRACEPOINT and the tracepoint ID
-	  found in debugfs://tracing/events/*/*/id. (The -e/--events
-	  option to the perf tool can parse and interpret symbolic
-	  tracepoints, in the subsystem:tracepoint_name format.)
-
 config PERF_COUNTERS
 	bool "Kernel performance counters (old config option)"
 	depends on HAVE_PERF_EVENTS
@@ -1120,7 +1129,7 @@ config MMAP_ALLOW_UNINITIALIZED
 	  See Documentation/nommu-mmap.txt for more information.
 
 config PROFILING
-	bool "Profiling support (EXPERIMENTAL)"
+	bool "Profiling support"
 	help
 	  Say Y here to enable the extended profiling support mechanisms used
 	  by profilers such as OProfile.
@@ -1270,4 +1279,8 @@ source "block/Kconfig"
 config PREEMPT_NOTIFIERS
 	bool
 
+config PADATA
+	depends on SMP
+	bool
+
 source "kernel/Kconfig.locks"
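
The Kconfig changes above drop the old GROUP_SCHED/USER_SCHED machinery and hang the group CPU scheduler and the new block IO controller directly off CGROUPS. For orientation only, a kernel configured with these options enabled might carry a .config fragment roughly like the following; this is an illustrative sketch, not part of the patch, and CFQ_GROUP_IOSCHED is simply the CFQ-side switch that the BLK_CGROUP help text refers to:

	CONFIG_EVENTFD=y
	CONFIG_CGROUPS=y
	CONFIG_CGROUP_SCHED=y
	CONFIG_FAIR_GROUP_SCHED=y
	# CONFIG_RT_GROUP_SCHED is not set
	CONFIG_BLK_CGROUP=y
	CONFIG_CFQ_GROUP_IOSCHED=y
	CONFIG_RCU_FAST_NO_HZ=y
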
diff --git a/init/do_mounts.c b/init/do_mounts.c
index bb008d064c1a..02e3ca4fc527 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -15,6 +15,7 @@
 #include <linux/initrd.h>
 #include <linux/async.h>
 #include <linux/fs_struct.h>
+#include <linux/slab.h>
 
 #include <linux/nfs_fs.h>
 #include <linux/nfs_fs_sb.h>
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index 614241b5200c..2b108538d0d9 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -30,11 +30,7 @@ static int __init do_linuxrc(void * shell)
 	extern char * envp_init[];
 
 	sys_close(old_fd);sys_close(root_fd);
-	sys_close(0);sys_close(1);sys_close(2);
 	sys_setsid();
-	(void) sys_open("/dev/console",O_RDWR,0);
-	(void) sys_dup(0);
-	(void) sys_dup(0);
 	return kernel_execve(shell, argv, envp_init);
 }
 
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index 027a402708de..bf3ef667bf36 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -7,6 +7,7 @@
 #include <linux/cramfs_fs.h>
 #include <linux/initrd.h>
 #include <linux/string.h>
+#include <linux/slab.h>
 
 #include "do_mounts.h"
 #include "../fs/squashfs/squashfs_fs.h"
diff --git a/init/initramfs.c b/init/initramfs.c
index b37d34beb90b..4b9c20205092 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -457,7 +457,8 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len)
 				compress_name);
 			message = msg_buf;
 		}
-	}
+	} else
+		error("junk in compressed archive");
 	if (state != Reset)
 		error("junk in compressed archive");
 	this_header = saved_offset + my_inptr;
@@ -525,7 +526,7 @@ static void __init clean_rootfs(void)
 	int fd;
 	void *buf;
 	struct linux_dirent64 *dirp;
-	int count;
+	int num;
 
 	fd = sys_open("/", O_RDONLY, 0);
 	WARN_ON(fd < 0);
@@ -539,9 +540,9 @@ static void __init clean_rootfs(void)
 	}
 
 	dirp = buf;
-	count = sys_getdents64(fd, dirp, BUF_SIZE);
-	while (count > 0) {
-		while (count > 0) {
+	num = sys_getdents64(fd, dirp, BUF_SIZE);
+	while (num > 0) {
+		while (num > 0) {
 			struct stat st;
 			int ret;
 
@@ -554,12 +555,12 @@ static void __init clean_rootfs(void)
 				sys_unlink(dirp->d_name);
 			}
 
-			count -= dirp->d_reclen;
+			num -= dirp->d_reclen;
 			dirp = (void *)dirp + dirp->d_reclen;
 		}
 		dirp = buf;
 		memset(buf, 0, BUF_SIZE);
-		count = sys_getdents64(fd, dirp, BUF_SIZE);
+		num = sys_getdents64(fd, dirp, BUF_SIZE);
 	}
 
 	sys_close(fd);
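
The clean_rootfs() loop above walks a getdents64 buffer record by record, advancing by d_reclen until the byte count returned by sys_getdents64() is consumed, then refills the buffer. A minimal userspace analogue of the same pattern, using the raw syscall, looks roughly like this; it is an illustrative sketch only (not kernel code), with the record layout taken from the getdents64(2) manual page:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* One getdents64 record, as documented in getdents64(2). */
	struct linux_dirent64 {
		unsigned long long d_ino;
		long long          d_off;
		unsigned short     d_reclen;  /* size of this whole record */
		unsigned char      d_type;
		char               d_name[];  /* NUL-terminated file name */
	};

	int main(void)
	{
		char buf[4096];
		long n;
		int fd = open("/", O_RDONLY | O_DIRECTORY);

		if (fd < 0)
			return 1;
		/* Same shape as the clean_rootfs() loop: refill, then step by d_reclen. */
		while ((n = syscall(SYS_getdents64, fd, buf, sizeof(buf))) > 0) {
			long off = 0;
			while (off < n) {
				struct linux_dirent64 *d = (struct linux_dirent64 *)(buf + off);
				puts(d->d_name);
				off += d->d_reclen;   /* advance by record length */
			}
		}
		close(fd);
		return 0;
	}
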
diff --git a/init/main.c b/init/main.c
index dac44a9356a5..a42fdf4aeba9 100644
--- a/init/main.c
+++ b/init/main.c
@@ -25,7 +25,6 @@
 #include <linux/bootmem.h>
 #include <linux/acpi.h>
 #include <linux/tty.h>
-#include <linux/gfp.h>
 #include <linux/percpu.h>
 #include <linux/kmod.h>
 #include <linux/vmalloc.h>
@@ -63,12 +62,14 @@
 #include <linux/sched.h>
 #include <linux/signal.h>
 #include <linux/idr.h>
+#include <linux/kgdb.h>
 #include <linux/ftrace.h>
 #include <linux/async.h>
 #include <linux/kmemcheck.h>
 #include <linux/kmemtrace.h>
 #include <linux/sfi.h>
 #include <linux/shmem_fs.h>
+#include <linux/slab.h>
 #include <trace/boot.h>
 
 #include <asm/io.h>
@@ -124,7 +125,9 @@ static char *ramdisk_execute_command;
 
 #ifdef CONFIG_SMP
 /* Setup configured maximum number of CPUs to activate */
-unsigned int __initdata setup_max_cpus = NR_CPUS;
+unsigned int setup_max_cpus = NR_CPUS;
+EXPORT_SYMBOL(setup_max_cpus);
+
 
 /*
  * Setup routine for controlling SMP activation
@@ -149,6 +152,20 @@ static int __init nosmp(char *str)
 
 early_param("nosmp", nosmp);
 
+/* this is hard limit */
+static int __init nrcpus(char *str)
+{
+	int nr_cpus;
+
+	get_option(&str, &nr_cpus);
+	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
+		nr_cpu_ids = nr_cpus;
+
+	return 0;
+}
+
+early_param("nr_cpus", nrcpus);
+
 static int __init maxcpus(char *str)
 {
 	get_option(&str, &setup_max_cpus);
@@ -160,7 +177,7 @@ static int __init maxcpus(char *str)
 
 early_param("maxcpus", maxcpus);
 #else
-const unsigned int setup_max_cpus = NR_CPUS;
+static const unsigned int setup_max_cpus = NR_CPUS;
 #endif
 
 /*
@@ -407,16 +424,26 @@ static void __init setup_command_line(char *command_line)
  * gcc-3.4 accidentally inlines this function, so use noinline.
  */
 
+static __initdata DECLARE_COMPLETION(kthreadd_done);
+
 static noinline void __init_refok rest_init(void)
 	__releases(kernel_lock)
 {
 	int pid;
 
 	rcu_scheduler_starting();
+	/*
+	 * We need to spawn init first so that it obtains pid 1, however
+	 * the init task will end up wanting to create kthreads, which, if
+	 * we schedule it before we create kthreadd, will OOPS.
+	 */
 	kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
 	numa_default_policy();
 	pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
+	rcu_read_lock();
 	kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
+	rcu_read_unlock();
+	complete(&kthreadd_done);
 	unlock_kernel();
 
 	/*
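
The new kthreadd_done completion above pairs with the wait_for_completion() added to kernel_init() further down: init is spawned first so it gets pid 1, but it must not start creating kthreads until kthreadd exists. Reduced to its bare shape, the handoff is the standard producer/consumer completion pattern; the sketch below is illustrative only, with hypothetical names:

	static __initdata DECLARE_COMPLETION(setup_done);

	/* Consumer: spawned early, but must not touch the shared state yet. */
	static int consumer_thread(void *unused)
	{
		wait_for_completion(&setup_done);   /* sleep until signalled */
		/* ... shared state (e.g. kthreadd_task) is now valid ... */
		return 0;
	}

	/* Producer: finishes the setup, then releases the waiter. */
	static void __init producer(void)
	{
		/* ... create the shared state ... */
		complete(&setup_done);              /* wake the waiter */
	}
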
@@ -550,7 +577,7 @@ asmlinkage void __init start_kernel(void)
 	setup_per_cpu_areas();
 	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */
 
-	build_all_zonelists();
+	build_all_zonelists(NULL);
 	page_alloc_init();
 
 	printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
@@ -584,6 +611,7 @@ asmlinkage void __init start_kernel(void)
 		local_irq_disable();
 	}
 	rcu_init();
+	radix_tree_init();
 	/* init some links before init_ISA_irqs() */
 	early_irq_init();
 	init_IRQ();
@@ -601,7 +629,7 @@ asmlinkage void __init start_kernel(void)
 	local_irq_enable();
 
 	/* Interrupts are enabled now so all GFP allocations are safe. */
-	set_gfp_allowed_mask(__GFP_BITS_MASK);
+	gfp_allowed_mask = __GFP_BITS_MASK;
 
 	kmem_cache_init_late();
 
@@ -658,8 +686,8 @@ asmlinkage void __init start_kernel(void)
 	buffer_init();
 	key_init();
 	security_init();
+	dbg_late_init();
 	vfs_caches_init(totalram_pages);
-	radix_tree_init();
 	signals_init();
 	/* rootfs populating might need page-writeback */
 	page_writeback_init();
@@ -806,11 +834,6 @@ static noinline int init_post(void)
 	system_state = SYSTEM_RUNNING;
 	numa_default_policy();
 
-	if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
-		printk(KERN_WARNING "Warning: unable to open an initial console.\n");
-
-	(void) sys_dup(0);
-	(void) sys_dup(0);
 
 	current->signal->flags |= SIGNAL_UNKILLABLE;
 
@@ -836,17 +859,22 @@ static noinline int init_post(void)
 	run_init_process("/bin/init");
 	run_init_process("/bin/sh");
 
-	panic("No init found. Try passing init= option to kernel.");
+	panic("No init found. Try passing init= option to kernel. "
+	      "See Linux Documentation/init.txt for guidance.");
 }
 
 static int __init kernel_init(void * unused)
 {
+	/*
+	 * Wait until kthreadd is all set-up.
+	 */
+	wait_for_completion(&kthreadd_done);
 	lock_kernel();
 
 	/*
 	 * init can allocate pages on any node
 	 */
-	set_mems_allowed(node_possible_map);
+	set_mems_allowed(node_states[N_HIGH_MEMORY]);
 	/*
 	 * init can run on any cpu.
 	 */
@@ -873,6 +901,12 @@ static int __init kernel_init(void * unused)
 
 	do_basic_setup();
 
+	/* Open the /dev/console on the rootfs, this should never fail */
+	if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
+		printk(KERN_WARNING "Warning: unable to open an initial console.\n");
+
+	(void) sys_dup(0);
+	(void) sys_dup(0);
 	/*
 	 * check if there is an early userspace init. If yes, let it do all
 	 * the work