Diffstat (limited to 'init')
-rw-r--r--  init/Kconfig      | 88
-rw-r--r--  init/do_mounts.c  |  2
-rw-r--r--  init/main.c       |  8
3 files changed, 91 insertions(+), 7 deletions(-)
diff --git a/init/Kconfig b/init/Kconfig
index dd43d8e22980..4f6cdbf523eb 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -130,13 +130,16 @@ config HAVE_KERNEL_BZIP2
 config HAVE_KERNEL_LZMA
         bool
 
+config HAVE_KERNEL_XZ
+        bool
+
 config HAVE_KERNEL_LZO
         bool
 
 choice
         prompt "Kernel compression mode"
         default KERNEL_GZIP
-        depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_LZO
+        depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO
         help
           The linux kernel is a kind of self-extracting executable.
           Several compression algorithms are available, which differ
@@ -181,6 +184,21 @@ config KERNEL_LZMA
           two. Compression is slowest. The kernel size is about 33%
           smaller with LZMA in comparison to gzip.
 
+config KERNEL_XZ
+        bool "XZ"
+        depends on HAVE_KERNEL_XZ
+        help
+          XZ uses the LZMA2 algorithm and instruction set specific
+          BCJ filters which can improve compression ratio of executable
+          code. The size of the kernel is about 30% smaller with XZ in
+          comparison to gzip. On architectures for which there is a BCJ
+          filter (i386, x86_64, ARM, IA-64, PowerPC, and SPARC), XZ
+          will create a few percent smaller kernel than plain LZMA.
+
+          The speed is about the same as with LZMA: the decompression
+          speed of XZ is better than that of bzip2 but worse than gzip
+          and LZO. Compression is slow.
+
 config KERNEL_LZO
         bool "LZO"
         depends on HAVE_KERNEL_LZO
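
A note on the new KERNEL_XZ help text above: the combination it describes, a BCJ filter placed in front of LZMA2, can be illustrated with the userspace liblzma API. The sketch below only demonstrates that filter chain; it is not the kernel build's own compression path, and the choice of the x86 BCJ filter, the default preset, and single-shot buffers are assumptions made for brevity. Build with -llzma.

#include <lzma.h>
#include <stddef.h>
#include <stdint.h>

/* Compress `in` into `out` with an x86 BCJ filter in front of LZMA2,
 * mirroring the idea behind KERNEL_XZ for executable code. */
static int xz_bcj_compress(const uint8_t *in, size_t in_len,
                           uint8_t *out, size_t out_cap, size_t *out_len)
{
        lzma_options_lzma opt;

        if (lzma_lzma_preset(&opt, LZMA_PRESET_DEFAULT))
                return -1;

        lzma_filter filters[] = {
                { .id = LZMA_FILTER_X86,   .options = NULL },  /* BCJ pass */
                { .id = LZMA_FILTER_LZMA2, .options = &opt },
                { .id = LZMA_VLI_UNKNOWN,  .options = NULL },  /* terminator */
        };

        lzma_stream strm = LZMA_STREAM_INIT;
        if (lzma_stream_encoder(&strm, filters, LZMA_CHECK_CRC32) != LZMA_OK)
                return -1;

        strm.next_in = in;
        strm.avail_in = in_len;
        strm.next_out = out;
        strm.avail_out = out_cap;

        lzma_ret ret = lzma_code(&strm, LZMA_FINISH);  /* single-shot encode */
        *out_len = out_cap - strm.avail_out;
        lzma_end(&strm);
        return ret == LZMA_STREAM_END ? 0 : -1;
}
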
@@ -393,7 +411,6 @@ config PREEMPT_RCU
 
 config RCU_TRACE
         bool "Enable tracing for RCU"
-        depends on TREE_RCU || TREE_PREEMPT_RCU
         help
           This option provides tracing in RCU which presents stats
           in debugfs for debugging RCU implementation.
@@ -459,6 +476,60 @@ config TREE_RCU_TRACE
           TREE_PREEMPT_RCU implementations, permitting Makefile to
           trivially select kernel/rcutree_trace.c.
 
+config RCU_BOOST
+        bool "Enable RCU priority boosting"
+        depends on RT_MUTEXES && TINY_PREEMPT_RCU
+        default n
+        help
+          This option boosts the priority of preempted RCU readers that
+          block the current preemptible RCU grace period for too long.
+          This option also prevents heavy loads from blocking RCU
+          callback invocation for all flavors of RCU.
+
+          Say Y here if you are working with real-time apps or heavy loads.
+          Say N here if you are unsure.
+
+config RCU_BOOST_PRIO
+        int "Real-time priority to boost RCU readers to"
+        range 1 99
+        depends on RCU_BOOST
+        default 1
+        help
+          This option specifies the real-time priority to which preempted
+          RCU readers are to be boosted. If you are working with CPU-bound
+          real-time applications, you should specify a priority higher than
+          the highest-priority CPU-bound application.
+
+          Specify the real-time priority, or take the default if unsure.
+
+config RCU_BOOST_DELAY
+        int "Milliseconds to delay boosting after RCU grace-period start"
+        range 0 3000
+        depends on RCU_BOOST
+        default 500
+        help
+          This option specifies the time to wait after the beginning of
+          a given grace period before priority-boosting preempted RCU
+          readers blocking that grace period. Note that any RCU reader
+          blocking an expedited RCU grace period is boosted immediately.
+
+          Accept the default if unsure.
+
+config SRCU_SYNCHRONIZE_DELAY
+        int "Microseconds to delay before waiting for readers"
+        range 0 20
+        default 10
+        help
+          This option controls how long SRCU delays before entering its
+          loop waiting on SRCU readers. The purpose of this loop is
+          to avoid the unconditional context-switch penalty that would
+          otherwise be incurred if there were an active SRCU reader,
+          in a manner similar to adaptive locking schemes. This should
+          be set to be a bit longer than the common-case SRCU read-side
+          critical-section overhead.
+
+          Accept the default if unsure.
+
 endmenu # "RCU Subsystem"
 
 config IKCONFIG
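
For context on what RCU_BOOST acts on: under preemptible RCU a reader is any task between rcu_read_lock() and rcu_read_unlock(); if it is preempted there, it can hold up the grace period that synchronize_rcu() waits for, and boosting raises that reader's priority once RCU_BOOST_DELAY milliseconds have passed. The kernel-style sketch below shows the reader/updater shape involved; struct cfg and its functions are hypothetical illustrations, not code from this patch, and a single updater is assumed (hence the bare rcu_dereference_protected() condition).

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical RCU-protected configuration blob. */
struct cfg {
        int threshold;
};

static struct cfg __rcu *global_cfg;

/* Reader: may be preempted inside the critical section under preemptible
 * RCU; RCU_BOOST raises its priority if it stalls the grace period. */
static int read_threshold(void)
{
        struct cfg *c;
        int val = 0;

        rcu_read_lock();
        c = rcu_dereference(global_cfg);
        if (c)
                val = c->threshold;
        rcu_read_unlock();
        return val;
}

/* Updater: synchronize_rcu() cannot return until every pre-existing
 * reader has left its critical section. */
static void set_threshold(int threshold)
{
        struct cfg *newc, *oldc;

        newc = kmalloc(sizeof(*newc), GFP_KERNEL);
        if (!newc)
                return;
        newc->threshold = threshold;

        oldc = rcu_dereference_protected(global_cfg, 1);  /* single updater assumed */
        rcu_assign_pointer(global_cfg, newc);
        synchronize_rcu();      /* wait for pre-existing readers */
        kfree(oldc);
}
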
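Similarly, the SRCU_SYNCHRONIZE_DELAY help describes an adaptive wait: spin for roughly one read-side critical section's worth of time before paying the context-switch cost of blocking. The userspace sketch below shows only that generic pattern, not SRCU's actual implementation; the active_readers counter and the 1 ms sleeping fallback are assumptions made to keep the example self-contained (compile with -std=c11).

#include <stdatomic.h>
#include <time.h>

/* Hypothetical count of currently active readers, maintained elsewhere. */
static atomic_int active_readers;

/* Spin for about `spin_us` microseconds first (cf. SRCU_SYNCHRONIZE_DELAY),
 * then fall back to sleeping polls, the costly path the spin tries to avoid. */
static void wait_for_readers(long spin_us)
{
        struct timespec start, now;
        struct timespec pause = { 0, 1000000 };  /* 1 ms */

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
                if (atomic_load(&active_readers) == 0)
                        return;                  /* common case: no context switch */
                clock_gettime(CLOCK_MONOTONIC, &now);
                if ((now.tv_sec - start.tv_sec) * 1000000L +
                    (now.tv_nsec - start.tv_nsec) / 1000L >= spin_us)
                        break;
        }
        while (atomic_load(&active_readers) != 0)
                nanosleep(&pause, NULL);         /* give up the CPU instead */
}
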
@@ -741,6 +812,19 @@ config NET_NS
 
 endif # NAMESPACES
 
+config SCHED_AUTOGROUP
+        bool "Automatic process group scheduling"
+        select EVENTFD
+        select CGROUPS
+        select CGROUP_SCHED
+        select FAIR_GROUP_SCHED
+        help
+          This option optimizes the scheduler for common desktop workloads by
+          automatically creating and populating task groups. This separation
+          of workloads isolates aggressive CPU burners (like build jobs) from
+          desktop applications. Task group autogeneration is currently based
+          upon task session.
+
 config MM_OWNER
         bool
 
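
On SCHED_AUTOGROUP: the "task session" its help text refers to is the POSIX session, so every task sharing a setsid()-created session lands in the same autogroup and competes with other sessions as a single scheduling entity. Below is a minimal userspace sketch of a launcher that runs a CPU-heavy command in its own session; the program is purely illustrative and not part of this patch.

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Run a command in its own session: with SCHED_AUTOGROUP enabled, the new
 * session gets its own task group, so a parallel build launched this way
 * competes with the desktop as one group rather than as N runnable tasks. */
int main(int argc, char **argv)
{
        pid_t pid;

        if (argc < 2) {
                fprintf(stderr, "usage: %s command [args...]\n", argv[0]);
                return 1;
        }

        pid = fork();
        if (pid < 0) {
                perror("fork");
                return 1;
        }
        if (pid == 0) {
                if (setsid() < 0)               /* new session => new autogroup */
                        perror("setsid");
                execvp(argv[1], &argv[1]);
                perror("execvp");
                _exit(127);
        }
        waitpid(pid, NULL, 0);
        return 0;
}

Kernels carrying this feature also expose the grouping in /proc/<pid>/autogroup and a global toggle in /proc/sys/kernel/sched_autogroup_enabled.
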
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 830aaec9c7d5..2b54bef33b55 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -93,7 +93,7 @@ no_match:
  *
  * Returns the matching dev_t on success or 0 on failure.
  */
-static dev_t __init devt_from_partuuid(char *uuid_str)
+static dev_t devt_from_partuuid(char *uuid_str)
 {
         dev_t res = 0;
         struct device *dev = NULL;
diff --git a/init/main.c b/init/main.c
index 8646401f7a0e..00799c1d4628 100644
--- a/init/main.c
+++ b/init/main.c
@@ -67,6 +67,7 @@
 #include <linux/sfi.h>
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
+#include <linux/perf_event.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -603,6 +604,8 @@ asmlinkage void __init start_kernel(void)
603 "enabled *very* early, fixing it\n"); 604 "enabled *very* early, fixing it\n");
604 local_irq_disable(); 605 local_irq_disable();
605 } 606 }
607 idr_init_cache();
608 perf_event_init();
606 rcu_init(); 609 rcu_init();
607 radix_tree_init(); 610 radix_tree_init();
608 /* init some links before init_ISA_irqs() */ 611 /* init some links before init_ISA_irqs() */
@@ -658,7 +661,6 @@ asmlinkage void __init start_kernel(void)
         enable_debug_pagealloc();
         kmemleak_init();
         debug_objects_mem_init();
-        idr_init_cache();
         setup_per_cpu_pageset();
         numa_policy_init();
         if (late_time_init)
@@ -775,9 +777,6 @@ static void __init do_initcalls(void)
 
         for (fn = __early_initcall_end; fn < __initcall_end; fn++)
                 do_one_initcall(*fn);
-
-        /* Make sure there is no pending stuff from the initcall sequence */
-        flush_scheduled_work();
 }
 
 /*
@@ -882,6 +881,7 @@ static int __init kernel_init(void * unused)
         smp_prepare_cpus(setup_max_cpus);
 
         do_pre_smp_initcalls();
+        lockup_detector_init();
 
         smp_init();
         sched_init_smp();