Diffstat (limited to 'init')
-rw-r--r--  init/Kconfig  37
-rw-r--r--  init/main.c    3
2 files changed, 20 insertions, 20 deletions
diff --git a/init/Kconfig b/init/Kconfig
index 8dfd094e6875..4e337906016e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -130,13 +130,16 @@ config HAVE_KERNEL_BZIP2
 config HAVE_KERNEL_LZMA
 	bool
 
+config HAVE_KERNEL_XZ
+	bool
+
 config HAVE_KERNEL_LZO
 	bool
 
 choice
 	prompt "Kernel compression mode"
 	default KERNEL_GZIP
-	depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_LZO
+	depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO
 	help
 	  The linux kernel is a kind of self-extracting executable.
 	  Several compression algorithms are available, which differ
@@ -181,6 +184,21 @@ config KERNEL_LZMA
 	  two. Compression is slowest. The kernel size is about 33%
 	  smaller with LZMA in comparison to gzip.
 
+config KERNEL_XZ
+	bool "XZ"
+	depends on HAVE_KERNEL_XZ
+	help
+	  XZ uses the LZMA2 algorithm and instruction set specific
+	  BCJ filters which can improve compression ratio of executable
+	  code. The size of the kernel is about 30% smaller with XZ in
+	  comparison to gzip. On architectures for which there is a BCJ
+	  filter (i386, x86_64, ARM, IA-64, PowerPC, and SPARC), XZ
+	  will create a few percent smaller kernel than plain LZMA.
+
+	  The speed is about the same as with LZMA: The decompression
+	  speed of XZ is better than that of bzip2 but worse than gzip
+	  and LZO. Compression is slow.
+
 config KERNEL_LZO
 	bool "LZO"
 	depends on HAVE_KERNEL_LZO
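
For context, the new KERNEL_XZ entry only becomes selectable once an architecture opts in by selecting HAVE_KERNEL_XZ. A minimal sketch of that wiring, assuming an arch/x86-style Kconfig (the lines below are illustrative and not part of this patch):

# illustrative arch opt-in (not part of this patch)
config X86
	def_bool y
	select HAVE_KERNEL_XZ

With the opt-in in place, the compressor is then chosen in the build configuration, for example CONFIG_KERNEL_XZ=y instead of the default CONFIG_KERNEL_GZIP=y.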
@@ -497,21 +515,6 @@ config RCU_BOOST_DELAY
 
 	  Accept the default if unsure.
 
-config SRCU_SYNCHRONIZE_DELAY
-	int "Microseconds to delay before waiting for readers"
-	range 0 20
-	default 10
-	help
-	  This option controls how long SRCU delays before entering its
-	  loop waiting on SRCU readers. The purpose of this loop is
-	  to avoid the unconditional context-switch penalty that would
-	  otherwise be incurred if there was an active SRCU reader,
-	  in a manner similar to adaptive locking schemes. This should
-	  be set to be a bit longer than the common-case SRCU read-side
-	  critical-section overhead.
-
-	  Accept the default if unsure.
-
 endmenu # "RCU Subsystem"
 
 config IKCONFIG
@@ -673,7 +676,7 @@ config CGROUP_MEM_RES_CTLR_SWAP_ENABLED
 	help
 	  Memory Resource Controller Swap Extension comes with its price in
 	  a bigger memory consumption. General purpose distribution kernels
-	  which want to enable the feautre but keep it disabled by default
+	  which want to enable the feature but keep it disabled by default
 	  and let the user enable it by swapaccount boot command line
 	  parameter should have this option unselected.
 	  For those who want to have the feature enabled by default should
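
The hunk above only corrects a typo in the CGROUP_MEM_RES_CTLR_SWAP_ENABLED help text; the recommended usage is unchanged. A sketch of the distribution-style configuration that help text describes, assuming the memory controller symbol names of this kernel series (swap accounting built in but disabled by default):

# illustrative distribution .config fragment (not part of this patch)
CONFIG_CGROUP_MEM_RES_CTLR=y
CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y
# CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED is not set

Users of such a kernel can then turn swap accounting on at boot time via the swapaccount command line parameter mentioned in the help text.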
diff --git a/init/main.c b/init/main.c
index ea51770c0170..00799c1d4628 100644
--- a/init/main.c
+++ b/init/main.c
@@ -777,9 +777,6 @@ static void __init do_initcalls(void)
 
 	for (fn = __early_initcall_end; fn < __initcall_end; fn++)
 		do_one_initcall(*fn);
-
-	/* Make sure there is no pending stuff from the initcall sequence */
-	flush_scheduled_work();
 }
 
 /*