author     Linus Torvalds <torvalds@linux-foundation.org>  2013-07-09 16:33:36 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-07-09 16:33:36 -0400
commit     a82a729f04232ccd0b59406574ba4cf20027a49d (patch)
tree       da5912344b00ed60a1a653fc2442db7425db289d
parent     899dd388853071f5c8848545209d4e2c5d95b1d9 (diff)
parent     27daabd9b6a157c34a6e7a7f509fa26866e6420f (diff)
Merge branch 'akpm' (updates from Andrew Morton)
Merge second patch-bomb from Andrew Morton:
- misc fixes
- audit stuff
- fanotify/inotify/dnotify things
- most of the rest of MM. The new cache shrinker code from Glauber and
Dave Chinner probably isn't quite stabilized yet.
- ptrace
- ipc
- partitions
- reboot cleanups
- add LZ4 decompressor, use it for kernel compression
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (118 commits)
lib/scatterlist: error handling in __sg_alloc_table()
scsi_debug: fix do_device_access() with wrap around range
crypto: talitos: use sg_pcopy_to_buffer()
lib/scatterlist: introduce sg_pcopy_from_buffer() and sg_pcopy_to_buffer()
lib/scatterlist: factor out sg_miter_get_next_page() from sg_miter_next()
crypto: add lz4 Cryptographic API
lib: add lz4 compressor module
arm: add support for LZ4-compressed kernel
lib: add support for LZ4-compressed kernel
decompressor: add LZ4 decompressor module
lib: add weak clz/ctz functions
reboot: move arch/x86 reboot= handling to generic kernel
reboot: arm: change reboot_mode to use enum reboot_mode
reboot: arm: prepare reboot_mode for moving to generic kernel code
reboot: arm: remove unused restart_mode fields from some arm subarchs
reboot: unicore32: prepare reboot_mode for moving to generic kernel code
reboot: x86: prepare reboot_mode for moving to generic kernel code
reboot: checkpatch.pl the new kernel/reboot.c file
reboot: move shutdown/reboot related functions to kernel/reboot.c
reboot: remove -stable friendly PF_THREAD_BOUND define
...
238 files changed, 4461 insertions, 1691 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 25dc4a0e7e48..75236f1972d9 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -2681,9 +2681,17 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
2681 | Run specified binary instead of /init from the ramdisk, | 2681 | Run specified binary instead of /init from the ramdisk, |
2682 | used for early userspace startup. See initrd. | 2682 | used for early userspace startup. See initrd. |
2683 | 2683 | ||
2684 | reboot= [BUGS=X86-32,BUGS=ARM,BUGS=IA-64] Rebooting mode | 2684 | reboot= [KNL] |
2685 | Format: <reboot_mode>[,<reboot_mode2>[,...]] | 2685 | Format (x86 or x86_64): |
2686 | See arch/*/kernel/reboot.c or arch/*/kernel/process.c | 2686 | [w[arm] | c[old] | h[ard] | s[oft] | g[pio]] \ |
2687 | [[,]s[mp]#### \ | ||
2688 | [[,]b[ios] | a[cpi] | k[bd] | t[riple] | e[fi] | p[ci]] \ | ||
2689 | [[,]f[orce] | ||
2690 | Where reboot_mode is one of warm (soft) or cold (hard) or gpio, | ||
2691 | reboot_type is one of bios, acpi, kbd, triple, efi, or pci, | ||
2692 | reboot_force is either force or not specified, | ||
2693 | reboot_cpu is s[mp]#### with #### being the processor | ||
2694 | to be used for rebooting. | ||
2687 | 2695 | ||
2688 | relax_domain_level= | 2696 | relax_domain_level= |
2689 | [KNL, SMP] Set scheduler's default relax_domain_level. | 2697 | [KNL, SMP] Set scheduler's default relax_domain_level. |
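The hunk above replaces the old "see arch/*/kernel/reboot.c" pointer with a concrete grammar for the now-generic reboot= option. As a rough, stand-alone illustration of how one comma-separated string decomposes into mode, type, CPU and force fields, here is a user-space C sketch; the enum values mirror the ones this series introduces, but the parsing helper and its exact behaviour are simplified assumptions, not the kernel/reboot.c implementation:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum reboot_mode { REBOOT_COLD, REBOOT_WARM, REBOOT_HARD, REBOOT_SOFT, REBOOT_GPIO };

struct reboot_opts {
	enum reboot_mode mode;	/* w[arm], c[old], h[ard], s[oft], g[pio] */
	char type;		/* b/a/k/t/e/p: bios, acpi, kbd, triple, efi, pci */
	int cpu;		/* s[mp]####: processor used for rebooting */
	int force;		/* f[orce] */
};

/* Parse a "reboot=" style string such as "warm,s4,efi,force". */
static void parse_reboot_arg(const char *arg, struct reboot_opts *opts)
{
	char buf[64];
	char *tok, *save;

	strncpy(buf, arg, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';

	for (tok = strtok_r(buf, ",", &save); tok; tok = strtok_r(NULL, ",", &save)) {
		switch (tok[0]) {
		case 'w': opts->mode = REBOOT_WARM; break;
		case 'c': opts->mode = REBOOT_COLD; break;
		case 'h': opts->mode = REBOOT_HARD; break;
		case 'g': opts->mode = REBOOT_GPIO; break;
		case 's':
			/* "s<num>" or "smp<num>" picks the reboot CPU, bare "s" is soft */
			if (isdigit((unsigned char)tok[1]))
				opts->cpu = atoi(tok + 1);
			else if (tok[1] == 'm' && tok[2] == 'p' &&
				 isdigit((unsigned char)tok[3]))
				opts->cpu = atoi(tok + 3);
			else
				opts->mode = REBOOT_SOFT;
			break;
		case 'b': case 'a': case 'k': case 't': case 'e': case 'p':
			opts->type = tok[0];	/* reboot_type selector */
			break;
		case 'f':
			opts->force = 1;
			break;
		}
	}
}

int main(void)
{
	struct reboot_opts o = { .mode = REBOOT_COLD, .type = 'a', .cpu = 0, .force = 0 };

	parse_reboot_arg("warm,s4,efi,force", &o);
	printf("mode=%d type=%c cpu=%d force=%d\n", o.mode, o.type, o.cpu, o.force);
	return 0;
}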
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index dcc75a9ed919..36ecc26c7433 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt | |||
@@ -510,7 +510,7 @@ Specify "[Dd]efault" to request automatic configuration. Autoconfiguration | |||
510 | will select "node" order in following case. | 510 | will select "node" order in following case. |
511 | (1) if the DMA zone does not exist or | 511 | (1) if the DMA zone does not exist or |
512 | (2) if the DMA zone comprises greater than 50% of the available memory or | 512 | (2) if the DMA zone comprises greater than 50% of the available memory or |
513 | (3) if any node's DMA zone comprises greater than 60% of its local memory and | 513 | (3) if any node's DMA zone comprises greater than 70% of its local memory and |
514 | the amount of local memory is big enough. | 514 | the amount of local memory is big enough. |
515 | 515 | ||
516 | Otherwise, "zone" order will be selected. Default order is recommended unless | 516 | Otherwise, "zone" order will be selected. Default order is recommended unless |
diff --git a/Documentation/vm/transhuge.txt b/Documentation/vm/transhuge.txt index 8785fb87d9c7..4a63953a41f1 100644 --- a/Documentation/vm/transhuge.txt +++ b/Documentation/vm/transhuge.txt | |||
@@ -120,8 +120,8 @@ By default kernel tries to use huge zero page on read page fault. | |||
120 | It's possible to disable huge zero page by writing 0 or enable it | 120 | It's possible to disable huge zero page by writing 0 or enable it |
121 | back by writing 1: | 121 | back by writing 1: |
122 | 122 | ||
123 | echo 0 >/sys/kernel/mm/transparent_hugepage/khugepaged/use_zero_page | 123 | echo 0 >/sys/kernel/mm/transparent_hugepage/use_zero_page |
124 | echo 1 >/sys/kernel/mm/transparent_hugepage/khugepaged/use_zero_page | 124 | echo 1 >/sys/kernel/mm/transparent_hugepage/use_zero_page |
125 | 125 | ||
126 | khugepaged will be automatically started when | 126 | khugepaged will be automatically started when |
127 | transparent_hugepage/enabled is set to "always" or "madvise, and it'll | 127 | transparent_hugepage/enabled is set to "always" or "madvise, and it'll |
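Since the use_zero_page attribute moved out of the khugepaged directory, user space now pokes it at the new location. A minimal C equivalent of the echo commands quoted above, assuming the post-patch sysfs path and a kernel built with transparent hugepage support (needs sufficient privileges; error handling kept deliberately small):

#include <stdio.h>

/* Enable or disable the huge zero page via the relocated sysfs attribute. */
static int set_use_zero_page(int enable)
{
	const char *path = "/sys/kernel/mm/transparent_hugepage/use_zero_page";
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", enable ? 1 : 0);
	return fclose(f);
}

int main(void)
{
	if (set_use_zero_page(1) != 0)
		perror("use_zero_page");
	return 0;
}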
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt index 3840b6f28afb..fc66d42422ee 100644 --- a/Documentation/x86/boot.txt +++ b/Documentation/x86/boot.txt | |||
@@ -657,9 +657,10 @@ Protocol: 2.08+ | |||
657 | uncompressed data should be determined using the standard magic | 657 | uncompressed data should be determined using the standard magic |
658 | numbers. The currently supported compression formats are gzip | 658 | numbers. The currently supported compression formats are gzip |
659 | (magic numbers 1F 8B or 1F 9E), bzip2 (magic number 42 5A), LZMA | 659 | (magic numbers 1F 8B or 1F 9E), bzip2 (magic number 42 5A), LZMA |
660 | (magic number 5D 00), and XZ (magic number FD 37). The uncompressed | 660 | (magic number 5D 00), XZ (magic number FD 37), and LZ4 (magic number |
661 | payload is currently always ELF (magic number 7F 45 4C 46). | 661 | 02 21). The uncompressed payload is currently always ELF (magic |
662 | 662 | number 7F 45 4C 46). | |
663 | |||
663 | Field name: payload_length | 664 | Field name: payload_length |
664 | Type: read | 665 | Type: read |
665 | Offset/size: 0x24c/4 | 666 | Offset/size: 0x24c/4 |
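For reference, the formats listed above are distinguished purely by their leading magic bytes. Below is a small, self-contained C sketch that classifies a payload buffer by those bytes; the function name and probe order are illustrative only, not how the real decompressor selection is wired up in the boot code:

#include <stdio.h>

/* Identify a kernel payload by the magic numbers documented above. */
static const char *payload_format(const unsigned char *p, size_t len)
{
	if (len < 2)
		return "unknown";
	if (p[0] == 0x1f && (p[1] == 0x8b || p[1] == 0x9e))
		return "gzip";
	if (p[0] == 0x42 && p[1] == 0x5a)
		return "bzip2";
	if (p[0] == 0x5d && p[1] == 0x00)
		return "lzma";
	if (p[0] == 0xfd && p[1] == 0x37)
		return "xz";
	if (p[0] == 0x02 && p[1] == 0x21)
		return "lz4";
	if (len >= 4 && p[0] == 0x7f && p[1] == 'E' && p[2] == 'L' && p[3] == 'F')
		return "uncompressed ELF";
	return "unknown";
}

int main(void)
{
	/* First bytes of an LZ4 legacy-format stream (magic 02 21 4C 18). */
	unsigned char lz4_hdr[] = { 0x02, 0x21, 0x4c, 0x18 };

	printf("%s\n", payload_format(lz4_hdr, sizeof(lz4_hdr)));
	return 0;
}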
diff --git a/MAINTAINERS b/MAINTAINERS index e03c40eb39e0..9623bc5a39d5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -9268,6 +9268,13 @@ F: Documentation/networking/z8530drv.txt | |||
9268 | F: drivers/net/hamradio/*scc.c | 9268 | F: drivers/net/hamradio/*scc.c |
9269 | F: drivers/net/hamradio/z8530.h | 9269 | F: drivers/net/hamradio/z8530.h |
9270 | 9270 | ||
9271 | ZBUD COMPRESSED PAGE ALLOCATOR | ||
9272 | M: Seth Jennings <sjenning@linux.vnet.ibm.com> | ||
9273 | L: linux-mm@kvack.org | ||
9274 | S: Maintained | ||
9275 | F: mm/zbud.c | ||
9276 | F: include/linux/zbud.h | ||
9277 | |||
9271 | ZD1211RW WIRELESS DRIVER | 9278 | ZD1211RW WIRELESS DRIVER |
9272 | M: Daniel Drake <dsd@gentoo.org> | 9279 | M: Daniel Drake <dsd@gentoo.org> |
9273 | M: Ulrich Kunitz <kune@deine-taler.de> | 9280 | M: Ulrich Kunitz <kune@deine-taler.de> |
@@ -9290,6 +9297,12 @@ M: "Maciej W. Rozycki" <macro@linux-mips.org> | |||
9290 | S: Maintained | 9297 | S: Maintained |
9291 | F: drivers/tty/serial/zs.* | 9298 | F: drivers/tty/serial/zs.* |
9292 | 9299 | ||
9300 | ZSWAP COMPRESSED SWAP CACHING | ||
9301 | M: Seth Jennings <sjenning@linux.vnet.ibm.com> | ||
9302 | L: linux-mm@kvack.org | ||
9303 | S: Maintained | ||
9304 | F: mm/zswap.c | ||
9305 | |||
9293 | THE REST | 9306 | THE REST |
9294 | M: Linus Torvalds <torvalds@linux-foundation.org> | 9307 | M: Linus Torvalds <torvalds@linux-foundation.org> |
9295 | L: linux-kernel@vger.kernel.org | 9308 | L: linux-kernel@vger.kernel.org |
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index 318164cabdfc..0fd1f0d515ff 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c | |||
@@ -207,8 +207,10 @@ out_of_memory: | |||
207 | } | 207 | } |
208 | up_read(&mm->mmap_sem); | 208 | up_read(&mm->mmap_sem); |
209 | 209 | ||
210 | if (user_mode(regs)) | 210 | if (user_mode(regs)) { |
211 | do_group_exit(SIGKILL); /* This will never return */ | 211 | pagefault_out_of_memory(); |
212 | return; | ||
213 | } | ||
212 | 214 | ||
213 | goto no_context; | 215 | goto no_context; |
214 | 216 | ||
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 5ef7af01373a..0ac9be677ebb 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -41,6 +41,7 @@ config ARM | |||
41 | select HAVE_IDE if PCI || ISA || PCMCIA | 41 | select HAVE_IDE if PCI || ISA || PCMCIA |
42 | select HAVE_IRQ_TIME_ACCOUNTING | 42 | select HAVE_IRQ_TIME_ACCOUNTING |
43 | select HAVE_KERNEL_GZIP | 43 | select HAVE_KERNEL_GZIP |
44 | select HAVE_KERNEL_LZ4 | ||
44 | select HAVE_KERNEL_LZMA | 45 | select HAVE_KERNEL_LZMA |
45 | select HAVE_KERNEL_LZO | 46 | select HAVE_KERNEL_LZO |
46 | select HAVE_KERNEL_XZ | 47 | select HAVE_KERNEL_XZ |
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore index f79a08efe000..47279aa96a6a 100644 --- a/arch/arm/boot/compressed/.gitignore +++ b/arch/arm/boot/compressed/.gitignore | |||
@@ -6,6 +6,7 @@ piggy.gzip | |||
6 | piggy.lzo | 6 | piggy.lzo |
7 | piggy.lzma | 7 | piggy.lzma |
8 | piggy.xzkern | 8 | piggy.xzkern |
9 | piggy.lz4 | ||
9 | vmlinux | 10 | vmlinux |
10 | vmlinux.lds | 11 | vmlinux.lds |
11 | 12 | ||
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 48d0a44270bd..7ac1610252ba 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile | |||
@@ -91,6 +91,7 @@ suffix_$(CONFIG_KERNEL_GZIP) = gzip | |||
91 | suffix_$(CONFIG_KERNEL_LZO) = lzo | 91 | suffix_$(CONFIG_KERNEL_LZO) = lzo |
92 | suffix_$(CONFIG_KERNEL_LZMA) = lzma | 92 | suffix_$(CONFIG_KERNEL_LZMA) = lzma |
93 | suffix_$(CONFIG_KERNEL_XZ) = xzkern | 93 | suffix_$(CONFIG_KERNEL_XZ) = xzkern |
94 | suffix_$(CONFIG_KERNEL_LZ4) = lz4 | ||
94 | 95 | ||
95 | # Borrowed libfdt files for the ATAG compatibility mode | 96 | # Borrowed libfdt files for the ATAG compatibility mode |
96 | 97 | ||
@@ -115,7 +116,7 @@ targets := vmlinux vmlinux.lds \ | |||
115 | font.o font.c head.o misc.o $(OBJS) | 116 | font.o font.c head.o misc.o $(OBJS) |
116 | 117 | ||
117 | # Make sure files are removed during clean | 118 | # Make sure files are removed during clean |
118 | extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \ | 119 | extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern piggy.lz4 \ |
119 | lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) \ | 120 | lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) \ |
120 | hyp-stub.S | 121 | hyp-stub.S |
121 | 122 | ||
diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c index 24b0475cb8bf..bd245d34952d 100644 --- a/arch/arm/boot/compressed/decompress.c +++ b/arch/arm/boot/compressed/decompress.c | |||
@@ -51,6 +51,10 @@ extern char * strstr(const char * s1, const char *s2); | |||
51 | #include "../../../../lib/decompress_unxz.c" | 51 | #include "../../../../lib/decompress_unxz.c" |
52 | #endif | 52 | #endif |
53 | 53 | ||
54 | #ifdef CONFIG_KERNEL_LZ4 | ||
55 | #include "../../../../lib/decompress_unlz4.c" | ||
56 | #endif | ||
57 | |||
54 | int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)) | 58 | int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)) |
55 | { | 59 | { |
56 | return decompress(input, len, NULL, NULL, output, NULL, error); | 60 | return decompress(input, len, NULL, NULL, output, NULL, error); |
diff --git a/arch/arm/boot/compressed/piggy.lz4.S b/arch/arm/boot/compressed/piggy.lz4.S new file mode 100644 index 000000000000..3d9a575618a3 --- /dev/null +++ b/arch/arm/boot/compressed/piggy.lz4.S | |||
@@ -0,0 +1,6 @@ | |||
1 | .section .piggydata,#alloc | ||
2 | .globl input_data | ||
3 | input_data: | ||
4 | .incbin "arch/arm/boot/compressed/piggy.lz4" | ||
5 | .globl input_data_end | ||
6 | input_data_end: | ||
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index ed94b1a366ae..423744bf18eb 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h | |||
@@ -223,11 +223,12 @@ extern int iop3xx_get_init_atu(void); | |||
223 | #ifndef __ASSEMBLY__ | 223 | #ifndef __ASSEMBLY__ |
224 | 224 | ||
225 | #include <linux/types.h> | 225 | #include <linux/types.h> |
226 | #include <linux/reboot.h> | ||
226 | 227 | ||
227 | void iop3xx_map_io(void); | 228 | void iop3xx_map_io(void); |
228 | void iop_init_cp6_handler(void); | 229 | void iop_init_cp6_handler(void); |
229 | void iop_init_time(unsigned long tickrate); | 230 | void iop_init_time(unsigned long tickrate); |
230 | void iop3xx_restart(char, const char *); | 231 | void iop3xx_restart(enum reboot_mode, const char *); |
231 | 232 | ||
232 | static inline u32 read_tmr0(void) | 233 | static inline u32 read_tmr0(void) |
233 | { | 234 | { |
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index 75bf07910b81..441efc491b50 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | 12 | ||
13 | #ifndef __ASSEMBLY__ | 13 | #ifndef __ASSEMBLY__ |
14 | #include <linux/reboot.h> | ||
14 | 15 | ||
15 | struct tag; | 16 | struct tag; |
16 | struct meminfo; | 17 | struct meminfo; |
@@ -43,7 +44,7 @@ struct machine_desc { | |||
43 | unsigned char reserve_lp0 :1; /* never has lp0 */ | 44 | unsigned char reserve_lp0 :1; /* never has lp0 */ |
44 | unsigned char reserve_lp1 :1; /* never has lp1 */ | 45 | unsigned char reserve_lp1 :1; /* never has lp1 */ |
45 | unsigned char reserve_lp2 :1; /* never has lp2 */ | 46 | unsigned char reserve_lp2 :1; /* never has lp2 */ |
46 | char restart_mode; /* default restart mode */ | 47 | enum reboot_mode reboot_mode; /* default restart mode */ |
47 | struct smp_operations *smp; /* SMP operations */ | 48 | struct smp_operations *smp; /* SMP operations */ |
48 | bool (*smp_init)(void); | 49 | bool (*smp_init)(void); |
49 | void (*fixup)(struct tag *, char **, | 50 | void (*fixup)(struct tag *, char **, |
@@ -58,7 +59,7 @@ struct machine_desc { | |||
58 | #ifdef CONFIG_MULTI_IRQ_HANDLER | 59 | #ifdef CONFIG_MULTI_IRQ_HANDLER |
59 | void (*handle_irq)(struct pt_regs *); | 60 | void (*handle_irq)(struct pt_regs *); |
60 | #endif | 61 | #endif |
61 | void (*restart)(char, const char *); | 62 | void (*restart)(enum reboot_mode, const char *); |
62 | }; | 63 | }; |
63 | 64 | ||
64 | /* | 65 | /* |
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h index 21a23e378bbe..a3d61ad984af 100644 --- a/arch/arm/include/asm/system_misc.h +++ b/arch/arm/include/asm/system_misc.h | |||
@@ -6,11 +6,12 @@ | |||
6 | #include <linux/compiler.h> | 6 | #include <linux/compiler.h> |
7 | #include <linux/linkage.h> | 7 | #include <linux/linkage.h> |
8 | #include <linux/irqflags.h> | 8 | #include <linux/irqflags.h> |
9 | #include <linux/reboot.h> | ||
9 | 10 | ||
10 | extern void cpu_init(void); | 11 | extern void cpu_init(void); |
11 | 12 | ||
12 | void soft_restart(unsigned long); | 13 | void soft_restart(unsigned long); |
13 | extern void (*arm_pm_restart)(char str, const char *cmd); | 14 | extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); |
14 | extern void (*arm_pm_idle)(void); | 15 | extern void (*arm_pm_idle)(void); |
15 | 16 | ||
16 | #define UDBG_UNDEFINED (1 << 0) | 17 | #define UDBG_UNDEFINED (1 << 0) |
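The pattern repeated through the rest of this series is visible in the two hunks above: every board restart hook and the arm_pm_restart pointer trade their magic 'char mode' argument for enum reboot_mode. A stand-alone C sketch of the new shape follows; the enum constants match the ones the series adds to <linux/reboot.h>, while the handler body, names and prints are made-up placeholders rather than real board code:

#include <stdio.h>

enum reboot_mode {
	REBOOT_COLD = 0,
	REBOOT_WARM,
	REBOOT_HARD,
	REBOOT_SOFT,
	REBOOT_GPIO,
};

/* A board-specific restart handler with the post-patch signature. */
static void example_board_restart(enum reboot_mode mode, const char *cmd)
{
	if (mode == REBOOT_SOFT) {
		/* e.g. jump back into the boot ROM */
		printf("soft restart%s%s\n", cmd ? ": " : "", cmd ? cmd : "");
	} else {
		/* e.g. kick the watchdog or hit the reset register */
		printf("hard restart\n");
	}
}

/* Boards now hand machine_restart() an enum instead of a raw character. */
static void (*arm_pm_restart_hook)(enum reboot_mode, const char *) =
	example_board_restart;

int main(void)
{
	arm_pm_restart_hook(REBOOT_SOFT, NULL);
	arm_pm_restart_hook(REBOOT_HARD, "bootloader");
	return 0;
}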
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 7f1efcd4a6e9..d3ca4f6915af 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/hw_breakpoint.h> | 32 | #include <linux/hw_breakpoint.h> |
33 | #include <linux/cpuidle.h> | 33 | #include <linux/cpuidle.h> |
34 | #include <linux/leds.h> | 34 | #include <linux/leds.h> |
35 | #include <linux/reboot.h> | ||
35 | 36 | ||
36 | #include <asm/cacheflush.h> | 37 | #include <asm/cacheflush.h> |
37 | #include <asm/idmap.h> | 38 | #include <asm/idmap.h> |
@@ -113,7 +114,7 @@ void soft_restart(unsigned long addr) | |||
113 | BUG(); | 114 | BUG(); |
114 | } | 115 | } |
115 | 116 | ||
116 | static void null_restart(char mode, const char *cmd) | 117 | static void null_restart(enum reboot_mode reboot_mode, const char *cmd) |
117 | { | 118 | { |
118 | } | 119 | } |
119 | 120 | ||
@@ -123,7 +124,7 @@ static void null_restart(char mode, const char *cmd) | |||
123 | void (*pm_power_off)(void); | 124 | void (*pm_power_off)(void); |
124 | EXPORT_SYMBOL(pm_power_off); | 125 | EXPORT_SYMBOL(pm_power_off); |
125 | 126 | ||
126 | void (*arm_pm_restart)(char str, const char *cmd) = null_restart; | 127 | void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd) = null_restart; |
127 | EXPORT_SYMBOL_GPL(arm_pm_restart); | 128 | EXPORT_SYMBOL_GPL(arm_pm_restart); |
128 | 129 | ||
129 | /* | 130 | /* |
@@ -175,16 +176,6 @@ void arch_cpu_idle(void) | |||
175 | default_idle(); | 176 | default_idle(); |
176 | } | 177 | } |
177 | 178 | ||
178 | static char reboot_mode = 'h'; | ||
179 | |||
180 | int __init reboot_setup(char *str) | ||
181 | { | ||
182 | reboot_mode = str[0]; | ||
183 | return 1; | ||
184 | } | ||
185 | |||
186 | __setup("reboot=", reboot_setup); | ||
187 | |||
188 | /* | 179 | /* |
189 | * Called by kexec, immediately prior to machine_kexec(). | 180 | * Called by kexec, immediately prior to machine_kexec(). |
190 | * | 181 | * |
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 2bc1514d6dbe..0dd3b79b15c3 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c | |||
@@ -886,20 +886,12 @@ long arch_ptrace(struct task_struct *child, long request, | |||
886 | 886 | ||
887 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 887 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
888 | case PTRACE_GETHBPREGS: | 888 | case PTRACE_GETHBPREGS: |
889 | if (ptrace_get_breakpoints(child) < 0) | ||
890 | return -ESRCH; | ||
891 | |||
892 | ret = ptrace_gethbpregs(child, addr, | 889 | ret = ptrace_gethbpregs(child, addr, |
893 | (unsigned long __user *)data); | 890 | (unsigned long __user *)data); |
894 | ptrace_put_breakpoints(child); | ||
895 | break; | 891 | break; |
896 | case PTRACE_SETHBPREGS: | 892 | case PTRACE_SETHBPREGS: |
897 | if (ptrace_get_breakpoints(child) < 0) | ||
898 | return -ESRCH; | ||
899 | |||
900 | ret = ptrace_sethbpregs(child, addr, | 893 | ret = ptrace_sethbpregs(child, addr, |
901 | (unsigned long __user *)data); | 894 | (unsigned long __user *)data); |
902 | ptrace_put_breakpoints(child); | ||
903 | break; | 895 | break; |
904 | #endif | 896 | #endif |
905 | 897 | ||
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 9b653278c9e8..63af9a7ae512 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -74,7 +74,7 @@ __setup("fpe=", fpe_setup); | |||
74 | 74 | ||
75 | extern void paging_init(struct machine_desc *desc); | 75 | extern void paging_init(struct machine_desc *desc); |
76 | extern void sanity_check_meminfo(void); | 76 | extern void sanity_check_meminfo(void); |
77 | extern void reboot_setup(char *str); | 77 | extern enum reboot_mode reboot_mode; |
78 | extern void setup_dma_zone(struct machine_desc *desc); | 78 | extern void setup_dma_zone(struct machine_desc *desc); |
79 | 79 | ||
80 | unsigned int processor_id; | 80 | unsigned int processor_id; |
@@ -861,8 +861,8 @@ void __init setup_arch(char **cmdline_p) | |||
861 | 861 | ||
862 | setup_dma_zone(mdesc); | 862 | setup_dma_zone(mdesc); |
863 | 863 | ||
864 | if (mdesc->restart_mode) | 864 | if (mdesc->reboot_mode != REBOOT_HARD) |
865 | reboot_setup(&mdesc->restart_mode); | 865 | reboot_mode = mdesc->reboot_mode; |
866 | 866 | ||
867 | init_mm.start_code = (unsigned long) _text; | 867 | init_mm.start_code = (unsigned long) _text; |
868 | init_mm.end_code = (unsigned long) _etext; | 868 | init_mm.end_code = (unsigned long) _etext; |
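The setup_arch() hunk above also shows how the per-board default now interacts with the generic reboot= handling: REBOOT_HARD is the global default, so a machine descriptor only overrides it when it asks for something else (CATS, further down in this diff, keeps REBOOT_SOFT). A toy C sketch of that selection logic, using a hypothetical trimmed-down machine descriptor:

#include <stdio.h>

enum reboot_mode { REBOOT_COLD, REBOOT_WARM, REBOOT_HARD, REBOOT_SOFT, REBOOT_GPIO };

/* Hypothetical, trimmed-down machine descriptor carrying a default mode. */
struct machine_desc_sketch {
	const char *name;
	enum reboot_mode reboot_mode;
};

/* Global default, as if established by the generic "reboot=" handling. */
static enum reboot_mode reboot_mode = REBOOT_HARD;

static void apply_board_default(const struct machine_desc_sketch *mdesc)
{
	/*
	 * Mirrors the setup_arch() hunk above: REBOOT_HARD is the default,
	 * so only a board asking for a different mode overrides it.
	 */
	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;
}

int main(void)
{
	struct machine_desc_sketch cats = { "Chalice-CATS", REBOOT_SOFT };

	apply_board_default(&cats);
	printf("%s -> mode %d\n", cats.name, reboot_mode);
	return 0;
}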
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c index 9eb574397ee1..4aad93d54d6f 100644 --- a/arch/arm/mach-at91/at91rm9200.c +++ b/arch/arm/mach-at91/at91rm9200.c | |||
@@ -11,6 +11,7 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/reboot.h> | ||
14 | 15 | ||
15 | #include <asm/irq.h> | 16 | #include <asm/irq.h> |
16 | #include <asm/mach/arch.h> | 17 | #include <asm/mach/arch.h> |
@@ -304,7 +305,7 @@ static void at91rm9200_idle(void) | |||
304 | at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK); | 305 | at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK); |
305 | } | 306 | } |
306 | 307 | ||
307 | static void at91rm9200_restart(char mode, const char *cmd) | 308 | static void at91rm9200_restart(enum reboot_mode reboot_mode, const char *cmd) |
308 | { | 309 | { |
309 | /* | 310 | /* |
310 | * Perform a hardware reset with the use of the Watchdog timer. | 311 | * Perform a hardware reset with the use of the Watchdog timer. |
diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h index f6de36aefe85..dc6e2f5f804d 100644 --- a/arch/arm/mach-at91/generic.h +++ b/arch/arm/mach-at91/generic.h | |||
@@ -10,6 +10,7 @@ | |||
10 | 10 | ||
11 | #include <linux/clkdev.h> | 11 | #include <linux/clkdev.h> |
12 | #include <linux/of.h> | 12 | #include <linux/of.h> |
13 | #include <linux/reboot.h> | ||
13 | 14 | ||
14 | /* Map io */ | 15 | /* Map io */ |
15 | extern void __init at91_map_io(void); | 16 | extern void __init at91_map_io(void); |
@@ -60,8 +61,8 @@ extern void at91sam9_idle(void); | |||
60 | 61 | ||
61 | /* reset */ | 62 | /* reset */ |
62 | extern void at91_ioremap_rstc(u32 base_addr); | 63 | extern void at91_ioremap_rstc(u32 base_addr); |
63 | extern void at91sam9_alt_restart(char, const char *); | 64 | extern void at91sam9_alt_restart(enum reboot_mode, const char *); |
64 | extern void at91sam9g45_restart(char, const char *); | 65 | extern void at91sam9g45_restart(enum reboot_mode, const char *); |
65 | 66 | ||
66 | /* shutdown */ | 67 | /* shutdown */ |
67 | extern void at91_ioremap_shdwc(u32 base_addr); | 68 | extern void at91_ioremap_shdwc(u32 base_addr); |
diff --git a/arch/arm/mach-bcm2835/bcm2835.c b/arch/arm/mach-bcm2835/bcm2835.c index 740fa9ebe249..40686d7ef500 100644 --- a/arch/arm/mach-bcm2835/bcm2835.c +++ b/arch/arm/mach-bcm2835/bcm2835.c | |||
@@ -53,7 +53,7 @@ static void bcm2835_setup_restart(void) | |||
53 | WARN(!wdt_regs, "failed to remap watchdog regs"); | 53 | WARN(!wdt_regs, "failed to remap watchdog regs"); |
54 | } | 54 | } |
55 | 55 | ||
56 | static void bcm2835_restart(char mode, const char *cmd) | 56 | static void bcm2835_restart(enum reboot_mode mode, const char *cmd) |
57 | { | 57 | { |
58 | u32 val; | 58 | u32 val; |
59 | 59 | ||
@@ -91,7 +91,7 @@ static void bcm2835_power_off(void) | |||
91 | writel_relaxed(val, wdt_regs + PM_RSTS); | 91 | writel_relaxed(val, wdt_regs + PM_RSTS); |
92 | 92 | ||
93 | /* Continue with normal reset mechanism */ | 93 | /* Continue with normal reset mechanism */ |
94 | bcm2835_restart(0, ""); | 94 | bcm2835_restart(REBOOT_HARD, ""); |
95 | } | 95 | } |
96 | 96 | ||
97 | static struct map_desc io_map __initdata = { | 97 | static struct map_desc io_map __initdata = { |
diff --git a/arch/arm/mach-clps711x/common.c b/arch/arm/mach-clps711x/common.c index f6d1746366d4..4ca2f3ca2de4 100644 --- a/arch/arm/mach-clps711x/common.c +++ b/arch/arm/mach-clps711x/common.c | |||
@@ -384,7 +384,7 @@ void __init clps711x_timer_init(void) | |||
384 | setup_irq(IRQ_TC2OI, &clps711x_timer_irq); | 384 | setup_irq(IRQ_TC2OI, &clps711x_timer_irq); |
385 | } | 385 | } |
386 | 386 | ||
387 | void clps711x_restart(char mode, const char *cmd) | 387 | void clps711x_restart(enum reboot_mode mode, const char *cmd) |
388 | { | 388 | { |
389 | soft_restart(0); | 389 | soft_restart(0); |
390 | } | 390 | } |
diff --git a/arch/arm/mach-clps711x/common.h b/arch/arm/mach-clps711x/common.h index 2a22f4c6cc75..9a6767bfdc47 100644 --- a/arch/arm/mach-clps711x/common.h +++ b/arch/arm/mach-clps711x/common.h | |||
@@ -4,6 +4,8 @@ | |||
4 | * Common bits. | 4 | * Common bits. |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/reboot.h> | ||
8 | |||
7 | #define CLPS711X_NR_IRQS (33) | 9 | #define CLPS711X_NR_IRQS (33) |
8 | #define CLPS711X_NR_GPIO (4 * 8 + 3) | 10 | #define CLPS711X_NR_GPIO (4 * 8 + 3) |
9 | #define CLPS711X_GPIO(prt, bit) ((prt) * 8 + (bit)) | 11 | #define CLPS711X_GPIO(prt, bit) ((prt) * 8 + (bit)) |
@@ -12,5 +14,5 @@ extern void clps711x_map_io(void); | |||
12 | extern void clps711x_init_irq(void); | 14 | extern void clps711x_init_irq(void); |
13 | extern void clps711x_timer_init(void); | 15 | extern void clps711x_timer_init(void); |
14 | extern void clps711x_handle_irq(struct pt_regs *regs); | 16 | extern void clps711x_handle_irq(struct pt_regs *regs); |
15 | extern void clps711x_restart(char mode, const char *cmd); | 17 | extern void clps711x_restart(enum reboot_mode mode, const char *cmd); |
16 | extern void clps711x_init_early(void); | 18 | extern void clps711x_init_early(void); |
diff --git a/arch/arm/mach-cns3xxx/core.h b/arch/arm/mach-cns3xxx/core.h index b23b17b4da10..5218b6198dc2 100644 --- a/arch/arm/mach-cns3xxx/core.h +++ b/arch/arm/mach-cns3xxx/core.h | |||
@@ -11,6 +11,8 @@ | |||
11 | #ifndef __CNS3XXX_CORE_H | 11 | #ifndef __CNS3XXX_CORE_H |
12 | #define __CNS3XXX_CORE_H | 12 | #define __CNS3XXX_CORE_H |
13 | 13 | ||
14 | #include <linux/reboot.h> | ||
15 | |||
14 | extern void cns3xxx_timer_init(void); | 16 | extern void cns3xxx_timer_init(void); |
15 | 17 | ||
16 | #ifdef CONFIG_CACHE_L2X0 | 18 | #ifdef CONFIG_CACHE_L2X0 |
@@ -22,6 +24,6 @@ static inline void cns3xxx_l2x0_init(void) {} | |||
22 | void __init cns3xxx_map_io(void); | 24 | void __init cns3xxx_map_io(void); |
23 | void __init cns3xxx_init_irq(void); | 25 | void __init cns3xxx_init_irq(void); |
24 | void cns3xxx_power_off(void); | 26 | void cns3xxx_power_off(void); |
25 | void cns3xxx_restart(char, const char *); | 27 | void cns3xxx_restart(enum reboot_mode, const char *); |
26 | 28 | ||
27 | #endif /* __CNS3XXX_CORE_H */ | 29 | #endif /* __CNS3XXX_CORE_H */ |
diff --git a/arch/arm/mach-cns3xxx/pm.c b/arch/arm/mach-cns3xxx/pm.c index 79e3d47aad65..fb38c726e987 100644 --- a/arch/arm/mach-cns3xxx/pm.c +++ b/arch/arm/mach-cns3xxx/pm.c | |||
@@ -89,7 +89,7 @@ void cns3xxx_pwr_soft_rst(unsigned int block) | |||
89 | } | 89 | } |
90 | EXPORT_SYMBOL(cns3xxx_pwr_soft_rst); | 90 | EXPORT_SYMBOL(cns3xxx_pwr_soft_rst); |
91 | 91 | ||
92 | void cns3xxx_restart(char mode, const char *cmd) | 92 | void cns3xxx_restart(enum reboot_mode mode, const char *cmd) |
93 | { | 93 | { |
94 | /* | 94 | /* |
95 | * To reset, we hit the on-board reset register | 95 | * To reset, we hit the on-board reset register |
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index eb254fe861ac..71a46a348761 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/serial_8250.h> | 16 | #include <linux/serial_8250.h> |
17 | #include <linux/ahci_platform.h> | 17 | #include <linux/ahci_platform.h> |
18 | #include <linux/clk.h> | 18 | #include <linux/clk.h> |
19 | #include <linux/reboot.h> | ||
19 | 20 | ||
20 | #include <mach/cputype.h> | 21 | #include <mach/cputype.h> |
21 | #include <mach/common.h> | 22 | #include <mach/common.h> |
@@ -366,7 +367,7 @@ static struct platform_device da8xx_wdt_device = { | |||
366 | .resource = da8xx_watchdog_resources, | 367 | .resource = da8xx_watchdog_resources, |
367 | }; | 368 | }; |
368 | 369 | ||
369 | void da8xx_restart(char mode, const char *cmd) | 370 | void da8xx_restart(enum reboot_mode mode, const char *cmd) |
370 | { | 371 | { |
371 | struct device *dev; | 372 | struct device *dev; |
372 | 373 | ||
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c index 90b83d00fe2b..111573c0aad1 100644 --- a/arch/arm/mach-davinci/devices.c +++ b/arch/arm/mach-davinci/devices.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
14 | #include <linux/dma-mapping.h> | 14 | #include <linux/dma-mapping.h> |
15 | #include <linux/io.h> | 15 | #include <linux/io.h> |
16 | #include <linux/reboot.h> | ||
16 | 17 | ||
17 | #include <mach/hardware.h> | 18 | #include <mach/hardware.h> |
18 | #include <linux/platform_data/i2c-davinci.h> | 19 | #include <linux/platform_data/i2c-davinci.h> |
@@ -307,7 +308,7 @@ struct platform_device davinci_wdt_device = { | |||
307 | .resource = wdt_resources, | 308 | .resource = wdt_resources, |
308 | }; | 309 | }; |
309 | 310 | ||
310 | void davinci_restart(char mode, const char *cmd) | 311 | void davinci_restart(enum reboot_mode mode, const char *cmd) |
311 | { | 312 | { |
312 | davinci_watchdog_reset(&davinci_wdt_device); | 313 | davinci_watchdog_reset(&davinci_wdt_device); |
313 | } | 314 | } |
diff --git a/arch/arm/mach-davinci/include/mach/common.h b/arch/arm/mach-davinci/include/mach/common.h index b124b77c90c5..cce316b92c06 100644 --- a/arch/arm/mach-davinci/include/mach/common.h +++ b/arch/arm/mach-davinci/include/mach/common.h | |||
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | #include <linux/compiler.h> | 15 | #include <linux/compiler.h> |
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | #include <linux/reboot.h> | ||
17 | 18 | ||
18 | extern void davinci_timer_init(void); | 19 | extern void davinci_timer_init(void); |
19 | 20 | ||
@@ -81,7 +82,7 @@ extern struct davinci_soc_info davinci_soc_info; | |||
81 | 82 | ||
82 | extern void davinci_common_init(struct davinci_soc_info *soc_info); | 83 | extern void davinci_common_init(struct davinci_soc_info *soc_info); |
83 | extern void davinci_init_ide(void); | 84 | extern void davinci_init_ide(void); |
84 | void davinci_restart(char mode, const char *cmd); | 85 | void davinci_restart(enum reboot_mode mode, const char *cmd); |
85 | void davinci_init_late(void); | 86 | void davinci_init_late(void); |
86 | 87 | ||
87 | #ifdef CONFIG_DAVINCI_RESET_CLOCKS | 88 | #ifdef CONFIG_DAVINCI_RESET_CLOCKS |
diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h index 3c797e2272f8..7b41a5e9bc31 100644 --- a/arch/arm/mach-davinci/include/mach/da8xx.h +++ b/arch/arm/mach-davinci/include/mach/da8xx.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/davinci_emac.h> | 17 | #include <linux/davinci_emac.h> |
18 | #include <linux/spi/spi.h> | 18 | #include <linux/spi/spi.h> |
19 | #include <linux/platform_data/davinci_asp.h> | 19 | #include <linux/platform_data/davinci_asp.h> |
20 | #include <linux/reboot.h> | ||
20 | #include <linux/videodev2.h> | 21 | #include <linux/videodev2.h> |
21 | 22 | ||
22 | #include <mach/serial.h> | 23 | #include <mach/serial.h> |
@@ -106,7 +107,7 @@ int da850_register_vpif_display | |||
106 | (struct vpif_display_config *display_config); | 107 | (struct vpif_display_config *display_config); |
107 | int da850_register_vpif_capture | 108 | int da850_register_vpif_capture |
108 | (struct vpif_capture_config *capture_config); | 109 | (struct vpif_capture_config *capture_config); |
109 | void da8xx_restart(char mode, const char *cmd); | 110 | void da8xx_restart(enum reboot_mode mode, const char *cmd); |
110 | void da8xx_rproc_reserve_cma(void); | 111 | void da8xx_rproc_reserve_cma(void); |
111 | int da8xx_register_rproc(void); | 112 | int da8xx_register_rproc(void); |
112 | 113 | ||
diff --git a/arch/arm/mach-davinci/include/mach/tnetv107x.h b/arch/arm/mach-davinci/include/mach/tnetv107x.h index 366e975effa8..16314c64f755 100644 --- a/arch/arm/mach-davinci/include/mach/tnetv107x.h +++ b/arch/arm/mach-davinci/include/mach/tnetv107x.h | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/serial_8250.h> | 35 | #include <linux/serial_8250.h> |
36 | #include <linux/input/matrix_keypad.h> | 36 | #include <linux/input/matrix_keypad.h> |
37 | #include <linux/mfd/ti_ssp.h> | 37 | #include <linux/mfd/ti_ssp.h> |
38 | #include <linux/reboot.h> | ||
38 | 39 | ||
39 | #include <linux/platform_data/mmc-davinci.h> | 40 | #include <linux/platform_data/mmc-davinci.h> |
40 | #include <linux/platform_data/mtd-davinci.h> | 41 | #include <linux/platform_data/mtd-davinci.h> |
@@ -54,7 +55,7 @@ extern struct platform_device tnetv107x_serial_device; | |||
54 | extern void tnetv107x_init(void); | 55 | extern void tnetv107x_init(void); |
55 | extern void tnetv107x_devices_init(struct tnetv107x_device_info *); | 56 | extern void tnetv107x_devices_init(struct tnetv107x_device_info *); |
56 | extern void tnetv107x_irq_init(void); | 57 | extern void tnetv107x_irq_init(void); |
57 | void tnetv107x_restart(char mode, const char *cmd); | 58 | void tnetv107x_restart(enum reboot_mode mode, const char *cmd); |
58 | 59 | ||
59 | #endif | 60 | #endif |
60 | 61 | ||
diff --git a/arch/arm/mach-davinci/tnetv107x.c b/arch/arm/mach-davinci/tnetv107x.c index 3b2a70d43efa..4545667ecd3c 100644 --- a/arch/arm/mach-davinci/tnetv107x.c +++ b/arch/arm/mach-davinci/tnetv107x.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/io.h> | 19 | #include <linux/io.h> |
20 | #include <linux/err.h> | 20 | #include <linux/err.h> |
21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
22 | #include <linux/reboot.h> | ||
22 | 23 | ||
23 | #include <asm/mach/map.h> | 24 | #include <asm/mach/map.h> |
24 | 25 | ||
@@ -730,7 +731,7 @@ static void tnetv107x_watchdog_reset(struct platform_device *pdev) | |||
730 | __raw_writel(1, ®s->kick); | 731 | __raw_writel(1, ®s->kick); |
731 | } | 732 | } |
732 | 733 | ||
733 | void tnetv107x_restart(char mode, const char *cmd) | 734 | void tnetv107x_restart(enum reboot_mode mode, const char *cmd) |
734 | { | 735 | { |
735 | tnetv107x_watchdog_reset(&tnetv107x_wdt_device); | 736 | tnetv107x_watchdog_reset(&tnetv107x_wdt_device); |
736 | } | 737 | } |
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c index 2a9443d04d92..00247c771313 100644 --- a/arch/arm/mach-dove/common.c +++ b/arch/arm/mach-dove/common.c | |||
@@ -381,7 +381,7 @@ void __init dove_init(void) | |||
381 | dove_xor1_init(); | 381 | dove_xor1_init(); |
382 | } | 382 | } |
383 | 383 | ||
384 | void dove_restart(char mode, const char *cmd) | 384 | void dove_restart(enum reboot_mode mode, const char *cmd) |
385 | { | 385 | { |
386 | /* | 386 | /* |
387 | * Enable soft reset to assert RSTOUTn. | 387 | * Enable soft reset to assert RSTOUTn. |
diff --git a/arch/arm/mach-dove/common.h b/arch/arm/mach-dove/common.h index e86347928b67..1d725224d146 100644 --- a/arch/arm/mach-dove/common.h +++ b/arch/arm/mach-dove/common.h | |||
@@ -11,6 +11,8 @@ | |||
11 | #ifndef __ARCH_DOVE_COMMON_H | 11 | #ifndef __ARCH_DOVE_COMMON_H |
12 | #define __ARCH_DOVE_COMMON_H | 12 | #define __ARCH_DOVE_COMMON_H |
13 | 13 | ||
14 | #include <linux/reboot.h> | ||
15 | |||
14 | struct mv643xx_eth_platform_data; | 16 | struct mv643xx_eth_platform_data; |
15 | struct mv_sata_platform_data; | 17 | struct mv_sata_platform_data; |
16 | 18 | ||
@@ -42,6 +44,6 @@ void dove_spi1_init(void); | |||
42 | void dove_i2c_init(void); | 44 | void dove_i2c_init(void); |
43 | void dove_sdio0_init(void); | 45 | void dove_sdio0_init(void); |
44 | void dove_sdio1_init(void); | 46 | void dove_sdio1_init(void); |
45 | void dove_restart(char, const char *); | 47 | void dove_restart(enum reboot_mode, const char *); |
46 | 48 | ||
47 | #endif | 49 | #endif |
diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c index 8a53f346cdb3..68ac934d4565 100644 --- a/arch/arm/mach-ebsa110/core.c +++ b/arch/arm/mach-ebsa110/core.c | |||
@@ -311,7 +311,7 @@ static int __init ebsa110_init(void) | |||
311 | 311 | ||
312 | arch_initcall(ebsa110_init); | 312 | arch_initcall(ebsa110_init); |
313 | 313 | ||
314 | static void ebsa110_restart(char mode, const char *cmd) | 314 | static void ebsa110_restart(enum reboot_mode mode, const char *cmd) |
315 | { | 315 | { |
316 | soft_restart(0x80000000); | 316 | soft_restart(0x80000000); |
317 | } | 317 | } |
@@ -321,7 +321,6 @@ MACHINE_START(EBSA110, "EBSA110") | |||
321 | .atag_offset = 0x400, | 321 | .atag_offset = 0x400, |
322 | .reserve_lp0 = 1, | 322 | .reserve_lp0 = 1, |
323 | .reserve_lp2 = 1, | 323 | .reserve_lp2 = 1, |
324 | .restart_mode = 's', | ||
325 | .map_io = ebsa110_map_io, | 324 | .map_io = ebsa110_map_io, |
326 | .init_early = ebsa110_init_early, | 325 | .init_early = ebsa110_init_early, |
327 | .init_irq = ebsa110_init_irq, | 326 | .init_irq = ebsa110_init_irq, |
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c index c49ed3dc1aea..df8612fbbc9c 100644 --- a/arch/arm/mach-ep93xx/core.c +++ b/arch/arm/mach-ep93xx/core.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/spi/spi.h> | 35 | #include <linux/spi/spi.h> |
36 | #include <linux/export.h> | 36 | #include <linux/export.h> |
37 | #include <linux/irqchip/arm-vic.h> | 37 | #include <linux/irqchip/arm-vic.h> |
38 | #include <linux/reboot.h> | ||
38 | 39 | ||
39 | #include <mach/hardware.h> | 40 | #include <mach/hardware.h> |
40 | #include <linux/platform_data/video-ep93xx.h> | 41 | #include <linux/platform_data/video-ep93xx.h> |
@@ -921,7 +922,7 @@ void __init ep93xx_init_devices(void) | |||
921 | gpio_led_register_device(-1, &ep93xx_led_data); | 922 | gpio_led_register_device(-1, &ep93xx_led_data); |
922 | } | 923 | } |
923 | 924 | ||
924 | void ep93xx_restart(char mode, const char *cmd) | 925 | void ep93xx_restart(enum reboot_mode mode, const char *cmd) |
925 | { | 926 | { |
926 | /* | 927 | /* |
927 | * Set then clear the SWRST bit to initiate a software reset | 928 | * Set then clear the SWRST bit to initiate a software reset |
diff --git a/arch/arm/mach-ep93xx/include/mach/platform.h b/arch/arm/mach-ep93xx/include/mach/platform.h index a14e1b37beff..e256e0baec2e 100644 --- a/arch/arm/mach-ep93xx/include/mach/platform.h +++ b/arch/arm/mach-ep93xx/include/mach/platform.h | |||
@@ -4,6 +4,8 @@ | |||
4 | 4 | ||
5 | #ifndef __ASSEMBLY__ | 5 | #ifndef __ASSEMBLY__ |
6 | 6 | ||
7 | #include <linux/reboot.h> | ||
8 | |||
7 | struct i2c_gpio_platform_data; | 9 | struct i2c_gpio_platform_data; |
8 | struct i2c_board_info; | 10 | struct i2c_board_info; |
9 | struct spi_board_info; | 11 | struct spi_board_info; |
@@ -55,7 +57,7 @@ void ep93xx_ide_release_gpio(struct platform_device *pdev); | |||
55 | void ep93xx_init_devices(void); | 57 | void ep93xx_init_devices(void); |
56 | extern void ep93xx_timer_init(void); | 58 | extern void ep93xx_timer_init(void); |
57 | 59 | ||
58 | void ep93xx_restart(char, const char *); | 60 | void ep93xx_restart(enum reboot_mode, const char *); |
59 | void ep93xx_init_late(void); | 61 | void ep93xx_init_late(void); |
60 | 62 | ||
61 | #ifdef CONFIG_CRUNCH | 63 | #ifdef CONFIG_CRUNCH |
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c index 2c655db4b78e..164685bd25c8 100644 --- a/arch/arm/mach-exynos/common.c +++ b/arch/arm/mach-exynos/common.c | |||
@@ -285,12 +285,12 @@ static struct map_desc exynos5440_iodesc0[] __initdata = { | |||
285 | }, | 285 | }, |
286 | }; | 286 | }; |
287 | 287 | ||
288 | void exynos4_restart(char mode, const char *cmd) | 288 | void exynos4_restart(enum reboot_mode mode, const char *cmd) |
289 | { | 289 | { |
290 | __raw_writel(0x1, S5P_SWRESET); | 290 | __raw_writel(0x1, S5P_SWRESET); |
291 | } | 291 | } |
292 | 292 | ||
293 | void exynos5_restart(char mode, const char *cmd) | 293 | void exynos5_restart(enum reboot_mode mode, const char *cmd) |
294 | { | 294 | { |
295 | struct device_node *np; | 295 | struct device_node *np; |
296 | u32 val; | 296 | u32 val; |
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h index 38d45fd23be4..3e156bcddcb4 100644 --- a/arch/arm/mach-exynos/common.h +++ b/arch/arm/mach-exynos/common.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #ifndef __ARCH_ARM_MACH_EXYNOS_COMMON_H | 12 | #ifndef __ARCH_ARM_MACH_EXYNOS_COMMON_H |
13 | #define __ARCH_ARM_MACH_EXYNOS_COMMON_H | 13 | #define __ARCH_ARM_MACH_EXYNOS_COMMON_H |
14 | 14 | ||
15 | #include <linux/reboot.h> | ||
15 | #include <linux/of.h> | 16 | #include <linux/of.h> |
16 | 17 | ||
17 | void mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1); | 18 | void mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1); |
@@ -20,8 +21,8 @@ extern unsigned long xxti_f, xusbxti_f; | |||
20 | 21 | ||
21 | struct map_desc; | 22 | struct map_desc; |
22 | void exynos_init_io(void); | 23 | void exynos_init_io(void); |
23 | void exynos4_restart(char mode, const char *cmd); | 24 | void exynos4_restart(enum reboot_mode mode, const char *cmd); |
24 | void exynos5_restart(char mode, const char *cmd); | 25 | void exynos5_restart(enum reboot_mode mode, const char *cmd); |
25 | void exynos_init_late(void); | 26 | void exynos_init_late(void); |
26 | 27 | ||
27 | /* ToDo: remove these after migrating legacy exynos4 platforms to dt */ | 28 | /* ToDo: remove these after migrating legacy exynos4 platforms to dt */ |
diff --git a/arch/arm/mach-footbridge/cats-hw.c b/arch/arm/mach-footbridge/cats-hw.c index 6987a09ec219..9669cc0b6318 100644 --- a/arch/arm/mach-footbridge/cats-hw.c +++ b/arch/arm/mach-footbridge/cats-hw.c | |||
@@ -86,7 +86,7 @@ fixup_cats(struct tag *tags, char **cmdline, struct meminfo *mi) | |||
86 | MACHINE_START(CATS, "Chalice-CATS") | 86 | MACHINE_START(CATS, "Chalice-CATS") |
87 | /* Maintainer: Philip Blundell */ | 87 | /* Maintainer: Philip Blundell */ |
88 | .atag_offset = 0x100, | 88 | .atag_offset = 0x100, |
89 | .restart_mode = 's', | 89 | .reboot_mode = REBOOT_SOFT, |
90 | .fixup = fixup_cats, | 90 | .fixup = fixup_cats, |
91 | .map_io = footbridge_map_io, | 91 | .map_io = footbridge_map_io, |
92 | .init_irq = footbridge_init_irq, | 92 | .init_irq = footbridge_init_irq, |
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c index a42b369bc439..2739ca2c1334 100644 --- a/arch/arm/mach-footbridge/common.c +++ b/arch/arm/mach-footbridge/common.c | |||
@@ -198,9 +198,9 @@ void __init footbridge_map_io(void) | |||
198 | } | 198 | } |
199 | } | 199 | } |
200 | 200 | ||
201 | void footbridge_restart(char mode, const char *cmd) | 201 | void footbridge_restart(enum reboot_mode mode, const char *cmd) |
202 | { | 202 | { |
203 | if (mode == 's') { | 203 | if (mode == REBOOT_SOFT) { |
204 | /* Jump into the ROM */ | 204 | /* Jump into the ROM */ |
205 | soft_restart(0x41000000); | 205 | soft_restart(0x41000000); |
206 | } else { | 206 | } else { |
diff --git a/arch/arm/mach-footbridge/common.h b/arch/arm/mach-footbridge/common.h index a846e50a07b8..56607b3a773e 100644 --- a/arch/arm/mach-footbridge/common.h +++ b/arch/arm/mach-footbridge/common.h | |||
@@ -1,3 +1,4 @@ | |||
1 | #include <linux/reboot.h> | ||
1 | 2 | ||
2 | extern void footbridge_timer_init(void); | 3 | extern void footbridge_timer_init(void); |
3 | extern void isa_timer_init(void); | 4 | extern void isa_timer_init(void); |
@@ -8,4 +9,4 @@ extern void footbridge_map_io(void); | |||
8 | extern void footbridge_init_irq(void); | 9 | extern void footbridge_init_irq(void); |
9 | 10 | ||
10 | extern void isa_init_irq(unsigned int irq); | 11 | extern void isa_init_irq(unsigned int irq); |
11 | extern void footbridge_restart(char, const char *); | 12 | extern void footbridge_restart(enum reboot_mode, const char *); |
diff --git a/arch/arm/mach-footbridge/netwinder-hw.c b/arch/arm/mach-footbridge/netwinder-hw.c index 90ea23fdce4c..1fd2cf097e30 100644 --- a/arch/arm/mach-footbridge/netwinder-hw.c +++ b/arch/arm/mach-footbridge/netwinder-hw.c | |||
@@ -634,9 +634,9 @@ fixup_netwinder(struct tag *tags, char **cmdline, struct meminfo *mi) | |||
634 | #endif | 634 | #endif |
635 | } | 635 | } |
636 | 636 | ||
637 | static void netwinder_restart(char mode, const char *cmd) | 637 | static void netwinder_restart(enum reboot_mode mode, const char *cmd) |
638 | { | 638 | { |
639 | if (mode == 's') { | 639 | if (mode == REBOOT_SOFT) { |
640 | /* Jump into the ROM */ | 640 | /* Jump into the ROM */ |
641 | soft_restart(0x41000000); | 641 | soft_restart(0x41000000); |
642 | } else { | 642 | } else { |
diff --git a/arch/arm/mach-highbank/core.h b/arch/arm/mach-highbank/core.h index 3f65206a9b92..aea1ec5ab6f8 100644 --- a/arch/arm/mach-highbank/core.h +++ b/arch/arm/mach-highbank/core.h | |||
@@ -1,8 +1,10 @@ | |||
1 | #ifndef __HIGHBANK_CORE_H | 1 | #ifndef __HIGHBANK_CORE_H |
2 | #define __HIGHBANK_CORE_H | 2 | #define __HIGHBANK_CORE_H |
3 | 3 | ||
4 | #include <linux/reboot.h> | ||
5 | |||
4 | extern void highbank_set_cpu_jump(int cpu, void *jump_addr); | 6 | extern void highbank_set_cpu_jump(int cpu, void *jump_addr); |
5 | extern void highbank_restart(char, const char *); | 7 | extern void highbank_restart(enum reboot_mode, const char *); |
6 | extern void __iomem *scu_base_addr; | 8 | extern void __iomem *scu_base_addr; |
7 | 9 | ||
8 | #ifdef CONFIG_PM_SLEEP | 10 | #ifdef CONFIG_PM_SLEEP |
diff --git a/arch/arm/mach-highbank/system.c b/arch/arm/mach-highbank/system.c index 37d8384dcf19..2df5870b7583 100644 --- a/arch/arm/mach-highbank/system.c +++ b/arch/arm/mach-highbank/system.c | |||
@@ -15,13 +15,14 @@ | |||
15 | */ | 15 | */ |
16 | #include <linux/io.h> | 16 | #include <linux/io.h> |
17 | #include <asm/proc-fns.h> | 17 | #include <asm/proc-fns.h> |
18 | #include <linux/reboot.h> | ||
18 | 19 | ||
19 | #include "core.h" | 20 | #include "core.h" |
20 | #include "sysregs.h" | 21 | #include "sysregs.h" |
21 | 22 | ||
22 | void highbank_restart(char mode, const char *cmd) | 23 | void highbank_restart(enum reboot_mode mode, const char *cmd) |
23 | { | 24 | { |
24 | if (mode == 'h') | 25 | if (mode == REBOOT_HARD) |
25 | highbank_set_pwr_hard_reset(); | 26 | highbank_set_pwr_hard_reset(); |
26 | else | 27 | else |
27 | highbank_set_pwr_soft_reset(); | 28 | highbank_set_pwr_soft_reset(); |
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index ee78847abf47..cb6c838b63ed 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h | |||
@@ -11,6 +11,8 @@ | |||
11 | #ifndef __ASM_ARCH_MXC_COMMON_H__ | 11 | #ifndef __ASM_ARCH_MXC_COMMON_H__ |
12 | #define __ASM_ARCH_MXC_COMMON_H__ | 12 | #define __ASM_ARCH_MXC_COMMON_H__ |
13 | 13 | ||
14 | #include <linux/reboot.h> | ||
15 | |||
14 | struct platform_device; | 16 | struct platform_device; |
15 | struct pt_regs; | 17 | struct pt_regs; |
16 | struct clk; | 18 | struct clk; |
@@ -71,7 +73,7 @@ extern int mx53_clocks_init_dt(void); | |||
71 | extern struct platform_device *mxc_register_gpio(char *name, int id, | 73 | extern struct platform_device *mxc_register_gpio(char *name, int id, |
72 | resource_size_t iobase, resource_size_t iosize, int irq, int irq_high); | 74 | resource_size_t iobase, resource_size_t iosize, int irq, int irq_high); |
73 | extern void mxc_set_cpu_type(unsigned int type); | 75 | extern void mxc_set_cpu_type(unsigned int type); |
74 | extern void mxc_restart(char, const char *); | 76 | extern void mxc_restart(enum reboot_mode, const char *); |
75 | extern void mxc_arch_reset_init(void __iomem *); | 77 | extern void mxc_arch_reset_init(void __iomem *); |
76 | extern void mxc_arch_reset_init_dt(void); | 78 | extern void mxc_arch_reset_init_dt(void); |
77 | extern int mx53_revision(void); | 79 | extern int mx53_revision(void); |
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c index f5965220a4d8..7be13f8e69a0 100644 --- a/arch/arm/mach-imx/mach-imx6q.c +++ b/arch/arm/mach-imx/mach-imx6q.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/of_platform.h> | 27 | #include <linux/of_platform.h> |
28 | #include <linux/opp.h> | 28 | #include <linux/opp.h> |
29 | #include <linux/phy.h> | 29 | #include <linux/phy.h> |
30 | #include <linux/reboot.h> | ||
30 | #include <linux/regmap.h> | 31 | #include <linux/regmap.h> |
31 | #include <linux/micrel_phy.h> | 32 | #include <linux/micrel_phy.h> |
32 | #include <linux/mfd/syscon.h> | 33 | #include <linux/mfd/syscon.h> |
@@ -67,7 +68,7 @@ static void __init imx6q_init_revision(void) | |||
67 | mxc_set_cpu_type(rev >> 16 & 0xff); | 68 | mxc_set_cpu_type(rev >> 16 & 0xff); |
68 | } | 69 | } |
69 | 70 | ||
70 | static void imx6q_restart(char mode, const char *cmd) | 71 | static void imx6q_restart(enum reboot_mode mode, const char *cmd) |
71 | { | 72 | { |
72 | struct device_node *np; | 73 | struct device_node *np; |
73 | void __iomem *wdog_base; | 74 | void __iomem *wdog_base; |
diff --git a/arch/arm/mach-imx/system.c b/arch/arm/mach-imx/system.c index 7cdc79a9657c..6fe81bb4d3c9 100644 --- a/arch/arm/mach-imx/system.c +++ b/arch/arm/mach-imx/system.c | |||
@@ -37,7 +37,7 @@ static struct clk *wdog_clk; | |||
37 | /* | 37 | /* |
38 | * Reset the system. It is called by machine_restart(). | 38 | * Reset the system. It is called by machine_restart(). |
39 | */ | 39 | */ |
40 | void mxc_restart(char mode, const char *cmd) | 40 | void mxc_restart(enum reboot_mode mode, const char *cmd) |
41 | { | 41 | { |
42 | unsigned int wcr_enable; | 42 | unsigned int wcr_enable; |
43 | 43 | ||
diff --git a/arch/arm/mach-integrator/common.h b/arch/arm/mach-integrator/common.h index 72516658be1e..ad0ac5547b2c 100644 --- a/arch/arm/mach-integrator/common.h +++ b/arch/arm/mach-integrator/common.h | |||
@@ -1,7 +1,8 @@ | |||
1 | #include <linux/reboot.h> | ||
1 | #include <linux/amba/serial.h> | 2 | #include <linux/amba/serial.h> |
2 | extern struct amba_pl010_data ap_uart_data; | 3 | extern struct amba_pl010_data ap_uart_data; |
3 | void integrator_init_early(void); | 4 | void integrator_init_early(void); |
4 | int integrator_init(bool is_cp); | 5 | int integrator_init(bool is_cp); |
5 | void integrator_reserve(void); | 6 | void integrator_reserve(void); |
6 | void integrator_restart(char, const char *); | 7 | void integrator_restart(enum reboot_mode, const char *); |
7 | void integrator_init_sysfs(struct device *parent, u32 id); | 8 | void integrator_init_sysfs(struct device *parent, u32 id); |
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c index 81461d218717..4cdfd7365925 100644 --- a/arch/arm/mach-integrator/core.c +++ b/arch/arm/mach-integrator/core.c | |||
@@ -124,7 +124,7 @@ void __init integrator_reserve(void) | |||
124 | /* | 124 | /* |
125 | * To reset, we hit the on-board reset register in the system FPGA | 125 | * To reset, we hit the on-board reset register in the system FPGA |
126 | */ | 126 | */ |
127 | void integrator_restart(char mode, const char *cmd) | 127 | void integrator_restart(enum reboot_mode mode, const char *cmd) |
128 | { | 128 | { |
129 | cm_control(CM_CTRL_RESET, CM_CTRL_RESET); | 129 | cm_control(CM_CTRL_RESET, CM_CTRL_RESET); |
130 | } | 130 | } |
diff --git a/arch/arm/mach-iop13xx/include/mach/iop13xx.h b/arch/arm/mach-iop13xx/include/mach/iop13xx.h index 7480f58267aa..17b40279e0a4 100644 --- a/arch/arm/mach-iop13xx/include/mach/iop13xx.h +++ b/arch/arm/mach-iop13xx/include/mach/iop13xx.h | |||
@@ -2,6 +2,9 @@ | |||
2 | #define _IOP13XX_HW_H_ | 2 | #define _IOP13XX_HW_H_ |
3 | 3 | ||
4 | #ifndef __ASSEMBLY__ | 4 | #ifndef __ASSEMBLY__ |
5 | |||
6 | #include <linux/reboot.h> | ||
7 | |||
5 | /* The ATU offsets can change based on the strapping */ | 8 | /* The ATU offsets can change based on the strapping */ |
6 | extern u32 iop13xx_atux_pmmr_offset; | 9 | extern u32 iop13xx_atux_pmmr_offset; |
7 | extern u32 iop13xx_atue_pmmr_offset; | 10 | extern u32 iop13xx_atue_pmmr_offset; |
@@ -11,7 +14,7 @@ void iop13xx_map_io(void); | |||
11 | void iop13xx_platform_init(void); | 14 | void iop13xx_platform_init(void); |
12 | void iop13xx_add_tpmi_devices(void); | 15 | void iop13xx_add_tpmi_devices(void); |
13 | void iop13xx_init_irq(void); | 16 | void iop13xx_init_irq(void); |
14 | void iop13xx_restart(char, const char *); | 17 | void iop13xx_restart(enum reboot_mode, const char *); |
15 | 18 | ||
16 | /* CPUID CP6 R0 Page 0 */ | 19 | /* CPUID CP6 R0 Page 0 */ |
17 | static inline int iop13xx_cpu_id(void) | 20 | static inline int iop13xx_cpu_id(void) |
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c index 1c5bd7637b05..96e6c7a6793b 100644 --- a/arch/arm/mach-iop13xx/setup.c +++ b/arch/arm/mach-iop13xx/setup.c | |||
@@ -594,7 +594,7 @@ __setup("iop13xx_init_adma", iop13xx_init_adma_setup); | |||
594 | __setup("iop13xx_init_uart", iop13xx_init_uart_setup); | 594 | __setup("iop13xx_init_uart", iop13xx_init_uart_setup); |
595 | __setup("iop13xx_init_i2c", iop13xx_init_i2c_setup); | 595 | __setup("iop13xx_init_i2c", iop13xx_init_i2c_setup); |
596 | 596 | ||
597 | void iop13xx_restart(char mode, const char *cmd) | 597 | void iop13xx_restart(enum reboot_mode mode, const char *cmd) |
598 | { | 598 | { |
599 | /* | 599 | /* |
600 | * Reset the internal bus (warning both cores are reset) | 600 | * Reset the internal bus (warning both cores are reset) |
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c index ea0984a7449e..069144300b77 100644 --- a/arch/arm/mach-iop32x/n2100.c +++ b/arch/arm/mach-iop32x/n2100.c | |||
@@ -286,7 +286,7 @@ static void n2100_power_off(void) | |||
286 | ; | 286 | ; |
287 | } | 287 | } |
288 | 288 | ||
289 | static void n2100_restart(char mode, const char *cmd) | 289 | static void n2100_restart(enum reboot_mode mode, const char *cmd) |
290 | { | 290 | { |
291 | gpio_line_set(N2100_HARDWARE_RESET, GPIO_LOW); | 291 | gpio_line_set(N2100_HARDWARE_RESET, GPIO_LOW); |
292 | gpio_line_config(N2100_HARDWARE_RESET, GPIO_OUT); | 292 | gpio_line_config(N2100_HARDWARE_RESET, GPIO_OUT); |
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index 1f6c1fb353ad..5327decde5a0 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c | |||
@@ -531,9 +531,9 @@ static void __init ixp4xx_clockevent_init(void) | |||
531 | 0xf, 0xfffffffe); | 531 | 0xf, 0xfffffffe); |
532 | } | 532 | } |
533 | 533 | ||
534 | void ixp4xx_restart(char mode, const char *cmd) | 534 | void ixp4xx_restart(enum reboot_mode mode, const char *cmd) |
535 | { | 535 | { |
536 | if ( 1 && mode == 's') { | 536 | if ( 1 && mode == REBOOT_SOFT) { |
537 | /* Jump into ROM at address 0 */ | 537 | /* Jump into ROM at address 0 */ |
538 | soft_restart(0); | 538 | soft_restart(0); |
539 | } else { | 539 | } else { |
diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c index 5d413f8c5700..686ef34c69f5 100644 --- a/arch/arm/mach-ixp4xx/dsmg600-setup.c +++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/i2c.h> | 27 | #include <linux/i2c.h> |
28 | #include <linux/i2c-gpio.h> | 28 | #include <linux/i2c-gpio.h> |
29 | 29 | ||
30 | #include <mach/hardware.h> | ||
30 | #include <asm/mach-types.h> | 31 | #include <asm/mach-types.h> |
31 | #include <asm/mach/arch.h> | 32 | #include <asm/mach/arch.h> |
32 | #include <asm/mach/flash.h> | 33 | #include <asm/mach/flash.h> |
diff --git a/arch/arm/mach-ixp4xx/include/mach/platform.h b/arch/arm/mach-ixp4xx/include/mach/platform.h index db5afb69c123..4c4c6a6f4526 100644 --- a/arch/arm/mach-ixp4xx/include/mach/platform.h +++ b/arch/arm/mach-ixp4xx/include/mach/platform.h | |||
@@ -13,6 +13,8 @@ | |||
13 | 13 | ||
14 | #ifndef __ASSEMBLY__ | 14 | #ifndef __ASSEMBLY__ |
15 | 15 | ||
16 | #include <linux/reboot.h> | ||
17 | |||
16 | #include <asm/types.h> | 18 | #include <asm/types.h> |
17 | 19 | ||
18 | #ifndef __ARMEB__ | 20 | #ifndef __ARMEB__ |
@@ -123,7 +125,7 @@ extern void ixp4xx_init_early(void); | |||
123 | extern void ixp4xx_init_irq(void); | 125 | extern void ixp4xx_init_irq(void); |
124 | extern void ixp4xx_sys_init(void); | 126 | extern void ixp4xx_sys_init(void); |
125 | extern void ixp4xx_timer_init(void); | 127 | extern void ixp4xx_timer_init(void); |
126 | extern void ixp4xx_restart(char, const char *); | 128 | extern void ixp4xx_restart(enum reboot_mode, const char *); |
127 | extern void ixp4xx_pci_preinit(void); | 129 | extern void ixp4xx_pci_preinit(void); |
128 | struct pci_sys_data; | 130 | struct pci_sys_data; |
129 | extern int ixp4xx_setup(int nr, struct pci_sys_data *sys); | 131 | extern int ixp4xx_setup(int nr, struct pci_sys_data *sys); |
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c index 7c72c725b711..e9238b5567ee 100644 --- a/arch/arm/mach-kirkwood/common.c +++ b/arch/arm/mach-kirkwood/common.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/mv643xx_i2c.h> | 20 | #include <linux/mv643xx_i2c.h> |
21 | #include <linux/timex.h> | 21 | #include <linux/timex.h> |
22 | #include <linux/kexec.h> | 22 | #include <linux/kexec.h> |
23 | #include <linux/reboot.h> | ||
23 | #include <net/dsa.h> | 24 | #include <net/dsa.h> |
24 | #include <asm/page.h> | 25 | #include <asm/page.h> |
25 | #include <asm/mach/map.h> | 26 | #include <asm/mach/map.h> |
@@ -722,7 +723,7 @@ void __init kirkwood_init(void) | |||
722 | #endif | 723 | #endif |
723 | } | 724 | } |
724 | 725 | ||
725 | void kirkwood_restart(char mode, const char *cmd) | 726 | void kirkwood_restart(enum reboot_mode mode, const char *cmd) |
726 | { | 727 | { |
727 | /* | 728 | /* |
728 | * Enable soft reset to assert RSTOUTn. | 729 | * Enable soft reset to assert RSTOUTn. |
diff --git a/arch/arm/mach-kirkwood/common.h b/arch/arm/mach-kirkwood/common.h index 1c09f3f93fbb..fcf3ba682e24 100644 --- a/arch/arm/mach-kirkwood/common.h +++ b/arch/arm/mach-kirkwood/common.h | |||
@@ -11,6 +11,8 @@ | |||
11 | #ifndef __ARCH_KIRKWOOD_COMMON_H | 11 | #ifndef __ARCH_KIRKWOOD_COMMON_H |
12 | #define __ARCH_KIRKWOOD_COMMON_H | 12 | #define __ARCH_KIRKWOOD_COMMON_H |
13 | 13 | ||
14 | #include <linux/reboot.h> | ||
15 | |||
14 | struct dsa_platform_data; | 16 | struct dsa_platform_data; |
15 | struct mv643xx_eth_platform_data; | 17 | struct mv643xx_eth_platform_data; |
16 | struct mv_sata_platform_data; | 18 | struct mv_sata_platform_data; |
@@ -53,7 +55,7 @@ void kirkwood_audio_init(void); | |||
53 | void kirkwood_cpuidle_init(void); | 55 | void kirkwood_cpuidle_init(void); |
54 | void kirkwood_cpufreq_init(void); | 56 | void kirkwood_cpufreq_init(void); |
55 | 57 | ||
56 | void kirkwood_restart(char, const char *); | 58 | void kirkwood_restart(enum reboot_mode, const char *); |
57 | void kirkwood_clk_init(void); | 59 | void kirkwood_clk_init(void); |
58 | 60 | ||
59 | /* board init functions for boards not fully converted to fdt */ | 61 | /* board init functions for boards not fully converted to fdt */ |
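A pattern worth noting in the header hunks: every header that declares a *_restart() prototype now includes <linux/reboot.h> before the declaration. Unlike a struct, an enum cannot be forward-declared in standard C, so the full definition of enum reboot_mode has to be visible wherever the new prototype appears. A minimal sketch of the resulting header shape (the board name is hypothetical):

/* example_board.h - hypothetical header following the pattern above */
#include <linux/reboot.h>	/* makes enum reboot_mode visible for the prototype */

void example_board_restart(enum reboot_mode, const char *);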
diff --git a/arch/arm/mach-ks8695/generic.h b/arch/arm/mach-ks8695/generic.h index 6e97ce462d73..43253f8e6de4 100644 --- a/arch/arm/mach-ks8695/generic.h +++ b/arch/arm/mach-ks8695/generic.h | |||
@@ -12,5 +12,5 @@ | |||
12 | 12 | ||
13 | extern __init void ks8695_map_io(void); | 13 | extern __init void ks8695_map_io(void); |
14 | extern __init void ks8695_init_irq(void); | 14 | extern __init void ks8695_init_irq(void); |
15 | extern void ks8695_restart(char, const char *); | 15 | extern void ks8695_restart(enum reboot_mode, const char *); |
16 | extern void ks8695_timer_init(void); | 16 | extern void ks8695_timer_init(void); |
diff --git a/arch/arm/mach-ks8695/time.c b/arch/arm/mach-ks8695/time.c index c272a3863d5f..426c97662f5b 100644 --- a/arch/arm/mach-ks8695/time.c +++ b/arch/arm/mach-ks8695/time.c | |||
@@ -154,11 +154,11 @@ void __init ks8695_timer_init(void) | |||
154 | setup_irq(KS8695_IRQ_TIMER1, &ks8695_timer_irq); | 154 | setup_irq(KS8695_IRQ_TIMER1, &ks8695_timer_irq); |
155 | } | 155 | } |
156 | 156 | ||
157 | void ks8695_restart(char mode, const char *cmd) | 157 | void ks8695_restart(enum reboot_mode reboot_mode, const char *cmd) |
158 | { | 158 | { |
159 | unsigned int reg; | 159 | unsigned int reg; |
160 | 160 | ||
161 | if (mode == 's') | 161 | if (reboot_mode == REBOOT_SOFT) |
162 | soft_restart(0); | 162 | soft_restart(0); |
163 | 163 | ||
164 | /* disable timer0 */ | 164 | /* disable timer0 */ |
diff --git a/arch/arm/mach-lpc32xx/common.c b/arch/arm/mach-lpc32xx/common.c index 0d4db8c544b5..d7aa54c25c59 100644 --- a/arch/arm/mach-lpc32xx/common.c +++ b/arch/arm/mach-lpc32xx/common.c | |||
@@ -207,11 +207,11 @@ void __init lpc32xx_map_io(void) | |||
207 | iotable_init(lpc32xx_io_desc, ARRAY_SIZE(lpc32xx_io_desc)); | 207 | iotable_init(lpc32xx_io_desc, ARRAY_SIZE(lpc32xx_io_desc)); |
208 | } | 208 | } |
209 | 209 | ||
210 | void lpc23xx_restart(char mode, const char *cmd) | 210 | void lpc23xx_restart(enum reboot_mode mode, const char *cmd) |
211 | { | 211 | { |
212 | switch (mode) { | 212 | switch (mode) { |
213 | case 's': | 213 | case REBOOT_SOFT: |
214 | case 'h': | 214 | case REBOOT_HARD: |
215 | lpc32xx_watchdog_reset(); | 215 | lpc32xx_watchdog_reset(); |
216 | break; | 216 | break; |
217 | 217 | ||
diff --git a/arch/arm/mach-lpc32xx/common.h b/arch/arm/mach-lpc32xx/common.h index e0b26062a272..1cd8853b2f9b 100644 --- a/arch/arm/mach-lpc32xx/common.h +++ b/arch/arm/mach-lpc32xx/common.h | |||
@@ -21,6 +21,7 @@ | |||
21 | 21 | ||
22 | #include <mach/board.h> | 22 | #include <mach/board.h> |
23 | #include <linux/platform_device.h> | 23 | #include <linux/platform_device.h> |
24 | #include <linux/reboot.h> | ||
24 | 25 | ||
25 | /* | 26 | /* |
26 | * Other arch specific structures and functions | 27 | * Other arch specific structures and functions |
@@ -29,7 +30,7 @@ extern void lpc32xx_timer_init(void); | |||
29 | extern void __init lpc32xx_init_irq(void); | 30 | extern void __init lpc32xx_init_irq(void); |
30 | extern void __init lpc32xx_map_io(void); | 31 | extern void __init lpc32xx_map_io(void); |
31 | extern void __init lpc32xx_serial_init(void); | 32 | extern void __init lpc32xx_serial_init(void); |
32 | extern void lpc23xx_restart(char, const char *); | 33 | extern void lpc23xx_restart(enum reboot_mode, const char *); |
33 | 34 | ||
34 | 35 | ||
35 | /* | 36 | /* |
diff --git a/arch/arm/mach-mmp/common.c b/arch/arm/mach-mmp/common.c index 9292b7966e3b..c03b4ab582db 100644 --- a/arch/arm/mach-mmp/common.c +++ b/arch/arm/mach-mmp/common.c | |||
@@ -47,7 +47,7 @@ void __init mmp_map_io(void) | |||
47 | mmp_chip_id = __raw_readl(MMP_CHIPID); | 47 | mmp_chip_id = __raw_readl(MMP_CHIPID); |
48 | } | 48 | } |
49 | 49 | ||
50 | void mmp_restart(char mode, const char *cmd) | 50 | void mmp_restart(enum reboot_mode mode, const char *cmd) |
51 | { | 51 | { |
52 | soft_restart(0); | 52 | soft_restart(0); |
53 | } | 53 | } |
diff --git a/arch/arm/mach-mmp/common.h b/arch/arm/mach-mmp/common.h index 0bdc50b134ce..991d7e9877de 100644 --- a/arch/arm/mach-mmp/common.h +++ b/arch/arm/mach-mmp/common.h | |||
@@ -1,10 +1,11 @@ | |||
1 | #include <linux/reboot.h> | ||
1 | #define ARRAY_AND_SIZE(x) (x), ARRAY_SIZE(x) | 2 | #define ARRAY_AND_SIZE(x) (x), ARRAY_SIZE(x) |
2 | 3 | ||
3 | extern void timer_init(int irq); | 4 | extern void timer_init(int irq); |
4 | 5 | ||
5 | extern void __init icu_init_irq(void); | 6 | extern void __init icu_init_irq(void); |
6 | extern void __init mmp_map_io(void); | 7 | extern void __init mmp_map_io(void); |
7 | extern void mmp_restart(char, const char *); | 8 | extern void mmp_restart(enum reboot_mode, const char *); |
8 | extern void __init pxa168_clk_init(void); | 9 | extern void __init pxa168_clk_init(void); |
9 | extern void __init pxa910_clk_init(void); | 10 | extern void __init pxa910_clk_init(void); |
10 | extern void __init mmp2_clk_init(void); | 11 | extern void __init mmp2_clk_init(void); |
diff --git a/arch/arm/mach-mmp/include/mach/pxa168.h b/arch/arm/mach-mmp/include/mach/pxa168.h index 7ed1df21ea1c..459c2d03eb5c 100644 --- a/arch/arm/mach-mmp/include/mach/pxa168.h +++ b/arch/arm/mach-mmp/include/mach/pxa168.h | |||
@@ -1,9 +1,11 @@ | |||
1 | #ifndef __ASM_MACH_PXA168_H | 1 | #ifndef __ASM_MACH_PXA168_H |
2 | #define __ASM_MACH_PXA168_H | 2 | #define __ASM_MACH_PXA168_H |
3 | 3 | ||
4 | #include <linux/reboot.h> | ||
5 | |||
4 | extern void pxa168_timer_init(void); | 6 | extern void pxa168_timer_init(void); |
5 | extern void __init pxa168_init_irq(void); | 7 | extern void __init pxa168_init_irq(void); |
6 | extern void pxa168_restart(char, const char *); | 8 | extern void pxa168_restart(enum reboot_mode, const char *); |
7 | extern void pxa168_clear_keypad_wakeup(void); | 9 | extern void pxa168_clear_keypad_wakeup(void); |
8 | 10 | ||
9 | #include <linux/i2c.h> | 11 | #include <linux/i2c.h> |
diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c index a30dcf3b7d9e..144e997624c0 100644 --- a/arch/arm/mach-mmp/pxa168.c +++ b/arch/arm/mach-mmp/pxa168.c | |||
@@ -172,7 +172,7 @@ int __init pxa168_add_usb_host(struct mv_usb_platform_data *pdata) | |||
172 | return platform_device_register(&pxa168_device_usb_host); | 172 | return platform_device_register(&pxa168_device_usb_host); |
173 | } | 173 | } |
174 | 174 | ||
175 | void pxa168_restart(char mode, const char *cmd) | 175 | void pxa168_restart(enum reboot_mode mode, const char *cmd) |
176 | { | 176 | { |
177 | soft_restart(0xffff0000); | 177 | soft_restart(0xffff0000); |
178 | } | 178 | } |
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c index 749a7f8c4992..75062eff2494 100644 --- a/arch/arm/mach-mv78xx0/common.c +++ b/arch/arm/mach-mv78xx0/common.c | |||
@@ -413,7 +413,7 @@ void __init mv78xx0_init(void) | |||
413 | clk_init(); | 413 | clk_init(); |
414 | } | 414 | } |
415 | 415 | ||
416 | void mv78xx0_restart(char mode, const char *cmd) | 416 | void mv78xx0_restart(enum reboot_mode mode, const char *cmd) |
417 | { | 417 | { |
418 | /* | 418 | /* |
419 | * Enable soft reset to assert RSTOUTn. | 419 | * Enable soft reset to assert RSTOUTn. |
diff --git a/arch/arm/mach-mv78xx0/common.h b/arch/arm/mach-mv78xx0/common.h index 5e9485bad0ac..6889af26077d 100644 --- a/arch/arm/mach-mv78xx0/common.h +++ b/arch/arm/mach-mv78xx0/common.h | |||
@@ -11,6 +11,8 @@ | |||
11 | #ifndef __ARCH_MV78XX0_COMMON_H | 11 | #ifndef __ARCH_MV78XX0_COMMON_H |
12 | #define __ARCH_MV78XX0_COMMON_H | 12 | #define __ARCH_MV78XX0_COMMON_H |
13 | 13 | ||
14 | #include <linux/reboot.h> | ||
15 | |||
14 | struct mv643xx_eth_platform_data; | 16 | struct mv643xx_eth_platform_data; |
15 | struct mv_sata_platform_data; | 17 | struct mv_sata_platform_data; |
16 | 18 | ||
@@ -45,7 +47,7 @@ void mv78xx0_uart1_init(void); | |||
45 | void mv78xx0_uart2_init(void); | 47 | void mv78xx0_uart2_init(void); |
46 | void mv78xx0_uart3_init(void); | 48 | void mv78xx0_uart3_init(void); |
47 | void mv78xx0_i2c_init(void); | 49 | void mv78xx0_i2c_init(void); |
48 | void mv78xx0_restart(char, const char *); | 50 | void mv78xx0_restart(enum reboot_mode, const char *); |
49 | 51 | ||
50 | extern void mv78xx0_timer_init(void); | 52 | extern void mv78xx0_timer_init(void); |
51 | 53 | ||
diff --git a/arch/arm/mach-mvebu/common.h b/arch/arm/mach-mvebu/common.h index 98defd5e92cd..e366010e1d91 100644 --- a/arch/arm/mach-mvebu/common.h +++ b/arch/arm/mach-mvebu/common.h | |||
@@ -17,7 +17,9 @@ | |||
17 | 17 | ||
18 | #define ARMADA_XP_MAX_CPUS 4 | 18 | #define ARMADA_XP_MAX_CPUS 4 |
19 | 19 | ||
20 | void mvebu_restart(char mode, const char *cmd); | 20 | #include <linux/reboot.h> |
21 | |||
22 | void mvebu_restart(enum reboot_mode mode, const char *cmd); | ||
21 | 23 | ||
22 | void armada_370_xp_init_irq(void); | 24 | void armada_370_xp_init_irq(void); |
23 | void armada_370_xp_handle_irq(struct pt_regs *regs); | 25 | void armada_370_xp_handle_irq(struct pt_regs *regs); |
diff --git a/arch/arm/mach-mvebu/system-controller.c b/arch/arm/mach-mvebu/system-controller.c index b8079df8c986..f875124ff4f9 100644 --- a/arch/arm/mach-mvebu/system-controller.c +++ b/arch/arm/mach-mvebu/system-controller.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/init.h> | 26 | #include <linux/init.h> |
27 | #include <linux/of_address.h> | 27 | #include <linux/of_address.h> |
28 | #include <linux/io.h> | 28 | #include <linux/io.h> |
29 | #include <linux/reboot.h> | ||
29 | 30 | ||
30 | static void __iomem *system_controller_base; | 31 | static void __iomem *system_controller_base; |
31 | 32 | ||
@@ -63,7 +64,7 @@ static struct of_device_id of_system_controller_table[] = { | |||
63 | { /* end of list */ }, | 64 | { /* end of list */ }, |
64 | }; | 65 | }; |
65 | 66 | ||
66 | void mvebu_restart(char mode, const char *cmd) | 67 | void mvebu_restart(enum reboot_mode mode, const char *cmd) |
67 | { | 68 | { |
68 | if (!system_controller_base) { | 69 | if (!system_controller_base) { |
69 | pr_err("Cannot restart, system-controller not available: check the device tree\n"); | 70 | pr_err("Cannot restart, system-controller not available: check the device tree\n"); |
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c index 7fa611c1b287..6298adb8d335 100644 --- a/arch/arm/mach-mxs/mach-mxs.c +++ b/arch/arm/mach-mxs/mach-mxs.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/gpio.h> | 20 | #include <linux/gpio.h> |
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/irqchip/mxs.h> | 22 | #include <linux/irqchip/mxs.h> |
23 | #include <linux/reboot.h> | ||
23 | #include <linux/micrel_phy.h> | 24 | #include <linux/micrel_phy.h> |
24 | #include <linux/of_address.h> | 25 | #include <linux/of_address.h> |
25 | #include <linux/of_platform.h> | 26 | #include <linux/of_platform.h> |
@@ -500,7 +501,7 @@ static void __init mxs_machine_init(void) | |||
500 | /* | 501 | /* |
501 | * Reset the system. It is called by machine_restart(). | 502 | * Reset the system. It is called by machine_restart(). |
502 | */ | 503 | */ |
503 | static void mxs_restart(char mode, const char *cmd) | 504 | static void mxs_restart(enum reboot_mode mode, const char *cmd) |
504 | { | 505 | { |
505 | struct device_node *np; | 506 | struct device_node *np; |
506 | void __iomem *reset_addr; | 507 | void __iomem *reset_addr; |
diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c index 1504b68f4c66..db25b0cef3a7 100644 --- a/arch/arm/mach-netx/generic.c +++ b/arch/arm/mach-netx/generic.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/io.h> | 25 | #include <linux/io.h> |
26 | #include <linux/irqchip/arm-vic.h> | 26 | #include <linux/irqchip/arm-vic.h> |
27 | #include <linux/reboot.h> | ||
27 | #include <mach/hardware.h> | 28 | #include <mach/hardware.h> |
28 | #include <asm/mach/map.h> | 29 | #include <asm/mach/map.h> |
29 | #include <mach/netx-regs.h> | 30 | #include <mach/netx-regs.h> |
@@ -187,7 +188,7 @@ static int __init netx_init(void) | |||
187 | 188 | ||
188 | subsys_initcall(netx_init); | 189 | subsys_initcall(netx_init); |
189 | 190 | ||
190 | void netx_restart(char mode, const char *cmd) | 191 | void netx_restart(enum reboot_mode mode, const char *cmd) |
191 | { | 192 | { |
192 | writel(NETX_SYSTEM_RES_CR_FIRMW_RES_EN | NETX_SYSTEM_RES_CR_FIRMW_RES, | 193 | writel(NETX_SYSTEM_RES_CR_FIRMW_RES_EN | NETX_SYSTEM_RES_CR_FIRMW_RES, |
193 | NETX_SYSTEM_RES_CR); | 194 | NETX_SYSTEM_RES_CR); |
diff --git a/arch/arm/mach-netx/generic.h b/arch/arm/mach-netx/generic.h index 768b26bbb42b..bb2ce471cc28 100644 --- a/arch/arm/mach-netx/generic.h +++ b/arch/arm/mach-netx/generic.h | |||
@@ -17,8 +17,10 @@ | |||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/reboot.h> | ||
21 | |||
20 | extern void __init netx_map_io(void); | 22 | extern void __init netx_map_io(void); |
21 | extern void __init netx_init_irq(void); | 23 | extern void __init netx_init_irq(void); |
22 | extern void netx_restart(char, const char *); | 24 | extern void netx_restart(enum reboot_mode, const char *); |
23 | 25 | ||
24 | extern void netx_timer_init(void); | 26 | extern void netx_timer_init(void); |
diff --git a/arch/arm/mach-nomadik/cpu-8815.c b/arch/arm/mach-nomadik/cpu-8815.c index 2df209ed1a07..13e0df9c11ce 100644 --- a/arch/arm/mach-nomadik/cpu-8815.c +++ b/arch/arm/mach-nomadik/cpu-8815.c | |||
@@ -103,7 +103,7 @@ static void __init cpu8815_map_io(void) | |||
103 | iotable_init(cpu8815_io_desc, ARRAY_SIZE(cpu8815_io_desc)); | 103 | iotable_init(cpu8815_io_desc, ARRAY_SIZE(cpu8815_io_desc)); |
104 | } | 104 | } |
105 | 105 | ||
106 | static void cpu8815_restart(char mode, const char *cmd) | 106 | static void cpu8815_restart(enum reboot_mode mode, const char *cmd) |
107 | { | 107 | { |
108 | void __iomem *srcbase = ioremap(NOMADIK_SRC_BASE, SZ_4K); | 108 | void __iomem *srcbase = ioremap(NOMADIK_SRC_BASE, SZ_4K); |
109 | 109 | ||
diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c index 6c116e1a4b01..4677a9ccb3cb 100644 --- a/arch/arm/mach-omap1/board-voiceblue.c +++ b/arch/arm/mach-omap1/board-voiceblue.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/serial_reg.h> | 26 | #include <linux/serial_reg.h> |
27 | #include <linux/smc91x.h> | 27 | #include <linux/smc91x.h> |
28 | #include <linux/export.h> | 28 | #include <linux/export.h> |
29 | #include <linux/reboot.h> | ||
29 | 30 | ||
30 | #include <asm/mach-types.h> | 31 | #include <asm/mach-types.h> |
31 | #include <asm/mach/arch.h> | 32 | #include <asm/mach/arch.h> |
@@ -215,7 +216,7 @@ void voiceblue_wdt_ping(void) | |||
215 | gpio_set_value(0, wdt_gpio_state); | 216 | gpio_set_value(0, wdt_gpio_state); |
216 | } | 217 | } |
217 | 218 | ||
218 | static void voiceblue_restart(char mode, const char *cmd) | 219 | static void voiceblue_restart(enum reboot_mode mode, const char *cmd) |
219 | { | 220 | { |
220 | /* | 221 | /* |
221 | * Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28 | 222 | * Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28 |
diff --git a/arch/arm/mach-omap1/common.h b/arch/arm/mach-omap1/common.h index 14f7e9920479..abec019a5281 100644 --- a/arch/arm/mach-omap1/common.h +++ b/arch/arm/mach-omap1/common.h | |||
@@ -28,6 +28,7 @@ | |||
28 | 28 | ||
29 | #include <linux/mtd/mtd.h> | 29 | #include <linux/mtd/mtd.h> |
30 | #include <linux/i2c-omap.h> | 30 | #include <linux/i2c-omap.h> |
31 | #include <linux/reboot.h> | ||
31 | 32 | ||
32 | #include <plat/i2c.h> | 33 | #include <plat/i2c.h> |
33 | 34 | ||
@@ -70,7 +71,7 @@ static inline int omap_serial_wakeup_init(void) | |||
70 | void omap1_init_early(void); | 71 | void omap1_init_early(void); |
71 | void omap1_init_irq(void); | 72 | void omap1_init_irq(void); |
72 | void omap1_init_late(void); | 73 | void omap1_init_late(void); |
73 | void omap1_restart(char, const char *); | 74 | void omap1_restart(enum reboot_mode, const char *); |
74 | 75 | ||
75 | extern void __init omap_check_revision(void); | 76 | extern void __init omap_check_revision(void); |
76 | 77 | ||
diff --git a/arch/arm/mach-omap1/reset.c b/arch/arm/mach-omap1/reset.c index 5eebd7e889d0..72bf4bf4a702 100644 --- a/arch/arm/mach-omap1/reset.c +++ b/arch/arm/mach-omap1/reset.c | |||
@@ -3,6 +3,7 @@ | |||
3 | */ | 3 | */ |
4 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
5 | #include <linux/io.h> | 5 | #include <linux/io.h> |
6 | #include <linux/reboot.h> | ||
6 | 7 | ||
7 | #include <mach/hardware.h> | 8 | #include <mach/hardware.h> |
8 | 9 | ||
@@ -22,7 +23,7 @@ | |||
22 | #define OMAP_EXTWARM_RST_SRC_ID_SHIFT 5 | 23 | #define OMAP_EXTWARM_RST_SRC_ID_SHIFT 5 |
23 | 24 | ||
24 | 25 | ||
25 | void omap1_restart(char mode, const char *cmd) | 26 | void omap1_restart(enum reboot_mode mode, const char *cmd) |
26 | { | 27 | { |
27 | /* | 28 | /* |
28 | * Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28 | 29 | * Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28 |
diff --git a/arch/arm/mach-omap2/am33xx-restart.c b/arch/arm/mach-omap2/am33xx-restart.c index 88e4fa8af031..1eae96212315 100644 --- a/arch/arm/mach-omap2/am33xx-restart.c +++ b/arch/arm/mach-omap2/am33xx-restart.c | |||
@@ -6,6 +6,7 @@ | |||
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | */ | 7 | */ |
8 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
9 | #include <linux/reboot.h> | ||
9 | 10 | ||
10 | #include "common.h" | 11 | #include "common.h" |
11 | #include "prm-regbits-33xx.h" | 12 | #include "prm-regbits-33xx.h" |
@@ -19,7 +20,7 @@ | |||
19 | * Resets the SoC. For @cmd, see the 'reboot' syscall in | 20 | * Resets the SoC. For @cmd, see the 'reboot' syscall in |
20 | * kernel/sys.c. No return value. | 21 | * kernel/sys.c. No return value. |
21 | */ | 22 | */ |
22 | void am33xx_restart(char mode, const char *cmd) | 23 | void am33xx_restart(enum reboot_mode mode, const char *cmd) |
23 | { | 24 | { |
24 | /* TODO: Handle mode and cmd if necessary */ | 25 | /* TODO: Handle mode and cmd if necessary */ |
25 | 26 | ||
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index 72cab3f4f16d..dfcc182ecff9 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/i2c.h> | 31 | #include <linux/i2c.h> |
32 | #include <linux/i2c/twl.h> | 32 | #include <linux/i2c/twl.h> |
33 | #include <linux/i2c-omap.h> | 33 | #include <linux/i2c-omap.h> |
34 | #include <linux/reboot.h> | ||
34 | 35 | ||
35 | #include <asm/proc-fns.h> | 36 | #include <asm/proc-fns.h> |
36 | 37 | ||
@@ -119,33 +120,33 @@ static inline void omap_soc_device_init(void) | |||
119 | #endif | 120 | #endif |
120 | 121 | ||
121 | #if defined(CONFIG_SOC_OMAP2420) || defined(CONFIG_SOC_OMAP2430) | 122 | #if defined(CONFIG_SOC_OMAP2420) || defined(CONFIG_SOC_OMAP2430) |
122 | void omap2xxx_restart(char mode, const char *cmd); | 123 | void omap2xxx_restart(enum reboot_mode mode, const char *cmd); |
123 | #else | 124 | #else |
124 | static inline void omap2xxx_restart(char mode, const char *cmd) | 125 | static inline void omap2xxx_restart(enum reboot_mode mode, const char *cmd) |
125 | { | 126 | { |
126 | } | 127 | } |
127 | #endif | 128 | #endif |
128 | 129 | ||
129 | #ifdef CONFIG_SOC_AM33XX | 130 | #ifdef CONFIG_SOC_AM33XX |
130 | void am33xx_restart(char mode, const char *cmd); | 131 | void am33xx_restart(enum reboot_mode mode, const char *cmd); |
131 | #else | 132 | #else |
132 | static inline void am33xx_restart(char mode, const char *cmd) | 133 | static inline void am33xx_restart(enum reboot_mode mode, const char *cmd) |
133 | { | 134 | { |
134 | } | 135 | } |
135 | #endif | 136 | #endif |
136 | 137 | ||
137 | #ifdef CONFIG_ARCH_OMAP3 | 138 | #ifdef CONFIG_ARCH_OMAP3 |
138 | void omap3xxx_restart(char mode, const char *cmd); | 139 | void omap3xxx_restart(enum reboot_mode mode, const char *cmd); |
139 | #else | 140 | #else |
140 | static inline void omap3xxx_restart(char mode, const char *cmd) | 141 | static inline void omap3xxx_restart(enum reboot_mode mode, const char *cmd) |
141 | { | 142 | { |
142 | } | 143 | } |
143 | #endif | 144 | #endif |
144 | 145 | ||
145 | #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) | 146 | #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) |
146 | void omap44xx_restart(char mode, const char *cmd); | 147 | void omap44xx_restart(enum reboot_mode mode, const char *cmd); |
147 | #else | 148 | #else |
148 | static inline void omap44xx_restart(char mode, const char *cmd) | 149 | static inline void omap44xx_restart(enum reboot_mode mode, const char *cmd) |
149 | { | 150 | { |
150 | } | 151 | } |
151 | #endif | 152 | #endif |
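The OMAP header above also shows the stub pattern that keeps callers building no matter which SoC families are configured: a real prototype when the family is enabled, and an empty static inline with the identical enum reboot_mode signature when it is not. A condensed sketch of the same pattern, using a hypothetical CONFIG_SOC_EXAMPLE:

#ifdef CONFIG_SOC_EXAMPLE
void example_restart(enum reboot_mode mode, const char *cmd);
#else
static inline void example_restart(enum reboot_mode mode, const char *cmd)
{
	/* SoC family not configured: calls compile to nothing */
}
#endif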
diff --git a/arch/arm/mach-omap2/omap2-restart.c b/arch/arm/mach-omap2/omap2-restart.c index 719b716a4494..68423e26399d 100644 --- a/arch/arm/mach-omap2/omap2-restart.c +++ b/arch/arm/mach-omap2/omap2-restart.c | |||
@@ -31,7 +31,7 @@ static struct clk *reset_virt_prcm_set_ck, *reset_sys_ck; | |||
31 | * Set the DPLL to bypass so that reboot completes successfully. No | 31 | * Set the DPLL to bypass so that reboot completes successfully. No |
32 | * return value. | 32 | * return value. |
33 | */ | 33 | */ |
34 | void omap2xxx_restart(char mode, const char *cmd) | 34 | void omap2xxx_restart(enum reboot_mode mode, const char *cmd) |
35 | { | 35 | { |
36 | u32 rate; | 36 | u32 rate; |
37 | 37 | ||
diff --git a/arch/arm/mach-omap2/omap3-restart.c b/arch/arm/mach-omap2/omap3-restart.c index 923c582189e5..5de2a0c2979d 100644 --- a/arch/arm/mach-omap2/omap3-restart.c +++ b/arch/arm/mach-omap2/omap3-restart.c | |||
@@ -12,6 +12,7 @@ | |||
12 | */ | 12 | */ |
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/reboot.h> | ||
15 | 16 | ||
16 | #include "iomap.h" | 17 | #include "iomap.h" |
17 | #include "common.h" | 18 | #include "common.h" |
@@ -28,7 +29,7 @@ | |||
28 | * Resets the SoC. For @cmd, see the 'reboot' syscall in | 29 | * Resets the SoC. For @cmd, see the 'reboot' syscall in |
29 | * kernel/sys.c. No return value. | 30 | * kernel/sys.c. No return value. |
30 | */ | 31 | */ |
31 | void omap3xxx_restart(char mode, const char *cmd) | 32 | void omap3xxx_restart(enum reboot_mode mode, const char *cmd) |
32 | { | 33 | { |
33 | omap3_ctrl_write_boot_mode((cmd ? (u8)*cmd : 0)); | 34 | omap3_ctrl_write_boot_mode((cmd ? (u8)*cmd : 0)); |
34 | omap3xxx_prm_dpll3_reset(); /* never returns */ | 35 | omap3xxx_prm_dpll3_reset(); /* never returns */ |
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c index 38cd3a69cff3..57911430324e 100644 --- a/arch/arm/mach-omap2/omap4-common.c +++ b/arch/arm/mach-omap2/omap4-common.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/export.h> | 23 | #include <linux/export.h> |
24 | #include <linux/irqchip/arm-gic.h> | 24 | #include <linux/irqchip/arm-gic.h> |
25 | #include <linux/of_address.h> | 25 | #include <linux/of_address.h> |
26 | #include <linux/reboot.h> | ||
26 | 27 | ||
27 | #include <asm/hardware/cache-l2x0.h> | 28 | #include <asm/hardware/cache-l2x0.h> |
28 | #include <asm/mach/map.h> | 29 | #include <asm/mach/map.h> |
diff --git a/arch/arm/mach-omap2/omap4-restart.c b/arch/arm/mach-omap2/omap4-restart.c index f90e02e11898..41dfd7da8170 100644 --- a/arch/arm/mach-omap2/omap4-restart.c +++ b/arch/arm/mach-omap2/omap4-restart.c | |||
@@ -8,6 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
11 | #include <linux/reboot.h> | ||
11 | #include "prminst44xx.h" | 12 | #include "prminst44xx.h" |
12 | 13 | ||
13 | /** | 14 | /** |
@@ -18,7 +19,7 @@ | |||
18 | * Resets the SoC. For @cmd, see the 'reboot' syscall in | 19 | * Resets the SoC. For @cmd, see the 'reboot' syscall in |
19 | * kernel/sys.c. No return value. | 20 | * kernel/sys.c. No return value. |
20 | */ | 21 | */ |
21 | void omap44xx_restart(char mode, const char *cmd) | 22 | void omap44xx_restart(enum reboot_mode mode, const char *cmd) |
22 | { | 23 | { |
23 | /* XXX Should save 'cmd' into scratchpad for use after reboot */ | 24 | /* XXX Should save 'cmd' into scratchpad for use after reboot */ |
24 | omap4_prminst_global_warm_sw_reset(); /* never returns */ | 25 | omap4_prminst_global_warm_sw_reset(); /* never returns */ |
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c index f8a6db9239bf..b41599f98a8e 100644 --- a/arch/arm/mach-orion5x/common.c +++ b/arch/arm/mach-orion5x/common.c | |||
@@ -347,7 +347,7 @@ void __init orion5x_init(void) | |||
347 | orion5x_wdt_init(); | 347 | orion5x_wdt_init(); |
348 | } | 348 | } |
349 | 349 | ||
350 | void orion5x_restart(char mode, const char *cmd) | 350 | void orion5x_restart(enum reboot_mode mode, const char *cmd) |
351 | { | 351 | { |
352 | /* | 352 | /* |
353 | * Enable and issue soft reset | 353 | * Enable and issue soft reset |
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h index cdaa01f3d186..a909afb384fb 100644 --- a/arch/arm/mach-orion5x/common.h +++ b/arch/arm/mach-orion5x/common.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef __ARCH_ORION5X_COMMON_H | 1 | #ifndef __ARCH_ORION5X_COMMON_H |
2 | #define __ARCH_ORION5X_COMMON_H | 2 | #define __ARCH_ORION5X_COMMON_H |
3 | 3 | ||
4 | #include <linux/reboot.h> | ||
5 | |||
4 | struct dsa_platform_data; | 6 | struct dsa_platform_data; |
5 | struct mv643xx_eth_platform_data; | 7 | struct mv643xx_eth_platform_data; |
6 | struct mv_sata_platform_data; | 8 | struct mv_sata_platform_data; |
@@ -29,7 +31,7 @@ void orion5x_spi_init(void); | |||
29 | void orion5x_uart0_init(void); | 31 | void orion5x_uart0_init(void); |
30 | void orion5x_uart1_init(void); | 32 | void orion5x_uart1_init(void); |
31 | void orion5x_xor_init(void); | 33 | void orion5x_xor_init(void); |
32 | void orion5x_restart(char, const char *); | 34 | void orion5x_restart(enum reboot_mode, const char *); |
33 | 35 | ||
34 | /* | 36 | /* |
35 | * PCIe/PCI functions. | 37 | * PCIe/PCI functions. |
diff --git a/arch/arm/mach-orion5x/ls-chl-setup.c b/arch/arm/mach-orion5x/ls-chl-setup.c index 24f4e14e5893..6234977b5aea 100644 --- a/arch/arm/mach-orion5x/ls-chl-setup.c +++ b/arch/arm/mach-orion5x/ls-chl-setup.c | |||
@@ -139,7 +139,7 @@ static struct mv_sata_platform_data lschl_sata_data = { | |||
139 | 139 | ||
140 | static void lschl_power_off(void) | 140 | static void lschl_power_off(void) |
141 | { | 141 | { |
142 | orion5x_restart('h', NULL); | 142 | orion5x_restart(REBOOT_HARD, NULL); |
143 | } | 143 | } |
144 | 144 | ||
145 | /***************************************************************************** | 145 | /***************************************************************************** |
diff --git a/arch/arm/mach-orion5x/ls_hgl-setup.c b/arch/arm/mach-orion5x/ls_hgl-setup.c index fc653bb41e78..fe04c4b64569 100644 --- a/arch/arm/mach-orion5x/ls_hgl-setup.c +++ b/arch/arm/mach-orion5x/ls_hgl-setup.c | |||
@@ -185,7 +185,7 @@ static struct mv_sata_platform_data ls_hgl_sata_data = { | |||
185 | 185 | ||
186 | static void ls_hgl_power_off(void) | 186 | static void ls_hgl_power_off(void) |
187 | { | 187 | { |
188 | orion5x_restart('h', NULL); | 188 | orion5x_restart(REBOOT_HARD, NULL); |
189 | } | 189 | } |
190 | 190 | ||
191 | 191 | ||
diff --git a/arch/arm/mach-orion5x/lsmini-setup.c b/arch/arm/mach-orion5x/lsmini-setup.c index 18e66e617dc2..ca4dbe973daf 100644 --- a/arch/arm/mach-orion5x/lsmini-setup.c +++ b/arch/arm/mach-orion5x/lsmini-setup.c | |||
@@ -185,7 +185,7 @@ static struct mv_sata_platform_data lsmini_sata_data = { | |||
185 | 185 | ||
186 | static void lsmini_power_off(void) | 186 | static void lsmini_power_off(void) |
187 | { | 187 | { |
188 | orion5x_restart('h', NULL); | 188 | orion5x_restart(REBOOT_HARD, NULL); |
189 | } | 189 | } |
190 | 190 | ||
191 | 191 | ||
diff --git a/arch/arm/mach-picoxcell/common.c b/arch/arm/mach-picoxcell/common.c index b13f51bc35cf..ec79fea82704 100644 --- a/arch/arm/mach-picoxcell/common.c +++ b/arch/arm/mach-picoxcell/common.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/of.h> | 11 | #include <linux/of.h> |
12 | #include <linux/of_address.h> | 12 | #include <linux/of_address.h> |
13 | #include <linux/of_platform.h> | 13 | #include <linux/of_platform.h> |
14 | #include <linux/reboot.h> | ||
14 | 15 | ||
15 | #include <asm/mach/arch.h> | 16 | #include <asm/mach/arch.h> |
16 | #include <asm/mach/map.h> | 17 | #include <asm/mach/map.h> |
@@ -63,7 +64,7 @@ static const char *picoxcell_dt_match[] = { | |||
63 | NULL | 64 | NULL |
64 | }; | 65 | }; |
65 | 66 | ||
66 | static void picoxcell_wdt_restart(char mode, const char *cmd) | 67 | static void picoxcell_wdt_restart(enum reboot_mode mode, const char *cmd) |
67 | { | 68 | { |
68 | /* | 69 | /* |
69 | * Configure the watchdog to reset with the shortest possible timeout | 70 | * Configure the watchdog to reset with the shortest possible timeout |
diff --git a/arch/arm/mach-prima2/common.h b/arch/arm/mach-prima2/common.h index 81135cd88e54..a6304858474a 100644 --- a/arch/arm/mach-prima2/common.h +++ b/arch/arm/mach-prima2/common.h | |||
@@ -10,6 +10,8 @@ | |||
10 | #define __MACH_PRIMA2_COMMON_H__ | 10 | #define __MACH_PRIMA2_COMMON_H__ |
11 | 11 | ||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/reboot.h> | ||
14 | |||
13 | #include <asm/mach/time.h> | 15 | #include <asm/mach/time.h> |
14 | #include <asm/exception.h> | 16 | #include <asm/exception.h> |
15 | 17 | ||
@@ -22,7 +24,7 @@ extern void sirfsoc_cpu_die(unsigned int cpu); | |||
22 | 24 | ||
23 | extern void __init sirfsoc_of_irq_init(void); | 25 | extern void __init sirfsoc_of_irq_init(void); |
24 | extern void __init sirfsoc_of_clk_init(void); | 26 | extern void __init sirfsoc_of_clk_init(void); |
25 | extern void sirfsoc_restart(char, const char *); | 27 | extern void sirfsoc_restart(enum reboot_mode, const char *); |
26 | extern asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs); | 28 | extern asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs); |
27 | 29 | ||
28 | #ifndef CONFIG_DEBUG_LL | 30 | #ifndef CONFIG_DEBUG_LL |
diff --git a/arch/arm/mach-prima2/rstc.c b/arch/arm/mach-prima2/rstc.c index d5e0cbc934c0..ccb53391147a 100644 --- a/arch/arm/mach-prima2/rstc.c +++ b/arch/arm/mach-prima2/rstc.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/device.h> | 13 | #include <linux/device.h> |
14 | #include <linux/of.h> | 14 | #include <linux/of.h> |
15 | #include <linux/of_address.h> | 15 | #include <linux/of_address.h> |
16 | #include <linux/reboot.h> | ||
16 | 17 | ||
17 | void __iomem *sirfsoc_rstc_base; | 18 | void __iomem *sirfsoc_rstc_base; |
18 | static DEFINE_MUTEX(rstc_lock); | 19 | static DEFINE_MUTEX(rstc_lock); |
@@ -84,7 +85,7 @@ int sirfsoc_reset_device(struct device *dev) | |||
84 | 85 | ||
85 | #define SIRFSOC_SYS_RST_BIT BIT(31) | 86 | #define SIRFSOC_SYS_RST_BIT BIT(31) |
86 | 87 | ||
87 | void sirfsoc_restart(char mode, const char *cmd) | 88 | void sirfsoc_restart(enum reboot_mode mode, const char *cmd) |
88 | { | 89 | { |
89 | writel(SIRFSOC_SYS_RST_BIT, sirfsoc_rstc_base); | 90 | writel(SIRFSOC_SYS_RST_BIT, sirfsoc_rstc_base); |
90 | } | 91 | } |
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c index a5b8fead7d61..f162f1b77cd2 100644 --- a/arch/arm/mach-pxa/corgi.c +++ b/arch/arm/mach-pxa/corgi.c | |||
@@ -663,16 +663,16 @@ static void corgi_poweroff(void) | |||
663 | /* Green LED off tells the bootloader to halt */ | 663 | /* Green LED off tells the bootloader to halt */ |
664 | gpio_set_value(CORGI_GPIO_LED_GREEN, 0); | 664 | gpio_set_value(CORGI_GPIO_LED_GREEN, 0); |
665 | 665 | ||
666 | pxa_restart('h', NULL); | 666 | pxa_restart(REBOOT_HARD, NULL); |
667 | } | 667 | } |
668 | 668 | ||
669 | static void corgi_restart(char mode, const char *cmd) | 669 | static void corgi_restart(enum reboot_mode mode, const char *cmd) |
670 | { | 670 | { |
671 | if (!machine_is_corgi()) | 671 | if (!machine_is_corgi()) |
672 | /* Green LED on tells the bootloader to reboot */ | 672 | /* Green LED on tells the bootloader to reboot */ |
673 | gpio_set_value(CORGI_GPIO_LED_GREEN, 1); | 673 | gpio_set_value(CORGI_GPIO_LED_GREEN, 1); |
674 | 674 | ||
675 | pxa_restart('h', cmd); | 675 | pxa_restart(REBOOT_HARD, cmd); |
676 | } | 676 | } |
677 | 677 | ||
678 | static void __init corgi_init(void) | 678 | static void __init corgi_init(void) |
diff --git a/arch/arm/mach-pxa/generic.h b/arch/arm/mach-pxa/generic.h index fd7ea39b78c0..8963984d1f43 100644 --- a/arch/arm/mach-pxa/generic.h +++ b/arch/arm/mach-pxa/generic.h | |||
@@ -9,6 +9,8 @@ | |||
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/reboot.h> | ||
13 | |||
12 | struct irq_data; | 14 | struct irq_data; |
13 | 15 | ||
14 | extern void pxa_timer_init(void); | 16 | extern void pxa_timer_init(void); |
@@ -56,4 +58,4 @@ void __init pxa_set_btuart_info(void *info); | |||
56 | void __init pxa_set_stuart_info(void *info); | 58 | void __init pxa_set_stuart_info(void *info); |
57 | void __init pxa_set_hwuart_info(void *info); | 59 | void __init pxa_set_hwuart_info(void *info); |
58 | 60 | ||
59 | void pxa_restart(char, const char *); | 61 | void pxa_restart(enum reboot_mode, const char *); |
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c index 654b0ac84dea..acc9d3cc0762 100644 --- a/arch/arm/mach-pxa/mioa701.c +++ b/arch/arm/mach-pxa/mioa701.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/wm97xx.h> | 37 | #include <linux/wm97xx.h> |
38 | #include <linux/mtd/physmap.h> | 38 | #include <linux/mtd/physmap.h> |
39 | #include <linux/usb/gpio_vbus.h> | 39 | #include <linux/usb/gpio_vbus.h> |
40 | #include <linux/reboot.h> | ||
40 | #include <linux/regulator/max1586.h> | 41 | #include <linux/regulator/max1586.h> |
41 | #include <linux/slab.h> | 42 | #include <linux/slab.h> |
42 | #include <linux/i2c/pxa-i2c.h> | 43 | #include <linux/i2c/pxa-i2c.h> |
@@ -696,13 +697,13 @@ static void mioa701_machine_exit(void); | |||
696 | static void mioa701_poweroff(void) | 697 | static void mioa701_poweroff(void) |
697 | { | 698 | { |
698 | mioa701_machine_exit(); | 699 | mioa701_machine_exit(); |
699 | pxa_restart('s', NULL); | 700 | pxa_restart(REBOOT_SOFT, NULL); |
700 | } | 701 | } |
701 | 702 | ||
702 | static void mioa701_restart(char c, const char *cmd) | 703 | static void mioa701_restart(enum reboot_mode c, const char *cmd) |
703 | { | 704 | { |
704 | mioa701_machine_exit(); | 705 | mioa701_machine_exit(); |
705 | pxa_restart('s', cmd); | 706 | pxa_restart(REBOOT_SOFT, cmd); |
706 | } | 707 | } |
707 | 708 | ||
708 | static struct gpio global_gpios[] = { | 709 | static struct gpio global_gpios[] = { |
@@ -761,7 +762,6 @@ static void mioa701_machine_exit(void) | |||
761 | 762 | ||
762 | MACHINE_START(MIOA701, "MIO A701") | 763 | MACHINE_START(MIOA701, "MIO A701") |
763 | .atag_offset = 0x100, | 764 | .atag_offset = 0x100, |
764 | .restart_mode = 's', | ||
765 | .map_io = &pxa27x_map_io, | 765 | .map_io = &pxa27x_map_io, |
766 | .nr_irqs = PXA_NR_IRQS, | 766 | .nr_irqs = PXA_NR_IRQS, |
767 | .init_irq = &pxa27x_init_irq, | 767 | .init_irq = &pxa27x_init_irq, |
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c index 50ccd5f1d560..711d37e26bd8 100644 --- a/arch/arm/mach-pxa/poodle.c +++ b/arch/arm/mach-pxa/poodle.c | |||
@@ -422,7 +422,7 @@ static struct i2c_board_info __initdata poodle_i2c_devices[] = { | |||
422 | 422 | ||
423 | static void poodle_poweroff(void) | 423 | static void poodle_poweroff(void) |
424 | { | 424 | { |
425 | pxa_restart('h', NULL); | 425 | pxa_restart(REBOOT_HARD, NULL); |
426 | } | 426 | } |
427 | 427 | ||
428 | static void __init poodle_init(void) | 428 | static void __init poodle_init(void) |
diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c index 3fab583755d4..0d5dd646f61f 100644 --- a/arch/arm/mach-pxa/reset.c +++ b/arch/arm/mach-pxa/reset.c | |||
@@ -83,7 +83,7 @@ static void do_hw_reset(void) | |||
83 | writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3); | 83 | writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3); |
84 | } | 84 | } |
85 | 85 | ||
86 | void pxa_restart(char mode, const char *cmd) | 86 | void pxa_restart(enum reboot_mode mode, const char *cmd) |
87 | { | 87 | { |
88 | local_irq_disable(); | 88 | local_irq_disable(); |
89 | local_fiq_disable(); | 89 | local_fiq_disable(); |
@@ -91,14 +91,14 @@ void pxa_restart(char mode, const char *cmd) | |||
91 | clear_reset_status(RESET_STATUS_ALL); | 91 | clear_reset_status(RESET_STATUS_ALL); |
92 | 92 | ||
93 | switch (mode) { | 93 | switch (mode) { |
94 | case 's': | 94 | case REBOOT_SOFT: |
95 | /* Jump into ROM at address 0 */ | 95 | /* Jump into ROM at address 0 */ |
96 | soft_restart(0); | 96 | soft_restart(0); |
97 | break; | 97 | break; |
98 | case 'g': | 98 | case REBOOT_GPIO: |
99 | do_gpio_reset(); | 99 | do_gpio_reset(); |
100 | break; | 100 | break; |
101 | case 'h': | 101 | case REBOOT_HARD: |
102 | default: | 102 | default: |
103 | do_hw_reset(); | 103 | do_hw_reset(); |
104 | break; | 104 | break; |
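The pxa_restart() hunk above shows the full shape of a converted handler: switch arms that used to compare single characters now match the enum constants, with the hard reset kept as the fallback. A minimal standalone sketch of the same structure for a hypothetical board (the two helper functions are placeholders, not part of this patch):

static void example_restart(enum reboot_mode mode, const char *cmd)
{
	switch (mode) {
	case REBOOT_SOFT:
		soft_restart(0);		/* jump back into ROM at address 0 */
		break;
	case REBOOT_GPIO:
		example_gpio_reset();		/* placeholder board-specific helper */
		break;
	case REBOOT_HARD:
	default:
		example_watchdog_reset();	/* placeholder board-specific helper */
		break;
	}
}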
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index 362726c49c70..2125df0444e7 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/regulator/machine.h> | 31 | #include <linux/regulator/machine.h> |
32 | #include <linux/io.h> | 32 | #include <linux/io.h> |
33 | #include <linux/module.h> | 33 | #include <linux/module.h> |
34 | #include <linux/reboot.h> | ||
34 | 35 | ||
35 | #include <asm/setup.h> | 36 | #include <asm/setup.h> |
36 | #include <asm/mach-types.h> | 37 | #include <asm/mach-types.h> |
@@ -924,10 +925,10 @@ static inline void spitz_i2c_init(void) {} | |||
924 | ******************************************************************************/ | 925 | ******************************************************************************/ |
925 | static void spitz_poweroff(void) | 926 | static void spitz_poweroff(void) |
926 | { | 927 | { |
927 | pxa_restart('g', NULL); | 928 | pxa_restart(REBOOT_GPIO, NULL); |
928 | } | 929 | } |
929 | 930 | ||
930 | static void spitz_restart(char mode, const char *cmd) | 931 | static void spitz_restart(enum reboot_mode mode, const char *cmd) |
931 | { | 932 | { |
932 | uint32_t msc0 = __raw_readl(MSC0); | 933 | uint32_t msc0 = __raw_readl(MSC0); |
933 | /* Bootloader magic for a reboot */ | 934 | /* Bootloader magic for a reboot */ |
@@ -979,7 +980,6 @@ static void __init spitz_fixup(struct tag *tags, char **cmdline, | |||
979 | 980 | ||
980 | #ifdef CONFIG_MACH_SPITZ | 981 | #ifdef CONFIG_MACH_SPITZ |
981 | MACHINE_START(SPITZ, "SHARP Spitz") | 982 | MACHINE_START(SPITZ, "SHARP Spitz") |
982 | .restart_mode = 'g', | ||
983 | .fixup = spitz_fixup, | 983 | .fixup = spitz_fixup, |
984 | .map_io = pxa27x_map_io, | 984 | .map_io = pxa27x_map_io, |
985 | .nr_irqs = PXA_NR_IRQS, | 985 | .nr_irqs = PXA_NR_IRQS, |
@@ -993,7 +993,6 @@ MACHINE_END | |||
993 | 993 | ||
994 | #ifdef CONFIG_MACH_BORZOI | 994 | #ifdef CONFIG_MACH_BORZOI |
995 | MACHINE_START(BORZOI, "SHARP Borzoi") | 995 | MACHINE_START(BORZOI, "SHARP Borzoi") |
996 | .restart_mode = 'g', | ||
997 | .fixup = spitz_fixup, | 996 | .fixup = spitz_fixup, |
998 | .map_io = pxa27x_map_io, | 997 | .map_io = pxa27x_map_io, |
999 | .nr_irqs = PXA_NR_IRQS, | 998 | .nr_irqs = PXA_NR_IRQS, |
@@ -1007,7 +1006,6 @@ MACHINE_END | |||
1007 | 1006 | ||
1008 | #ifdef CONFIG_MACH_AKITA | 1007 | #ifdef CONFIG_MACH_AKITA |
1009 | MACHINE_START(AKITA, "SHARP Akita") | 1008 | MACHINE_START(AKITA, "SHARP Akita") |
1010 | .restart_mode = 'g', | ||
1011 | .fixup = spitz_fixup, | 1009 | .fixup = spitz_fixup, |
1012 | .map_io = pxa27x_map_io, | 1010 | .map_io = pxa27x_map_io, |
1013 | .nr_irqs = PXA_NR_IRQS, | 1011 | .nr_irqs = PXA_NR_IRQS, |
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index 3d91d2e5bf3a..0206b915a6f6 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/input/matrix_keypad.h> | 36 | #include <linux/input/matrix_keypad.h> |
37 | #include <linux/i2c/pxa-i2c.h> | 37 | #include <linux/i2c/pxa-i2c.h> |
38 | #include <linux/usb/gpio_vbus.h> | 38 | #include <linux/usb/gpio_vbus.h> |
39 | #include <linux/reboot.h> | ||
39 | 40 | ||
40 | #include <asm/setup.h> | 41 | #include <asm/setup.h> |
41 | #include <asm/mach-types.h> | 42 | #include <asm/mach-types.h> |
@@ -911,10 +912,10 @@ static struct platform_device *devices[] __initdata = { | |||
911 | 912 | ||
912 | static void tosa_poweroff(void) | 913 | static void tosa_poweroff(void) |
913 | { | 914 | { |
914 | pxa_restart('g', NULL); | 915 | pxa_restart(REBOOT_GPIO, NULL); |
915 | } | 916 | } |
916 | 917 | ||
917 | static void tosa_restart(char mode, const char *cmd) | 918 | static void tosa_restart(enum reboot_mode mode, const char *cmd) |
918 | { | 919 | { |
919 | uint32_t msc0 = __raw_readl(MSC0); | 920 | uint32_t msc0 = __raw_readl(MSC0); |
920 | 921 | ||
@@ -969,7 +970,6 @@ static void __init fixup_tosa(struct tag *tags, char **cmdline, | |||
969 | } | 970 | } |
970 | 971 | ||
971 | MACHINE_START(TOSA, "SHARP Tosa") | 972 | MACHINE_START(TOSA, "SHARP Tosa") |
972 | .restart_mode = 'g', | ||
973 | .fixup = fixup_tosa, | 973 | .fixup = fixup_tosa, |
974 | .map_io = pxa25x_map_io, | 974 | .map_io = pxa25x_map_io, |
975 | .nr_irqs = TOSA_NR_IRQS, | 975 | .nr_irqs = TOSA_NR_IRQS, |
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c index 5b1c8bfe6fa9..c85ddb2a0ad0 100644 --- a/arch/arm/mach-realview/realview_eb.c +++ b/arch/arm/mach-realview/realview_eb.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/io.h> | 29 | #include <linux/io.h> |
30 | #include <linux/irqchip/arm-gic.h> | 30 | #include <linux/irqchip/arm-gic.h> |
31 | #include <linux/platform_data/clk-realview.h> | 31 | #include <linux/platform_data/clk-realview.h> |
32 | #include <linux/reboot.h> | ||
32 | 33 | ||
33 | #include <mach/hardware.h> | 34 | #include <mach/hardware.h> |
34 | #include <asm/irq.h> | 35 | #include <asm/irq.h> |
@@ -418,7 +419,7 @@ static void __init realview_eb_timer_init(void) | |||
418 | realview_eb_twd_init(); | 419 | realview_eb_twd_init(); |
419 | } | 420 | } |
420 | 421 | ||
421 | static void realview_eb_restart(char mode, const char *cmd) | 422 | static void realview_eb_restart(enum reboot_mode mode, const char *cmd) |
422 | { | 423 | { |
423 | void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL); | 424 | void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL); |
424 | void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK); | 425 | void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK); |
diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c index d5e83a1f6982..c5eade76461b 100644 --- a/arch/arm/mach-realview/realview_pb1176.c +++ b/arch/arm/mach-realview/realview_pb1176.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/io.h> | 31 | #include <linux/io.h> |
32 | #include <linux/irqchip/arm-gic.h> | 32 | #include <linux/irqchip/arm-gic.h> |
33 | #include <linux/platform_data/clk-realview.h> | 33 | #include <linux/platform_data/clk-realview.h> |
34 | #include <linux/reboot.h> | ||
34 | 35 | ||
35 | #include <mach/hardware.h> | 36 | #include <mach/hardware.h> |
36 | #include <asm/irq.h> | 37 | #include <asm/irq.h> |
@@ -329,7 +330,7 @@ static void __init realview_pb1176_timer_init(void) | |||
329 | realview_timer_init(IRQ_DC1176_TIMER0); | 330 | realview_timer_init(IRQ_DC1176_TIMER0); |
330 | } | 331 | } |
331 | 332 | ||
332 | static void realview_pb1176_restart(char mode, const char *cmd) | 333 | static void realview_pb1176_restart(enum reboot_mode mode, const char *cmd) |
333 | { | 334 | { |
334 | void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL); | 335 | void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL); |
335 | void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK); | 336 | void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK); |
diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c index c3cfe213b5e6..f4b0962578fe 100644 --- a/arch/arm/mach-realview/realview_pb11mp.c +++ b/arch/arm/mach-realview/realview_pb11mp.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/io.h> | 29 | #include <linux/io.h> |
30 | #include <linux/irqchip/arm-gic.h> | 30 | #include <linux/irqchip/arm-gic.h> |
31 | #include <linux/platform_data/clk-realview.h> | 31 | #include <linux/platform_data/clk-realview.h> |
32 | #include <linux/reboot.h> | ||
32 | 33 | ||
33 | #include <mach/hardware.h> | 34 | #include <mach/hardware.h> |
34 | #include <asm/irq.h> | 35 | #include <asm/irq.h> |
@@ -316,7 +317,7 @@ static void __init realview_pb11mp_timer_init(void) | |||
316 | realview_pb11mp_twd_init(); | 317 | realview_pb11mp_twd_init(); |
317 | } | 318 | } |
318 | 319 | ||
319 | static void realview_pb11mp_restart(char mode, const char *cmd) | 320 | static void realview_pb11mp_restart(enum reboot_mode mode, const char *cmd) |
320 | { | 321 | { |
321 | void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL); | 322 | void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL); |
322 | void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK); | 323 | void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK); |
diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c index dde652a59620..10a3e1d76891 100644 --- a/arch/arm/mach-realview/realview_pba8.c +++ b/arch/arm/mach-realview/realview_pba8.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/io.h> | 29 | #include <linux/io.h> |
30 | #include <linux/irqchip/arm-gic.h> | 30 | #include <linux/irqchip/arm-gic.h> |
31 | #include <linux/platform_data/clk-realview.h> | 31 | #include <linux/platform_data/clk-realview.h> |
32 | #include <linux/reboot.h> | ||
32 | 33 | ||
33 | #include <asm/irq.h> | 34 | #include <asm/irq.h> |
34 | #include <asm/mach-types.h> | 35 | #include <asm/mach-types.h> |
@@ -264,7 +265,7 @@ static void __init realview_pba8_timer_init(void) | |||
264 | realview_timer_init(IRQ_PBA8_TIMER0_1); | 265 | realview_timer_init(IRQ_PBA8_TIMER0_1); |
265 | } | 266 | } |
266 | 267 | ||
267 | static void realview_pba8_restart(char mode, const char *cmd) | 268 | static void realview_pba8_restart(enum reboot_mode mode, const char *cmd) |
268 | { | 269 | { |
269 | void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL); | 270 | void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL); |
270 | void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK); | 271 | void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK); |
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c index 54f0185b01e3..9d75493e3f0c 100644 --- a/arch/arm/mach-realview/realview_pbx.c +++ b/arch/arm/mach-realview/realview_pbx.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/io.h> | 28 | #include <linux/io.h> |
29 | #include <linux/irqchip/arm-gic.h> | 29 | #include <linux/irqchip/arm-gic.h> |
30 | #include <linux/platform_data/clk-realview.h> | 30 | #include <linux/platform_data/clk-realview.h> |
31 | #include <linux/reboot.h> | ||
31 | 32 | ||
32 | #include <asm/irq.h> | 33 | #include <asm/irq.h> |
33 | #include <asm/mach-types.h> | 34 | #include <asm/mach-types.h> |
@@ -344,7 +345,7 @@ static void realview_pbx_fixup(struct tag *tags, char **from, | |||
344 | #endif | 345 | #endif |
345 | } | 346 | } |
346 | 347 | ||
347 | static void realview_pbx_restart(char mode, const char *cmd) | 348 | static void realview_pbx_restart(enum reboot_mode mode, const char *cmd) |
348 | { | 349 | { |
349 | void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL); | 350 | void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL); |
350 | void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK); | 351 | void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK); |
diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c index a302cf5e0fc7..09d602b10d57 100644 --- a/arch/arm/mach-rpc/riscpc.c +++ b/arch/arm/mach-rpc/riscpc.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/ata_platform.h> | 20 | #include <linux/ata_platform.h> |
21 | #include <linux/io.h> | 21 | #include <linux/io.h> |
22 | #include <linux/i2c.h> | 22 | #include <linux/i2c.h> |
23 | #include <linux/reboot.h> | ||
23 | 24 | ||
24 | #include <asm/elf.h> | 25 | #include <asm/elf.h> |
25 | #include <asm/mach-types.h> | 26 | #include <asm/mach-types.h> |
@@ -201,7 +202,7 @@ static int __init rpc_init(void) | |||
201 | 202 | ||
202 | arch_initcall(rpc_init); | 203 | arch_initcall(rpc_init); |
203 | 204 | ||
204 | static void rpc_restart(char mode, const char *cmd) | 205 | static void rpc_restart(enum reboot_mode mode, const char *cmd) |
205 | { | 206 | { |
206 | iomd_writeb(0, IOMD_ROMCR0); | 207 | iomd_writeb(0, IOMD_ROMCR0); |
207 | 208 | ||
diff --git a/arch/arm/mach-s3c24xx/common.h b/arch/arm/mach-s3c24xx/common.h index 307c3714be55..84b280654f4c 100644 --- a/arch/arm/mach-s3c24xx/common.h +++ b/arch/arm/mach-s3c24xx/common.h | |||
@@ -12,6 +12,8 @@ | |||
12 | #ifndef __ARCH_ARM_MACH_S3C24XX_COMMON_H | 12 | #ifndef __ARCH_ARM_MACH_S3C24XX_COMMON_H |
13 | #define __ARCH_ARM_MACH_S3C24XX_COMMON_H __FILE__ | 13 | #define __ARCH_ARM_MACH_S3C24XX_COMMON_H __FILE__ |
14 | 14 | ||
15 | #include <linux/reboot.h> | ||
16 | |||
15 | struct s3c2410_uartcfg; | 17 | struct s3c2410_uartcfg; |
16 | 18 | ||
17 | #ifdef CONFIG_CPU_S3C2410 | 19 | #ifdef CONFIG_CPU_S3C2410 |
@@ -20,7 +22,7 @@ extern int s3c2410a_init(void); | |||
20 | extern void s3c2410_map_io(void); | 22 | extern void s3c2410_map_io(void); |
21 | extern void s3c2410_init_uarts(struct s3c2410_uartcfg *cfg, int no); | 23 | extern void s3c2410_init_uarts(struct s3c2410_uartcfg *cfg, int no); |
22 | extern void s3c2410_init_clocks(int xtal); | 24 | extern void s3c2410_init_clocks(int xtal); |
23 | extern void s3c2410_restart(char mode, const char *cmd); | 25 | extern void s3c2410_restart(enum reboot_mode mode, const char *cmd); |
24 | extern void s3c2410_init_irq(void); | 26 | extern void s3c2410_init_irq(void); |
25 | #else | 27 | #else |
26 | #define s3c2410_init_clocks NULL | 28 | #define s3c2410_init_clocks NULL |
@@ -36,7 +38,7 @@ extern void s3c2412_map_io(void); | |||
36 | extern void s3c2412_init_uarts(struct s3c2410_uartcfg *cfg, int no); | 38 | extern void s3c2412_init_uarts(struct s3c2410_uartcfg *cfg, int no); |
37 | extern void s3c2412_init_clocks(int xtal); | 39 | extern void s3c2412_init_clocks(int xtal); |
38 | extern int s3c2412_baseclk_add(void); | 40 | extern int s3c2412_baseclk_add(void); |
39 | extern void s3c2412_restart(char mode, const char *cmd); | 41 | extern void s3c2412_restart(enum reboot_mode mode, const char *cmd); |
40 | extern void s3c2412_init_irq(void); | 42 | extern void s3c2412_init_irq(void); |
41 | #else | 43 | #else |
42 | #define s3c2412_init_clocks NULL | 44 | #define s3c2412_init_clocks NULL |
@@ -51,7 +53,7 @@ extern void s3c2416_map_io(void); | |||
51 | extern void s3c2416_init_uarts(struct s3c2410_uartcfg *cfg, int no); | 53 | extern void s3c2416_init_uarts(struct s3c2410_uartcfg *cfg, int no); |
52 | extern void s3c2416_init_clocks(int xtal); | 54 | extern void s3c2416_init_clocks(int xtal); |
53 | extern int s3c2416_baseclk_add(void); | 55 | extern int s3c2416_baseclk_add(void); |
54 | extern void s3c2416_restart(char mode, const char *cmd); | 56 | extern void s3c2416_restart(enum reboot_mode mode, const char *cmd); |
55 | extern void s3c2416_init_irq(void); | 57 | extern void s3c2416_init_irq(void); |
56 | 58 | ||
57 | extern struct syscore_ops s3c2416_irq_syscore_ops; | 59 | extern struct syscore_ops s3c2416_irq_syscore_ops; |
@@ -66,7 +68,7 @@ extern struct syscore_ops s3c2416_irq_syscore_ops; | |||
66 | extern void s3c244x_map_io(void); | 68 | extern void s3c244x_map_io(void); |
67 | extern void s3c244x_init_uarts(struct s3c2410_uartcfg *cfg, int no); | 69 | extern void s3c244x_init_uarts(struct s3c2410_uartcfg *cfg, int no); |
68 | extern void s3c244x_init_clocks(int xtal); | 70 | extern void s3c244x_init_clocks(int xtal); |
69 | extern void s3c244x_restart(char mode, const char *cmd); | 71 | extern void s3c244x_restart(enum reboot_mode mode, const char *cmd); |
70 | #else | 72 | #else |
71 | #define s3c244x_init_clocks NULL | 73 | #define s3c244x_init_clocks NULL |
72 | #define s3c244x_init_uarts NULL | 74 | #define s3c244x_init_uarts NULL |
@@ -96,7 +98,7 @@ extern void s3c2443_map_io(void); | |||
96 | extern void s3c2443_init_uarts(struct s3c2410_uartcfg *cfg, int no); | 98 | extern void s3c2443_init_uarts(struct s3c2410_uartcfg *cfg, int no); |
97 | extern void s3c2443_init_clocks(int xtal); | 99 | extern void s3c2443_init_clocks(int xtal); |
98 | extern int s3c2443_baseclk_add(void); | 100 | extern int s3c2443_baseclk_add(void); |
99 | extern void s3c2443_restart(char mode, const char *cmd); | 101 | extern void s3c2443_restart(enum reboot_mode mode, const char *cmd); |
100 | extern void s3c2443_init_irq(void); | 102 | extern void s3c2443_init_irq(void); |
101 | #else | 103 | #else |
102 | #define s3c2443_init_clocks NULL | 104 | #define s3c2443_init_clocks NULL |
diff --git a/arch/arm/mach-s3c24xx/s3c2410.c b/arch/arm/mach-s3c24xx/s3c2410.c index ff384acc65b2..34676d1d5fec 100644 --- a/arch/arm/mach-s3c24xx/s3c2410.c +++ b/arch/arm/mach-s3c24xx/s3c2410.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/syscore_ops.h> | 22 | #include <linux/syscore_ops.h> |
23 | #include <linux/serial_core.h> | 23 | #include <linux/serial_core.h> |
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/reboot.h> | ||
25 | #include <linux/io.h> | 26 | #include <linux/io.h> |
26 | 27 | ||
27 | #include <asm/mach/arch.h> | 28 | #include <asm/mach/arch.h> |
@@ -196,9 +197,9 @@ int __init s3c2410a_init(void) | |||
196 | return s3c2410_init(); | 197 | return s3c2410_init(); |
197 | } | 198 | } |
198 | 199 | ||
199 | void s3c2410_restart(char mode, const char *cmd) | 200 | void s3c2410_restart(enum reboot_mode mode, const char *cmd) |
200 | { | 201 | { |
201 | if (mode == 's') { | 202 | if (mode == REBOOT_SOFT) { |
202 | soft_restart(0); | 203 | soft_restart(0); |
203 | } | 204 | } |
204 | 205 | ||
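The hunk above is the first of many identical conversions in this series: each machine restart hook stops taking a raw mode character ('s', 'h', ...) and instead takes the new enum reboot_mode from <linux/reboot.h>, comparing against REBOOT_SOFT/REBOOT_HARD rather than character literals. A stand-alone sketch of that pattern follows (the remaining mach-* hunks below repeat it); the enum ordering is assumed to mirror the new generic header, and soft_restart()/samsung_wdt_reset() are stubs for illustration only, not the kernel code itself.

#include <stdio.h>

/* Assumed to mirror the enum introduced in the generic <linux/reboot.h>. */
enum reboot_mode {
	REBOOT_COLD = 0,
	REBOOT_WARM,
	REBOOT_HARD,
	REBOOT_SOFT,
	REBOOT_GPIO,
};

/* Stubs standing in for the real SoC helpers referenced in the hunks. */
static void soft_restart(unsigned long addr) { printf("soft_restart(0x%lx)\n", addr); }
static void samsung_wdt_reset(void)          { printf("watchdog-driven hard reset\n"); }

/* Old signature: restart(char mode, const char *cmd), testing mode == 's'.
 * New signature: the generic reboot code hands over an enum instead. */
static void example_restart(enum reboot_mode mode, const char *cmd)
{
	(void)cmd;
	if (mode == REBOOT_SOFT)
		soft_restart(0);	/* jump back into ROM at address 0 */
	else
		samsung_wdt_reset();	/* anything else: pull a hard reset */
}

int main(void)
{
	example_restart(REBOOT_SOFT, NULL);
	example_restart(REBOOT_HARD, NULL);
	return 0;
}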
diff --git a/arch/arm/mach-s3c24xx/s3c2412.c b/arch/arm/mach-s3c24xx/s3c2412.c index 0f864d4c97de..0251650cbf80 100644 --- a/arch/arm/mach-s3c24xx/s3c2412.c +++ b/arch/arm/mach-s3c24xx/s3c2412.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/serial_core.h> | 22 | #include <linux/serial_core.h> |
23 | #include <linux/platform_device.h> | 23 | #include <linux/platform_device.h> |
24 | #include <linux/io.h> | 24 | #include <linux/io.h> |
25 | #include <linux/reboot.h> | ||
25 | 26 | ||
26 | #include <asm/mach/arch.h> | 27 | #include <asm/mach/arch.h> |
27 | #include <asm/mach/map.h> | 28 | #include <asm/mach/map.h> |
@@ -129,9 +130,9 @@ static void s3c2412_idle(void) | |||
129 | cpu_do_idle(); | 130 | cpu_do_idle(); |
130 | } | 131 | } |
131 | 132 | ||
132 | void s3c2412_restart(char mode, const char *cmd) | 133 | void s3c2412_restart(enum reboot_mode mode, const char *cmd) |
133 | { | 134 | { |
134 | if (mode == 's') | 135 | if (mode == REBOOT_SOFT) |
135 | soft_restart(0); | 136 | soft_restart(0); |
136 | 137 | ||
137 | /* errata "Watch-dog/Software Reset Problem" specifies that | 138 | /* errata "Watch-dog/Software Reset Problem" specifies that |
diff --git a/arch/arm/mach-s3c24xx/s3c2416.c b/arch/arm/mach-s3c24xx/s3c2416.c index b9c5d382dafb..9ef3ccfbe196 100644 --- a/arch/arm/mach-s3c24xx/s3c2416.c +++ b/arch/arm/mach-s3c24xx/s3c2416.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/syscore_ops.h> | 35 | #include <linux/syscore_ops.h> |
36 | #include <linux/clk.h> | 36 | #include <linux/clk.h> |
37 | #include <linux/io.h> | 37 | #include <linux/io.h> |
38 | #include <linux/reboot.h> | ||
38 | 39 | ||
39 | #include <asm/mach/arch.h> | 40 | #include <asm/mach/arch.h> |
40 | #include <asm/mach/map.h> | 41 | #include <asm/mach/map.h> |
@@ -79,9 +80,9 @@ static struct device s3c2416_dev = { | |||
79 | .bus = &s3c2416_subsys, | 80 | .bus = &s3c2416_subsys, |
80 | }; | 81 | }; |
81 | 82 | ||
82 | void s3c2416_restart(char mode, const char *cmd) | 83 | void s3c2416_restart(enum reboot_mode mode, const char *cmd) |
83 | { | 84 | { |
84 | if (mode == 's') | 85 | if (mode == REBOOT_SOFT) |
85 | soft_restart(0); | 86 | soft_restart(0); |
86 | 87 | ||
87 | __raw_writel(S3C2443_SWRST_RESET, S3C2443_SWRST); | 88 | __raw_writel(S3C2443_SWRST_RESET, S3C2443_SWRST); |
diff --git a/arch/arm/mach-s3c24xx/s3c2443.c b/arch/arm/mach-s3c24xx/s3c2443.c index 8328cd65bf3d..b6c71918b25c 100644 --- a/arch/arm/mach-s3c24xx/s3c2443.c +++ b/arch/arm/mach-s3c24xx/s3c2443.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/device.h> | 22 | #include <linux/device.h> |
23 | #include <linux/clk.h> | 23 | #include <linux/clk.h> |
24 | #include <linux/io.h> | 24 | #include <linux/io.h> |
25 | #include <linux/reboot.h> | ||
25 | 26 | ||
26 | #include <asm/mach/arch.h> | 27 | #include <asm/mach/arch.h> |
27 | #include <asm/mach/map.h> | 28 | #include <asm/mach/map.h> |
@@ -59,9 +60,9 @@ static struct device s3c2443_dev = { | |||
59 | .bus = &s3c2443_subsys, | 60 | .bus = &s3c2443_subsys, |
60 | }; | 61 | }; |
61 | 62 | ||
62 | void s3c2443_restart(char mode, const char *cmd) | 63 | void s3c2443_restart(enum reboot_mode mode, const char *cmd) |
63 | { | 64 | { |
64 | if (mode == 's') | 65 | if (mode == REBOOT_SOFT) |
65 | soft_restart(0); | 66 | soft_restart(0); |
66 | 67 | ||
67 | __raw_writel(S3C2443_SWRST_RESET, S3C2443_SWRST); | 68 | __raw_writel(S3C2443_SWRST_RESET, S3C2443_SWRST); |
diff --git a/arch/arm/mach-s3c24xx/s3c244x.c b/arch/arm/mach-s3c24xx/s3c244x.c index d0423e2544c1..911b555029fc 100644 --- a/arch/arm/mach-s3c24xx/s3c244x.c +++ b/arch/arm/mach-s3c24xx/s3c244x.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/serial_core.h> | 19 | #include <linux/serial_core.h> |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/reboot.h> | ||
21 | #include <linux/device.h> | 22 | #include <linux/device.h> |
22 | #include <linux/syscore_ops.h> | 23 | #include <linux/syscore_ops.h> |
23 | #include <linux/clk.h> | 24 | #include <linux/clk.h> |
@@ -198,9 +199,9 @@ struct syscore_ops s3c244x_pm_syscore_ops = { | |||
198 | .resume = s3c244x_resume, | 199 | .resume = s3c244x_resume, |
199 | }; | 200 | }; |
200 | 201 | ||
201 | void s3c244x_restart(char mode, const char *cmd) | 202 | void s3c244x_restart(enum reboot_mode mode, const char *cmd) |
202 | { | 203 | { |
203 | if (mode == 's') | 204 | if (mode == REBOOT_SOFT) |
204 | soft_restart(0); | 205 | soft_restart(0); |
205 | 206 | ||
206 | samsung_wdt_reset(); | 207 | samsung_wdt_reset(); |
diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c index 1aed6f4be1ce..3f62e467b129 100644 --- a/arch/arm/mach-s3c64xx/common.c +++ b/arch/arm/mach-s3c64xx/common.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/ioport.h> | 21 | #include <linux/ioport.h> |
22 | #include <linux/serial_core.h> | 22 | #include <linux/serial_core.h> |
23 | #include <linux/platform_device.h> | 23 | #include <linux/platform_device.h> |
24 | #include <linux/reboot.h> | ||
24 | #include <linux/io.h> | 25 | #include <linux/io.h> |
25 | #include <linux/dma-mapping.h> | 26 | #include <linux/dma-mapping.h> |
26 | #include <linux/irq.h> | 27 | #include <linux/irq.h> |
@@ -381,9 +382,9 @@ static int __init s3c64xx_init_irq_eint(void) | |||
381 | } | 382 | } |
382 | arch_initcall(s3c64xx_init_irq_eint); | 383 | arch_initcall(s3c64xx_init_irq_eint); |
383 | 384 | ||
384 | void s3c64xx_restart(char mode, const char *cmd) | 385 | void s3c64xx_restart(enum reboot_mode mode, const char *cmd) |
385 | { | 386 | { |
386 | if (mode != 's') | 387 | if (mode != REBOOT_SOFT) |
387 | samsung_wdt_reset(); | 388 | samsung_wdt_reset(); |
388 | 389 | ||
389 | /* if all else fails, or mode was for soft, jump to 0 */ | 390 | /* if all else fails, or mode was for soft, jump to 0 */ |
diff --git a/arch/arm/mach-s3c64xx/common.h b/arch/arm/mach-s3c64xx/common.h index 6cfc99bdfb37..e8f990b37665 100644 --- a/arch/arm/mach-s3c64xx/common.h +++ b/arch/arm/mach-s3c64xx/common.h | |||
@@ -17,13 +17,15 @@ | |||
17 | #ifndef __ARCH_ARM_MACH_S3C64XX_COMMON_H | 17 | #ifndef __ARCH_ARM_MACH_S3C64XX_COMMON_H |
18 | #define __ARCH_ARM_MACH_S3C64XX_COMMON_H | 18 | #define __ARCH_ARM_MACH_S3C64XX_COMMON_H |
19 | 19 | ||
20 | #include <linux/reboot.h> | ||
21 | |||
20 | void s3c64xx_init_irq(u32 vic0, u32 vic1); | 22 | void s3c64xx_init_irq(u32 vic0, u32 vic1); |
21 | void s3c64xx_init_io(struct map_desc *mach_desc, int size); | 23 | void s3c64xx_init_io(struct map_desc *mach_desc, int size); |
22 | 24 | ||
23 | void s3c64xx_register_clocks(unsigned long xtal, unsigned armclk_limit); | 25 | void s3c64xx_register_clocks(unsigned long xtal, unsigned armclk_limit); |
24 | void s3c64xx_setup_clocks(void); | 26 | void s3c64xx_setup_clocks(void); |
25 | 27 | ||
26 | void s3c64xx_restart(char mode, const char *cmd); | 28 | void s3c64xx_restart(enum reboot_mode mode, const char *cmd); |
27 | void s3c64xx_init_late(void); | 29 | void s3c64xx_init_late(void); |
28 | 30 | ||
29 | #ifdef CONFIG_CPU_S3C6400 | 31 | #ifdef CONFIG_CPU_S3C6400 |
diff --git a/arch/arm/mach-s5p64x0/common.c b/arch/arm/mach-s5p64x0/common.c index 76d0053bf564..dfdfdc320ce7 100644 --- a/arch/arm/mach-s5p64x0/common.c +++ b/arch/arm/mach-s5p64x0/common.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/dma-mapping.h> | 24 | #include <linux/dma-mapping.h> |
25 | #include <linux/gpio.h> | 25 | #include <linux/gpio.h> |
26 | #include <linux/irq.h> | 26 | #include <linux/irq.h> |
27 | #include <linux/reboot.h> | ||
27 | 28 | ||
28 | #include <asm/irq.h> | 29 | #include <asm/irq.h> |
29 | #include <asm/proc-fns.h> | 30 | #include <asm/proc-fns.h> |
@@ -439,9 +440,9 @@ static int __init s5p64x0_init_irq_eint(void) | |||
439 | } | 440 | } |
440 | arch_initcall(s5p64x0_init_irq_eint); | 441 | arch_initcall(s5p64x0_init_irq_eint); |
441 | 442 | ||
442 | void s5p64x0_restart(char mode, const char *cmd) | 443 | void s5p64x0_restart(enum reboot_mode mode, const char *cmd) |
443 | { | 444 | { |
444 | if (mode != 's') | 445 | if (mode != REBOOT_SOFT) |
445 | samsung_wdt_reset(); | 446 | samsung_wdt_reset(); |
446 | 447 | ||
447 | soft_restart(0); | 448 | soft_restart(0); |
diff --git a/arch/arm/mach-s5p64x0/common.h b/arch/arm/mach-s5p64x0/common.h index f8a60fdc5884..f3a9b43cba4a 100644 --- a/arch/arm/mach-s5p64x0/common.h +++ b/arch/arm/mach-s5p64x0/common.h | |||
@@ -12,6 +12,8 @@ | |||
12 | #ifndef __ARCH_ARM_MACH_S5P64X0_COMMON_H | 12 | #ifndef __ARCH_ARM_MACH_S5P64X0_COMMON_H |
13 | #define __ARCH_ARM_MACH_S5P64X0_COMMON_H | 13 | #define __ARCH_ARM_MACH_S5P64X0_COMMON_H |
14 | 14 | ||
15 | #include <linux/reboot.h> | ||
16 | |||
15 | void s5p6440_init_irq(void); | 17 | void s5p6440_init_irq(void); |
16 | void s5p6450_init_irq(void); | 18 | void s5p6450_init_irq(void); |
17 | void s5p64x0_init_io(struct map_desc *mach_desc, int size); | 19 | void s5p64x0_init_io(struct map_desc *mach_desc, int size); |
@@ -22,7 +24,7 @@ void s5p6440_setup_clocks(void); | |||
22 | void s5p6450_register_clocks(void); | 24 | void s5p6450_register_clocks(void); |
23 | void s5p6450_setup_clocks(void); | 25 | void s5p6450_setup_clocks(void); |
24 | 26 | ||
25 | void s5p64x0_restart(char mode, const char *cmd); | 27 | void s5p64x0_restart(enum reboot_mode mode, const char *cmd); |
26 | 28 | ||
27 | #ifdef CONFIG_CPU_S5P6440 | 29 | #ifdef CONFIG_CPU_S5P6440 |
28 | 30 | ||
diff --git a/arch/arm/mach-s5pc100/common.c b/arch/arm/mach-s5pc100/common.c index 511031564d35..4bdfecf6d024 100644 --- a/arch/arm/mach-s5pc100/common.c +++ b/arch/arm/mach-s5pc100/common.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/serial_core.h> | 24 | #include <linux/serial_core.h> |
25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
26 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
27 | #include <linux/reboot.h> | ||
27 | 28 | ||
28 | #include <asm/irq.h> | 29 | #include <asm/irq.h> |
29 | #include <asm/proc-fns.h> | 30 | #include <asm/proc-fns.h> |
@@ -217,9 +218,9 @@ void __init s5pc100_init_uarts(struct s3c2410_uartcfg *cfg, int no) | |||
217 | s3c24xx_init_uartdevs("s3c6400-uart", s5p_uart_resources, cfg, no); | 218 | s3c24xx_init_uartdevs("s3c6400-uart", s5p_uart_resources, cfg, no); |
218 | } | 219 | } |
219 | 220 | ||
220 | void s5pc100_restart(char mode, const char *cmd) | 221 | void s5pc100_restart(enum reboot_mode mode, const char *cmd) |
221 | { | 222 | { |
222 | if (mode != 's') | 223 | if (mode != REBOOT_SOFT) |
223 | samsung_wdt_reset(); | 224 | samsung_wdt_reset(); |
224 | 225 | ||
225 | soft_restart(0); | 226 | soft_restart(0); |
diff --git a/arch/arm/mach-s5pc100/common.h b/arch/arm/mach-s5pc100/common.h index c41f912e9e1f..08d782d65d7b 100644 --- a/arch/arm/mach-s5pc100/common.h +++ b/arch/arm/mach-s5pc100/common.h | |||
@@ -12,13 +12,15 @@ | |||
12 | #ifndef __ARCH_ARM_MACH_S5PC100_COMMON_H | 12 | #ifndef __ARCH_ARM_MACH_S5PC100_COMMON_H |
13 | #define __ARCH_ARM_MACH_S5PC100_COMMON_H | 13 | #define __ARCH_ARM_MACH_S5PC100_COMMON_H |
14 | 14 | ||
15 | #include <linux/reboot.h> | ||
16 | |||
15 | void s5pc100_init_io(struct map_desc *mach_desc, int size); | 17 | void s5pc100_init_io(struct map_desc *mach_desc, int size); |
16 | void s5pc100_init_irq(void); | 18 | void s5pc100_init_irq(void); |
17 | 19 | ||
18 | void s5pc100_register_clocks(void); | 20 | void s5pc100_register_clocks(void); |
19 | void s5pc100_setup_clocks(void); | 21 | void s5pc100_setup_clocks(void); |
20 | 22 | ||
21 | void s5pc100_restart(char mode, const char *cmd); | 23 | void s5pc100_restart(enum reboot_mode mode, const char *cmd); |
22 | 24 | ||
23 | extern int s5pc100_init(void); | 25 | extern int s5pc100_init(void); |
24 | extern void s5pc100_map_io(void); | 26 | extern void s5pc100_map_io(void); |
diff --git a/arch/arm/mach-s5pv210/common.c b/arch/arm/mach-s5pv210/common.c index 9dfe93e2624d..023f1a796a9c 100644 --- a/arch/arm/mach-s5pv210/common.c +++ b/arch/arm/mach-s5pv210/common.c | |||
@@ -143,7 +143,7 @@ static struct map_desc s5pv210_iodesc[] __initdata = { | |||
143 | } | 143 | } |
144 | }; | 144 | }; |
145 | 145 | ||
146 | void s5pv210_restart(char mode, const char *cmd) | 146 | void s5pv210_restart(enum reboot_mode mode, const char *cmd) |
147 | { | 147 | { |
148 | __raw_writel(0x1, S5P_SWRESET); | 148 | __raw_writel(0x1, S5P_SWRESET); |
149 | } | 149 | } |
diff --git a/arch/arm/mach-s5pv210/common.h b/arch/arm/mach-s5pv210/common.h index 0a1cc0aef720..fe1beb54e548 100644 --- a/arch/arm/mach-s5pv210/common.h +++ b/arch/arm/mach-s5pv210/common.h | |||
@@ -12,13 +12,15 @@ | |||
12 | #ifndef __ARCH_ARM_MACH_S5PV210_COMMON_H | 12 | #ifndef __ARCH_ARM_MACH_S5PV210_COMMON_H |
13 | #define __ARCH_ARM_MACH_S5PV210_COMMON_H | 13 | #define __ARCH_ARM_MACH_S5PV210_COMMON_H |
14 | 14 | ||
15 | #include <linux/reboot.h> | ||
16 | |||
15 | void s5pv210_init_io(struct map_desc *mach_desc, int size); | 17 | void s5pv210_init_io(struct map_desc *mach_desc, int size); |
16 | void s5pv210_init_irq(void); | 18 | void s5pv210_init_irq(void); |
17 | 19 | ||
18 | void s5pv210_register_clocks(void); | 20 | void s5pv210_register_clocks(void); |
19 | void s5pv210_setup_clocks(void); | 21 | void s5pv210_setup_clocks(void); |
20 | 22 | ||
21 | void s5pv210_restart(char mode, const char *cmd); | 23 | void s5pv210_restart(enum reboot_mode mode, const char *cmd); |
22 | 24 | ||
23 | extern int s5pv210_init(void); | 25 | extern int s5pv210_init(void); |
24 | extern void s5pv210_map_io(void); | 26 | extern void s5pv210_map_io(void); |
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c index 9db3e98e8b85..f25b6119e028 100644 --- a/arch/arm/mach-sa1100/generic.c +++ b/arch/arm/mach-sa1100/generic.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/cpufreq.h> | 19 | #include <linux/cpufreq.h> |
20 | #include <linux/ioport.h> | 20 | #include <linux/ioport.h> |
21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
22 | #include <linux/reboot.h> | ||
22 | 23 | ||
23 | #include <video/sa1100fb.h> | 24 | #include <video/sa1100fb.h> |
24 | 25 | ||
@@ -131,9 +132,9 @@ static void sa1100_power_off(void) | |||
131 | PMCR = PMCR_SF; | 132 | PMCR = PMCR_SF; |
132 | } | 133 | } |
133 | 134 | ||
134 | void sa11x0_restart(char mode, const char *cmd) | 135 | void sa11x0_restart(enum reboot_mode mode, const char *cmd) |
135 | { | 136 | { |
136 | if (mode == 's') { | 137 | if (mode == REBOOT_SOFT) { |
137 | /* Jump into ROM at address 0 */ | 138 | /* Jump into ROM at address 0 */ |
138 | soft_restart(0); | 139 | soft_restart(0); |
139 | } else { | 140 | } else { |
diff --git a/arch/arm/mach-sa1100/generic.h b/arch/arm/mach-sa1100/generic.h index 2abc6a1f6e86..9a33695c9492 100644 --- a/arch/arm/mach-sa1100/generic.h +++ b/arch/arm/mach-sa1100/generic.h | |||
@@ -3,12 +3,13 @@ | |||
3 | * | 3 | * |
4 | * Author: Nicolas Pitre | 4 | * Author: Nicolas Pitre |
5 | */ | 5 | */ |
6 | #include <linux/reboot.h> | ||
6 | 7 | ||
7 | extern void sa1100_timer_init(void); | 8 | extern void sa1100_timer_init(void); |
8 | extern void __init sa1100_map_io(void); | 9 | extern void __init sa1100_map_io(void); |
9 | extern void __init sa1100_init_irq(void); | 10 | extern void __init sa1100_init_irq(void); |
10 | extern void __init sa1100_init_gpio(void); | 11 | extern void __init sa1100_init_gpio(void); |
11 | extern void sa11x0_restart(char, const char *); | 12 | extern void sa11x0_restart(enum reboot_mode, const char *); |
12 | extern void sa11x0_init_late(void); | 13 | extern void sa11x0_init_late(void); |
13 | 14 | ||
14 | #define SET_BANK(__nr,__start,__size) \ | 15 | #define SET_BANK(__nr,__start,__size) \ |
diff --git a/arch/arm/mach-shark/core.c b/arch/arm/mach-shark/core.c index 153555724988..1d32c5e8eab6 100644 --- a/arch/arm/mach-shark/core.c +++ b/arch/arm/mach-shark/core.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/serial_8250.h> | 11 | #include <linux/serial_8250.h> |
12 | #include <linux/io.h> | 12 | #include <linux/io.h> |
13 | #include <linux/cpu.h> | 13 | #include <linux/cpu.h> |
14 | #include <linux/reboot.h> | ||
14 | 15 | ||
15 | #include <asm/setup.h> | 16 | #include <asm/setup.h> |
16 | #include <asm/mach-types.h> | 17 | #include <asm/mach-types.h> |
@@ -24,7 +25,7 @@ | |||
24 | #define ROMCARD_SIZE 0x08000000 | 25 | #define ROMCARD_SIZE 0x08000000 |
25 | #define ROMCARD_START 0x10000000 | 26 | #define ROMCARD_START 0x10000000 |
26 | 27 | ||
27 | static void shark_restart(char mode, const char *cmd) | 28 | static void shark_restart(enum reboot_mode mode, const char *cmd) |
28 | { | 29 | { |
29 | short temp; | 30 | short temp; |
30 | /* Reset the Machine via pc[3] of the sequoia chipset */ | 31 | /* Reset the Machine via pc[3] of the sequoia chipset */ |
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c index 44a621505eeb..45221fd7e25d 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/mmc/sh_mmcif.h> | 42 | #include <linux/mmc/sh_mmcif.h> |
43 | #include <linux/mmc/sh_mobile_sdhi.h> | 43 | #include <linux/mmc/sh_mobile_sdhi.h> |
44 | #include <linux/i2c-gpio.h> | 44 | #include <linux/i2c-gpio.h> |
45 | #include <linux/reboot.h> | ||
45 | #include <mach/common.h> | 46 | #include <mach/common.h> |
46 | #include <mach/irqs.h> | 47 | #include <mach/irqs.h> |
47 | #include <mach/r8a7740.h> | 48 | #include <mach/r8a7740.h> |
@@ -1259,7 +1260,7 @@ static void __init eva_add_early_devices(void) | |||
1259 | } | 1260 | } |
1260 | 1261 | ||
1261 | #define RESCNT2 IOMEM(0xe6188020) | 1262 | #define RESCNT2 IOMEM(0xe6188020) |
1262 | static void eva_restart(char mode, const char *cmd) | 1263 | static void eva_restart(enum reboot_mode mode, const char *cmd) |
1263 | { | 1264 | { |
1264 | /* Do soft power on reset */ | 1265 | /* Do soft power on reset */ |
1265 | writel((1 << 31), RESCNT2); | 1266 | writel((1 << 31), RESCNT2); |
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c index 165483c9bee2..1068120d339f 100644 --- a/arch/arm/mach-shmobile/board-kzm9g.c +++ b/arch/arm/mach-shmobile/board-kzm9g.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/pinctrl/machine.h> | 34 | #include <linux/pinctrl/machine.h> |
35 | #include <linux/pinctrl/pinconf-generic.h> | 35 | #include <linux/pinctrl/pinconf-generic.h> |
36 | #include <linux/platform_device.h> | 36 | #include <linux/platform_device.h> |
37 | #include <linux/reboot.h> | ||
37 | #include <linux/regulator/fixed.h> | 38 | #include <linux/regulator/fixed.h> |
38 | #include <linux/regulator/machine.h> | 39 | #include <linux/regulator/machine.h> |
39 | #include <linux/smsc911x.h> | 40 | #include <linux/smsc911x.h> |
@@ -890,7 +891,7 @@ static void __init kzm_init(void) | |||
890 | sh73a0_pm_init(); | 891 | sh73a0_pm_init(); |
891 | } | 892 | } |
892 | 893 | ||
893 | static void kzm9g_restart(char mode, const char *cmd) | 894 | static void kzm9g_restart(enum reboot_mode mode, const char *cmd) |
894 | { | 895 | { |
895 | #define RESCNT2 IOMEM(0xe6188020) | 896 | #define RESCNT2 IOMEM(0xe6188020) |
896 | /* Do soft power on reset */ | 897 | /* Do soft power on reset */ |
diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c index 8ea11b472b91..bfce9641e32f 100644 --- a/arch/arm/mach-socfpga/socfpga.c +++ b/arch/arm/mach-socfpga/socfpga.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/of_address.h> | 19 | #include <linux/of_address.h> |
20 | #include <linux/of_irq.h> | 20 | #include <linux/of_irq.h> |
21 | #include <linux/of_platform.h> | 21 | #include <linux/of_platform.h> |
22 | #include <linux/reboot.h> | ||
22 | 23 | ||
23 | #include <asm/hardware/cache-l2x0.h> | 24 | #include <asm/hardware/cache-l2x0.h> |
24 | #include <asm/mach/arch.h> | 25 | #include <asm/mach/arch.h> |
@@ -89,13 +90,13 @@ static void __init socfpga_init_irq(void) | |||
89 | socfpga_sysmgr_init(); | 90 | socfpga_sysmgr_init(); |
90 | } | 91 | } |
91 | 92 | ||
92 | static void socfpga_cyclone5_restart(char mode, const char *cmd) | 93 | static void socfpga_cyclone5_restart(enum reboot_mode mode, const char *cmd) |
93 | { | 94 | { |
94 | u32 temp; | 95 | u32 temp; |
95 | 96 | ||
96 | temp = readl(rst_manager_base_addr + SOCFPGA_RSTMGR_CTRL); | 97 | temp = readl(rst_manager_base_addr + SOCFPGA_RSTMGR_CTRL); |
97 | 98 | ||
98 | if (mode == 'h') | 99 | if (mode == REBOOT_HARD) |
99 | temp |= RSTMGR_CTRL_SWCOLDRSTREQ; | 100 | temp |= RSTMGR_CTRL_SWCOLDRSTREQ; |
100 | else | 101 | else |
101 | temp |= RSTMGR_CTRL_SWWARMRSTREQ; | 102 | temp |= RSTMGR_CTRL_SWWARMRSTREQ; |
diff --git a/arch/arm/mach-spear/generic.h b/arch/arm/mach-spear/generic.h index a9fd45362fee..904f2c907b46 100644 --- a/arch/arm/mach-spear/generic.h +++ b/arch/arm/mach-spear/generic.h | |||
@@ -16,6 +16,8 @@ | |||
16 | #include <linux/dmaengine.h> | 16 | #include <linux/dmaengine.h> |
17 | #include <linux/amba/pl08x.h> | 17 | #include <linux/amba/pl08x.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/reboot.h> | ||
20 | |||
19 | #include <asm/mach/time.h> | 21 | #include <asm/mach/time.h> |
20 | 22 | ||
21 | extern void spear13xx_timer_init(void); | 23 | extern void spear13xx_timer_init(void); |
@@ -32,7 +34,7 @@ void __init spear6xx_clk_init(void __iomem *misc_base); | |||
32 | void __init spear13xx_map_io(void); | 34 | void __init spear13xx_map_io(void); |
33 | void __init spear13xx_l2x0_init(void); | 35 | void __init spear13xx_l2x0_init(void); |
34 | 36 | ||
35 | void spear_restart(char, const char *); | 37 | void spear_restart(enum reboot_mode, const char *); |
36 | 38 | ||
37 | void spear13xx_secondary_startup(void); | 39 | void spear13xx_secondary_startup(void); |
38 | void __cpuinit spear13xx_cpu_die(unsigned int cpu); | 40 | void __cpuinit spear13xx_cpu_die(unsigned int cpu); |
diff --git a/arch/arm/mach-spear/restart.c b/arch/arm/mach-spear/restart.c index 2b44500bb718..ce5e098c4888 100644 --- a/arch/arm/mach-spear/restart.c +++ b/arch/arm/mach-spear/restart.c | |||
@@ -12,14 +12,15 @@ | |||
12 | */ | 12 | */ |
13 | #include <linux/io.h> | 13 | #include <linux/io.h> |
14 | #include <linux/amba/sp810.h> | 14 | #include <linux/amba/sp810.h> |
15 | #include <linux/reboot.h> | ||
15 | #include <asm/system_misc.h> | 16 | #include <asm/system_misc.h> |
16 | #include <mach/spear.h> | 17 | #include <mach/spear.h> |
17 | #include "generic.h" | 18 | #include "generic.h" |
18 | 19 | ||
19 | #define SPEAR13XX_SYS_SW_RES (VA_MISC_BASE + 0x204) | 20 | #define SPEAR13XX_SYS_SW_RES (VA_MISC_BASE + 0x204) |
20 | void spear_restart(char mode, const char *cmd) | 21 | void spear_restart(enum reboot_mode mode, const char *cmd) |
21 | { | 22 | { |
22 | if (mode == 's') { | 23 | if (mode == REBOOT_SOFT) { |
23 | /* software reset, Jump into ROM at address 0 */ | 24 | /* software reset, Jump into ROM at address 0 */ |
24 | soft_restart(0); | 25 | soft_restart(0); |
25 | } else { | 26 | } else { |
diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c index 84485a10fc3a..38a3c55527c8 100644 --- a/arch/arm/mach-sunxi/sunxi.c +++ b/arch/arm/mach-sunxi/sunxi.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/of_irq.h> | 18 | #include <linux/of_irq.h> |
19 | #include <linux/of_platform.h> | 19 | #include <linux/of_platform.h> |
20 | #include <linux/io.h> | 20 | #include <linux/io.h> |
21 | #include <linux/reboot.h> | ||
21 | 22 | ||
22 | #include <linux/clk/sunxi.h> | 23 | #include <linux/clk/sunxi.h> |
23 | 24 | ||
@@ -33,7 +34,7 @@ | |||
33 | 34 | ||
34 | static void __iomem *wdt_base; | 35 | static void __iomem *wdt_base; |
35 | 36 | ||
36 | static void sun4i_restart(char mode, const char *cmd) | 37 | static void sun4i_restart(enum reboot_mode mode, const char *cmd) |
37 | { | 38 | { |
38 | if (!wdt_base) | 39 | if (!wdt_base) |
39 | return; | 40 | return; |
diff --git a/arch/arm/mach-tegra/board.h b/arch/arm/mach-tegra/board.h index 1787327fae3a..9a6659fe2dc2 100644 --- a/arch/arm/mach-tegra/board.h +++ b/arch/arm/mach-tegra/board.h | |||
@@ -23,8 +23,9 @@ | |||
23 | #define __MACH_TEGRA_BOARD_H | 23 | #define __MACH_TEGRA_BOARD_H |
24 | 24 | ||
25 | #include <linux/types.h> | 25 | #include <linux/types.h> |
26 | #include <linux/reboot.h> | ||
26 | 27 | ||
27 | void tegra_assert_system_reset(char mode, const char *cmd); | 28 | void tegra_assert_system_reset(enum reboot_mode mode, const char *cmd); |
28 | 29 | ||
29 | void __init tegra_init_early(void); | 30 | void __init tegra_init_early(void); |
30 | void __init tegra_map_common_io(void); | 31 | void __init tegra_map_common_io(void); |
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c index b25153e2ebaa..94a119a35af8 100644 --- a/arch/arm/mach-tegra/common.c +++ b/arch/arm/mach-tegra/common.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/clk.h> | 23 | #include <linux/clk.h> |
24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
25 | #include <linux/reboot.h> | ||
25 | #include <linux/irqchip.h> | 26 | #include <linux/irqchip.h> |
26 | #include <linux/clk-provider.h> | 27 | #include <linux/clk-provider.h> |
27 | 28 | ||
@@ -68,7 +69,7 @@ void __init tegra_dt_init_irq(void) | |||
68 | } | 69 | } |
69 | #endif | 70 | #endif |
70 | 71 | ||
71 | void tegra_assert_system_reset(char mode, const char *cmd) | 72 | void tegra_assert_system_reset(enum reboot_mode mode, const char *cmd) |
72 | { | 73 | { |
73 | void __iomem *reset = IO_ADDRESS(TEGRA_PMC_BASE + 0); | 74 | void __iomem *reset = IO_ADDRESS(TEGRA_PMC_BASE + 0); |
74 | u32 reg; | 75 | u32 reg; |
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c index 4f7ac2a11452..35670b15f281 100644 --- a/arch/arm/mach-u300/core.c +++ b/arch/arm/mach-u300/core.c | |||
@@ -300,11 +300,11 @@ static void __init u300_init_check_chip(void) | |||
300 | /* Forward declare this function from the watchdog */ | 300 | /* Forward declare this function from the watchdog */ |
301 | void coh901327_watchdog_reset(void); | 301 | void coh901327_watchdog_reset(void); |
302 | 302 | ||
303 | static void u300_restart(char mode, const char *cmd) | 303 | static void u300_restart(enum reboot_mode mode, const char *cmd) |
304 | { | 304 | { |
305 | switch (mode) { | 305 | switch (mode) { |
306 | case 's': | 306 | case REBOOT_SOFT: |
307 | case 'h': | 307 | case REBOOT_HARD: |
308 | #ifdef CONFIG_COH901327_WATCHDOG | 308 | #ifdef CONFIG_COH901327_WATCHDOG |
309 | coh901327_watchdog_reset(); | 309 | coh901327_watchdog_reset(); |
310 | #endif | 310 | #endif |
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index 54bb80b012ac..3b0572f30d56 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/clkdev.h> | 38 | #include <linux/clkdev.h> |
39 | #include <linux/mtd/physmap.h> | 39 | #include <linux/mtd/physmap.h> |
40 | #include <linux/bitops.h> | 40 | #include <linux/bitops.h> |
41 | #include <linux/reboot.h> | ||
41 | 42 | ||
42 | #include <asm/irq.h> | 43 | #include <asm/irq.h> |
43 | #include <asm/hardware/arm_timer.h> | 44 | #include <asm/hardware/arm_timer.h> |
@@ -733,7 +734,7 @@ static void versatile_leds_event(led_event_t ledevt) | |||
733 | } | 734 | } |
734 | #endif /* CONFIG_LEDS */ | 735 | #endif /* CONFIG_LEDS */ |
735 | 736 | ||
736 | void versatile_restart(char mode, const char *cmd) | 737 | void versatile_restart(enum reboot_mode mode, const char *cmd) |
737 | { | 738 | { |
738 | void __iomem *sys = __io_address(VERSATILE_SYS_BASE); | 739 | void __iomem *sys = __io_address(VERSATILE_SYS_BASE); |
739 | u32 val; | 740 | u32 val; |
diff --git a/arch/arm/mach-versatile/core.h b/arch/arm/mach-versatile/core.h index 5c1b87d1da6b..f06d5768e428 100644 --- a/arch/arm/mach-versatile/core.h +++ b/arch/arm/mach-versatile/core.h | |||
@@ -24,13 +24,14 @@ | |||
24 | 24 | ||
25 | #include <linux/amba/bus.h> | 25 | #include <linux/amba/bus.h> |
26 | #include <linux/of_platform.h> | 26 | #include <linux/of_platform.h> |
27 | #include <linux/reboot.h> | ||
27 | 28 | ||
28 | extern void __init versatile_init(void); | 29 | extern void __init versatile_init(void); |
29 | extern void __init versatile_init_early(void); | 30 | extern void __init versatile_init_early(void); |
30 | extern void __init versatile_init_irq(void); | 31 | extern void __init versatile_init_irq(void); |
31 | extern void __init versatile_map_io(void); | 32 | extern void __init versatile_map_io(void); |
32 | extern void versatile_timer_init(void); | 33 | extern void versatile_timer_init(void); |
33 | extern void versatile_restart(char, const char *); | 34 | extern void versatile_restart(enum reboot_mode, const char *); |
34 | extern unsigned int mmc_status(struct device *dev); | 35 | extern unsigned int mmc_status(struct device *dev); |
35 | #ifdef CONFIG_OF | 36 | #ifdef CONFIG_OF |
36 | extern struct of_dev_auxdata versatile_auxdata_lookup[]; | 37 | extern struct of_dev_auxdata versatile_auxdata_lookup[]; |
diff --git a/arch/arm/mach-vt8500/vt8500.c b/arch/arm/mach-vt8500/vt8500.c index f8f2f00856e0..eefaa60d6614 100644 --- a/arch/arm/mach-vt8500/vt8500.c +++ b/arch/arm/mach-vt8500/vt8500.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/clocksource.h> | 21 | #include <linux/clocksource.h> |
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/pm.h> | 23 | #include <linux/pm.h> |
24 | #include <linux/reboot.h> | ||
24 | 25 | ||
25 | #include <asm/mach-types.h> | 26 | #include <asm/mach-types.h> |
26 | #include <asm/mach/arch.h> | 27 | #include <asm/mach/arch.h> |
@@ -46,7 +47,7 @@ | |||
46 | 47 | ||
47 | static void __iomem *pmc_base; | 48 | static void __iomem *pmc_base; |
48 | 49 | ||
49 | void vt8500_restart(char mode, const char *cmd) | 50 | void vt8500_restart(enum reboot_mode mode, const char *cmd) |
50 | { | 51 | { |
51 | if (pmc_base) | 52 | if (pmc_base) |
52 | writel(1, pmc_base + VT8500_PMSR_REG); | 53 | writel(1, pmc_base + VT8500_PMSR_REG); |
diff --git a/arch/arm/mach-w90x900/cpu.c b/arch/arm/mach-w90x900/cpu.c index 9e4dd8b63c4a..b1eabaad50a5 100644 --- a/arch/arm/mach-w90x900/cpu.c +++ b/arch/arm/mach-w90x900/cpu.c | |||
@@ -230,9 +230,9 @@ void __init nuc900_init_clocks(void) | |||
230 | #define WTE (1 << 7) | 230 | #define WTE (1 << 7) |
231 | #define WTRE (1 << 1) | 231 | #define WTRE (1 << 1) |
232 | 232 | ||
233 | void nuc9xx_restart(char mode, const char *cmd) | 233 | void nuc9xx_restart(enum reboot_mode mode, const char *cmd) |
234 | { | 234 | { |
235 | if (mode == 's') { | 235 | if (mode == REBOOT_SOFT) { |
236 | /* Jump into ROM at address 0 */ | 236 | /* Jump into ROM at address 0 */ |
237 | soft_restart(0); | 237 | soft_restart(0); |
238 | } else { | 238 | } else { |
diff --git a/arch/arm/mach-w90x900/nuc9xx.h b/arch/arm/mach-w90x900/nuc9xx.h index 88ef4b267089..e3ab1e1381f1 100644 --- a/arch/arm/mach-w90x900/nuc9xx.h +++ b/arch/arm/mach-w90x900/nuc9xx.h | |||
@@ -14,10 +14,13 @@ | |||
14 | * published by the Free Software Foundation. | 14 | * published by the Free Software Foundation. |
15 | * | 15 | * |
16 | */ | 16 | */ |
17 | |||
18 | #include <linux/reboot.h> | ||
19 | |||
17 | struct map_desc; | 20 | struct map_desc; |
18 | 21 | ||
19 | /* core initialisation functions */ | 22 | /* core initialisation functions */ |
20 | 23 | ||
21 | extern void nuc900_init_irq(void); | 24 | extern void nuc900_init_irq(void); |
22 | extern void nuc900_timer_init(void); | 25 | extern void nuc900_timer_init(void); |
23 | extern void nuc9xx_restart(char, const char *); | 26 | extern void nuc9xx_restart(enum reboot_mode, const char *); |
diff --git a/arch/arm/plat-iop/gpio.c b/arch/arm/plat-iop/gpio.c index e4de9be78feb..697de6dc4936 100644 --- a/arch/arm/plat-iop/gpio.c +++ b/arch/arm/plat-iop/gpio.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/gpio.h> | 17 | #include <linux/gpio.h> |
18 | #include <linux/export.h> | 18 | #include <linux/export.h> |
19 | #include <asm/hardware/iop3xx.h> | 19 | #include <asm/hardware/iop3xx.h> |
20 | #include <mach/gpio.h> | ||
20 | 21 | ||
21 | void gpio_line_config(int line, int direction) | 22 | void gpio_line_config(int line, int direction) |
22 | { | 23 | { |
diff --git a/arch/arm/plat-iop/restart.c b/arch/arm/plat-iop/restart.c index 33fa699a4d28..3a4d5e5fde52 100644 --- a/arch/arm/plat-iop/restart.c +++ b/arch/arm/plat-iop/restart.c | |||
@@ -11,7 +11,7 @@ | |||
11 | #include <asm/system_misc.h> | 11 | #include <asm/system_misc.h> |
12 | #include <mach/hardware.h> | 12 | #include <mach/hardware.h> |
13 | 13 | ||
14 | void iop3xx_restart(char mode, const char *cmd) | 14 | void iop3xx_restart(enum reboot_mode mode, const char *cmd) |
15 | { | 15 | { |
16 | *IOP3XX_PCSR = 0x30; | 16 | *IOP3XX_PCSR = 0x30; |
17 | 17 | ||
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c index 2c75bf7357c5..8fddf46e6c62 100644 --- a/arch/metag/mm/fault.c +++ b/arch/metag/mm/fault.c | |||
@@ -224,8 +224,10 @@ do_sigbus: | |||
224 | */ | 224 | */ |
225 | out_of_memory: | 225 | out_of_memory: |
226 | up_read(&mm->mmap_sem); | 226 | up_read(&mm->mmap_sem); |
227 | if (user_mode(regs)) | 227 | if (user_mode(regs)) { |
228 | do_group_exit(SIGKILL); | 228 | pagefault_out_of_memory(); |
229 | return 1; | ||
230 | } | ||
229 | 231 | ||
230 | no_context: | 232 | no_context: |
231 | /* Are we prepared to handle this kernel fault? */ | 233 | /* Are we prepared to handle this kernel fault? */ |
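The metag hunk above, and the mn10300, openrisc, score and tile hunks that follow, rewrite the page-fault out-of-memory path the same way: drop the "VM: killing process" printk and the direct do_exit()/do_group_exit(SIGKILL), call pagefault_out_of_memory() for user-mode faults, and keep routing kernel-mode faults to no_context. A minimal stand-alone sketch of the resulting control flow, with stubbed helpers rather than the real mm code:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the real helpers referenced by the hunks. */
static void up_read_mmap_sem(void)        { puts("release mmap_sem"); }
static void pagefault_out_of_memory(void) { puts("hand the task to the OOM machinery"); }
static void no_context(void)              { puts("kernel fault: exception fixup or oops"); }

static void out_of_memory_path(bool user_mode)
{
	up_read_mmap_sem();		/* always drop the semaphore first */
	if (!user_mode) {
		no_context();		/* kernel faults still fall through */
		return;
	}
	pagefault_out_of_memory();	/* user faults: no more ad-hoc SIGKILL */
}

int main(void)
{
	out_of_memory_path(true);
	out_of_memory_path(false);
	return 0;
}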
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c index d48a84fd7fae..8a2e6ded9a44 100644 --- a/arch/mn10300/mm/fault.c +++ b/arch/mn10300/mm/fault.c | |||
@@ -345,9 +345,10 @@ no_context: | |||
345 | */ | 345 | */ |
346 | out_of_memory: | 346 | out_of_memory: |
347 | up_read(&mm->mmap_sem); | 347 | up_read(&mm->mmap_sem); |
348 | printk(KERN_ALERT "VM: killing process %s\n", tsk->comm); | 348 | if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) { |
349 | if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) | 349 | pagefault_out_of_memory(); |
350 | do_exit(SIGKILL); | 350 | return; |
351 | } | ||
351 | goto no_context; | 352 | goto no_context; |
352 | 353 | ||
353 | do_sigbus: | 354 | do_sigbus: |
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c index e2bfafce66c5..4a41f8493ab0 100644 --- a/arch/openrisc/mm/fault.c +++ b/arch/openrisc/mm/fault.c | |||
@@ -267,10 +267,10 @@ out_of_memory: | |||
267 | __asm__ __volatile__("l.nop 1"); | 267 | __asm__ __volatile__("l.nop 1"); |
268 | 268 | ||
269 | up_read(&mm->mmap_sem); | 269 | up_read(&mm->mmap_sem); |
270 | printk("VM: killing process %s\n", tsk->comm); | 270 | if (!user_mode(regs)) |
271 | if (user_mode(regs)) | 271 | goto no_context; |
272 | do_exit(SIGKILL); | 272 | pagefault_out_of_memory(); |
273 | goto no_context; | 273 | return; |
274 | 274 | ||
275 | do_sigbus: | 275 | do_sigbus: |
276 | up_read(&mm->mmap_sem); | 276 | up_read(&mm->mmap_sem); |
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 64f7bd5b1b0f..9a0d24c390a3 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
@@ -975,16 +975,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | |||
975 | hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL; | 975 | hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL; |
976 | hw_brk.len = 8; | 976 | hw_brk.len = 8; |
977 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 977 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
978 | if (ptrace_get_breakpoints(task) < 0) | ||
979 | return -ESRCH; | ||
980 | |||
981 | bp = thread->ptrace_bps[0]; | 978 | bp = thread->ptrace_bps[0]; |
982 | if ((!data) || !(hw_brk.type & HW_BRK_TYPE_RDWR)) { | 979 | if ((!data) || !(hw_brk.type & HW_BRK_TYPE_RDWR)) { |
983 | if (bp) { | 980 | if (bp) { |
984 | unregister_hw_breakpoint(bp); | 981 | unregister_hw_breakpoint(bp); |
985 | thread->ptrace_bps[0] = NULL; | 982 | thread->ptrace_bps[0] = NULL; |
986 | } | 983 | } |
987 | ptrace_put_breakpoints(task); | ||
988 | return 0; | 984 | return 0; |
989 | } | 985 | } |
990 | if (bp) { | 986 | if (bp) { |
@@ -997,11 +993,9 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | |||
997 | 993 | ||
998 | ret = modify_user_hw_breakpoint(bp, &attr); | 994 | ret = modify_user_hw_breakpoint(bp, &attr); |
999 | if (ret) { | 995 | if (ret) { |
1000 | ptrace_put_breakpoints(task); | ||
1001 | return ret; | 996 | return ret; |
1002 | } | 997 | } |
1003 | thread->ptrace_bps[0] = bp; | 998 | thread->ptrace_bps[0] = bp; |
1004 | ptrace_put_breakpoints(task); | ||
1005 | thread->hw_brk = hw_brk; | 999 | thread->hw_brk = hw_brk; |
1006 | return 0; | 1000 | return 0; |
1007 | } | 1001 | } |
@@ -1016,12 +1010,9 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | |||
1016 | ptrace_triggered, NULL, task); | 1010 | ptrace_triggered, NULL, task); |
1017 | if (IS_ERR(bp)) { | 1011 | if (IS_ERR(bp)) { |
1018 | thread->ptrace_bps[0] = NULL; | 1012 | thread->ptrace_bps[0] = NULL; |
1019 | ptrace_put_breakpoints(task); | ||
1020 | return PTR_ERR(bp); | 1013 | return PTR_ERR(bp); |
1021 | } | 1014 | } |
1022 | 1015 | ||
1023 | ptrace_put_breakpoints(task); | ||
1024 | |||
1025 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | 1016 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
1026 | task->thread.hw_brk = hw_brk; | 1017 | task->thread.hw_brk = hw_brk; |
1027 | #else /* CONFIG_PPC_ADV_DEBUG_REGS */ | 1018 | #else /* CONFIG_PPC_ADV_DEBUG_REGS */ |
@@ -1440,26 +1431,19 @@ static long ppc_set_hwdebug(struct task_struct *child, | |||
1440 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) | 1431 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) |
1441 | brk.type |= HW_BRK_TYPE_WRITE; | 1432 | brk.type |= HW_BRK_TYPE_WRITE; |
1442 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 1433 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
1443 | if (ptrace_get_breakpoints(child) < 0) | ||
1444 | return -ESRCH; | ||
1445 | |||
1446 | /* | 1434 | /* |
1447 | * Check if the request is for 'range' breakpoints. We can | 1435 | * Check if the request is for 'range' breakpoints. We can |
1448 | * support it if range < 8 bytes. | 1436 | * support it if range < 8 bytes. |
1449 | */ | 1437 | */ |
1450 | if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE) { | 1438 | if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE) |
1451 | len = bp_info->addr2 - bp_info->addr; | 1439 | len = bp_info->addr2 - bp_info->addr; |
1452 | } else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT) | 1440 | else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT) |
1453 | len = 1; | 1441 | len = 1; |
1454 | else { | 1442 | else |
1455 | ptrace_put_breakpoints(child); | ||
1456 | return -EINVAL; | 1443 | return -EINVAL; |
1457 | } | ||
1458 | bp = thread->ptrace_bps[0]; | 1444 | bp = thread->ptrace_bps[0]; |
1459 | if (bp) { | 1445 | if (bp) |
1460 | ptrace_put_breakpoints(child); | ||
1461 | return -ENOSPC; | 1446 | return -ENOSPC; |
1462 | } | ||
1463 | 1447 | ||
1464 | /* Create a new breakpoint request if one doesn't exist already */ | 1448 | /* Create a new breakpoint request if one doesn't exist already */ |
1465 | hw_breakpoint_init(&attr); | 1449 | hw_breakpoint_init(&attr); |
@@ -1471,11 +1455,9 @@ static long ppc_set_hwdebug(struct task_struct *child, | |||
1471 | ptrace_triggered, NULL, child); | 1455 | ptrace_triggered, NULL, child); |
1472 | if (IS_ERR(bp)) { | 1456 | if (IS_ERR(bp)) { |
1473 | thread->ptrace_bps[0] = NULL; | 1457 | thread->ptrace_bps[0] = NULL; |
1474 | ptrace_put_breakpoints(child); | ||
1475 | return PTR_ERR(bp); | 1458 | return PTR_ERR(bp); |
1476 | } | 1459 | } |
1477 | 1460 | ||
1478 | ptrace_put_breakpoints(child); | ||
1479 | return 1; | 1461 | return 1; |
1480 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | 1462 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
1481 | 1463 | ||
@@ -1519,16 +1501,12 @@ static long ppc_del_hwdebug(struct task_struct *child, long data) | |||
1519 | return -EINVAL; | 1501 | return -EINVAL; |
1520 | 1502 | ||
1521 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 1503 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
1522 | if (ptrace_get_breakpoints(child) < 0) | ||
1523 | return -ESRCH; | ||
1524 | |||
1525 | bp = thread->ptrace_bps[0]; | 1504 | bp = thread->ptrace_bps[0]; |
1526 | if (bp) { | 1505 | if (bp) { |
1527 | unregister_hw_breakpoint(bp); | 1506 | unregister_hw_breakpoint(bp); |
1528 | thread->ptrace_bps[0] = NULL; | 1507 | thread->ptrace_bps[0] = NULL; |
1529 | } else | 1508 | } else |
1530 | ret = -ENOENT; | 1509 | ret = -ENOENT; |
1531 | ptrace_put_breakpoints(child); | ||
1532 | return ret; | 1510 | return ret; |
1533 | #else /* CONFIG_HAVE_HW_BREAKPOINT */ | 1511 | #else /* CONFIG_HAVE_HW_BREAKPOINT */ |
1534 | if (child->thread.hw_brk.address == 0) | 1512 | if (child->thread.hw_brk.address == 0) |
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c index 47b600e4b2c5..6b18fb0189ae 100644 --- a/arch/score/mm/fault.c +++ b/arch/score/mm/fault.c | |||
@@ -172,10 +172,10 @@ out_of_memory: | |||
172 | down_read(&mm->mmap_sem); | 172 | down_read(&mm->mmap_sem); |
173 | goto survive; | 173 | goto survive; |
174 | } | 174 | } |
175 | printk("VM: killing process %s\n", tsk->comm); | 175 | if (!user_mode(regs)) |
176 | if (user_mode(regs)) | 176 | goto no_context; |
177 | do_group_exit(SIGKILL); | 177 | pagefault_out_of_memory(); |
178 | goto no_context; | 178 | return; |
179 | 179 | ||
180 | do_sigbus: | 180 | do_sigbus: |
181 | up_read(&mm->mmap_sem); | 181 | up_read(&mm->mmap_sem); |
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 81f999a672f6..668c81631c08 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c | |||
@@ -117,11 +117,7 @@ void user_enable_single_step(struct task_struct *child) | |||
117 | 117 | ||
118 | set_tsk_thread_flag(child, TIF_SINGLESTEP); | 118 | set_tsk_thread_flag(child, TIF_SINGLESTEP); |
119 | 119 | ||
120 | if (ptrace_get_breakpoints(child) < 0) | ||
121 | return; | ||
122 | |||
123 | set_single_step(child, pc); | 120 | set_single_step(child, pc); |
124 | ptrace_put_breakpoints(child); | ||
125 | } | 121 | } |
126 | 122 | ||
127 | void user_disable_single_step(struct task_struct *child) | 123 | void user_disable_single_step(struct task_struct *child) |
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c index 3d2b81c163a6..f7f99f90cbe0 100644 --- a/arch/tile/mm/fault.c +++ b/arch/tile/mm/fault.c | |||
@@ -573,10 +573,10 @@ out_of_memory: | |||
573 | down_read(&mm->mmap_sem); | 573 | down_read(&mm->mmap_sem); |
574 | goto survive; | 574 | goto survive; |
575 | } | 575 | } |
576 | pr_alert("VM: killing process %s\n", tsk->comm); | 576 | if (is_kernel_mode) |
577 | if (!is_kernel_mode) | 577 | goto no_context; |
578 | do_group_exit(SIGKILL); | 578 | pagefault_out_of_memory(); |
579 | goto no_context; | 579 | return 0; |
580 | 580 | ||
581 | do_sigbus: | 581 | do_sigbus: |
582 | up_read(&mm->mmap_sem); | 582 | up_read(&mm->mmap_sem); |
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c index c9447691bdac..778ebba80827 100644 --- a/arch/unicore32/kernel/process.c +++ b/arch/unicore32/kernel/process.c | |||
@@ -51,16 +51,6 @@ void arch_cpu_idle(void) | |||
51 | local_irq_enable(); | 51 | local_irq_enable(); |
52 | } | 52 | } |
53 | 53 | ||
54 | static char reboot_mode = 'h'; | ||
55 | |||
56 | int __init reboot_setup(char *str) | ||
57 | { | ||
58 | reboot_mode = str[0]; | ||
59 | return 1; | ||
60 | } | ||
61 | |||
62 | __setup("reboot=", reboot_setup); | ||
63 | |||
64 | void machine_halt(void) | 54 | void machine_halt(void) |
65 | { | 55 | { |
66 | gpio_set_value(GPO_SOFT_OFF, 0); | 56 | gpio_set_value(GPO_SOFT_OFF, 0); |
@@ -88,7 +78,7 @@ void machine_restart(char *cmd) | |||
88 | * we may need it to insert some 1:1 mappings so that | 78 | * we may need it to insert some 1:1 mappings so that |
89 | * soft boot works. | 79 | * soft boot works. |
90 | */ | 80 | */ |
91 | setup_mm_for_reboot(reboot_mode); | 81 | setup_mm_for_reboot(); |
92 | 82 | ||
93 | /* Clean and invalidate caches */ | 83 | /* Clean and invalidate caches */ |
94 | flush_cache_all(); | 84 | flush_cache_all(); |
@@ -102,7 +92,7 @@ void machine_restart(char *cmd) | |||
102 | /* | 92 | /* |
103 | * Now handle reboot code. | 93 | * Now handle reboot code. |
104 | */ | 94 | */ |
105 | if (reboot_mode == 's') { | 95 | if (reboot_mode == REBOOT_SOFT) { |
106 | /* Jump into ROM at address 0xffff0000 */ | 96 | /* Jump into ROM at address 0xffff0000 */ |
107 | cpu_reset(VECTORS_BASE); | 97 | cpu_reset(VECTORS_BASE); |
108 | } else { | 98 | } else { |
diff --git a/arch/unicore32/kernel/setup.h b/arch/unicore32/kernel/setup.h index 30f749da8f73..f5c51b85ad24 100644 --- a/arch/unicore32/kernel/setup.h +++ b/arch/unicore32/kernel/setup.h | |||
@@ -22,7 +22,7 @@ extern void puv3_ps2_init(void); | |||
22 | extern void pci_puv3_preinit(void); | 22 | extern void pci_puv3_preinit(void); |
23 | extern void __init puv3_init_gpio(void); | 23 | extern void __init puv3_init_gpio(void); |
24 | 24 | ||
25 | extern void setup_mm_for_reboot(char mode); | 25 | extern void setup_mm_for_reboot(void); |
26 | 26 | ||
27 | extern char __stubs_start[], __stubs_end[]; | 27 | extern char __stubs_start[], __stubs_end[]; |
28 | extern char __vectors_start[], __vectors_end[]; | 28 | extern char __vectors_start[], __vectors_end[]; |
diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c index 43c20b40e444..4f5a532bee13 100644 --- a/arch/unicore32/mm/mmu.c +++ b/arch/unicore32/mm/mmu.c | |||
@@ -445,7 +445,7 @@ void __init paging_init(void) | |||
445 | * the user-mode pages. This will then ensure that we have predictable | 445 | * the user-mode pages. This will then ensure that we have predictable |
446 | * results when turning the mmu off | 446 | * results when turning the mmu off |
447 | */ | 447 | */ |
448 | void setup_mm_for_reboot(char mode) | 448 | void setup_mm_for_reboot(void) |
449 | { | 449 | { |
450 | unsigned long base_pmdval; | 450 | unsigned long base_pmdval; |
451 | pgd_t *pgd; | 451 | pgd_t *pgd; |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 265c672a2f40..b32ebf92b0ce 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -65,6 +65,7 @@ config X86 | |||
65 | select HAVE_KERNEL_LZMA | 65 | select HAVE_KERNEL_LZMA |
66 | select HAVE_KERNEL_XZ | 66 | select HAVE_KERNEL_XZ |
67 | select HAVE_KERNEL_LZO | 67 | select HAVE_KERNEL_LZO |
68 | select HAVE_KERNEL_LZ4 | ||
68 | select HAVE_HW_BREAKPOINT | 69 | select HAVE_HW_BREAKPOINT |
69 | select HAVE_MIXED_BREAKPOINTS_REGS | 70 | select HAVE_MIXED_BREAKPOINTS_REGS |
70 | select PERF_EVENTS | 71 | select PERF_EVENTS |
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 5ef205c5f37b..dcd90df10ab4 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile | |||
@@ -4,7 +4,8 @@ | |||
4 | # create a compressed vmlinux image from the original vmlinux | 4 | # create a compressed vmlinux image from the original vmlinux |
5 | # | 5 | # |
6 | 6 | ||
7 | targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo | 7 | targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \ |
8 | vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 | ||
8 | 9 | ||
9 | KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 | 10 | KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 |
10 | KBUILD_CFLAGS += -fno-strict-aliasing -fPIC | 11 | KBUILD_CFLAGS += -fno-strict-aliasing -fPIC |
@@ -63,12 +64,15 @@ $(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE | |||
63 | $(call if_changed,xzkern) | 64 | $(call if_changed,xzkern) |
64 | $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE | 65 | $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE |
65 | $(call if_changed,lzo) | 66 | $(call if_changed,lzo) |
67 | $(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE | ||
68 | $(call if_changed,lz4) | ||
66 | 69 | ||
67 | suffix-$(CONFIG_KERNEL_GZIP) := gz | 70 | suffix-$(CONFIG_KERNEL_GZIP) := gz |
68 | suffix-$(CONFIG_KERNEL_BZIP2) := bz2 | 71 | suffix-$(CONFIG_KERNEL_BZIP2) := bz2 |
69 | suffix-$(CONFIG_KERNEL_LZMA) := lzma | 72 | suffix-$(CONFIG_KERNEL_LZMA) := lzma |
70 | suffix-$(CONFIG_KERNEL_XZ) := xz | 73 | suffix-$(CONFIG_KERNEL_XZ) := xz |
71 | suffix-$(CONFIG_KERNEL_LZO) := lzo | 74 | suffix-$(CONFIG_KERNEL_LZO) := lzo |
75 | suffix-$(CONFIG_KERNEL_LZ4) := lz4 | ||
72 | 76 | ||
73 | quiet_cmd_mkpiggy = MKPIGGY $@ | 77 | quiet_cmd_mkpiggy = MKPIGGY $@ |
74 | cmd_mkpiggy = $(obj)/mkpiggy $< > $@ || ( rm -f $@ ; false ) | 78 | cmd_mkpiggy = $(obj)/mkpiggy $< > $@ || ( rm -f $@ ; false ) |
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 7cb56c6ca351..0319c88290a5 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c | |||
@@ -145,6 +145,10 @@ static int lines, cols; | |||
145 | #include "../../../../lib/decompress_unlzo.c" | 145 | #include "../../../../lib/decompress_unlzo.c" |
146 | #endif | 146 | #endif |
147 | 147 | ||
148 | #ifdef CONFIG_KERNEL_LZ4 | ||
149 | #include "../../../../lib/decompress_unlz4.c" | ||
150 | #endif | ||
151 | |||
148 | static void scroll(void) | 152 | static void scroll(void) |
149 | { | 153 | { |
150 | int i; | 154 | int i; |
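For context on the hunk above: the x86 boot stub picks its decompressor at compile time by including exactly one lib/decompress_un*.c, keyed off whichever mutually exclusive CONFIG_KERNEL_* symbol the Kconfig compression choice sets, and this change adds LZ4 to that chain. A compilable toy version of the same selection pattern; the CFG_* symbols and backend names are illustrative stand-ins, not the kernel's real identifiers.

#include <stdio.h>

/* Exactly one CFG_* is expected to be defined by the build system,
 * mirroring the mutually exclusive CONFIG_KERNEL_* choice. */
#if defined(CFG_LZ4)
static const char *backend(void) { return "lz4"; }	/* the newly added option */
#elif defined(CFG_LZO)
static const char *backend(void) { return "lzo"; }
#else
static const char *backend(void) { return "gzip"; }	/* default in this sketch */
#endif

int main(void)
{
	printf("decompressing kernel image with %s\n", backend());
	return 0;
}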
diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h index 75ce3f47d204..77a99ac06d00 100644 --- a/arch/x86/include/asm/emergency-restart.h +++ b/arch/x86/include/asm/emergency-restart.h | |||
@@ -1,18 +1,6 @@ | |||
1 | #ifndef _ASM_X86_EMERGENCY_RESTART_H | 1 | #ifndef _ASM_X86_EMERGENCY_RESTART_H |
2 | #define _ASM_X86_EMERGENCY_RESTART_H | 2 | #define _ASM_X86_EMERGENCY_RESTART_H |
3 | 3 | ||
4 | enum reboot_type { | ||
5 | BOOT_TRIPLE = 't', | ||
6 | BOOT_KBD = 'k', | ||
7 | BOOT_BIOS = 'b', | ||
8 | BOOT_ACPI = 'a', | ||
9 | BOOT_EFI = 'e', | ||
10 | BOOT_CF9 = 'p', | ||
11 | BOOT_CF9_COND = 'q', | ||
12 | }; | ||
13 | |||
14 | extern enum reboot_type reboot_type; | ||
15 | |||
16 | extern void machine_emergency_restart(void); | 4 | extern void machine_emergency_restart(void); |
17 | 5 | ||
18 | #endif /* _ASM_X86_EMERGENCY_RESTART_H */ | 6 | #endif /* _ASM_X86_EMERGENCY_RESTART_H */ |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 39cc7f7acab3..63092afb142e 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/kdebug.h> | 25 | #include <linux/kdebug.h> |
26 | #include <linux/delay.h> | 26 | #include <linux/delay.h> |
27 | #include <linux/crash_dump.h> | 27 | #include <linux/crash_dump.h> |
28 | #include <linux/reboot.h> | ||
28 | 29 | ||
29 | #include <asm/uv/uv_mmrs.h> | 30 | #include <asm/uv/uv_mmrs.h> |
30 | #include <asm/uv/uv_hub.h> | 31 | #include <asm/uv/uv_hub.h> |
@@ -36,7 +37,6 @@ | |||
36 | #include <asm/ipi.h> | 37 | #include <asm/ipi.h> |
37 | #include <asm/smp.h> | 38 | #include <asm/smp.h> |
38 | #include <asm/x86_init.h> | 39 | #include <asm/x86_init.h> |
39 | #include <asm/emergency-restart.h> | ||
40 | #include <asm/nmi.h> | 40 | #include <asm/nmi.h> |
41 | 41 | ||
42 | /* BMC sets a bit this MMR non-zero before sending an NMI */ | 42 | /* BMC sets a bit this MMR non-zero before sending an NMI */ |
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 02f07634d265..f66ff162dce8 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c | |||
@@ -393,6 +393,9 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk) | |||
393 | unregister_hw_breakpoint(t->ptrace_bps[i]); | 393 | unregister_hw_breakpoint(t->ptrace_bps[i]); |
394 | t->ptrace_bps[i] = NULL; | 394 | t->ptrace_bps[i] = NULL; |
395 | } | 395 | } |
396 | |||
397 | t->debugreg6 = 0; | ||
398 | t->ptrace_dr7 = 0; | ||
396 | } | 399 | } |
397 | 400 | ||
398 | void hw_breakpoint_restore(void) | 401 | void hw_breakpoint_restore(void) |
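The arch/x86/kernel/ptrace.c diff that follows reworks ptrace_write_dr7() around a two-pass loop: the first pass tries to apply the new DR7 value slot by slot, and if any slot fails the loop is re-run once with the old value, which is expected to succeed (the rework warns if it does not). A stand-alone sketch of that apply-then-restore pattern, with a stubbed per-slot step in place of the real breakpoint calls:

#include <stdbool.h>
#include <stdio.h>

#define NSLOTS 4	/* HBP_NUM is 4 on x86; used here only as a loop bound */

/* Stub standing in for registering/modifying one hardware breakpoint. */
static int apply_slot(int slot, unsigned long data)
{
	(void)slot;
	return (data == 0xbad) ? -1 : 0;	/* pretend 0xbad cannot be applied */
}

static int write_control_reg(unsigned long data, unsigned long old_data)
{
	bool second_pass = false;
	int i, rc, ret = 0;

restore:
	rc = 0;
	for (i = 0; i < NSLOTS; i++) {
		rc = apply_slot(i, data);
		if (rc)
			break;
	}
	/* Restore the old value if the first pass failed; the restore pass
	 * is expected to succeed, so it is attempted only once. */
	if (rc && !second_pass) {
		ret = rc;
		data = old_data;
		second_pass = true;
		goto restore;
	}
	return ret;
}

int main(void)
{
	printf("apply bad value: %d\n", write_control_reg(0xbad, 0));	/* fails, then restores */
	printf("apply good value: %d\n", write_control_reg(0x5, 0));	/* succeeds */
	return 0;
}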
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 29a8120e6fe8..7461f50d5bb1 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -601,30 +601,48 @@ static unsigned long ptrace_get_dr7(struct perf_event *bp[]) | |||
601 | return dr7; | 601 | return dr7; |
602 | } | 602 | } |
603 | 603 | ||
604 | static int | 604 | static int ptrace_fill_bp_fields(struct perf_event_attr *attr, |
605 | ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, | 605 | int len, int type, bool disabled) |
606 | struct task_struct *tsk, int disabled) | 606 | { |
607 | int err, bp_len, bp_type; | ||
608 | |||
609 | err = arch_bp_generic_fields(len, type, &bp_len, &bp_type); | ||
610 | if (!err) { | ||
611 | attr->bp_len = bp_len; | ||
612 | attr->bp_type = bp_type; | ||
613 | attr->disabled = disabled; | ||
614 | } | ||
615 | |||
616 | return err; | ||
617 | } | ||
618 | |||
619 | static struct perf_event * | ||
620 | ptrace_register_breakpoint(struct task_struct *tsk, int len, int type, | ||
621 | unsigned long addr, bool disabled) | ||
607 | { | 622 | { |
608 | int err; | ||
609 | int gen_len, gen_type; | ||
610 | struct perf_event_attr attr; | 623 | struct perf_event_attr attr; |
624 | int err; | ||
611 | 625 | ||
612 | /* | 626 | ptrace_breakpoint_init(&attr); |
613 | * We should have at least an inactive breakpoint at this | 627 | attr.bp_addr = addr; |
614 | * slot. It means the user is writing dr7 without having | ||
615 | * written the address register first | ||
616 | */ | ||
617 | if (!bp) | ||
618 | return -EINVAL; | ||
619 | 628 | ||
620 | err = arch_bp_generic_fields(len, type, &gen_len, &gen_type); | 629 | err = ptrace_fill_bp_fields(&attr, len, type, disabled); |
621 | if (err) | 630 | if (err) |
622 | return err; | 631 | return ERR_PTR(err); |
632 | |||
633 | return register_user_hw_breakpoint(&attr, ptrace_triggered, | ||
634 | NULL, tsk); | ||
635 | } | ||
623 | 636 | ||
624 | attr = bp->attr; | 637 | static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, |
625 | attr.bp_len = gen_len; | 638 | int disabled) |
626 | attr.bp_type = gen_type; | 639 | { |
627 | attr.disabled = disabled; | 640 | struct perf_event_attr attr = bp->attr; |
641 | int err; | ||
642 | |||
643 | err = ptrace_fill_bp_fields(&attr, len, type, disabled); | ||
644 | if (err) | ||
645 | return err; | ||
628 | 646 | ||
629 | return modify_user_hw_breakpoint(bp, &attr); | 647 | return modify_user_hw_breakpoint(bp, &attr); |
630 | } | 648 | } |
@@ -634,67 +652,50 @@ ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, | |||
634 | */ | 652 | */ |
635 | static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data) | 653 | static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data) |
636 | { | 654 | { |
637 | struct thread_struct *thread = &(tsk->thread); | 655 | struct thread_struct *thread = &tsk->thread; |
638 | unsigned long old_dr7; | 656 | unsigned long old_dr7; |
639 | int i, orig_ret = 0, rc = 0; | 657 | bool second_pass = false; |
640 | int enabled, second_pass = 0; | 658 | int i, rc, ret = 0; |
641 | unsigned len, type; | ||
642 | struct perf_event *bp; | ||
643 | |||
644 | if (ptrace_get_breakpoints(tsk) < 0) | ||
645 | return -ESRCH; | ||
646 | 659 | ||
647 | data &= ~DR_CONTROL_RESERVED; | 660 | data &= ~DR_CONTROL_RESERVED; |
648 | old_dr7 = ptrace_get_dr7(thread->ptrace_bps); | 661 | old_dr7 = ptrace_get_dr7(thread->ptrace_bps); |
662 | |||
649 | restore: | 663 | restore: |
650 | /* | 664 | rc = 0; |
651 | * Loop through all the hardware breakpoints, making the | ||
652 | * appropriate changes to each. | ||
653 | */ | ||
654 | for (i = 0; i < HBP_NUM; i++) { | 665 | for (i = 0; i < HBP_NUM; i++) { |
655 | enabled = decode_dr7(data, i, &len, &type); | 666 | unsigned len, type; |
656 | bp = thread->ptrace_bps[i]; | 667 | bool disabled = !decode_dr7(data, i, &len, &type); |
657 | 668 | struct perf_event *bp = thread->ptrace_bps[i]; | |
658 | if (!enabled) { | 669 | |
659 | if (bp) { | 670 | if (!bp) { |
660 | /* | 671 | if (disabled) |
661 | * Don't unregister the breakpoints right-away, | 672 | continue; |
662 | * unless all register_user_hw_breakpoint() | 673 | |
663 | * requests have succeeded. This prevents | 674 | bp = ptrace_register_breakpoint(tsk, |
664 | * any window of opportunity for debug | 675 | len, type, 0, disabled); |
665 | * register grabbing by other users. | 676 | if (IS_ERR(bp)) { |
666 | */ | 677 | rc = PTR_ERR(bp); |
667 | if (!second_pass) | 678 | break; |
668 | continue; | ||
669 | |||
670 | rc = ptrace_modify_breakpoint(bp, len, type, | ||
671 | tsk, 1); | ||
672 | if (rc) | ||
673 | break; | ||
674 | } | 679 | } |
680 | |||
681 | thread->ptrace_bps[i] = bp; | ||
675 | continue; | 682 | continue; |
676 | } | 683 | } |
677 | 684 | ||
678 | rc = ptrace_modify_breakpoint(bp, len, type, tsk, 0); | 685 | rc = ptrace_modify_breakpoint(bp, len, type, disabled); |
679 | if (rc) | 686 | if (rc) |
680 | break; | 687 | break; |
681 | } | 688 | } |
682 | /* | 689 | |
683 | * Make a second pass to free the remaining unused breakpoints | 690 | /* Restore if the first pass failed, second_pass shouldn't fail. */ |
684 | * or to restore the original breakpoints if an error occurred. | 691 | if (rc && !WARN_ON(second_pass)) { |
685 | */ | 692 | ret = rc; |
686 | if (!second_pass) { | 693 | data = old_dr7; |
687 | second_pass = 1; | 694 | second_pass = true; |
688 | if (rc < 0) { | ||
689 | orig_ret = rc; | ||
690 | data = old_dr7; | ||
691 | } | ||
692 | goto restore; | 695 | goto restore; |
693 | } | 696 | } |
694 | 697 | ||
695 | ptrace_put_breakpoints(tsk); | 698 | return ret; |
696 | |||
697 | return ((orig_ret < 0) ? orig_ret : rc); | ||
698 | } | 699 | } |
699 | 700 | ||
700 | /* | 701 | /* |
@@ -702,25 +703,17 @@ restore: | |||
702 | */ | 703 | */ |
703 | static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) | 704 | static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) |
704 | { | 705 | { |
705 | struct thread_struct *thread = &(tsk->thread); | 706 | struct thread_struct *thread = &tsk->thread; |
706 | unsigned long val = 0; | 707 | unsigned long val = 0; |
707 | 708 | ||
708 | if (n < HBP_NUM) { | 709 | if (n < HBP_NUM) { |
709 | struct perf_event *bp; | 710 | struct perf_event *bp = thread->ptrace_bps[n]; |
710 | 711 | ||
711 | if (ptrace_get_breakpoints(tsk) < 0) | 712 | if (bp) |
712 | return -ESRCH; | ||
713 | |||
714 | bp = thread->ptrace_bps[n]; | ||
715 | if (!bp) | ||
716 | val = 0; | ||
717 | else | ||
718 | val = bp->hw.info.address; | 713 | val = bp->hw.info.address; |
719 | |||
720 | ptrace_put_breakpoints(tsk); | ||
721 | } else if (n == 6) { | 714 | } else if (n == 6) { |
722 | val = thread->debugreg6; | 715 | val = thread->debugreg6; |
723 | } else if (n == 7) { | 716 | } else if (n == 7) { |
724 | val = thread->ptrace_dr7; | 717 | val = thread->ptrace_dr7; |
725 | } | 718 | } |
726 | return val; | 719 | return val; |
@@ -729,29 +722,14 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) | |||
729 | static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, | 722 | static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, |
730 | unsigned long addr) | 723 | unsigned long addr) |
731 | { | 724 | { |
732 | struct perf_event *bp; | ||
733 | struct thread_struct *t = &tsk->thread; | 725 | struct thread_struct *t = &tsk->thread; |
734 | struct perf_event_attr attr; | 726 | struct perf_event *bp = t->ptrace_bps[nr]; |
735 | int err = 0; | 727 | int err = 0; |
736 | 728 | ||
737 | if (ptrace_get_breakpoints(tsk) < 0) | 729 | if (!bp) { |
738 | return -ESRCH; | ||
739 | |||
740 | if (!t->ptrace_bps[nr]) { | ||
741 | ptrace_breakpoint_init(&attr); | ||
742 | /* | ||
743 | * Put stub len and type to register (reserve) an inactive but | ||
744 | * correct bp | ||
745 | */ | ||
746 | attr.bp_addr = addr; | ||
747 | attr.bp_len = HW_BREAKPOINT_LEN_1; | ||
748 | attr.bp_type = HW_BREAKPOINT_W; | ||
749 | attr.disabled = 1; | ||
750 | |||
751 | bp = register_user_hw_breakpoint(&attr, ptrace_triggered, | ||
752 | NULL, tsk); | ||
753 | |||
754 | /* | 730 | /* |
731 | * Put stub len and type to create an inactive but correct bp. | ||
732 | * | ||
755 | * CHECKME: the previous code returned -EIO if the addr wasn't | 733 | * CHECKME: the previous code returned -EIO if the addr wasn't |
756 | * a valid task virtual addr. The new one will return -EINVAL in | 734 | * a valid task virtual addr. The new one will return -EINVAL in |
757 | * this case. | 735 | * this case. |
@@ -760,22 +738,20 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, | |||
760 | * writing for the user. And anyway this is the previous | 738 | * writing for the user. And anyway this is the previous |
761 | * behaviour. | 739 | * behaviour. |
762 | */ | 740 | */ |
763 | if (IS_ERR(bp)) { | 741 | bp = ptrace_register_breakpoint(tsk, |
742 | X86_BREAKPOINT_LEN_1, X86_BREAKPOINT_WRITE, | ||
743 | addr, true); | ||
744 | if (IS_ERR(bp)) | ||
764 | err = PTR_ERR(bp); | 745 | err = PTR_ERR(bp); |
765 | goto put; | 746 | else |
766 | } | 747 | t->ptrace_bps[nr] = bp; |
767 | |||
768 | t->ptrace_bps[nr] = bp; | ||
769 | } else { | 748 | } else { |
770 | bp = t->ptrace_bps[nr]; | 749 | struct perf_event_attr attr = bp->attr; |
771 | 750 | ||
772 | attr = bp->attr; | ||
773 | attr.bp_addr = addr; | 751 | attr.bp_addr = addr; |
774 | err = modify_user_hw_breakpoint(bp, &attr); | 752 | err = modify_user_hw_breakpoint(bp, &attr); |
775 | } | 753 | } |
776 | 754 | ||
777 | put: | ||
778 | ptrace_put_breakpoints(tsk); | ||
779 | return err; | 755 | return err; |
780 | } | 756 | } |
781 | 757 | ||
@@ -785,30 +761,20 @@ put: | |||
785 | static int ptrace_set_debugreg(struct task_struct *tsk, int n, | 761 | static int ptrace_set_debugreg(struct task_struct *tsk, int n, |
786 | unsigned long val) | 762 | unsigned long val) |
787 | { | 763 | { |
788 | struct thread_struct *thread = &(tsk->thread); | 764 | struct thread_struct *thread = &tsk->thread; |
789 | int rc = 0; | ||
790 | |||
791 | /* There are no DR4 or DR5 registers */ | 765 | /* There are no DR4 or DR5 registers */ |
792 | if (n == 4 || n == 5) | 766 | int rc = -EIO; |
793 | return -EIO; | ||
794 | 767 | ||
795 | if (n == 6) { | ||
796 | thread->debugreg6 = val; | ||
797 | goto ret_path; | ||
798 | } | ||
799 | if (n < HBP_NUM) { | 768 | if (n < HBP_NUM) { |
800 | rc = ptrace_set_breakpoint_addr(tsk, n, val); | 769 | rc = ptrace_set_breakpoint_addr(tsk, n, val); |
801 | if (rc) | 770 | } else if (n == 6) { |
802 | return rc; | 771 | thread->debugreg6 = val; |
803 | } | 772 | rc = 0; |
804 | /* All that's left is DR7 */ | 773 | } else if (n == 7) { |
805 | if (n == 7) { | ||
806 | rc = ptrace_write_dr7(tsk, val); | 774 | rc = ptrace_write_dr7(tsk, val); |
807 | if (!rc) | 775 | if (!rc) |
808 | thread->ptrace_dr7 = val; | 776 | thread->ptrace_dr7 = val; |
809 | } | 777 | } |
810 | |||
811 | ret_path: | ||
812 | return rc; | 778 | return rc; |
813 | } | 779 | } |
814 | 780 | ||
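For illustration only (not part of this commit), a minimal userspace sketch of the path the reworked ptrace_set_debugreg()/ptrace_write_dr7() code services: arming a one-byte write watchpoint in a traced child by poking DR0 and then DR7. The helper name and the surrounding tracer set-up are assumptions.

#include <stddef.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

static int set_write_watchpoint(pid_t child, unsigned long addr)
{
	/* DR0 takes the address; on the kernel side this reserves an
	 * inactive breakpoint via ptrace_set_breakpoint_addr(). */
	if (ptrace(PTRACE_POKEUSER, child,
		   (void *)offsetof(struct user, u_debugreg[0]),
		   (void *)addr) == -1)
		return -1;

	/* DR7: local-enable slot 0, R/W0 = 01 (write), LEN0 = 00 (1 byte);
	 * ptrace_write_dr7() decodes exactly these per-slot fields. */
	unsigned long dr7 = 0x1UL | (0x1UL << 16);

	return ptrace(PTRACE_POKEUSER, child,
		      (void *)offsetof(struct user, u_debugreg[7]),
		      (void *)dr7);
}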
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 76fa1e9a2b39..563ed91e6faa 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -36,22 +36,6 @@ void (*pm_power_off)(void); | |||
36 | EXPORT_SYMBOL(pm_power_off); | 36 | EXPORT_SYMBOL(pm_power_off); |
37 | 37 | ||
38 | static const struct desc_ptr no_idt = {}; | 38 | static const struct desc_ptr no_idt = {}; |
39 | static int reboot_mode; | ||
40 | enum reboot_type reboot_type = BOOT_ACPI; | ||
41 | int reboot_force; | ||
42 | |||
43 | /* | ||
44 | * This variable is used privately to keep track of whether or not | ||
45 | * reboot_type is still set to its default value (i.e., reboot= hasn't | ||
46 | * been set on the command line). This is needed so that we can | ||
47 | * suppress DMI scanning for reboot quirks. Without it, it's | ||
48 | * impossible to override a faulty reboot quirk without recompiling. | ||
49 | */ | ||
50 | static int reboot_default = 1; | ||
51 | |||
52 | #ifdef CONFIG_SMP | ||
53 | static int reboot_cpu = -1; | ||
54 | #endif | ||
55 | 39 | ||
56 | /* | 40 | /* |
57 | * This is set if we need to go through the 'emergency' path. | 41 | * This is set if we need to go through the 'emergency' path. |
@@ -64,79 +48,6 @@ static int reboot_emergency; | |||
64 | bool port_cf9_safe = false; | 48 | bool port_cf9_safe = false; |
65 | 49 | ||
66 | /* | 50 | /* |
67 | * reboot=b[ios] | s[mp] | t[riple] | k[bd] | e[fi] [, [w]arm | [c]old] | p[ci] | ||
68 | * warm Don't set the cold reboot flag | ||
69 | * cold Set the cold reboot flag | ||
70 | * bios Reboot by jumping through the BIOS | ||
71 | * smp Reboot by executing reset on BSP or other CPU | ||
72 | * triple Force a triple fault (init) | ||
73 | * kbd Use the keyboard controller. cold reset (default) | ||
74 | * acpi Use the RESET_REG in the FADT | ||
75 | * efi Use efi reset_system runtime service | ||
76 | * pci Use the so-called "PCI reset register", CF9 | ||
77 | * force Avoid anything that could hang. | ||
78 | */ | ||
79 | static int __init reboot_setup(char *str) | ||
80 | { | ||
81 | for (;;) { | ||
82 | /* | ||
83 | * Having anything passed on the command line via | ||
84 | * reboot= will cause us to disable DMI checking | ||
85 | * below. | ||
86 | */ | ||
87 | reboot_default = 0; | ||
88 | |||
89 | switch (*str) { | ||
90 | case 'w': | ||
91 | reboot_mode = 0x1234; | ||
92 | break; | ||
93 | |||
94 | case 'c': | ||
95 | reboot_mode = 0; | ||
96 | break; | ||
97 | |||
98 | #ifdef CONFIG_SMP | ||
99 | case 's': | ||
100 | if (isdigit(*(str+1))) { | ||
101 | reboot_cpu = (int) (*(str+1) - '0'); | ||
102 | if (isdigit(*(str+2))) | ||
103 | reboot_cpu = reboot_cpu*10 + (int)(*(str+2) - '0'); | ||
104 | } | ||
105 | /* | ||
106 | * We will leave sorting out the final value | ||
107 | * when we are ready to reboot, since we might not | ||
108 | * have detected BSP APIC ID or smp_num_cpu | ||
109 | */ | ||
110 | break; | ||
111 | #endif /* CONFIG_SMP */ | ||
112 | |||
113 | case 'b': | ||
114 | case 'a': | ||
115 | case 'k': | ||
116 | case 't': | ||
117 | case 'e': | ||
118 | case 'p': | ||
119 | reboot_type = *str; | ||
120 | break; | ||
121 | |||
122 | case 'f': | ||
123 | reboot_force = 1; | ||
124 | break; | ||
125 | } | ||
126 | |||
127 | str = strchr(str, ','); | ||
128 | if (str) | ||
129 | str++; | ||
130 | else | ||
131 | break; | ||
132 | } | ||
133 | return 1; | ||
134 | } | ||
135 | |||
136 | __setup("reboot=", reboot_setup); | ||
137 | |||
138 | |||
139 | /* | ||
140 | * Reboot options and system auto-detection code provided by | 51 | * Reboot options and system auto-detection code provided by |
141 | * Dell Inc. so their systems "just work". :-) | 52 | * Dell Inc. so their systems "just work". :-) |
142 | */ | 53 | */ |
@@ -536,6 +447,7 @@ static void native_machine_emergency_restart(void) | |||
536 | int i; | 447 | int i; |
537 | int attempt = 0; | 448 | int attempt = 0; |
538 | int orig_reboot_type = reboot_type; | 449 | int orig_reboot_type = reboot_type; |
450 | unsigned short mode; | ||
539 | 451 | ||
540 | if (reboot_emergency) | 452 | if (reboot_emergency) |
541 | emergency_vmx_disable_all(); | 453 | emergency_vmx_disable_all(); |
@@ -543,7 +455,8 @@ static void native_machine_emergency_restart(void) | |||
543 | tboot_shutdown(TB_SHUTDOWN_REBOOT); | 455 | tboot_shutdown(TB_SHUTDOWN_REBOOT); |
544 | 456 | ||
545 | /* Tell the BIOS if we want cold or warm reboot */ | 457 | /* Tell the BIOS if we want cold or warm reboot */ |
546 | *((unsigned short *)__va(0x472)) = reboot_mode; | 458 | mode = reboot_mode == REBOOT_WARM ? 0x1234 : 0; |
459 | *((unsigned short *)__va(0x472)) = mode; | ||
547 | 460 | ||
548 | for (;;) { | 461 | for (;;) { |
549 | /* Could also try the reset bit in the Hammer NB */ | 462 | /* Could also try the reset bit in the Hammer NB */ |
@@ -585,7 +498,7 @@ static void native_machine_emergency_restart(void) | |||
585 | 498 | ||
586 | case BOOT_EFI: | 499 | case BOOT_EFI: |
587 | if (efi_enabled(EFI_RUNTIME_SERVICES)) | 500 | if (efi_enabled(EFI_RUNTIME_SERVICES)) |
588 | efi.reset_system(reboot_mode ? | 501 | efi.reset_system(reboot_mode == REBOOT_WARM ? |
589 | EFI_RESET_WARM : | 502 | EFI_RESET_WARM : |
590 | EFI_RESET_COLD, | 503 | EFI_RESET_COLD, |
591 | EFI_SUCCESS, 0, NULL); | 504 | EFI_SUCCESS, 0, NULL); |
@@ -614,26 +527,10 @@ void native_machine_shutdown(void) | |||
614 | { | 527 | { |
615 | /* Stop the cpus and apics */ | 528 | /* Stop the cpus and apics */ |
616 | #ifdef CONFIG_SMP | 529 | #ifdef CONFIG_SMP |
617 | |||
618 | /* The boot cpu is always logical cpu 0 */ | ||
619 | int reboot_cpu_id = 0; | ||
620 | |||
621 | /* See if there has been given a command line override */ | ||
622 | if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) && | ||
623 | cpu_online(reboot_cpu)) | ||
624 | reboot_cpu_id = reboot_cpu; | ||
625 | |||
626 | /* Make certain the cpu I'm about to reboot on is online */ | ||
627 | if (!cpu_online(reboot_cpu_id)) | ||
628 | reboot_cpu_id = smp_processor_id(); | ||
629 | |||
630 | /* Make certain I only run on the appropriate processor */ | ||
631 | set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id)); | ||
632 | |||
633 | /* | 530 | /* |
634 | * O.K Now that I'm on the appropriate processor, stop all of the | 531 | * Stop all of the others. Also disable the local irq to |
635 | * others. Also disable the local irq to not receive the per-cpu | 532 | * not receive the per-cpu timer interrupt which may trigger |
636 | * timer interrupt which may trigger scheduler's load balance. | 533 | * scheduler's load balance. |
637 | */ | 534 | */ |
638 | local_irq_disable(); | 535 | local_irq_disable(); |
639 | stop_other_cpus(); | 536 | stop_other_cpus(); |
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 17fda6a8b3c2..dfa537a03be1 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c | |||
@@ -240,7 +240,6 @@ static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp) | |||
240 | static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[]) | 240 | static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[]) |
241 | { | 241 | { |
242 | pud_t *pud; | 242 | pud_t *pud; |
243 | unsigned long addr; | ||
244 | int i; | 243 | int i; |
245 | 244 | ||
246 | if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */ | 245 | if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */ |
@@ -248,8 +247,7 @@ static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[]) | |||
248 | 247 | ||
249 | pud = pud_offset(pgd, 0); | 248 | pud = pud_offset(pgd, 0); |
250 | 249 | ||
251 | for (addr = i = 0; i < PREALLOCATED_PMDS; | 250 | for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) { |
252 | i++, pud++, addr += PUD_SIZE) { | ||
253 | pmd_t *pmd = pmds[i]; | 251 | pmd_t *pmd = pmds[i]; |
254 | 252 | ||
255 | if (i >= KERNEL_PGD_BOUNDARY) | 253 | if (i >= KERNEL_PGD_BOUNDARY) |
diff --git a/block/partitions/Kconfig b/block/partitions/Kconfig index 75a54e1adbb5..4cebb2f0d2f4 100644 --- a/block/partitions/Kconfig +++ b/block/partitions/Kconfig | |||
@@ -68,6 +68,17 @@ config ACORN_PARTITION_RISCIX | |||
68 | of machines called RISCiX. If you say 'Y' here, Linux will be able | 68 | of machines called RISCiX. If you say 'Y' here, Linux will be able |
69 | to read disks partitioned under RISCiX. | 69 | to read disks partitioned under RISCiX. |
70 | 70 | ||
71 | config AIX_PARTITION | ||
72 | bool "AIX basic partition table support" if PARTITION_ADVANCED | ||
73 | help | ||
74 | Say Y here if you would like to be able to read the hard disk | ||
75 | partition table format used by IBM or Motorola PowerPC machines | ||
76 | running AIX. AIX actually uses a Logical Volume Manager, where | ||
77 | "logical volumes" can be spread across one or multiple disks, | ||
78 | but this driver works only for the simple case of partitions which | ||
79 | are contiguous. | ||
80 | Otherwise, say N. | ||
81 | |||
71 | config OSF_PARTITION | 82 | config OSF_PARTITION |
72 | bool "Alpha OSF partition support" if PARTITION_ADVANCED | 83 | bool "Alpha OSF partition support" if PARTITION_ADVANCED |
73 | default y if ALPHA | 84 | default y if ALPHA |
diff --git a/block/partitions/Makefile b/block/partitions/Makefile index 03af8eac51da..2be4d7ba4e3a 100644 --- a/block/partitions/Makefile +++ b/block/partitions/Makefile | |||
@@ -7,6 +7,7 @@ obj-$(CONFIG_BLOCK) := check.o | |||
7 | obj-$(CONFIG_ACORN_PARTITION) += acorn.o | 7 | obj-$(CONFIG_ACORN_PARTITION) += acorn.o |
8 | obj-$(CONFIG_AMIGA_PARTITION) += amiga.o | 8 | obj-$(CONFIG_AMIGA_PARTITION) += amiga.o |
9 | obj-$(CONFIG_ATARI_PARTITION) += atari.o | 9 | obj-$(CONFIG_ATARI_PARTITION) += atari.o |
10 | obj-$(CONFIG_AIX_PARTITION) += aix.o | ||
10 | obj-$(CONFIG_MAC_PARTITION) += mac.o | 11 | obj-$(CONFIG_MAC_PARTITION) += mac.o |
11 | obj-$(CONFIG_LDM_PARTITION) += ldm.o | 12 | obj-$(CONFIG_LDM_PARTITION) += ldm.o |
12 | obj-$(CONFIG_MSDOS_PARTITION) += msdos.o | 13 | obj-$(CONFIG_MSDOS_PARTITION) += msdos.o |
diff --git a/block/partitions/aix.c b/block/partitions/aix.c new file mode 100644 index 000000000000..43be471d9b1d --- /dev/null +++ b/block/partitions/aix.c | |||
@@ -0,0 +1,293 @@ | |||
1 | /* | ||
2 | * fs/partitions/aix.c | ||
3 | * | ||
4 | * Copyright (C) 2012-2013 Philippe De Muyter <phdm@macqel.be> | ||
5 | */ | ||
6 | |||
7 | #include "check.h" | ||
8 | #include "aix.h" | ||
9 | |||
10 | struct lvm_rec { | ||
11 | char lvm_id[4]; /* "_LVM" */ | ||
12 | char reserved4[16]; | ||
13 | __be32 lvmarea_len; | ||
14 | __be32 vgda_len; | ||
15 | __be32 vgda_psn[2]; | ||
16 | char reserved36[10]; | ||
17 | __be16 pp_size; /* log2(pp_size) */ | ||
18 | char reserved46[12]; | ||
19 | __be16 version; | ||
20 | }; | ||
21 | |||
22 | struct vgda { | ||
23 | __be32 secs; | ||
24 | __be32 usec; | ||
25 | char reserved8[16]; | ||
26 | __be16 numlvs; | ||
27 | __be16 maxlvs; | ||
28 | __be16 pp_size; | ||
29 | __be16 numpvs; | ||
30 | __be16 total_vgdas; | ||
31 | __be16 vgda_size; | ||
32 | }; | ||
33 | |||
34 | struct lvd { | ||
35 | __be16 lv_ix; | ||
36 | __be16 res2; | ||
37 | __be16 res4; | ||
38 | __be16 maxsize; | ||
39 | __be16 lv_state; | ||
40 | __be16 mirror; | ||
41 | __be16 mirror_policy; | ||
42 | __be16 num_lps; | ||
43 | __be16 res10[8]; | ||
44 | }; | ||
45 | |||
46 | struct lvname { | ||
47 | char name[64]; | ||
48 | }; | ||
49 | |||
50 | struct ppe { | ||
51 | __be16 lv_ix; | ||
52 | unsigned short res2; | ||
53 | unsigned short res4; | ||
54 | __be16 lp_ix; | ||
55 | unsigned short res8[12]; | ||
56 | }; | ||
57 | |||
58 | struct pvd { | ||
59 | char reserved0[16]; | ||
60 | __be16 pp_count; | ||
61 | char reserved18[2]; | ||
62 | __be32 psn_part1; | ||
63 | char reserved24[8]; | ||
64 | struct ppe ppe[1016]; | ||
65 | }; | ||
66 | |||
67 | #define LVM_MAXLVS 256 | ||
68 | |||
69 | /** | ||
70 | * last_lba(): return number of last logical block of device | ||
71 | * @bdev: block device | ||
72 | * | ||
73 | * Description: Returns last LBA value on success, 0 on error. | ||
74 | * This is stored (by sd and ide-geometry) in | ||
75 | * the part[0] entry for this disk, and is the number of | ||
76 | * physical sectors available on the disk. | ||
77 | */ | ||
78 | static u64 last_lba(struct block_device *bdev) | ||
79 | { | ||
80 | if (!bdev || !bdev->bd_inode) | ||
81 | return 0; | ||
82 | return (bdev->bd_inode->i_size >> 9) - 1ULL; | ||
83 | } | ||
84 | |||
85 | /** | ||
86 | * read_lba(): Read bytes from disk, starting at given LBA | ||
87 | * @state | ||
88 | * @lba | ||
89 | * @buffer | ||
90 | * @count | ||
91 | * | ||
92 | * Description: Reads @count bytes from @state->bdev into @buffer. | ||
93 | * Returns number of bytes read on success, 0 on error. | ||
94 | */ | ||
95 | static size_t read_lba(struct parsed_partitions *state, u64 lba, u8 *buffer, | ||
96 | size_t count) | ||
97 | { | ||
98 | size_t totalreadcount = 0; | ||
99 | |||
100 | if (!buffer || lba + count / 512 > last_lba(state->bdev)) | ||
101 | return 0; | ||
102 | |||
103 | while (count) { | ||
104 | int copied = 512; | ||
105 | Sector sect; | ||
106 | unsigned char *data = read_part_sector(state, lba++, &sect); | ||
107 | if (!data) | ||
108 | break; | ||
109 | if (copied > count) | ||
110 | copied = count; | ||
111 | memcpy(buffer, data, copied); | ||
112 | put_dev_sector(sect); | ||
113 | buffer += copied; | ||
114 | totalreadcount += copied; | ||
115 | count -= copied; | ||
116 | } | ||
117 | return totalreadcount; | ||
118 | } | ||
119 | |||
120 | /** | ||
121 | * alloc_pvd(): reads physical volume descriptor | ||
122 | * @state | ||
123 | * @lba | ||
124 | * | ||
125 | * Description: Returns pvd on success, NULL on error. | ||
126 | * Allocates space for pvd and fill it with disk blocks at @lba | ||
127 | * Notes: remember to free pvd when you're done! | ||
128 | */ | ||
129 | static struct pvd *alloc_pvd(struct parsed_partitions *state, u32 lba) | ||
130 | { | ||
131 | size_t count = sizeof(struct pvd); | ||
132 | struct pvd *p; | ||
133 | |||
134 | p = kmalloc(count, GFP_KERNEL); | ||
135 | if (!p) | ||
136 | return NULL; | ||
137 | |||
138 | if (read_lba(state, lba, (u8 *) p, count) < count) { | ||
139 | kfree(p); | ||
140 | return NULL; | ||
141 | } | ||
142 | return p; | ||
143 | } | ||
144 | |||
145 | /** | ||
146 | * alloc_lvn(): reads logical volume names | ||
147 | * @state | ||
148 | * @lba | ||
149 | * | ||
150 | * Description: Returns lvn on success, NULL on error. | ||
151 | * Allocates space for lvn and fill it with disk blocks at @lba | ||
152 | * Notes: remember to free lvn when you're done! | ||
153 | */ | ||
154 | static struct lvname *alloc_lvn(struct parsed_partitions *state, u32 lba) | ||
155 | { | ||
156 | size_t count = sizeof(struct lvname) * LVM_MAXLVS; | ||
157 | struct lvname *p; | ||
158 | |||
159 | p = kmalloc(count, GFP_KERNEL); | ||
160 | if (!p) | ||
161 | return NULL; | ||
162 | |||
163 | if (read_lba(state, lba, (u8 *) p, count) < count) { | ||
164 | kfree(p); | ||
165 | return NULL; | ||
166 | } | ||
167 | return p; | ||
168 | } | ||
169 | |||
170 | int aix_partition(struct parsed_partitions *state) | ||
171 | { | ||
172 | int ret = 0; | ||
173 | Sector sect; | ||
174 | unsigned char *d; | ||
175 | u32 pp_bytes_size; | ||
176 | u32 pp_blocks_size = 0; | ||
177 | u32 vgda_sector = 0; | ||
178 | u32 vgda_len = 0; | ||
179 | int numlvs = 0; | ||
180 | struct pvd *pvd; | ||
181 | struct lv_info { | ||
182 | unsigned short pps_per_lv; | ||
183 | unsigned short pps_found; | ||
184 | unsigned char lv_is_contiguous; | ||
185 | } *lvip; | ||
186 | struct lvname *n = NULL; | ||
187 | |||
188 | d = read_part_sector(state, 7, &sect); | ||
189 | if (d) { | ||
190 | struct lvm_rec *p = (struct lvm_rec *)d; | ||
191 | u16 lvm_version = be16_to_cpu(p->version); | ||
192 | char tmp[64]; | ||
193 | |||
194 | if (lvm_version == 1) { | ||
195 | int pp_size_log2 = be16_to_cpu(p->pp_size); | ||
196 | |||
197 | pp_bytes_size = 1 << pp_size_log2; | ||
198 | pp_blocks_size = pp_bytes_size / 512; | ||
199 | snprintf(tmp, sizeof(tmp), | ||
200 | " AIX LVM header version %u found\n", | ||
201 | lvm_version); | ||
202 | vgda_len = be32_to_cpu(p->vgda_len); | ||
203 | vgda_sector = be32_to_cpu(p->vgda_psn[0]); | ||
204 | } else { | ||
205 | snprintf(tmp, sizeof(tmp), | ||
206 | " unsupported AIX LVM version %d found\n", | ||
207 | lvm_version); | ||
208 | } | ||
209 | strlcat(state->pp_buf, tmp, PAGE_SIZE); | ||
210 | put_dev_sector(sect); | ||
211 | } | ||
212 | if (vgda_sector && (d = read_part_sector(state, vgda_sector, &sect))) { | ||
213 | struct vgda *p = (struct vgda *)d; | ||
214 | |||
215 | numlvs = be16_to_cpu(p->numlvs); | ||
216 | put_dev_sector(sect); | ||
217 | } | ||
218 | lvip = kzalloc(sizeof(struct lv_info) * state->limit, GFP_KERNEL); | ||
219 | if (!lvip) | ||
220 | return 0; | ||
221 | if (numlvs && (d = read_part_sector(state, vgda_sector + 1, &sect))) { | ||
222 | struct lvd *p = (struct lvd *)d; | ||
223 | int i; | ||
224 | |||
225 | n = alloc_lvn(state, vgda_sector + vgda_len - 33); | ||
226 | if (n) { | ||
227 | int foundlvs = 0; | ||
228 | |||
229 | for (i = 0; foundlvs < numlvs && i < state->limit; i += 1) { | ||
230 | lvip[i].pps_per_lv = be16_to_cpu(p[i].num_lps); | ||
231 | if (lvip[i].pps_per_lv) | ||
232 | foundlvs += 1; | ||
233 | } | ||
234 | } | ||
235 | put_dev_sector(sect); | ||
236 | } | ||
237 | pvd = alloc_pvd(state, vgda_sector + 17); | ||
238 | if (pvd) { | ||
239 | int numpps = be16_to_cpu(pvd->pp_count); | ||
240 | int psn_part1 = be32_to_cpu(pvd->psn_part1); | ||
241 | int i; | ||
242 | int cur_lv_ix = -1; | ||
243 | int next_lp_ix = 1; | ||
244 | int lp_ix; | ||
245 | |||
246 | for (i = 0; i < numpps; i += 1) { | ||
247 | struct ppe *p = pvd->ppe + i; | ||
248 | unsigned int lv_ix; | ||
249 | |||
250 | lp_ix = be16_to_cpu(p->lp_ix); | ||
251 | if (!lp_ix) { | ||
252 | next_lp_ix = 1; | ||
253 | continue; | ||
254 | } | ||
255 | lv_ix = be16_to_cpu(p->lv_ix) - 1; | ||
256 | if (lv_ix > state->limit) { | ||
257 | cur_lv_ix = -1; | ||
258 | continue; | ||
259 | } | ||
260 | lvip[lv_ix].pps_found += 1; | ||
261 | if (lp_ix == 1) { | ||
262 | cur_lv_ix = lv_ix; | ||
263 | next_lp_ix = 1; | ||
264 | } else if (lv_ix != cur_lv_ix || lp_ix != next_lp_ix) { | ||
265 | next_lp_ix = 1; | ||
266 | continue; | ||
267 | } | ||
268 | if (lp_ix == lvip[lv_ix].pps_per_lv) { | ||
269 | char tmp[70]; | ||
270 | |||
271 | put_partition(state, lv_ix + 1, | ||
272 | (i + 1 - lp_ix) * pp_blocks_size + psn_part1, | ||
273 | lvip[lv_ix].pps_per_lv * pp_blocks_size); | ||
274 | snprintf(tmp, sizeof(tmp), " <%s>\n", | ||
275 | n[lv_ix].name); | ||
276 | strlcat(state->pp_buf, tmp, PAGE_SIZE); | ||
277 | lvip[lv_ix].lv_is_contiguous = 1; | ||
278 | ret = 1; | ||
279 | next_lp_ix = 1; | ||
280 | } else | ||
281 | next_lp_ix += 1; | ||
282 | } | ||
283 | for (i = 0; i < state->limit; i += 1) | ||
284 | if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) | ||
285 | pr_warn("partition %s (%u pp's found) is " | ||
286 | "not contiguous\n", | ||
287 | n[i].name, lvip[i].pps_found); | ||
288 | kfree(pvd); | ||
289 | } | ||
290 | kfree(n); | ||
291 | kfree(lvip); | ||
292 | return ret; | ||
293 | } | ||
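A quick worked example of the put_partition() arithmetic above, with illustrative numbers rather than values from any real disk: if the LVM record reports pp_size = 20, each physical partition is 2^20 bytes, so pp_blocks_size = 1048576 / 512 = 2048 sectors. With psn_part1 = 4608 and a logical volume whose physical partitions occupy ppe[] entries 10..13 (pps_per_lv = 4), the loop reaches lp_ix == pps_per_lv at i = 13 and reports a partition starting at (13 + 1 - 4) * 2048 + 4608 = 25088 sectors, with a length of 4 * 2048 = 8192 sectors.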
diff --git a/block/partitions/aix.h b/block/partitions/aix.h new file mode 100644 index 000000000000..e0c66a987523 --- /dev/null +++ b/block/partitions/aix.h | |||
@@ -0,0 +1 @@ | |||
extern int aix_partition(struct parsed_partitions *state); | |||
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c index 7681cd295ab8..9123f250b425 100644 --- a/block/partitions/msdos.c +++ b/block/partitions/msdos.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include "check.h" | 23 | #include "check.h" |
24 | #include "msdos.h" | 24 | #include "msdos.h" |
25 | #include "efi.h" | 25 | #include "efi.h" |
26 | #include "aix.h" | ||
26 | 27 | ||
27 | /* | 28 | /* |
28 | * Many architectures don't like unaligned accesses, while | 29 | * Many architectures don't like unaligned accesses, while |
@@ -90,7 +91,7 @@ static int aix_magic_present(struct parsed_partitions *state, unsigned char *p) | |||
90 | if (d[0] == '_' && d[1] == 'L' && d[2] == 'V' && d[3] == 'M') | 91 | if (d[0] == '_' && d[1] == 'L' && d[2] == 'V' && d[3] == 'M') |
91 | ret = 1; | 92 | ret = 1; |
92 | put_dev_sector(sect); | 93 | put_dev_sector(sect); |
93 | }; | 94 | } |
94 | return ret; | 95 | return ret; |
95 | } | 96 | } |
96 | 97 | ||
@@ -142,7 +143,7 @@ static void parse_extended(struct parsed_partitions *state, | |||
142 | return; | 143 | return; |
143 | 144 | ||
144 | if (!msdos_magic_present(data + 510)) | 145 | if (!msdos_magic_present(data + 510)) |
145 | goto done; | 146 | goto done; |
146 | 147 | ||
147 | p = (struct partition *) (data + 0x1be); | 148 | p = (struct partition *) (data + 0x1be); |
148 | 149 | ||
@@ -155,7 +156,7 @@ static void parse_extended(struct parsed_partitions *state, | |||
155 | * and OS/2 seems to use all four entries. | 156 | * and OS/2 seems to use all four entries. |
156 | */ | 157 | */ |
157 | 158 | ||
158 | /* | 159 | /* |
159 | * First process the data partition(s) | 160 | * First process the data partition(s) |
160 | */ | 161 | */ |
161 | for (i=0; i<4; i++, p++) { | 162 | for (i=0; i<4; i++, p++) { |
@@ -263,7 +264,7 @@ static void parse_solaris_x86(struct parsed_partitions *state, | |||
263 | } | 264 | } |
264 | 265 | ||
265 | #if defined(CONFIG_BSD_DISKLABEL) | 266 | #if defined(CONFIG_BSD_DISKLABEL) |
266 | /* | 267 | /* |
267 | * Create devices for BSD partitions listed in a disklabel, under a | 268 | * Create devices for BSD partitions listed in a disklabel, under a |
268 | * dos-like partition. See parse_extended() for more information. | 269 | * dos-like partition. See parse_extended() for more information. |
269 | */ | 270 | */ |
@@ -294,7 +295,7 @@ static void parse_bsd(struct parsed_partitions *state, | |||
294 | 295 | ||
295 | if (state->next == state->limit) | 296 | if (state->next == state->limit) |
296 | break; | 297 | break; |
297 | if (p->p_fstype == BSD_FS_UNUSED) | 298 | if (p->p_fstype == BSD_FS_UNUSED) |
298 | continue; | 299 | continue; |
299 | bsd_start = le32_to_cpu(p->p_offset); | 300 | bsd_start = le32_to_cpu(p->p_offset); |
300 | bsd_size = le32_to_cpu(p->p_size); | 301 | bsd_size = le32_to_cpu(p->p_size); |
@@ -441,7 +442,7 @@ static struct { | |||
441 | {NEW_SOLARIS_X86_PARTITION, parse_solaris_x86}, | 442 | {NEW_SOLARIS_X86_PARTITION, parse_solaris_x86}, |
442 | {0, NULL}, | 443 | {0, NULL}, |
443 | }; | 444 | }; |
444 | 445 | ||
445 | int msdos_partition(struct parsed_partitions *state) | 446 | int msdos_partition(struct parsed_partitions *state) |
446 | { | 447 | { |
447 | sector_t sector_size = bdev_logical_block_size(state->bdev) / 512; | 448 | sector_t sector_size = bdev_logical_block_size(state->bdev) / 512; |
@@ -462,8 +463,12 @@ int msdos_partition(struct parsed_partitions *state) | |||
462 | */ | 463 | */ |
463 | if (aix_magic_present(state, data)) { | 464 | if (aix_magic_present(state, data)) { |
464 | put_dev_sector(sect); | 465 | put_dev_sector(sect); |
466 | #ifdef CONFIG_AIX_PARTITION | ||
467 | return aix_partition(state); | ||
468 | #else | ||
465 | strlcat(state->pp_buf, " [AIX]", PAGE_SIZE); | 469 | strlcat(state->pp_buf, " [AIX]", PAGE_SIZE); |
466 | return 0; | 470 | return 0; |
471 | #endif | ||
467 | } | 472 | } |
468 | 473 | ||
469 | if (!msdos_magic_present(data + 510)) { | 474 | if (!msdos_magic_present(data + 510)) { |
diff --git a/crypto/Kconfig b/crypto/Kconfig index 904ffe838567..69ce573f1224 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig | |||
@@ -1336,6 +1336,22 @@ config CRYPTO_842 | |||
1336 | help | 1336 | help |
1337 | This is the 842 algorithm. | 1337 | This is the 842 algorithm. |
1338 | 1338 | ||
1339 | config CRYPTO_LZ4 | ||
1340 | tristate "LZ4 compression algorithm" | ||
1341 | select CRYPTO_ALGAPI | ||
1342 | select LZ4_COMPRESS | ||
1343 | select LZ4_DECOMPRESS | ||
1344 | help | ||
1345 | This is the LZ4 algorithm. | ||
1346 | |||
1347 | config CRYPTO_LZ4HC | ||
1348 | tristate "LZ4HC compression algorithm" | ||
1349 | select CRYPTO_ALGAPI | ||
1350 | select LZ4HC_COMPRESS | ||
1351 | select LZ4_DECOMPRESS | ||
1352 | help | ||
1353 | This is the LZ4 high compression mode algorithm. | ||
1354 | |||
1339 | comment "Random Number Generation" | 1355 | comment "Random Number Generation" |
1340 | 1356 | ||
1341 | config CRYPTO_ANSI_CPRNG | 1357 | config CRYPTO_ANSI_CPRNG |
diff --git a/crypto/Makefile b/crypto/Makefile index 62af87df8729..2d5ed08a239f 100644 --- a/crypto/Makefile +++ b/crypto/Makefile | |||
@@ -86,6 +86,8 @@ obj-$(CONFIG_CRYPTO_CRC32) += crc32.o | |||
86 | obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif.o | 86 | obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif.o |
87 | obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o | 87 | obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o |
88 | obj-$(CONFIG_CRYPTO_LZO) += lzo.o | 88 | obj-$(CONFIG_CRYPTO_LZO) += lzo.o |
89 | obj-$(CONFIG_CRYPTO_LZ4) += lz4.o | ||
90 | obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o | ||
89 | obj-$(CONFIG_CRYPTO_842) += 842.o | 91 | obj-$(CONFIG_CRYPTO_842) += 842.o |
90 | obj-$(CONFIG_CRYPTO_RNG2) += rng.o | 92 | obj-$(CONFIG_CRYPTO_RNG2) += rng.o |
91 | obj-$(CONFIG_CRYPTO_RNG2) += krng.o | 93 | obj-$(CONFIG_CRYPTO_RNG2) += krng.o |
diff --git a/crypto/lz4.c b/crypto/lz4.c new file mode 100644 index 000000000000..4586dd15b0d8 --- /dev/null +++ b/crypto/lz4.c | |||
@@ -0,0 +1,106 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Copyright (c) 2013 Chanho Min <chanho.min@lge.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published by | ||
8 | * the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along with | ||
16 | * this program; if not, write to the Free Software Foundation, Inc., 51 | ||
17 | * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #include <linux/init.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/crypto.h> | ||
24 | #include <linux/vmalloc.h> | ||
25 | #include <linux/lz4.h> | ||
26 | |||
27 | struct lz4_ctx { | ||
28 | void *lz4_comp_mem; | ||
29 | }; | ||
30 | |||
31 | static int lz4_init(struct crypto_tfm *tfm) | ||
32 | { | ||
33 | struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); | ||
34 | |||
35 | ctx->lz4_comp_mem = vmalloc(LZ4_MEM_COMPRESS); | ||
36 | if (!ctx->lz4_comp_mem) | ||
37 | return -ENOMEM; | ||
38 | |||
39 | return 0; | ||
40 | } | ||
41 | |||
42 | static void lz4_exit(struct crypto_tfm *tfm) | ||
43 | { | ||
44 | struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); | ||
45 | vfree(ctx->lz4_comp_mem); | ||
46 | } | ||
47 | |||
48 | static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src, | ||
49 | unsigned int slen, u8 *dst, unsigned int *dlen) | ||
50 | { | ||
51 | struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); | ||
52 | size_t tmp_len = *dlen; | ||
53 | int err; | ||
54 | |||
55 | err = lz4_compress(src, slen, dst, &tmp_len, ctx->lz4_comp_mem); | ||
56 | |||
57 | if (err < 0) | ||
58 | return -EINVAL; | ||
59 | |||
60 | *dlen = tmp_len; | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, | ||
65 | unsigned int slen, u8 *dst, unsigned int *dlen) | ||
66 | { | ||
67 | int err; | ||
68 | size_t tmp_len = *dlen; | ||
69 | size_t __slen = slen; | ||
70 | |||
71 | err = lz4_decompress(src, &__slen, dst, tmp_len); | ||
72 | if (err < 0) | ||
73 | return -EINVAL; | ||
74 | |||
75 | *dlen = tmp_len; | ||
76 | return err; | ||
77 | } | ||
78 | |||
79 | static struct crypto_alg alg_lz4 = { | ||
80 | .cra_name = "lz4", | ||
81 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, | ||
82 | .cra_ctxsize = sizeof(struct lz4_ctx), | ||
83 | .cra_module = THIS_MODULE, | ||
84 | .cra_list = LIST_HEAD_INIT(alg_lz4.cra_list), | ||
85 | .cra_init = lz4_init, | ||
86 | .cra_exit = lz4_exit, | ||
87 | .cra_u = { .compress = { | ||
88 | .coa_compress = lz4_compress_crypto, | ||
89 | .coa_decompress = lz4_decompress_crypto } } | ||
90 | }; | ||
91 | |||
92 | static int __init lz4_mod_init(void) | ||
93 | { | ||
94 | return crypto_register_alg(&alg_lz4); | ||
95 | } | ||
96 | |||
97 | static void __exit lz4_mod_fini(void) | ||
98 | { | ||
99 | crypto_unregister_alg(&alg_lz4); | ||
100 | } | ||
101 | |||
102 | module_init(lz4_mod_init); | ||
103 | module_exit(lz4_mod_fini); | ||
104 | |||
105 | MODULE_LICENSE("GPL"); | ||
106 | MODULE_DESCRIPTION("LZ4 Compression Algorithm"); | ||
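A minimal in-kernel usage sketch of the compressor registered above, going through the generic crypto_comp interface; the helper, its buffers, and the caller are assumptions, not part of this commit.

#include <linux/crypto.h>
#include <linux/err.h>

/* Compress slen bytes of src into dst (dlen_max bytes available) with the
 * "lz4" algorithm; returns the compressed length or a negative errno. */
static int lz4_compress_example(const u8 *src, unsigned int slen,
				u8 *dst, unsigned int dlen_max)
{
	struct crypto_comp *tfm = crypto_alloc_comp("lz4", 0, 0);
	unsigned int dlen = dlen_max;
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_comp_compress(tfm, src, slen, dst, &dlen);
	crypto_free_comp(tfm);

	return err ? err : dlen;
}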
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c new file mode 100644 index 000000000000..151ba31d34e3 --- /dev/null +++ b/crypto/lz4hc.c | |||
@@ -0,0 +1,106 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Copyright (c) 2013 Chanho Min <chanho.min@lge.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published by | ||
8 | * the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along with | ||
16 | * this program; if not, write to the Free Software Foundation, Inc., 51 | ||
17 | * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
18 | * | ||
19 | */ | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/crypto.h> | ||
23 | #include <linux/vmalloc.h> | ||
24 | #include <linux/lz4.h> | ||
25 | |||
26 | struct lz4hc_ctx { | ||
27 | void *lz4hc_comp_mem; | ||
28 | }; | ||
29 | |||
30 | static int lz4hc_init(struct crypto_tfm *tfm) | ||
31 | { | ||
32 | struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); | ||
33 | |||
34 | ctx->lz4hc_comp_mem = vmalloc(LZ4HC_MEM_COMPRESS); | ||
35 | if (!ctx->lz4hc_comp_mem) | ||
36 | return -ENOMEM; | ||
37 | |||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | static void lz4hc_exit(struct crypto_tfm *tfm) | ||
42 | { | ||
43 | struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); | ||
44 | |||
45 | vfree(ctx->lz4hc_comp_mem); | ||
46 | } | ||
47 | |||
48 | static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src, | ||
49 | unsigned int slen, u8 *dst, unsigned int *dlen) | ||
50 | { | ||
51 | struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); | ||
52 | size_t tmp_len = *dlen; | ||
53 | int err; | ||
54 | |||
55 | err = lz4hc_compress(src, slen, dst, &tmp_len, ctx->lz4hc_comp_mem); | ||
56 | |||
57 | if (err < 0) | ||
58 | return -EINVAL; | ||
59 | |||
60 | *dlen = tmp_len; | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, | ||
65 | unsigned int slen, u8 *dst, unsigned int *dlen) | ||
66 | { | ||
67 | int err; | ||
68 | size_t tmp_len = *dlen; | ||
69 | size_t __slen = slen; | ||
70 | |||
71 | err = lz4_decompress(src, &__slen, dst, tmp_len); | ||
72 | if (err < 0) | ||
73 | return -EINVAL; | ||
74 | |||
75 | *dlen = tmp_len; | ||
76 | return err; | ||
77 | } | ||
78 | |||
79 | static struct crypto_alg alg_lz4hc = { | ||
80 | .cra_name = "lz4hc", | ||
81 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, | ||
82 | .cra_ctxsize = sizeof(struct lz4hc_ctx), | ||
83 | .cra_module = THIS_MODULE, | ||
84 | .cra_list = LIST_HEAD_INIT(alg_lz4hc.cra_list), | ||
85 | .cra_init = lz4hc_init, | ||
86 | .cra_exit = lz4hc_exit, | ||
87 | .cra_u = { .compress = { | ||
88 | .coa_compress = lz4hc_compress_crypto, | ||
89 | .coa_decompress = lz4hc_decompress_crypto } } | ||
90 | }; | ||
91 | |||
92 | static int __init lz4hc_mod_init(void) | ||
93 | { | ||
94 | return crypto_register_alg(&alg_lz4hc); | ||
95 | } | ||
96 | |||
97 | static void __exit lz4hc_mod_fini(void) | ||
98 | { | ||
99 | crypto_unregister_alg(&alg_lz4hc); | ||
100 | } | ||
101 | |||
102 | module_init(lz4hc_mod_init); | ||
103 | module_exit(lz4hc_mod_fini); | ||
104 | |||
105 | MODULE_LICENSE("GPL"); | ||
106 | MODULE_DESCRIPTION("LZ4HC Compression Algorithm"); | ||
diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c index c68969708068..04e6d6a27994 100644 --- a/drivers/char/mwave/tp3780i.c +++ b/drivers/char/mwave/tp3780i.c | |||
@@ -479,6 +479,7 @@ int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities | |||
479 | PRINTK_2(TRACE_TP3780I, | 479 | PRINTK_2(TRACE_TP3780I, |
480 | "tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData); | 480 | "tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData); |
481 | 481 | ||
482 | memset(pAbilities, 0, sizeof(*pAbilities)); | ||
482 | /* fill out standard constant fields */ | 483 | /* fill out standard constant fields */ |
483 | pAbilities->instr_per_sec = pBDData->rDspSettings.uIps; | 484 | pAbilities->instr_per_sec = pBDData->rDspSettings.uIps; |
484 | pAbilities->data_size = pBDData->rDspSettings.uDStoreSize; | 485 | pAbilities->data_size = pBDData->rDspSettings.uDStoreSize; |
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 5b2b5e61e4f9..661dc3eb1d66 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -1112,64 +1112,6 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained) | |||
1112 | return sg_nents; | 1112 | return sg_nents; |
1113 | } | 1113 | } |
1114 | 1114 | ||
1115 | /** | ||
1116 | * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer | ||
1117 | * @sgl: The SG list | ||
1118 | * @nents: Number of SG entries | ||
1119 | * @buf: Where to copy to | ||
1120 | * @buflen: The number of bytes to copy | ||
1121 | * @skip: The number of bytes to skip before copying. | ||
1122 | * Note: skip + buflen should equal SG total size. | ||
1123 | * | ||
1124 | * Returns the number of copied bytes. | ||
1125 | * | ||
1126 | **/ | ||
1127 | static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents, | ||
1128 | void *buf, size_t buflen, unsigned int skip) | ||
1129 | { | ||
1130 | unsigned int offset = 0; | ||
1131 | unsigned int boffset = 0; | ||
1132 | struct sg_mapping_iter miter; | ||
1133 | unsigned long flags; | ||
1134 | unsigned int sg_flags = SG_MITER_ATOMIC; | ||
1135 | size_t total_buffer = buflen + skip; | ||
1136 | |||
1137 | sg_flags |= SG_MITER_FROM_SG; | ||
1138 | |||
1139 | sg_miter_start(&miter, sgl, nents, sg_flags); | ||
1140 | |||
1141 | local_irq_save(flags); | ||
1142 | |||
1143 | while (sg_miter_next(&miter) && offset < total_buffer) { | ||
1144 | unsigned int len; | ||
1145 | unsigned int ignore; | ||
1146 | |||
1147 | if ((offset + miter.length) > skip) { | ||
1148 | if (offset < skip) { | ||
1149 | /* Copy part of this segment */ | ||
1150 | ignore = skip - offset; | ||
1151 | len = miter.length - ignore; | ||
1152 | if (boffset + len > buflen) | ||
1153 | len = buflen - boffset; | ||
1154 | memcpy(buf + boffset, miter.addr + ignore, len); | ||
1155 | } else { | ||
1156 | /* Copy all of this segment (up to buflen) */ | ||
1157 | len = miter.length; | ||
1158 | if (boffset + len > buflen) | ||
1159 | len = buflen - boffset; | ||
1160 | memcpy(buf + boffset, miter.addr, len); | ||
1161 | } | ||
1162 | boffset += len; | ||
1163 | } | ||
1164 | offset += miter.length; | ||
1165 | } | ||
1166 | |||
1167 | sg_miter_stop(&miter); | ||
1168 | |||
1169 | local_irq_restore(flags); | ||
1170 | return boffset; | ||
1171 | } | ||
1172 | |||
1173 | /* | 1115 | /* |
1174 | * allocate and map the extended descriptor | 1116 | * allocate and map the extended descriptor |
1175 | */ | 1117 | */ |
@@ -1800,7 +1742,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) | |||
1800 | 1742 | ||
1801 | if (to_hash_later) { | 1743 | if (to_hash_later) { |
1802 | int nents = sg_count(areq->src, nbytes, &chained); | 1744 | int nents = sg_count(areq->src, nbytes, &chained); |
1803 | sg_copy_end_to_buffer(areq->src, nents, | 1745 | sg_pcopy_to_buffer(areq->src, nents, |
1804 | req_ctx->bufnext, | 1746 | req_ctx->bufnext, |
1805 | to_hash_later, | 1747 | to_hash_later, |
1806 | nbytes - to_hash_later); | 1748 | nbytes - to_hash_later); |
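For context, a small self-contained sketch of what the generic replacement does; the helper name and the single-entry SG list are assumptions. sg_pcopy_to_buffer() copies buflen bytes starting at an explicit skip offset, which covers the tail-copy case the removed driver-local sg_copy_end_to_buffer() handled.

#include <linux/scatterlist.h>

/* Copy the last "tail" bytes of a len-byte buffer, viewed through a
 * one-entry SG list, into out; equivalent to the old tail-copy helper. */
static size_t copy_sg_tail(void *data, size_t len, void *out, size_t tail)
{
	struct scatterlist sg;

	sg_init_one(&sg, data, len);
	return sg_pcopy_to_buffer(&sg, 1, out, tail, len - tail);
}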
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index c9cc08c2dbba..cc727ec78c4e 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
@@ -1017,7 +1017,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) | |||
1017 | struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST]; | 1017 | struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST]; |
1018 | struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1]; | 1018 | struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1]; |
1019 | dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1]; | 1019 | dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1]; |
1020 | dma_addr_t dma_addr, dest_dma; | 1020 | dma_addr_t dest_dma; |
1021 | struct dma_async_tx_descriptor *tx; | 1021 | struct dma_async_tx_descriptor *tx; |
1022 | struct dma_chan *dma_chan; | 1022 | struct dma_chan *dma_chan; |
1023 | dma_cookie_t cookie; | 1023 | dma_cookie_t cookie; |
@@ -1516,7 +1516,7 @@ static int iop_adma_probe(struct platform_device *pdev) | |||
1516 | goto err_free_iop_chan; | 1516 | goto err_free_iop_chan; |
1517 | } | 1517 | } |
1518 | 1518 | ||
1519 | dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s%s)\n", | 1519 | dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s)\n", |
1520 | dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "", | 1520 | dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "", |
1521 | dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "", | 1521 | dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "", |
1522 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", | 1522 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", |
diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c index 9144a6beed92..6ba351477132 100644 --- a/drivers/iommu/msm_iommu_dev.c +++ b/drivers/iommu/msm_iommu_dev.c | |||
@@ -291,25 +291,20 @@ static int msm_iommu_ctx_probe(struct platform_device *pdev) | |||
291 | { | 291 | { |
292 | struct msm_iommu_ctx_dev *c = pdev->dev.platform_data; | 292 | struct msm_iommu_ctx_dev *c = pdev->dev.platform_data; |
293 | struct msm_iommu_drvdata *drvdata; | 293 | struct msm_iommu_drvdata *drvdata; |
294 | struct msm_iommu_ctx_drvdata *ctx_drvdata = NULL; | 294 | struct msm_iommu_ctx_drvdata *ctx_drvdata; |
295 | int i, ret; | 295 | int i, ret; |
296 | if (!c || !pdev->dev.parent) { | ||
297 | ret = -EINVAL; | ||
298 | goto fail; | ||
299 | } | ||
300 | 296 | ||
301 | drvdata = dev_get_drvdata(pdev->dev.parent); | 297 | if (!c || !pdev->dev.parent) |
298 | return -EINVAL; | ||
302 | 299 | ||
303 | if (!drvdata) { | 300 | drvdata = dev_get_drvdata(pdev->dev.parent); |
304 | ret = -ENODEV; | 301 | if (!drvdata) |
305 | goto fail; | 302 | return -ENODEV; |
306 | } | ||
307 | 303 | ||
308 | ctx_drvdata = kzalloc(sizeof(*ctx_drvdata), GFP_KERNEL); | 304 | ctx_drvdata = kzalloc(sizeof(*ctx_drvdata), GFP_KERNEL); |
309 | if (!ctx_drvdata) { | 305 | if (!ctx_drvdata) |
310 | ret = -ENOMEM; | 306 | return -ENOMEM; |
311 | goto fail; | 307 | |
312 | } | ||
313 | ctx_drvdata->num = c->num; | 308 | ctx_drvdata->num = c->num; |
314 | ctx_drvdata->pdev = pdev; | 309 | ctx_drvdata->pdev = pdev; |
315 | 310 | ||
@@ -403,6 +398,7 @@ static int __init msm_iommu_driver_init(void) | |||
403 | 398 | ||
404 | ret = platform_driver_register(&msm_iommu_ctx_driver); | 399 | ret = platform_driver_register(&msm_iommu_ctx_driver); |
405 | if (ret != 0) { | 400 | if (ret != 0) { |
401 | platform_driver_unregister(&msm_iommu_driver); | ||
406 | pr_err("Failed to register IOMMU context driver\n"); | 402 | pr_err("Failed to register IOMMU context driver\n"); |
407 | goto error; | 403 | goto error; |
408 | } | 404 | } |
diff --git a/drivers/power/reset/restart-poweroff.c b/drivers/power/reset/restart-poweroff.c index 059cd1501e2a..5758033e0c16 100644 --- a/drivers/power/reset/restart-poweroff.c +++ b/drivers/power/reset/restart-poweroff.c | |||
@@ -15,11 +15,12 @@ | |||
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/of_platform.h> | 16 | #include <linux/of_platform.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/reboot.h> | ||
18 | #include <asm/system_misc.h> | 19 | #include <asm/system_misc.h> |
19 | 20 | ||
20 | static void restart_poweroff_do_poweroff(void) | 21 | static void restart_poweroff_do_poweroff(void) |
21 | { | 22 | { |
22 | arm_pm_restart('h', NULL); | 23 | arm_pm_restart(REBOOT_HARD, NULL); |
23 | } | 24 | } |
24 | 25 | ||
25 | static int restart_poweroff_probe(struct platform_device *pdev) | 26 | static int restart_poweroff_probe(struct platform_device *pdev) |
diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c index 469e6962b2cf..476aa495c110 100644 --- a/drivers/power/reset/vexpress-poweroff.c +++ b/drivers/power/reset/vexpress-poweroff.c | |||
@@ -48,7 +48,7 @@ static void vexpress_power_off(void) | |||
48 | 48 | ||
49 | static struct device *vexpress_restart_device; | 49 | static struct device *vexpress_restart_device; |
50 | 50 | ||
51 | static void vexpress_restart(char str, const char *cmd) | 51 | static void vexpress_restart(enum reboot_mode reboot_mode, const char *cmd) |
52 | { | 52 | { |
53 | vexpress_reset_do(vexpress_restart_device, "restart"); | 53 | vexpress_reset_do(vexpress_restart_device, "restart"); |
54 | } | 54 | } |
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c index 90a3e864b8fe..767fee2ab340 100644 --- a/drivers/rtc/rtc-stmp3xxx.c +++ b/drivers/rtc/rtc-stmp3xxx.c | |||
@@ -261,7 +261,12 @@ static int stmp3xxx_rtc_probe(struct platform_device *pdev) | |||
261 | 261 | ||
262 | platform_set_drvdata(pdev, rtc_data); | 262 | platform_set_drvdata(pdev, rtc_data); |
263 | 263 | ||
264 | stmp_reset_block(rtc_data->io); | 264 | err = stmp_reset_block(rtc_data->io); |
265 | if (err) { | ||
266 | dev_err(&pdev->dev, "stmp_reset_block failed: %d\n", err); | ||
267 | return err; | ||
268 | } | ||
269 | |||
265 | writel(STMP3XXX_RTC_PERSISTENT0_ALARM_EN | | 270 | writel(STMP3XXX_RTC_PERSISTENT0_ALARM_EN | |
266 | STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN | | 271 | STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN | |
267 | STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE, | 272 | STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE, |
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 0a537a0515ca..d055450c2a4a 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
@@ -439,10 +439,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr, | |||
439 | 439 | ||
440 | act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents, | 440 | act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents, |
441 | arr, arr_len); | 441 | arr, arr_len); |
442 | if (sdb->resid) | 442 | sdb->resid = scsi_bufflen(scp) - act_len; |
443 | sdb->resid -= act_len; | ||
444 | else | ||
445 | sdb->resid = scsi_bufflen(scp) - act_len; | ||
446 | 443 | ||
447 | return 0; | 444 | return 0; |
448 | } | 445 | } |
@@ -1693,24 +1690,48 @@ static int check_device_access_params(struct sdebug_dev_info *devi, | |||
1693 | return 0; | 1690 | return 0; |
1694 | } | 1691 | } |
1695 | 1692 | ||
1693 | /* Returns number of bytes copied or -1 if error. */ | ||
1696 | static int do_device_access(struct scsi_cmnd *scmd, | 1694 | static int do_device_access(struct scsi_cmnd *scmd, |
1697 | struct sdebug_dev_info *devi, | 1695 | struct sdebug_dev_info *devi, |
1698 | unsigned long long lba, unsigned int num, int write) | 1696 | unsigned long long lba, unsigned int num, int write) |
1699 | { | 1697 | { |
1700 | int ret; | 1698 | int ret; |
1701 | unsigned long long block, rest = 0; | 1699 | unsigned long long block, rest = 0; |
1702 | int (*func)(struct scsi_cmnd *, unsigned char *, int); | 1700 | struct scsi_data_buffer *sdb; |
1701 | enum dma_data_direction dir; | ||
1702 | size_t (*func)(struct scatterlist *, unsigned int, void *, size_t, | ||
1703 | off_t); | ||
1704 | |||
1705 | if (write) { | ||
1706 | sdb = scsi_out(scmd); | ||
1707 | dir = DMA_TO_DEVICE; | ||
1708 | func = sg_pcopy_to_buffer; | ||
1709 | } else { | ||
1710 | sdb = scsi_in(scmd); | ||
1711 | dir = DMA_FROM_DEVICE; | ||
1712 | func = sg_pcopy_from_buffer; | ||
1713 | } | ||
1703 | 1714 | ||
1704 | func = write ? fetch_to_dev_buffer : fill_from_dev_buffer; | 1715 | if (!sdb->length) |
1716 | return 0; | ||
1717 | if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir)) | ||
1718 | return -1; | ||
1705 | 1719 | ||
1706 | block = do_div(lba, sdebug_store_sectors); | 1720 | block = do_div(lba, sdebug_store_sectors); |
1707 | if (block + num > sdebug_store_sectors) | 1721 | if (block + num > sdebug_store_sectors) |
1708 | rest = block + num - sdebug_store_sectors; | 1722 | rest = block + num - sdebug_store_sectors; |
1709 | 1723 | ||
1710 | ret = func(scmd, fake_storep + (block * scsi_debug_sector_size), | 1724 | ret = func(sdb->table.sgl, sdb->table.nents, |
1711 | (num - rest) * scsi_debug_sector_size); | 1725 | fake_storep + (block * scsi_debug_sector_size), |
1712 | if (!ret && rest) | 1726 | (num - rest) * scsi_debug_sector_size, 0); |
1713 | ret = func(scmd, fake_storep, rest * scsi_debug_sector_size); | 1727 | if (ret != (num - rest) * scsi_debug_sector_size) |
1728 | return ret; | ||
1729 | |||
1730 | if (rest) { | ||
1731 | ret += func(sdb->table.sgl, sdb->table.nents, | ||
1732 | fake_storep, rest * scsi_debug_sector_size, | ||
1733 | (num - rest) * scsi_debug_sector_size); | ||
1734 | } | ||
1714 | 1735 | ||
1715 | return ret; | 1736 | return ret; |
1716 | } | 1737 | } |
@@ -1849,7 +1870,12 @@ static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba, | |||
1849 | read_lock_irqsave(&atomic_rw, iflags); | 1870 | read_lock_irqsave(&atomic_rw, iflags); |
1850 | ret = do_device_access(SCpnt, devip, lba, num, 0); | 1871 | ret = do_device_access(SCpnt, devip, lba, num, 0); |
1851 | read_unlock_irqrestore(&atomic_rw, iflags); | 1872 | read_unlock_irqrestore(&atomic_rw, iflags); |
1852 | return ret; | 1873 | if (ret == -1) |
1874 | return DID_ERROR << 16; | ||
1875 | |||
1876 | scsi_in(SCpnt)->resid = scsi_bufflen(SCpnt) - ret; | ||
1877 | |||
1878 | return 0; | ||
1853 | } | 1879 | } |
1854 | 1880 | ||
1855 | void dump_sector(unsigned char *buf, int len) | 1881 | void dump_sector(unsigned char *buf, int len) |
diff --git a/fs/fat/fat.h b/fs/fat/fat.h index 21664fcf3616..4241e6f39e86 100644 --- a/fs/fat/fat.h +++ b/fs/fat/fat.h | |||
@@ -86,6 +86,7 @@ struct msdos_sb_info { | |||
86 | const void *dir_ops; /* Opaque; default directory operations */ | 86 | const void *dir_ops; /* Opaque; default directory operations */ |
87 | int dir_per_block; /* dir entries per block */ | 87 | int dir_per_block; /* dir entries per block */ |
88 | int dir_per_block_bits; /* log2(dir_per_block) */ | 88 | int dir_per_block_bits; /* log2(dir_per_block) */ |
89 | unsigned int vol_id; /*volume ID*/ | ||
89 | 90 | ||
90 | int fatent_shift; | 91 | int fatent_shift; |
91 | struct fatent_operations *fatent_ops; | 92 | struct fatent_operations *fatent_ops; |
diff --git a/fs/fat/file.c b/fs/fat/file.c index b0b632e50ddb..9b104f543056 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c | |||
@@ -114,6 +114,12 @@ out: | |||
114 | return err; | 114 | return err; |
115 | } | 115 | } |
116 | 116 | ||
117 | static int fat_ioctl_get_volume_id(struct inode *inode, u32 __user *user_attr) | ||
118 | { | ||
119 | struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); | ||
120 | return put_user(sbi->vol_id, user_attr); | ||
121 | } | ||
122 | |||
117 | long fat_generic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | 123 | long fat_generic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
118 | { | 124 | { |
119 | struct inode *inode = file_inode(filp); | 125 | struct inode *inode = file_inode(filp); |
@@ -124,6 +130,8 @@ long fat_generic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
124 | return fat_ioctl_get_attributes(inode, user_attr); | 130 | return fat_ioctl_get_attributes(inode, user_attr); |
125 | case FAT_IOCTL_SET_ATTRIBUTES: | 131 | case FAT_IOCTL_SET_ATTRIBUTES: |
126 | return fat_ioctl_set_attributes(filp, user_attr); | 132 | return fat_ioctl_set_attributes(filp, user_attr); |
133 | case FAT_IOCTL_GET_VOLUME_ID: | ||
134 | return fat_ioctl_get_volume_id(inode, user_attr); | ||
127 | default: | 135 | default: |
128 | return -ENOTTY; /* Inappropriate ioctl for device */ | 136 | return -ENOTTY; /* Inappropriate ioctl for device */ |
129 | } | 137 | } |
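A hypothetical userspace sketch exercising the new ioctl above; it assumes FAT_IOCTL_GET_VOLUME_ID is exported through linux/msdos_fs.h (added elsewhere in this series) and prints the serial in the usual XXXX-XXXX form.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/msdos_fs.h>

int main(int argc, char **argv)
{
	__u32 id;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || ioctl(fd, FAT_IOCTL_GET_VOLUME_ID, &id) < 0) {
		perror("FAT_IOCTL_GET_VOLUME_ID");
		return 1;
	}
	printf("%04X-%04X\n", id >> 16, id & 0xffff);
	return 0;
}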
diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 5d4513cb1b3c..11b51bb55b42 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c | |||
@@ -1415,6 +1415,18 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, | |||
1415 | brelse(fsinfo_bh); | 1415 | brelse(fsinfo_bh); |
1416 | } | 1416 | } |
1417 | 1417 | ||
1418 | /* interpret volume ID as a little endian 32 bit integer */ | ||
1419 | if (sbi->fat_bits == 32) | ||
1420 | sbi->vol_id = (((u32)b->fat32.vol_id[0]) | | ||
1421 | ((u32)b->fat32.vol_id[1] << 8) | | ||
1422 | ((u32)b->fat32.vol_id[2] << 16) | | ||
1423 | ((u32)b->fat32.vol_id[3] << 24)); | ||
1424 | else /* fat 16 or 12 */ | ||
1425 | sbi->vol_id = (((u32)b->fat16.vol_id[0]) | | ||
1426 | ((u32)b->fat16.vol_id[1] << 8) | | ||
1427 | ((u32)b->fat16.vol_id[2] << 16) | | ||
1428 | ((u32)b->fat16.vol_id[3] << 24)); | ||
1429 | |||
1418 | sbi->dir_per_block = sb->s_blocksize / sizeof(struct msdos_dir_entry); | 1430 | sbi->dir_per_block = sb->s_blocksize / sizeof(struct msdos_dir_entry); |
1419 | sbi->dir_per_block_bits = ffs(sbi->dir_per_block) - 1; | 1431 | sbi->dir_per_block_bits = ffs(sbi->dir_per_block) - 1; |
1420 | 1432 | ||
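
The new FAT_IOCTL_GET_VOLUME_ID call is easy to exercise from userspace once the patched uapi header is available. A minimal sketch, assuming a FAT filesystem mounted somewhere such as /mnt/fat (the path and the XXXX-XXXX print format are illustrative, not part of the patch):

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/msdos_fs.h>

int main(void)
{
	uint32_t vol_id;
	int fd = open("/mnt/fat", O_RDONLY);	/* any file or directory on the FAT mount */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FAT_IOCTL_GET_VOLUME_ID, &vol_id) < 0) {
		perror("FAT_IOCTL_GET_VOLUME_ID");
		close(fd);
		return 1;
	}
	printf("%04X-%04X\n", vol_id >> 16, vol_id & 0xFFFF);
	close(fd);
	return 0;
}

The value returned is the little-endian interpretation of the boot-sector vol_id bytes that fat_fill_super() stores in the hunk above.
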
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index a85ac4e33436..68851ff2fd41 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
@@ -963,7 +963,7 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb) | |||
963 | /* | 963 | /* |
964 | * Retrieve work items and do the writeback they describe | 964 | * Retrieve work items and do the writeback they describe |
965 | */ | 965 | */ |
966 | long wb_do_writeback(struct bdi_writeback *wb, int force_wait) | 966 | static long wb_do_writeback(struct bdi_writeback *wb) |
967 | { | 967 | { |
968 | struct backing_dev_info *bdi = wb->bdi; | 968 | struct backing_dev_info *bdi = wb->bdi; |
969 | struct wb_writeback_work *work; | 969 | struct wb_writeback_work *work; |
@@ -971,12 +971,6 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait) | |||
971 | 971 | ||
972 | set_bit(BDI_writeback_running, &wb->bdi->state); | 972 | set_bit(BDI_writeback_running, &wb->bdi->state); |
973 | while ((work = get_next_work_item(bdi)) != NULL) { | 973 | while ((work = get_next_work_item(bdi)) != NULL) { |
974 | /* | ||
975 | * Override sync mode, in case we must wait for completion | ||
976 | * because this thread is exiting now. | ||
977 | */ | ||
978 | if (force_wait) | ||
979 | work->sync_mode = WB_SYNC_ALL; | ||
980 | 974 | ||
981 | trace_writeback_exec(bdi, work); | 975 | trace_writeback_exec(bdi, work); |
982 | 976 | ||
@@ -1025,7 +1019,7 @@ void bdi_writeback_workfn(struct work_struct *work) | |||
1025 | * rescuer as work_list needs to be drained. | 1019 | * rescuer as work_list needs to be drained. |
1026 | */ | 1020 | */ |
1027 | do { | 1021 | do { |
1028 | pages_written = wb_do_writeback(wb, 0); | 1022 | pages_written = wb_do_writeback(wb); |
1029 | trace_writeback_pages_written(pages_written); | 1023 | trace_writeback_pages_written(pages_written); |
1030 | } while (!list_empty(&bdi->work_list)); | 1024 | } while (!list_empty(&bdi->work_list)); |
1031 | } else { | 1025 | } else { |
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c index 2bfe6dc413a0..1fedd5f7ccc4 100644 --- a/fs/notify/dnotify/dnotify.c +++ b/fs/notify/dnotify/dnotify.c | |||
@@ -31,7 +31,6 @@ int dir_notify_enable __read_mostly = 1; | |||
31 | static struct kmem_cache *dnotify_struct_cache __read_mostly; | 31 | static struct kmem_cache *dnotify_struct_cache __read_mostly; |
32 | static struct kmem_cache *dnotify_mark_cache __read_mostly; | 32 | static struct kmem_cache *dnotify_mark_cache __read_mostly; |
33 | static struct fsnotify_group *dnotify_group __read_mostly; | 33 | static struct fsnotify_group *dnotify_group __read_mostly; |
34 | static DEFINE_MUTEX(dnotify_mark_mutex); | ||
35 | 34 | ||
36 | /* | 35 | /* |
37 | * dnotify will attach one of these to each inode (i_fsnotify_marks) which | 36 | * dnotify will attach one of these to each inode (i_fsnotify_marks) which |
@@ -183,7 +182,7 @@ void dnotify_flush(struct file *filp, fl_owner_t id) | |||
183 | return; | 182 | return; |
184 | dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); | 183 | dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); |
185 | 184 | ||
186 | mutex_lock(&dnotify_mark_mutex); | 185 | mutex_lock(&dnotify_group->mark_mutex); |
187 | 186 | ||
188 | spin_lock(&fsn_mark->lock); | 187 | spin_lock(&fsn_mark->lock); |
189 | prev = &dn_mark->dn; | 188 | prev = &dn_mark->dn; |
@@ -199,11 +198,12 @@ void dnotify_flush(struct file *filp, fl_owner_t id) | |||
199 | 198 | ||
200 | spin_unlock(&fsn_mark->lock); | 199 | spin_unlock(&fsn_mark->lock); |
201 | 200 | ||
202 | /* nothing else could have found us thanks to the dnotify_mark_mutex */ | 201 | /* nothing else could have found us thanks to the dnotify_groups |
202 | mark_mutex */ | ||
203 | if (dn_mark->dn == NULL) | 203 | if (dn_mark->dn == NULL) |
204 | fsnotify_destroy_mark(fsn_mark, dnotify_group); | 204 | fsnotify_destroy_mark_locked(fsn_mark, dnotify_group); |
205 | 205 | ||
206 | mutex_unlock(&dnotify_mark_mutex); | 206 | mutex_unlock(&dnotify_group->mark_mutex); |
207 | 207 | ||
208 | fsnotify_put_mark(fsn_mark); | 208 | fsnotify_put_mark(fsn_mark); |
209 | } | 209 | } |
@@ -326,7 +326,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) | |||
326 | new_dn_mark->dn = NULL; | 326 | new_dn_mark->dn = NULL; |
327 | 327 | ||
328 | /* this is needed to prevent the fcntl/close race described below */ | 328 | /* this is needed to prevent the fcntl/close race described below */ |
329 | mutex_lock(&dnotify_mark_mutex); | 329 | mutex_lock(&dnotify_group->mark_mutex); |
330 | 330 | ||
331 | /* add the new_fsn_mark or find an old one. */ | 331 | /* add the new_fsn_mark or find an old one. */ |
332 | fsn_mark = fsnotify_find_inode_mark(dnotify_group, inode); | 332 | fsn_mark = fsnotify_find_inode_mark(dnotify_group, inode); |
@@ -334,7 +334,8 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) | |||
334 | dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); | 334 | dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); |
335 | spin_lock(&fsn_mark->lock); | 335 | spin_lock(&fsn_mark->lock); |
336 | } else { | 336 | } else { |
337 | fsnotify_add_mark(new_fsn_mark, dnotify_group, inode, NULL, 0); | 337 | fsnotify_add_mark_locked(new_fsn_mark, dnotify_group, inode, |
338 | NULL, 0); | ||
338 | spin_lock(&new_fsn_mark->lock); | 339 | spin_lock(&new_fsn_mark->lock); |
339 | fsn_mark = new_fsn_mark; | 340 | fsn_mark = new_fsn_mark; |
340 | dn_mark = new_dn_mark; | 341 | dn_mark = new_dn_mark; |
@@ -348,9 +349,9 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) | |||
348 | 349 | ||
349 | /* if (f != filp) means that we lost a race and another task/thread | 350 | /* if (f != filp) means that we lost a race and another task/thread |
350 | * actually closed the fd we are still playing with before we grabbed | 351 | * actually closed the fd we are still playing with before we grabbed |
351 | * the dnotify_mark_mutex and fsn_mark->lock. Since closing the fd is the | 352 | * the dnotify_groups mark_mutex and fsn_mark->lock. Since closing the |
352 | * only time we clean up the marks we need to get our mark off | 353 | * fd is the only time we clean up the marks we need to get our mark |
353 | * the list. */ | 354 | * off the list. */ |
354 | if (f != filp) { | 355 | if (f != filp) { |
355 | /* if we added ourselves, shoot ourselves, it's possible that | 356 | /* if we added ourselves, shoot ourselves, it's possible that |
356 | * the flush actually did shoot this fsn_mark. That's fine too | 357 | * the flush actually did shoot this fsn_mark. That's fine too |
@@ -385,9 +386,9 @@ out: | |||
385 | spin_unlock(&fsn_mark->lock); | 386 | spin_unlock(&fsn_mark->lock); |
386 | 387 | ||
387 | if (destroy) | 388 | if (destroy) |
388 | fsnotify_destroy_mark(fsn_mark, dnotify_group); | 389 | fsnotify_destroy_mark_locked(fsn_mark, dnotify_group); |
389 | 390 | ||
390 | mutex_unlock(&dnotify_mark_mutex); | 391 | mutex_unlock(&dnotify_group->mark_mutex); |
391 | fsnotify_put_mark(fsn_mark); | 392 | fsnotify_put_mark(fsn_mark); |
392 | out_err: | 393 | out_err: |
393 | if (new_fsn_mark) | 394 | if (new_fsn_mark) |
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 1ea52f7c031f..e44cb6427df3 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c | |||
@@ -122,6 +122,7 @@ static int fill_event_metadata(struct fsnotify_group *group, | |||
122 | metadata->event_len = FAN_EVENT_METADATA_LEN; | 122 | metadata->event_len = FAN_EVENT_METADATA_LEN; |
123 | metadata->metadata_len = FAN_EVENT_METADATA_LEN; | 123 | metadata->metadata_len = FAN_EVENT_METADATA_LEN; |
124 | metadata->vers = FANOTIFY_METADATA_VERSION; | 124 | metadata->vers = FANOTIFY_METADATA_VERSION; |
125 | metadata->reserved = 0; | ||
125 | metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS; | 126 | metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS; |
126 | metadata->pid = pid_vnr(event->tgid); | 127 | metadata->pid = pid_vnr(event->tgid); |
127 | if (unlikely(event->mask & FAN_Q_OVERFLOW)) | 128 | if (unlikely(event->mask & FAN_Q_OVERFLOW)) |
@@ -523,14 +524,18 @@ static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group, | |||
523 | __u32 removed; | 524 | __u32 removed; |
524 | int destroy_mark; | 525 | int destroy_mark; |
525 | 526 | ||
527 | mutex_lock(&group->mark_mutex); | ||
526 | fsn_mark = fsnotify_find_vfsmount_mark(group, mnt); | 528 | fsn_mark = fsnotify_find_vfsmount_mark(group, mnt); |
527 | if (!fsn_mark) | 529 | if (!fsn_mark) { |
530 | mutex_unlock(&group->mark_mutex); | ||
528 | return -ENOENT; | 531 | return -ENOENT; |
532 | } | ||
529 | 533 | ||
530 | removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags, | 534 | removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags, |
531 | &destroy_mark); | 535 | &destroy_mark); |
532 | if (destroy_mark) | 536 | if (destroy_mark) |
533 | fsnotify_destroy_mark(fsn_mark, group); | 537 | fsnotify_destroy_mark_locked(fsn_mark, group); |
538 | mutex_unlock(&group->mark_mutex); | ||
534 | 539 | ||
535 | fsnotify_put_mark(fsn_mark); | 540 | fsnotify_put_mark(fsn_mark); |
536 | if (removed & real_mount(mnt)->mnt_fsnotify_mask) | 541 | if (removed & real_mount(mnt)->mnt_fsnotify_mask) |
@@ -547,14 +552,19 @@ static int fanotify_remove_inode_mark(struct fsnotify_group *group, | |||
547 | __u32 removed; | 552 | __u32 removed; |
548 | int destroy_mark; | 553 | int destroy_mark; |
549 | 554 | ||
555 | mutex_lock(&group->mark_mutex); | ||
550 | fsn_mark = fsnotify_find_inode_mark(group, inode); | 556 | fsn_mark = fsnotify_find_inode_mark(group, inode); |
551 | if (!fsn_mark) | 557 | if (!fsn_mark) { |
558 | mutex_unlock(&group->mark_mutex); | ||
552 | return -ENOENT; | 559 | return -ENOENT; |
560 | } | ||
553 | 561 | ||
554 | removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags, | 562 | removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags, |
555 | &destroy_mark); | 563 | &destroy_mark); |
556 | if (destroy_mark) | 564 | if (destroy_mark) |
557 | fsnotify_destroy_mark(fsn_mark, group); | 565 | fsnotify_destroy_mark_locked(fsn_mark, group); |
566 | mutex_unlock(&group->mark_mutex); | ||
567 | |||
558 | /* matches the fsnotify_find_inode_mark() */ | 568 | /* matches the fsnotify_find_inode_mark() */ |
559 | fsnotify_put_mark(fsn_mark); | 569 | fsnotify_put_mark(fsn_mark); |
560 | if (removed & inode->i_fsnotify_mask) | 570 | if (removed & inode->i_fsnotify_mask) |
@@ -590,35 +600,55 @@ static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark, | |||
590 | return mask & ~oldmask; | 600 | return mask & ~oldmask; |
591 | } | 601 | } |
592 | 602 | ||
603 | static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group, | ||
604 | struct inode *inode, | ||
605 | struct vfsmount *mnt) | ||
606 | { | ||
607 | struct fsnotify_mark *mark; | ||
608 | int ret; | ||
609 | |||
610 | if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks) | ||
611 | return ERR_PTR(-ENOSPC); | ||
612 | |||
613 | mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL); | ||
614 | if (!mark) | ||
615 | return ERR_PTR(-ENOMEM); | ||
616 | |||
617 | fsnotify_init_mark(mark, fanotify_free_mark); | ||
618 | ret = fsnotify_add_mark_locked(mark, group, inode, mnt, 0); | ||
619 | if (ret) { | ||
620 | fsnotify_put_mark(mark); | ||
621 | return ERR_PTR(ret); | ||
622 | } | ||
623 | |||
624 | return mark; | ||
625 | } | ||
626 | |||
627 | |||
593 | static int fanotify_add_vfsmount_mark(struct fsnotify_group *group, | 628 | static int fanotify_add_vfsmount_mark(struct fsnotify_group *group, |
594 | struct vfsmount *mnt, __u32 mask, | 629 | struct vfsmount *mnt, __u32 mask, |
595 | unsigned int flags) | 630 | unsigned int flags) |
596 | { | 631 | { |
597 | struct fsnotify_mark *fsn_mark; | 632 | struct fsnotify_mark *fsn_mark; |
598 | __u32 added; | 633 | __u32 added; |
599 | int ret = 0; | ||
600 | 634 | ||
635 | mutex_lock(&group->mark_mutex); | ||
601 | fsn_mark = fsnotify_find_vfsmount_mark(group, mnt); | 636 | fsn_mark = fsnotify_find_vfsmount_mark(group, mnt); |
602 | if (!fsn_mark) { | 637 | if (!fsn_mark) { |
603 | if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks) | 638 | fsn_mark = fanotify_add_new_mark(group, NULL, mnt); |
604 | return -ENOSPC; | 639 | if (IS_ERR(fsn_mark)) { |
605 | 640 | mutex_unlock(&group->mark_mutex); | |
606 | fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL); | 641 | return PTR_ERR(fsn_mark); |
607 | if (!fsn_mark) | 642 | } |
608 | return -ENOMEM; | ||
609 | |||
610 | fsnotify_init_mark(fsn_mark, fanotify_free_mark); | ||
611 | ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0); | ||
612 | if (ret) | ||
613 | goto err; | ||
614 | } | 643 | } |
615 | added = fanotify_mark_add_to_mask(fsn_mark, mask, flags); | 644 | added = fanotify_mark_add_to_mask(fsn_mark, mask, flags); |
645 | mutex_unlock(&group->mark_mutex); | ||
616 | 646 | ||
617 | if (added & ~real_mount(mnt)->mnt_fsnotify_mask) | 647 | if (added & ~real_mount(mnt)->mnt_fsnotify_mask) |
618 | fsnotify_recalc_vfsmount_mask(mnt); | 648 | fsnotify_recalc_vfsmount_mask(mnt); |
619 | err: | 649 | |
620 | fsnotify_put_mark(fsn_mark); | 650 | fsnotify_put_mark(fsn_mark); |
621 | return ret; | 651 | return 0; |
622 | } | 652 | } |
623 | 653 | ||
624 | static int fanotify_add_inode_mark(struct fsnotify_group *group, | 654 | static int fanotify_add_inode_mark(struct fsnotify_group *group, |
@@ -627,7 +657,6 @@ static int fanotify_add_inode_mark(struct fsnotify_group *group, | |||
627 | { | 657 | { |
628 | struct fsnotify_mark *fsn_mark; | 658 | struct fsnotify_mark *fsn_mark; |
629 | __u32 added; | 659 | __u32 added; |
630 | int ret = 0; | ||
631 | 660 | ||
632 | pr_debug("%s: group=%p inode=%p\n", __func__, group, inode); | 661 | pr_debug("%s: group=%p inode=%p\n", __func__, group, inode); |
633 | 662 | ||
@@ -641,27 +670,23 @@ static int fanotify_add_inode_mark(struct fsnotify_group *group, | |||
641 | (atomic_read(&inode->i_writecount) > 0)) | 670 | (atomic_read(&inode->i_writecount) > 0)) |
642 | return 0; | 671 | return 0; |
643 | 672 | ||
673 | mutex_lock(&group->mark_mutex); | ||
644 | fsn_mark = fsnotify_find_inode_mark(group, inode); | 674 | fsn_mark = fsnotify_find_inode_mark(group, inode); |
645 | if (!fsn_mark) { | 675 | if (!fsn_mark) { |
646 | if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks) | 676 | fsn_mark = fanotify_add_new_mark(group, inode, NULL); |
647 | return -ENOSPC; | 677 | if (IS_ERR(fsn_mark)) { |
648 | 678 | mutex_unlock(&group->mark_mutex); | |
649 | fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL); | 679 | return PTR_ERR(fsn_mark); |
650 | if (!fsn_mark) | 680 | } |
651 | return -ENOMEM; | ||
652 | |||
653 | fsnotify_init_mark(fsn_mark, fanotify_free_mark); | ||
654 | ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0); | ||
655 | if (ret) | ||
656 | goto err; | ||
657 | } | 681 | } |
658 | added = fanotify_mark_add_to_mask(fsn_mark, mask, flags); | 682 | added = fanotify_mark_add_to_mask(fsn_mark, mask, flags); |
683 | mutex_unlock(&group->mark_mutex); | ||
659 | 684 | ||
660 | if (added & ~inode->i_fsnotify_mask) | 685 | if (added & ~inode->i_fsnotify_mask) |
661 | fsnotify_recalc_inode_mask(inode); | 686 | fsnotify_recalc_inode_mask(inode); |
662 | err: | 687 | |
663 | fsnotify_put_mark(fsn_mark); | 688 | fsnotify_put_mark(fsn_mark); |
664 | return ret; | 689 | return 0; |
665 | } | 690 | } |
666 | 691 | ||
667 | /* fanotify syscalls */ | 692 | /* fanotify syscalls */ |
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 959815c1e017..60f954a891ab 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c | |||
@@ -636,7 +636,8 @@ static int inotify_new_watch(struct fsnotify_group *group, | |||
636 | goto out_err; | 636 | goto out_err; |
637 | 637 | ||
638 | /* we are on the idr, now get on the inode */ | 638 | /* we are on the idr, now get on the inode */ |
639 | ret = fsnotify_add_mark(&tmp_i_mark->fsn_mark, group, inode, NULL, 0); | 639 | ret = fsnotify_add_mark_locked(&tmp_i_mark->fsn_mark, group, inode, |
640 | NULL, 0); | ||
640 | if (ret) { | 641 | if (ret) { |
641 | /* we failed to get on the inode, get off the idr */ | 642 | /* we failed to get on the inode, get off the idr */ |
642 | inotify_remove_from_idr(group, tmp_i_mark); | 643 | inotify_remove_from_idr(group, tmp_i_mark); |
@@ -660,19 +661,13 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod | |||
660 | { | 661 | { |
661 | int ret = 0; | 662 | int ret = 0; |
662 | 663 | ||
663 | retry: | 664 | mutex_lock(&group->mark_mutex); |
664 | /* try to update and existing watch with the new arg */ | 665 | /* try to update and existing watch with the new arg */ |
665 | ret = inotify_update_existing_watch(group, inode, arg); | 666 | ret = inotify_update_existing_watch(group, inode, arg); |
666 | /* no mark present, try to add a new one */ | 667 | /* no mark present, try to add a new one */ |
667 | if (ret == -ENOENT) | 668 | if (ret == -ENOENT) |
668 | ret = inotify_new_watch(group, inode, arg); | 669 | ret = inotify_new_watch(group, inode, arg); |
669 | /* | 670 | mutex_unlock(&group->mark_mutex); |
670 | * inotify_new_watch could race with another thread which did an | ||
671 | * inotify_new_watch between the update_existing and the add watch | ||
672 | * here, go back and try to update an existing mark again. | ||
673 | */ | ||
674 | if (ret == -EEXIST) | ||
675 | goto retry; | ||
676 | 671 | ||
677 | return ret; | 672 | return ret; |
678 | } | 673 | } |
diff --git a/fs/notify/mark.c b/fs/notify/mark.c index fc6b49bf7360..923fe4a5f503 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c | |||
@@ -20,28 +20,29 @@ | |||
20 | * fsnotify inode mark locking/lifetime/and refcnting | 20 | * fsnotify inode mark locking/lifetime/and refcnting |
21 | * | 21 | * |
22 | * REFCNT: | 22 | * REFCNT: |
23 | * The mark->refcnt tells how many "things" in the kernel currently are | 23 | * The group->refcnt and mark->refcnt tell how many "things" in the kernel |
24 | * referencing this object. The object typically will live inside the kernel | 24 | * currently are referencing the objects. Both kind of objects typically will |
25 | * with a refcnt of 2, one for each list it is on (i_list, g_list). Any task | 25 | * live inside the kernel with a refcnt of 2, one for its creation and one for |
26 | * which can find this object holding the appropriete locks, can take a reference | 26 | * the reference a group and a mark hold to each other. |
27 | * and the object itself is guaranteed to survive until the reference is dropped. | 27 | * If you are holding the appropriate locks, you can take a reference and the |
28 | * object itself is guaranteed to survive until the reference is dropped. | ||
28 | * | 29 | * |
29 | * LOCKING: | 30 | * LOCKING: |
30 | * There are 3 spinlocks involved with fsnotify inode marks and they MUST | 31 | * There are 3 locks involved with fsnotify inode marks and they MUST be taken |
31 | * be taken in order as follows: | 32 | * in order as follows: |
32 | * | 33 | * |
34 | * group->mark_mutex | ||
33 | * mark->lock | 35 | * mark->lock |
34 | * group->mark_lock | ||
35 | * inode->i_lock | 36 | * inode->i_lock |
36 | * | 37 | * |
37 | * mark->lock protects 2 things, mark->group and mark->inode. You must hold | 38 | * group->mark_mutex protects the marks_list anchored inside a given group and |
38 | * that lock to dereference either of these things (they could be NULL even with | 39 | * each mark is hooked via the g_list. It also protects the groups private |
39 | * the lock) | 40 | * data (i.e group limits). |
40 | * | 41 | |
41 | * group->mark_lock protects the marks_list anchored inside a given group | 42 | * mark->lock protects the marks attributes like its masks and flags. |
42 | * and each mark is hooked via the g_list. It also sorta protects the | 43 | * Furthermore it protects the access to a reference of the group that the mark |
43 | * free_g_list, which when used is anchored by a private list on the stack of the | 44 | * is assigned to as well as the access to a reference of the inode/vfsmount |
44 | * task which held the group->mark_lock. | 45 | * that is being watched by the mark. |
45 | * | 46 | * |
46 | * inode->i_lock protects the i_fsnotify_marks list anchored inside a | 47 | * inode->i_lock protects the i_fsnotify_marks list anchored inside a |
47 | * given inode and each mark is hooked via the i_list. (and sorta the | 48 | * given inode and each mark is hooked via the i_list. (and sorta the |
@@ -64,18 +65,11 @@ | |||
64 | * inode. We take i_lock and walk the i_fsnotify_marks safely. For each | 65 | * inode. We take i_lock and walk the i_fsnotify_marks safely. For each |
65 | * mark on the list we take a reference (so the mark can't disappear under us). | 66 | * mark on the list we take a reference (so the mark can't disappear under us). |
66 | * We remove that mark form the inode's list of marks and we add this mark to a | 67 | * We remove that mark form the inode's list of marks and we add this mark to a |
67 | * private list anchored on the stack using i_free_list; At this point we no | 68 | * private list anchored on the stack using i_free_list; we walk i_free_list |
68 | * longer fear anything finding the mark using the inode's list of marks. | 69 | * and before we destroy the mark we make sure that we dont race with a |
69 | * | 70 | * concurrent destroy_group by getting a ref to the marks group and taking the |
70 | * We can safely and locklessly run the private list on the stack of everything | 71 | * groups mutex. |
71 | * we just unattached from the original inode. For each mark on the private list | 72 | |
72 | * we grab the mark-> and can thus dereference mark->group and mark->inode. If | ||
73 | * we see the group and inode are not NULL we take those locks. Now holding all | ||
74 | * 3 locks we can completely remove the mark from other tasks finding it in the | ||
75 | * future. Remember, 10 things might already be referencing this mark, but they | ||
76 | * better be holding a ref. We drop our reference we took before we unhooked it | ||
77 | * from the inode. When the ref hits 0 we can free the mark. | ||
78 | * | ||
79 | * Very similarly for freeing by group, except we use free_g_list. | 73 | * Very similarly for freeing by group, except we use free_g_list. |
80 | * | 74 | * |
81 | * This has the very interesting property of being able to run concurrently with | 75 | * This has the very interesting property of being able to run concurrently with |
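
The rewritten comment documents the lock ordering (group->mark_mutex, then mark->lock, then inode->i_lock) that the dnotify, fanotify and inotify conversions above now rely on. A condensed, illustrative sketch of that pattern for an inode mark; the mask manipulation and the teardown condition are placeholders rather than any backend's real policy:

#include <linux/fsnotify_backend.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

static int example_clear_events(struct fsnotify_group *group,
				struct inode *inode, __u32 mask)
{
	struct fsnotify_mark *mark;
	int destroy;

	mutex_lock(&group->mark_mutex);			/* 1st: group->mark_mutex */

	mark = fsnotify_find_inode_mark(group, inode);	/* takes a reference */
	if (!mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	spin_lock(&mark->lock);				/* 2nd: mark->lock */
	mark->mask &= ~mask;				/* placeholder mask update */
	destroy = !mark->mask;
	spin_unlock(&mark->lock);

	if (destroy)
		fsnotify_destroy_mark_locked(mark, group);

	mutex_unlock(&group->mark_mutex);
	fsnotify_put_mark(mark);			/* drop the _find_ reference */
	return 0;
}
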
diff --git a/include/linux/audit.h b/include/linux/audit.h index b20b03852f21..729a4d165bcc 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h | |||
@@ -103,8 +103,11 @@ extern void __audit_syscall_exit(int ret_success, long ret_value); | |||
103 | extern struct filename *__audit_reusename(const __user char *uptr); | 103 | extern struct filename *__audit_reusename(const __user char *uptr); |
104 | extern void __audit_getname(struct filename *name); | 104 | extern void __audit_getname(struct filename *name); |
105 | extern void audit_putname(struct filename *name); | 105 | extern void audit_putname(struct filename *name); |
106 | |||
107 | #define AUDIT_INODE_PARENT 1 /* dentry represents the parent */ | ||
108 | #define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ | ||
106 | extern void __audit_inode(struct filename *name, const struct dentry *dentry, | 109 | extern void __audit_inode(struct filename *name, const struct dentry *dentry, |
107 | unsigned int parent); | 110 | unsigned int flags); |
108 | extern void __audit_inode_child(const struct inode *parent, | 111 | extern void __audit_inode_child(const struct inode *parent, |
109 | const struct dentry *dentry, | 112 | const struct dentry *dentry, |
110 | const unsigned char type); | 113 | const unsigned char type); |
@@ -148,10 +151,22 @@ static inline void audit_getname(struct filename *name) | |||
148 | if (unlikely(!audit_dummy_context())) | 151 | if (unlikely(!audit_dummy_context())) |
149 | __audit_getname(name); | 152 | __audit_getname(name); |
150 | } | 153 | } |
151 | static inline void audit_inode(struct filename *name, const struct dentry *dentry, | 154 | static inline void audit_inode(struct filename *name, |
155 | const struct dentry *dentry, | ||
152 | unsigned int parent) { | 156 | unsigned int parent) { |
157 | if (unlikely(!audit_dummy_context())) { | ||
158 | unsigned int flags = 0; | ||
159 | if (parent) | ||
160 | flags |= AUDIT_INODE_PARENT; | ||
161 | __audit_inode(name, dentry, flags); | ||
162 | } | ||
163 | } | ||
164 | static inline void audit_inode_parent_hidden(struct filename *name, | ||
165 | const struct dentry *dentry) | ||
166 | { | ||
153 | if (unlikely(!audit_dummy_context())) | 167 | if (unlikely(!audit_dummy_context())) |
154 | __audit_inode(name, dentry, parent); | 168 | __audit_inode(name, dentry, |
169 | AUDIT_INODE_PARENT | AUDIT_INODE_HIDDEN); | ||
155 | } | 170 | } |
156 | static inline void audit_inode_child(const struct inode *parent, | 171 | static inline void audit_inode_child(const struct inode *parent, |
157 | const struct dentry *dentry, | 172 | const struct dentry *dentry, |
@@ -311,7 +326,7 @@ static inline void audit_putname(struct filename *name) | |||
311 | { } | 326 | { } |
312 | static inline void __audit_inode(struct filename *name, | 327 | static inline void __audit_inode(struct filename *name, |
313 | const struct dentry *dentry, | 328 | const struct dentry *dentry, |
314 | unsigned int parent) | 329 | unsigned int flags) |
315 | { } | 330 | { } |
316 | static inline void __audit_inode_child(const struct inode *parent, | 331 | static inline void __audit_inode_child(const struct inode *parent, |
317 | const struct dentry *dentry, | 332 | const struct dentry *dentry, |
@@ -321,6 +336,9 @@ static inline void audit_inode(struct filename *name, | |||
321 | const struct dentry *dentry, | 336 | const struct dentry *dentry, |
322 | unsigned int parent) | 337 | unsigned int parent) |
323 | { } | 338 | { } |
339 | static inline void audit_inode_parent_hidden(struct filename *name, | ||
340 | const struct dentry *dentry) | ||
341 | { } | ||
324 | static inline void audit_inode_child(const struct inode *parent, | 342 | static inline void audit_inode_child(const struct inode *parent, |
325 | const struct dentry *dentry, | 343 | const struct dentry *dentry, |
326 | const unsigned char type) | 344 | const unsigned char type) |
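
With __audit_inode() now taking a flags word, the old boolean 'parent' argument maps onto AUDIT_INODE_PARENT, and the new audit_inode_parent_hidden() wrapper additionally sets AUDIT_INODE_HIDDEN to flag the record as hidden. Hypothetical call sites, with the name/dentry variables purely illustrative (the last line mirrors the mqueue hunks further down):

/* audit the object itself */
audit_inode(name, dentry, 0);

/* audit the parent directory of a path being created or unlinked */
audit_inode(name, parent_dentry, 1);

/* audit the parent but mark the record hidden, as mq_open()/mq_unlink() do */
audit_inode_parent_hidden(name, mnt->mnt_root);
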
diff --git a/include/linux/decompress/unlz4.h b/include/linux/decompress/unlz4.h new file mode 100644 index 000000000000..d5b68bf3ec92 --- /dev/null +++ b/include/linux/decompress/unlz4.h | |||
@@ -0,0 +1,10 @@ | |||
1 | #ifndef DECOMPRESS_UNLZ4_H | ||
2 | #define DECOMPRESS_UNLZ4_H | ||
3 | |||
4 | int unlz4(unsigned char *inbuf, int len, | ||
5 | int(*fill)(void*, unsigned int), | ||
6 | int(*flush)(void*, unsigned int), | ||
7 | unsigned char *output, | ||
8 | int *pos, | ||
9 | void(*error)(char *x)); | ||
10 | #endif | ||
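
unlz4() appears to follow the same calling convention as the other lib/decompress_* modules: the compressed image can either be handed over whole in inbuf/len, or pulled in through the fill callback, and output is written either via flush or directly into the output buffer. A sketch of the simple buffer-to-buffer case, under that assumption (names are illustrative):

#include <linux/kernel.h>
#include <linux/decompress/unlz4.h>

static void example_lz4_error(char *msg)
{
	pr_err("LZ4 decompression failed: %s\n", msg);
}

/* 'compressed' holds 'clen' bytes of LZ4 data; 'output' must be large
 * enough for the whole uncompressed image */
static int example_unlz4_in_memory(unsigned char *compressed, int clen,
				   unsigned char *output)
{
	int pos = 0;

	return unlz4(compressed, clen,
		     NULL,	/* no fill callback: input is already in memory */
		     NULL,	/* no flush callback: write straight to 'output' */
		     output, &pos, example_lz4_error);
}
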
diff --git a/include/linux/fs.h b/include/linux/fs.h index 834c9e5113d9..a35b10e9a680 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -372,8 +372,8 @@ struct address_space_operations { | |||
372 | int (*get_xip_mem)(struct address_space *, pgoff_t, int, | 372 | int (*get_xip_mem)(struct address_space *, pgoff_t, int, |
373 | void **, unsigned long *); | 373 | void **, unsigned long *); |
374 | /* | 374 | /* |
375 | * migrate the contents of a page to the specified target. If sync | 375 | * migrate the contents of a page to the specified target. If |
376 | * is false, it must not block. | 376 | * migrate_mode is MIGRATE_ASYNC, it must not block. |
377 | */ | 377 | */ |
378 | int (*migratepage) (struct address_space *, | 378 | int (*migratepage) (struct address_space *, |
379 | struct page *, struct page *, enum migrate_mode); | 379 | struct page *, struct page *, enum migrate_mode); |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0f615eb23d05..9b4dd491f7e8 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -209,7 +209,7 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags) | |||
209 | * 0x9 => DMA or NORMAL (MOVABLE+DMA) | 209 | * 0x9 => DMA or NORMAL (MOVABLE+DMA) |
210 | * 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too) | 210 | * 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too) |
211 | * 0xb => BAD (MOVABLE+HIGHMEM+DMA) | 211 | * 0xb => BAD (MOVABLE+HIGHMEM+DMA) |
212 | * 0xc => DMA32 (MOVABLE+HIGHMEM+DMA32) | 212 | * 0xc => DMA32 (MOVABLE+DMA32) |
213 | * 0xd => BAD (MOVABLE+DMA32+DMA) | 213 | * 0xd => BAD (MOVABLE+DMA32+DMA) |
214 | * 0xe => BAD (MOVABLE+DMA32+HIGHMEM) | 214 | * 0xe => BAD (MOVABLE+DMA32+HIGHMEM) |
215 | * 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA) | 215 | * 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA) |
diff --git a/include/linux/lz4.h b/include/linux/lz4.h new file mode 100644 index 000000000000..d21c13f10a64 --- /dev/null +++ b/include/linux/lz4.h | |||
@@ -0,0 +1,87 @@ | |||
1 | #ifndef __LZ4_H__ | ||
2 | #define __LZ4_H__ | ||
3 | /* | ||
4 | * LZ4 Kernel Interface | ||
5 | * | ||
6 | * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | #define LZ4_MEM_COMPRESS (4096 * sizeof(unsigned char *)) | ||
13 | #define LZ4HC_MEM_COMPRESS (65538 * sizeof(unsigned char *)) | ||
14 | |||
15 | /* | ||
16 | * lz4_compressbound() | ||
17 | * Provides the maximum size that LZ4 may output in a "worst case" scenario | ||
18 | * (input data not compressible) | ||
19 | */ | ||
20 | static inline size_t lz4_compressbound(size_t isize) | ||
21 | { | ||
22 | return isize + (isize / 255) + 16; | ||
23 | } | ||
24 | |||
25 | /* | ||
26 | * lz4_compress() | ||
27 | * src : source address of the original data | ||
28 | * src_len : size of the original data | ||
29 | * dst : output buffer address of the compressed data | ||
30 | * This requires 'dst' of size LZ4_COMPRESSBOUND. | ||
31 | * dst_len : is the output size, which is returned after compress done | ||
32 | * workmem : address of the working memory. | ||
33 | * This requires 'workmem' of size LZ4_MEM_COMPRESS. | ||
34 | * return : Success if return 0 | ||
35 | * Error if return (< 0) | ||
36 | * note : Destination buffer and workmem must be already allocated with | ||
37 | * the defined size. | ||
38 | */ | ||
39 | int lz4_compress(const unsigned char *src, size_t src_len, | ||
40 | unsigned char *dst, size_t *dst_len, void *wrkmem); | ||
41 | |||
42 | /* | ||
43 | * lz4hc_compress() | ||
44 | * src : source address of the original data | ||
45 | * src_len : size of the original data | ||
46 | * dst : output buffer address of the compressed data | ||
47 | * This requires 'dst' of size LZ4_COMPRESSBOUND. | ||
48 | * dst_len : is the output size, which is returned after compress done | ||
49 | * workmem : address of the working memory. | ||
50 | * This requires 'workmem' of size LZ4HC_MEM_COMPRESS. | ||
51 | * return : Success if return 0 | ||
52 | * Error if return (< 0) | ||
53 | * note : Destination buffer and workmem must be already allocated with | ||
54 | * the defined size. | ||
55 | */ | ||
56 | int lz4hc_compress(const unsigned char *src, size_t src_len, | ||
57 | unsigned char *dst, size_t *dst_len, void *wrkmem); | ||
58 | |||
59 | /* | ||
60 | * lz4_decompress() | ||
61 | * src : source address of the compressed data | ||
62 | * src_len : is the input size, which is returned after decompress done | ||
63 | * dest : output buffer address of the decompressed data | ||
64 | * actual_dest_len: is the size of uncompressed data, supposing it's known | ||
65 | * return : Success if return 0 | ||
66 | * Error if return (< 0) | ||
67 | * note : Destination buffer must be already allocated. | ||
68 | * slightly faster than lz4_decompress_unknownoutputsize() | ||
69 | */ | ||
70 | int lz4_decompress(const char *src, size_t *src_len, char *dest, | ||
71 | size_t actual_dest_len); | ||
72 | |||
73 | /* | ||
74 | * lz4_decompress_unknownoutputsize() | ||
75 | * src : source address of the compressed data | ||
76 | * src_len : is the input size, therefore the compressed size | ||
77 | * dest : output buffer address of the decompressed data | ||
78 | * dest_len: is the max size of the destination buffer, which is | ||
79 | * returned with actual size of decompressed data after | ||
80 | * decompress done | ||
81 | * return : Success if return 0 | ||
82 | * Error if return (< 0) | ||
83 | * note : Destination buffer must be already allocated. | ||
84 | */ | ||
85 | int lz4_decompress_unknownoutputsize(const char *src, size_t src_len, | ||
86 | char *dest, size_t *dest_len); | ||
87 | #endif | ||
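
Putting the exported pieces together, a compress-then-decompress round trip inside the kernel might look like the sketch below. It relies only on the contract documented in the header; the vmalloc-based buffer management and the handling of the src_len out-parameter are illustrative assumptions.

#include <linux/lz4.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

static int example_lz4_roundtrip(const unsigned char *data, size_t len)
{
	size_t clen = lz4_compressbound(len);	/* worst-case compressed size */
	unsigned char *compressed = vmalloc(clen);
	unsigned char *restored = vmalloc(len);
	void *wrkmem = vmalloc(LZ4_MEM_COMPRESS);
	int ret = -ENOMEM;

	if (!compressed || !restored || !wrkmem)
		goto out;

	ret = lz4_compress(data, len, compressed, &clen, wrkmem);
	if (ret)
		goto out;

	/* clen now holds the compressed size; the original length is known */
	ret = lz4_decompress((const char *)compressed, &clen,
			     (char *)restored, len);
out:
	vfree(wrkmem);
	vfree(restored);
	vfree(compressed);
	return ret;
}
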
diff --git a/include/linux/mm.h b/include/linux/mm.h index b87681adf0ba..f0224608d15e 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -151,12 +151,6 @@ extern unsigned int kobjsize(const void *objp); | |||
151 | #define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) | 151 | #define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) |
152 | #endif | 152 | #endif |
153 | 153 | ||
154 | #define VM_READHINTMASK (VM_SEQ_READ | VM_RAND_READ) | ||
155 | #define VM_ClearReadHint(v) (v)->vm_flags &= ~VM_READHINTMASK | ||
156 | #define VM_NormalReadHint(v) (!((v)->vm_flags & VM_READHINTMASK)) | ||
157 | #define VM_SequentialReadHint(v) ((v)->vm_flags & VM_SEQ_READ) | ||
158 | #define VM_RandomReadHint(v) ((v)->vm_flags & VM_RAND_READ) | ||
159 | |||
160 | /* | 154 | /* |
161 | * Special vmas that are non-mergable, non-mlock()able. | 155 | * Special vmas that are non-mergable, non-mlock()able. |
162 | * Note: mm/huge_memory.c VM_NO_THP depends on this definition. | 156 | * Note: mm/huge_memory.c VM_NO_THP depends on this definition. |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index ae19af5ec02c..af4a3b77a8de 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -869,11 +869,6 @@ static inline int is_highmem_idx(enum zone_type idx) | |||
869 | #endif | 869 | #endif |
870 | } | 870 | } |
871 | 871 | ||
872 | static inline int is_normal_idx(enum zone_type idx) | ||
873 | { | ||
874 | return (idx == ZONE_NORMAL); | ||
875 | } | ||
876 | |||
877 | /** | 872 | /** |
878 | * is_highmem - helper function to quickly check if a struct zone is a | 873 | * is_highmem - helper function to quickly check if a struct zone is a |
879 | * highmem zone or not. This is an attempt to keep references | 874 | * highmem zone or not. This is an attempt to keep references |
@@ -892,29 +887,6 @@ static inline int is_highmem(struct zone *zone) | |||
892 | #endif | 887 | #endif |
893 | } | 888 | } |
894 | 889 | ||
895 | static inline int is_normal(struct zone *zone) | ||
896 | { | ||
897 | return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL; | ||
898 | } | ||
899 | |||
900 | static inline int is_dma32(struct zone *zone) | ||
901 | { | ||
902 | #ifdef CONFIG_ZONE_DMA32 | ||
903 | return zone == zone->zone_pgdat->node_zones + ZONE_DMA32; | ||
904 | #else | ||
905 | return 0; | ||
906 | #endif | ||
907 | } | ||
908 | |||
909 | static inline int is_dma(struct zone *zone) | ||
910 | { | ||
911 | #ifdef CONFIG_ZONE_DMA | ||
912 | return zone == zone->zone_pgdat->node_zones + ZONE_DMA; | ||
913 | #else | ||
914 | return 0; | ||
915 | #endif | ||
916 | } | ||
917 | |||
918 | /* These two functions are used to setup the per zone pages min values */ | 890 | /* These two functions are used to setup the per zone pages min values */ |
919 | struct ctl_table; | 891 | struct ctl_table; |
920 | int min_free_kbytes_sysctl_handler(struct ctl_table *, int, | 892 | int min_free_kbytes_sysctl_handler(struct ctl_table *, int, |
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 89573a33ab3c..07d0df6bf768 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
@@ -142,9 +142,6 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) | |||
142 | { | 142 | { |
143 | INIT_LIST_HEAD(&child->ptrace_entry); | 143 | INIT_LIST_HEAD(&child->ptrace_entry); |
144 | INIT_LIST_HEAD(&child->ptraced); | 144 | INIT_LIST_HEAD(&child->ptraced); |
145 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | ||
146 | atomic_set(&child->ptrace_bp_refcnt, 1); | ||
147 | #endif | ||
148 | child->jobctl = 0; | 145 | child->jobctl = 0; |
149 | child->ptrace = 0; | 146 | child->ptrace = 0; |
150 | child->parent = child->real_parent; | 147 | child->parent = child->real_parent; |
@@ -351,11 +348,4 @@ extern int task_current_syscall(struct task_struct *target, long *callno, | |||
351 | unsigned long args[6], unsigned int maxargs, | 348 | unsigned long args[6], unsigned int maxargs, |
352 | unsigned long *sp, unsigned long *pc); | 349 | unsigned long *sp, unsigned long *pc); |
353 | 350 | ||
354 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | ||
355 | extern int ptrace_get_breakpoints(struct task_struct *tsk); | ||
356 | extern void ptrace_put_breakpoints(struct task_struct *tsk); | ||
357 | #else | ||
358 | static inline void ptrace_put_breakpoints(struct task_struct *tsk) { } | ||
359 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | ||
360 | |||
361 | #endif | 351 | #endif |
diff --git a/include/linux/reboot.h b/include/linux/reboot.h index 23b36304cd88..8e00f9f6f963 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h | |||
@@ -10,6 +10,31 @@ | |||
10 | #define SYS_HALT 0x0002 /* Notify of system halt */ | 10 | #define SYS_HALT 0x0002 /* Notify of system halt */ |
11 | #define SYS_POWER_OFF 0x0003 /* Notify of system power off */ | 11 | #define SYS_POWER_OFF 0x0003 /* Notify of system power off */ |
12 | 12 | ||
13 | enum reboot_mode { | ||
14 | REBOOT_COLD = 0, | ||
15 | REBOOT_WARM, | ||
16 | REBOOT_HARD, | ||
17 | REBOOT_SOFT, | ||
18 | REBOOT_GPIO, | ||
19 | }; | ||
20 | extern enum reboot_mode reboot_mode; | ||
21 | |||
22 | enum reboot_type { | ||
23 | BOOT_TRIPLE = 't', | ||
24 | BOOT_KBD = 'k', | ||
25 | BOOT_BIOS = 'b', | ||
26 | BOOT_ACPI = 'a', | ||
27 | BOOT_EFI = 'e', | ||
28 | BOOT_CF9 = 'p', | ||
29 | BOOT_CF9_COND = 'q', | ||
30 | }; | ||
31 | extern enum reboot_type reboot_type; | ||
32 | |||
33 | extern int reboot_default; | ||
34 | extern int reboot_cpu; | ||
35 | extern int reboot_force; | ||
36 | |||
37 | |||
13 | extern int register_reboot_notifier(struct notifier_block *); | 38 | extern int register_reboot_notifier(struct notifier_block *); |
14 | extern int unregister_reboot_notifier(struct notifier_block *); | 39 | extern int unregister_reboot_notifier(struct notifier_block *); |
15 | 40 | ||
@@ -26,7 +51,7 @@ extern void machine_shutdown(void); | |||
26 | struct pt_regs; | 51 | struct pt_regs; |
27 | extern void machine_crash_shutdown(struct pt_regs *); | 52 | extern void machine_crash_shutdown(struct pt_regs *); |
28 | 53 | ||
29 | /* | 54 | /* |
30 | * Architecture independent implemenations of sys_reboot commands. | 55 | * Architecture independent implemenations of sys_reboot commands. |
31 | */ | 56 | */ |
32 | 57 | ||
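
Because the mode and type now live in shared enums, a consumer such as an architecture's machine_restart() can switch on them directly instead of re-parsing strings. A hypothetical consumer, purely to illustrate how the enum is meant to be used (the real command-line parsing stays in kernel/reboot.c):

#include <linux/reboot.h>

static void example_pick_restart_method(void)
{
	switch (reboot_mode) {
	case REBOOT_WARM:
	case REBOOT_SOFT:
		/* skip the full hardware reset where the platform allows it */
		break;
	case REBOOT_GPIO:
		/* board-specific reset line */
		break;
	case REBOOT_COLD:
	case REBOOT_HARD:
	default:
		/* full reset */
		break;
	}
}

Note that the BOOT_* values are the single characters accepted on the x86 reboot= command line ('t', 'k', 'b', 'a', 'e', 'p').
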
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 26806775b11b..adae88f5b0ab 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h | |||
@@ -244,6 +244,11 @@ size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, | |||
244 | size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, | 244 | size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, |
245 | void *buf, size_t buflen); | 245 | void *buf, size_t buflen); |
246 | 246 | ||
247 | size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, | ||
248 | void *buf, size_t buflen, off_t skip); | ||
249 | size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, | ||
250 | void *buf, size_t buflen, off_t skip); | ||
251 | |||
247 | /* | 252 | /* |
248 | * Maximum number of entries that will be allocated in one piece, if | 253 | * Maximum number of entries that will be allocated in one piece, if |
249 | * a list larger than this is required then chaining will be utilized. | 254 | * a list larger than this is required then chaining will be utilized. |
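
The new helpers behave like sg_copy_to_buffer()/sg_copy_from_buffer() but start 'skip' bytes into the scatterlist, which is what the talitos and scsi_debug users in this series need. A minimal sketch of the exported interface (buffer handling is illustrative):

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* copy 'len' bytes out of an sg list, starting 'offset' bytes in */
static int example_sg_peek(struct scatterlist *sgl, unsigned int nents,
			   size_t offset, size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL);
	size_t copied;

	if (!buf)
		return -ENOMEM;

	copied = sg_pcopy_to_buffer(sgl, nents, buf, len, offset);
	/* 'copied' is short if the sg list ends before offset + len bytes */

	kfree(buf);
	return copied == len ? 0 : -EIO;
}
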
diff --git a/include/linux/sched.h b/include/linux/sched.h index cdd5407b37e2..75324d8157e3 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1401,9 +1401,6 @@ struct task_struct { | |||
1401 | } memcg_batch; | 1401 | } memcg_batch; |
1402 | unsigned int memcg_kmem_skip_account; | 1402 | unsigned int memcg_kmem_skip_account; |
1403 | #endif | 1403 | #endif |
1404 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | ||
1405 | atomic_t ptrace_bp_refcnt; | ||
1406 | #endif | ||
1407 | #ifdef CONFIG_UPROBES | 1404 | #ifdef CONFIG_UPROBES |
1408 | struct uprobe_task *utask; | 1405 | struct uprobe_task *utask; |
1409 | #endif | 1406 | #endif |
diff --git a/include/linux/sem.h b/include/linux/sem.h index 53d42650b193..976ce3a19f1b 100644 --- a/include/linux/sem.h +++ b/include/linux/sem.h | |||
@@ -12,10 +12,12 @@ struct task_struct; | |||
12 | struct sem_array { | 12 | struct sem_array { |
13 | struct kern_ipc_perm ____cacheline_aligned_in_smp | 13 | struct kern_ipc_perm ____cacheline_aligned_in_smp |
14 | sem_perm; /* permissions .. see ipc.h */ | 14 | sem_perm; /* permissions .. see ipc.h */ |
15 | time_t sem_otime; /* last semop time */ | ||
16 | time_t sem_ctime; /* last change time */ | 15 | time_t sem_ctime; /* last change time */ |
17 | struct sem *sem_base; /* ptr to first semaphore in array */ | 16 | struct sem *sem_base; /* ptr to first semaphore in array */ |
18 | struct list_head sem_pending; /* pending operations to be processed */ | 17 | struct list_head pending_alter; /* pending operations */ |
18 | /* that alter the array */ | ||
19 | struct list_head pending_const; /* pending complex operations */ | ||
20 | /* that do not alter semvals */ | ||
19 | struct list_head list_id; /* undo requests on this array */ | 21 | struct list_head list_id; /* undo requests on this array */ |
20 | int sem_nsems; /* no. of semaphores in array */ | 22 | int sem_nsems; /* no. of semaphores in array */ |
21 | int complex_count; /* pending complex operations */ | 23 | int complex_count; /* pending complex operations */ |
diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h index ea7168a68081..617c01b8f74a 100644 --- a/include/linux/vexpress.h +++ b/include/linux/vexpress.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #define _LINUX_VEXPRESS_H | 15 | #define _LINUX_VEXPRESS_H |
16 | 16 | ||
17 | #include <linux/device.h> | 17 | #include <linux/device.h> |
18 | #include <linux/reboot.h> | ||
18 | 19 | ||
19 | #define VEXPRESS_SITE_MB 0 | 20 | #define VEXPRESS_SITE_MB 0 |
20 | #define VEXPRESS_SITE_DB1 1 | 21 | #define VEXPRESS_SITE_DB1 1 |
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index dd0a2c810529..4b8a89189a29 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h | |||
@@ -10,12 +10,12 @@ | |||
10 | struct vm_area_struct; /* vma defining user mapping in mm_types.h */ | 10 | struct vm_area_struct; /* vma defining user mapping in mm_types.h */ |
11 | 11 | ||
12 | /* bits in flags of vmalloc's vm_struct below */ | 12 | /* bits in flags of vmalloc's vm_struct below */ |
13 | #define VM_IOREMAP 0x00000001 /* ioremap() and friends */ | 13 | #define VM_IOREMAP 0x00000001 /* ioremap() and friends */ |
14 | #define VM_ALLOC 0x00000002 /* vmalloc() */ | 14 | #define VM_ALLOC 0x00000002 /* vmalloc() */ |
15 | #define VM_MAP 0x00000004 /* vmap()ed pages */ | 15 | #define VM_MAP 0x00000004 /* vmap()ed pages */ |
16 | #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ | 16 | #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ |
17 | #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ | 17 | #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ |
18 | #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */ | 18 | #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ |
19 | /* bits [20..32] reserved for arch specific ioremap internals */ | 19 | /* bits [20..32] reserved for arch specific ioremap internals */ |
20 | 20 | ||
21 | /* | 21 | /* |
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index abfe11787af3..4e198ca1f685 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
@@ -47,11 +47,16 @@ enum wb_reason { | |||
47 | WB_REASON_LAPTOP_TIMER, | 47 | WB_REASON_LAPTOP_TIMER, |
48 | WB_REASON_FREE_MORE_MEM, | 48 | WB_REASON_FREE_MORE_MEM, |
49 | WB_REASON_FS_FREE_SPACE, | 49 | WB_REASON_FS_FREE_SPACE, |
50 | /* | ||
51 | * There is no bdi forker thread any more and works are done | ||
52 | * by emergency worker, however, this is TPs userland visible | ||
53 | * and we'll be exposing exactly the same information, | ||
54 | * so it has a mismatch name. | ||
55 | */ | ||
50 | WB_REASON_FORKER_THREAD, | 56 | WB_REASON_FORKER_THREAD, |
51 | 57 | ||
52 | WB_REASON_MAX, | 58 | WB_REASON_MAX, |
53 | }; | 59 | }; |
54 | extern const char *wb_reason_name[]; | ||
55 | 60 | ||
56 | /* | 61 | /* |
57 | * A control structure which tells the writeback code what to do. These are | 62 | * A control structure which tells the writeback code what to do. These are |
@@ -95,7 +100,6 @@ int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr, | |||
95 | void sync_inodes_sb(struct super_block *); | 100 | void sync_inodes_sb(struct super_block *); |
96 | long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, | 101 | long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, |
97 | enum wb_reason reason); | 102 | enum wb_reason reason); |
98 | long wb_do_writeback(struct bdi_writeback *wb, int force_wait); | ||
99 | void wakeup_flusher_threads(long nr_pages, enum wb_reason reason); | 103 | void wakeup_flusher_threads(long nr_pages, enum wb_reason reason); |
100 | void inode_wait_for_writeback(struct inode *inode); | 104 | void inode_wait_for_writeback(struct inode *inode); |
101 | 105 | ||
diff --git a/include/uapi/linux/msdos_fs.h b/include/uapi/linux/msdos_fs.h index f055e58b3147..e284ff919d6e 100644 --- a/include/uapi/linux/msdos_fs.h +++ b/include/uapi/linux/msdos_fs.h | |||
@@ -104,6 +104,8 @@ struct __fat_dirent { | |||
104 | /* <linux/videotext.h> has used 0x72 ('r') in collision, so skip a few */ | 104 | /* <linux/videotext.h> has used 0x72 ('r') in collision, so skip a few */ |
105 | #define FAT_IOCTL_GET_ATTRIBUTES _IOR('r', 0x10, __u32) | 105 | #define FAT_IOCTL_GET_ATTRIBUTES _IOR('r', 0x10, __u32) |
106 | #define FAT_IOCTL_SET_ATTRIBUTES _IOW('r', 0x11, __u32) | 106 | #define FAT_IOCTL_SET_ATTRIBUTES _IOW('r', 0x11, __u32) |
107 | /*Android kernel has used 0x12, so we use 0x13*/ | ||
108 | #define FAT_IOCTL_GET_VOLUME_ID _IOR('r', 0x13, __u32) | ||
107 | 109 | ||
108 | struct fat_boot_sector { | 110 | struct fat_boot_sector { |
109 | __u8 ignored[3]; /* Boot strap short or near jump */ | 111 | __u8 ignored[3]; /* Boot strap short or near jump */ |
@@ -128,6 +130,10 @@ struct fat_boot_sector { | |||
128 | __u8 drive_number; /* Physical drive number */ | 130 | __u8 drive_number; /* Physical drive number */ |
129 | __u8 state; /* undocumented, but used | 131 | __u8 state; /* undocumented, but used |
130 | for mount state. */ | 132 | for mount state. */ |
133 | __u8 signature; /* extended boot signature */ | ||
134 | __u8 vol_id[4]; /* volume ID */ | ||
135 | __u8 vol_label[11]; /* volume label */ | ||
136 | __u8 fs_type[8]; /* file system type */ | ||
131 | /* other fields are not added here */ | 137 |
132 | } fat16; | 138 | } fat16; |
133 | 139 | ||
@@ -147,6 +153,10 @@ struct fat_boot_sector { | |||
147 | __u8 drive_number; /* Physical drive number */ | 153 | __u8 drive_number; /* Physical drive number */ |
148 | __u8 state; /* undocumented, but used | 154 | __u8 state; /* undocumented, but used |
149 | for mount state. */ | 155 | for mount state. */ |
156 | __u8 signature; /* extended boot signature */ | ||
157 | __u8 vol_id[4]; /* volume ID */ | ||
158 | __u8 vol_label[11]; /* volume label */ | ||
159 | __u8 fs_type[8]; /* file system type */ | ||
150 | /* other fields are not added here */ | 160 |
151 | } fat32; | 161 | } fat32; |
152 | }; | 162 | }; |
diff --git a/init/Kconfig b/init/Kconfig index ea1be003275a..54d3fa5ae723 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -112,10 +112,13 @@ config HAVE_KERNEL_XZ | |||
112 | config HAVE_KERNEL_LZO | 112 | config HAVE_KERNEL_LZO |
113 | bool | 113 | bool |
114 | 114 | ||
115 | config HAVE_KERNEL_LZ4 | ||
116 | bool | ||
117 | |||
115 | choice | 118 | choice |
116 | prompt "Kernel compression mode" | 119 | prompt "Kernel compression mode" |
117 | default KERNEL_GZIP | 120 | default KERNEL_GZIP |
118 | depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO | 121 | depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO || HAVE_KERNEL_LZ4 |
119 | help | 122 | help |
120 | The linux kernel is a kind of self-extracting executable. | 123 | The linux kernel is a kind of self-extracting executable. |
121 | Several compression algorithms are available, which differ | 124 | Several compression algorithms are available, which differ |
@@ -182,6 +185,18 @@ config KERNEL_LZO | |||
182 | size is about 10% bigger than gzip; however its speed | 185 | size is about 10% bigger than gzip; however its speed |
183 | (both compression and decompression) is the fastest. | 186 | (both compression and decompression) is the fastest. |
184 | 187 | ||
188 | config KERNEL_LZ4 | ||
189 | bool "LZ4" | ||
190 | depends on HAVE_KERNEL_LZ4 | ||
191 | help | ||
192 | LZ4 is an LZ77-type compressor with a fixed, byte-oriented encoding. | ||
193 | A preliminary version of LZ4 de/compression tool is available at | ||
194 | <https://code.google.com/p/lz4/>. | ||
195 | |||
196 | Its compression ratio is worse than LZO. The size of the kernel | ||
197 | is about 8% bigger than LZO. But the decompression speed is | ||
198 | faster than LZO. | ||
199 | |||
185 | endchoice | 200 | endchoice |
186 | 201 | ||
187 | config DEFAULT_HOSTNAME | 202 | config DEFAULT_HOSTNAME |
diff --git a/ipc/mqueue.c b/ipc/mqueue.c index e4e47f647446..ae1996d3c539 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c | |||
@@ -823,6 +823,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, | |||
823 | error = ro; | 823 | error = ro; |
824 | goto out; | 824 | goto out; |
825 | } | 825 | } |
826 | audit_inode_parent_hidden(name, root); | ||
826 | filp = do_create(ipc_ns, root->d_inode, | 827 | filp = do_create(ipc_ns, root->d_inode, |
827 | &path, oflag, mode, | 828 | &path, oflag, mode, |
828 | u_attr ? &attr : NULL); | 829 | u_attr ? &attr : NULL); |
@@ -868,6 +869,7 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name) | |||
868 | if (IS_ERR(name)) | 869 | if (IS_ERR(name)) |
869 | return PTR_ERR(name); | 870 | return PTR_ERR(name); |
870 | 871 | ||
872 | audit_inode_parent_hidden(name, mnt->mnt_root); | ||
871 | err = mnt_want_write(mnt); | 873 | err = mnt_want_write(mnt); |
872 | if (err) | 874 | if (err) |
873 | goto out_name; | 875 | goto out_name; |
diff --git a/ipc/msg.c b/ipc/msg.c --- a/ipc/msg.c +++ b/ipc/msg.c | |||
@@ -141,27 +141,23 @@ void __init msg_init(void) | |||
141 | IPC_MSG_IDS, sysvipc_msg_proc_show); | 141 | IPC_MSG_IDS, sysvipc_msg_proc_show); |
142 | } | 142 | } |
143 | 143 | ||
144 | /* | 144 | static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id) |
145 | * msg_lock_(check_) routines are called in the paths where the rw_mutex | ||
146 | * is not held. | ||
147 | */ | ||
148 | static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id) | ||
149 | { | 145 | { |
150 | struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id); | 146 | struct kern_ipc_perm *ipcp = ipc_obtain_object(&msg_ids(ns), id); |
151 | 147 | ||
152 | if (IS_ERR(ipcp)) | 148 | if (IS_ERR(ipcp)) |
153 | return (struct msg_queue *)ipcp; | 149 | return ERR_CAST(ipcp); |
154 | 150 | ||
155 | return container_of(ipcp, struct msg_queue, q_perm); | 151 | return container_of(ipcp, struct msg_queue, q_perm); |
156 | } | 152 | } |
157 | 153 | ||
158 | static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns, | 154 | static inline struct msg_queue *msq_obtain_object_check(struct ipc_namespace *ns, |
159 | int id) | 155 | int id) |
160 | { | 156 | { |
161 | struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id); | 157 | struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&msg_ids(ns), id); |
162 | 158 | ||
163 | if (IS_ERR(ipcp)) | 159 | if (IS_ERR(ipcp)) |
164 | return (struct msg_queue *)ipcp; | 160 | return ERR_CAST(ipcp); |
165 | 161 | ||
166 | return container_of(ipcp, struct msg_queue, q_perm); | 162 | return container_of(ipcp, struct msg_queue, q_perm); |
167 | } | 163 | } |
@@ -199,9 +195,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) | |||
199 | return retval; | 195 | return retval; |
200 | } | 196 | } |
201 | 197 | ||
202 | /* | 198 | /* ipc_addid() locks msq upon success. */ |
203 | * ipc_addid() locks msq | ||
204 | */ | ||
205 | id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); | 199 | id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); |
206 | if (id < 0) { | 200 | if (id < 0) { |
207 | security_msg_queue_free(msq); | 201 | security_msg_queue_free(msq); |
@@ -218,7 +212,8 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) | |||
218 | INIT_LIST_HEAD(&msq->q_receivers); | 212 | INIT_LIST_HEAD(&msq->q_receivers); |
219 | INIT_LIST_HEAD(&msq->q_senders); | 213 | INIT_LIST_HEAD(&msq->q_senders); |
220 | 214 | ||
221 | msg_unlock(msq); | 215 | ipc_unlock_object(&msq->q_perm); |
216 | rcu_read_unlock(); | ||
222 | 217 | ||
223 | return msq->q_perm.id; | 218 | return msq->q_perm.id; |
224 | } | 219 | } |
@@ -408,31 +403,39 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd, | |||
408 | return -EFAULT; | 403 | return -EFAULT; |
409 | } | 404 | } |
410 | 405 | ||
411 | ipcp = ipcctl_pre_down(ns, &msg_ids(ns), msqid, cmd, | 406 | down_write(&msg_ids(ns).rw_mutex); |
412 | &msqid64.msg_perm, msqid64.msg_qbytes); | 407 | rcu_read_lock(); |
413 | if (IS_ERR(ipcp)) | 408 | |
414 | return PTR_ERR(ipcp); | 409 | ipcp = ipcctl_pre_down_nolock(ns, &msg_ids(ns), msqid, cmd, |
410 | &msqid64.msg_perm, msqid64.msg_qbytes); | ||
411 | if (IS_ERR(ipcp)) { | ||
412 | err = PTR_ERR(ipcp); | ||
413 | goto out_unlock1; | ||
414 | } | ||
415 | 415 | ||
416 | msq = container_of(ipcp, struct msg_queue, q_perm); | 416 | msq = container_of(ipcp, struct msg_queue, q_perm); |
417 | 417 | ||
418 | err = security_msg_queue_msgctl(msq, cmd); | 418 | err = security_msg_queue_msgctl(msq, cmd); |
419 | if (err) | 419 | if (err) |
420 | goto out_unlock; | 420 | goto out_unlock1; |
421 | 421 | ||
422 | switch (cmd) { | 422 | switch (cmd) { |
423 | case IPC_RMID: | 423 | case IPC_RMID: |
424 | ipc_lock_object(&msq->q_perm); | ||
425 | /* freeque unlocks the ipc object and rcu */ | ||
424 | freeque(ns, ipcp); | 426 | freeque(ns, ipcp); |
425 | goto out_up; | 427 | goto out_up; |
426 | case IPC_SET: | 428 | case IPC_SET: |
427 | if (msqid64.msg_qbytes > ns->msg_ctlmnb && | 429 | if (msqid64.msg_qbytes > ns->msg_ctlmnb && |
428 | !capable(CAP_SYS_RESOURCE)) { | 430 | !capable(CAP_SYS_RESOURCE)) { |
429 | err = -EPERM; | 431 | err = -EPERM; |
430 | goto out_unlock; | 432 | goto out_unlock1; |
431 | } | 433 | } |
432 | 434 | ||
435 | ipc_lock_object(&msq->q_perm); | ||
433 | err = ipc_update_perm(&msqid64.msg_perm, ipcp); | 436 | err = ipc_update_perm(&msqid64.msg_perm, ipcp); |
434 | if (err) | 437 | if (err) |
435 | goto out_unlock; | 438 | goto out_unlock0; |
436 | 439 | ||
437 | msq->q_qbytes = msqid64.msg_qbytes; | 440 | msq->q_qbytes = msqid64.msg_qbytes; |
438 | 441 | ||
@@ -448,25 +451,23 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd, | |||
448 | break; | 451 | break; |
449 | default: | 452 | default: |
450 | err = -EINVAL; | 453 | err = -EINVAL; |
454 | goto out_unlock1; | ||
451 | } | 455 | } |
452 | out_unlock: | 456 | |
453 | msg_unlock(msq); | 457 | out_unlock0: |
458 | ipc_unlock_object(&msq->q_perm); | ||
459 | out_unlock1: | ||
460 | rcu_read_unlock(); | ||
454 | out_up: | 461 | out_up: |
455 | up_write(&msg_ids(ns).rw_mutex); | 462 | up_write(&msg_ids(ns).rw_mutex); |
456 | return err; | 463 | return err; |
457 | } | 464 | } |
458 | 465 | ||
459 | SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf) | 466 | static int msgctl_nolock(struct ipc_namespace *ns, int msqid, |
467 | int cmd, int version, void __user *buf) | ||
460 | { | 468 | { |
469 | int err; | ||
461 | struct msg_queue *msq; | 470 | struct msg_queue *msq; |
462 | int err, version; | ||
463 | struct ipc_namespace *ns; | ||
464 | |||
465 | if (msqid < 0 || cmd < 0) | ||
466 | return -EINVAL; | ||
467 | |||
468 | version = ipc_parse_version(&cmd); | ||
469 | ns = current->nsproxy->ipc_ns; | ||
470 | 471 | ||
471 | switch (cmd) { | 472 | switch (cmd) { |
472 | case IPC_INFO: | 473 | case IPC_INFO: |
@@ -477,6 +478,7 @@ SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf) | |||
477 | 478 | ||
478 | if (!buf) | 479 | if (!buf) |
479 | return -EFAULT; | 480 | return -EFAULT; |
481 | |||
480 | /* | 482 | /* |
481 | * We must not return kernel stack data. | 483 | * We must not return kernel stack data. |
482 | * due to padding, it's not enough | 484 | * due to padding, it's not enough |
@@ -508,7 +510,8 @@ SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf) | |||
508 | return -EFAULT; | 510 | return -EFAULT; |
509 | return (max_id < 0) ? 0 : max_id; | 511 | return (max_id < 0) ? 0 : max_id; |
510 | } | 512 | } |
511 | case MSG_STAT: /* msqid is an index rather than a msg queue id */ | 513 | |
514 | case MSG_STAT: | ||
512 | case IPC_STAT: | 515 | case IPC_STAT: |
513 | { | 516 | { |
514 | struct msqid64_ds tbuf; | 517 | struct msqid64_ds tbuf; |
@@ -517,17 +520,25 @@ SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf) | |||
517 | if (!buf) | 520 | if (!buf) |
518 | return -EFAULT; | 521 | return -EFAULT; |
519 | 522 | ||
523 | memset(&tbuf, 0, sizeof(tbuf)); | ||
524 | |||
525 | rcu_read_lock(); | ||
520 | if (cmd == MSG_STAT) { | 526 | if (cmd == MSG_STAT) { |
521 | msq = msg_lock(ns, msqid); | 527 | msq = msq_obtain_object(ns, msqid); |
522 | if (IS_ERR(msq)) | 528 | if (IS_ERR(msq)) { |
523 | return PTR_ERR(msq); | 529 | err = PTR_ERR(msq); |
530 | goto out_unlock; | ||
531 | } | ||
524 | success_return = msq->q_perm.id; | 532 | success_return = msq->q_perm.id; |
525 | } else { | 533 | } else { |
526 | msq = msg_lock_check(ns, msqid); | 534 | msq = msq_obtain_object_check(ns, msqid); |
527 | if (IS_ERR(msq)) | 535 | if (IS_ERR(msq)) { |
528 | return PTR_ERR(msq); | 536 | err = PTR_ERR(msq); |
537 | goto out_unlock; | ||
538 | } | ||
529 | success_return = 0; | 539 | success_return = 0; |
530 | } | 540 | } |
541 | |||
531 | err = -EACCES; | 542 | err = -EACCES; |
532 | if (ipcperms(ns, &msq->q_perm, S_IRUGO)) | 543 | if (ipcperms(ns, &msq->q_perm, S_IRUGO)) |
533 | goto out_unlock; | 544 | goto out_unlock; |
@@ -536,8 +547,6 @@ SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf) | |||
536 | if (err) | 547 | if (err) |
537 | goto out_unlock; | 548 | goto out_unlock; |
538 | 549 | ||
539 | memset(&tbuf, 0, sizeof(tbuf)); | ||
540 | |||
541 | kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm); | 550 | kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm); |
542 | tbuf.msg_stime = msq->q_stime; | 551 | tbuf.msg_stime = msq->q_stime; |
543 | tbuf.msg_rtime = msq->q_rtime; | 552 | tbuf.msg_rtime = msq->q_rtime; |
@@ -547,24 +556,48 @@ SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf) | |||
547 | tbuf.msg_qbytes = msq->q_qbytes; | 556 | tbuf.msg_qbytes = msq->q_qbytes; |
548 | tbuf.msg_lspid = msq->q_lspid; | 557 | tbuf.msg_lspid = msq->q_lspid; |
549 | tbuf.msg_lrpid = msq->q_lrpid; | 558 | tbuf.msg_lrpid = msq->q_lrpid; |
550 | msg_unlock(msq); | 559 | rcu_read_unlock(); |
560 | |||
551 | if (copy_msqid_to_user(buf, &tbuf, version)) | 561 | if (copy_msqid_to_user(buf, &tbuf, version)) |
552 | return -EFAULT; | 562 | return -EFAULT; |
553 | return success_return; | 563 | return success_return; |
554 | } | 564 | } |
555 | case IPC_SET: | 565 | |
556 | case IPC_RMID: | ||
557 | err = msgctl_down(ns, msqid, cmd, buf, version); | ||
558 | return err; | ||
559 | default: | 566 | default: |
560 | return -EINVAL; | 567 | return -EINVAL; |
561 | } | 568 | } |
562 | 569 | ||
570 | return err; | ||
563 | out_unlock: | 571 | out_unlock: |
564 | msg_unlock(msq); | 572 | rcu_read_unlock(); |
565 | return err; | 573 | return err; |
566 | } | 574 | } |
567 | 575 | ||
576 | SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf) | ||
577 | { | ||
578 | int version; | ||
579 | struct ipc_namespace *ns; | ||
580 | |||
581 | if (msqid < 0 || cmd < 0) | ||
582 | return -EINVAL; | ||
583 | |||
584 | version = ipc_parse_version(&cmd); | ||
585 | ns = current->nsproxy->ipc_ns; | ||
586 | |||
587 | switch (cmd) { | ||
588 | case IPC_INFO: | ||
589 | case MSG_INFO: | ||
590 | case MSG_STAT: /* msqid is an index rather than a msg queue id */ | ||
591 | case IPC_STAT: | ||
592 | return msgctl_nolock(ns, msqid, cmd, version, buf); | ||
593 | case IPC_SET: | ||
594 | case IPC_RMID: | ||
595 | return msgctl_down(ns, msqid, cmd, buf, version); | ||
596 | default: | ||
597 | return -EINVAL; | ||
598 | } | ||
599 | } | ||
600 | |||
568 | static int testmsg(struct msg_msg *msg, long type, int mode) | 601 | static int testmsg(struct msg_msg *msg, long type, int mode) |
569 | { | 602 | { |
570 | switch(mode) | 603 | switch(mode) |
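
The msgctl() rework above splits the syscall into a lock-free read path (msgctl_nolock) and a write path (msgctl_down) that takes the ids rw_mutex. A hedged stand-alone sketch of that dispatch shape; the enum values and stub handlers are invented for illustration, only the branching mirrors the code above:

#include <errno.h>
#include <stdio.h>

enum cmd { INFO_CMD, STAT_CMD, SET_CMD, RMID_CMD };

static int ctl_nolock(int c) { (void)c; return 0; }  /* RCU-only lookup above */
static int ctl_down(int c)   { (void)c; return 0; }  /* takes the rw_mutex above */

static int ctl_dispatch(int id, int c)
{
	if (id < 0 || c < 0)
		return -EINVAL;

	switch (c) {
	case INFO_CMD:
	case STAT_CMD:
		return ctl_nolock(c);         /* read-only: no heavy locking */
	case SET_CMD:
	case RMID_CMD:
		return ctl_down(c);           /* destructive: serialized path */
	default:
		return -EINVAL;
	}
}

int main(void)
{
	printf("%d %d\n", ctl_dispatch(0, STAT_CMD), ctl_dispatch(0, 99));
	return 0;
}
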
@@ -640,10 +673,11 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, | |||
640 | msg->m_type = mtype; | 673 | msg->m_type = mtype; |
641 | msg->m_ts = msgsz; | 674 | msg->m_ts = msgsz; |
642 | 675 | ||
643 | msq = msg_lock_check(ns, msqid); | 676 | rcu_read_lock(); |
677 | msq = msq_obtain_object_check(ns, msqid); | ||
644 | if (IS_ERR(msq)) { | 678 | if (IS_ERR(msq)) { |
645 | err = PTR_ERR(msq); | 679 | err = PTR_ERR(msq); |
646 | goto out_free; | 680 | goto out_unlock1; |
647 | } | 681 | } |
648 | 682 | ||
649 | for (;;) { | 683 | for (;;) { |
@@ -651,11 +685,11 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, | |||
651 | 685 | ||
652 | err = -EACCES; | 686 | err = -EACCES; |
653 | if (ipcperms(ns, &msq->q_perm, S_IWUGO)) | 687 | if (ipcperms(ns, &msq->q_perm, S_IWUGO)) |
654 | goto out_unlock_free; | 688 | goto out_unlock1; |
655 | 689 | ||
656 | err = security_msg_queue_msgsnd(msq, msg, msgflg); | 690 | err = security_msg_queue_msgsnd(msq, msg, msgflg); |
657 | if (err) | 691 | if (err) |
658 | goto out_unlock_free; | 692 | goto out_unlock1; |
659 | 693 | ||
660 | if (msgsz + msq->q_cbytes <= msq->q_qbytes && | 694 | if (msgsz + msq->q_cbytes <= msq->q_qbytes && |
661 | 1 + msq->q_qnum <= msq->q_qbytes) { | 695 | 1 + msq->q_qnum <= msq->q_qbytes) { |
@@ -665,32 +699,41 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, | |||
665 | /* queue full, wait: */ | 699 | /* queue full, wait: */ |
666 | if (msgflg & IPC_NOWAIT) { | 700 | if (msgflg & IPC_NOWAIT) { |
667 | err = -EAGAIN; | 701 | err = -EAGAIN; |
668 | goto out_unlock_free; | 702 | goto out_unlock1; |
669 | } | 703 | } |
704 | |||
705 | ipc_lock_object(&msq->q_perm); | ||
670 | ss_add(msq, &s); | 706 | ss_add(msq, &s); |
671 | 707 | ||
672 | if (!ipc_rcu_getref(msq)) { | 708 | if (!ipc_rcu_getref(msq)) { |
673 | err = -EIDRM; | 709 | err = -EIDRM; |
674 | goto out_unlock_free; | 710 | goto out_unlock0; |
675 | } | 711 | } |
676 | 712 | ||
677 | msg_unlock(msq); | 713 | ipc_unlock_object(&msq->q_perm); |
714 | rcu_read_unlock(); | ||
678 | schedule(); | 715 | schedule(); |
679 | 716 | ||
680 | ipc_lock_by_ptr(&msq->q_perm); | 717 | rcu_read_lock(); |
718 | ipc_lock_object(&msq->q_perm); | ||
719 | |||
681 | ipc_rcu_putref(msq); | 720 | ipc_rcu_putref(msq); |
682 | if (msq->q_perm.deleted) { | 721 | if (msq->q_perm.deleted) { |
683 | err = -EIDRM; | 722 | err = -EIDRM; |
684 | goto out_unlock_free; | 723 | goto out_unlock0; |
685 | } | 724 | } |
725 | |||
686 | ss_del(&s); | 726 | ss_del(&s); |
687 | 727 | ||
688 | if (signal_pending(current)) { | 728 | if (signal_pending(current)) { |
689 | err = -ERESTARTNOHAND; | 729 | err = -ERESTARTNOHAND; |
690 | goto out_unlock_free; | 730 | goto out_unlock0; |
691 | } | 731 | } |
732 | |||
733 | ipc_unlock_object(&msq->q_perm); | ||
692 | } | 734 | } |
693 | 735 | ||
736 | ipc_lock_object(&msq->q_perm); | ||
694 | msq->q_lspid = task_tgid_vnr(current); | 737 | msq->q_lspid = task_tgid_vnr(current); |
695 | msq->q_stime = get_seconds(); | 738 | msq->q_stime = get_seconds(); |
696 | 739 | ||
@@ -706,9 +749,10 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, | |||
706 | err = 0; | 749 | err = 0; |
707 | msg = NULL; | 750 | msg = NULL; |
708 | 751 | ||
709 | out_unlock_free: | 752 | out_unlock0: |
710 | msg_unlock(msq); | 753 | ipc_unlock_object(&msq->q_perm); |
711 | out_free: | 754 | out_unlock1: |
755 | rcu_read_unlock(); | ||
712 | if (msg != NULL) | 756 | if (msg != NULL) |
713 | free_msg(msg); | 757 | free_msg(msg); |
714 | return err; | 758 | return err; |
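
When the queue is full, do_msgsnd() above takes a reference, drops both locks, sleeps, then re-locks and checks q_perm.deleted before touching the queue again. A rough user-space sketch of that sleep-and-revalidate pattern; the refcount and deleted flag are local stand-ins for ipc_rcu_getref()/ipc_rcu_putref() and the kernel's deleted marker:

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t lock;              /* stands in for q_perm.lock */
	atomic_int refcount;               /* stands in for the ipc_rcu refcount */
	bool deleted;                      /* stands in for q_perm.deleted */
};

static int wait_for_space(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	atomic_fetch_add(&q->refcount, 1); /* keep q alive across the sleep */
	pthread_mutex_unlock(&q->lock);

	/* schedule() would run here; the queue may be removed meanwhile. */

	pthread_mutex_lock(&q->lock);
	atomic_fetch_sub(&q->refcount, 1);
	if (q->deleted) {
		pthread_mutex_unlock(&q->lock);
		return -EIDRM;             /* queue vanished while we slept */
	}
	pthread_mutex_unlock(&q->lock);
	return 0;                          /* safe to retry the send */
}

int main(void)
{
	struct queue q = { PTHREAD_MUTEX_INITIALIZER, 1, false };

	printf("wait_for_space: %d\n", wait_for_space(&q));
	return 0;
}
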
@@ -816,21 +860,19 @@ static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode) | |||
816 | return ERR_PTR(-EAGAIN); | 860 | return ERR_PTR(-EAGAIN); |
817 | } | 861 | } |
818 | 862 | ||
819 | 863 | long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg, | |
820 | long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, | ||
821 | int msgflg, | ||
822 | long (*msg_handler)(void __user *, struct msg_msg *, size_t)) | 864 | long (*msg_handler)(void __user *, struct msg_msg *, size_t)) |
823 | { | 865 | { |
824 | struct msg_queue *msq; | ||
825 | struct msg_msg *msg; | ||
826 | int mode; | 866 | int mode; |
867 | struct msg_queue *msq; | ||
827 | struct ipc_namespace *ns; | 868 | struct ipc_namespace *ns; |
828 | struct msg_msg *copy = NULL; | 869 | struct msg_msg *msg, *copy = NULL; |
829 | 870 | ||
830 | ns = current->nsproxy->ipc_ns; | 871 | ns = current->nsproxy->ipc_ns; |
831 | 872 | ||
832 | if (msqid < 0 || (long) bufsz < 0) | 873 | if (msqid < 0 || (long) bufsz < 0) |
833 | return -EINVAL; | 874 | return -EINVAL; |
875 | |||
834 | if (msgflg & MSG_COPY) { | 876 | if (msgflg & MSG_COPY) { |
835 | copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax)); | 877 | copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax)); |
836 | if (IS_ERR(copy)) | 878 | if (IS_ERR(copy)) |
@@ -838,8 +880,10 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, | |||
838 | } | 880 | } |
839 | mode = convert_mode(&msgtyp, msgflg); | 881 | mode = convert_mode(&msgtyp, msgflg); |
840 | 882 | ||
841 | msq = msg_lock_check(ns, msqid); | 883 | rcu_read_lock(); |
884 | msq = msq_obtain_object_check(ns, msqid); | ||
842 | if (IS_ERR(msq)) { | 885 | if (IS_ERR(msq)) { |
886 | rcu_read_unlock(); | ||
843 | free_copy(copy); | 887 | free_copy(copy); |
844 | return PTR_ERR(msq); | 888 | return PTR_ERR(msq); |
845 | } | 889 | } |
@@ -849,10 +893,10 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, | |||
849 | 893 | ||
850 | msg = ERR_PTR(-EACCES); | 894 | msg = ERR_PTR(-EACCES); |
851 | if (ipcperms(ns, &msq->q_perm, S_IRUGO)) | 895 | if (ipcperms(ns, &msq->q_perm, S_IRUGO)) |
852 | goto out_unlock; | 896 | goto out_unlock1; |
853 | 897 | ||
898 | ipc_lock_object(&msq->q_perm); | ||
854 | msg = find_msg(msq, &msgtyp, mode); | 899 | msg = find_msg(msq, &msgtyp, mode); |
855 | |||
856 | if (!IS_ERR(msg)) { | 900 | if (!IS_ERR(msg)) { |
857 | /* | 901 | /* |
858 | * Found a suitable message. | 902 | * Found a suitable message. |
@@ -860,7 +904,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, | |||
860 | */ | 904 | */ |
861 | if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) { | 905 | if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) { |
862 | msg = ERR_PTR(-E2BIG); | 906 | msg = ERR_PTR(-E2BIG); |
863 | goto out_unlock; | 907 | goto out_unlock0; |
864 | } | 908 | } |
865 | /* | 909 | /* |
866 | * If we are copying, then do not unlink message and do | 910 | * If we are copying, then do not unlink message and do |
@@ -868,8 +912,9 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, | |||
868 | */ | 912 | */ |
869 | if (msgflg & MSG_COPY) { | 913 | if (msgflg & MSG_COPY) { |
870 | msg = copy_msg(msg, copy); | 914 | msg = copy_msg(msg, copy); |
871 | goto out_unlock; | 915 | goto out_unlock0; |
872 | } | 916 | } |
917 | |||
873 | list_del(&msg->m_list); | 918 | list_del(&msg->m_list); |
874 | msq->q_qnum--; | 919 | msq->q_qnum--; |
875 | msq->q_rtime = get_seconds(); | 920 | msq->q_rtime = get_seconds(); |
@@ -878,14 +923,16 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, | |||
878 | atomic_sub(msg->m_ts, &ns->msg_bytes); | 923 | atomic_sub(msg->m_ts, &ns->msg_bytes); |
879 | atomic_dec(&ns->msg_hdrs); | 924 | atomic_dec(&ns->msg_hdrs); |
880 | ss_wakeup(&msq->q_senders, 0); | 925 | ss_wakeup(&msq->q_senders, 0); |
881 | msg_unlock(msq); | 926 | |
882 | break; | 927 | goto out_unlock0; |
883 | } | 928 | } |
929 | |||
884 | /* No message waiting. Wait for a message */ | 930 | /* No message waiting. Wait for a message */ |
885 | if (msgflg & IPC_NOWAIT) { | 931 | if (msgflg & IPC_NOWAIT) { |
886 | msg = ERR_PTR(-ENOMSG); | 932 | msg = ERR_PTR(-ENOMSG); |
887 | goto out_unlock; | 933 | goto out_unlock0; |
888 | } | 934 | } |
935 | |||
889 | list_add_tail(&msr_d.r_list, &msq->q_receivers); | 936 | list_add_tail(&msr_d.r_list, &msq->q_receivers); |
890 | msr_d.r_tsk = current; | 937 | msr_d.r_tsk = current; |
891 | msr_d.r_msgtype = msgtyp; | 938 | msr_d.r_msgtype = msgtyp; |
@@ -896,8 +943,9 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, | |||
896 | msr_d.r_maxsize = bufsz; | 943 | msr_d.r_maxsize = bufsz; |
897 | msr_d.r_msg = ERR_PTR(-EAGAIN); | 944 | msr_d.r_msg = ERR_PTR(-EAGAIN); |
898 | current->state = TASK_INTERRUPTIBLE; | 945 | current->state = TASK_INTERRUPTIBLE; |
899 | msg_unlock(msq); | ||
900 | 946 | ||
947 | ipc_unlock_object(&msq->q_perm); | ||
948 | rcu_read_unlock(); | ||
901 | schedule(); | 949 | schedule(); |
902 | 950 | ||
903 | /* Lockless receive, part 1: | 951 | /* Lockless receive, part 1: |
@@ -908,7 +956,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, | |||
908 | * Prior to destruction, expunge_all(-EIDRM) changes r_msg. | 956 | * Prior to destruction, expunge_all(-EIDRM) changes r_msg. |
909 | * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed. | 957 | * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed. |
910 | * rcu_read_lock() prevents preemption between reading r_msg | 958 | * rcu_read_lock() prevents preemption between reading r_msg |
911 | * and the spin_lock() inside ipc_lock_by_ptr(). | 959 | * and acquiring the q_perm.lock in ipc_lock_object(). |
912 | */ | 960 | */ |
913 | rcu_read_lock(); | 961 | rcu_read_lock(); |
914 | 962 | ||
@@ -927,32 +975,34 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, | |||
927 | * If there is a message or an error then accept it without | 975 | * If there is a message or an error then accept it without |
928 | * locking. | 976 | * locking. |
929 | */ | 977 | */ |
930 | if (msg != ERR_PTR(-EAGAIN)) { | 978 | if (msg != ERR_PTR(-EAGAIN)) |
931 | rcu_read_unlock(); | 979 | goto out_unlock1; |
932 | break; | ||
933 | } | ||
934 | 980 | ||
935 | /* Lockless receive, part 3: | 981 | /* Lockless receive, part 3: |
936 | * Acquire the queue spinlock. | 982 | * Acquire the queue spinlock. |
937 | */ | 983 | */ |
938 | ipc_lock_by_ptr(&msq->q_perm); | 984 | ipc_lock_object(&msq->q_perm); |
939 | rcu_read_unlock(); | ||
940 | 985 | ||
941 | /* Lockless receive, part 4: | 986 | /* Lockless receive, part 4: |
942 | * Repeat test after acquiring the spinlock. | 987 | * Repeat test after acquiring the spinlock. |
943 | */ | 988 | */ |
944 | msg = (struct msg_msg*)msr_d.r_msg; | 989 | msg = (struct msg_msg*)msr_d.r_msg; |
945 | if (msg != ERR_PTR(-EAGAIN)) | 990 | if (msg != ERR_PTR(-EAGAIN)) |
946 | goto out_unlock; | 991 | goto out_unlock0; |
947 | 992 | ||
948 | list_del(&msr_d.r_list); | 993 | list_del(&msr_d.r_list); |
949 | if (signal_pending(current)) { | 994 | if (signal_pending(current)) { |
950 | msg = ERR_PTR(-ERESTARTNOHAND); | 995 | msg = ERR_PTR(-ERESTARTNOHAND); |
951 | out_unlock: | 996 | goto out_unlock0; |
952 | msg_unlock(msq); | ||
953 | break; | ||
954 | } | 997 | } |
998 | |||
999 | ipc_unlock_object(&msq->q_perm); | ||
955 | } | 1000 | } |
1001 | |||
1002 | out_unlock0: | ||
1003 | ipc_unlock_object(&msq->q_perm); | ||
1004 | out_unlock1: | ||
1005 | rcu_read_unlock(); | ||
956 | if (IS_ERR(msg)) { | 1006 | if (IS_ERR(msg)) { |
957 | free_copy(copy); | 1007 | free_copy(copy); |
958 | return PTR_ERR(msg); | 1008 | return PTR_ERR(msg); |
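
The lockless-receive parts above boil down to: read r_msg without the queue lock, accept the result if the waker already completed, and only fall back to taking the lock and re-reading when the -EAGAIN marker is still in place. A hedged user-space sketch of that handshake, with an atomic pointer and a mutex standing in for msr_d.r_msg and q_perm.lock:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define MSG_PENDING ((void *)-1)          /* stands in for ERR_PTR(-EAGAIN) */

struct receiver {
	pthread_mutex_t qlock;            /* stands in for q_perm.lock */
	_Atomic(void *) r_msg;            /* stands in for msr_d.r_msg */
};

static void *receive_after_wakeup(struct receiver *r)
{
	void *msg = atomic_load(&r->r_msg);

	if (msg != MSG_PENDING)           /* wakeup finished: take it locklessly */
		return msg;

	pthread_mutex_lock(&r->qlock);    /* otherwise serialize with the waker */
	msg = atomic_load(&r->r_msg);     /* and repeat the test under the lock */
	pthread_mutex_unlock(&r->qlock);

	return msg == MSG_PENDING ? NULL : msg;
}

int main(void)
{
	struct receiver r = { PTHREAD_MUTEX_INITIALIZER, MSG_PENDING };
	int payload = 7;

	atomic_store(&r.r_msg, &payload); /* a waker published a message */
	printf("received %d\n", *(int *)receive_after_wakeup(&r));
	return 0;
}
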
@@ -95,8 +95,12 @@ struct sem { | |||
95 | int semval; /* current value */ | 95 | int semval; /* current value */ |
96 | int sempid; /* pid of last operation */ | 96 | int sempid; /* pid of last operation */ |
97 | spinlock_t lock; /* spinlock for fine-grained semtimedop */ | 97 | spinlock_t lock; /* spinlock for fine-grained semtimedop */ |
98 | struct list_head sem_pending; /* pending single-sop operations */ | 98 | struct list_head pending_alter; /* pending single-sop operations */ |
99 | }; | 99 | /* that alter the semaphore */ |
100 | struct list_head pending_const; /* pending single-sop operations */ | ||
101 | /* that do not alter the semaphore*/ | ||
102 | time_t sem_otime; /* candidate for sem_otime */ | ||
103 | } ____cacheline_aligned_in_smp; | ||
100 | 104 | ||
101 | /* One queue for each sleeping process in the system. */ | 105 | /* One queue for each sleeping process in the system. */ |
102 | struct sem_queue { | 106 | struct sem_queue { |
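
The struct change above gives every semaphore its own pair of wait lists (operations that alter the value vs. wait-for-zero) plus a private otime, each padded out to its own cache line. A compact stand-alone rendering of that layout, with a bare list node replacing the kernel's list_head (illustrative only):

#include <stdio.h>
#include <time.h>

struct node { struct node *next, *prev; };

struct sem_sketch {
	int semval;                    /* current value */
	int sempid;                    /* pid of last operation */
	struct node pending_alter;     /* sleeping ops that change semval */
	struct node pending_const;     /* sleeping wait-for-zero ops */
	time_t otime;                  /* per-semaphore candidate for sem_otime */
} __attribute__((aligned(64)));        /* mirrors ____cacheline_aligned_in_smp */

int main(void)
{
	printf("per-semaphore footprint: %zu bytes\n", sizeof(struct sem_sketch));
	return 0;
}
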
@@ -150,12 +154,15 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it); | |||
150 | #define SEMOPM_FAST 64 /* ~ 372 bytes on stack */ | 154 | #define SEMOPM_FAST 64 /* ~ 372 bytes on stack */ |
151 | 155 | ||
152 | /* | 156 | /* |
153 | * linked list protection: | 157 | * Locking: |
154 | * sem_undo.id_next, | 158 | * sem_undo.id_next, |
155 | * sem_array.sem_pending{,last}, | 159 | * sem_array.complex_count, |
156 | * sem_array.sem_undo: sem_lock() for read/write | 160 | * sem_array.pending{_alter,_const}, |
161 | * sem_array.sem_undo: global sem_lock() for read/write | ||
157 | * sem_undo.proc_next: only "current" is allowed to read/write that field. | 162 | * sem_undo.proc_next: only "current" is allowed to read/write that field. |
158 | * | 163 | * |
164 | * sem_array.sem_base[i].pending_{const,alter}: | ||
165 | * global or semaphore sem_lock() for read/write | ||
159 | */ | 166 | */ |
160 | 167 | ||
161 | #define sc_semmsl sem_ctls[0] | 168 | #define sc_semmsl sem_ctls[0] |
@@ -189,6 +196,53 @@ void __init sem_init (void) | |||
189 | IPC_SEM_IDS, sysvipc_sem_proc_show); | 196 | IPC_SEM_IDS, sysvipc_sem_proc_show); |
190 | } | 197 | } |
191 | 198 | ||
199 | /** | ||
200 | * unmerge_queues - unmerge queues, if possible. | ||
201 | * @sma: semaphore array | ||
202 | * | ||
203 | * The function unmerges the wait queues if complex_count is 0. | ||
204 | * It must be called prior to dropping the global semaphore array lock. | ||
205 | */ | ||
206 | static void unmerge_queues(struct sem_array *sma) | ||
207 | { | ||
208 | struct sem_queue *q, *tq; | ||
209 | |||
210 | /* complex operations still around? */ | ||
211 | if (sma->complex_count) | ||
212 | return; | ||
213 | /* | ||
214 | * We will switch back to simple mode. | ||
215 | * Move all pending operations back into the per-semaphore | ||
216 | * queues. | ||
217 | */ | ||
218 | list_for_each_entry_safe(q, tq, &sma->pending_alter, list) { | ||
219 | struct sem *curr; | ||
220 | curr = &sma->sem_base[q->sops[0].sem_num]; | ||
221 | |||
222 | list_add_tail(&q->list, &curr->pending_alter); | ||
223 | } | ||
224 | INIT_LIST_HEAD(&sma->pending_alter); | ||
225 | } | ||
226 | |||
227 | /** | ||
228 | * merge_queues - Merge single semop queues into global queue | ||
229 | * @sma: semaphore array | ||
230 | * | ||
231 | * This function merges all per-semaphore queues into the global queue. | ||
232 | * It is necessary to achieve FIFO ordering for the pending single-sop | ||
233 | * operations when a multi-semop operation must sleep. | ||
234 | * Only the alter operations must be moved, the const operations can stay. | ||
235 | */ | ||
236 | static void merge_queues(struct sem_array *sma) | ||
237 | { | ||
238 | int i; | ||
239 | for (i = 0; i < sma->sem_nsems; i++) { | ||
240 | struct sem *sem = sma->sem_base + i; | ||
241 | |||
242 | list_splice_init(&sem->pending_alter, &sma->pending_alter); | ||
243 | } | ||
244 | } | ||
245 | |||
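
merge_queues()/unmerge_queues() above just move waiters between the per-semaphore alter lists and one global list so that FIFO ordering survives while complex operations are pending. A rough sketch of that splice with a minimal intrusive list; the helpers below are simplified stand-ins for the kernel's list.h:

#include <stdio.h>

struct list { struct list *next, *prev; };

static void list_init(struct list *l)        { l->next = l->prev = l; }
static int  list_empty(const struct list *l) { return l->next == l; }

static void list_add_tail(struct list *n, struct list *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* move every waiter from @from to the tail of @to (merge_queues direction) */
static void splice_tail(struct list *from, struct list *to)
{
	while (!list_empty(from)) {
		struct list *n = from->next;

		n->prev->next = n->next;      /* unlink from the per-sem list */
		n->next->prev = n->prev;
		list_add_tail(n, to);         /* keep FIFO order on the global list */
	}
}

int main(void)
{
	struct list global, per_sem, w1, w2;

	list_init(&global);
	list_init(&per_sem);
	list_add_tail(&w1, &per_sem);
	list_add_tail(&w2, &per_sem);

	splice_tail(&per_sem, &global);
	printf("global empty: %d, per-sem empty: %d\n",
	       list_empty(&global), list_empty(&per_sem));
	return 0;
}
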
192 | /* | 246 | /* |
193 | * If the request contains only one semaphore operation, and there are | 247 | * If the request contains only one semaphore operation, and there are |
194 | * no complex transactions pending, lock only the semaphore involved. | 248 | * no complex transactions pending, lock only the semaphore involved. |
@@ -246,7 +300,7 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, | |||
246 | * their critical section while the array lock is held. | 300 | * their critical section while the array lock is held. |
247 | */ | 301 | */ |
248 | lock_array: | 302 | lock_array: |
249 | spin_lock(&sma->sem_perm.lock); | 303 | ipc_lock_object(&sma->sem_perm); |
250 | for (i = 0; i < sma->sem_nsems; i++) { | 304 | for (i = 0; i < sma->sem_nsems; i++) { |
251 | struct sem *sem = sma->sem_base + i; | 305 | struct sem *sem = sma->sem_base + i; |
252 | spin_unlock_wait(&sem->lock); | 306 | spin_unlock_wait(&sem->lock); |
@@ -259,7 +313,8 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, | |||
259 | static inline void sem_unlock(struct sem_array *sma, int locknum) | 313 | static inline void sem_unlock(struct sem_array *sma, int locknum) |
260 | { | 314 | { |
261 | if (locknum == -1) { | 315 | if (locknum == -1) { |
262 | spin_unlock(&sma->sem_perm.lock); | 316 | unmerge_queues(sma); |
317 | ipc_unlock_object(&sma->sem_perm); | ||
263 | } else { | 318 | } else { |
264 | struct sem *sem = sma->sem_base + locknum; | 319 | struct sem *sem = sma->sem_base + locknum; |
265 | spin_unlock(&sem->lock); | 320 | spin_unlock(&sem->lock); |
@@ -337,7 +392,7 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) | |||
337 | * Without the check/retry algorithm a lockless wakeup is possible: | 392 | * Without the check/retry algorithm a lockless wakeup is possible: |
338 | * - queue.status is initialized to -EINTR before blocking. | 393 | * - queue.status is initialized to -EINTR before blocking. |
339 | * - wakeup is performed by | 394 | * - wakeup is performed by |
340 | * * unlinking the queue entry from sma->sem_pending | 395 | * * unlinking the queue entry from the pending list |
341 | * * setting queue.status to IN_WAKEUP | 396 | * * setting queue.status to IN_WAKEUP |
342 | * This is the notification for the blocked thread that a | 397 | * This is the notification for the blocked thread that a |
343 | * result value is imminent. | 398 | * result value is imminent. |
@@ -418,12 +473,14 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) | |||
418 | sma->sem_base = (struct sem *) &sma[1]; | 473 | sma->sem_base = (struct sem *) &sma[1]; |
419 | 474 | ||
420 | for (i = 0; i < nsems; i++) { | 475 | for (i = 0; i < nsems; i++) { |
421 | INIT_LIST_HEAD(&sma->sem_base[i].sem_pending); | 476 | INIT_LIST_HEAD(&sma->sem_base[i].pending_alter); |
477 | INIT_LIST_HEAD(&sma->sem_base[i].pending_const); | ||
422 | spin_lock_init(&sma->sem_base[i].lock); | 478 | spin_lock_init(&sma->sem_base[i].lock); |
423 | } | 479 | } |
424 | 480 | ||
425 | sma->complex_count = 0; | 481 | sma->complex_count = 0; |
426 | INIT_LIST_HEAD(&sma->sem_pending); | 482 | INIT_LIST_HEAD(&sma->pending_alter); |
483 | INIT_LIST_HEAD(&sma->pending_const); | ||
427 | INIT_LIST_HEAD(&sma->list_id); | 484 | INIT_LIST_HEAD(&sma->list_id); |
428 | sma->sem_nsems = nsems; | 485 | sma->sem_nsems = nsems; |
429 | sma->sem_ctime = get_seconds(); | 486 | sma->sem_ctime = get_seconds(); |
@@ -482,12 +539,19 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) | |||
482 | return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params); | 539 | return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params); |
483 | } | 540 | } |
484 | 541 | ||
485 | /* | 542 | /** perform_atomic_semop - Perform (if possible) a semaphore operation |
486 | * Determine whether a sequence of semaphore operations would succeed | 543 | * @sma: semaphore array |
487 | * all at once. Return 0 if yes, 1 if need to sleep, else return error code. | 544 | * @sops: array with operations that should be checked |
545 | * @nsops: number of sops | ||
546 | * @un: undo array | ||
547 | * @pid: pid that did the change | ||
548 | * | ||
549 | * Returns 0 if the operation was possible. | ||
550 | * Returns 1 if the operation is impossible, the caller must sleep. | ||
551 | * Negative values are error codes. | ||
488 | */ | 552 | */ |
489 | 553 | ||
490 | static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops, | 554 | static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops, |
491 | int nsops, struct sem_undo *un, int pid) | 555 | int nsops, struct sem_undo *un, int pid) |
492 | { | 556 | { |
493 | int result, sem_op; | 557 | int result, sem_op; |
@@ -609,60 +673,132 @@ static void unlink_queue(struct sem_array *sma, struct sem_queue *q) | |||
609 | * update_queue is O(N^2) when it restarts scanning the whole queue of | 673 | * update_queue is O(N^2) when it restarts scanning the whole queue of |
610 | * waiting operations. Therefore this function checks if the restart is | 674 | * waiting operations. Therefore this function checks if the restart is |
611 | * really necessary. It is called after a previously waiting operation | 675 | * really necessary. It is called after a previously waiting operation |
612 | * was completed. | 676 | * modified the array. |
677 | * Note that wait-for-zero operations are handled without restart. | ||
613 | */ | 678 | */ |
614 | static int check_restart(struct sem_array *sma, struct sem_queue *q) | 679 | static int check_restart(struct sem_array *sma, struct sem_queue *q) |
615 | { | 680 | { |
616 | struct sem *curr; | 681 | /* pending complex alter operations are too difficult to analyse */ |
617 | struct sem_queue *h; | 682 | if (!list_empty(&sma->pending_alter)) |
618 | |||
619 | /* if the operation didn't modify the array, then no restart */ | ||
620 | if (q->alter == 0) | ||
621 | return 0; | ||
622 | |||
623 | /* pending complex operations are too difficult to analyse */ | ||
624 | if (sma->complex_count) | ||
625 | return 1; | 683 | return 1; |
626 | 684 | ||
627 | /* we were a sleeping complex operation. Too difficult */ | 685 | /* we were a sleeping complex operation. Too difficult */ |
628 | if (q->nsops > 1) | 686 | if (q->nsops > 1) |
629 | return 1; | 687 | return 1; |
630 | 688 | ||
631 | curr = sma->sem_base + q->sops[0].sem_num; | 689 | /* It is impossible that someone waits for the new value: |
690 | * - complex operations always restart. | ||
691 | * - wait-for-zero ops are handled separately. | ||
692 | * - q is a previously sleeping simple operation that | ||
693 | * altered the array. It must be a decrement, because | ||
694 | * simple increments never sleep. | ||
695 | * - If there are older (higher priority) decrements | ||
696 | * in the queue, then they have observed the original | ||
697 | * semval value and couldn't proceed. The operation | ||
698 | * decremented the value - thus they won't proceed either. | ||
699 | */ | ||
700 | return 0; | ||
701 | } | ||
702 | |||
703 | /** | ||
704 | * wake_const_ops(sma, semnum, pt) - Wake up non-alter tasks | ||
705 | * @sma: semaphore array. | ||
706 | * @semnum: semaphore that was modified. | ||
707 | * @pt: list head for the tasks that must be woken up. | ||
708 | * | ||
709 | * wake_const_ops must be called after a semaphore in a semaphore array | ||
710 | * was set to 0. If complex const operations are pending, wake_const_ops must | ||
711 | * be called with semnum = -1, as well as with the number of each modified | ||
712 | * semaphore. | ||
713 | * The tasks that must be woken up are added to @pt. The return code | ||
714 | * is stored in q->pid. | ||
715 | * The function returns 1 if at least one operation was completed successfully. | ||
716 | */ | ||
717 | static int wake_const_ops(struct sem_array *sma, int semnum, | ||
718 | struct list_head *pt) | ||
719 | { | ||
720 | struct sem_queue *q; | ||
721 | struct list_head *walk; | ||
722 | struct list_head *pending_list; | ||
723 | int semop_completed = 0; | ||
632 | 724 | ||
633 | /* No-one waits on this queue */ | 725 | if (semnum == -1) |
634 | if (list_empty(&curr->sem_pending)) | 726 | pending_list = &sma->pending_const; |
635 | return 0; | 727 | else |
728 | pending_list = &sma->sem_base[semnum].pending_const; | ||
729 | |||
730 | walk = pending_list->next; | ||
731 | while (walk != pending_list) { | ||
732 | int error; | ||
733 | |||
734 | q = container_of(walk, struct sem_queue, list); | ||
735 | walk = walk->next; | ||
736 | |||
737 | error = perform_atomic_semop(sma, q->sops, q->nsops, | ||
738 | q->undo, q->pid); | ||
636 | 739 | ||
637 | /* the new semaphore value */ | 740 | if (error <= 0) { |
638 | if (curr->semval) { | 741 | /* operation completed, remove from queue & wakeup */ |
639 | /* It is impossible that someone waits for the new value: | 742 | |
640 | * - q is a previously sleeping simple operation that | 743 | unlink_queue(sma, q); |
641 | * altered the array. It must be a decrement, because | 744 | |
642 | * simple increments never sleep. | 745 | wake_up_sem_queue_prepare(pt, q, error); |
643 | * - The value is not 0, thus wait-for-zero won't proceed. | 746 | if (error == 0) |
644 | * - If there are older (higher priority) decrements | 747 | semop_completed = 1; |
645 | * in the queue, then they have observed the original | 748 | } |
646 | * semval value and couldn't proceed. The operation | 749 | } |
647 | * decremented to value - thus they won't proceed either. | 750 | return semop_completed; |
751 | } | ||
752 | |||
753 | /** | ||
754 | * do_smart_wakeup_zero(sma, sops, nsops, pt) - wakeup all wait for zero tasks | ||
755 | * @sma: semaphore array | ||
756 | * @sops: operations that were performed | ||
757 | * @nsops: number of operations | ||
758 | * @pt: list head of the tasks that must be woken up. | ||
759 | * | ||
760 | * do_smart_wakeup_zero() checks all required queues for wait-for-zero | ||
761 | * operations, based on the actual changes that were performed on the | ||
762 | * semaphore array. | ||
763 | * The function returns 1 if at least one operation was completed successfully. | ||
764 | */ | ||
765 | static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, | ||
766 | int nsops, struct list_head *pt) | ||
767 | { | ||
768 | int i; | ||
769 | int semop_completed = 0; | ||
770 | int got_zero = 0; | ||
771 | |||
772 | /* first: the per-semaphore queues, if known */ | ||
773 | if (sops) { | ||
774 | for (i = 0; i < nsops; i++) { | ||
775 | int num = sops[i].sem_num; | ||
776 | |||
777 | if (sma->sem_base[num].semval == 0) { | ||
778 | got_zero = 1; | ||
779 | semop_completed |= wake_const_ops(sma, num, pt); | ||
780 | } | ||
781 | } | ||
782 | } else { | ||
783 | /* | ||
784 | * No sops means modified semaphores not known. | ||
785 | * Assume all were changed. | ||
648 | */ | 786 | */ |
649 | BUG_ON(q->sops[0].sem_op >= 0); | 787 | for (i = 0; i < sma->sem_nsems; i++) { |
650 | return 0; | 788 | if (sma->sem_base[i].semval == 0) { |
789 | got_zero = 1; | ||
790 | semop_completed |= wake_const_ops(sma, i, pt); | ||
791 | } | ||
792 | } | ||
651 | } | 793 | } |
652 | /* | 794 | /* |
653 | * semval is 0. Check if there are wait-for-zero semops. | 795 | * If one of the modified semaphores got 0, |
654 | * They must be the first entries in the per-semaphore queue | 796 | * then check the global queue, too. |
655 | */ | 797 | */ |
656 | h = list_first_entry(&curr->sem_pending, struct sem_queue, list); | 798 | if (got_zero) |
657 | BUG_ON(h->nsops != 1); | 799 | semop_completed |= wake_const_ops(sma, -1, pt); |
658 | BUG_ON(h->sops[0].sem_num != q->sops[0].sem_num); | ||
659 | |||
660 | /* Yes, there is a wait-for-zero semop. Restart */ | ||
661 | if (h->sops[0].sem_op == 0) | ||
662 | return 1; | ||
663 | 800 | ||
664 | /* Again - no-one is waiting for the new value. */ | 801 | return semop_completed; |
665 | return 0; | ||
666 | } | 802 | } |
667 | 803 | ||
668 | 804 | ||
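
The wakeup-for-zero policy above scans a wait-for-zero queue only for semaphores that actually reached zero, and looks at the global const queue only if at least one of them did. A hedged sketch of just that decision, with array indices standing in for the per-semaphore pending_const lists:

#include <stdio.h>

static int scanned_global;
static int scanned_sem[3];

static void wake_const(int semnum)
{
	if (semnum < 0)
		scanned_global = 1;           /* the global pending_const list */
	else
		scanned_sem[semnum] = 1;      /* this semaphore's pending_const list */
}

static void smart_wakeup_zero(const int *semval, const int *touched, int n)
{
	int got_zero = 0;

	for (int i = 0; i < n; i++) {
		if (touched[i] && semval[i] == 0) {
			got_zero = 1;
			wake_const(i);
		}
	}
	if (got_zero)                         /* complex wait-for-zero ops live here */
		wake_const(-1);
}

int main(void)
{
	int semval[3]  = { 0, 2, 0 };
	int touched[3] = { 1, 1, 0 };         /* the operation modified sems 0 and 1 */

	smart_wakeup_zero(semval, touched, 3);
	printf("global:%d sem0:%d sem1:%d sem2:%d\n",
	       scanned_global, scanned_sem[0], scanned_sem[1], scanned_sem[2]);
	return 0;
}
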
@@ -678,6 +814,8 @@ static int check_restart(struct sem_array *sma, struct sem_queue *q) | |||
678 | * semaphore. | 814 | * semaphore. |
679 | * The tasks that must be woken up are added to @pt. The return code | 815 | * The tasks that must be woken up are added to @pt. The return code |
680 | * is stored in q->pid. | 816 | * is stored in q->pid. |
817 | * The function internally checks if const operations can now succeed. | ||
818 | * | ||
681 | * The function returns 1 if at least one semop was completed successfully. | 819 | * The function returns 1 if at least one semop was completed successfully. |
682 | */ | 820 | */ |
683 | static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt) | 821 | static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt) |
@@ -688,9 +826,9 @@ static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt) | |||
688 | int semop_completed = 0; | 826 | int semop_completed = 0; |
689 | 827 | ||
690 | if (semnum == -1) | 828 | if (semnum == -1) |
691 | pending_list = &sma->sem_pending; | 829 | pending_list = &sma->pending_alter; |
692 | else | 830 | else |
693 | pending_list = &sma->sem_base[semnum].sem_pending; | 831 | pending_list = &sma->sem_base[semnum].pending_alter; |
694 | 832 | ||
695 | again: | 833 | again: |
696 | walk = pending_list->next; | 834 | walk = pending_list->next; |
@@ -702,16 +840,15 @@ again: | |||
702 | 840 | ||
703 | /* If we are scanning the single sop, per-semaphore list of | 841 | /* If we are scanning the single sop, per-semaphore list of |
704 | * one semaphore and that semaphore is 0, then it is not | 842 | * one semaphore and that semaphore is 0, then it is not |
705 | * necessary to scan the "alter" entries: simple increments | 843 | * necessary to scan further: simple increments |
706 | * that affect only one entry succeed immediately and cannot | 844 | * that affect only one entry succeed immediately and cannot |
707 | * be in the per semaphore pending queue, and decrements | 845 | * be in the per semaphore pending queue, and decrements |
708 | * cannot be successful if the value is already 0. | 846 | * cannot be successful if the value is already 0. |
709 | */ | 847 | */ |
710 | if (semnum != -1 && sma->sem_base[semnum].semval == 0 && | 848 | if (semnum != -1 && sma->sem_base[semnum].semval == 0) |
711 | q->alter) | ||
712 | break; | 849 | break; |
713 | 850 | ||
714 | error = try_atomic_semop(sma, q->sops, q->nsops, | 851 | error = perform_atomic_semop(sma, q->sops, q->nsops, |
715 | q->undo, q->pid); | 852 | q->undo, q->pid); |
716 | 853 | ||
717 | /* Does q->sleeper still need to sleep? */ | 854 | /* Does q->sleeper still need to sleep? */ |
@@ -724,6 +861,7 @@ again: | |||
724 | restart = 0; | 861 | restart = 0; |
725 | } else { | 862 | } else { |
726 | semop_completed = 1; | 863 | semop_completed = 1; |
864 | do_smart_wakeup_zero(sma, q->sops, q->nsops, pt); | ||
727 | restart = check_restart(sma, q); | 865 | restart = check_restart(sma, q); |
728 | } | 866 | } |
729 | 867 | ||
@@ -742,8 +880,8 @@ again: | |||
742 | * @otime: force setting otime | 880 | * @otime: force setting otime |
743 | * @pt: list head of the tasks that must be woken up. | 881 | * @pt: list head of the tasks that must be woken up. |
744 | * | 882 | * |
745 | * do_smart_update() does the required called to update_queue, based on the | 883 | * do_smart_update() does the required calls to update_queue and wakeup_zero, |
746 | * actual changes that were performed on the semaphore array. | 884 | * based on the actual changes that were performed on the semaphore array. |
747 | * Note that the function does not do the actual wake-up: the caller is | 885 | * Note that the function does not do the actual wake-up: the caller is |
748 | * responsible for calling wake_up_sem_queue_do(@pt). | 886 | * responsible for calling wake_up_sem_queue_do(@pt). |
749 | * It is safe to perform this call after dropping all locks. | 887 | * It is safe to perform this call after dropping all locks. |
@@ -752,49 +890,46 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop | |||
752 | int otime, struct list_head *pt) | 890 | int otime, struct list_head *pt) |
753 | { | 891 | { |
754 | int i; | 892 | int i; |
755 | int progress; | ||
756 | |||
757 | progress = 1; | ||
758 | retry_global: | ||
759 | if (sma->complex_count) { | ||
760 | if (update_queue(sma, -1, pt)) { | ||
761 | progress = 1; | ||
762 | otime = 1; | ||
763 | sops = NULL; | ||
764 | } | ||
765 | } | ||
766 | if (!progress) | ||
767 | goto done; | ||
768 | 893 | ||
769 | if (!sops) { | 894 | otime |= do_smart_wakeup_zero(sma, sops, nsops, pt); |
770 | /* No semops; something special is going on. */ | ||
771 | for (i = 0; i < sma->sem_nsems; i++) { | ||
772 | if (update_queue(sma, i, pt)) { | ||
773 | otime = 1; | ||
774 | progress = 1; | ||
775 | } | ||
776 | } | ||
777 | goto done_checkretry; | ||
778 | } | ||
779 | 895 | ||
780 | /* Check the semaphores that were modified. */ | 896 | if (!list_empty(&sma->pending_alter)) { |
781 | for (i = 0; i < nsops; i++) { | 897 | /* semaphore array uses the global queue - just process it. */ |
782 | if (sops[i].sem_op > 0 || | 898 | otime |= update_queue(sma, -1, pt); |
783 | (sops[i].sem_op < 0 && | 899 | } else { |
784 | sma->sem_base[sops[i].sem_num].semval == 0)) | 900 | if (!sops) { |
785 | if (update_queue(sma, sops[i].sem_num, pt)) { | 901 | /* |
786 | otime = 1; | 902 | * No sops, thus the modified semaphores are not |
787 | progress = 1; | 903 | * known. Check all. |
904 | */ | ||
905 | for (i = 0; i < sma->sem_nsems; i++) | ||
906 | otime |= update_queue(sma, i, pt); | ||
907 | } else { | ||
908 | /* | ||
909 | * Check the semaphores that were increased: | ||
910 | * - No complex ops, thus all sleeping ops are | ||
911 | * decrements. | ||
912 | * - if we decreased the value, then any sleeping | ||
913 | * semaphore ops won't be able to run: if the | ||
914 | * previous value was too small, then the new | ||
915 | * value will be too small, too. | ||
916 | */ | ||
917 | for (i = 0; i < nsops; i++) { | ||
918 | if (sops[i].sem_op > 0) { | ||
919 | otime |= update_queue(sma, | ||
920 | sops[i].sem_num, pt); | ||
921 | } | ||
788 | } | 922 | } |
923 | } | ||
789 | } | 924 | } |
790 | done_checkretry: | 925 | if (otime) { |
791 | if (progress) { | 926 | if (sops == NULL) { |
792 | progress = 0; | 927 | sma->sem_base[0].sem_otime = get_seconds(); |
793 | goto retry_global; | 928 | } else { |
929 | sma->sem_base[sops[0].sem_num].sem_otime = | ||
930 | get_seconds(); | ||
931 | } | ||
794 | } | 932 | } |
795 | done: | ||
796 | if (otime) | ||
797 | sma->sem_otime = get_seconds(); | ||
798 | } | 933 | } |
799 | 934 | ||
800 | 935 | ||
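
The reworked do_smart_update() above relies on one observation: with no complex alter operations queued, every sleeping simple operation is a decrement, so only semaphores whose value was increased need to be rescanned. A hedged sketch of just that scan policy:

#include <stdio.h>

struct op { int sem_num; int sem_op; };

static void rescan(int semnum)
{
	printf("rescan %s %d\n", semnum < 0 ? "global queue" : "sem", semnum);
}

static void smart_update(const struct op *sops, int nsops, int have_complex)
{
	if (have_complex) {
		rescan(-1);                   /* global alter queue: process it all */
		return;
	}
	for (int i = 0; i < nsops; i++)
		if (sops[i].sem_op > 0)       /* only an increase can wake a sleeper */
			rescan(sops[i].sem_num);
}

int main(void)
{
	struct op sops[] = { { 0, +1 }, { 1, -1 } };

	smart_update(sops, 2, 0);             /* only semaphore 0 is rescanned */
	return 0;
}
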
@@ -813,14 +948,14 @@ static int count_semncnt (struct sem_array * sma, ushort semnum) | |||
813 | struct sem_queue * q; | 948 | struct sem_queue * q; |
814 | 949 | ||
815 | semncnt = 0; | 950 | semncnt = 0; |
816 | list_for_each_entry(q, &sma->sem_base[semnum].sem_pending, list) { | 951 | list_for_each_entry(q, &sma->sem_base[semnum].pending_alter, list) { |
817 | struct sembuf * sops = q->sops; | 952 | struct sembuf * sops = q->sops; |
818 | BUG_ON(sops->sem_num != semnum); | 953 | BUG_ON(sops->sem_num != semnum); |
819 | if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT)) | 954 | if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT)) |
820 | semncnt++; | 955 | semncnt++; |
821 | } | 956 | } |
822 | 957 | ||
823 | list_for_each_entry(q, &sma->sem_pending, list) { | 958 | list_for_each_entry(q, &sma->pending_alter, list) { |
824 | struct sembuf * sops = q->sops; | 959 | struct sembuf * sops = q->sops; |
825 | int nsops = q->nsops; | 960 | int nsops = q->nsops; |
826 | int i; | 961 | int i; |
@@ -839,14 +974,14 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum) | |||
839 | struct sem_queue * q; | 974 | struct sem_queue * q; |
840 | 975 | ||
841 | semzcnt = 0; | 976 | semzcnt = 0; |
842 | list_for_each_entry(q, &sma->sem_base[semnum].sem_pending, list) { | 977 | list_for_each_entry(q, &sma->sem_base[semnum].pending_const, list) { |
843 | struct sembuf * sops = q->sops; | 978 | struct sembuf * sops = q->sops; |
844 | BUG_ON(sops->sem_num != semnum); | 979 | BUG_ON(sops->sem_num != semnum); |
845 | if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT)) | 980 | if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT)) |
846 | semzcnt++; | 981 | semzcnt++; |
847 | } | 982 | } |
848 | 983 | ||
849 | list_for_each_entry(q, &sma->sem_pending, list) { | 984 | list_for_each_entry(q, &sma->pending_const, list) { |
850 | struct sembuf * sops = q->sops; | 985 | struct sembuf * sops = q->sops; |
851 | int nsops = q->nsops; | 986 | int nsops = q->nsops; |
852 | int i; | 987 | int i; |
@@ -872,7 +1007,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) | |||
872 | int i; | 1007 | int i; |
873 | 1008 | ||
874 | /* Free the existing undo structures for this semaphore set. */ | 1009 | /* Free the existing undo structures for this semaphore set. */ |
875 | assert_spin_locked(&sma->sem_perm.lock); | 1010 | ipc_assert_locked_object(&sma->sem_perm); |
876 | list_for_each_entry_safe(un, tu, &sma->list_id, list_id) { | 1011 | list_for_each_entry_safe(un, tu, &sma->list_id, list_id) { |
877 | list_del(&un->list_id); | 1012 | list_del(&un->list_id); |
878 | spin_lock(&un->ulp->lock); | 1013 | spin_lock(&un->ulp->lock); |
@@ -884,13 +1019,22 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) | |||
884 | 1019 | ||
885 | /* Wake up all pending processes and let them fail with EIDRM. */ | 1020 | /* Wake up all pending processes and let them fail with EIDRM. */ |
886 | INIT_LIST_HEAD(&tasks); | 1021 | INIT_LIST_HEAD(&tasks); |
887 | list_for_each_entry_safe(q, tq, &sma->sem_pending, list) { | 1022 | list_for_each_entry_safe(q, tq, &sma->pending_const, list) { |
1023 | unlink_queue(sma, q); | ||
1024 | wake_up_sem_queue_prepare(&tasks, q, -EIDRM); | ||
1025 | } | ||
1026 | |||
1027 | list_for_each_entry_safe(q, tq, &sma->pending_alter, list) { | ||
888 | unlink_queue(sma, q); | 1028 | unlink_queue(sma, q); |
889 | wake_up_sem_queue_prepare(&tasks, q, -EIDRM); | 1029 | wake_up_sem_queue_prepare(&tasks, q, -EIDRM); |
890 | } | 1030 | } |
891 | for (i = 0; i < sma->sem_nsems; i++) { | 1031 | for (i = 0; i < sma->sem_nsems; i++) { |
892 | struct sem *sem = sma->sem_base + i; | 1032 | struct sem *sem = sma->sem_base + i; |
893 | list_for_each_entry_safe(q, tq, &sem->sem_pending, list) { | 1033 | list_for_each_entry_safe(q, tq, &sem->pending_const, list) { |
1034 | unlink_queue(sma, q); | ||
1035 | wake_up_sem_queue_prepare(&tasks, q, -EIDRM); | ||
1036 | } | ||
1037 | list_for_each_entry_safe(q, tq, &sem->pending_alter, list) { | ||
894 | unlink_queue(sma, q); | 1038 | unlink_queue(sma, q); |
895 | wake_up_sem_queue_prepare(&tasks, q, -EIDRM); | 1039 | wake_up_sem_queue_prepare(&tasks, q, -EIDRM); |
896 | } | 1040 | } |
@@ -931,6 +1075,21 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, | |||
931 | } | 1075 | } |
932 | } | 1076 | } |
933 | 1077 | ||
1078 | static time_t get_semotime(struct sem_array *sma) | ||
1079 | { | ||
1080 | int i; | ||
1081 | time_t res; | ||
1082 | |||
1083 | res = sma->sem_base[0].sem_otime; | ||
1084 | for (i = 1; i < sma->sem_nsems; i++) { | ||
1085 | time_t to = sma->sem_base[i].sem_otime; | ||
1086 | |||
1087 | if (to > res) | ||
1088 | res = to; | ||
1089 | } | ||
1090 | return res; | ||
1091 | } | ||
1092 | |||
934 | static int semctl_nolock(struct ipc_namespace *ns, int semid, | 1093 | static int semctl_nolock(struct ipc_namespace *ns, int semid, |
935 | int cmd, int version, void __user *p) | 1094 | int cmd, int version, void __user *p) |
936 | { | 1095 | { |
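
get_semotime() above compensates for moving sem_otime into each struct sem: the array-wide value reported to user space is simply the newest per-semaphore timestamp. A tiny stand-alone equivalent of that computation:

#include <stdio.h>
#include <time.h>

static time_t semotime_max(const time_t *otime, int nsems)
{
	time_t res = otime[0];

	for (int i = 1; i < nsems; i++)
		if (otime[i] > res)
			res = otime[i];
	return res;
}

int main(void)
{
	time_t otime[] = { 100, 250, 175 };

	printf("sem_otime = %ld\n", (long)semotime_max(otime, 3));
	return 0;
}
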
@@ -1004,9 +1163,9 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, | |||
1004 | goto out_unlock; | 1163 | goto out_unlock; |
1005 | 1164 | ||
1006 | kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm); | 1165 | kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm); |
1007 | tbuf.sem_otime = sma->sem_otime; | 1166 | tbuf.sem_otime = get_semotime(sma); |
1008 | tbuf.sem_ctime = sma->sem_ctime; | 1167 | tbuf.sem_ctime = sma->sem_ctime; |
1009 | tbuf.sem_nsems = sma->sem_nsems; | 1168 | tbuf.sem_nsems = sma->sem_nsems; |
1010 | rcu_read_unlock(); | 1169 | rcu_read_unlock(); |
1011 | if (copy_semid_to_user(p, &tbuf, version)) | 1170 | if (copy_semid_to_user(p, &tbuf, version)) |
1012 | return -EFAULT; | 1171 | return -EFAULT; |
@@ -1070,7 +1229,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum, | |||
1070 | 1229 | ||
1071 | curr = &sma->sem_base[semnum]; | 1230 | curr = &sma->sem_base[semnum]; |
1072 | 1231 | ||
1073 | assert_spin_locked(&sma->sem_perm.lock); | 1232 | ipc_assert_locked_object(&sma->sem_perm); |
1074 | list_for_each_entry(un, &sma->list_id, list_id) | 1233 | list_for_each_entry(un, &sma->list_id, list_id) |
1075 | un->semadj[semnum] = 0; | 1234 | un->semadj[semnum] = 0; |
1076 | 1235 | ||
@@ -1199,7 +1358,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, | |||
1199 | for (i = 0; i < nsems; i++) | 1358 | for (i = 0; i < nsems; i++) |
1200 | sma->sem_base[i].semval = sem_io[i]; | 1359 | sma->sem_base[i].semval = sem_io[i]; |
1201 | 1360 | ||
1202 | assert_spin_locked(&sma->sem_perm.lock); | 1361 | ipc_assert_locked_object(&sma->sem_perm); |
1203 | list_for_each_entry(un, &sma->list_id, list_id) { | 1362 | list_for_each_entry(un, &sma->list_id, list_id) { |
1204 | for (i = 0; i < nsems; i++) | 1363 | for (i = 0; i < nsems; i++) |
1205 | un->semadj[i] = 0; | 1364 | un->semadj[i] = 0; |
@@ -1289,39 +1448,43 @@ static int semctl_down(struct ipc_namespace *ns, int semid, | |||
1289 | return -EFAULT; | 1448 | return -EFAULT; |
1290 | } | 1449 | } |
1291 | 1450 | ||
1451 | down_write(&sem_ids(ns).rw_mutex); | ||
1452 | rcu_read_lock(); | ||
1453 | |||
1292 | ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd, | 1454 | ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd, |
1293 | &semid64.sem_perm, 0); | 1455 | &semid64.sem_perm, 0); |
1294 | if (IS_ERR(ipcp)) | 1456 | if (IS_ERR(ipcp)) { |
1295 | return PTR_ERR(ipcp); | 1457 | err = PTR_ERR(ipcp); |
1458 | goto out_unlock1; | ||
1459 | } | ||
1296 | 1460 | ||
1297 | sma = container_of(ipcp, struct sem_array, sem_perm); | 1461 | sma = container_of(ipcp, struct sem_array, sem_perm); |
1298 | 1462 | ||
1299 | err = security_sem_semctl(sma, cmd); | 1463 | err = security_sem_semctl(sma, cmd); |
1300 | if (err) { | 1464 | if (err) |
1301 | rcu_read_unlock(); | 1465 | goto out_unlock1; |
1302 | goto out_up; | ||
1303 | } | ||
1304 | 1466 | ||
1305 | switch(cmd){ | 1467 | switch (cmd) { |
1306 | case IPC_RMID: | 1468 | case IPC_RMID: |
1307 | sem_lock(sma, NULL, -1); | 1469 | sem_lock(sma, NULL, -1); |
1470 | /* freeary unlocks the ipc object and rcu */ | ||
1308 | freeary(ns, ipcp); | 1471 | freeary(ns, ipcp); |
1309 | goto out_up; | 1472 | goto out_up; |
1310 | case IPC_SET: | 1473 | case IPC_SET: |
1311 | sem_lock(sma, NULL, -1); | 1474 | sem_lock(sma, NULL, -1); |
1312 | err = ipc_update_perm(&semid64.sem_perm, ipcp); | 1475 | err = ipc_update_perm(&semid64.sem_perm, ipcp); |
1313 | if (err) | 1476 | if (err) |
1314 | goto out_unlock; | 1477 | goto out_unlock0; |
1315 | sma->sem_ctime = get_seconds(); | 1478 | sma->sem_ctime = get_seconds(); |
1316 | break; | 1479 | break; |
1317 | default: | 1480 | default: |
1318 | rcu_read_unlock(); | ||
1319 | err = -EINVAL; | 1481 | err = -EINVAL; |
1320 | goto out_up; | 1482 | goto out_unlock1; |
1321 | } | 1483 | } |
1322 | 1484 | ||
1323 | out_unlock: | 1485 | out_unlock0: |
1324 | sem_unlock(sma, -1); | 1486 | sem_unlock(sma, -1); |
1487 | out_unlock1: | ||
1325 | rcu_read_unlock(); | 1488 | rcu_read_unlock(); |
1326 | out_up: | 1489 | out_up: |
1327 | up_write(&sem_ids(ns).rw_mutex); | 1490 | up_write(&sem_ids(ns).rw_mutex); |
@@ -1496,7 +1659,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) | |||
1496 | new->semid = semid; | 1659 | new->semid = semid; |
1497 | assert_spin_locked(&ulp->lock); | 1660 | assert_spin_locked(&ulp->lock); |
1498 | list_add_rcu(&new->list_proc, &ulp->list_proc); | 1661 | list_add_rcu(&new->list_proc, &ulp->list_proc); |
1499 | assert_spin_locked(&sma->sem_perm.lock); | 1662 | ipc_assert_locked_object(&sma->sem_perm); |
1500 | list_add(&new->list_id, &sma->list_id); | 1663 | list_add(&new->list_id, &sma->list_id); |
1501 | un = new; | 1664 | un = new; |
1502 | 1665 | ||
@@ -1533,7 +1696,6 @@ static int get_queue_result(struct sem_queue *q) | |||
1533 | return error; | 1696 | return error; |
1534 | } | 1697 | } |
1535 | 1698 | ||
1536 | |||
1537 | SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, | 1699 | SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, |
1538 | unsigned, nsops, const struct timespec __user *, timeout) | 1700 | unsigned, nsops, const struct timespec __user *, timeout) |
1539 | { | 1701 | { |
@@ -1631,7 +1793,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, | |||
1631 | if (un && un->semid == -1) | 1793 | if (un && un->semid == -1) |
1632 | goto out_unlock_free; | 1794 | goto out_unlock_free; |
1633 | 1795 | ||
1634 | error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current)); | 1796 | error = perform_atomic_semop(sma, sops, nsops, un, |
1797 | task_tgid_vnr(current)); | ||
1635 | if (error <= 0) { | 1798 | if (error <= 0) { |
1636 | if (alter && error == 0) | 1799 | if (alter && error == 0) |
1637 | do_smart_update(sma, sops, nsops, 1, &tasks); | 1800 | do_smart_update(sma, sops, nsops, 1, &tasks); |
@@ -1653,15 +1816,27 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, | |||
1653 | struct sem *curr; | 1816 | struct sem *curr; |
1654 | curr = &sma->sem_base[sops->sem_num]; | 1817 | curr = &sma->sem_base[sops->sem_num]; |
1655 | 1818 | ||
1656 | if (alter) | 1819 | if (alter) { |
1657 | list_add_tail(&queue.list, &curr->sem_pending); | 1820 | if (sma->complex_count) { |
1658 | else | 1821 | list_add_tail(&queue.list, |
1659 | list_add(&queue.list, &curr->sem_pending); | 1822 | &sma->pending_alter); |
1823 | } else { | ||
1824 | |||
1825 | list_add_tail(&queue.list, | ||
1826 | &curr->pending_alter); | ||
1827 | } | ||
1828 | } else { | ||
1829 | list_add_tail(&queue.list, &curr->pending_const); | ||
1830 | } | ||
1660 | } else { | 1831 | } else { |
1832 | if (!sma->complex_count) | ||
1833 | merge_queues(sma); | ||
1834 | |||
1661 | if (alter) | 1835 | if (alter) |
1662 | list_add_tail(&queue.list, &sma->sem_pending); | 1836 | list_add_tail(&queue.list, &sma->pending_alter); |
1663 | else | 1837 | else |
1664 | list_add(&queue.list, &sma->sem_pending); | 1838 | list_add_tail(&queue.list, &sma->pending_const); |
1839 | |||
1665 | sma->complex_count++; | 1840 | sma->complex_count++; |
1666 | } | 1841 | } |
1667 | 1842 | ||
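
The queueing hunk above decides where a blocked semop waits: a single-sop wait-for-zero goes on the semaphore's pending_const list, a single-sop decrement goes on its pending_alter list unless complex operations are already pending (then the global list, to preserve FIFO), and multi-sop operations always use the global lists and bump complex_count. A hedged sketch of that decision table; the enum is invented for illustration:

#include <stdio.h>

enum target { PER_SEM_ALTER, PER_SEM_CONST, GLOBAL_ALTER, GLOBAL_CONST };

static enum target pick_queue(int nsops, int alter, int complex_count)
{
	if (nsops == 1) {
		if (!alter)
			return PER_SEM_CONST;
		/* keep FIFO ordering with already-queued complex ops */
		return complex_count ? GLOBAL_ALTER : PER_SEM_ALTER;
	}
	/* multi-sop: always the global queue (and complex_count++) */
	return alter ? GLOBAL_ALTER : GLOBAL_CONST;
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_queue(1, 1, 0),   /* simple decrement, no complex ops */
	       pick_queue(1, 1, 2),   /* simple decrement, complex pending */
	       pick_queue(2, 1, 0));  /* multi-sop alter */
	return 0;
}
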
@@ -1833,7 +2008,7 @@ void exit_sem(struct task_struct *tsk) | |||
1833 | } | 2008 | } |
1834 | 2009 | ||
1835 | /* remove un from the linked lists */ | 2010 | /* remove un from the linked lists */ |
1836 | assert_spin_locked(&sma->sem_perm.lock); | 2011 | ipc_assert_locked_object(&sma->sem_perm); |
1837 | list_del(&un->list_id); | 2012 | list_del(&un->list_id); |
1838 | 2013 | ||
1839 | spin_lock(&ulp->lock); | 2014 | spin_lock(&ulp->lock); |
@@ -1882,6 +2057,9 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it) | |||
1882 | { | 2057 | { |
1883 | struct user_namespace *user_ns = seq_user_ns(s); | 2058 | struct user_namespace *user_ns = seq_user_ns(s); |
1884 | struct sem_array *sma = it; | 2059 | struct sem_array *sma = it; |
2060 | time_t sem_otime; | ||
2061 | |||
2062 | sem_otime = get_semotime(sma); | ||
1885 | 2063 | ||
1886 | return seq_printf(s, | 2064 | return seq_printf(s, |
1887 | "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n", | 2065 | "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n", |
@@ -1893,7 +2071,7 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it) | |||
1893 | from_kgid_munged(user_ns, sma->sem_perm.gid), | 2071 | from_kgid_munged(user_ns, sma->sem_perm.gid), |
1894 | from_kuid_munged(user_ns, sma->sem_perm.cuid), | 2072 | from_kuid_munged(user_ns, sma->sem_perm.cuid), |
1895 | from_kgid_munged(user_ns, sma->sem_perm.cgid), | 2073 | from_kgid_munged(user_ns, sma->sem_perm.cgid), |
1896 | sma->sem_otime, | 2074 | sem_otime, |
1897 | sma->sem_ctime); | 2075 | sma->sem_ctime); |
1898 | } | 2076 | } |
1899 | #endif | 2077 | #endif |
@@ -141,7 +141,7 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id) | |||
141 | static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp) | 141 | static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp) |
142 | { | 142 | { |
143 | rcu_read_lock(); | 143 | rcu_read_lock(); |
144 | spin_lock(&ipcp->shm_perm.lock); | 144 | ipc_lock_object(&ipcp->shm_perm); |
145 | } | 145 | } |
146 | 146 | ||
147 | static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns, | 147 | static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns, |
@@ -491,10 +491,10 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) | |||
491 | 491 | ||
492 | sprintf (name, "SYSV%08x", key); | 492 | sprintf (name, "SYSV%08x", key); |
493 | if (shmflg & SHM_HUGETLB) { | 493 | if (shmflg & SHM_HUGETLB) { |
494 | struct hstate *hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) | 494 | struct hstate *hs; |
495 | & SHM_HUGE_MASK); | ||
496 | size_t hugesize; | 495 | size_t hugesize; |
497 | 496 | ||
497 | hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); | ||
498 | if (!hs) { | 498 | if (!hs) { |
499 | error = -EINVAL; | 499 | error = -EINVAL; |
500 | goto no_file; | 500 | goto no_file; |
@@ -535,6 +535,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) | |||
535 | shp->shm_nattch = 0; | 535 | shp->shm_nattch = 0; |
536 | shp->shm_file = file; | 536 | shp->shm_file = file; |
537 | shp->shm_creator = current; | 537 | shp->shm_creator = current; |
538 | |||
538 | /* | 539 | /* |
539 | * shmid gets reported as "inode#" in /proc/pid/maps. | 540 | * shmid gets reported as "inode#" in /proc/pid/maps. |
540 | * proc-ps tools use this. Changing this will break them. | 541 | * proc-ps tools use this. Changing this will break them. |
@@ -543,7 +544,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) | |||
543 | 544 | ||
544 | ns->shm_tot += numpages; | 545 | ns->shm_tot += numpages; |
545 | error = shp->shm_perm.id; | 546 | error = shp->shm_perm.id; |
546 | shm_unlock(shp); | 547 | |
548 | ipc_unlock_object(&shp->shm_perm); | ||
549 | rcu_read_unlock(); | ||
547 | return error; | 550 | return error; |
548 | 551 | ||
549 | no_id: | 552 | no_id: |
@@ -754,31 +757,42 @@ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd, | |||
754 | return -EFAULT; | 757 | return -EFAULT; |
755 | } | 758 | } |
756 | 759 | ||
760 | down_write(&shm_ids(ns).rw_mutex); | ||
761 | rcu_read_lock(); | ||
762 | |||
757 | ipcp = ipcctl_pre_down(ns, &shm_ids(ns), shmid, cmd, | 763 | ipcp = ipcctl_pre_down(ns, &shm_ids(ns), shmid, cmd, |
758 | &shmid64.shm_perm, 0); | 764 | &shmid64.shm_perm, 0); |
759 | if (IS_ERR(ipcp)) | 765 | if (IS_ERR(ipcp)) { |
760 | return PTR_ERR(ipcp); | 766 | err = PTR_ERR(ipcp); |
767 | /* the ipc lock is not held upon failure */ | ||
768 | goto out_unlock1; | ||
769 | } | ||
761 | 770 | ||
762 | shp = container_of(ipcp, struct shmid_kernel, shm_perm); | 771 | shp = container_of(ipcp, struct shmid_kernel, shm_perm); |
763 | 772 | ||
764 | err = security_shm_shmctl(shp, cmd); | 773 | err = security_shm_shmctl(shp, cmd); |
765 | if (err) | 774 | if (err) |
766 | goto out_unlock; | 775 | goto out_unlock0; |
776 | |||
767 | switch (cmd) { | 777 | switch (cmd) { |
768 | case IPC_RMID: | 778 | case IPC_RMID: |
779 | /* do_shm_rmid unlocks the ipc object and rcu */ | ||
769 | do_shm_rmid(ns, ipcp); | 780 | do_shm_rmid(ns, ipcp); |
770 | goto out_up; | 781 | goto out_up; |
771 | case IPC_SET: | 782 | case IPC_SET: |
772 | err = ipc_update_perm(&shmid64.shm_perm, ipcp); | 783 | err = ipc_update_perm(&shmid64.shm_perm, ipcp); |
773 | if (err) | 784 | if (err) |
774 | goto out_unlock; | 785 | goto out_unlock0; |
775 | shp->shm_ctim = get_seconds(); | 786 | shp->shm_ctim = get_seconds(); |
776 | break; | 787 | break; |
777 | default: | 788 | default: |
778 | err = -EINVAL; | 789 | err = -EINVAL; |
779 | } | 790 | } |
780 | out_unlock: | 791 | |
781 | shm_unlock(shp); | 792 | out_unlock0: |
793 | ipc_unlock_object(&shp->shm_perm); | ||
794 | out_unlock1: | ||
795 | rcu_read_unlock(); | ||
782 | out_up: | 796 | out_up: |
783 | up_write(&shm_ids(ns).rw_mutex); | 797 | up_write(&shm_ids(ns).rw_mutex); |
784 | return err; | 798 | return err; |
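The shmctl_down() hunk above moves acquisition of shm_ids(ns).rw_mutex and the RCU read lock into the caller, and splits the exit path into out_unlock0 (drop the per-object lock) and out_unlock1 (drop RCU) so each error path releases exactly what it holds; as the added comment notes, ipcctl_pre_down() now returns with the ipc lock not held on failure. A minimal userspace sketch of the same take-outer-then-inner, release-in-reverse discipline; the pthread primitives stand in for rw_mutex and the ipc object lock and are illustrative only, not kernel APIs:

#include <pthread.h>
#include <stdio.h>

struct obj {
        pthread_mutex_t lock;                   /* plays the role of shm_perm.lock */
        int ctime;
};

static pthread_rwlock_t ids_rwlock = PTHREAD_RWLOCK_INITIALIZER; /* rw_mutex stand-in */
static struct obj one = { PTHREAD_MUTEX_INITIALIZER, 0 };

static int obj_set(struct obj *o, int now)
{
        int err = 0;

        pthread_rwlock_wrlock(&ids_rwlock);     /* down_write(&shm_ids(ns).rw_mutex) */
        pthread_mutex_lock(&o->lock);           /* ipc_lock_object()                 */

        if (now < 0) {
                err = -1;                       /* error path: still drop both locks */
                goto out_unlock;
        }
        o->ctime = now;

out_unlock:
        pthread_mutex_unlock(&o->lock);         /* ipc_unlock_object()               */
        pthread_rwlock_unlock(&ids_rwlock);     /* up_write()                        */
        return err;
}

int main(void)
{
        printf("obj_set -> %d, ctime=%d\n", obj_set(&one, 42), one.ctime);
        return 0;
}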
diff --git a/ipc/util.c b/ipc/util.c index 809ec5ec8122..4704223bfad4 100644 --- a/ipc/util.c +++ b/ipc/util.c | |||
@@ -246,9 +246,8 @@ int ipc_get_maxid(struct ipc_ids *ids) | |||
246 | * is returned. The 'new' entry is returned in a locked state on success. | 246 | * is returned. The 'new' entry is returned in a locked state on success. |
247 | * On failure the entry is not locked and a negative err-code is returned. | 247 | * On failure the entry is not locked and a negative err-code is returned. |
248 | * | 248 | * |
249 | * Called with ipc_ids.rw_mutex held as a writer. | 249 | * Called with writer ipc_ids.rw_mutex held. |
250 | */ | 250 | */ |
251 | |||
252 | int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) | 251 | int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) |
253 | { | 252 | { |
254 | kuid_t euid; | 253 | kuid_t euid; |
@@ -469,9 +468,7 @@ void ipc_free(void* ptr, int size) | |||
469 | struct ipc_rcu { | 468 | struct ipc_rcu { |
470 | struct rcu_head rcu; | 469 | struct rcu_head rcu; |
471 | atomic_t refcount; | 470 | atomic_t refcount; |
472 | /* "void *" makes sure alignment of following data is sane. */ | 471 | } ____cacheline_aligned_in_smp; |
473 | void *data[0]; | ||
474 | }; | ||
475 | 472 | ||
476 | /** | 473 | /** |
477 | * ipc_rcu_alloc - allocate ipc and rcu space | 474 | * ipc_rcu_alloc - allocate ipc and rcu space |
@@ -489,12 +486,14 @@ void *ipc_rcu_alloc(int size) | |||
489 | if (unlikely(!out)) | 486 | if (unlikely(!out)) |
490 | return NULL; | 487 | return NULL; |
491 | atomic_set(&out->refcount, 1); | 488 | atomic_set(&out->refcount, 1); |
492 | return out->data; | 489 | return out + 1; |
493 | } | 490 | } |
494 | 491 | ||
495 | int ipc_rcu_getref(void *ptr) | 492 | int ipc_rcu_getref(void *ptr) |
496 | { | 493 | { |
497 | return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu, data)->refcount); | 494 | struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1; |
495 | |||
496 | return atomic_inc_not_zero(&p->refcount); | ||
498 | } | 497 | } |
499 | 498 | ||
500 | /** | 499 | /** |
@@ -508,7 +507,7 @@ static void ipc_schedule_free(struct rcu_head *head) | |||
508 | 507 | ||
509 | void ipc_rcu_putref(void *ptr) | 508 | void ipc_rcu_putref(void *ptr) |
510 | { | 509 | { |
511 | struct ipc_rcu *p = container_of(ptr, struct ipc_rcu, data); | 510 | struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1; |
512 | 511 | ||
513 | if (!atomic_dec_and_test(&p->refcount)) | 512 | if (!atomic_dec_and_test(&p->refcount)) |
514 | return; | 513 | return; |
@@ -747,8 +746,10 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out) | |||
747 | * It must be called without any lock held and | 746 | * It must be called without any lock held and |
748 | * - retrieves the ipc with the given id in the given table. | 747 | * - retrieves the ipc with the given id in the given table. |
749 | * - performs some audit and permission check, depending on the given cmd | 748 | * - performs some audit and permission check, depending on the given cmd |
750 | * - returns the ipc with both ipc and rw_mutex locks held in case of success | 749 | * - returns the ipc with the ipc lock held in case of success |
751 | * or an err-code without any lock held otherwise. | 750 | * or an err-code without any lock held otherwise. |
751 | * | ||
752 | * Call holding the both the rw_mutex and the rcu read lock. | ||
752 | */ | 753 | */ |
753 | struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns, | 754 | struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns, |
754 | struct ipc_ids *ids, int id, int cmd, | 755 | struct ipc_ids *ids, int id, int cmd, |
@@ -773,13 +774,10 @@ struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns, | |||
773 | int err = -EPERM; | 774 | int err = -EPERM; |
774 | struct kern_ipc_perm *ipcp; | 775 | struct kern_ipc_perm *ipcp; |
775 | 776 | ||
776 | down_write(&ids->rw_mutex); | ||
777 | rcu_read_lock(); | ||
778 | |||
779 | ipcp = ipc_obtain_object_check(ids, id); | 777 | ipcp = ipc_obtain_object_check(ids, id); |
780 | if (IS_ERR(ipcp)) { | 778 | if (IS_ERR(ipcp)) { |
781 | err = PTR_ERR(ipcp); | 779 | err = PTR_ERR(ipcp); |
782 | goto out_up; | 780 | goto err; |
783 | } | 781 | } |
784 | 782 | ||
785 | audit_ipc_obj(ipcp); | 783 | audit_ipc_obj(ipcp); |
@@ -790,16 +788,8 @@ struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns, | |||
790 | euid = current_euid(); | 788 | euid = current_euid(); |
791 | if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid) || | 789 | if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid) || |
792 | ns_capable(ns->user_ns, CAP_SYS_ADMIN)) | 790 | ns_capable(ns->user_ns, CAP_SYS_ADMIN)) |
793 | return ipcp; | 791 | return ipcp; /* successful lookup */ |
794 | 792 | err: | |
795 | out_up: | ||
796 | /* | ||
797 | * Unsuccessful lookup, unlock and return | ||
798 | * the corresponding error. | ||
799 | */ | ||
800 | rcu_read_unlock(); | ||
801 | up_write(&ids->rw_mutex); | ||
802 | |||
803 | return ERR_PTR(err); | 793 | return ERR_PTR(err); |
804 | } | 794 | } |
805 | 795 | ||
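With the flexible "void *data[0]" member gone, struct ipc_rcu is now a plain cache-line-aligned header allocated immediately before the object, which is why ipc_rcu_alloc() returns "out + 1" and the getref/putref helpers recover the header with "ptr - 1". A small userspace sketch of that header-before-payload pattern, using plain malloc and a non-atomic refcount purely for illustration:

#include <stdlib.h>
#include <stdio.h>

struct hdr {
        int refcount;                           /* stands in for the atomic refcount */
};

static void *obj_alloc(size_t size)
{
        struct hdr *h = malloc(sizeof(*h) + size);

        if (!h)
                return NULL;
        h->refcount = 1;
        return h + 1;                           /* caller never sees the header */
}

static void obj_put(void *ptr)
{
        struct hdr *h = (struct hdr *)ptr - 1;  /* step back to the hidden header */

        if (--h->refcount == 0)
                free(h);
}

int main(void)
{
        int *v = obj_alloc(sizeof(*v));

        if (!v)
                return 1;
        *v = 7;
        printf("%d\n", *v);
        obj_put(v);
        return 0;
}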
diff --git a/ipc/util.h b/ipc/util.h index 2b0bdd5d92ce..b6a6a88f3002 100644 --- a/ipc/util.h +++ b/ipc/util.h | |||
@@ -159,21 +159,31 @@ static inline int ipc_checkid(struct kern_ipc_perm *ipcp, int uid) | |||
159 | return uid / SEQ_MULTIPLIER != ipcp->seq; | 159 | return uid / SEQ_MULTIPLIER != ipcp->seq; |
160 | } | 160 | } |
161 | 161 | ||
162 | static inline void ipc_lock_by_ptr(struct kern_ipc_perm *perm) | 162 | static inline void ipc_lock_object(struct kern_ipc_perm *perm) |
163 | { | 163 | { |
164 | rcu_read_lock(); | ||
165 | spin_lock(&perm->lock); | 164 | spin_lock(&perm->lock); |
166 | } | 165 | } |
167 | 166 | ||
168 | static inline void ipc_unlock(struct kern_ipc_perm *perm) | 167 | static inline void ipc_unlock_object(struct kern_ipc_perm *perm) |
169 | { | 168 | { |
170 | spin_unlock(&perm->lock); | 169 | spin_unlock(&perm->lock); |
171 | rcu_read_unlock(); | ||
172 | } | 170 | } |
173 | 171 | ||
174 | static inline void ipc_lock_object(struct kern_ipc_perm *perm) | 172 | static inline void ipc_assert_locked_object(struct kern_ipc_perm *perm) |
175 | { | 173 | { |
176 | spin_lock(&perm->lock); | 174 | assert_spin_locked(&perm->lock); |
175 | } | ||
176 | |||
177 | static inline void ipc_lock_by_ptr(struct kern_ipc_perm *perm) | ||
178 | { | ||
179 | rcu_read_lock(); | ||
180 | ipc_lock_object(perm); | ||
181 | } | ||
182 | |||
183 | static inline void ipc_unlock(struct kern_ipc_perm *perm) | ||
184 | { | ||
185 | ipc_unlock_object(perm); | ||
186 | rcu_read_unlock(); | ||
177 | } | 187 | } |
178 | 188 | ||
179 | struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id); | 189 | struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id); |
diff --git a/kernel/Makefile b/kernel/Makefile index 271fd3119af9..470839d1a30e 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
@@ -9,7 +9,7 @@ obj-y = fork.o exec_domain.o panic.o printk.o \ | |||
9 | rcupdate.o extable.o params.o posix-timers.o \ | 9 | rcupdate.o extable.o params.o posix-timers.o \ |
10 | kthread.o wait.o sys_ni.o posix-cpu-timers.o mutex.o \ | 10 | kthread.o wait.o sys_ni.o posix-cpu-timers.o mutex.o \ |
11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ | 11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ |
12 | notifier.o ksysfs.o cred.o \ | 12 | notifier.o ksysfs.o cred.o reboot.o \ |
13 | async.o range.o groups.o lglock.o smpboot.o | 13 | async.o range.o groups.o lglock.o smpboot.o |
14 | 14 | ||
15 | ifdef CONFIG_FUNCTION_TRACER | 15 | ifdef CONFIG_FUNCTION_TRACER |
diff --git a/kernel/audit.h b/kernel/audit.h index 1c95131ef760..123c9b7c3979 100644 --- a/kernel/audit.h +++ b/kernel/audit.h | |||
@@ -85,6 +85,7 @@ struct audit_names { | |||
85 | 85 | ||
86 | struct filename *name; | 86 | struct filename *name; |
87 | int name_len; /* number of chars to log */ | 87 | int name_len; /* number of chars to log */ |
88 | bool hidden; /* don't log this record */ | ||
88 | bool name_put; /* call __putname()? */ | 89 | bool name_put; /* call __putname()? */ |
89 | 90 | ||
90 | unsigned long ino; | 91 | unsigned long ino; |
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 6bd4a90d1991..f7aee8be7fb2 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c | |||
@@ -423,7 +423,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, | |||
423 | f->lsm_rule = NULL; | 423 | f->lsm_rule = NULL; |
424 | 424 | ||
425 | /* Support legacy tests for a valid loginuid */ | 425 | /* Support legacy tests for a valid loginuid */ |
426 | if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295)) { | 426 | if ((f->type == AUDIT_LOGINUID) && (f->val == ~0U)) { |
427 | f->type = AUDIT_LOGINUID_SET; | 427 | f->type = AUDIT_LOGINUID_SET; |
428 | f->val = 0; | 428 | f->val = 0; |
429 | } | 429 | } |
@@ -865,6 +865,12 @@ static inline int audit_add_rule(struct audit_entry *entry) | |||
865 | err = audit_add_watch(&entry->rule, &list); | 865 | err = audit_add_watch(&entry->rule, &list); |
866 | if (err) { | 866 | if (err) { |
867 | mutex_unlock(&audit_filter_mutex); | 867 | mutex_unlock(&audit_filter_mutex); |
868 | /* | ||
869 | * normally audit_add_tree_rule() will free it | ||
870 | * on failure | ||
871 | */ | ||
872 | if (tree) | ||
873 | audit_put_tree(tree); | ||
868 | goto error; | 874 | goto error; |
869 | } | 875 | } |
870 | } | 876 | } |
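The filter change above replaces the literal 4294967295 with ~0U; both spell the all-ones 32-bit value used as the legacy "invalid loginuid" sentinel, so behaviour is unchanged. A one-line userspace check of that equivalence:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t a = ~0U, b = 4294967295U, c = (uint32_t)-1;

        printf("%u %u %u -> %s\n", a, b, c,
               (a == b && b == c) ? "equal" : "different");
        return 0;
}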
diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 3c8a601324a2..9845cb32b60a 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c | |||
@@ -1399,8 +1399,11 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts | |||
1399 | } | 1399 | } |
1400 | 1400 | ||
1401 | i = 0; | 1401 | i = 0; |
1402 | list_for_each_entry(n, &context->names_list, list) | 1402 | list_for_each_entry(n, &context->names_list, list) { |
1403 | if (n->hidden) | ||
1404 | continue; | ||
1403 | audit_log_name(context, n, NULL, i++, &call_panic); | 1405 | audit_log_name(context, n, NULL, i++, &call_panic); |
1406 | } | ||
1404 | 1407 | ||
1405 | /* Send end of event record to help user space know we are finished */ | 1408 | /* Send end of event record to help user space know we are finished */ |
1406 | ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE); | 1409 | ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE); |
@@ -1769,14 +1772,15 @@ void audit_putname(struct filename *name) | |||
1769 | * __audit_inode - store the inode and device from a lookup | 1772 | * __audit_inode - store the inode and device from a lookup |
1770 | * @name: name being audited | 1773 | * @name: name being audited |
1771 | * @dentry: dentry being audited | 1774 | * @dentry: dentry being audited |
1772 | * @parent: does this dentry represent the parent? | 1775 | * @flags: attributes for this particular entry |
1773 | */ | 1776 | */ |
1774 | void __audit_inode(struct filename *name, const struct dentry *dentry, | 1777 | void __audit_inode(struct filename *name, const struct dentry *dentry, |
1775 | unsigned int parent) | 1778 | unsigned int flags) |
1776 | { | 1779 | { |
1777 | struct audit_context *context = current->audit_context; | 1780 | struct audit_context *context = current->audit_context; |
1778 | const struct inode *inode = dentry->d_inode; | 1781 | const struct inode *inode = dentry->d_inode; |
1779 | struct audit_names *n; | 1782 | struct audit_names *n; |
1783 | bool parent = flags & AUDIT_INODE_PARENT; | ||
1780 | 1784 | ||
1781 | if (!context->in_syscall) | 1785 | if (!context->in_syscall) |
1782 | return; | 1786 | return; |
@@ -1831,6 +1835,8 @@ out: | |||
1831 | if (parent) { | 1835 | if (parent) { |
1832 | n->name_len = n->name ? parent_len(n->name->name) : AUDIT_NAME_FULL; | 1836 | n->name_len = n->name ? parent_len(n->name->name) : AUDIT_NAME_FULL; |
1833 | n->type = AUDIT_TYPE_PARENT; | 1837 | n->type = AUDIT_TYPE_PARENT; |
1838 | if (flags & AUDIT_INODE_HIDDEN) | ||
1839 | n->hidden = true; | ||
1834 | } else { | 1840 | } else { |
1835 | n->name_len = AUDIT_NAME_FULL; | 1841 | n->name_len = AUDIT_NAME_FULL; |
1836 | n->type = AUDIT_TYPE_NORMAL; | 1842 | n->type = AUDIT_TYPE_NORMAL; |
diff --git a/kernel/exit.c b/kernel/exit.c index fafe75d9e6f6..a949819055d5 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -808,7 +808,7 @@ void do_exit(long code) | |||
808 | /* | 808 | /* |
809 | * FIXME: do that only when needed, using sched_exit tracepoint | 809 | * FIXME: do that only when needed, using sched_exit tracepoint |
810 | */ | 810 | */ |
811 | ptrace_put_breakpoints(tsk); | 811 | flush_ptrace_hw_breakpoint(tsk); |
812 | 812 | ||
813 | exit_notify(tsk, group_dead); | 813 | exit_notify(tsk, group_dead); |
814 | #ifdef CONFIG_NUMA | 814 | #ifdef CONFIG_NUMA |
diff --git a/kernel/panic.c b/kernel/panic.c index 167ec097ce8b..97712319f128 100644 --- a/kernel/panic.c +++ b/kernel/panic.c | |||
@@ -399,8 +399,9 @@ struct slowpath_args { | |||
399 | static void warn_slowpath_common(const char *file, int line, void *caller, | 399 | static void warn_slowpath_common(const char *file, int line, void *caller, |
400 | unsigned taint, struct slowpath_args *args) | 400 | unsigned taint, struct slowpath_args *args) |
401 | { | 401 | { |
402 | printk(KERN_WARNING "------------[ cut here ]------------\n"); | 402 | pr_warn("------------[ cut here ]------------\n"); |
403 | printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller); | 403 | pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n", |
404 | raw_smp_processor_id(), current->pid, file, line, caller); | ||
404 | 405 | ||
405 | if (args) | 406 | if (args) |
406 | vprintk(args->fmt, args->args); | 407 | vprintk(args->fmt, args->args); |
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index ba5e6cea181a..4041f5747e73 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -469,6 +469,7 @@ static int ptrace_detach(struct task_struct *child, unsigned int data) | |||
469 | /* Architecture-specific hardware disable .. */ | 469 | /* Architecture-specific hardware disable .. */ |
470 | ptrace_disable(child); | 470 | ptrace_disable(child); |
471 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 471 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
472 | flush_ptrace_hw_breakpoint(child); | ||
472 | 473 | ||
473 | write_lock_irq(&tasklist_lock); | 474 | write_lock_irq(&tasklist_lock); |
474 | /* | 475 | /* |
@@ -1221,19 +1222,3 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, | |||
1221 | return ret; | 1222 | return ret; |
1222 | } | 1223 | } |
1223 | #endif /* CONFIG_COMPAT */ | 1224 | #endif /* CONFIG_COMPAT */ |
1224 | |||
1225 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | ||
1226 | int ptrace_get_breakpoints(struct task_struct *tsk) | ||
1227 | { | ||
1228 | if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt)) | ||
1229 | return 0; | ||
1230 | |||
1231 | return -1; | ||
1232 | } | ||
1233 | |||
1234 | void ptrace_put_breakpoints(struct task_struct *tsk) | ||
1235 | { | ||
1236 | if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt)) | ||
1237 | flush_ptrace_hw_breakpoint(tsk); | ||
1238 | } | ||
1239 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | ||
diff --git a/kernel/reboot.c b/kernel/reboot.c new file mode 100644 index 000000000000..269ed9384cc4 --- /dev/null +++ b/kernel/reboot.c | |||
@@ -0,0 +1,419 @@ | |||
1 | /* | ||
2 | * linux/kernel/reboot.c | ||
3 | * | ||
4 | * Copyright (C) 2013 Linus Torvalds | ||
5 | */ | ||
6 | |||
7 | #define pr_fmt(fmt) "reboot: " fmt | ||
8 | |||
9 | #include <linux/ctype.h> | ||
10 | #include <linux/export.h> | ||
11 | #include <linux/kexec.h> | ||
12 | #include <linux/kmod.h> | ||
13 | #include <linux/kmsg_dump.h> | ||
14 | #include <linux/reboot.h> | ||
15 | #include <linux/suspend.h> | ||
16 | #include <linux/syscalls.h> | ||
17 | #include <linux/syscore_ops.h> | ||
18 | #include <linux/uaccess.h> | ||
19 | |||
20 | /* | ||
21 | * this indicates whether you can reboot with ctrl-alt-del: the default is yes | ||
22 | */ | ||
23 | |||
24 | int C_A_D = 1; | ||
25 | struct pid *cad_pid; | ||
26 | EXPORT_SYMBOL(cad_pid); | ||
27 | |||
28 | #if defined(CONFIG_ARM) || defined(CONFIG_UNICORE32) | ||
29 | #define DEFAULT_REBOOT_MODE = REBOOT_HARD | ||
30 | #else | ||
31 | #define DEFAULT_REBOOT_MODE | ||
32 | #endif | ||
33 | enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE; | ||
34 | |||
35 | int reboot_default; | ||
36 | int reboot_cpu; | ||
37 | enum reboot_type reboot_type = BOOT_ACPI; | ||
38 | int reboot_force; | ||
39 | |||
40 | /* | ||
41 | * If set, this is used for preparing the system to power off. | ||
42 | */ | ||
43 | |||
44 | void (*pm_power_off_prepare)(void); | ||
45 | |||
46 | /** | ||
47 | * emergency_restart - reboot the system | ||
48 | * | ||
49 | * Without shutting down any hardware or taking any locks | ||
50 | * reboot the system. This is called when we know we are in | ||
51 | * trouble so this is our best effort to reboot. This is | ||
52 | * safe to call in interrupt context. | ||
53 | */ | ||
54 | void emergency_restart(void) | ||
55 | { | ||
56 | kmsg_dump(KMSG_DUMP_EMERG); | ||
57 | machine_emergency_restart(); | ||
58 | } | ||
59 | EXPORT_SYMBOL_GPL(emergency_restart); | ||
60 | |||
61 | void kernel_restart_prepare(char *cmd) | ||
62 | { | ||
63 | blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); | ||
64 | system_state = SYSTEM_RESTART; | ||
65 | usermodehelper_disable(); | ||
66 | device_shutdown(); | ||
67 | } | ||
68 | |||
69 | /** | ||
70 | * register_reboot_notifier - Register function to be called at reboot time | ||
71 | * @nb: Info about notifier function to be called | ||
72 | * | ||
73 | * Registers a function with the list of functions | ||
74 | * to be called at reboot time. | ||
75 | * | ||
76 | * Currently always returns zero, as blocking_notifier_chain_register() | ||
77 | * always returns zero. | ||
78 | */ | ||
79 | int register_reboot_notifier(struct notifier_block *nb) | ||
80 | { | ||
81 | return blocking_notifier_chain_register(&reboot_notifier_list, nb); | ||
82 | } | ||
83 | EXPORT_SYMBOL(register_reboot_notifier); | ||
84 | |||
85 | /** | ||
86 | * unregister_reboot_notifier - Unregister previously registered reboot notifier | ||
87 | * @nb: Hook to be unregistered | ||
88 | * | ||
89 | * Unregisters a previously registered reboot | ||
90 | * notifier function. | ||
91 | * | ||
92 | * Returns zero on success, or %-ENOENT on failure. | ||
93 | */ | ||
94 | int unregister_reboot_notifier(struct notifier_block *nb) | ||
95 | { | ||
96 | return blocking_notifier_chain_unregister(&reboot_notifier_list, nb); | ||
97 | } | ||
98 | EXPORT_SYMBOL(unregister_reboot_notifier); | ||
99 | |||
100 | static void migrate_to_reboot_cpu(void) | ||
101 | { | ||
102 | /* The boot cpu is always logical cpu 0 */ | ||
103 | int cpu = reboot_cpu; | ||
104 | |||
105 | cpu_hotplug_disable(); | ||
106 | |||
107 | /* Make certain the cpu I'm about to reboot on is online */ | ||
108 | if (!cpu_online(cpu)) | ||
109 | cpu = cpumask_first(cpu_online_mask); | ||
110 | |||
111 | /* Prevent races with other tasks migrating this task */ | ||
112 | current->flags |= PF_NO_SETAFFINITY; | ||
113 | |||
114 | /* Make certain I only run on the appropriate processor */ | ||
115 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); | ||
116 | } | ||
117 | |||
118 | /** | ||
119 | * kernel_restart - reboot the system | ||
120 | * @cmd: pointer to buffer containing command to execute for restart | ||
121 | * or %NULL | ||
122 | * | ||
123 | * Shutdown everything and perform a clean reboot. | ||
124 | * This is not safe to call in interrupt context. | ||
125 | */ | ||
126 | void kernel_restart(char *cmd) | ||
127 | { | ||
128 | kernel_restart_prepare(cmd); | ||
129 | migrate_to_reboot_cpu(); | ||
130 | syscore_shutdown(); | ||
131 | if (!cmd) | ||
132 | pr_emerg("Restarting system\n"); | ||
133 | else | ||
134 | pr_emerg("Restarting system with command '%s'\n", cmd); | ||
135 | kmsg_dump(KMSG_DUMP_RESTART); | ||
136 | machine_restart(cmd); | ||
137 | } | ||
138 | EXPORT_SYMBOL_GPL(kernel_restart); | ||
139 | |||
140 | static void kernel_shutdown_prepare(enum system_states state) | ||
141 | { | ||
142 | blocking_notifier_call_chain(&reboot_notifier_list, | ||
143 | (state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL); | ||
144 | system_state = state; | ||
145 | usermodehelper_disable(); | ||
146 | device_shutdown(); | ||
147 | } | ||
148 | /** | ||
149 | * kernel_halt - halt the system | ||
150 | * | ||
151 | * Shutdown everything and perform a clean system halt. | ||
152 | */ | ||
153 | void kernel_halt(void) | ||
154 | { | ||
155 | kernel_shutdown_prepare(SYSTEM_HALT); | ||
156 | migrate_to_reboot_cpu(); | ||
157 | syscore_shutdown(); | ||
158 | pr_emerg("System halted\n"); | ||
159 | kmsg_dump(KMSG_DUMP_HALT); | ||
160 | machine_halt(); | ||
161 | } | ||
162 | EXPORT_SYMBOL_GPL(kernel_halt); | ||
163 | |||
164 | /** | ||
165 | * kernel_power_off - power_off the system | ||
166 | * | ||
167 | * Shutdown everything and perform a clean system power_off. | ||
168 | */ | ||
169 | void kernel_power_off(void) | ||
170 | { | ||
171 | kernel_shutdown_prepare(SYSTEM_POWER_OFF); | ||
172 | if (pm_power_off_prepare) | ||
173 | pm_power_off_prepare(); | ||
174 | migrate_to_reboot_cpu(); | ||
175 | syscore_shutdown(); | ||
176 | pr_emerg("Power down\n"); | ||
177 | kmsg_dump(KMSG_DUMP_POWEROFF); | ||
178 | machine_power_off(); | ||
179 | } | ||
180 | EXPORT_SYMBOL_GPL(kernel_power_off); | ||
181 | |||
182 | static DEFINE_MUTEX(reboot_mutex); | ||
183 | |||
184 | /* | ||
185 | * Reboot system call: for obvious reasons only root may call it, | ||
186 | * and even root needs to set up some magic numbers in the registers | ||
187 | * so that some mistake won't make this reboot the whole machine. | ||
188 | * You can also set the meaning of the ctrl-alt-del-key here. | ||
189 | * | ||
190 | * reboot doesn't sync: do that yourself before calling this. | ||
191 | */ | ||
192 | SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, | ||
193 | void __user *, arg) | ||
194 | { | ||
195 | struct pid_namespace *pid_ns = task_active_pid_ns(current); | ||
196 | char buffer[256]; | ||
197 | int ret = 0; | ||
198 | |||
199 | /* We only trust the superuser with rebooting the system. */ | ||
200 | if (!ns_capable(pid_ns->user_ns, CAP_SYS_BOOT)) | ||
201 | return -EPERM; | ||
202 | |||
203 | /* For safety, we require "magic" arguments. */ | ||
204 | if (magic1 != LINUX_REBOOT_MAGIC1 || | ||
205 | (magic2 != LINUX_REBOOT_MAGIC2 && | ||
206 | magic2 != LINUX_REBOOT_MAGIC2A && | ||
207 | magic2 != LINUX_REBOOT_MAGIC2B && | ||
208 | magic2 != LINUX_REBOOT_MAGIC2C)) | ||
209 | return -EINVAL; | ||
210 | |||
211 | /* | ||
212 | * If pid namespaces are enabled and the current task is in a child | ||
213 | * pid_namespace, the command is handled by reboot_pid_ns() which will | ||
214 | * call do_exit(). | ||
215 | */ | ||
216 | ret = reboot_pid_ns(pid_ns, cmd); | ||
217 | if (ret) | ||
218 | return ret; | ||
219 | |||
220 | /* Instead of trying to make the power_off code look like | ||
221 | * halt when pm_power_off is not set do it the easy way. | ||
222 | */ | ||
223 | if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off) | ||
224 | cmd = LINUX_REBOOT_CMD_HALT; | ||
225 | |||
226 | mutex_lock(&reboot_mutex); | ||
227 | switch (cmd) { | ||
228 | case LINUX_REBOOT_CMD_RESTART: | ||
229 | kernel_restart(NULL); | ||
230 | break; | ||
231 | |||
232 | case LINUX_REBOOT_CMD_CAD_ON: | ||
233 | C_A_D = 1; | ||
234 | break; | ||
235 | |||
236 | case LINUX_REBOOT_CMD_CAD_OFF: | ||
237 | C_A_D = 0; | ||
238 | break; | ||
239 | |||
240 | case LINUX_REBOOT_CMD_HALT: | ||
241 | kernel_halt(); | ||
242 | do_exit(0); | ||
243 | panic("cannot halt"); | ||
244 | |||
245 | case LINUX_REBOOT_CMD_POWER_OFF: | ||
246 | kernel_power_off(); | ||
247 | do_exit(0); | ||
248 | break; | ||
249 | |||
250 | case LINUX_REBOOT_CMD_RESTART2: | ||
251 | ret = strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1); | ||
252 | if (ret < 0) { | ||
253 | ret = -EFAULT; | ||
254 | break; | ||
255 | } | ||
256 | buffer[sizeof(buffer) - 1] = '\0'; | ||
257 | |||
258 | kernel_restart(buffer); | ||
259 | break; | ||
260 | |||
261 | #ifdef CONFIG_KEXEC | ||
262 | case LINUX_REBOOT_CMD_KEXEC: | ||
263 | ret = kernel_kexec(); | ||
264 | break; | ||
265 | #endif | ||
266 | |||
267 | #ifdef CONFIG_HIBERNATION | ||
268 | case LINUX_REBOOT_CMD_SW_SUSPEND: | ||
269 | ret = hibernate(); | ||
270 | break; | ||
271 | #endif | ||
272 | |||
273 | default: | ||
274 | ret = -EINVAL; | ||
275 | break; | ||
276 | } | ||
277 | mutex_unlock(&reboot_mutex); | ||
278 | return ret; | ||
279 | } | ||
280 | |||
281 | static void deferred_cad(struct work_struct *dummy) | ||
282 | { | ||
283 | kernel_restart(NULL); | ||
284 | } | ||
285 | |||
286 | /* | ||
287 | * This function gets called by ctrl-alt-del - ie the keyboard interrupt. | ||
288 | * As it's called within an interrupt, it may NOT sync: the only choice | ||
289 | * is whether to reboot at once, or just ignore the ctrl-alt-del. | ||
290 | */ | ||
291 | void ctrl_alt_del(void) | ||
292 | { | ||
293 | static DECLARE_WORK(cad_work, deferred_cad); | ||
294 | |||
295 | if (C_A_D) | ||
296 | schedule_work(&cad_work); | ||
297 | else | ||
298 | kill_cad_pid(SIGINT, 1); | ||
299 | } | ||
300 | |||
301 | char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff"; | ||
302 | |||
303 | static int __orderly_poweroff(bool force) | ||
304 | { | ||
305 | char **argv; | ||
306 | static char *envp[] = { | ||
307 | "HOME=/", | ||
308 | "PATH=/sbin:/bin:/usr/sbin:/usr/bin", | ||
309 | NULL | ||
310 | }; | ||
311 | int ret; | ||
312 | |||
313 | argv = argv_split(GFP_KERNEL, poweroff_cmd, NULL); | ||
314 | if (argv) { | ||
315 | ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); | ||
316 | argv_free(argv); | ||
317 | } else { | ||
318 | ret = -ENOMEM; | ||
319 | } | ||
320 | |||
321 | if (ret && force) { | ||
322 | pr_warn("Failed to start orderly shutdown: forcing the issue\n"); | ||
323 | /* | ||
324 | * I guess this should try to kick off some daemon to sync and | ||
325 | * poweroff asap. Or not even bother syncing if we're doing an | ||
326 | * emergency shutdown? | ||
327 | */ | ||
328 | emergency_sync(); | ||
329 | kernel_power_off(); | ||
330 | } | ||
331 | |||
332 | return ret; | ||
333 | } | ||
334 | |||
335 | static bool poweroff_force; | ||
336 | |||
337 | static void poweroff_work_func(struct work_struct *work) | ||
338 | { | ||
339 | __orderly_poweroff(poweroff_force); | ||
340 | } | ||
341 | |||
342 | static DECLARE_WORK(poweroff_work, poweroff_work_func); | ||
343 | |||
344 | /** | ||
345 | * orderly_poweroff - Trigger an orderly system poweroff | ||
346 | * @force: force poweroff if command execution fails | ||
347 | * | ||
348 | * This may be called from any context to trigger a system shutdown. | ||
349 | * If the orderly shutdown fails, it will force an immediate shutdown. | ||
350 | */ | ||
351 | int orderly_poweroff(bool force) | ||
352 | { | ||
353 | if (force) /* do not override the pending "true" */ | ||
354 | poweroff_force = true; | ||
355 | schedule_work(&poweroff_work); | ||
356 | return 0; | ||
357 | } | ||
358 | EXPORT_SYMBOL_GPL(orderly_poweroff); | ||
359 | |||
360 | static int __init reboot_setup(char *str) | ||
361 | { | ||
362 | for (;;) { | ||
363 | /* | ||
364 | * Having anything passed on the command line via | ||
365 | * reboot= will cause us to disable DMI checking | ||
366 | * below. | ||
367 | */ | ||
368 | reboot_default = 0; | ||
369 | |||
370 | switch (*str) { | ||
371 | case 'w': | ||
372 | reboot_mode = REBOOT_WARM; | ||
373 | break; | ||
374 | |||
375 | case 'c': | ||
376 | reboot_mode = REBOOT_COLD; | ||
377 | break; | ||
378 | |||
379 | case 'h': | ||
380 | reboot_mode = REBOOT_HARD; | ||
381 | break; | ||
382 | |||
383 | case 's': | ||
384 | if (isdigit(*(str+1))) | ||
385 | reboot_cpu = simple_strtoul(str+1, NULL, 0); | ||
386 | else if (str[1] == 'm' && str[2] == 'p' && | ||
387 | isdigit(*(str+3))) | ||
388 | reboot_cpu = simple_strtoul(str+3, NULL, 0); | ||
389 | else | ||
390 | reboot_mode = REBOOT_SOFT; | ||
391 | break; | ||
392 | |||
393 | case 'g': | ||
394 | reboot_mode = REBOOT_GPIO; | ||
395 | break; | ||
396 | |||
397 | case 'b': | ||
398 | case 'a': | ||
399 | case 'k': | ||
400 | case 't': | ||
401 | case 'e': | ||
402 | case 'p': | ||
403 | reboot_type = *str; | ||
404 | break; | ||
405 | |||
406 | case 'f': | ||
407 | reboot_force = 1; | ||
408 | break; | ||
409 | } | ||
410 | |||
411 | str = strchr(str, ','); | ||
412 | if (str) | ||
413 | str++; | ||
414 | else | ||
415 | break; | ||
416 | } | ||
417 | return 1; | ||
418 | } | ||
419 | __setup("reboot=", reboot_setup); | ||
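reboot_setup() above walks the comma-separated reboot= string one token at a time: the first character selects warm/cold/hard/soft/gpio, or a reboot type (bios, acpi, kbd, triple, efi, pci); 'f' sets reboot_force; and an 's' followed by digits (or "smp" plus digits) picks the CPU to reboot on instead of soft mode. A standalone userspace sketch of that dispatch, not the kernel parser itself:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char cmdline[] = "warm,s4,force";       /* e.g. booting with reboot=warm,s4,force */
        char mode = 0, type = 0;
        int cpu = -1, force = 0;

        for (char *tok = strtok(cmdline, ","); tok; tok = strtok(NULL, ",")) {
                switch (tok[0]) {
                case 'w': case 'c': case 'h': case 'g':
                        mode = tok[0];          /* warm / cold / hard / gpio */
                        break;
                case 's':
                        if (isdigit((unsigned char)tok[1]))
                                cpu = atoi(tok + 1);
                        else if (!strncmp(tok, "smp", 3) && isdigit((unsigned char)tok[3]))
                                cpu = atoi(tok + 3);
                        else
                                mode = 's';     /* soft */
                        break;
                case 'b': case 'a': case 'k': case 't': case 'e': case 'p':
                        type = tok[0];          /* bios/acpi/kbd/triple/efi/pci */
                        break;
                case 'f':
                        force = 1;
                        break;
                }
        }
        printf("mode=%c cpu=%d type=%c force=%d\n",
               mode ? mode : '-', cpu, type ? type : '-', force);
        return 0;
}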
diff --git a/kernel/sys.c b/kernel/sys.c index 071de900c824..771129b299f8 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -116,20 +116,6 @@ EXPORT_SYMBOL(fs_overflowuid); | |||
116 | EXPORT_SYMBOL(fs_overflowgid); | 116 | EXPORT_SYMBOL(fs_overflowgid); |
117 | 117 | ||
118 | /* | 118 | /* |
119 | * this indicates whether you can reboot with ctrl-alt-del: the default is yes | ||
120 | */ | ||
121 | |||
122 | int C_A_D = 1; | ||
123 | struct pid *cad_pid; | ||
124 | EXPORT_SYMBOL(cad_pid); | ||
125 | |||
126 | /* | ||
127 | * If set, this is used for preparing the system to power off. | ||
128 | */ | ||
129 | |||
130 | void (*pm_power_off_prepare)(void); | ||
131 | |||
132 | /* | ||
133 | * Returns true if current's euid is same as p's uid or euid, | 119 | * Returns true if current's euid is same as p's uid or euid, |
134 | * or has CAP_SYS_NICE to p's user_ns. | 120 | * or has CAP_SYS_NICE to p's user_ns. |
135 | * | 121 | * |
@@ -308,266 +294,6 @@ out_unlock: | |||
308 | return retval; | 294 | return retval; |
309 | } | 295 | } |
310 | 296 | ||
311 | /** | ||
312 | * emergency_restart - reboot the system | ||
313 | * | ||
314 | * Without shutting down any hardware or taking any locks | ||
315 | * reboot the system. This is called when we know we are in | ||
316 | * trouble so this is our best effort to reboot. This is | ||
317 | * safe to call in interrupt context. | ||
318 | */ | ||
319 | void emergency_restart(void) | ||
320 | { | ||
321 | kmsg_dump(KMSG_DUMP_EMERG); | ||
322 | machine_emergency_restart(); | ||
323 | } | ||
324 | EXPORT_SYMBOL_GPL(emergency_restart); | ||
325 | |||
326 | void kernel_restart_prepare(char *cmd) | ||
327 | { | ||
328 | blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); | ||
329 | system_state = SYSTEM_RESTART; | ||
330 | usermodehelper_disable(); | ||
331 | device_shutdown(); | ||
332 | } | ||
333 | |||
334 | /** | ||
335 | * register_reboot_notifier - Register function to be called at reboot time | ||
336 | * @nb: Info about notifier function to be called | ||
337 | * | ||
338 | * Registers a function with the list of functions | ||
339 | * to be called at reboot time. | ||
340 | * | ||
341 | * Currently always returns zero, as blocking_notifier_chain_register() | ||
342 | * always returns zero. | ||
343 | */ | ||
344 | int register_reboot_notifier(struct notifier_block *nb) | ||
345 | { | ||
346 | return blocking_notifier_chain_register(&reboot_notifier_list, nb); | ||
347 | } | ||
348 | EXPORT_SYMBOL(register_reboot_notifier); | ||
349 | |||
350 | /** | ||
351 | * unregister_reboot_notifier - Unregister previously registered reboot notifier | ||
352 | * @nb: Hook to be unregistered | ||
353 | * | ||
354 | * Unregisters a previously registered reboot | ||
355 | * notifier function. | ||
356 | * | ||
357 | * Returns zero on success, or %-ENOENT on failure. | ||
358 | */ | ||
359 | int unregister_reboot_notifier(struct notifier_block *nb) | ||
360 | { | ||
361 | return blocking_notifier_chain_unregister(&reboot_notifier_list, nb); | ||
362 | } | ||
363 | EXPORT_SYMBOL(unregister_reboot_notifier); | ||
364 | |||
365 | /* Add backwards compatibility for stable trees. */ | ||
366 | #ifndef PF_NO_SETAFFINITY | ||
367 | #define PF_NO_SETAFFINITY PF_THREAD_BOUND | ||
368 | #endif | ||
369 | |||
370 | static void migrate_to_reboot_cpu(void) | ||
371 | { | ||
372 | /* The boot cpu is always logical cpu 0 */ | ||
373 | int cpu = 0; | ||
374 | |||
375 | cpu_hotplug_disable(); | ||
376 | |||
377 | /* Make certain the cpu I'm about to reboot on is online */ | ||
378 | if (!cpu_online(cpu)) | ||
379 | cpu = cpumask_first(cpu_online_mask); | ||
380 | |||
381 | /* Prevent races with other tasks migrating this task */ | ||
382 | current->flags |= PF_NO_SETAFFINITY; | ||
383 | |||
384 | /* Make certain I only run on the appropriate processor */ | ||
385 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); | ||
386 | } | ||
387 | |||
388 | /** | ||
389 | * kernel_restart - reboot the system | ||
390 | * @cmd: pointer to buffer containing command to execute for restart | ||
391 | * or %NULL | ||
392 | * | ||
393 | * Shutdown everything and perform a clean reboot. | ||
394 | * This is not safe to call in interrupt context. | ||
395 | */ | ||
396 | void kernel_restart(char *cmd) | ||
397 | { | ||
398 | kernel_restart_prepare(cmd); | ||
399 | migrate_to_reboot_cpu(); | ||
400 | syscore_shutdown(); | ||
401 | if (!cmd) | ||
402 | printk(KERN_EMERG "Restarting system.\n"); | ||
403 | else | ||
404 | printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd); | ||
405 | kmsg_dump(KMSG_DUMP_RESTART); | ||
406 | machine_restart(cmd); | ||
407 | } | ||
408 | EXPORT_SYMBOL_GPL(kernel_restart); | ||
409 | |||
410 | static void kernel_shutdown_prepare(enum system_states state) | ||
411 | { | ||
412 | blocking_notifier_call_chain(&reboot_notifier_list, | ||
413 | (state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL); | ||
414 | system_state = state; | ||
415 | usermodehelper_disable(); | ||
416 | device_shutdown(); | ||
417 | } | ||
418 | /** | ||
419 | * kernel_halt - halt the system | ||
420 | * | ||
421 | * Shutdown everything and perform a clean system halt. | ||
422 | */ | ||
423 | void kernel_halt(void) | ||
424 | { | ||
425 | kernel_shutdown_prepare(SYSTEM_HALT); | ||
426 | migrate_to_reboot_cpu(); | ||
427 | syscore_shutdown(); | ||
428 | printk(KERN_EMERG "System halted.\n"); | ||
429 | kmsg_dump(KMSG_DUMP_HALT); | ||
430 | machine_halt(); | ||
431 | } | ||
432 | |||
433 | EXPORT_SYMBOL_GPL(kernel_halt); | ||
434 | |||
435 | /** | ||
436 | * kernel_power_off - power_off the system | ||
437 | * | ||
438 | * Shutdown everything and perform a clean system power_off. | ||
439 | */ | ||
440 | void kernel_power_off(void) | ||
441 | { | ||
442 | kernel_shutdown_prepare(SYSTEM_POWER_OFF); | ||
443 | if (pm_power_off_prepare) | ||
444 | pm_power_off_prepare(); | ||
445 | migrate_to_reboot_cpu(); | ||
446 | syscore_shutdown(); | ||
447 | printk(KERN_EMERG "Power down.\n"); | ||
448 | kmsg_dump(KMSG_DUMP_POWEROFF); | ||
449 | machine_power_off(); | ||
450 | } | ||
451 | EXPORT_SYMBOL_GPL(kernel_power_off); | ||
452 | |||
453 | static DEFINE_MUTEX(reboot_mutex); | ||
454 | |||
455 | /* | ||
456 | * Reboot system call: for obvious reasons only root may call it, | ||
457 | * and even root needs to set up some magic numbers in the registers | ||
458 | * so that some mistake won't make this reboot the whole machine. | ||
459 | * You can also set the meaning of the ctrl-alt-del-key here. | ||
460 | * | ||
461 | * reboot doesn't sync: do that yourself before calling this. | ||
462 | */ | ||
463 | SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, | ||
464 | void __user *, arg) | ||
465 | { | ||
466 | struct pid_namespace *pid_ns = task_active_pid_ns(current); | ||
467 | char buffer[256]; | ||
468 | int ret = 0; | ||
469 | |||
470 | /* We only trust the superuser with rebooting the system. */ | ||
471 | if (!ns_capable(pid_ns->user_ns, CAP_SYS_BOOT)) | ||
472 | return -EPERM; | ||
473 | |||
474 | /* For safety, we require "magic" arguments. */ | ||
475 | if (magic1 != LINUX_REBOOT_MAGIC1 || | ||
476 | (magic2 != LINUX_REBOOT_MAGIC2 && | ||
477 | magic2 != LINUX_REBOOT_MAGIC2A && | ||
478 | magic2 != LINUX_REBOOT_MAGIC2B && | ||
479 | magic2 != LINUX_REBOOT_MAGIC2C)) | ||
480 | return -EINVAL; | ||
481 | |||
482 | /* | ||
483 | * If pid namespaces are enabled and the current task is in a child | ||
484 | * pid_namespace, the command is handled by reboot_pid_ns() which will | ||
485 | * call do_exit(). | ||
486 | */ | ||
487 | ret = reboot_pid_ns(pid_ns, cmd); | ||
488 | if (ret) | ||
489 | return ret; | ||
490 | |||
491 | /* Instead of trying to make the power_off code look like | ||
492 | * halt when pm_power_off is not set do it the easy way. | ||
493 | */ | ||
494 | if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off) | ||
495 | cmd = LINUX_REBOOT_CMD_HALT; | ||
496 | |||
497 | mutex_lock(&reboot_mutex); | ||
498 | switch (cmd) { | ||
499 | case LINUX_REBOOT_CMD_RESTART: | ||
500 | kernel_restart(NULL); | ||
501 | break; | ||
502 | |||
503 | case LINUX_REBOOT_CMD_CAD_ON: | ||
504 | C_A_D = 1; | ||
505 | break; | ||
506 | |||
507 | case LINUX_REBOOT_CMD_CAD_OFF: | ||
508 | C_A_D = 0; | ||
509 | break; | ||
510 | |||
511 | case LINUX_REBOOT_CMD_HALT: | ||
512 | kernel_halt(); | ||
513 | do_exit(0); | ||
514 | panic("cannot halt.\n"); | ||
515 | |||
516 | case LINUX_REBOOT_CMD_POWER_OFF: | ||
517 | kernel_power_off(); | ||
518 | do_exit(0); | ||
519 | break; | ||
520 | |||
521 | case LINUX_REBOOT_CMD_RESTART2: | ||
522 | if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) { | ||
523 | ret = -EFAULT; | ||
524 | break; | ||
525 | } | ||
526 | buffer[sizeof(buffer) - 1] = '\0'; | ||
527 | |||
528 | kernel_restart(buffer); | ||
529 | break; | ||
530 | |||
531 | #ifdef CONFIG_KEXEC | ||
532 | case LINUX_REBOOT_CMD_KEXEC: | ||
533 | ret = kernel_kexec(); | ||
534 | break; | ||
535 | #endif | ||
536 | |||
537 | #ifdef CONFIG_HIBERNATION | ||
538 | case LINUX_REBOOT_CMD_SW_SUSPEND: | ||
539 | ret = hibernate(); | ||
540 | break; | ||
541 | #endif | ||
542 | |||
543 | default: | ||
544 | ret = -EINVAL; | ||
545 | break; | ||
546 | } | ||
547 | mutex_unlock(&reboot_mutex); | ||
548 | return ret; | ||
549 | } | ||
550 | |||
551 | static void deferred_cad(struct work_struct *dummy) | ||
552 | { | ||
553 | kernel_restart(NULL); | ||
554 | } | ||
555 | |||
556 | /* | ||
557 | * This function gets called by ctrl-alt-del - ie the keyboard interrupt. | ||
558 | * As it's called within an interrupt, it may NOT sync: the only choice | ||
559 | * is whether to reboot at once, or just ignore the ctrl-alt-del. | ||
560 | */ | ||
561 | void ctrl_alt_del(void) | ||
562 | { | ||
563 | static DECLARE_WORK(cad_work, deferred_cad); | ||
564 | |||
565 | if (C_A_D) | ||
566 | schedule_work(&cad_work); | ||
567 | else | ||
568 | kill_cad_pid(SIGINT, 1); | ||
569 | } | ||
570 | |||
571 | /* | 297 | /* |
572 | * Unprivileged users may change the real gid to the effective gid | 298 | * Unprivileged users may change the real gid to the effective gid |
573 | * or vice versa. (BSD-style) | 299 | * or vice versa. (BSD-style) |
@@ -2292,68 +2018,6 @@ SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep, | |||
2292 | return err ? -EFAULT : 0; | 2018 | return err ? -EFAULT : 0; |
2293 | } | 2019 | } |
2294 | 2020 | ||
2295 | char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff"; | ||
2296 | |||
2297 | static int __orderly_poweroff(bool force) | ||
2298 | { | ||
2299 | char **argv; | ||
2300 | static char *envp[] = { | ||
2301 | "HOME=/", | ||
2302 | "PATH=/sbin:/bin:/usr/sbin:/usr/bin", | ||
2303 | NULL | ||
2304 | }; | ||
2305 | int ret; | ||
2306 | |||
2307 | argv = argv_split(GFP_KERNEL, poweroff_cmd, NULL); | ||
2308 | if (argv) { | ||
2309 | ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); | ||
2310 | argv_free(argv); | ||
2311 | } else { | ||
2312 | printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n", | ||
2313 | __func__, poweroff_cmd); | ||
2314 | ret = -ENOMEM; | ||
2315 | } | ||
2316 | |||
2317 | if (ret && force) { | ||
2318 | printk(KERN_WARNING "Failed to start orderly shutdown: " | ||
2319 | "forcing the issue\n"); | ||
2320 | /* | ||
2321 | * I guess this should try to kick off some daemon to sync and | ||
2322 | * poweroff asap. Or not even bother syncing if we're doing an | ||
2323 | * emergency shutdown? | ||
2324 | */ | ||
2325 | emergency_sync(); | ||
2326 | kernel_power_off(); | ||
2327 | } | ||
2328 | |||
2329 | return ret; | ||
2330 | } | ||
2331 | |||
2332 | static bool poweroff_force; | ||
2333 | |||
2334 | static void poweroff_work_func(struct work_struct *work) | ||
2335 | { | ||
2336 | __orderly_poweroff(poweroff_force); | ||
2337 | } | ||
2338 | |||
2339 | static DECLARE_WORK(poweroff_work, poweroff_work_func); | ||
2340 | |||
2341 | /** | ||
2342 | * orderly_poweroff - Trigger an orderly system poweroff | ||
2343 | * @force: force poweroff if command execution fails | ||
2344 | * | ||
2345 | * This may be called from any context to trigger a system shutdown. | ||
2346 | * If the orderly shutdown fails, it will force an immediate shutdown. | ||
2347 | */ | ||
2348 | int orderly_poweroff(bool force) | ||
2349 | { | ||
2350 | if (force) /* do not override the pending "true" */ | ||
2351 | poweroff_force = true; | ||
2352 | schedule_work(&poweroff_work); | ||
2353 | return 0; | ||
2354 | } | ||
2355 | EXPORT_SYMBOL_GPL(orderly_poweroff); | ||
2356 | |||
2357 | /** | 2021 | /** |
2358 | * do_sysinfo - fill in sysinfo struct | 2022 | * do_sysinfo - fill in sysinfo struct |
2359 | * @info: pointer to buffer to fill | 2023 | * @info: pointer to buffer to fill |
diff --git a/lib/Kconfig b/lib/Kconfig index f1ed53c3aa44..35da51359d40 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
@@ -194,6 +194,15 @@ config LZO_COMPRESS | |||
194 | config LZO_DECOMPRESS | 194 | config LZO_DECOMPRESS |
195 | tristate | 195 | tristate |
196 | 196 | ||
197 | config LZ4_COMPRESS | ||
198 | tristate | ||
199 | |||
200 | config LZ4HC_COMPRESS | ||
201 | tristate | ||
202 | |||
203 | config LZ4_DECOMPRESS | ||
204 | tristate | ||
205 | |||
197 | source "lib/xz/Kconfig" | 206 | source "lib/xz/Kconfig" |
198 | 207 | ||
199 | # | 208 | # |
@@ -218,6 +227,10 @@ config DECOMPRESS_LZO | |||
218 | select LZO_DECOMPRESS | 227 | select LZO_DECOMPRESS |
219 | tristate | 228 | tristate |
220 | 229 | ||
230 | config DECOMPRESS_LZ4 | ||
231 | select LZ4_DECOMPRESS | ||
232 | tristate | ||
233 | |||
221 | # | 234 | # |
222 | # Generic allocator support is selected if needed | 235 | # Generic allocator support is selected if needed |
223 | # | 236 | # |
diff --git a/lib/Makefile b/lib/Makefile index c09e38eca87a..7baccfd8a4e9 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -23,7 +23,7 @@ lib-y += kobject.o klist.o | |||
23 | 23 | ||
24 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | 24 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ |
25 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ | 25 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ |
26 | gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o \ | 26 | gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ |
27 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o | 27 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o |
28 | obj-y += string_helpers.o | 28 | obj-y += string_helpers.o |
29 | obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o | 29 | obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o |
@@ -75,6 +75,9 @@ obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ | |||
75 | obj-$(CONFIG_BCH) += bch.o | 75 | obj-$(CONFIG_BCH) += bch.o |
76 | obj-$(CONFIG_LZO_COMPRESS) += lzo/ | 76 | obj-$(CONFIG_LZO_COMPRESS) += lzo/ |
77 | obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ | 77 | obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ |
78 | obj-$(CONFIG_LZ4_COMPRESS) += lz4/ | ||
79 | obj-$(CONFIG_LZ4HC_COMPRESS) += lz4/ | ||
80 | obj-$(CONFIG_LZ4_DECOMPRESS) += lz4/ | ||
78 | obj-$(CONFIG_XZ_DEC) += xz/ | 81 | obj-$(CONFIG_XZ_DEC) += xz/ |
79 | obj-$(CONFIG_RAID6_PQ) += raid6/ | 82 | obj-$(CONFIG_RAID6_PQ) += raid6/ |
80 | 83 | ||
@@ -83,6 +86,7 @@ lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o | |||
83 | lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o | 86 | lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o |
84 | lib-$(CONFIG_DECOMPRESS_XZ) += decompress_unxz.o | 87 | lib-$(CONFIG_DECOMPRESS_XZ) += decompress_unxz.o |
85 | lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o | 88 | lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o |
89 | lib-$(CONFIG_DECOMPRESS_LZ4) += decompress_unlz4.o | ||
86 | 90 | ||
87 | obj-$(CONFIG_TEXTSEARCH) += textsearch.o | 91 | obj-$(CONFIG_TEXTSEARCH) += textsearch.o |
88 | obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o | 92 | obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o |
diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c new file mode 100644 index 000000000000..a8f8379eb49f --- /dev/null +++ b/lib/clz_ctz.c | |||
@@ -0,0 +1,58 @@ | |||
1 | /* | ||
2 | * lib/clz_ctz.c | ||
3 | * | ||
4 | * Copyright (C) 2013 Chanho Min <chanho.min@lge.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * __c[lt]z[sd]i2 can be overridden by linking arch-specific versions. | ||
11 | */ | ||
12 | |||
13 | #include <linux/export.h> | ||
14 | #include <linux/kernel.h> | ||
15 | |||
16 | int __weak __ctzsi2(int val) | ||
17 | { | ||
18 | return __ffs(val); | ||
19 | } | ||
20 | EXPORT_SYMBOL(__ctzsi2); | ||
21 | |||
22 | int __weak __clzsi2(int val) | ||
23 | { | ||
24 | return 32 - fls(val); | ||
25 | } | ||
26 | EXPORT_SYMBOL(__clzsi2); | ||
27 | |||
28 | #if BITS_PER_LONG == 32 | ||
29 | |||
30 | int __weak __clzdi2(long val) | ||
31 | { | ||
32 | return 32 - fls((int)val); | ||
33 | } | ||
34 | EXPORT_SYMBOL(__clzdi2); | ||
35 | |||
36 | int __weak __ctzdi2(long val) | ||
37 | { | ||
38 | return __ffs((u32)val); | ||
39 | } | ||
40 | EXPORT_SYMBOL(__ctzdi2); | ||
41 | |||
42 | #elif BITS_PER_LONG == 64 | ||
43 | |||
44 | int __weak __clzdi2(long val) | ||
45 | { | ||
46 | return 64 - fls64((u64)val); | ||
47 | } | ||
48 | EXPORT_SYMBOL(__clzdi2); | ||
49 | |||
50 | int __weak __ctzdi2(long val) | ||
51 | { | ||
52 | return __ffs64((u64)val); | ||
53 | } | ||
54 | EXPORT_SYMBOL(__ctzdi2); | ||
55 | |||
56 | #else | ||
57 | #error BITS_PER_LONG not 32 or 64 | ||
58 | #endif | ||
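The weak helpers above provide the libgcc entry points (__clzsi2, __ctzsi2, __clzdi2, __ctzdi2) that the compiler may call for __builtin_clz/__builtin_ctz on architectures without suitable instructions, defining them in terms of fls()/__ffs(). A quick userspace check of the identities they rely on, clz(x) == 32 - fls(x) and ctz(x) == __ffs(x) for non-zero x; the loop-based fls/__ffs below are stand-ins for the kernel bitops:

#include <stdio.h>

/* fls(): 1-based index of the highest set bit, 0 for x == 0. */
static int fls32(unsigned int x)
{
        int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;
}

/* __ffs(): 0-based index of the lowest set bit, x must be non-zero. */
static int ffs0(unsigned int x)
{
        int r = 0;

        while (!(x & 1)) {
                r++;
                x >>= 1;
        }
        return r;
}

int main(void)
{
        unsigned int samples[] = { 1, 0x80, 0x12345678, 0x80000000 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                unsigned int x = samples[i];

                printf("x=%#10x clz=%2d 32-fls=%2d ctz=%2d __ffs=%2d\n",
                       x, __builtin_clz(x), 32 - fls32(x),
                       __builtin_ctz(x), ffs0(x));
        }
        return 0;
}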
diff --git a/lib/decompress.c b/lib/decompress.c index f8fdedaf7b3d..4d1cd0397aab 100644 --- a/lib/decompress.c +++ b/lib/decompress.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/decompress/unxz.h> | 11 | #include <linux/decompress/unxz.h> |
12 | #include <linux/decompress/inflate.h> | 12 | #include <linux/decompress/inflate.h> |
13 | #include <linux/decompress/unlzo.h> | 13 | #include <linux/decompress/unlzo.h> |
14 | #include <linux/decompress/unlz4.h> | ||
14 | 15 | ||
15 | #include <linux/types.h> | 16 | #include <linux/types.h> |
16 | #include <linux/string.h> | 17 | #include <linux/string.h> |
@@ -31,6 +32,9 @@ | |||
31 | #ifndef CONFIG_DECOMPRESS_LZO | 32 | #ifndef CONFIG_DECOMPRESS_LZO |
32 | # define unlzo NULL | 33 | # define unlzo NULL |
33 | #endif | 34 | #endif |
35 | #ifndef CONFIG_DECOMPRESS_LZ4 | ||
36 | # define unlz4 NULL | ||
37 | #endif | ||
34 | 38 | ||
35 | struct compress_format { | 39 | struct compress_format { |
36 | unsigned char magic[2]; | 40 | unsigned char magic[2]; |
@@ -45,6 +49,7 @@ static const struct compress_format compressed_formats[] __initconst = { | |||
45 | { {0x5d, 0x00}, "lzma", unlzma }, | 49 | { {0x5d, 0x00}, "lzma", unlzma }, |
46 | { {0xfd, 0x37}, "xz", unxz }, | 50 | { {0xfd, 0x37}, "xz", unxz }, |
47 | { {0x89, 0x4c}, "lzo", unlzo }, | 51 | { {0x89, 0x4c}, "lzo", unlzo }, |
52 | { {0x02, 0x21}, "lz4", unlz4 }, | ||
48 | { {0, 0}, NULL, NULL } | 53 | { {0, 0}, NULL, NULL } |
49 | }; | 54 | }; |
50 | 55 | ||
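The new {0x02, 0x21} entry above is matched against the first two bytes of the compressed image; they are simply the little-endian start of the legacy LZ4 archive magic 0x184C2102 used by decompress_unlz4.c. A trivial check, assuming a little-endian host:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
        uint32_t magic = 0x184C2102;            /* ARCHIVE_MAGICNUMBER */
        unsigned char buf[4];

        memcpy(buf, &magic, sizeof(buf));       /* byte order of the host (LE assumed) */
        printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
        return 0;
}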
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c new file mode 100644 index 000000000000..3e67cfad16ad --- /dev/null +++ b/lib/decompress_unlz4.c | |||
@@ -0,0 +1,187 @@ | |||
1 | /* | ||
2 | * Wrapper for decompressing LZ4-compressed kernel, initramfs, and initrd | ||
3 | * | ||
4 | * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #ifdef STATIC | ||
12 | #define PREBOOT | ||
13 | #include "lz4/lz4_decompress.c" | ||
14 | #else | ||
15 | #include <linux/decompress/unlz4.h> | ||
16 | #endif | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/lz4.h> | ||
19 | #include <linux/decompress/mm.h> | ||
20 | #include <linux/compiler.h> | ||
21 | |||
22 | #include <asm/unaligned.h> | ||
23 | |||
24 | /* | ||
25 | * Note: Uncompressed chunk size is used in the compressor side | ||
26 | * (userspace side for compression). | ||
27 | * It is hardcoded because there is not proper way to extract it | ||
28 | * from the binary stream which is generated by the preliminary | ||
29 | * version of LZ4 tool so far. | ||
30 | */ | ||
31 | #define LZ4_DEFAULT_UNCOMPRESSED_CHUNK_SIZE (8 << 20) | ||
32 | #define ARCHIVE_MAGICNUMBER 0x184C2102 | ||
33 | |||
34 | STATIC inline int INIT unlz4(u8 *input, int in_len, | ||
35 | int (*fill) (void *, unsigned int), | ||
36 | int (*flush) (void *, unsigned int), | ||
37 | u8 *output, int *posp, | ||
38 | void (*error) (char *x)) | ||
39 | { | ||
40 | int ret = -1; | ||
41 | size_t chunksize = 0; | ||
42 | size_t uncomp_chunksize = LZ4_DEFAULT_UNCOMPRESSED_CHUNK_SIZE; | ||
43 | u8 *inp; | ||
44 | u8 *inp_start; | ||
45 | u8 *outp; | ||
46 | int size = in_len; | ||
47 | #ifdef PREBOOT | ||
48 | size_t out_len = get_unaligned_le32(input + in_len); | ||
49 | #endif | ||
50 | size_t dest_len; | ||
51 | |||
52 | |||
53 | if (output) { | ||
54 | outp = output; | ||
55 | } else if (!flush) { | ||
56 | error("NULL output pointer and no flush function provided"); | ||
57 | goto exit_0; | ||
58 | } else { | ||
59 | outp = large_malloc(uncomp_chunksize); | ||
60 | if (!outp) { | ||
61 | error("Could not allocate output buffer"); | ||
62 | goto exit_0; | ||
63 | } | ||
64 | } | ||
65 | |||
66 | if (input && fill) { | ||
67 | error("Both input pointer and fill function provided,"); | ||
68 | goto exit_1; | ||
69 | } else if (input) { | ||
70 | inp = input; | ||
71 | } else if (!fill) { | ||
72 | error("NULL input pointer and missing fill function"); | ||
73 | goto exit_1; | ||
74 | } else { | ||
75 | inp = large_malloc(lz4_compressbound(uncomp_chunksize)); | ||
76 | if (!inp) { | ||
77 | error("Could not allocate input buffer"); | ||
78 | goto exit_1; | ||
79 | } | ||
80 | } | ||
81 | inp_start = inp; | ||
82 | |||
83 | if (posp) | ||
84 | *posp = 0; | ||
85 | |||
86 | if (fill) | ||
87 | fill(inp, 4); | ||
88 | |||
89 | chunksize = get_unaligned_le32(inp); | ||
90 | if (chunksize == ARCHIVE_MAGICNUMBER) { | ||
91 | inp += 4; | ||
92 | size -= 4; | ||
93 | } else { | ||
94 | error("invalid header"); | ||
95 | goto exit_2; | ||
96 | } | ||
97 | |||
98 | if (posp) | ||
99 | *posp += 4; | ||
100 | |||
101 | for (;;) { | ||
102 | |||
103 | if (fill) | ||
104 | fill(inp, 4); | ||
105 | |||
106 | chunksize = get_unaligned_le32(inp); | ||
107 | if (chunksize == ARCHIVE_MAGICNUMBER) { | ||
108 | inp += 4; | ||
109 | size -= 4; | ||
110 | if (posp) | ||
111 | *posp += 4; | ||
112 | continue; | ||
113 | } | ||
114 | inp += 4; | ||
115 | size -= 4; | ||
116 | |||
117 | if (posp) | ||
118 | *posp += 4; | ||
119 | |||
120 | if (fill) { | ||
121 | if (chunksize > lz4_compressbound(uncomp_chunksize)) { | ||
122 | error("chunk length is longer than allocated"); | ||
123 | goto exit_2; | ||
124 | } | ||
125 | fill(inp, chunksize); | ||
126 | } | ||
127 | #ifdef PREBOOT | ||
128 | if (out_len >= uncomp_chunksize) { | ||
129 | dest_len = uncomp_chunksize; | ||
130 | out_len -= dest_len; | ||
131 | } else | ||
132 | dest_len = out_len; | ||
133 | ret = lz4_decompress(inp, &chunksize, outp, dest_len); | ||
134 | #else | ||
135 | dest_len = uncomp_chunksize; | ||
136 | ret = lz4_decompress_unknownoutputsize(inp, chunksize, outp, | ||
137 | &dest_len); | ||
138 | #endif | ||
139 | if (ret < 0) { | ||
140 | error("Decoding failed"); | ||
141 | goto exit_2; | ||
142 | } | ||
143 | |||
144 | if (flush && flush(outp, dest_len) != dest_len) | ||
145 | goto exit_2; | ||
146 | if (output) | ||
147 | outp += dest_len; | ||
148 | if (posp) | ||
149 | *posp += chunksize; | ||
150 | |||
151 | size -= chunksize; | ||
152 | |||
153 | if (size == 0) | ||
154 | break; | ||
155 | else if (size < 0) { | ||
156 | error("data corrupted"); | ||
157 | goto exit_2; | ||
158 | } | ||
159 | |||
160 | inp += chunksize; | ||
161 | if (fill) | ||
162 | inp = inp_start; | ||
163 | } | ||
164 | |||
165 | ret = 0; | ||
166 | exit_2: | ||
167 | if (!input) | ||
168 | large_free(inp_start); | ||
169 | exit_1: | ||
170 | if (!output) | ||
171 | large_free(outp); | ||
172 | exit_0: | ||
173 | return ret; | ||
174 | } | ||
175 | |||
176 | #ifdef PREBOOT | ||
177 | STATIC int INIT decompress(unsigned char *buf, int in_len, | ||
178 | int(*fill)(void*, unsigned int), | ||
179 | int(*flush)(void*, unsigned int), | ||
180 | unsigned char *output, | ||
181 | int *posp, | ||
182 | void(*error)(char *x) | ||
183 | ) | ||
184 | { | ||
185 | return unlz4(buf, in_len - 4, fill, flush, output, posp, error); | ||
186 | } | ||
187 | #endif | ||
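The wrapper above consumes the legacy LZ4 container: a 4-byte little-endian magic 0x184C2102, then chunks of at most 8 MiB of uncompressed input, each prefixed by the 4-byte little-endian length of its compressed payload; for the PREBOOT kernel-image case the build also appends the uncompressed size after the data, which decompress() strips off by passing in_len - 4. A userspace sketch that writes one such chunk, assuming the userspace liblz4 API (LZ4_compressBound/LZ4_compress_default, link with -llz4), which is not part of this patch:

#include <lz4.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

static void put_le32(FILE *f, uint32_t v)
{
        unsigned char b[4] = { v, v >> 8, v >> 16, v >> 24 };

        fwrite(b, 1, sizeof(b), f);
}

int main(void)
{
        const char *payload = "hello, lz4 legacy framing";
        int in_len = (int)strlen(payload);
        int cap = LZ4_compressBound(in_len);
        char *out = malloc(cap);
        FILE *f = fopen("image.lz4", "wb");
        int clen;

        if (!out || !f)
                return 1;
        clen = LZ4_compress_default(payload, out, in_len, cap);
        if (clen <= 0)
                return 1;

        put_le32(f, 0x184C2102);                /* ARCHIVE_MAGICNUMBER          */
        put_le32(f, (uint32_t)clen);            /* compressed chunk length      */
        fwrite(out, 1, clen, f);                /* compressed chunk payload     */
        fclose(f);
        free(out);
        return 0;
}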
diff --git a/lib/lz4/Makefile b/lib/lz4/Makefile new file mode 100644 index 000000000000..8085d04e9309 --- /dev/null +++ b/lib/lz4/Makefile | |||
@@ -0,0 +1,3 @@ | |||
1 | obj-$(CONFIG_LZ4_COMPRESS) += lz4_compress.o | ||
2 | obj-$(CONFIG_LZ4HC_COMPRESS) += lz4hc_compress.o | ||
3 | obj-$(CONFIG_LZ4_DECOMPRESS) += lz4_decompress.o | ||
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c new file mode 100644 index 000000000000..fd94058bd7f9 --- /dev/null +++ b/lib/lz4/lz4_compress.c | |||
@@ -0,0 +1,443 @@ | |||
1 | /* | ||
2 | * LZ4 - Fast LZ compression algorithm | ||
3 | * Copyright (C) 2011-2012, Yann Collet. | ||
4 | * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) | ||
5 | |||
6 | * Redistribution and use in source and binary forms, with or without | ||
7 | * modification, are permitted provided that the following conditions are | ||
8 | * met: | ||
9 | * | ||
10 | * * Redistributions of source code must retain the above copyright | ||
11 | * notice, this list of conditions and the following disclaimer. | ||
12 | * * Redistributions in binary form must reproduce the above | ||
13 | * copyright notice, this list of conditions and the following disclaimer | ||
14 | * in the documentation and/or other materials provided with the | ||
15 | * distribution. | ||
16 | * | ||
17 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
18 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
19 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
20 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
21 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
22 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
23 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
24 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
25 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
26 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
27 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
28 | * | ||
29 | * You can contact the author at : | ||
30 | * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html | ||
31 | * - LZ4 source repository : http://code.google.com/p/lz4/ | ||
32 | * | ||
33 | * Changed for kernel use by: | ||
34 | * Chanho Min <chanho.min@lge.com> | ||
35 | */ | ||
36 | |||
37 | #include <linux/module.h> | ||
38 | #include <linux/kernel.h> | ||
39 | #include <linux/lz4.h> | ||
40 | #include <asm/unaligned.h> | ||
41 | #include "lz4defs.h" | ||
42 | |||
43 | /* | ||
44 | * LZ4_compressCtx : | ||
45 | * ----------------- | ||
46 | * Compress 'isize' bytes from 'source' into an output buffer 'dest' of | ||
47 | * maximum size 'maxOutputSize'. If it cannot achieve that, compression | ||
48 | * will stop, and the result of the function will be zero. | ||
49 | * return : the number of bytes written in buffer 'dest', or 0 if the | ||
50 | * compression fails | ||
51 | */ | ||
52 | static inline int lz4_compressctx(void *ctx, | ||
53 | const char *source, | ||
54 | char *dest, | ||
55 | int isize, | ||
56 | int maxoutputsize) | ||
57 | { | ||
58 | HTYPE *hashtable = (HTYPE *)ctx; | ||
59 | const u8 *ip = (u8 *)source; | ||
60 | #if LZ4_ARCH64 | ||
61 | const BYTE * const base = ip; | ||
62 | #else | ||
63 | const int base = 0; | ||
64 | #endif | ||
65 | const u8 *anchor = ip; | ||
66 | const u8 *const iend = ip + isize; | ||
67 | const u8 *const mflimit = iend - MFLIMIT; | ||
68 | #define MATCHLIMIT (iend - LASTLITERALS) | ||
69 | |||
70 | u8 *op = (u8 *) dest; | ||
71 | u8 *const oend = op + maxoutputsize; | ||
72 | int length; | ||
73 | const int skipstrength = SKIPSTRENGTH; | ||
74 | u32 forwardh; | ||
75 | int lastrun; | ||
76 | |||
77 | /* Init */ | ||
78 | if (isize < MINLENGTH) | ||
79 | goto _last_literals; | ||
80 | |||
81 | memset((void *)hashtable, 0, LZ4_MEM_COMPRESS); | ||
82 | |||
83 | /* First Byte */ | ||
84 | hashtable[LZ4_HASH_VALUE(ip)] = ip - base; | ||
85 | ip++; | ||
86 | forwardh = LZ4_HASH_VALUE(ip); | ||
87 | |||
88 | /* Main Loop */ | ||
89 | for (;;) { | ||
90 | int findmatchattempts = (1U << skipstrength) + 3; | ||
91 | const u8 *forwardip = ip; | ||
92 | const u8 *ref; | ||
93 | u8 *token; | ||
94 | |||
95 | /* Find a match */ | ||
96 | do { | ||
97 | u32 h = forwardh; | ||
98 | int step = findmatchattempts++ >> skipstrength; | ||
99 | ip = forwardip; | ||
100 | forwardip = ip + step; | ||
101 | |||
102 | if (unlikely(forwardip > mflimit)) | ||
103 | goto _last_literals; | ||
104 | |||
105 | forwardh = LZ4_HASH_VALUE(forwardip); | ||
106 | ref = base + hashtable[h]; | ||
107 | hashtable[h] = ip - base; | ||
108 | } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip))); | ||
109 | |||
110 | /* Catch up */ | ||
111 | while ((ip > anchor) && (ref > (u8 *)source) && | ||
112 | unlikely(ip[-1] == ref[-1])) { | ||
113 | ip--; | ||
114 | ref--; | ||
115 | } | ||
116 | |||
117 | /* Encode Literal length */ | ||
118 | length = (int)(ip - anchor); | ||
119 | token = op++; | ||
120 | /* check output limit */ | ||
121 | if (unlikely(op + length + (2 + 1 + LASTLITERALS) + | ||
122 | (length >> 8) > oend)) | ||
123 | return 0; | ||
124 | |||
125 | if (length >= (int)RUN_MASK) { | ||
126 | int len; | ||
127 | *token = (RUN_MASK << ML_BITS); | ||
128 | len = length - RUN_MASK; | ||
129 | for (; len > 254 ; len -= 255) | ||
130 | *op++ = 255; | ||
131 | *op++ = (u8)len; | ||
132 | } else | ||
133 | *token = (length << ML_BITS); | ||
134 | |||
135 | /* Copy Literals */ | ||
136 | LZ4_BLINDCOPY(anchor, op, length); | ||
137 | _next_match: | ||
138 | /* Encode Offset */ | ||
139 | LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref)); | ||
140 | |||
141 | /* Start Counting */ | ||
142 | ip += MINMATCH; | ||
143 | /* MinMatch verified */ | ||
144 | ref += MINMATCH; | ||
145 | anchor = ip; | ||
146 | while (likely(ip < MATCHLIMIT - (STEPSIZE - 1))) { | ||
147 | #if LZ4_ARCH64 | ||
148 | u64 diff = A64(ref) ^ A64(ip); | ||
149 | #else | ||
150 | u32 diff = A32(ref) ^ A32(ip); | ||
151 | #endif | ||
152 | if (!diff) { | ||
153 | ip += STEPSIZE; | ||
154 | ref += STEPSIZE; | ||
155 | continue; | ||
156 | } | ||
157 | ip += LZ4_NBCOMMONBYTES(diff); | ||
158 | goto _endcount; | ||
159 | } | ||
160 | #if LZ4_ARCH64 | ||
161 | if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) { | ||
162 | ip += 4; | ||
163 | ref += 4; | ||
164 | } | ||
165 | #endif | ||
166 | if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) { | ||
167 | ip += 2; | ||
168 | ref += 2; | ||
169 | } | ||
170 | if ((ip < MATCHLIMIT) && (*ref == *ip)) | ||
171 | ip++; | ||
172 | _endcount: | ||
173 | /* Encode MatchLength */ | ||
174 | length = (int)(ip - anchor); | ||
175 | /* Check output limit */ | ||
176 | if (unlikely(op + (1 + LASTLITERALS) + (length >> 8) > oend)) | ||
177 | return 0; | ||
178 | if (length >= (int)ML_MASK) { | ||
179 | *token += ML_MASK; | ||
180 | length -= ML_MASK; | ||
181 | for (; length > 509 ; length -= 510) { | ||
182 | *op++ = 255; | ||
183 | *op++ = 255; | ||
184 | } | ||
185 | if (length > 254) { | ||
186 | length -= 255; | ||
187 | *op++ = 255; | ||
188 | } | ||
189 | *op++ = (u8)length; | ||
190 | } else | ||
191 | *token += length; | ||
192 | |||
193 | /* Test end of chunk */ | ||
194 | if (ip > mflimit) { | ||
195 | anchor = ip; | ||
196 | break; | ||
197 | } | ||
198 | |||
199 | /* Fill table */ | ||
200 | hashtable[LZ4_HASH_VALUE(ip-2)] = ip - 2 - base; | ||
201 | |||
202 | /* Test next position */ | ||
203 | ref = base + hashtable[LZ4_HASH_VALUE(ip)]; | ||
204 | hashtable[LZ4_HASH_VALUE(ip)] = ip - base; | ||
205 | if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) { | ||
206 | token = op++; | ||
207 | *token = 0; | ||
208 | goto _next_match; | ||
209 | } | ||
210 | |||
211 | /* Prepare next loop */ | ||
212 | anchor = ip++; | ||
213 | forwardh = LZ4_HASH_VALUE(ip); | ||
214 | } | ||
215 | |||
216 | _last_literals: | ||
217 | /* Encode Last Literals */ | ||
218 | lastrun = (int)(iend - anchor); | ||
219 | if (((char *)op - dest) + lastrun + 1 | ||
220 | + ((lastrun + 255 - RUN_MASK) / 255) > (u32)maxoutputsize) | ||
221 | return 0; | ||
222 | |||
223 | if (lastrun >= (int)RUN_MASK) { | ||
224 | *op++ = (RUN_MASK << ML_BITS); | ||
225 | lastrun -= RUN_MASK; | ||
226 | for (; lastrun > 254 ; lastrun -= 255) | ||
227 | *op++ = 255; | ||
228 | *op++ = (u8)lastrun; | ||
229 | } else | ||
230 | *op++ = (lastrun << ML_BITS); | ||
231 | memcpy(op, anchor, iend - anchor); | ||
232 | op += iend - anchor; | ||
233 | |||
234 | /* End */ | ||
235 | return (int)(((char *)op) - dest); | ||
236 | } | ||
237 | |||
238 | static inline int lz4_compress64kctx(void *ctx, | ||
239 | const char *source, | ||
240 | char *dest, | ||
241 | int isize, | ||
242 | int maxoutputsize) | ||
243 | { | ||
244 | u16 *hashtable = (u16 *)ctx; | ||
245 | const u8 *ip = (u8 *) source; | ||
246 | const u8 *anchor = ip; | ||
247 | const u8 *const base = ip; | ||
248 | const u8 *const iend = ip + isize; | ||
249 | const u8 *const mflimit = iend - MFLIMIT; | ||
250 | #define MATCHLIMIT (iend - LASTLITERALS) | ||
251 | |||
252 | u8 *op = (u8 *) dest; | ||
253 | u8 *const oend = op + maxoutputsize; | ||
254 | int len, length; | ||
255 | const int skipstrength = SKIPSTRENGTH; | ||
256 | u32 forwardh; | ||
257 | int lastrun; | ||
258 | |||
259 | /* Init */ | ||
260 | if (isize < MINLENGTH) | ||
261 | goto _last_literals; | ||
262 | |||
263 | memset((void *)hashtable, 0, LZ4_MEM_COMPRESS); | ||
264 | |||
265 | /* First Byte */ | ||
266 | ip++; | ||
267 | forwardh = LZ4_HASH64K_VALUE(ip); | ||
268 | |||
269 | /* Main Loop */ | ||
270 | for (;;) { | ||
271 | int findmatchattempts = (1U << skipstrength) + 3; | ||
272 | const u8 *forwardip = ip; | ||
273 | const u8 *ref; | ||
274 | u8 *token; | ||
275 | |||
276 | /* Find a match */ | ||
277 | do { | ||
278 | u32 h = forwardh; | ||
279 | int step = findmatchattempts++ >> skipstrength; | ||
280 | ip = forwardip; | ||
281 | forwardip = ip + step; | ||
282 | |||
283 | if (forwardip > mflimit) | ||
284 | goto _last_literals; | ||
285 | |||
286 | forwardh = LZ4_HASH64K_VALUE(forwardip); | ||
287 | ref = base + hashtable[h]; | ||
288 | hashtable[h] = (u16)(ip - base); | ||
289 | } while (A32(ref) != A32(ip)); | ||
290 | |||
291 | /* Catch up */ | ||
292 | while ((ip > anchor) && (ref > (u8 *)source) | ||
293 | && (ip[-1] == ref[-1])) { | ||
294 | ip--; | ||
295 | ref--; | ||
296 | } | ||
297 | |||
298 | /* Encode Literal length */ | ||
299 | length = (int)(ip - anchor); | ||
300 | token = op++; | ||
301 | /* Check output limit */ | ||
302 | if (unlikely(op + length + (2 + 1 + LASTLITERALS) | ||
303 | + (length >> 8) > oend)) | ||
304 | return 0; | ||
305 | if (length >= (int)RUN_MASK) { | ||
306 | *token = (RUN_MASK << ML_BITS); | ||
307 | len = length - RUN_MASK; | ||
308 | for (; len > 254 ; len -= 255) | ||
309 | *op++ = 255; | ||
310 | *op++ = (u8)len; | ||
311 | } else | ||
312 | *token = (length << ML_BITS); | ||
313 | |||
314 | /* Copy Literals */ | ||
315 | LZ4_BLINDCOPY(anchor, op, length); | ||
316 | |||
317 | _next_match: | ||
318 | /* Encode Offset */ | ||
319 | LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref)); | ||
320 | |||
321 | /* Start Counting */ | ||
322 | ip += MINMATCH; | ||
323 | /* MinMatch verified */ | ||
324 | ref += MINMATCH; | ||
325 | anchor = ip; | ||
326 | |||
327 | while (ip < MATCHLIMIT - (STEPSIZE - 1)) { | ||
328 | #if LZ4_ARCH64 | ||
329 | u64 diff = A64(ref) ^ A64(ip); | ||
330 | #else | ||
331 | u32 diff = A32(ref) ^ A32(ip); | ||
332 | #endif | ||
333 | |||
334 | if (!diff) { | ||
335 | ip += STEPSIZE; | ||
336 | ref += STEPSIZE; | ||
337 | continue; | ||
338 | } | ||
339 | ip += LZ4_NBCOMMONBYTES(diff); | ||
340 | goto _endcount; | ||
341 | } | ||
342 | #if LZ4_ARCH64 | ||
343 | if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) { | ||
344 | ip += 4; | ||
345 | ref += 4; | ||
346 | } | ||
347 | #endif | ||
348 | if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) { | ||
349 | ip += 2; | ||
350 | ref += 2; | ||
351 | } | ||
352 | if ((ip < MATCHLIMIT) && (*ref == *ip)) | ||
353 | ip++; | ||
354 | _endcount: | ||
355 | |||
356 | /* Encode MatchLength */ | ||
357 | len = (int)(ip - anchor); | ||
358 | /* Check output limit */ | ||
359 | if (unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend)) | ||
360 | return 0; | ||
361 | if (len >= (int)ML_MASK) { | ||
362 | *token += ML_MASK; | ||
363 | len -= ML_MASK; | ||
364 | for (; len > 509 ; len -= 510) { | ||
365 | *op++ = 255; | ||
366 | *op++ = 255; | ||
367 | } | ||
368 | if (len > 254) { | ||
369 | len -= 255; | ||
370 | *op++ = 255; | ||
371 | } | ||
372 | *op++ = (u8)len; | ||
373 | } else | ||
374 | *token += len; | ||
375 | |||
376 | /* Test end of chunk */ | ||
377 | if (ip > mflimit) { | ||
378 | anchor = ip; | ||
379 | break; | ||
380 | } | ||
381 | |||
382 | /* Fill table */ | ||
383 | hashtable[LZ4_HASH64K_VALUE(ip-2)] = (u16)(ip - 2 - base); | ||
384 | |||
385 | /* Test next position */ | ||
386 | ref = base + hashtable[LZ4_HASH64K_VALUE(ip)]; | ||
387 | hashtable[LZ4_HASH64K_VALUE(ip)] = (u16)(ip - base); | ||
388 | if (A32(ref) == A32(ip)) { | ||
389 | token = op++; | ||
390 | *token = 0; | ||
391 | goto _next_match; | ||
392 | } | ||
393 | |||
394 | /* Prepare next loop */ | ||
395 | anchor = ip++; | ||
396 | forwardh = LZ4_HASH64K_VALUE(ip); | ||
397 | } | ||
398 | |||
399 | _last_literals: | ||
400 | /* Encode Last Literals */ | ||
401 | lastrun = (int)(iend - anchor); | ||
402 | if (op + lastrun + 1 + (lastrun - RUN_MASK + 255) / 255 > oend) | ||
403 | return 0; | ||
404 | if (lastrun >= (int)RUN_MASK) { | ||
405 | *op++ = (RUN_MASK << ML_BITS); | ||
406 | lastrun -= RUN_MASK; | ||
407 | for (; lastrun > 254 ; lastrun -= 255) | ||
408 | *op++ = 255; | ||
409 | *op++ = (u8)lastrun; | ||
410 | } else | ||
411 | *op++ = (lastrun << ML_BITS); | ||
412 | memcpy(op, anchor, iend - anchor); | ||
413 | op += iend - anchor; | ||
414 | /* End */ | ||
415 | return (int)(((char *)op) - dest); | ||
416 | } | ||
417 | |||
418 | int lz4_compress(const unsigned char *src, size_t src_len, | ||
419 | unsigned char *dst, size_t *dst_len, void *wrkmem) | ||
420 | { | ||
421 | int ret = -1; | ||
422 | int out_len = 0; | ||
423 | |||
424 | if (src_len < LZ4_64KLIMIT) | ||
425 | out_len = lz4_compress64kctx(wrkmem, src, dst, src_len, | ||
426 | lz4_compressbound(src_len)); | ||
427 | else | ||
428 | out_len = lz4_compressctx(wrkmem, src, dst, src_len, | ||
429 | lz4_compressbound(src_len)); | ||
430 | |||
431 | if (out_len < 0) | ||
432 | goto exit; | ||
433 | |||
434 | *dst_len = out_len; | ||
435 | |||
436 | return 0; | ||
437 | exit: | ||
438 | return ret; | ||
439 | } | ||
440 | EXPORT_SYMBOL_GPL(lz4_compress); | ||
441 | |||
442 | MODULE_LICENSE("GPL"); | ||
443 | MODULE_DESCRIPTION("LZ4 compressor"); | ||
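
A minimal sketch (not part of this patch) of a kernel caller for the exported lz4_compress() above. It assumes the LZ4_MEM_COMPRESS constant and the lz4_compressbound() helper provided by <linux/lz4.h> in this series; example_lz4_pack() itself is hypothetical.

    #include <linux/errno.h>
    #include <linux/lz4.h>
    #include <linux/vmalloc.h>

    static int example_lz4_pack(const unsigned char *src, size_t src_len,
                                unsigned char *dst, size_t *dst_len)
    {
            void *wrkmem = vmalloc(LZ4_MEM_COMPRESS);  /* per-call scratch */
            int ret;

            if (!wrkmem)
                    return -ENOMEM;

            /* dst must offer at least lz4_compressbound(src_len) bytes */
            ret = lz4_compress(src, src_len, dst, dst_len, wrkmem);

            vfree(wrkmem);
            return ret;  /* 0 on success, -1 if compression failed */
    }
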
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c new file mode 100644 index 000000000000..d3414eae73a1 --- /dev/null +++ b/lib/lz4/lz4_decompress.c | |||
@@ -0,0 +1,326 @@ | |||
1 | /* | ||
2 | * LZ4 Decompressor for Linux kernel | ||
3 | * | ||
4 | * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com> | ||
5 | * | ||
6 | * Based on LZ4 implementation by Yann Collet. | ||
7 | * | ||
8 | * LZ4 - Fast LZ compression algorithm | ||
9 | * Copyright (C) 2011-2012, Yann Collet. | ||
10 | * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or without | ||
13 | * modification, are permitted provided that the following conditions are | ||
14 | * met: | ||
15 | * | ||
16 | * * Redistributions of source code must retain the above copyright | ||
17 | * notice, this list of conditions and the following disclaimer. | ||
18 | * * Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following disclaimer | ||
20 | * in the documentation and/or other materials provided with the | ||
21 | * distribution. | ||
22 | * | ||
23 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
24 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
25 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
26 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
27 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
28 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
29 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
30 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
31 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
32 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
33 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
34 | * | ||
35 | * You can contact the author at : | ||
36 | * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html | ||
37 | * - LZ4 source repository : http://code.google.com/p/lz4/ | ||
38 | */ | ||
39 | |||
40 | #ifndef STATIC | ||
41 | #include <linux/module.h> | ||
42 | #include <linux/kernel.h> | ||
43 | #endif | ||
44 | #include <linux/lz4.h> | ||
45 | |||
46 | #include <asm/unaligned.h> | ||
47 | |||
48 | #include "lz4defs.h" | ||
49 | |||
50 | static int lz4_uncompress(const char *source, char *dest, int osize) | ||
51 | { | ||
52 | const BYTE *ip = (const BYTE *) source; | ||
53 | const BYTE *ref; | ||
54 | BYTE *op = (BYTE *) dest; | ||
55 | BYTE * const oend = op + osize; | ||
56 | BYTE *cpy; | ||
57 | unsigned token; | ||
58 | size_t length; | ||
59 | size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0}; | ||
60 | #if LZ4_ARCH64 | ||
61 | size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3}; | ||
62 | #endif | ||
63 | |||
64 | while (1) { | ||
65 | |||
66 | /* get runlength */ | ||
67 | token = *ip++; | ||
68 | length = (token >> ML_BITS); | ||
69 | if (length == RUN_MASK) { | ||
70 | size_t len; | ||
71 | |||
72 | len = *ip++; | ||
73 | for (; len == 255; length += 255) | ||
74 | len = *ip++; | ||
75 | length += len; | ||
76 | } | ||
77 | |||
78 | /* copy literals */ | ||
79 | cpy = op + length; | ||
80 | if (unlikely(cpy > oend - COPYLENGTH)) { | ||
81 | /* | ||
82 | * Error: not enough room for another match | ||
83 | * (min 4) + 5 literals | ||
84 | */ | ||
85 | if (cpy != oend) | ||
86 | goto _output_error; | ||
87 | |||
88 | memcpy(op, ip, length); | ||
89 | ip += length; | ||
90 | break; /* EOF */ | ||
91 | } | ||
92 | LZ4_WILDCOPY(ip, op, cpy); | ||
93 | ip -= (op - cpy); | ||
94 | op = cpy; | ||
95 | |||
96 | /* get offset */ | ||
97 | LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip); | ||
98 | ip += 2; | ||
99 | |||
100 | /* Error: offset creates a reference outside the destination buffer */ | ||
101 | if (unlikely(ref < (BYTE *const) dest)) | ||
102 | goto _output_error; | ||
103 | |||
104 | /* get matchlength */ | ||
105 | length = token & ML_MASK; | ||
106 | if (length == ML_MASK) { | ||
107 | for (; *ip == 255; length += 255) | ||
108 | ip++; | ||
109 | length += *ip++; | ||
110 | } | ||
111 | |||
112 | /* copy repeated sequence */ | ||
113 | if (unlikely((op - ref) < STEPSIZE)) { | ||
114 | #if LZ4_ARCH64 | ||
115 | size_t dec64 = dec64table[op - ref]; | ||
116 | #else | ||
117 | const int dec64 = 0; | ||
118 | #endif | ||
119 | op[0] = ref[0]; | ||
120 | op[1] = ref[1]; | ||
121 | op[2] = ref[2]; | ||
122 | op[3] = ref[3]; | ||
123 | op += 4; | ||
124 | ref += 4; | ||
125 | ref -= dec32table[op-ref]; | ||
126 | PUT4(ref, op); | ||
127 | op += STEPSIZE - 4; | ||
128 | ref -= dec64; | ||
129 | } else { | ||
130 | LZ4_COPYSTEP(ref, op); | ||
131 | } | ||
132 | cpy = op + length - (STEPSIZE - 4); | ||
133 | if (cpy > (oend - COPYLENGTH)) { | ||
134 | |||
135 | /* Error: request to write beyond destination buffer */ | ||
136 | if (cpy > oend) | ||
137 | goto _output_error; | ||
138 | LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); | ||
139 | while (op < cpy) | ||
140 | *op++ = *ref++; | ||
141 | op = cpy; | ||
142 | /* | ||
143 | * Check EOF (should never happen, since last 5 bytes | ||
144 | * are supposed to be literals) | ||
145 | */ | ||
146 | if (op == oend) | ||
147 | goto _output_error; | ||
148 | continue; | ||
149 | } | ||
150 | LZ4_SECURECOPY(ref, op, cpy); | ||
151 | op = cpy; /* correction */ | ||
152 | } | ||
153 | /* end of decoding */ | ||
154 | return (int) (((char *)ip) - source); | ||
155 | |||
156 | /* write overflow error detected */ | ||
157 | _output_error: | ||
158 | return (int) (-(((char *)ip) - source)); | ||
159 | } | ||
160 | |||
161 | static int lz4_uncompress_unknownoutputsize(const char *source, char *dest, | ||
162 | int isize, size_t maxoutputsize) | ||
163 | { | ||
164 | const BYTE *ip = (const BYTE *) source; | ||
165 | const BYTE *const iend = ip + isize; | ||
166 | const BYTE *ref; | ||
167 | |||
168 | |||
169 | BYTE *op = (BYTE *) dest; | ||
170 | BYTE * const oend = op + maxoutputsize; | ||
171 | BYTE *cpy; | ||
172 | |||
173 | size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0}; | ||
174 | #if LZ4_ARCH64 | ||
175 | size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3}; | ||
176 | #endif | ||
177 | |||
178 | /* Main Loop */ | ||
179 | while (ip < iend) { | ||
180 | |||
181 | unsigned token; | ||
182 | size_t length; | ||
183 | |||
184 | /* get runlength */ | ||
185 | token = *ip++; | ||
186 | length = (token >> ML_BITS); | ||
187 | if (length == RUN_MASK) { | ||
188 | int s = 255; | ||
189 | while ((ip < iend) && (s == 255)) { | ||
190 | s = *ip++; | ||
191 | length += s; | ||
192 | } | ||
193 | } | ||
194 | /* copy literals */ | ||
195 | cpy = op + length; | ||
196 | if ((cpy > oend - COPYLENGTH) || | ||
197 | (ip + length > iend - COPYLENGTH)) { | ||
198 | |||
199 | if (cpy > oend) | ||
200 | goto _output_error;/* writes beyond buffer */ | ||
201 | |||
202 | if (ip + length != iend) | ||
203 | goto _output_error;/* | ||
204 | * Error: LZ4 format requires | ||
205 | * that all input be consumed | ||
206 | * at this stage | ||
207 | */ | ||
208 | memcpy(op, ip, length); | ||
209 | op += length; | ||
210 | break;/* Necessarily EOF, due to parsing restrictions */ | ||
211 | } | ||
212 | LZ4_WILDCOPY(ip, op, cpy); | ||
213 | ip -= (op - cpy); | ||
214 | op = cpy; | ||
215 | |||
216 | /* get offset */ | ||
217 | LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip); | ||
218 | ip += 2; | ||
219 | if (ref < (BYTE * const) dest) | ||
220 | goto _output_error; | ||
221 | /* | ||
222 | * Error : offset creates reference | ||
223 | * outside of destination buffer | ||
224 | */ | ||
225 | |||
226 | /* get matchlength */ | ||
227 | length = (token & ML_MASK); | ||
228 | if (length == ML_MASK) { | ||
229 | while (ip < iend) { | ||
230 | int s = *ip++; | ||
231 | length += s; | ||
232 | if (s == 255) | ||
233 | continue; | ||
234 | break; | ||
235 | } | ||
236 | } | ||
237 | |||
238 | /* copy repeated sequence */ | ||
239 | if (unlikely((op - ref) < STEPSIZE)) { | ||
240 | #if LZ4_ARCH64 | ||
241 | size_t dec64 = dec64table[op - ref]; | ||
242 | #else | ||
243 | const int dec64 = 0; | ||
244 | #endif | ||
245 | op[0] = ref[0]; | ||
246 | op[1] = ref[1]; | ||
247 | op[2] = ref[2]; | ||
248 | op[3] = ref[3]; | ||
249 | op += 4; | ||
250 | ref += 4; | ||
251 | ref -= dec32table[op - ref]; | ||
252 | PUT4(ref, op); | ||
253 | op += STEPSIZE - 4; | ||
254 | ref -= dec64; | ||
255 | } else { | ||
256 | LZ4_COPYSTEP(ref, op); | ||
257 | } | ||
258 | cpy = op + length - (STEPSIZE-4); | ||
259 | if (cpy > oend - COPYLENGTH) { | ||
260 | if (cpy > oend) | ||
261 | goto _output_error; /* write outside of buf */ | ||
262 | |||
263 | LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); | ||
264 | while (op < cpy) | ||
265 | *op++ = *ref++; | ||
266 | op = cpy; | ||
267 | /* | ||
268 | * Check EOF (should never happen, since last 5 bytes | ||
269 | * are supposed to be literals) | ||
270 | */ | ||
271 | if (op == oend) | ||
272 | goto _output_error; | ||
273 | continue; | ||
274 | } | ||
275 | LZ4_SECURECOPY(ref, op, cpy); | ||
276 | op = cpy; /* correction */ | ||
277 | } | ||
278 | /* end of decoding */ | ||
279 | return (int) (((char *) op) - dest); | ||
280 | |||
281 | /* write overflow error detected */ | ||
282 | _output_error: | ||
283 | return (int) (-(((char *) ip) - source)); | ||
284 | } | ||
285 | |||
286 | int lz4_decompress(const char *src, size_t *src_len, char *dest, | ||
287 | size_t actual_dest_len) | ||
288 | { | ||
289 | int ret = -1; | ||
290 | int input_len = 0; | ||
291 | |||
292 | input_len = lz4_uncompress(src, dest, actual_dest_len); | ||
293 | if (input_len < 0) | ||
294 | goto exit_0; | ||
295 | *src_len = input_len; | ||
296 | |||
297 | return 0; | ||
298 | exit_0: | ||
299 | return ret; | ||
300 | } | ||
301 | #ifndef STATIC | ||
302 | EXPORT_SYMBOL_GPL(lz4_decompress); | ||
303 | #endif | ||
304 | |||
305 | int lz4_decompress_unknownoutputsize(const char *src, size_t src_len, | ||
306 | char *dest, size_t *dest_len) | ||
307 | { | ||
308 | int ret = -1; | ||
309 | int out_len = 0; | ||
310 | |||
311 | out_len = lz4_uncompress_unknownoutputsize(src, dest, src_len, | ||
312 | *dest_len); | ||
313 | if (out_len < 0) | ||
314 | goto exit_0; | ||
315 | *dest_len = out_len; | ||
316 | |||
317 | return 0; | ||
318 | exit_0: | ||
319 | return ret; | ||
320 | } | ||
321 | #ifndef STATIC | ||
322 | EXPORT_SYMBOL_GPL(lz4_decompress_unknownoutputsize); | ||
323 | |||
324 | MODULE_LICENSE("GPL"); | ||
325 | MODULE_DESCRIPTION("LZ4 Decompressor"); | ||
326 | #endif | ||
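
A matching sketch (again not part of this patch) for the known-size decompression path above. Note the calling convention: lz4_decompress() is handed the expected decompressed length and reports back, through its second argument, how many compressed bytes were consumed. example_lz4_unpack() is hypothetical.

    #include <linux/errno.h>
    #include <linux/lz4.h>

    static int example_lz4_unpack(const char *src, char *dst, size_t orig_len)
    {
            size_t src_used;  /* filled in by lz4_decompress() */

            if (lz4_decompress(src, &src_used, dst, orig_len) != 0)
                    return -EINVAL;  /* corrupt or truncated input */

            return 0;
    }
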
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h new file mode 100644 index 000000000000..abcecdc2d0f2 --- /dev/null +++ b/lib/lz4/lz4defs.h | |||
@@ -0,0 +1,156 @@ | |||
1 | /* | ||
2 | * lz4defs.h -- architecture specific defines | ||
3 | * | ||
4 | * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | /* | ||
12 | * Detect 64-bit mode | ||
13 | */ | ||
14 | #if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \ | ||
15 | || defined(__ppc64__) || defined(__LP64__)) | ||
16 | #define LZ4_ARCH64 1 | ||
17 | #else | ||
18 | #define LZ4_ARCH64 0 | ||
19 | #endif | ||
20 | |||
21 | /* | ||
22 | * Architecture-specific macros | ||
23 | */ | ||
24 | #define BYTE u8 | ||
25 | typedef struct _U16_S { u16 v; } U16_S; | ||
26 | typedef struct _U32_S { u32 v; } U32_S; | ||
27 | typedef struct _U64_S { u64 v; } U64_S; | ||
28 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \ | ||
29 | || defined(CONFIG_ARM) && __LINUX_ARM_ARCH__ >= 6 \ | ||
30 | && defined(ARM_EFFICIENT_UNALIGNED_ACCESS) | ||
31 | |||
32 | #define A16(x) (((U16_S *)(x))->v) | ||
33 | #define A32(x) (((U32_S *)(x))->v) | ||
34 | #define A64(x) (((U64_S *)(x))->v) | ||
35 | |||
36 | #define PUT4(s, d) (A32(d) = A32(s)) | ||
37 | #define PUT8(s, d) (A64(d) = A64(s)) | ||
38 | #define LZ4_WRITE_LITTLEENDIAN_16(p, v) \ | ||
39 | do { \ | ||
40 | A16(p) = v; \ | ||
41 | p += 2; \ | ||
42 | } while (0) | ||
43 | #else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ | ||
44 | |||
45 | #define A64(x) get_unaligned((u64 *)&(((U16_S *)(x))->v)) | ||
46 | #define A32(x) get_unaligned((u32 *)&(((U16_S *)(x))->v)) | ||
47 | #define A16(x) get_unaligned((u16 *)&(((U16_S *)(x))->v)) | ||
48 | |||
49 | #define PUT4(s, d) \ | ||
50 | put_unaligned(get_unaligned((const u32 *) s), (u32 *) d) | ||
51 | #define PUT8(s, d) \ | ||
52 | put_unaligned(get_unaligned((const u64 *) s), (u64 *) d) | ||
53 | |||
54 | #define LZ4_WRITE_LITTLEENDIAN_16(p, v) \ | ||
55 | do { \ | ||
56 | put_unaligned(v, (u16 *)(p)); \ | ||
57 | p += 2; \ | ||
58 | } while (0) | ||
59 | #endif | ||
60 | |||
61 | #define COPYLENGTH 8 | ||
62 | #define ML_BITS 4 | ||
63 | #define ML_MASK ((1U << ML_BITS) - 1) | ||
64 | #define RUN_BITS (8 - ML_BITS) | ||
65 | #define RUN_MASK ((1U << RUN_BITS) - 1) | ||
66 | #define MEMORY_USAGE 14 | ||
67 | #define MINMATCH 4 | ||
68 | #define SKIPSTRENGTH 6 | ||
69 | #define LASTLITERALS 5 | ||
70 | #define MFLIMIT (COPYLENGTH + MINMATCH) | ||
71 | #define MINLENGTH (MFLIMIT + 1) | ||
72 | #define MAXD_LOG 16 | ||
73 | #define MAXD (1 << MAXD_LOG) | ||
74 | #define MAXD_MASK (u32)(MAXD - 1) | ||
75 | #define MAX_DISTANCE (MAXD - 1) | ||
76 | #define HASH_LOG (MAXD_LOG - 1) | ||
77 | #define HASHTABLESIZE (1 << HASH_LOG) | ||
78 | #define MAX_NB_ATTEMPTS 256 | ||
79 | #define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH) | ||
80 | #define LZ4_64KLIMIT ((1<<16) + (MFLIMIT - 1)) | ||
81 | #define HASHLOG64K ((MEMORY_USAGE - 2) + 1) | ||
82 | #define HASH64KTABLESIZE (1U << HASHLOG64K) | ||
83 | #define LZ4_HASH_VALUE(p) (((A32(p)) * 2654435761U) >> \ | ||
84 | ((MINMATCH * 8) - (MEMORY_USAGE-2))) | ||
85 | #define LZ4_HASH64K_VALUE(p) (((A32(p)) * 2654435761U) >> \ | ||
86 | ((MINMATCH * 8) - HASHLOG64K)) | ||
87 | #define HASH_VALUE(p) (((A32(p)) * 2654435761U) >> \ | ||
88 | ((MINMATCH * 8) - HASH_LOG)) | ||
89 | |||
90 | #if LZ4_ARCH64/* 64-bit */ | ||
91 | #define STEPSIZE 8 | ||
92 | |||
93 | #define LZ4_COPYSTEP(s, d) \ | ||
94 | do { \ | ||
95 | PUT8(s, d); \ | ||
96 | d += 8; \ | ||
97 | s += 8; \ | ||
98 | } while (0) | ||
99 | |||
100 | #define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d) | ||
101 | |||
102 | #define LZ4_SECURECOPY(s, d, e) \ | ||
103 | do { \ | ||
104 | if (d < e) { \ | ||
105 | LZ4_WILDCOPY(s, d, e); \ | ||
106 | } \ | ||
107 | } while (0) | ||
108 | #define HTYPE u32 | ||
109 | |||
110 | #ifdef __BIG_ENDIAN | ||
111 | #define LZ4_NBCOMMONBYTES(val) (__builtin_clzll(val) >> 3) | ||
112 | #else | ||
113 | #define LZ4_NBCOMMONBYTES(val) (__builtin_ctzll(val) >> 3) | ||
114 | #endif | ||
115 | |||
116 | #else /* 32-bit */ | ||
117 | #define STEPSIZE 4 | ||
118 | |||
119 | #define LZ4_COPYSTEP(s, d) \ | ||
120 | do { \ | ||
121 | PUT4(s, d); \ | ||
122 | d += 4; \ | ||
123 | s += 4; \ | ||
124 | } while (0) | ||
125 | |||
126 | #define LZ4_COPYPACKET(s, d) \ | ||
127 | do { \ | ||
128 | LZ4_COPYSTEP(s, d); \ | ||
129 | LZ4_COPYSTEP(s, d); \ | ||
130 | } while (0) | ||
131 | |||
132 | #define LZ4_SECURECOPY LZ4_WILDCOPY | ||
133 | #define HTYPE const u8* | ||
134 | |||
135 | #ifdef __BIG_ENDIAN | ||
136 | #define LZ4_NBCOMMONBYTES(val) (__builtin_clz(val) >> 3) | ||
137 | #else | ||
138 | #define LZ4_NBCOMMONBYTES(val) (__builtin_ctz(val) >> 3) | ||
139 | #endif | ||
140 | |||
141 | #endif | ||
142 | |||
143 | #define LZ4_READ_LITTLEENDIAN_16(d, s, p) \ | ||
144 | (d = s - get_unaligned_le16(p)) | ||
145 | |||
146 | #define LZ4_WILDCOPY(s, d, e) \ | ||
147 | do { \ | ||
148 | LZ4_COPYPACKET(s, d); \ | ||
149 | } while (d < e) | ||
150 | |||
151 | #define LZ4_BLINDCOPY(s, d, l) \ | ||
152 | do { \ | ||
153 | u8 *e = (d) + l; \ | ||
154 | LZ4_WILDCOPY(s, d, e); \ | ||
155 | d = e; \ | ||
156 | } while (0) | ||
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c new file mode 100644 index 000000000000..eb1a74f5e368 --- /dev/null +++ b/lib/lz4/lz4hc_compress.c | |||
@@ -0,0 +1,539 @@ | |||
1 | /* | ||
2 | * LZ4 HC - High Compression Mode of LZ4 | ||
3 | * Copyright (C) 2011-2012, Yann Collet. | ||
4 | * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) | ||
5 | * | ||
6 | * Redistribution and use in source and binary forms, with or without | ||
7 | * modification, are permitted provided that the following conditions are | ||
8 | * met: | ||
9 | * | ||
10 | * * Redistributions of source code must retain the above copyright | ||
11 | * notice, this list of conditions and the following disclaimer. | ||
12 | * * Redistributions in binary form must reproduce the above | ||
13 | * copyright notice, this list of conditions and the following disclaimer | ||
14 | * in the documentation and/or other materials provided with the | ||
15 | * distribution. | ||
16 | * | ||
17 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
18 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
19 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
20 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
21 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
22 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
23 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
24 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
25 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
26 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
27 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
28 | * | ||
29 | * You can contact the author at : | ||
30 | * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html | ||
31 | * - LZ4 source repository : http://code.google.com/p/lz4/ | ||
32 | * | ||
33 | * Changed for kernel use by: | ||
34 | * Chanho Min <chanho.min@lge.com> | ||
35 | */ | ||
36 | |||
37 | #include <linux/module.h> | ||
38 | #include <linux/kernel.h> | ||
39 | #include <linux/lz4.h> | ||
40 | #include <asm/unaligned.h> | ||
41 | #include "lz4defs.h" | ||
42 | |||
43 | struct lz4hc_data { | ||
44 | const u8 *base; | ||
45 | HTYPE hashtable[HASHTABLESIZE]; | ||
46 | u16 chaintable[MAXD]; | ||
47 | const u8 *nexttoupdate; | ||
48 | } __attribute__((__packed__)); | ||
49 | |||
50 | static inline int lz4hc_init(struct lz4hc_data *hc4, const u8 *base) | ||
51 | { | ||
52 | memset((void *)hc4->hashtable, 0, sizeof(hc4->hashtable)); | ||
53 | memset(hc4->chaintable, 0xFF, sizeof(hc4->chaintable)); | ||
54 | |||
55 | #if LZ4_ARCH64 | ||
56 | hc4->nexttoupdate = base + 1; | ||
57 | #else | ||
58 | hc4->nexttoupdate = base; | ||
59 | #endif | ||
60 | hc4->base = base; | ||
61 | return 1; | ||
62 | } | ||
63 | |||
64 | /* Update chains up to ip (excluded) */ | ||
65 | static inline void lz4hc_insert(struct lz4hc_data *hc4, const u8 *ip) | ||
66 | { | ||
67 | u16 *chaintable = hc4->chaintable; | ||
68 | HTYPE *hashtable = hc4->hashtable; | ||
69 | #if LZ4_ARCH64 | ||
70 | const BYTE * const base = hc4->base; | ||
71 | #else | ||
72 | const int base = 0; | ||
73 | #endif | ||
74 | |||
75 | while (hc4->nexttoupdate < ip) { | ||
76 | const u8 *p = hc4->nexttoupdate; | ||
77 | size_t delta = p - (hashtable[HASH_VALUE(p)] + base); | ||
78 | if (delta > MAX_DISTANCE) | ||
79 | delta = MAX_DISTANCE; | ||
80 | chaintable[(size_t)(p) & MAXD_MASK] = (u16)delta; | ||
81 | hashtable[HASH_VALUE(p)] = (p) - base; | ||
82 | hc4->nexttoupdate++; | ||
83 | } | ||
84 | } | ||
85 | |||
86 | static inline size_t lz4hc_commonlength(const u8 *p1, const u8 *p2, | ||
87 | const u8 *const matchlimit) | ||
88 | { | ||
89 | const u8 *p1t = p1; | ||
90 | |||
91 | while (p1t < matchlimit - (STEPSIZE - 1)) { | ||
92 | #if LZ4_ARCH64 | ||
93 | u64 diff = A64(p2) ^ A64(p1t); | ||
94 | #else | ||
95 | u32 diff = A32(p2) ^ A32(p1t); | ||
96 | #endif | ||
97 | if (!diff) { | ||
98 | p1t += STEPSIZE; | ||
99 | p2 += STEPSIZE; | ||
100 | continue; | ||
101 | } | ||
102 | p1t += LZ4_NBCOMMONBYTES(diff); | ||
103 | return p1t - p1; | ||
104 | } | ||
105 | #if LZ4_ARCH64 | ||
106 | if ((p1t < (matchlimit-3)) && (A32(p2) == A32(p1t))) { | ||
107 | p1t += 4; | ||
108 | p2 += 4; | ||
109 | } | ||
110 | #endif | ||
111 | |||
112 | if ((p1t < (matchlimit - 1)) && (A16(p2) == A16(p1t))) { | ||
113 | p1t += 2; | ||
114 | p2 += 2; | ||
115 | } | ||
116 | if ((p1t < matchlimit) && (*p2 == *p1t)) | ||
117 | p1t++; | ||
118 | return p1t - p1; | ||
119 | } | ||
120 | |||
121 | static inline int lz4hc_insertandfindbestmatch(struct lz4hc_data *hc4, | ||
122 | const u8 *ip, const u8 *const matchlimit, const u8 **matchpos) | ||
123 | { | ||
124 | u16 *const chaintable = hc4->chaintable; | ||
125 | HTYPE *const hashtable = hc4->hashtable; | ||
126 | const u8 *ref; | ||
127 | #if LZ4_ARCH64 | ||
128 | const BYTE * const base = hc4->base; | ||
129 | #else | ||
130 | const int base = 0; | ||
131 | #endif | ||
132 | int nbattempts = MAX_NB_ATTEMPTS; | ||
133 | size_t repl = 0, ml = 0; | ||
134 | u16 delta; | ||
135 | |||
136 | /* HC4 match finder */ | ||
137 | lz4hc_insert(hc4, ip); | ||
138 | ref = hashtable[HASH_VALUE(ip)] + base; | ||
139 | |||
140 | /* potential repetition */ | ||
141 | if (ref >= ip-4) { | ||
142 | /* confirmed */ | ||
143 | if (A32(ref) == A32(ip)) { | ||
144 | delta = (u16)(ip-ref); | ||
145 | repl = ml = lz4hc_commonlength(ip + MINMATCH, | ||
146 | ref + MINMATCH, matchlimit) + MINMATCH; | ||
147 | *matchpos = ref; | ||
148 | } | ||
149 | ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK]; | ||
150 | } | ||
151 | |||
152 | while ((ref >= ip - MAX_DISTANCE) && nbattempts) { | ||
153 | nbattempts--; | ||
154 | if (*(ref + ml) == *(ip + ml)) { | ||
155 | if (A32(ref) == A32(ip)) { | ||
156 | size_t mlt = | ||
157 | lz4hc_commonlength(ip + MINMATCH, | ||
158 | ref + MINMATCH, matchlimit) + MINMATCH; | ||
159 | if (mlt > ml) { | ||
160 | ml = mlt; | ||
161 | *matchpos = ref; | ||
162 | } | ||
163 | } | ||
164 | } | ||
165 | ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK]; | ||
166 | } | ||
167 | |||
168 | /* Complete table */ | ||
169 | if (repl) { | ||
170 | const BYTE *ptr = ip; | ||
171 | const BYTE *end; | ||
172 | end = ip + repl - (MINMATCH-1); | ||
173 | /* Pre-Load */ | ||
174 | while (ptr < end - delta) { | ||
175 | chaintable[(size_t)(ptr) & MAXD_MASK] = delta; | ||
176 | ptr++; | ||
177 | } | ||
178 | do { | ||
179 | chaintable[(size_t)(ptr) & MAXD_MASK] = delta; | ||
180 | /* Head of chain */ | ||
181 | hashtable[HASH_VALUE(ptr)] = (ptr) - base; | ||
182 | ptr++; | ||
183 | } while (ptr < end); | ||
184 | hc4->nexttoupdate = end; | ||
185 | } | ||
186 | |||
187 | return (int)ml; | ||
188 | } | ||
189 | |||
190 | static inline int lz4hc_insertandgetwidermatch(struct lz4hc_data *hc4, | ||
191 | const u8 *ip, const u8 *startlimit, const u8 *matchlimit, int longest, | ||
192 | const u8 **matchpos, const u8 **startpos) | ||
193 | { | ||
194 | u16 *const chaintable = hc4->chaintable; | ||
195 | HTYPE *const hashtable = hc4->hashtable; | ||
196 | #if LZ4_ARCH64 | ||
197 | const BYTE * const base = hc4->base; | ||
198 | #else | ||
199 | const int base = 0; | ||
200 | #endif | ||
201 | const u8 *ref; | ||
202 | int nbattempts = MAX_NB_ATTEMPTS; | ||
203 | int delta = (int)(ip - startlimit); | ||
204 | |||
205 | /* First Match */ | ||
206 | lz4hc_insert(hc4, ip); | ||
207 | ref = hashtable[HASH_VALUE(ip)] + base; | ||
208 | |||
209 | while ((ref >= ip - MAX_DISTANCE) && (ref >= hc4->base) | ||
210 | && (nbattempts)) { | ||
211 | nbattempts--; | ||
212 | if (*(startlimit + longest) == *(ref - delta + longest)) { | ||
213 | if (A32(ref) == A32(ip)) { | ||
214 | const u8 *reft = ref + MINMATCH; | ||
215 | const u8 *ipt = ip + MINMATCH; | ||
216 | const u8 *startt = ip; | ||
217 | |||
218 | while (ipt < matchlimit-(STEPSIZE - 1)) { | ||
219 | #if LZ4_ARCH64 | ||
220 | u64 diff = A64(reft) ^ A64(ipt); | ||
221 | #else | ||
222 | u32 diff = A32(reft) ^ A32(ipt); | ||
223 | #endif | ||
224 | |||
225 | if (!diff) { | ||
226 | ipt += STEPSIZE; | ||
227 | reft += STEPSIZE; | ||
228 | continue; | ||
229 | } | ||
230 | ipt += LZ4_NBCOMMONBYTES(diff); | ||
231 | goto _endcount; | ||
232 | } | ||
233 | #if LZ4_ARCH64 | ||
234 | if ((ipt < (matchlimit - 3)) | ||
235 | && (A32(reft) == A32(ipt))) { | ||
236 | ipt += 4; | ||
237 | reft += 4; | ||
238 | } | ||
239 | ipt += 2; | ||
240 | #endif | ||
241 | if ((ipt < (matchlimit - 1)) | ||
242 | && (A16(reft) == A16(ipt))) { | ||
243 | reft += 2; | ||
244 | } | ||
245 | if ((ipt < matchlimit) && (*reft == *ipt)) | ||
246 | ipt++; | ||
247 | _endcount: | ||
248 | reft = ref; | ||
249 | |||
250 | while ((startt > startlimit) | ||
251 | && (reft > hc4->base) | ||
252 | && (startt[-1] == reft[-1])) { | ||
253 | startt--; | ||
254 | reft--; | ||
255 | } | ||
256 | |||
257 | if ((ipt - startt) > longest) { | ||
258 | longest = (int)(ipt - startt); | ||
259 | *matchpos = reft; | ||
260 | *startpos = startt; | ||
261 | } | ||
262 | } | ||
263 | } | ||
264 | ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK]; | ||
265 | } | ||
266 | return longest; | ||
267 | } | ||
268 | |||
269 | static inline int lz4_encodesequence(const u8 **ip, u8 **op, const u8 **anchor, | ||
270 | int ml, const u8 *ref) | ||
271 | { | ||
272 | int length, len; | ||
273 | u8 *token; | ||
274 | |||
275 | /* Encode Literal length */ | ||
276 | length = (int)(*ip - *anchor); | ||
277 | token = (*op)++; | ||
278 | if (length >= (int)RUN_MASK) { | ||
279 | *token = (RUN_MASK << ML_BITS); | ||
280 | len = length - RUN_MASK; | ||
281 | for (; len > 254 ; len -= 255) | ||
282 | *(*op)++ = 255; | ||
283 | *(*op)++ = (u8)len; | ||
284 | } else | ||
285 | *token = (length << ML_BITS); | ||
286 | |||
287 | /* Copy Literals */ | ||
288 | LZ4_BLINDCOPY(*anchor, *op, length); | ||
289 | |||
290 | /* Encode Offset */ | ||
291 | LZ4_WRITE_LITTLEENDIAN_16(*op, (u16)(*ip - ref)); | ||
292 | |||
293 | /* Encode MatchLength */ | ||
294 | len = (int)(ml - MINMATCH); | ||
295 | if (len >= (int)ML_MASK) { | ||
296 | *token += ML_MASK; | ||
297 | len -= ML_MASK; | ||
298 | for (; len > 509 ; len -= 510) { | ||
299 | *(*op)++ = 255; | ||
300 | *(*op)++ = 255; | ||
301 | } | ||
302 | if (len > 254) { | ||
303 | len -= 255; | ||
304 | *(*op)++ = 255; | ||
305 | } | ||
306 | *(*op)++ = (u8)len; | ||
307 | } else | ||
308 | *token += len; | ||
309 | |||
310 | /* Prepare next loop */ | ||
311 | *ip += ml; | ||
312 | *anchor = *ip; | ||
313 | |||
314 | return 0; | ||
315 | } | ||
316 | |||
317 | static int lz4_compresshcctx(struct lz4hc_data *ctx, | ||
318 | const char *source, | ||
319 | char *dest, | ||
320 | int isize) | ||
321 | { | ||
322 | const u8 *ip = (const u8 *)source; | ||
323 | const u8 *anchor = ip; | ||
324 | const u8 *const iend = ip + isize; | ||
325 | const u8 *const mflimit = iend - MFLIMIT; | ||
326 | const u8 *const matchlimit = (iend - LASTLITERALS); | ||
327 | |||
328 | u8 *op = (u8 *)dest; | ||
329 | |||
330 | int ml, ml2, ml3, ml0; | ||
331 | const u8 *ref = NULL; | ||
332 | const u8 *start2 = NULL; | ||
333 | const u8 *ref2 = NULL; | ||
334 | const u8 *start3 = NULL; | ||
335 | const u8 *ref3 = NULL; | ||
336 | const u8 *start0; | ||
337 | const u8 *ref0; | ||
338 | int lastrun; | ||
339 | |||
340 | ip++; | ||
341 | |||
342 | /* Main Loop */ | ||
343 | while (ip < mflimit) { | ||
344 | ml = lz4hc_insertandfindbestmatch(ctx, ip, matchlimit, (&ref)); | ||
345 | if (!ml) { | ||
346 | ip++; | ||
347 | continue; | ||
348 | } | ||
349 | |||
350 | /* saved, in case we would skip too much */ | ||
351 | start0 = ip; | ||
352 | ref0 = ref; | ||
353 | ml0 = ml; | ||
354 | _search2: | ||
355 | if (ip+ml < mflimit) | ||
356 | ml2 = lz4hc_insertandgetwidermatch(ctx, ip + ml - 2, | ||
357 | ip + 1, matchlimit, ml, &ref2, &start2); | ||
358 | else | ||
359 | ml2 = ml; | ||
360 | /* No better match */ | ||
361 | if (ml2 == ml) { | ||
362 | lz4_encodesequence(&ip, &op, &anchor, ml, ref); | ||
363 | continue; | ||
364 | } | ||
365 | |||
366 | if (start0 < ip) { | ||
367 | /* empirical */ | ||
368 | if (start2 < ip + ml0) { | ||
369 | ip = start0; | ||
370 | ref = ref0; | ||
371 | ml = ml0; | ||
372 | } | ||
373 | } | ||
374 | /* | ||
375 | * Here, start0==ip | ||
376 | * First Match too small : removed | ||
377 | */ | ||
378 | if ((start2 - ip) < 3) { | ||
379 | ml = ml2; | ||
380 | ip = start2; | ||
381 | ref = ref2; | ||
382 | goto _search2; | ||
383 | } | ||
384 | |||
385 | _search3: | ||
386 | /* | ||
387 | * Currently we have : | ||
388 | * ml2 > ml1, and | ||
389 | * ip1+3 <= ip2 (usually < ip1+ml1) | ||
390 | */ | ||
391 | if ((start2 - ip) < OPTIMAL_ML) { | ||
392 | int correction; | ||
393 | int new_ml = ml; | ||
394 | if (new_ml > OPTIMAL_ML) | ||
395 | new_ml = OPTIMAL_ML; | ||
396 | if (ip + new_ml > start2 + ml2 - MINMATCH) | ||
397 | new_ml = (int)(start2 - ip) + ml2 - MINMATCH; | ||
398 | correction = new_ml - (int)(start2 - ip); | ||
399 | if (correction > 0) { | ||
400 | start2 += correction; | ||
401 | ref2 += correction; | ||
402 | ml2 -= correction; | ||
403 | } | ||
404 | } | ||
405 | /* | ||
406 | * Now, we have start2 = ip+new_ml, | ||
407 | * with new_ml=min(ml, OPTIMAL_ML=18) | ||
408 | */ | ||
409 | if (start2 + ml2 < mflimit) | ||
410 | ml3 = lz4hc_insertandgetwidermatch(ctx, | ||
411 | start2 + ml2 - 3, start2, matchlimit, | ||
412 | ml2, &ref3, &start3); | ||
413 | else | ||
414 | ml3 = ml2; | ||
415 | |||
416 | /* No better match : 2 sequences to encode */ | ||
417 | if (ml3 == ml2) { | ||
418 | /* ip & ref are known; Now for ml */ | ||
419 | if (start2 < ip+ml) | ||
420 | ml = (int)(start2 - ip); | ||
421 | |||
422 | /* Now, encode 2 sequences */ | ||
423 | lz4_encodesequence(&ip, &op, &anchor, ml, ref); | ||
424 | ip = start2; | ||
425 | lz4_encodesequence(&ip, &op, &anchor, ml2, ref2); | ||
426 | continue; | ||
427 | } | ||
428 | |||
429 | /* Not enough space for match 2 : remove it */ | ||
430 | if (start3 < ip + ml + 3) { | ||
431 | /* | ||
432 | * can write Seq1 immediately ==> Seq2 is removed, | ||
433 | * so Seq3 becomes Seq1 | ||
434 | */ | ||
435 | if (start3 >= (ip + ml)) { | ||
436 | if (start2 < ip + ml) { | ||
437 | int correction = | ||
438 | (int)(ip + ml - start2); | ||
439 | start2 += correction; | ||
440 | ref2 += correction; | ||
441 | ml2 -= correction; | ||
442 | if (ml2 < MINMATCH) { | ||
443 | start2 = start3; | ||
444 | ref2 = ref3; | ||
445 | ml2 = ml3; | ||
446 | } | ||
447 | } | ||
448 | |||
449 | lz4_encodesequence(&ip, &op, &anchor, ml, ref); | ||
450 | ip = start3; | ||
451 | ref = ref3; | ||
452 | ml = ml3; | ||
453 | |||
454 | start0 = start2; | ||
455 | ref0 = ref2; | ||
456 | ml0 = ml2; | ||
457 | goto _search2; | ||
458 | } | ||
459 | |||
460 | start2 = start3; | ||
461 | ref2 = ref3; | ||
462 | ml2 = ml3; | ||
463 | goto _search3; | ||
464 | } | ||
465 | |||
466 | /* | ||
467 | * OK, now we have 3 ascending matches; let's write at least | ||
468 | * the first one. ip & ref are known; now for ml | ||
469 | */ | ||
470 | if (start2 < ip + ml) { | ||
471 | if ((start2 - ip) < (int)ML_MASK) { | ||
472 | int correction; | ||
473 | if (ml > OPTIMAL_ML) | ||
474 | ml = OPTIMAL_ML; | ||
475 | if (ip + ml > start2 + ml2 - MINMATCH) | ||
476 | ml = (int)(start2 - ip) + ml2 | ||
477 | - MINMATCH; | ||
478 | correction = ml - (int)(start2 - ip); | ||
479 | if (correction > 0) { | ||
480 | start2 += correction; | ||
481 | ref2 += correction; | ||
482 | ml2 -= correction; | ||
483 | } | ||
484 | } else | ||
485 | ml = (int)(start2 - ip); | ||
486 | } | ||
487 | lz4_encodesequence(&ip, &op, &anchor, ml, ref); | ||
488 | |||
489 | ip = start2; | ||
490 | ref = ref2; | ||
491 | ml = ml2; | ||
492 | |||
493 | start2 = start3; | ||
494 | ref2 = ref3; | ||
495 | ml2 = ml3; | ||
496 | |||
497 | goto _search3; | ||
498 | } | ||
499 | |||
500 | /* Encode Last Literals */ | ||
501 | lastrun = (int)(iend - anchor); | ||
502 | if (lastrun >= (int)RUN_MASK) { | ||
503 | *op++ = (RUN_MASK << ML_BITS); | ||
504 | lastrun -= RUN_MASK; | ||
505 | for (; lastrun > 254 ; lastrun -= 255) | ||
506 | *op++ = 255; | ||
507 | *op++ = (u8) lastrun; | ||
508 | } else | ||
509 | *op++ = (lastrun << ML_BITS); | ||
510 | memcpy(op, anchor, iend - anchor); | ||
511 | op += iend - anchor; | ||
512 | /* End */ | ||
513 | return (int) (((char *)op) - dest); | ||
514 | } | ||
515 | |||
516 | int lz4hc_compress(const unsigned char *src, size_t src_len, | ||
517 | unsigned char *dst, size_t *dst_len, void *wrkmem) | ||
518 | { | ||
519 | int ret = -1; | ||
520 | int out_len = 0; | ||
521 | |||
522 | struct lz4hc_data *hc4 = (struct lz4hc_data *)wrkmem; | ||
523 | lz4hc_init(hc4, (const u8 *)src); | ||
524 | out_len = lz4_compresshcctx((struct lz4hc_data *)hc4, (const u8 *)src, | ||
525 | (char *)dst, (int)src_len); | ||
526 | |||
527 | if (out_len < 0) | ||
528 | goto exit; | ||
529 | |||
530 | *dst_len = out_len; | ||
531 | return 0; | ||
532 | |||
533 | exit: | ||
534 | return ret; | ||
535 | } | ||
536 | EXPORT_SYMBOL_GPL(lz4hc_compress); | ||
537 | |||
538 | MODULE_LICENSE("GPL"); | ||
539 | MODULE_DESCRIPTION("LZ4HC compressor"); | ||
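
The HC entry point mirrors lz4_compress(), differing only in the size of the scratch area. A minimal sketch (not part of this patch), assuming a LZ4HC_MEM_COMPRESS constant from <linux/lz4.h> in this series sized for the match-finder state; example_lz4hc_pack() is hypothetical.

    #include <linux/errno.h>
    #include <linux/lz4.h>
    #include <linux/vmalloc.h>

    static int example_lz4hc_pack(const unsigned char *src, size_t src_len,
                                  unsigned char *dst, size_t *dst_len)
    {
            /* scratch for the HC hash/chain tables (see struct lz4hc_data) */
            void *wrkmem = vmalloc(LZ4HC_MEM_COMPRESS);
            int ret;

            if (!wrkmem)
                    return -ENOMEM;

            ret = lz4hc_compress(src, src_len, dst, dst_len, wrkmem);

            vfree(wrkmem);
            return ret;  /* 0 on success, -1 on failure */
    }
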
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index a1cf8cae60e7..a685c8a79578 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
@@ -247,13 +247,15 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents, | |||
247 | struct scatterlist *sg, *prv; | 247 | struct scatterlist *sg, *prv; |
248 | unsigned int left; | 248 | unsigned int left; |
249 | 249 | ||
250 | memset(table, 0, sizeof(*table)); | ||
251 | |||
252 | if (nents == 0) | ||
253 | return -EINVAL; | ||
250 | #ifndef ARCH_HAS_SG_CHAIN | 254 | #ifndef ARCH_HAS_SG_CHAIN |
251 | if (WARN_ON_ONCE(nents > max_ents)) | 255 | if (WARN_ON_ONCE(nents > max_ents)) |
252 | return -EINVAL; | 256 | return -EINVAL; |
253 | #endif | 257 | #endif |
254 | 258 | ||
255 | memset(table, 0, sizeof(*table)); | ||
256 | |||
257 | left = nents; | 259 | left = nents; |
258 | prv = NULL; | 260 | prv = NULL; |
259 | do { | 261 | do { |
@@ -453,6 +455,65 @@ void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, | |||
453 | } | 455 | } |
454 | EXPORT_SYMBOL(sg_miter_start); | 456 | EXPORT_SYMBOL(sg_miter_start); |
455 | 457 | ||
458 | static bool sg_miter_get_next_page(struct sg_mapping_iter *miter) | ||
459 | { | ||
460 | if (!miter->__remaining) { | ||
461 | struct scatterlist *sg; | ||
462 | unsigned long pgoffset; | ||
463 | |||
464 | if (!__sg_page_iter_next(&miter->piter)) | ||
465 | return false; | ||
466 | |||
467 | sg = miter->piter.sg; | ||
468 | pgoffset = miter->piter.sg_pgoffset; | ||
469 | |||
470 | miter->__offset = pgoffset ? 0 : sg->offset; | ||
471 | miter->__remaining = sg->offset + sg->length - | ||
472 | (pgoffset << PAGE_SHIFT) - miter->__offset; | ||
473 | miter->__remaining = min_t(unsigned long, miter->__remaining, | ||
474 | PAGE_SIZE - miter->__offset); | ||
475 | } | ||
476 | |||
477 | return true; | ||
478 | } | ||
479 | |||
480 | /** | ||
481 | * sg_miter_skip - reposition mapping iterator | ||
482 | * @miter: sg mapping iter to be skipped | ||
483 | * @offset: number of bytes to advance the current location by | ||
484 | * | ||
485 | * Description: | ||
486 | * Sets the offset of @miter to its current location plus @offset bytes. | ||
487 | * If the mapping iterator @miter has already been advanced by | ||
488 | * sg_miter_next(), this stops @miter first. | ||
489 | * | ||
490 | * Context: | ||
491 | * Doesn't care whether @miter is stopped or has not been advanced yet. | ||
492 | * Otherwise, preemption is disabled if SG_MITER_ATOMIC is set. | ||
493 | * | ||
494 | * Returns: | ||
495 | * true if @miter contains a valid mapping, false if the end of the | ||
496 | * sg list is reached. | ||
497 | */ | ||
498 | static bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset) | ||
499 | { | ||
500 | sg_miter_stop(miter); | ||
501 | |||
502 | while (offset) { | ||
503 | off_t consumed; | ||
504 | |||
505 | if (!sg_miter_get_next_page(miter)) | ||
506 | return false; | ||
507 | |||
508 | consumed = min_t(off_t, offset, miter->__remaining); | ||
509 | miter->__offset += consumed; | ||
510 | miter->__remaining -= consumed; | ||
511 | offset -= consumed; | ||
512 | } | ||
513 | |||
514 | return true; | ||
515 | } | ||
516 | |||
456 | /** | 517 | /** |
457 | * sg_miter_next - proceed mapping iterator to the next mapping | 518 | * sg_miter_next - proceed mapping iterator to the next mapping |
458 | * @miter: sg mapping iter to proceed | 519 | * @miter: sg mapping iter to proceed |
@@ -478,22 +539,9 @@ bool sg_miter_next(struct sg_mapping_iter *miter) | |||
478 | * Get to the next page if necessary. | 539 | * Get to the next page if necessary. |
479 | * __remaining, __offset is adjusted by sg_miter_stop | 540 | * __remaining, __offset is adjusted by sg_miter_stop |
480 | */ | 541 | */ |
481 | if (!miter->__remaining) { | 542 | if (!sg_miter_get_next_page(miter)) |
482 | struct scatterlist *sg; | 543 | return false; |
483 | unsigned long pgoffset; | ||
484 | |||
485 | if (!__sg_page_iter_next(&miter->piter)) | ||
486 | return false; | ||
487 | |||
488 | sg = miter->piter.sg; | ||
489 | pgoffset = miter->piter.sg_pgoffset; | ||
490 | 544 | ||
491 | miter->__offset = pgoffset ? 0 : sg->offset; | ||
492 | miter->__remaining = sg->offset + sg->length - | ||
493 | (pgoffset << PAGE_SHIFT) - miter->__offset; | ||
494 | miter->__remaining = min_t(unsigned long, miter->__remaining, | ||
495 | PAGE_SIZE - miter->__offset); | ||
496 | } | ||
497 | miter->page = sg_page_iter_page(&miter->piter); | 545 | miter->page = sg_page_iter_page(&miter->piter); |
498 | miter->consumed = miter->length = miter->__remaining; | 546 | miter->consumed = miter->length = miter->__remaining; |
499 | 547 | ||
@@ -552,14 +600,16 @@ EXPORT_SYMBOL(sg_miter_stop); | |||
552 | * @nents: Number of SG entries | 600 | * @nents: Number of SG entries |
553 | * @buf: Where to copy from | 601 | * @buf: Where to copy from |
554 | * @buflen: The number of bytes to copy | 602 | * @buflen: The number of bytes to copy |
555 | * @to_buffer: transfer direction (non zero == from an sg list to a | 603 | * @skip: Number of bytes to skip before copying |
556 | * buffer, 0 == from a buffer to an sg list | 604 | * @to_buffer: transfer direction (true == from an sg list to a |
605 | * buffer, false == from a buffer to an sg list) | ||
557 | * | 606 | * |
558 | * Returns the number of copied bytes. | 607 | * Returns the number of copied bytes. |
559 | * | 608 | * |
560 | **/ | 609 | **/ |
561 | static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, | 610 | static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, |
562 | void *buf, size_t buflen, int to_buffer) | 611 | void *buf, size_t buflen, off_t skip, |
612 | bool to_buffer) | ||
563 | { | 613 | { |
564 | unsigned int offset = 0; | 614 | unsigned int offset = 0; |
565 | struct sg_mapping_iter miter; | 615 | struct sg_mapping_iter miter; |
@@ -573,6 +623,9 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, | |||
573 | 623 | ||
574 | sg_miter_start(&miter, sgl, nents, sg_flags); | 624 | sg_miter_start(&miter, sgl, nents, sg_flags); |
575 | 625 | ||
626 | if (!sg_miter_skip(&miter, skip)) | ||
627 | return false; | ||
628 | |||
576 | local_irq_save(flags); | 629 | local_irq_save(flags); |
577 | 630 | ||
578 | while (sg_miter_next(&miter) && offset < buflen) { | 631 | while (sg_miter_next(&miter) && offset < buflen) { |
@@ -607,7 +660,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, | |||
607 | size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, | 660 | size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, |
608 | void *buf, size_t buflen) | 661 | void *buf, size_t buflen) |
609 | { | 662 | { |
610 | return sg_copy_buffer(sgl, nents, buf, buflen, 0); | 663 | return sg_copy_buffer(sgl, nents, buf, buflen, 0, false); |
611 | } | 664 | } |
612 | EXPORT_SYMBOL(sg_copy_from_buffer); | 665 | EXPORT_SYMBOL(sg_copy_from_buffer); |
613 | 666 | ||
@@ -624,6 +677,42 @@ EXPORT_SYMBOL(sg_copy_from_buffer); | |||
624 | size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, | 677 | size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, |
625 | void *buf, size_t buflen) | 678 | void *buf, size_t buflen) |
626 | { | 679 | { |
627 | return sg_copy_buffer(sgl, nents, buf, buflen, 1); | 680 | return sg_copy_buffer(sgl, nents, buf, buflen, 0, true); |
628 | } | 681 | } |
629 | EXPORT_SYMBOL(sg_copy_to_buffer); | 682 | EXPORT_SYMBOL(sg_copy_to_buffer); |
683 | |||
684 | /** | ||
685 | * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list | ||
686 | * @sgl: The SG list | ||
687 | * @nents: Number of SG entries | ||
688 | * @buf: Where to copy from | ||
689 | * @skip: Number of bytes to skip before copying | ||
690 | * @buflen: The number of bytes to copy | ||
691 | * | ||
692 | * Returns the number of copied bytes. | ||
693 | * | ||
694 | **/ | ||
695 | size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, | ||
696 | void *buf, size_t buflen, off_t skip) | ||
697 | { | ||
698 | return sg_copy_buffer(sgl, nents, buf, buflen, skip, false); | ||
699 | } | ||
700 | EXPORT_SYMBOL(sg_pcopy_from_buffer); | ||
701 | |||
702 | /** | ||
703 | * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer | ||
704 | * @sgl: The SG list | ||
705 | * @nents: Number of SG entries | ||
706 | * @buf: Where to copy to | ||
707 | * @skip: Number of bytes to skip before copying | ||
708 | * @buflen: The number of bytes to copy | ||
709 | * | ||
710 | * Returns the number of copied bytes. | ||
711 | * | ||
712 | **/ | ||
713 | size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, | ||
714 | void *buf, size_t buflen, off_t skip) | ||
715 | { | ||
716 | return sg_copy_buffer(sgl, nents, buf, buflen, skip, true); | ||
717 | } | ||
718 | EXPORT_SYMBOL(sg_pcopy_to_buffer); | ||
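
A minimal sketch (not from this patch) of the new offset-aware helpers in use: copy 'len' bytes, starting 'skip' bytes into a scatterlist, out to a freshly allocated linear buffer, the sort of partial access the series fixes in scsi_debug. example_sg_peek() is hypothetical.

    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static void *example_sg_peek(struct scatterlist *sgl, unsigned int nents,
                                 off_t skip, size_t len)
    {
            void *buf = kmalloc(len, GFP_KERNEL);

            if (!buf)
                    return NULL;

            /* sg_pcopy_to_buffer() returns the number of bytes actually copied */
            if (sg_pcopy_to_buffer(sgl, nents, buf, len, skip) != len) {
                    kfree(buf);
                    return NULL;
            }
            return buf;
    }
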
diff --git a/mm/filemap.c b/mm/filemap.c index 7905fe721aa8..4b51ac1acae7 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -1539,12 +1539,12 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma, | |||
1539 | struct address_space *mapping = file->f_mapping; | 1539 | struct address_space *mapping = file->f_mapping; |
1540 | 1540 | ||
1541 | /* If we don't want any read-ahead, don't bother */ | 1541 | /* If we don't want any read-ahead, don't bother */ |
1542 | if (VM_RandomReadHint(vma)) | 1542 | if (vma->vm_flags & VM_RAND_READ) |
1543 | return; | 1543 | return; |
1544 | if (!ra->ra_pages) | 1544 | if (!ra->ra_pages) |
1545 | return; | 1545 | return; |
1546 | 1546 | ||
1547 | if (VM_SequentialReadHint(vma)) { | 1547 | if (vma->vm_flags & VM_SEQ_READ) { |
1548 | page_cache_sync_readahead(mapping, ra, file, offset, | 1548 | page_cache_sync_readahead(mapping, ra, file, offset, |
1549 | ra->ra_pages); | 1549 | ra->ra_pages); |
1550 | return; | 1550 | return; |
@@ -1584,7 +1584,7 @@ static void do_async_mmap_readahead(struct vm_area_struct *vma, | |||
1584 | struct address_space *mapping = file->f_mapping; | 1584 | struct address_space *mapping = file->f_mapping; |
1585 | 1585 | ||
1586 | /* If we don't want any read-ahead, don't bother */ | 1586 | /* If we don't want any read-ahead, don't bother */ |
1587 | if (VM_RandomReadHint(vma)) | 1587 | if (vma->vm_flags & VM_RAND_READ) |
1588 | return; | 1588 | return; |
1589 | if (ra->mmap_miss > 0) | 1589 | if (ra->mmap_miss > 0) |
1590 | ra->mmap_miss--; | 1590 | ra->mmap_miss--; |
diff --git a/mm/internal.h b/mm/internal.h index 8562de0a5197..4390ac6c106e 100644 --- a/mm/internal.h +++ b/mm/internal.h | |||
@@ -32,11 +32,6 @@ static inline void set_page_refcounted(struct page *page) | |||
32 | set_page_count(page, 1); | 32 | set_page_count(page, 1); |
33 | } | 33 | } |
34 | 34 | ||
35 | static inline void __put_page(struct page *page) | ||
36 | { | ||
37 | atomic_dec(&page->_count); | ||
38 | } | ||
39 | |||
40 | static inline void __get_page_tail_foll(struct page *page, | 35 | static inline void __get_page_tail_foll(struct page *page, |
41 | bool get_page_head) | 36 | bool get_page_head) |
42 | { | 37 | { |
diff --git a/mm/memblock.c b/mm/memblock.c index c5fad932fa51..a847bfe6f3ba 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
@@ -566,7 +566,7 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) | |||
566 | /** | 566 | /** |
567 | * __next_free_mem_range - next function for for_each_free_mem_range() | 567 | * __next_free_mem_range - next function for for_each_free_mem_range() |
568 | * @idx: pointer to u64 loop variable | 568 | * @idx: pointer to u64 loop variable |
569 | * @nid: nid: node selector, %MAX_NUMNODES for all nodes | 569 | * @nid: node selector, %MAX_NUMNODES for all nodes |
570 | * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL | 570 | * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL |
571 | * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL | 571 | * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL |
572 | * @out_nid: ptr to int for nid of the range, can be %NULL | 572 | * @out_nid: ptr to int for nid of the range, can be %NULL |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 2e851f453814..d12ca6f3c293 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -187,10 +187,6 @@ struct mem_cgroup_per_node { | |||
187 | struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; | 187 | struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; |
188 | }; | 188 | }; |
189 | 189 | ||
190 | struct mem_cgroup_lru_info { | ||
191 | struct mem_cgroup_per_node *nodeinfo[0]; | ||
192 | }; | ||
193 | |||
194 | /* | 190 | /* |
195 | * Cgroups above their limits are maintained in a RB-Tree, independent of | 191 | * Cgroups above their limits are maintained in a RB-Tree, independent of |
196 | * their hierarchy representation | 192 | * their hierarchy representation |
@@ -267,28 +263,10 @@ struct mem_cgroup { | |||
267 | /* vmpressure notifications */ | 263 | /* vmpressure notifications */ |
268 | struct vmpressure vmpressure; | 264 | struct vmpressure vmpressure; |
269 | 265 | ||
270 | union { | 266 | /* |
271 | /* | 267 | * the counter to account for mem+swap usage. |
272 | * the counter to account for mem+swap usage. | 268 | */ |
273 | */ | 269 | struct res_counter memsw; |
274 | struct res_counter memsw; | ||
275 | |||
276 | /* | ||
277 | * rcu_freeing is used only when freeing struct mem_cgroup, | ||
278 | * so put it into a union to avoid wasting more memory. | ||
279 | * It must be disjoint from the css field. It could be | ||
280 | * in a union with the res field, but res plays a much | ||
281 | * larger part in mem_cgroup life than memsw, and might | ||
282 | * be of interest, even at time of free, when debugging. | ||
283 | * So share rcu_head with the less interesting memsw. | ||
284 | */ | ||
285 | struct rcu_head rcu_freeing; | ||
286 | /* | ||
287 | * We also need some space for a worker in deferred freeing. | ||
288 | * By the time we call it, rcu_freeing is no longer in use. | ||
289 | */ | ||
290 | struct work_struct work_freeing; | ||
291 | }; | ||
292 | 270 | ||
293 | /* | 271 | /* |
294 | * the counter to account for kernel memory usage. | 272 | * the counter to account for kernel memory usage. |
@@ -303,8 +281,6 @@ struct mem_cgroup { | |||
303 | bool oom_lock; | 281 | bool oom_lock; |
304 | atomic_t under_oom; | 282 | atomic_t under_oom; |
305 | 283 | ||
306 | atomic_t refcnt; | ||
307 | |||
308 | int swappiness; | 284 | int swappiness; |
309 | /* OOM-Killer disable */ | 285 | /* OOM-Killer disable */ |
310 | int oom_kill_disable; | 286 | int oom_kill_disable; |
@@ -366,14 +342,8 @@ struct mem_cgroup { | |||
366 | atomic_t numainfo_updating; | 342 | atomic_t numainfo_updating; |
367 | #endif | 343 | #endif |
368 | 344 | ||
369 | /* | 345 | struct mem_cgroup_per_node *nodeinfo[0]; |
370 | * Per cgroup active and inactive list, similar to the | 346 | /* WARNING: nodeinfo must be the last member here */ |
371 | * per zone LRU lists. | ||
372 | * | ||
373 | * WARNING: This has to be the last element of the struct. Don't | ||
374 | * add new fields after this point. | ||
375 | */ | ||
376 | struct mem_cgroup_lru_info info; | ||
377 | }; | 347 | }; |
378 | 348 | ||
379 | static size_t memcg_size(void) | 349 | static size_t memcg_size(void) |
@@ -416,6 +386,11 @@ static void memcg_kmem_clear_activated(struct mem_cgroup *memcg) | |||
416 | 386 | ||
417 | static void memcg_kmem_mark_dead(struct mem_cgroup *memcg) | 387 | static void memcg_kmem_mark_dead(struct mem_cgroup *memcg) |
418 | { | 388 | { |
389 | /* | ||
390 | * Our caller must use css_get() first, because memcg_uncharge_kmem() | ||
391 | * will call css_put() if it sees the memcg is dead. | ||
392 | */ | ||
393 | smp_wmb(); | ||
419 | if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags)) | 394 | if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags)) |
420 | set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags); | 395 | set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags); |
421 | } | 396 | } |
@@ -508,9 +483,6 @@ enum res_type { | |||
508 | */ | 483 | */ |
509 | static DEFINE_MUTEX(memcg_create_mutex); | 484 | static DEFINE_MUTEX(memcg_create_mutex); |
510 | 485 | ||
511 | static void mem_cgroup_get(struct mem_cgroup *memcg); | ||
512 | static void mem_cgroup_put(struct mem_cgroup *memcg); | ||
513 | |||
514 | static inline | 486 | static inline |
515 | struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s) | 487 | struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s) |
516 | { | 488 | { |
@@ -561,15 +533,15 @@ void sock_update_memcg(struct sock *sk) | |||
561 | */ | 533 | */ |
562 | if (sk->sk_cgrp) { | 534 | if (sk->sk_cgrp) { |
563 | BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg)); | 535 | BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg)); |
564 | mem_cgroup_get(sk->sk_cgrp->memcg); | 536 | css_get(&sk->sk_cgrp->memcg->css); |
565 | return; | 537 | return; |
566 | } | 538 | } |
567 | 539 | ||
568 | rcu_read_lock(); | 540 | rcu_read_lock(); |
569 | memcg = mem_cgroup_from_task(current); | 541 | memcg = mem_cgroup_from_task(current); |
570 | cg_proto = sk->sk_prot->proto_cgroup(memcg); | 542 | cg_proto = sk->sk_prot->proto_cgroup(memcg); |
571 | if (!mem_cgroup_is_root(memcg) && memcg_proto_active(cg_proto)) { | 543 | if (!mem_cgroup_is_root(memcg) && |
572 | mem_cgroup_get(memcg); | 544 | memcg_proto_active(cg_proto) && css_tryget(&memcg->css)) { |
573 | sk->sk_cgrp = cg_proto; | 545 | sk->sk_cgrp = cg_proto; |
574 | } | 546 | } |
575 | rcu_read_unlock(); | 547 | rcu_read_unlock(); |
@@ -583,7 +555,7 @@ void sock_release_memcg(struct sock *sk) | |||
583 | struct mem_cgroup *memcg; | 555 | struct mem_cgroup *memcg; |
584 | WARN_ON(!sk->sk_cgrp->memcg); | 556 | WARN_ON(!sk->sk_cgrp->memcg); |
585 | memcg = sk->sk_cgrp->memcg; | 557 | memcg = sk->sk_cgrp->memcg; |
586 | mem_cgroup_put(memcg); | 558 | css_put(&sk->sk_cgrp->memcg->css); |
587 | } | 559 | } |
588 | } | 560 | } |
589 | 561 | ||
@@ -683,7 +655,7 @@ static struct mem_cgroup_per_zone * | |||
683 | mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid) | 655 | mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid) |
684 | { | 656 | { |
685 | VM_BUG_ON((unsigned)nid >= nr_node_ids); | 657 | VM_BUG_ON((unsigned)nid >= nr_node_ids); |
686 | return &memcg->info.nodeinfo[nid]->zoneinfo[zid]; | 658 | return &memcg->nodeinfo[nid]->zoneinfo[zid]; |
687 | } | 659 | } |
688 | 660 | ||
689 | struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg) | 661 | struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg) |
@@ -3060,8 +3032,16 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size) | |||
3060 | if (res_counter_uncharge(&memcg->kmem, size)) | 3032 | if (res_counter_uncharge(&memcg->kmem, size)) |
3061 | return; | 3033 | return; |
3062 | 3034 | ||
3035 | /* | ||
3036 | * Releases a reference taken in kmem_cgroup_css_offline in case | ||
3037 | * this last uncharge is racing with the offlining code or it is | ||
3038 | * outliving the memcg existence. | ||
3039 | * | ||
3040 | * The memory barrier imposed by test&clear is paired with the | ||
3041 | * explicit one in memcg_kmem_mark_dead(). | ||
3042 | */ | ||
3063 | if (memcg_kmem_test_and_clear_dead(memcg)) | 3043 | if (memcg_kmem_test_and_clear_dead(memcg)) |
3064 | mem_cgroup_put(memcg); | 3044 | css_put(&memcg->css); |
3065 | } | 3045 | } |
3066 | 3046 | ||
3067 | void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep) | 3047 | void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep) |
@@ -3252,7 +3232,7 @@ void memcg_release_cache(struct kmem_cache *s) | |||
3252 | list_del(&s->memcg_params->list); | 3232 | list_del(&s->memcg_params->list); |
3253 | mutex_unlock(&memcg->slab_caches_mutex); | 3233 | mutex_unlock(&memcg->slab_caches_mutex); |
3254 | 3234 | ||
3255 | mem_cgroup_put(memcg); | 3235 | css_put(&memcg->css); |
3256 | out: | 3236 | out: |
3257 | kfree(s->memcg_params); | 3237 | kfree(s->memcg_params); |
3258 | } | 3238 | } |
@@ -3412,16 +3392,18 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg, | |||
3412 | 3392 | ||
3413 | mutex_lock(&memcg_cache_mutex); | 3393 | mutex_lock(&memcg_cache_mutex); |
3414 | new_cachep = cachep->memcg_params->memcg_caches[idx]; | 3394 | new_cachep = cachep->memcg_params->memcg_caches[idx]; |
3415 | if (new_cachep) | 3395 | if (new_cachep) { |
3396 | css_put(&memcg->css); | ||
3416 | goto out; | 3397 | goto out; |
3398 | } | ||
3417 | 3399 | ||
3418 | new_cachep = kmem_cache_dup(memcg, cachep); | 3400 | new_cachep = kmem_cache_dup(memcg, cachep); |
3419 | if (new_cachep == NULL) { | 3401 | if (new_cachep == NULL) { |
3420 | new_cachep = cachep; | 3402 | new_cachep = cachep; |
3403 | css_put(&memcg->css); | ||
3421 | goto out; | 3404 | goto out; |
3422 | } | 3405 | } |
3423 | 3406 | ||
3424 | mem_cgroup_get(memcg); | ||
3425 | atomic_set(&new_cachep->memcg_params->nr_pages , 0); | 3407 | atomic_set(&new_cachep->memcg_params->nr_pages , 0); |
3426 | 3408 | ||
3427 | cachep->memcg_params->memcg_caches[idx] = new_cachep; | 3409 | cachep->memcg_params->memcg_caches[idx] = new_cachep; |
@@ -3509,8 +3491,6 @@ static void memcg_create_cache_work_func(struct work_struct *w) | |||
3509 | 3491 | ||
3510 | cw = container_of(w, struct create_work, work); | 3492 | cw = container_of(w, struct create_work, work); |
3511 | memcg_create_kmem_cache(cw->memcg, cw->cachep); | 3493 | memcg_create_kmem_cache(cw->memcg, cw->cachep); |
3512 | /* Drop the reference gotten when we enqueued. */ | ||
3513 | css_put(&cw->memcg->css); | ||
3514 | kfree(cw); | 3494 | kfree(cw); |
3515 | } | 3495 | } |
3516 | 3496 | ||
@@ -3647,6 +3627,34 @@ __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order) | |||
3647 | int ret; | 3627 | int ret; |
3648 | 3628 | ||
3649 | *_memcg = NULL; | 3629 | *_memcg = NULL; |
3630 | |||
3631 | /* | ||
3632 | * Disabling accounting is only relevant for some specific memcg | ||
3633 | * internal allocations. Therefore we would initially not have such | ||
3634 | * check here, since direct calls to the page allocator that are marked | ||
3635 | * with GFP_KMEMCG only happen outside memcg core. We are mostly | ||
3636 | * concerned with cache allocations, and by having this test at | ||
3637 | * memcg_kmem_get_cache, we are already able to relay the allocation to | ||
3638 | * the root cache and bypass the memcg cache altogether. | ||
3639 | * | ||
3640 | * There is one exception, though: the SLUB allocator does not create | ||
3641 | * large order caches, but rather service large kmallocs directly from | ||
3642 | * the page allocator. Therefore, the following sequence when backed by | ||
3643 | * the SLUB allocator: | ||
3644 | * | ||
3645 | * memcg_stop_kmem_account(); | ||
3646 | * kmalloc(<large_number>) | ||
3647 | * memcg_resume_kmem_account(); | ||
3648 | * | ||
3649 | * would effectively ignore the fact that we should skip accounting, | ||
3650 | * since it will drive us directly to this function without passing | ||
3651 | * through the cache selector memcg_kmem_get_cache. Such large | ||
3652 | * allocations are extremely rare but can happen, for instance, for the | ||
3653 | * cache arrays. We bring this test here. | ||
3654 | */ | ||
3655 | if (!current->mm || current->memcg_kmem_skip_account) | ||
3656 | return true; | ||
3657 | |||
3650 | memcg = try_get_mem_cgroup_from_mm(current->mm); | 3658 | memcg = try_get_mem_cgroup_from_mm(current->mm); |
3651 | 3659 | ||
3652 | /* | 3660 | /* |
@@ -4200,12 +4208,12 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype, | |||
4200 | unlock_page_cgroup(pc); | 4208 | unlock_page_cgroup(pc); |
4201 | /* | 4209 | /* |
4202 | * even after unlock, we have memcg->res.usage here and this memcg | 4210 | * even after unlock, we have memcg->res.usage here and this memcg |
4203 | * will never be freed. | 4211 | * will never be freed, so it's safe to call css_get(). |
4204 | */ | 4212 | */ |
4205 | memcg_check_events(memcg, page); | 4213 | memcg_check_events(memcg, page); |
4206 | if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) { | 4214 | if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) { |
4207 | mem_cgroup_swap_statistics(memcg, true); | 4215 | mem_cgroup_swap_statistics(memcg, true); |
4208 | mem_cgroup_get(memcg); | 4216 | css_get(&memcg->css); |
4209 | } | 4217 | } |
4210 | /* | 4218 | /* |
4211 | * Migration does not charge the res_counter for the | 4219 | * Migration does not charge the res_counter for the |
@@ -4317,7 +4325,7 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout) | |||
4317 | 4325 | ||
4318 | /* | 4326 | /* |
4319 | * record memcg information, if swapout && memcg != NULL, | 4327 | * record memcg information, if swapout && memcg != NULL, |
4320 | * mem_cgroup_get() was called in uncharge(). | 4328 | * css_get() was called in uncharge(). |
4321 | */ | 4329 | */ |
4322 | if (do_swap_account && swapout && memcg) | 4330 | if (do_swap_account && swapout && memcg) |
4323 | swap_cgroup_record(ent, css_id(&memcg->css)); | 4331 | swap_cgroup_record(ent, css_id(&memcg->css)); |
@@ -4348,7 +4356,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent) | |||
4348 | if (!mem_cgroup_is_root(memcg)) | 4356 | if (!mem_cgroup_is_root(memcg)) |
4349 | res_counter_uncharge(&memcg->memsw, PAGE_SIZE); | 4357 | res_counter_uncharge(&memcg->memsw, PAGE_SIZE); |
4350 | mem_cgroup_swap_statistics(memcg, false); | 4358 | mem_cgroup_swap_statistics(memcg, false); |
4351 | mem_cgroup_put(memcg); | 4359 | css_put(&memcg->css); |
4352 | } | 4360 | } |
4353 | rcu_read_unlock(); | 4361 | rcu_read_unlock(); |
4354 | } | 4362 | } |
@@ -4382,11 +4390,14 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry, | |||
4382 | * This function is only called from task migration context now. | 4390 | * This function is only called from task migration context now. |
4383 | * It postpones res_counter and refcount handling till the end | 4391 | * It postpones res_counter and refcount handling till the end |
4384 | * of task migration(mem_cgroup_clear_mc()) for performance | 4392 | * of task migration(mem_cgroup_clear_mc()) for performance |
4385 | * improvement. But we cannot postpone mem_cgroup_get(to) | 4393 | * improvement. But we cannot postpone css_get(to) because if |
4386 | * because if the process that has been moved to @to does | 4394 | * the process that has been moved to @to does swap-in, the |
4387 | * swap-in, the refcount of @to might be decreased to 0. | 4395 | * refcount of @to might be decreased to 0. |
4396 | * | ||
4397 | * We are in attach() phase, so the cgroup is guaranteed to be | ||
4398 | * alive, so we can just call css_get(). | ||
4388 | */ | 4399 | */ |
4389 | mem_cgroup_get(to); | 4400 | css_get(&to->css); |
4390 | return 0; | 4401 | return 0; |
4391 | } | 4402 | } |
4392 | return -EINVAL; | 4403 | return -EINVAL; |
@@ -5165,14 +5176,6 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val) | |||
5165 | * starts accounting before all call sites are patched | 5176 | * starts accounting before all call sites are patched |
5166 | */ | 5177 | */ |
5167 | memcg_kmem_set_active(memcg); | 5178 | memcg_kmem_set_active(memcg); |
5168 | |||
5169 | /* | ||
5170 | * kmem charges can outlive the cgroup. In the case of slab | ||
5171 | * pages, for instance, a page contain objects from various | ||
5172 | * processes, so it is unfeasible to migrate them away. We | ||
5173 | * need to reference count the memcg because of that. | ||
5174 | */ | ||
5175 | mem_cgroup_get(memcg); | ||
5176 | } else | 5179 | } else |
5177 | ret = res_counter_set_limit(&memcg->kmem, val); | 5180 | ret = res_counter_set_limit(&memcg->kmem, val); |
5178 | out: | 5181 | out: |
@@ -5205,16 +5208,16 @@ static int memcg_propagate_kmem(struct mem_cgroup *memcg) | |||
5205 | goto out; | 5208 | goto out; |
5206 | 5209 | ||
5207 | /* | 5210 | /* |
5208 | * destroy(), called if we fail, will issue static_key_slow_inc() and | 5211 | * __mem_cgroup_free() will issue static_key_slow_dec() because this |
5209 | * mem_cgroup_put() if kmem is enabled. We have to either call them | 5212 | * memcg is active already. If the later initialization fails then the |
5210 | * unconditionally, or clear the KMEM_ACTIVE flag. I personally find | 5213 | * cgroup core triggers the cleanup so we do not have to do it here. |
5211 | * this more consistent, since it always leads to the same destroy path | ||
5212 | */ | 5214 | */ |
5213 | mem_cgroup_get(memcg); | ||
5214 | static_key_slow_inc(&memcg_kmem_enabled_key); | 5215 | static_key_slow_inc(&memcg_kmem_enabled_key); |
5215 | 5216 | ||
5216 | mutex_lock(&set_limit_mutex); | 5217 | mutex_lock(&set_limit_mutex); |
5218 | memcg_stop_kmem_account(); | ||
5217 | ret = memcg_update_cache_sizes(memcg); | 5219 | ret = memcg_update_cache_sizes(memcg); |
5220 | memcg_resume_kmem_account(); | ||
5218 | mutex_unlock(&set_limit_mutex); | 5221 | mutex_unlock(&set_limit_mutex); |
5219 | out: | 5222 | out: |
5220 | return ret; | 5223 | return ret; |
@@ -5893,23 +5896,43 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) | |||
5893 | return mem_cgroup_sockets_init(memcg, ss); | 5896 | return mem_cgroup_sockets_init(memcg, ss); |
5894 | } | 5897 | } |
5895 | 5898 | ||
5896 | static void kmem_cgroup_destroy(struct mem_cgroup *memcg) | 5899 | static void memcg_destroy_kmem(struct mem_cgroup *memcg) |
5897 | { | 5900 | { |
5898 | mem_cgroup_sockets_destroy(memcg); | 5901 | mem_cgroup_sockets_destroy(memcg); |
5902 | } | ||
5903 | |||
5904 | static void kmem_cgroup_css_offline(struct mem_cgroup *memcg) | ||
5905 | { | ||
5906 | if (!memcg_kmem_is_active(memcg)) | ||
5907 | return; | ||
5908 | |||
5909 | /* | ||
5910 | * kmem charges can outlive the cgroup. In the case of slab | ||
5911 | * pages, for instance, a page can contain objects from various | ||
5912 | * processes. As we do not take a reference for every such | ||
5913 | * allocation, we have to be careful when doing uncharge | ||
5914 | * (see memcg_uncharge_kmem) and here during offlining. | ||
5915 | * | ||
5916 | * The idea is that only the _last_ uncharge which sees | ||
5917 | * the dead memcg will drop the last reference. An additional | ||
5918 | * reference is taken here before the group is marked dead | ||
5919 | * which is then paired with css_put during uncharge or here, respectively. | ||
5920 | * | ||
5921 | * Although this might sound strange as this path is called from | ||
5922 | * css_offline() when the reference might have dropped down to 0 | ||
5923 | * and shouldn't be incremented anymore (css_tryget would fail) | ||
5924 | * we do not have other options because of the kmem allocations | ||
5925 | * lifetime. | ||
5926 | */ | ||
5927 | css_get(&memcg->css); | ||
5899 | 5928 | ||
5900 | memcg_kmem_mark_dead(memcg); | 5929 | memcg_kmem_mark_dead(memcg); |
5901 | 5930 | ||
5902 | if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0) | 5931 | if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0) |
5903 | return; | 5932 | return; |
5904 | 5933 | ||
5905 | /* | ||
5906 | * Charges already down to 0, undo mem_cgroup_get() done in the charge | ||
5907 | * path here, being careful not to race with memcg_uncharge_kmem: it is | ||
5908 | * possible that the charges went down to 0 between mark_dead and the | ||
5909 | * res_counter read, so in that case, we don't need the put | ||
5910 | */ | ||
5911 | if (memcg_kmem_test_and_clear_dead(memcg)) | 5934 | if (memcg_kmem_test_and_clear_dead(memcg)) |
5912 | mem_cgroup_put(memcg); | 5935 | css_put(&memcg->css); |
5913 | } | 5936 | } |
5914 | #else | 5937 | #else |
5915 | static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) | 5938 | static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) |
@@ -5917,7 +5940,11 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) | |||
5917 | return 0; | 5940 | return 0; |
5918 | } | 5941 | } |
5919 | 5942 | ||
5920 | static void kmem_cgroup_destroy(struct mem_cgroup *memcg) | 5943 | static void memcg_destroy_kmem(struct mem_cgroup *memcg) |
5944 | { | ||
5945 | } | ||
5946 | |||
5947 | static void kmem_cgroup_css_offline(struct mem_cgroup *memcg) | ||
5921 | { | 5948 | { |
5922 | } | 5949 | } |
5923 | #endif | 5950 | #endif |
@@ -6087,13 +6114,13 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) | |||
6087 | mz->on_tree = false; | 6114 | mz->on_tree = false; |
6088 | mz->memcg = memcg; | 6115 | mz->memcg = memcg; |
6089 | } | 6116 | } |
6090 | memcg->info.nodeinfo[node] = pn; | 6117 | memcg->nodeinfo[node] = pn; |
6091 | return 0; | 6118 | return 0; |
6092 | } | 6119 | } |
6093 | 6120 | ||
6094 | static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) | 6121 | static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) |
6095 | { | 6122 | { |
6096 | kfree(memcg->info.nodeinfo[node]); | 6123 | kfree(memcg->nodeinfo[node]); |
6097 | } | 6124 | } |
6098 | 6125 | ||
6099 | static struct mem_cgroup *mem_cgroup_alloc(void) | 6126 | static struct mem_cgroup *mem_cgroup_alloc(void) |
@@ -6166,49 +6193,6 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg) | |||
6166 | vfree(memcg); | 6193 | vfree(memcg); |
6167 | } | 6194 | } |
6168 | 6195 | ||
6169 | |||
6170 | /* | ||
6171 | * Helpers for freeing a kmalloc()ed/vzalloc()ed mem_cgroup by RCU, | ||
6172 | * but in process context. The work_freeing structure is overlaid | ||
6173 | * on the rcu_freeing structure, which itself is overlaid on memsw. | ||
6174 | */ | ||
6175 | static void free_work(struct work_struct *work) | ||
6176 | { | ||
6177 | struct mem_cgroup *memcg; | ||
6178 | |||
6179 | memcg = container_of(work, struct mem_cgroup, work_freeing); | ||
6180 | __mem_cgroup_free(memcg); | ||
6181 | } | ||
6182 | |||
6183 | static void free_rcu(struct rcu_head *rcu_head) | ||
6184 | { | ||
6185 | struct mem_cgroup *memcg; | ||
6186 | |||
6187 | memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing); | ||
6188 | INIT_WORK(&memcg->work_freeing, free_work); | ||
6189 | schedule_work(&memcg->work_freeing); | ||
6190 | } | ||
6191 | |||
6192 | static void mem_cgroup_get(struct mem_cgroup *memcg) | ||
6193 | { | ||
6194 | atomic_inc(&memcg->refcnt); | ||
6195 | } | ||
6196 | |||
6197 | static void __mem_cgroup_put(struct mem_cgroup *memcg, int count) | ||
6198 | { | ||
6199 | if (atomic_sub_and_test(count, &memcg->refcnt)) { | ||
6200 | struct mem_cgroup *parent = parent_mem_cgroup(memcg); | ||
6201 | call_rcu(&memcg->rcu_freeing, free_rcu); | ||
6202 | if (parent) | ||
6203 | mem_cgroup_put(parent); | ||
6204 | } | ||
6205 | } | ||
6206 | |||
6207 | static void mem_cgroup_put(struct mem_cgroup *memcg) | ||
6208 | { | ||
6209 | __mem_cgroup_put(memcg, 1); | ||
6210 | } | ||
6211 | |||
6212 | /* | 6196 | /* |
6213 | * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled. | 6197 | * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled. |
6214 | */ | 6198 | */ |
@@ -6268,7 +6252,6 @@ mem_cgroup_css_alloc(struct cgroup *cont) | |||
6268 | 6252 | ||
6269 | memcg->last_scanned_node = MAX_NUMNODES; | 6253 | memcg->last_scanned_node = MAX_NUMNODES; |
6270 | INIT_LIST_HEAD(&memcg->oom_notify); | 6254 | INIT_LIST_HEAD(&memcg->oom_notify); |
6271 | atomic_set(&memcg->refcnt, 1); | ||
6272 | memcg->move_charge_at_immigrate = 0; | 6255 | memcg->move_charge_at_immigrate = 0; |
6273 | mutex_init(&memcg->thresholds_lock); | 6256 | mutex_init(&memcg->thresholds_lock); |
6274 | spin_lock_init(&memcg->move_lock); | 6257 | spin_lock_init(&memcg->move_lock); |
@@ -6304,12 +6287,9 @@ mem_cgroup_css_online(struct cgroup *cont) | |||
6304 | res_counter_init(&memcg->kmem, &parent->kmem); | 6287 | res_counter_init(&memcg->kmem, &parent->kmem); |
6305 | 6288 | ||
6306 | /* | 6289 | /* |
6307 | * We increment refcnt of the parent to ensure that we can | 6290 | * No need to take a reference to the parent because cgroup |
6308 | * safely access it on res_counter_charge/uncharge. | 6291 | * core guarantees its existence. |
6309 | * This refcnt will be decremented when freeing this | ||
6310 | * mem_cgroup(see mem_cgroup_put). | ||
6311 | */ | 6292 | */ |
6312 | mem_cgroup_get(parent); | ||
6313 | } else { | 6293 | } else { |
6314 | res_counter_init(&memcg->res, NULL); | 6294 | res_counter_init(&memcg->res, NULL); |
6315 | res_counter_init(&memcg->memsw, NULL); | 6295 | res_counter_init(&memcg->memsw, NULL); |
@@ -6325,16 +6305,6 @@ mem_cgroup_css_online(struct cgroup *cont) | |||
6325 | 6305 | ||
6326 | error = memcg_init_kmem(memcg, &mem_cgroup_subsys); | 6306 | error = memcg_init_kmem(memcg, &mem_cgroup_subsys); |
6327 | mutex_unlock(&memcg_create_mutex); | 6307 | mutex_unlock(&memcg_create_mutex); |
6328 | if (error) { | ||
6329 | /* | ||
6330 | * We call put now because our (and parent's) refcnts | ||
6331 | * are already in place. mem_cgroup_put() will internally | ||
6332 | * call __mem_cgroup_free, so return directly | ||
6333 | */ | ||
6334 | mem_cgroup_put(memcg); | ||
6335 | if (parent->use_hierarchy) | ||
6336 | mem_cgroup_put(parent); | ||
6337 | } | ||
6338 | return error; | 6308 | return error; |
6339 | } | 6309 | } |
6340 | 6310 | ||
@@ -6360,6 +6330,8 @@ static void mem_cgroup_css_offline(struct cgroup *cont) | |||
6360 | { | 6330 | { |
6361 | struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); | 6331 | struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); |
6362 | 6332 | ||
6333 | kmem_cgroup_css_offline(memcg); | ||
6334 | |||
6363 | mem_cgroup_invalidate_reclaim_iterators(memcg); | 6335 | mem_cgroup_invalidate_reclaim_iterators(memcg); |
6364 | mem_cgroup_reparent_charges(memcg); | 6336 | mem_cgroup_reparent_charges(memcg); |
6365 | mem_cgroup_destroy_all_caches(memcg); | 6337 | mem_cgroup_destroy_all_caches(memcg); |
@@ -6369,9 +6341,8 @@ static void mem_cgroup_css_free(struct cgroup *cont) | |||
6369 | { | 6341 | { |
6370 | struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); | 6342 | struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); |
6371 | 6343 | ||
6372 | kmem_cgroup_destroy(memcg); | 6344 | memcg_destroy_kmem(memcg); |
6373 | 6345 | __mem_cgroup_free(memcg); | |
6374 | mem_cgroup_put(memcg); | ||
6375 | } | 6346 | } |
6376 | 6347 | ||
6377 | #ifdef CONFIG_MMU | 6348 | #ifdef CONFIG_MMU |
@@ -6680,6 +6651,7 @@ static void __mem_cgroup_clear_mc(void) | |||
6680 | { | 6651 | { |
6681 | struct mem_cgroup *from = mc.from; | 6652 | struct mem_cgroup *from = mc.from; |
6682 | struct mem_cgroup *to = mc.to; | 6653 | struct mem_cgroup *to = mc.to; |
6654 | int i; | ||
6683 | 6655 | ||
6684 | /* we must uncharge all the leftover precharges from mc.to */ | 6656 | /* we must uncharge all the leftover precharges from mc.to */ |
6685 | if (mc.precharge) { | 6657 | if (mc.precharge) { |
@@ -6700,7 +6672,9 @@ static void __mem_cgroup_clear_mc(void) | |||
6700 | if (!mem_cgroup_is_root(mc.from)) | 6672 | if (!mem_cgroup_is_root(mc.from)) |
6701 | res_counter_uncharge(&mc.from->memsw, | 6673 | res_counter_uncharge(&mc.from->memsw, |
6702 | PAGE_SIZE * mc.moved_swap); | 6674 | PAGE_SIZE * mc.moved_swap); |
6703 | __mem_cgroup_put(mc.from, mc.moved_swap); | 6675 | |
6676 | for (i = 0; i < mc.moved_swap; i++) | ||
6677 | css_put(&mc.from->css); | ||
6704 | 6678 | ||
6705 | if (!mem_cgroup_is_root(mc.to)) { | 6679 | if (!mem_cgroup_is_root(mc.to)) { |
6706 | /* | 6680 | /* |
@@ -6710,7 +6684,7 @@ static void __mem_cgroup_clear_mc(void) | |||
6710 | res_counter_uncharge(&mc.to->res, | 6684 | res_counter_uncharge(&mc.to->res, |
6711 | PAGE_SIZE * mc.moved_swap); | 6685 | PAGE_SIZE * mc.moved_swap); |
6712 | } | 6686 | } |
6713 | /* we've already done mem_cgroup_get(mc.to) */ | 6687 | /* we've already done css_get(mc.to) */ |
6714 | mc.moved_swap = 0; | 6688 | mc.moved_swap = 0; |
6715 | } | 6689 | } |
6716 | memcg_oom_recover(from); | 6690 | memcg_oom_recover(from); |
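
The net effect of the memcontrol.c changes is that the private refcount (mem_cgroup_get/put) is gone and everything hangs off css references instead; the only subtle part is the kmem pin taken at css_offline time. Below is a small standalone C11 model of that hand-off (all names are invented, and the barriers, memsw and res_counter details are omitted): the offline path takes one extra reference and sets a dead flag, and whichever side, offline or the final uncharge, first sees the flag with usage at zero performs the single matching put.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static _Atomic long css_refs   = 1;     /* models the css reference count */
    static _Atomic long kmem_usage = 0;     /* models res_counter kmem usage   */
    static _Atomic bool kmem_dead  = false; /* models KMEM_ACCOUNTED_DEAD      */

    static void css_put_model(void)
    {
            if (atomic_fetch_sub(&css_refs, 1) == 1)
                    printf("memcg freed\n");
    }

    static void maybe_final_put(void)
    {
            /* test-and-clear: at most one caller can win */
            if (atomic_exchange(&kmem_dead, false))
                    css_put_model();
    }

    static void kmem_offline(void)
    {
            atomic_fetch_add(&css_refs, 1);         /* pin for outstanding charges  */
            atomic_store(&kmem_dead, true);
            if (atomic_load(&kmem_usage) == 0)
                    maybe_final_put();              /* nothing charged: drop it now */
    }

    static void kmem_uncharge(long size)
    {
            if (atomic_fetch_sub(&kmem_usage, size) - size == 0)
                    maybe_final_put();              /* last charge gone after offline */
    }

    int main(void)
    {
            atomic_store(&kmem_usage, 4096);        /* one slab page still charged  */
            kmem_offline();                         /* usage != 0, pin stays        */
            kmem_uncharge(4096);                    /* final uncharge drops the pin */
            css_put_model();                        /* base reference               */
            return 0;
    }
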
diff --git a/mm/memory.c b/mm/memory.c index b68812d682b6..1ce2e2a734fc 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -1150,7 +1150,7 @@ again: | |||
1150 | if (pte_dirty(ptent)) | 1150 | if (pte_dirty(ptent)) |
1151 | set_page_dirty(page); | 1151 | set_page_dirty(page); |
1152 | if (pte_young(ptent) && | 1152 | if (pte_young(ptent) && |
1153 | likely(!VM_SequentialReadHint(vma))) | 1153 | likely(!(vma->vm_flags & VM_SEQ_READ))) |
1154 | mark_page_accessed(page); | 1154 | mark_page_accessed(page); |
1155 | rss[MM_FILEPAGES]--; | 1155 | rss[MM_FILEPAGES]--; |
1156 | } | 1156 | } |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index f5ba127b2051..ca1dd3aa5eee 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -208,13 +208,13 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat) | |||
208 | pfn = pgdat->node_start_pfn; | 208 | pfn = pgdat->node_start_pfn; |
209 | end_pfn = pgdat_end_pfn(pgdat); | 209 | end_pfn = pgdat_end_pfn(pgdat); |
210 | 210 | ||
211 | /* register_section info */ | 211 | /* register section info */ |
212 | for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { | 212 | for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { |
213 | /* | 213 | /* |
214 | * Some platforms can assign the same pfn to multiple nodes - on | 214 | * Some platforms can assign the same pfn to multiple nodes - on |
215 | * node0 as well as nodeN. To avoid registering a pfn against | 215 | * node0 as well as nodeN. To avoid registering a pfn against |
216 | * multiple nodes we check that this pfn does not already | 216 | * multiple nodes we check that this pfn does not already |
217 | * reside in some other node. | 217 | * reside in some other nodes. |
218 | */ | 218 | */ |
219 | if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node)) | 219 | if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node)) |
220 | register_page_bootmem_info_section(pfn); | 220 | register_page_bootmem_info_section(pfn); |
@@ -914,19 +914,19 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ | |||
914 | if ((zone_idx(zone) > ZONE_NORMAL || online_type == ONLINE_MOVABLE) && | 914 | if ((zone_idx(zone) > ZONE_NORMAL || online_type == ONLINE_MOVABLE) && |
915 | !can_online_high_movable(zone)) { | 915 | !can_online_high_movable(zone)) { |
916 | unlock_memory_hotplug(); | 916 | unlock_memory_hotplug(); |
917 | return -1; | 917 | return -EINVAL; |
918 | } | 918 | } |
919 | 919 | ||
920 | if (online_type == ONLINE_KERNEL && zone_idx(zone) == ZONE_MOVABLE) { | 920 | if (online_type == ONLINE_KERNEL && zone_idx(zone) == ZONE_MOVABLE) { |
921 | if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages)) { | 921 | if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages)) { |
922 | unlock_memory_hotplug(); | 922 | unlock_memory_hotplug(); |
923 | return -1; | 923 | return -EINVAL; |
924 | } | 924 | } |
925 | } | 925 | } |
926 | if (online_type == ONLINE_MOVABLE && zone_idx(zone) == ZONE_MOVABLE - 1) { | 926 | if (online_type == ONLINE_MOVABLE && zone_idx(zone) == ZONE_MOVABLE - 1) { |
927 | if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages)) { | 927 | if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages)) { |
928 | unlock_memory_hotplug(); | 928 | unlock_memory_hotplug(); |
929 | return -1; | 929 | return -EINVAL; |
930 | } | 930 | } |
931 | } | 931 | } |
932 | 932 | ||
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -1358,18 +1358,19 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, | |||
1358 | 1358 | ||
1359 | if (!(flags & MAP_ANONYMOUS)) { | 1359 | if (!(flags & MAP_ANONYMOUS)) { |
1360 | audit_mmap_fd(fd, flags); | 1360 | audit_mmap_fd(fd, flags); |
1361 | if (unlikely(flags & MAP_HUGETLB)) | ||
1362 | return -EINVAL; | ||
1363 | file = fget(fd); | 1361 | file = fget(fd); |
1364 | if (!file) | 1362 | if (!file) |
1365 | goto out; | 1363 | goto out; |
1366 | if (is_file_hugepages(file)) | 1364 | if (is_file_hugepages(file)) |
1367 | len = ALIGN(len, huge_page_size(hstate_file(file))); | 1365 | len = ALIGN(len, huge_page_size(hstate_file(file))); |
1366 | retval = -EINVAL; | ||
1367 | if (unlikely(flags & MAP_HUGETLB && !is_file_hugepages(file))) | ||
1368 | goto out_fput; | ||
1368 | } else if (flags & MAP_HUGETLB) { | 1369 | } else if (flags & MAP_HUGETLB) { |
1369 | struct user_struct *user = NULL; | 1370 | struct user_struct *user = NULL; |
1370 | struct hstate *hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & | 1371 | struct hstate *hs; |
1371 | SHM_HUGE_MASK); | ||
1372 | 1372 | ||
1373 | hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & SHM_HUGE_MASK); | ||
1373 | if (!hs) | 1374 | if (!hs) |
1374 | return -EINVAL; | 1375 | return -EINVAL; |
1375 | 1376 | ||
@@ -1391,6 +1392,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, | |||
1391 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | 1392 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); |
1392 | 1393 | ||
1393 | retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); | 1394 | retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); |
1395 | out_fput: | ||
1394 | if (file) | 1396 | if (file) |
1395 | fput(file); | 1397 | fput(file); |
1396 | out: | 1398 | out: |
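
A userspace view of the MAP_HUGETLB change above (the mount point and the 2 MiB page size are assumptions, not from the patch): previously any mmap() that passed both a file descriptor and MAP_HUGETLB was rejected before the fd was even looked up; now the fd is resolved first and the call only fails with EINVAL if the file is not a hugetlbfs file, so a hugetlbfs fd with a redundant MAP_HUGETLB flag goes through.

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            /* assumes hugetlbfs mounted at /dev/hugepages and 2 MiB huge pages */
            int fd = open("/dev/hugepages/demo", O_CREAT | O_RDWR, 0600);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            void *p = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
                           MAP_SHARED | MAP_HUGETLB, fd, 0);
            if (p == MAP_FAILED)
                    perror("mmap");         /* was EINVAL here before this change */
            else
                    printf("mapped at %p\n", p);
            return 0;
    }
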
diff --git a/mm/mremap.c b/mm/mremap.c index 3708655378e9..457d34ef3bf2 100644 --- a/mm/mremap.c +++ b/mm/mremap.c | |||
@@ -456,13 +456,14 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, | |||
456 | unsigned long charged = 0; | 456 | unsigned long charged = 0; |
457 | bool locked = false; | 457 | bool locked = false; |
458 | 458 | ||
459 | down_write(¤t->mm->mmap_sem); | ||
460 | |||
461 | if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) | 459 | if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) |
462 | goto out; | 460 | return ret; |
461 | |||
462 | if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE)) | ||
463 | return ret; | ||
463 | 464 | ||
464 | if (addr & ~PAGE_MASK) | 465 | if (addr & ~PAGE_MASK) |
465 | goto out; | 466 | return ret; |
466 | 467 | ||
467 | old_len = PAGE_ALIGN(old_len); | 468 | old_len = PAGE_ALIGN(old_len); |
468 | new_len = PAGE_ALIGN(new_len); | 469 | new_len = PAGE_ALIGN(new_len); |
@@ -473,12 +474,13 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, | |||
473 | * a zero new-len is nonsensical. | 474 | * a zero new-len is nonsensical. |
474 | */ | 475 | */ |
475 | if (!new_len) | 476 | if (!new_len) |
476 | goto out; | 477 | return ret; |
478 | |||
479 | down_write(¤t->mm->mmap_sem); | ||
477 | 480 | ||
478 | if (flags & MREMAP_FIXED) { | 481 | if (flags & MREMAP_FIXED) { |
479 | if (flags & MREMAP_MAYMOVE) | 482 | ret = mremap_to(addr, old_len, new_addr, new_len, |
480 | ret = mremap_to(addr, old_len, new_addr, new_len, | 483 | &locked); |
481 | &locked); | ||
482 | goto out; | 484 | goto out; |
483 | } | 485 | } |
484 | 486 | ||
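
The mremap() hunk moves the cheap flag and alignment checks in front of down_write(mmap_sem), and spells out that MREMAP_FIXED without MREMAP_MAYMOVE is invalid rather than reaching that conclusion after taking the lock. A userspace sketch of the rejected combination (addresses and sizes are illustrative):

    #define _GNU_SOURCE
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 4096;
            void *old = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (old == MAP_FAILED)
                    return 1;

            /* MREMAP_FIXED requires MREMAP_MAYMOVE: fails with EINVAL,
             * now without ever taking mmap_sem. */
            void *p = mremap(old, len, 2 * len, MREMAP_FIXED,
                             (void *)0x700000000000UL);
            if (p == MAP_FAILED)
                    printf("mremap: %s\n", strerror(errno));
            return 0;
    }
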
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 327516b7aee9..b100255dedda 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -204,6 +204,7 @@ static char * const zone_names[MAX_NR_ZONES] = { | |||
204 | }; | 204 | }; |
205 | 205 | ||
206 | int min_free_kbytes = 1024; | 206 | int min_free_kbytes = 1024; |
207 | int user_min_free_kbytes; | ||
207 | 208 | ||
208 | static unsigned long __meminitdata nr_kernel_pages; | 209 | static unsigned long __meminitdata nr_kernel_pages; |
209 | static unsigned long __meminitdata nr_all_pages; | 210 | static unsigned long __meminitdata nr_all_pages; |
@@ -1046,7 +1047,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype) | |||
1046 | * MIGRATE_CMA areas. | 1047 | * MIGRATE_CMA areas. |
1047 | */ | 1048 | */ |
1048 | if (!is_migrate_cma(migratetype) && | 1049 | if (!is_migrate_cma(migratetype) && |
1049 | (unlikely(current_order >= pageblock_order / 2) || | 1050 | (current_order >= pageblock_order / 2 || |
1050 | start_migratetype == MIGRATE_RECLAIMABLE || | 1051 | start_migratetype == MIGRATE_RECLAIMABLE || |
1051 | page_group_by_mobility_disabled)) { | 1052 | page_group_by_mobility_disabled)) { |
1052 | int pages; | 1053 | int pages; |
@@ -3153,12 +3154,10 @@ static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) | |||
3153 | * Add all populated zones of a node to the zonelist. | 3154 | * Add all populated zones of a node to the zonelist. |
3154 | */ | 3155 | */ |
3155 | static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, | 3156 | static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, |
3156 | int nr_zones, enum zone_type zone_type) | 3157 | int nr_zones) |
3157 | { | 3158 | { |
3158 | struct zone *zone; | 3159 | struct zone *zone; |
3159 | 3160 | enum zone_type zone_type = MAX_NR_ZONES; | |
3160 | BUG_ON(zone_type >= MAX_NR_ZONES); | ||
3161 | zone_type++; | ||
3162 | 3161 | ||
3163 | do { | 3162 | do { |
3164 | zone_type--; | 3163 | zone_type--; |
@@ -3168,8 +3167,8 @@ static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, | |||
3168 | &zonelist->_zonerefs[nr_zones++]); | 3167 | &zonelist->_zonerefs[nr_zones++]); |
3169 | check_highest_zone(zone_type); | 3168 | check_highest_zone(zone_type); |
3170 | } | 3169 | } |
3171 | |||
3172 | } while (zone_type); | 3170 | } while (zone_type); |
3171 | |||
3173 | return nr_zones; | 3172 | return nr_zones; |
3174 | } | 3173 | } |
3175 | 3174 | ||
@@ -3363,8 +3362,7 @@ static void build_zonelists_in_node_order(pg_data_t *pgdat, int node) | |||
3363 | zonelist = &pgdat->node_zonelists[0]; | 3362 | zonelist = &pgdat->node_zonelists[0]; |
3364 | for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++) | 3363 | for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++) |
3365 | ; | 3364 | ; |
3366 | j = build_zonelists_node(NODE_DATA(node), zonelist, j, | 3365 | j = build_zonelists_node(NODE_DATA(node), zonelist, j); |
3367 | MAX_NR_ZONES - 1); | ||
3368 | zonelist->_zonerefs[j].zone = NULL; | 3366 | zonelist->_zonerefs[j].zone = NULL; |
3369 | zonelist->_zonerefs[j].zone_idx = 0; | 3367 | zonelist->_zonerefs[j].zone_idx = 0; |
3370 | } | 3368 | } |
@@ -3378,7 +3376,7 @@ static void build_thisnode_zonelists(pg_data_t *pgdat) | |||
3378 | struct zonelist *zonelist; | 3376 | struct zonelist *zonelist; |
3379 | 3377 | ||
3380 | zonelist = &pgdat->node_zonelists[1]; | 3378 | zonelist = &pgdat->node_zonelists[1]; |
3381 | j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1); | 3379 | j = build_zonelists_node(pgdat, zonelist, 0); |
3382 | zonelist->_zonerefs[j].zone = NULL; | 3380 | zonelist->_zonerefs[j].zone = NULL; |
3383 | zonelist->_zonerefs[j].zone_idx = 0; | 3381 | zonelist->_zonerefs[j].zone_idx = 0; |
3384 | } | 3382 | } |
@@ -3586,7 +3584,7 @@ static void build_zonelists(pg_data_t *pgdat) | |||
3586 | local_node = pgdat->node_id; | 3584 | local_node = pgdat->node_id; |
3587 | 3585 | ||
3588 | zonelist = &pgdat->node_zonelists[0]; | 3586 | zonelist = &pgdat->node_zonelists[0]; |
3589 | j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1); | 3587 | j = build_zonelists_node(pgdat, zonelist, 0); |
3590 | 3588 | ||
3591 | /* | 3589 | /* |
3592 | * Now we build the zonelist so that it contains the zones | 3590 | * Now we build the zonelist so that it contains the zones |
@@ -3599,14 +3597,12 @@ static void build_zonelists(pg_data_t *pgdat) | |||
3599 | for (node = local_node + 1; node < MAX_NUMNODES; node++) { | 3597 | for (node = local_node + 1; node < MAX_NUMNODES; node++) { |
3600 | if (!node_online(node)) | 3598 | if (!node_online(node)) |
3601 | continue; | 3599 | continue; |
3602 | j = build_zonelists_node(NODE_DATA(node), zonelist, j, | 3600 | j = build_zonelists_node(NODE_DATA(node), zonelist, j); |
3603 | MAX_NR_ZONES - 1); | ||
3604 | } | 3601 | } |
3605 | for (node = 0; node < local_node; node++) { | 3602 | for (node = 0; node < local_node; node++) { |
3606 | if (!node_online(node)) | 3603 | if (!node_online(node)) |
3607 | continue; | 3604 | continue; |
3608 | j = build_zonelists_node(NODE_DATA(node), zonelist, j, | 3605 | j = build_zonelists_node(NODE_DATA(node), zonelist, j); |
3609 | MAX_NR_ZONES - 1); | ||
3610 | } | 3606 | } |
3611 | 3607 | ||
3612 | zonelist->_zonerefs[j].zone = NULL; | 3608 | zonelist->_zonerefs[j].zone = NULL; |
@@ -4421,13 +4417,13 @@ static void __meminit adjust_zone_range_for_zone_movable(int nid, | |||
4421 | */ | 4417 | */ |
4422 | static unsigned long __meminit zone_spanned_pages_in_node(int nid, | 4418 | static unsigned long __meminit zone_spanned_pages_in_node(int nid, |
4423 | unsigned long zone_type, | 4419 | unsigned long zone_type, |
4420 | unsigned long node_start_pfn, | ||
4421 | unsigned long node_end_pfn, | ||
4424 | unsigned long *ignored) | 4422 | unsigned long *ignored) |
4425 | { | 4423 | { |
4426 | unsigned long node_start_pfn, node_end_pfn; | ||
4427 | unsigned long zone_start_pfn, zone_end_pfn; | 4424 | unsigned long zone_start_pfn, zone_end_pfn; |
4428 | 4425 | ||
4429 | /* Get the start and end of the node and zone */ | 4426 | /* Get the start and end of the zone */ |
4430 | get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); | ||
4431 | zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; | 4427 | zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; |
4432 | zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; | 4428 | zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; |
4433 | adjust_zone_range_for_zone_movable(nid, zone_type, | 4429 | adjust_zone_range_for_zone_movable(nid, zone_type, |
@@ -4482,14 +4478,14 @@ unsigned long __init absent_pages_in_range(unsigned long start_pfn, | |||
4482 | /* Return the number of page frames in holes in a zone on a node */ | 4478 | /* Return the number of page frames in holes in a zone on a node */ |
4483 | static unsigned long __meminit zone_absent_pages_in_node(int nid, | 4479 | static unsigned long __meminit zone_absent_pages_in_node(int nid, |
4484 | unsigned long zone_type, | 4480 | unsigned long zone_type, |
4481 | unsigned long node_start_pfn, | ||
4482 | unsigned long node_end_pfn, | ||
4485 | unsigned long *ignored) | 4483 | unsigned long *ignored) |
4486 | { | 4484 | { |
4487 | unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; | 4485 | unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; |
4488 | unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; | 4486 | unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; |
4489 | unsigned long node_start_pfn, node_end_pfn; | ||
4490 | unsigned long zone_start_pfn, zone_end_pfn; | 4487 | unsigned long zone_start_pfn, zone_end_pfn; |
4491 | 4488 | ||
4492 | get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); | ||
4493 | zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); | 4489 | zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); |
4494 | zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); | 4490 | zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); |
4495 | 4491 | ||
@@ -4502,6 +4498,8 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid, | |||
4502 | #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ | 4498 | #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ |
4503 | static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, | 4499 | static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, |
4504 | unsigned long zone_type, | 4500 | unsigned long zone_type, |
4501 | unsigned long node_start_pfn, | ||
4502 | unsigned long node_end_pfn, | ||
4505 | unsigned long *zones_size) | 4503 | unsigned long *zones_size) |
4506 | { | 4504 | { |
4507 | return zones_size[zone_type]; | 4505 | return zones_size[zone_type]; |
@@ -4509,6 +4507,8 @@ static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, | |||
4509 | 4507 | ||
4510 | static inline unsigned long __meminit zone_absent_pages_in_node(int nid, | 4508 | static inline unsigned long __meminit zone_absent_pages_in_node(int nid, |
4511 | unsigned long zone_type, | 4509 | unsigned long zone_type, |
4510 | unsigned long node_start_pfn, | ||
4511 | unsigned long node_end_pfn, | ||
4512 | unsigned long *zholes_size) | 4512 | unsigned long *zholes_size) |
4513 | { | 4513 | { |
4514 | if (!zholes_size) | 4514 | if (!zholes_size) |
@@ -4520,21 +4520,27 @@ static inline unsigned long __meminit zone_absent_pages_in_node(int nid, | |||
4520 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ | 4520 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ |
4521 | 4521 | ||
4522 | static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, | 4522 | static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, |
4523 | unsigned long *zones_size, unsigned long *zholes_size) | 4523 | unsigned long node_start_pfn, |
4524 | unsigned long node_end_pfn, | ||
4525 | unsigned long *zones_size, | ||
4526 | unsigned long *zholes_size) | ||
4524 | { | 4527 | { |
4525 | unsigned long realtotalpages, totalpages = 0; | 4528 | unsigned long realtotalpages, totalpages = 0; |
4526 | enum zone_type i; | 4529 | enum zone_type i; |
4527 | 4530 | ||
4528 | for (i = 0; i < MAX_NR_ZONES; i++) | 4531 | for (i = 0; i < MAX_NR_ZONES; i++) |
4529 | totalpages += zone_spanned_pages_in_node(pgdat->node_id, i, | 4532 | totalpages += zone_spanned_pages_in_node(pgdat->node_id, i, |
4530 | zones_size); | 4533 | node_start_pfn, |
4534 | node_end_pfn, | ||
4535 | zones_size); | ||
4531 | pgdat->node_spanned_pages = totalpages; | 4536 | pgdat->node_spanned_pages = totalpages; |
4532 | 4537 | ||
4533 | realtotalpages = totalpages; | 4538 | realtotalpages = totalpages; |
4534 | for (i = 0; i < MAX_NR_ZONES; i++) | 4539 | for (i = 0; i < MAX_NR_ZONES; i++) |
4535 | realtotalpages -= | 4540 | realtotalpages -= |
4536 | zone_absent_pages_in_node(pgdat->node_id, i, | 4541 | zone_absent_pages_in_node(pgdat->node_id, i, |
4537 | zholes_size); | 4542 | node_start_pfn, node_end_pfn, |
4543 | zholes_size); | ||
4538 | pgdat->node_present_pages = realtotalpages; | 4544 | pgdat->node_present_pages = realtotalpages; |
4539 | printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, | 4545 | printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, |
4540 | realtotalpages); | 4546 | realtotalpages); |
@@ -4643,6 +4649,7 @@ static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages, | |||
4643 | * NOTE: pgdat should get zeroed by caller. | 4649 | * NOTE: pgdat should get zeroed by caller. |
4644 | */ | 4650 | */ |
4645 | static void __paginginit free_area_init_core(struct pglist_data *pgdat, | 4651 | static void __paginginit free_area_init_core(struct pglist_data *pgdat, |
4652 | unsigned long node_start_pfn, unsigned long node_end_pfn, | ||
4646 | unsigned long *zones_size, unsigned long *zholes_size) | 4653 | unsigned long *zones_size, unsigned long *zholes_size) |
4647 | { | 4654 | { |
4648 | enum zone_type j; | 4655 | enum zone_type j; |
@@ -4664,8 +4671,11 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, | |||
4664 | struct zone *zone = pgdat->node_zones + j; | 4671 | struct zone *zone = pgdat->node_zones + j; |
4665 | unsigned long size, realsize, freesize, memmap_pages; | 4672 | unsigned long size, realsize, freesize, memmap_pages; |
4666 | 4673 | ||
4667 | size = zone_spanned_pages_in_node(nid, j, zones_size); | 4674 | size = zone_spanned_pages_in_node(nid, j, node_start_pfn, |
4675 | node_end_pfn, zones_size); | ||
4668 | realsize = freesize = size - zone_absent_pages_in_node(nid, j, | 4676 | realsize = freesize = size - zone_absent_pages_in_node(nid, j, |
4677 | node_start_pfn, | ||
4678 | node_end_pfn, | ||
4669 | zholes_size); | 4679 | zholes_size); |
4670 | 4680 | ||
4671 | /* | 4681 | /* |
@@ -4779,6 +4789,8 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, | |||
4779 | unsigned long node_start_pfn, unsigned long *zholes_size) | 4789 | unsigned long node_start_pfn, unsigned long *zholes_size) |
4780 | { | 4790 | { |
4781 | pg_data_t *pgdat = NODE_DATA(nid); | 4791 | pg_data_t *pgdat = NODE_DATA(nid); |
4792 | unsigned long start_pfn = 0; | ||
4793 | unsigned long end_pfn = 0; | ||
4782 | 4794 | ||
4783 | /* pg_data_t should be reset to zero when it's allocated */ | 4795 | /* pg_data_t should be reset to zero when it's allocated */ |
4784 | WARN_ON(pgdat->nr_zones || pgdat->classzone_idx); | 4796 | WARN_ON(pgdat->nr_zones || pgdat->classzone_idx); |
@@ -4786,7 +4798,11 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, | |||
4786 | pgdat->node_id = nid; | 4798 | pgdat->node_id = nid; |
4787 | pgdat->node_start_pfn = node_start_pfn; | 4799 | pgdat->node_start_pfn = node_start_pfn; |
4788 | init_zone_allows_reclaim(nid); | 4800 | init_zone_allows_reclaim(nid); |
4789 | calculate_node_totalpages(pgdat, zones_size, zholes_size); | 4801 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
4802 | get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); | ||
4803 | #endif | ||
4804 | calculate_node_totalpages(pgdat, start_pfn, end_pfn, | ||
4805 | zones_size, zholes_size); | ||
4790 | 4806 | ||
4791 | alloc_node_mem_map(pgdat); | 4807 | alloc_node_mem_map(pgdat); |
4792 | #ifdef CONFIG_FLAT_NODE_MEM_MAP | 4808 | #ifdef CONFIG_FLAT_NODE_MEM_MAP |
@@ -4795,7 +4811,8 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, | |||
4795 | (unsigned long)pgdat->node_mem_map); | 4811 | (unsigned long)pgdat->node_mem_map); |
4796 | #endif | 4812 | #endif |
4797 | 4813 | ||
4798 | free_area_init_core(pgdat, zones_size, zholes_size); | 4814 | free_area_init_core(pgdat, start_pfn, end_pfn, |
4815 | zones_size, zholes_size); | ||
4799 | } | 4816 | } |
4800 | 4817 | ||
4801 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP | 4818 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
@@ -5573,14 +5590,21 @@ static void __meminit setup_per_zone_inactive_ratio(void) | |||
5573 | int __meminit init_per_zone_wmark_min(void) | 5590 | int __meminit init_per_zone_wmark_min(void) |
5574 | { | 5591 | { |
5575 | unsigned long lowmem_kbytes; | 5592 | unsigned long lowmem_kbytes; |
5593 | int new_min_free_kbytes; | ||
5576 | 5594 | ||
5577 | lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); | 5595 | lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); |
5578 | 5596 | new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); | |
5579 | min_free_kbytes = int_sqrt(lowmem_kbytes * 16); | 5597 | |
5580 | if (min_free_kbytes < 128) | 5598 | if (new_min_free_kbytes > user_min_free_kbytes) { |
5581 | min_free_kbytes = 128; | 5599 | min_free_kbytes = new_min_free_kbytes; |
5582 | if (min_free_kbytes > 65536) | 5600 | if (min_free_kbytes < 128) |
5583 | min_free_kbytes = 65536; | 5601 | min_free_kbytes = 128; |
5602 | if (min_free_kbytes > 65536) | ||
5603 | min_free_kbytes = 65536; | ||
5604 | } else { | ||
5605 | pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", | ||
5606 | new_min_free_kbytes, user_min_free_kbytes); | ||
5607 | } | ||
5584 | setup_per_zone_wmarks(); | 5608 | setup_per_zone_wmarks(); |
5585 | refresh_zone_stat_thresholds(); | 5609 | refresh_zone_stat_thresholds(); |
5586 | setup_per_zone_lowmem_reserve(); | 5610 | setup_per_zone_lowmem_reserve(); |
@@ -5598,8 +5622,10 @@ int min_free_kbytes_sysctl_handler(ctl_table *table, int write, | |||
5598 | void __user *buffer, size_t *length, loff_t *ppos) | 5622 | void __user *buffer, size_t *length, loff_t *ppos) |
5599 | { | 5623 | { |
5600 | proc_dointvec(table, write, buffer, length, ppos); | 5624 | proc_dointvec(table, write, buffer, length, ppos); |
5601 | if (write) | 5625 | if (write) { |
5626 | user_min_free_kbytes = min_free_kbytes; | ||
5602 | setup_per_zone_wmarks(); | 5627 | setup_per_zone_wmarks(); |
5628 | } | ||
5603 | return 0; | 5629 | return 0; |
5604 | } | 5630 | } |
5605 | 5631 | ||
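
The user_min_free_kbytes addition makes memory hotplug (which re-runs init_per_zone_wmark_min()) respect an admin-chosen /proc/sys/vm/min_free_kbytes: the auto-computed value only wins when it is larger than what the admin wrote. A standalone model of the new policy with made-up numbers (the kernel uses int_sqrt(); libm sqrt() stands in here, so build with -lm):

    #include <math.h>
    #include <stdio.h>

    static int min_free_kbytes = 1024;
    static int user_min_free_kbytes;        /* 0 until the sysctl is written */

    static void recompute_wmark_min(unsigned long lowmem_kbytes)
    {
            int new_min_free_kbytes = (int)sqrt((double)lowmem_kbytes * 16);

            if (new_min_free_kbytes > user_min_free_kbytes) {
                    min_free_kbytes = new_min_free_kbytes;
                    if (min_free_kbytes < 128)
                            min_free_kbytes = 128;
                    if (min_free_kbytes > 65536)
                            min_free_kbytes = 65536;
            } else {
                    printf("keeping user value %d (computed %d)\n",
                           user_min_free_kbytes, new_min_free_kbytes);
            }
    }

    int main(void)
    {
            recompute_wmark_min(4UL << 20);         /* ~4 GiB lowmem -> 8192      */
            printf("auto-tuned: %d\n", min_free_kbytes);

            /* admin writes 65536; the sysctl handler records it */
            user_min_free_kbytes = 65536;
            min_free_kbytes = user_min_free_kbytes;

            recompute_wmark_min(4UL << 20);         /* hotplug re-run keeps 65536 */
            printf("after hotplug: %d\n", min_free_kbytes);
            return 0;
    }
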
diff --git a/mm/rmap.c b/mm/rmap.c --- a/mm/rmap.c +++ b/mm/rmap.c | |||
@@ -720,7 +720,7 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma, | |||
720 | * mapping is already gone, the unmap path will have | 720 | * mapping is already gone, the unmap path will have |
721 | * set PG_referenced or activated the page. | 721 | * set PG_referenced or activated the page. |
722 | */ | 722 | */ |
723 | if (likely(!VM_SequentialReadHint(vma))) | 723 | if (likely(!(vma->vm_flags & VM_SEQ_READ))) |
724 | referenced++; | 724 | referenced++; |
725 | } | 725 | } |
726 | pte_unmap_unlock(pte, ptl); | 726 | pte_unmap_unlock(pte, ptl); |
diff --git a/mm/sparse.c b/mm/sparse.c index b38400f0fb8d..308d50331bc3 100644 --- a/mm/sparse.c +++ b/mm/sparse.c | |||
@@ -753,6 +753,7 @@ out: | |||
753 | return ret; | 753 | return ret; |
754 | } | 754 | } |
755 | 755 | ||
756 | #ifdef CONFIG_MEMORY_HOTREMOVE | ||
756 | #ifdef CONFIG_MEMORY_FAILURE | 757 | #ifdef CONFIG_MEMORY_FAILURE |
757 | static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages) | 758 | static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages) |
758 | { | 759 | { |
@@ -774,7 +775,6 @@ static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages) | |||
774 | } | 775 | } |
775 | #endif | 776 | #endif |
776 | 777 | ||
777 | #ifdef CONFIG_MEMORY_HOTREMOVE | ||
778 | static void free_section_usemap(struct page *memmap, unsigned long *usemap) | 778 | static void free_section_usemap(struct page *memmap, unsigned long *usemap) |
779 | { | 779 | { |
780 | struct page *usemap_page; | 780 | struct page *usemap_page; |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 91a10472a39a..13a54953a273 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -388,12 +388,12 @@ nocache: | |||
388 | addr = ALIGN(first->va_end, align); | 388 | addr = ALIGN(first->va_end, align); |
389 | if (addr < vstart) | 389 | if (addr < vstart) |
390 | goto nocache; | 390 | goto nocache; |
391 | if (addr + size - 1 < addr) | 391 | if (addr + size < addr) |
392 | goto overflow; | 392 | goto overflow; |
393 | 393 | ||
394 | } else { | 394 | } else { |
395 | addr = ALIGN(vstart, align); | 395 | addr = ALIGN(vstart, align); |
396 | if (addr + size - 1 < addr) | 396 | if (addr + size < addr) |
397 | goto overflow; | 397 | goto overflow; |
398 | 398 | ||
399 | n = vmap_area_root.rb_node; | 399 | n = vmap_area_root.rb_node; |
@@ -420,7 +420,7 @@ nocache: | |||
420 | if (addr + cached_hole_size < first->va_start) | 420 | if (addr + cached_hole_size < first->va_start) |
421 | cached_hole_size = first->va_start - addr; | 421 | cached_hole_size = first->va_start - addr; |
422 | addr = ALIGN(first->va_end, align); | 422 | addr = ALIGN(first->va_end, align); |
423 | if (addr + size - 1 < addr) | 423 | if (addr + size < addr) |
424 | goto overflow; | 424 | goto overflow; |
425 | 425 | ||
426 | if (list_is_last(&first->list, &vmap_area_list)) | 426 | if (list_is_last(&first->list, &vmap_area_list)) |
@@ -754,7 +754,6 @@ struct vmap_block { | |||
754 | struct vmap_area *va; | 754 | struct vmap_area *va; |
755 | struct vmap_block_queue *vbq; | 755 | struct vmap_block_queue *vbq; |
756 | unsigned long free, dirty; | 756 | unsigned long free, dirty; |
757 | DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS); | ||
758 | DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS); | 757 | DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS); |
759 | struct list_head free_list; | 758 | struct list_head free_list; |
760 | struct rcu_head rcu_head; | 759 | struct rcu_head rcu_head; |
@@ -820,7 +819,6 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask) | |||
820 | vb->va = va; | 819 | vb->va = va; |
821 | vb->free = VMAP_BBMAP_BITS; | 820 | vb->free = VMAP_BBMAP_BITS; |
822 | vb->dirty = 0; | 821 | vb->dirty = 0; |
823 | bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS); | ||
824 | bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS); | 822 | bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS); |
825 | INIT_LIST_HEAD(&vb->free_list); | 823 | INIT_LIST_HEAD(&vb->free_list); |
826 | 824 | ||
@@ -873,7 +871,6 @@ static void purge_fragmented_blocks(int cpu) | |||
873 | if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) { | 871 | if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) { |
874 | vb->free = 0; /* prevent further allocs after releasing lock */ | 872 | vb->free = 0; /* prevent further allocs after releasing lock */ |
875 | vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */ | 873 | vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */ |
876 | bitmap_fill(vb->alloc_map, VMAP_BBMAP_BITS); | ||
877 | bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS); | 874 | bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS); |
878 | spin_lock(&vbq->lock); | 875 | spin_lock(&vbq->lock); |
879 | list_del_rcu(&vb->free_list); | 876 | list_del_rcu(&vb->free_list); |
@@ -891,11 +888,6 @@ static void purge_fragmented_blocks(int cpu) | |||
891 | } | 888 | } |
892 | } | 889 | } |
893 | 890 | ||
894 | static void purge_fragmented_blocks_thiscpu(void) | ||
895 | { | ||
896 | purge_fragmented_blocks(smp_processor_id()); | ||
897 | } | ||
898 | |||
899 | static void purge_fragmented_blocks_allcpus(void) | 891 | static void purge_fragmented_blocks_allcpus(void) |
900 | { | 892 | { |
901 | int cpu; | 893 | int cpu; |
@@ -910,7 +902,6 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) | |||
910 | struct vmap_block *vb; | 902 | struct vmap_block *vb; |
911 | unsigned long addr = 0; | 903 | unsigned long addr = 0; |
912 | unsigned int order; | 904 | unsigned int order; |
913 | int purge = 0; | ||
914 | 905 | ||
915 | BUG_ON(size & ~PAGE_MASK); | 906 | BUG_ON(size & ~PAGE_MASK); |
916 | BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); | 907 | BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); |
@@ -934,17 +925,7 @@ again: | |||
934 | if (vb->free < 1UL << order) | 925 | if (vb->free < 1UL << order) |
935 | goto next; | 926 | goto next; |
936 | 927 | ||
937 | i = bitmap_find_free_region(vb->alloc_map, | 928 | i = VMAP_BBMAP_BITS - vb->free; |
938 | VMAP_BBMAP_BITS, order); | ||
939 | |||
940 | if (i < 0) { | ||
941 | if (vb->free + vb->dirty == VMAP_BBMAP_BITS) { | ||
942 | /* fragmented and no outstanding allocations */ | ||
943 | BUG_ON(vb->dirty != VMAP_BBMAP_BITS); | ||
944 | purge = 1; | ||
945 | } | ||
946 | goto next; | ||
947 | } | ||
948 | addr = vb->va->va_start + (i << PAGE_SHIFT); | 929 | addr = vb->va->va_start + (i << PAGE_SHIFT); |
949 | BUG_ON(addr_to_vb_idx(addr) != | 930 | BUG_ON(addr_to_vb_idx(addr) != |
950 | addr_to_vb_idx(vb->va->va_start)); | 931 | addr_to_vb_idx(vb->va->va_start)); |
@@ -960,9 +941,6 @@ next: | |||
960 | spin_unlock(&vb->lock); | 941 | spin_unlock(&vb->lock); |
961 | } | 942 | } |
962 | 943 | ||
963 | if (purge) | ||
964 | purge_fragmented_blocks_thiscpu(); | ||
965 | |||
966 | put_cpu_var(vmap_block_queue); | 944 | put_cpu_var(vmap_block_queue); |
967 | rcu_read_unlock(); | 945 | rcu_read_unlock(); |
968 | 946 | ||
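Note: together with the removal of alloc_map from struct vmap_block above, these hunks make vb_alloc() treat each block as bump-allocated: space is only ever handed out from the front and never reused, so vb->free alone determines the next offset (VMAP_BBMAP_BITS - vb->free), and the purge-on-fragmentation fallback that a failing bitmap_find_free_region() used to trigger becomes unreachable and is deleted. A standalone sketch of that bookkeeping (made-up names and sizes, not kernel code):

    #include <stdio.h>

    #define BLOCK_PAGES 1024UL    /* stand-in for VMAP_BBMAP_BITS */

    struct block {
            unsigned long free;   /* pages still unused at the tail */
            unsigned long dirty;  /* pages handed out and later freed */
    };

    static long block_alloc(struct block *b, unsigned long npages)
    {
            unsigned long off;

            if (b->free < npages)
                    return -1;                   /* caller tries the next block */
            off = BLOCK_PAGES - b->free;         /* next unused offset */
            b->free -= npages;
            return (long)off;
    }

    int main(void)
    {
            struct block b = { .free = BLOCK_PAGES, .dirty = 0 };

            printf("first alloc at offset %ld\n", block_alloc(&b, 4));   /* 0 */
            printf("second alloc at offset %ld\n", block_alloc(&b, 2));  /* 4 */
            return 0;
    }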
@@ -1311,15 +1289,15 @@ static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, | |||
1311 | spin_unlock(&vmap_area_lock); | 1289 | spin_unlock(&vmap_area_lock); |
1312 | } | 1290 | } |
1313 | 1291 | ||
1314 | static void clear_vm_unlist(struct vm_struct *vm) | 1292 | static void clear_vm_uninitialized_flag(struct vm_struct *vm) |
1315 | { | 1293 | { |
1316 | /* | 1294 | /* |
1317 | * Before removing VM_UNLIST, | 1295 | * Before removing VM_UNINITIALIZED, |
1318 | * we should make sure that vm has proper values. | 1296 | * we should make sure that vm has proper values. |
1319 | * Pair with smp_rmb() in show_numa_info(). | 1297 | * Pair with smp_rmb() in show_numa_info(). |
1320 | */ | 1298 | */ |
1321 | smp_wmb(); | 1299 | smp_wmb(); |
1322 | vm->flags &= ~VM_UNLIST; | 1300 | vm->flags &= ~VM_UNINITIALIZED; |
1323 | } | 1301 | } |
1324 | 1302 | ||
1325 | static struct vm_struct *__get_vm_area_node(unsigned long size, | 1303 | static struct vm_struct *__get_vm_area_node(unsigned long size, |
@@ -1453,7 +1431,7 @@ static void __vunmap(const void *addr, int deallocate_pages) | |||
1453 | return; | 1431 | return; |
1454 | 1432 | ||
1455 | if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", | 1433 | if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", |
1456 | addr)); | 1434 | addr)) |
1457 | return; | 1435 | return; |
1458 | 1436 | ||
1459 | area = remove_vm_area(addr); | 1437 | area = remove_vm_area(addr); |
@@ -1499,7 +1477,6 @@ static void __vunmap(const void *addr, int deallocate_pages) | |||
1499 | * conventions for vfree() arch-depenedent would be a really bad idea) | 1477 | * conventions for vfree() arch-depenedent would be a really bad idea) |
1500 | * | 1478 | * |
1501 | * NOTE: assumes that the object at *addr has a size >= sizeof(llist_node) | 1479 | * NOTE: assumes that the object at *addr has a size >= sizeof(llist_node) |
1502 | * | ||
1503 | */ | 1480 | */ |
1504 | void vfree(const void *addr) | 1481 | void vfree(const void *addr) |
1505 | { | 1482 | { |
@@ -1511,8 +1488,8 @@ void vfree(const void *addr) | |||
1511 | return; | 1488 | return; |
1512 | if (unlikely(in_interrupt())) { | 1489 | if (unlikely(in_interrupt())) { |
1513 | struct vfree_deferred *p = &__get_cpu_var(vfree_deferred); | 1490 | struct vfree_deferred *p = &__get_cpu_var(vfree_deferred); |
1514 | llist_add((struct llist_node *)addr, &p->list); | 1491 | if (llist_add((struct llist_node *)addr, &p->list)) |
1515 | schedule_work(&p->wq); | 1492 | schedule_work(&p->wq); |
1516 | } else | 1493 | } else |
1517 | __vunmap(addr, 1); | 1494 | __vunmap(addr, 1); |
1518 | } | 1495 | } |
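Note: the deferred-free path in vfree() now checks llist_add()'s return value, which is true only when the node was added to a previously empty list, so the drain work is scheduled once per batch instead of once per call. A userspace analog of that "kick the worker only on the first enqueue" pattern (hypothetical helpers, not the kernel API):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct node { struct node *next; };

    static struct node *head;

    static bool list_add_head(struct node *n)
    {
            bool was_empty = (head == NULL);

            n->next = head;
            head = n;
            return was_empty;     /* mirrors llist_add()'s return value */
    }

    static void kick_worker(void)
    {
            puts("worker scheduled");
    }

    int main(void)
    {
            struct node a, b, c;

            if (list_add_head(&a)) kick_worker();   /* fires */
            if (list_add_head(&b)) kick_worker();   /* skipped */
            if (list_add_head(&c)) kick_worker();   /* skipped */
            return 0;
    }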
@@ -1657,21 +1634,21 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align, | |||
1657 | if (!size || (size >> PAGE_SHIFT) > totalram_pages) | 1634 | if (!size || (size >> PAGE_SHIFT) > totalram_pages) |
1658 | goto fail; | 1635 | goto fail; |
1659 | 1636 | ||
1660 | area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST, | 1637 | area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED, |
1661 | start, end, node, gfp_mask, caller); | 1638 | start, end, node, gfp_mask, caller); |
1662 | if (!area) | 1639 | if (!area) |
1663 | goto fail; | 1640 | goto fail; |
1664 | 1641 | ||
1665 | addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller); | 1642 | addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller); |
1666 | if (!addr) | 1643 | if (!addr) |
1667 | return NULL; | 1644 | goto fail; |
1668 | 1645 | ||
1669 | /* | 1646 | /* |
1670 | * In this function, newly allocated vm_struct has VM_UNLIST flag. | 1647 | * In this function, newly allocated vm_struct has VM_UNINITIALIZED |
1671 | * It means that vm_struct is not fully initialized. | 1648 | * flag. It means that vm_struct is not fully initialized. |
1672 | * Now, it is fully initialized, so remove this flag here. | 1649 | * Now, it is fully initialized, so remove this flag here. |
1673 | */ | 1650 | */ |
1674 | clear_vm_unlist(area); | 1651 | clear_vm_uninitialized_flag(area); |
1675 | 1652 | ||
1676 | /* | 1653 | /* |
1677 | * A ref_count = 3 is needed because the vm_struct and vmap_area | 1654 | * A ref_count = 3 is needed because the vm_struct and vmap_area |
@@ -2591,11 +2568,6 @@ static void show_numa_info(struct seq_file *m, struct vm_struct *v) | |||
2591 | if (!counters) | 2568 | if (!counters) |
2592 | return; | 2569 | return; |
2593 | 2570 | ||
2594 | /* Pair with smp_wmb() in clear_vm_unlist() */ | ||
2595 | smp_rmb(); | ||
2596 | if (v->flags & VM_UNLIST) | ||
2597 | return; | ||
2598 | |||
2599 | memset(counters, 0, nr_node_ids * sizeof(unsigned int)); | 2571 | memset(counters, 0, nr_node_ids * sizeof(unsigned int)); |
2600 | 2572 | ||
2601 | for (nr = 0; nr < v->nr_pages; nr++) | 2573 | for (nr = 0; nr < v->nr_pages; nr++) |
@@ -2624,6 +2596,11 @@ static int s_show(struct seq_file *m, void *p) | |||
2624 | 2596 | ||
2625 | v = va->vm; | 2597 | v = va->vm; |
2626 | 2598 | ||
2599 | /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ | ||
2600 | smp_rmb(); | ||
2601 | if (v->flags & VM_UNINITIALIZED) | ||
2602 | return 0; | ||
2603 | |||
2627 | seq_printf(m, "0x%pK-0x%pK %7ld", | 2604 | seq_printf(m, "0x%pK-0x%pK %7ld", |
2628 | v->addr, v->addr + v->size, v->size); | 2605 | v->addr, v->addr + v->size, v->size); |
2629 | 2606 | ||
diff --git a/mm/vmscan.c b/mm/vmscan.c index 99b3ac7771ad..2cff0d491c6d 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -1443,25 +1443,11 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, | |||
1443 | * as there is no guarantee the dirtying process is throttled in the | 1443 | * as there is no guarantee the dirtying process is throttled in the |
1444 | * same way balance_dirty_pages() manages. | 1444 | * same way balance_dirty_pages() manages. |
1445 | * | 1445 | * |
1446 | * This scales the number of dirty pages that must be under writeback | ||
1447 | * before a zone gets flagged ZONE_WRITEBACK. It is a simple backoff | ||
1448 | * function that has the most effect in the range DEF_PRIORITY to | ||
1449 | * DEF_PRIORITY-2 which is the priority reclaim is considered to be | ||
1450 | * in trouble and reclaim is considered to be in trouble. | ||
1451 | * | ||
1452 | * DEF_PRIORITY 100% isolated pages must be PageWriteback to throttle | ||
1453 | * DEF_PRIORITY-1 50% must be PageWriteback | ||
1454 | * DEF_PRIORITY-2 25% must be PageWriteback, kswapd in trouble | ||
1455 | * ... | ||
1456 | * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any | ||
1457 | * isolated page is PageWriteback | ||
1458 | * | ||
1459 | * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number | 1446 | * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number |
1460 | * of pages under pages flagged for immediate reclaim and stall if any | 1447 | * of pages under pages flagged for immediate reclaim and stall if any |
1461 | * are encountered in the nr_immediate check below. | 1448 | * are encountered in the nr_immediate check below. |
1462 | */ | 1449 | */ |
1463 | if (nr_writeback && nr_writeback >= | 1450 | if (nr_writeback && nr_writeback == nr_taken) |
1464 | (nr_taken >> (DEF_PRIORITY - sc->priority))) | ||
1465 | zone_set_flag(zone, ZONE_WRITEBACK); | 1451 | zone_set_flag(zone, ZONE_WRITEBACK); |
1466 | 1452 | ||
1467 | /* | 1453 | /* |
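Note: shrink_inactive_list() previously flagged the zone ZONE_WRITEBACK once a priority-scaled share of the isolated pages was under writeback; it now does so only when every isolated page is (nr_writeback == nr_taken). A small worked example of the old backoff schedule described in the removed comment (hypothetical page counts, not kernel code):

    #include <stdio.h>

    #define DEF_PRIORITY 12            /* the kernel's default reclaim priority */

    int main(void)
    {
            unsigned long nr_taken = 32;   /* e.g. one SWAP_CLUSTER_MAX batch */
            int priority;

            /* old rule: flag ZONE_WRITEBACK once nr_writeback reaches
             * nr_taken >> (DEF_PRIORITY - priority); the threshold halves
             * with each priority drop.  New rule: only when nr_writeback
             * equals nr_taken. */
            for (priority = DEF_PRIORITY; priority >= DEF_PRIORITY - 6; priority--)
                    printf("priority %2d: old threshold = %2lu of %lu pages\n",
                           priority, nr_taken >> (DEF_PRIORITY - priority), nr_taken);
            return 0;
    }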
@@ -2361,8 +2347,10 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, | |||
2361 | aborted_reclaim = shrink_zones(zonelist, sc); | 2347 | aborted_reclaim = shrink_zones(zonelist, sc); |
2362 | 2348 | ||
2363 | /* | 2349 | /* |
2364 | * Don't shrink slabs when reclaiming memory from | 2350 | * Don't shrink slabs when reclaiming memory from over limit |
2365 | * over limit cgroups | 2351 | * cgroups but do shrink slab at least once when aborting |
2352 | * reclaim for compaction to avoid unevenly scanning file/anon | ||
2353 | * LRU pages over slab pages. | ||
2366 | */ | 2354 | */ |
2367 | if (global_reclaim(sc)) { | 2355 | if (global_reclaim(sc)) { |
2368 | unsigned long lru_pages = 0; | 2356 | unsigned long lru_pages = 0; |
@@ -2404,7 +2392,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, | |||
2404 | WB_REASON_TRY_TO_FREE_PAGES); | 2392 | WB_REASON_TRY_TO_FREE_PAGES); |
2405 | sc->may_writepage = 1; | 2393 | sc->may_writepage = 1; |
2406 | } | 2394 | } |
2407 | } while (--sc->priority >= 0); | 2395 | } while (--sc->priority >= 0 && !aborted_reclaim); |
2408 | 2396 | ||
2409 | out: | 2397 | out: |
2410 | delayacct_freepages_end(); | 2398 | delayacct_freepages_end(); |
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index f97869f1f09b..6031e2380638 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib | |||
@@ -311,6 +311,11 @@ cmd_lzo = (cat $(filter-out FORCE,$^) | \ | |||
311 | lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ | 311 | lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ |
312 | (rm -f $@ ; false) | 312 | (rm -f $@ ; false) |
313 | 313 | ||
314 | quiet_cmd_lz4 = LZ4 $@ | ||
315 | cmd_lz4 = (cat $(filter-out FORCE,$^) | \ | ||
316 | lz4c -l -c1 stdin stdout && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ | ||
317 | (rm -f $@ ; false) | ||
318 | |||
314 | # U-Boot mkimage | 319 | # U-Boot mkimage |
315 | # --------------------------------------------------------------------------- | 320 | # --------------------------------------------------------------------------- |
316 | 321 | ||
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 6afcd1239ca5..2ee9eb750560 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
@@ -6,6 +6,7 @@ | |||
6 | # Licensed under the terms of the GNU GPL License version 2 | 6 | # Licensed under the terms of the GNU GPL License version 2 |
7 | 7 | ||
8 | use strict; | 8 | use strict; |
9 | use POSIX; | ||
9 | 10 | ||
10 | my $P = $0; | 11 | my $P = $0; |
11 | $P =~ s@.*/@@g; | 12 | $P =~ s@.*/@@g; |
@@ -399,37 +400,52 @@ sub seed_camelcase_includes { | |||
399 | return if ($camelcase_seeded); | 400 | return if ($camelcase_seeded); |
400 | 401 | ||
401 | my $files; | 402 | my $files; |
402 | my $camelcase_git_file = ""; | 403 | my $camelcase_cache = ""; |
404 | my @include_files = (); | ||
405 | |||
406 | $camelcase_seeded = 1; | ||
403 | 407 | ||
404 | if (-d ".git") { | 408 | if (-d ".git") { |
405 | my $git_last_include_commit = `git log --no-merges --pretty=format:"%h%n" -1 -- include`; | 409 | my $git_last_include_commit = `git log --no-merges --pretty=format:"%h%n" -1 -- include`; |
406 | chomp $git_last_include_commit; | 410 | chomp $git_last_include_commit; |
407 | $camelcase_git_file = ".checkpatch-camelcase.$git_last_include_commit"; | 411 | $camelcase_cache = ".checkpatch-camelcase.git.$git_last_include_commit"; |
408 | if (-f $camelcase_git_file) { | ||
409 | open(my $camelcase_file, '<', "$camelcase_git_file") | ||
410 | or warn "$P: Can't read '$camelcase_git_file' $!\n"; | ||
411 | while (<$camelcase_file>) { | ||
412 | chomp; | ||
413 | $camelcase{$_} = 1; | ||
414 | } | ||
415 | close($camelcase_file); | ||
416 | |||
417 | return; | ||
418 | } | ||
419 | $files = `git ls-files include`; | ||
420 | } else { | 412 | } else { |
413 | my $last_mod_date = 0; | ||
421 | $files = `find $root/include -name "*.h"`; | 414 | $files = `find $root/include -name "*.h"`; |
415 | @include_files = split('\n', $files); | ||
416 | foreach my $file (@include_files) { | ||
417 | my $date = POSIX::strftime("%Y%m%d%H%M", | ||
418 | localtime((stat $file)[9])); | ||
419 | $last_mod_date = $date if ($last_mod_date < $date); | ||
420 | } | ||
421 | $camelcase_cache = ".checkpatch-camelcase.date.$last_mod_date"; | ||
422 | } | ||
423 | |||
424 | if ($camelcase_cache ne "" && -f $camelcase_cache) { | ||
425 | open(my $camelcase_file, '<', "$camelcase_cache") | ||
426 | or warn "$P: Can't read '$camelcase_cache' $!\n"; | ||
427 | while (<$camelcase_file>) { | ||
428 | chomp; | ||
429 | $camelcase{$_} = 1; | ||
430 | } | ||
431 | close($camelcase_file); | ||
432 | |||
433 | return; | ||
434 | } | ||
435 | |||
436 | if (-d ".git") { | ||
437 | $files = `git ls-files "include/*.h"`; | ||
438 | @include_files = split('\n', $files); | ||
422 | } | 439 | } |
423 | my @include_files = split('\n', $files); | 440 | |
424 | foreach my $file (@include_files) { | 441 | foreach my $file (@include_files) { |
425 | seed_camelcase_file($file); | 442 | seed_camelcase_file($file); |
426 | } | 443 | } |
427 | $camelcase_seeded = 1; | ||
428 | 444 | ||
429 | if ($camelcase_git_file ne "") { | 445 | if ($camelcase_cache ne "") { |
430 | unlink glob ".checkpatch-camelcase.*"; | 446 | unlink glob ".checkpatch-camelcase.*"; |
431 | open(my $camelcase_file, '>', "$camelcase_git_file") | 447 | open(my $camelcase_file, '>', "$camelcase_cache") |
432 | or warn "$P: Can't write '$camelcase_git_file' $!\n"; | 448 | or warn "$P: Can't write '$camelcase_cache' $!\n"; |
433 | foreach (sort { lc($a) cmp lc($b) } keys(%camelcase)) { | 449 | foreach (sort { lc($a) cmp lc($b) } keys(%camelcase)) { |
434 | print $camelcase_file ("$_\n"); | 450 | print $camelcase_file ("$_\n"); |
435 | } | 451 | } |
diff --git a/usr/Kconfig b/usr/Kconfig index 085872bb2bb5..642f503d3e9f 100644 --- a/usr/Kconfig +++ b/usr/Kconfig | |||
@@ -90,6 +90,15 @@ config RD_LZO | |||
90 | Support loading of a LZO encoded initial ramdisk or cpio buffer | 90 | Support loading of a LZO encoded initial ramdisk or cpio buffer |
91 | If unsure, say N. | 91 | If unsure, say N. |
92 | 92 | ||
93 | config RD_LZ4 | ||
94 | bool "Support initial ramdisks compressed using LZ4" if EXPERT | ||
95 | default !EXPERT | ||
96 | depends on BLK_DEV_INITRD | ||
97 | select DECOMPRESS_LZ4 | ||
98 | help | ||
99 | Support loading of a LZ4 encoded initial ramdisk or cpio buffer | ||
100 | If unsure, say N. | ||
101 | |||
93 | choice | 102 | choice |
94 | prompt "Built-in initramfs compression mode" if INITRAMFS_SOURCE!="" | 103 | prompt "Built-in initramfs compression mode" if INITRAMFS_SOURCE!="" |
95 | help | 104 | help |