aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-04-30 20:37:43 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-04-30 20:37:43 -0400
commit5f56886521d6ddd3648777fae44d82382dd8c87f (patch)
treeaa0db6331cdb01c23f1884439840aadd31bbcca4
parentf1e9a236e5ddab6c349611ee86f54291916f226c (diff)
parente2a8b0a779787314eca1061308a8182e6c5bfabd (diff)
Merge branch 'akpm' (incoming from Andrew)
Merge third batch of fixes from Andrew Morton: "Most of the rest. I still have two large patchsets against AIO and IPC, but they're a bit stuck behind other trees and I'm about to vanish for six days. - random fixlets - inotify - more of the MM queue - show_stack() cleanups - DMI update - kthread/workqueue things - compat cleanups - epoll udpates - binfmt updates - nilfs2 - hfs - hfsplus - ptrace - kmod - coredump - kexec - rbtree - pids - pidns - pps - semaphore tweaks - some w1 patches - relay updates - core Kconfig changes - sysrq tweaks" * emailed patches from Andrew Morton <akpm@linux-foundation.org>: (109 commits) Documentation/sysrq: fix inconstistent help message of sysrq key ethernet/emac/sysrq: fix inconstistent help message of sysrq key sparc/sysrq: fix inconstistent help message of sysrq key powerpc/xmon/sysrq: fix inconstistent help message of sysrq key ARM/etm/sysrq: fix inconstistent help message of sysrq key power/sysrq: fix inconstistent help message of sysrq key kgdb/sysrq: fix inconstistent help message of sysrq key lib/decompress.c: fix initconst notifier-error-inject: fix module names in Kconfig kernel/sys.c: make prctl(PR_SET_MM) generally available UAPI: remove empty Kbuild files menuconfig: print more info for symbol without prompts init/Kconfig: re-order CONFIG_EXPERT options to fix menuconfig display kconfig menu: move Virtualization drivers near other virtualization options Kconfig: consolidate CONFIG_DEBUG_STRICT_USER_COPY_CHECKS relay: use macro PAGE_ALIGN instead of FIX_SIZE kernel/relay.c: move FIX_SIZE macro into relay.c kernel/relay.c: remove unused function argument actor drivers/w1/slaves/w1_ds2760.c: fix the error handling in w1_ds2760_add_slave() drivers/w1/slaves/w1_ds2781.c: fix the error handling in w1_ds2781_add_slave() ...
-rw-r--r--Documentation/sysrq.txt20
-rw-r--r--arch/alpha/kernel/process.c1
-rw-r--r--arch/alpha/kernel/traps.c7
-rw-r--r--arch/arc/kernel/stacktrace.c7
-rw-r--r--arch/arc/kernel/troubleshoot.c3
-rw-r--r--arch/arm/kernel/etm.c2
-rw-r--r--arch/arm/kernel/process.c8
-rw-r--r--arch/arm/kernel/traps.c7
-rw-r--r--arch/arm64/kernel/process.c7
-rw-r--r--arch/arm64/kernel/traps.c7
-rw-r--r--arch/avr32/kernel/process.c13
-rw-r--r--arch/blackfin/kernel/dumpstack.c1
-rw-r--r--arch/blackfin/kernel/trace.c2
-rw-r--r--arch/c6x/kernel/traps.c10
-rw-r--r--arch/cris/arch-v10/kernel/process.c3
-rw-r--r--arch/cris/arch-v32/kernel/process.c3
-rw-r--r--arch/cris/kernel/traps.c7
-rw-r--r--arch/frv/kernel/traps.c14
-rw-r--r--arch/h8300/kernel/process.c2
-rw-r--r--arch/h8300/kernel/traps.c7
-rw-r--r--arch/hexagon/kernel/traps.c8
-rw-r--r--arch/hexagon/kernel/vm_events.c2
-rw-r--r--arch/ia64/kernel/process.c12
-rw-r--r--arch/ia64/kernel/setup.c1
-rw-r--r--arch/m32r/kernel/process.c2
-rw-r--r--arch/m32r/kernel/traps.c9
-rw-r--r--arch/m68k/kernel/traps.c12
-rw-r--r--arch/metag/kernel/process.c2
-rw-r--r--arch/metag/kernel/traps.c6
-rw-r--r--arch/microblaze/kernel/process.c2
-rw-r--r--arch/microblaze/kernel/traps.c6
-rw-r--r--arch/mips/kernel/traps.c15
-rw-r--r--arch/mn10300/kernel/process.c1
-rw-r--r--arch/mn10300/kernel/traps.c11
-rw-r--r--arch/openrisc/kernel/process.c1
-rw-r--r--arch/openrisc/kernel/traps.c11
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/parisc/Kconfig.debug14
-rw-r--r--arch/parisc/kernel/traps.c10
-rw-r--r--arch/powerpc/kernel/process.c14
-rw-r--r--arch/powerpc/xmon/xmon.c2
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/Kconfig.debug14
-rw-r--r--arch/s390/kernel/dumpstack.c26
-rw-r--r--arch/s390/lib/Makefile1
-rw-r--r--arch/score/kernel/traps.c12
-rw-r--r--arch/sh/kernel/dumpstack.c6
-rw-r--r--arch/sh/kernel/process_32.c6
-rw-r--r--arch/sh/kernel/process_64.c1
-rw-r--r--arch/sparc/kernel/process_32.c23
-rw-r--r--arch/sparc/kernel/process_64.c6
-rw-r--r--arch/sparc/kernel/traps_64.c7
-rw-r--r--arch/sparc/lib/Makefile1
-rw-r--r--arch/sparc/lib/usercopy.c9
-rw-r--r--arch/tile/Kconfig8
-rw-r--r--arch/tile/include/asm/uaccess.h7
-rw-r--r--arch/tile/kernel/process.c3
-rw-r--r--arch/tile/lib/uaccess.c8
-rw-r--r--arch/um/kernel/sysrq.c12
-rw-r--r--arch/um/sys-ppc/sysrq.c2
-rw-r--r--arch/unicore32/kernel/process.c6
-rw-r--r--arch/unicore32/kernel/traps.c6
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/Kconfig.debug14
-rw-r--r--arch/x86/ia32/ia32_aout.c1
-rw-r--r--arch/x86/include/asm/bug.h3
-rw-r--r--arch/x86/kernel/dumpstack.c28
-rw-r--r--arch/x86/kernel/dumpstack_32.c4
-rw-r--r--arch/x86/kernel/dumpstack_64.c6
-rw-r--r--arch/x86/kernel/process.c24
-rw-r--r--arch/x86/kernel/process_32.c2
-rw-r--r--arch/x86/kernel/process_64.c1
-rw-r--r--arch/x86/kernel/setup.c1
-rw-r--r--arch/x86/lib/usercopy_32.c6
-rw-r--r--arch/xtensa/kernel/traps.c10
-rw-r--r--drivers/Kconfig4
-rw-r--r--drivers/block/aoe/aoechr.c3
-rw-r--r--drivers/block/nbd.c2
-rw-r--r--drivers/char/random.c1
-rw-r--r--drivers/firmware/dmi_scan.c137
-rw-r--r--drivers/memstick/host/r592.c8
-rw-r--r--drivers/message/i2o/i2o_config.c10
-rw-r--r--drivers/net/ethernet/ibm/emac/debug.c2
-rw-r--r--drivers/pps/Kconfig6
-rw-r--r--drivers/pps/kc.c6
-rw-r--r--drivers/staging/speakup/kobjects.c6
-rw-r--r--drivers/staging/speakup/speakup.h1
-rw-r--r--drivers/staging/speakup/varhandlers.c46
-rw-r--r--drivers/staging/zcache/Kconfig6
-rw-r--r--drivers/staging/zcache/ramster.h6
-rw-r--r--drivers/staging/zcache/ramster/debug.c2
-rw-r--r--drivers/staging/zcache/ramster/nodemanager.c9
-rw-r--r--drivers/staging/zcache/ramster/ramster.c29
-rw-r--r--drivers/staging/zcache/ramster/ramster.h2
-rw-r--r--drivers/staging/zcache/ramster/ramster_nodemanager.h2
-rw-r--r--drivers/staging/zcache/tmem.c6
-rw-r--r--drivers/staging/zcache/tmem.h8
-rw-r--r--drivers/staging/zcache/zcache-main.c64
-rw-r--r--drivers/staging/zcache/zcache.h2
-rw-r--r--drivers/usb/storage/realtek_cr.c5
-rw-r--r--drivers/w1/slaves/w1_bq27000.c4
-rw-r--r--drivers/w1/slaves/w1_ds2760.c3
-rw-r--r--drivers/w1/slaves/w1_ds2780.c3
-rw-r--r--drivers/w1/slaves/w1_ds2781.c3
-rw-r--r--drivers/xen/Kconfig4
-rw-r--r--drivers/xen/tmem.c55
-rw-r--r--drivers/xen/xen-selfballoon.c13
-rw-r--r--fs/Kconfig.binfmt14
-rw-r--r--fs/Makefile5
-rw-r--r--fs/binfmt_aout.c1
-rw-r--r--fs/binfmt_elf.c6
-rw-r--r--fs/binfmt_elf_fdpic.c2
-rw-r--r--fs/binfmt_misc.c24
-rw-r--r--fs/block_dev.c8
-rw-r--r--fs/coredump.c84
-rw-r--r--fs/dcache.c4
-rw-r--r--fs/eventpoll.c128
-rw-r--r--fs/exec.c13
-rw-r--r--fs/fs-writeback.c1
-rw-r--r--fs/hfs/bfind.c10
-rw-r--r--fs/hfs/bitmap.c4
-rw-r--r--fs/hfs/bnode.c39
-rw-r--r--fs/hfs/brec.c19
-rw-r--r--fs/hfs/btree.c31
-rw-r--r--fs/hfs/catalog.c24
-rw-r--r--fs/hfs/dir.c20
-rw-r--r--fs/hfs/extent.c68
-rw-r--r--fs/hfs/hfs_fs.h22
-rw-r--r--fs/hfs/inode.c15
-rw-r--r--fs/hfs/mdb.c23
-rw-r--r--fs/hfs/super.c47
-rw-r--r--fs/hfsplus/attributes.c26
-rw-r--r--fs/hfsplus/bfind.c14
-rw-r--r--fs/hfsplus/bitmap.c13
-rw-r--r--fs/hfsplus/bnode.c36
-rw-r--r--fs/hfsplus/brec.c14
-rw-r--r--fs/hfsplus/btree.c29
-rw-r--r--fs/hfsplus/catalog.c11
-rw-r--r--fs/hfsplus/dir.c14
-rw-r--r--fs/hfsplus/extents.c53
-rw-r--r--fs/hfsplus/hfsplus_fs.h20
-rw-r--r--fs/hfsplus/inode.c4
-rw-r--r--fs/hfsplus/options.c22
-rw-r--r--fs/hfsplus/super.c56
-rw-r--r--fs/hfsplus/wrapper.c8
-rw-r--r--fs/hfsplus/xattr.c41
-rw-r--r--fs/nilfs2/inode.c17
-rw-r--r--fs/nilfs2/mdt.c19
-rw-r--r--fs/nilfs2/page.c70
-rw-r--r--fs/nilfs2/page.h3
-rw-r--r--fs/notify/inotify/inotify_user.c6
-rw-r--r--fs/proc/base.c5
-rw-r--r--include/Kbuild3
-rw-r--r--include/linux/cleancache.h4
-rw-r--r--include/linux/dmi.h2
-rw-r--r--include/linux/frontswap.h36
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/kernel.h2
-rw-r--r--include/linux/kmod.h17
-rw-r--r--include/linux/kthread.h1
-rw-r--r--include/linux/memory.h2
-rw-r--r--include/linux/pid_namespace.h4
-rw-r--r--include/linux/printk.h15
-rw-r--r--include/linux/relay.h3
-rw-r--r--include/linux/sched.h19
-rw-r--r--include/linux/smp.h1
-rw-r--r--include/linux/string_helpers.h58
-rw-r--r--include/linux/workqueue.h5
-rw-r--r--include/scsi/Kbuild1
-rw-r--r--include/uapi/linux/ptrace.h12
-rw-r--r--include/xen/tmem.h8
-rw-r--r--init/Kconfig80
-rw-r--r--init/do_mounts_initrd.c8
-rw-r--r--kernel/compat.c65
-rw-r--r--kernel/debug/debug_core.c2
-rw-r--r--kernel/kexec.c30
-rw-r--r--kernel/kmod.c98
-rw-r--r--kernel/kthread.c19
-rw-r--r--kernel/panic.c6
-rw-r--r--kernel/pid.c11
-rw-r--r--kernel/pid_namespace.c2
-rw-r--r--kernel/power/poweroff.c2
-rw-r--r--kernel/printk.c62
-rw-r--r--kernel/ptrace.c80
-rw-r--r--kernel/range.c3
-rw-r--r--kernel/relay.c14
-rw-r--r--kernel/sched/core.c1
-rw-r--r--kernel/semaphore.c8
-rw-r--r--kernel/signal.c9
-rw-r--r--kernel/smp.c91
-rw-r--r--kernel/softirq.c6
-rw-r--r--kernel/sys.c221
-rw-r--r--kernel/timer.c143
-rw-r--r--kernel/workqueue.c79
-rw-r--r--kernel/workqueue_internal.h12
-rw-r--r--lib/Kconfig.debug25
-rw-r--r--lib/Makefile5
-rw-r--r--lib/decompress.c2
-rw-r--r--lib/dump_stack.c11
-rw-r--r--lib/dynamic_debug.c48
-rw-r--r--lib/rbtree_test.c9
-rw-r--r--lib/string_helpers.c133
-rw-r--r--lib/test-string_helpers.c103
-rw-r--r--lib/usercopy.c (renamed from arch/s390/lib/usercopy.c)3
-rw-r--r--mm/cleancache.c265
-rw-r--r--mm/frontswap.c156
-rw-r--r--mm/swapfile.c17
-rw-r--r--scripts/kconfig/menu.c27
-rw-r--r--security/keys/request_key.c13
-rw-r--r--tools/testing/selftests/Makefile8
-rw-r--r--tools/testing/selftests/ptrace/Makefile10
-rw-r--r--tools/testing/selftests/ptrace/peeksiginfo.c214
-rw-r--r--tools/testing/selftests/soft-dirty/Makefile10
-rw-r--r--tools/testing/selftests/soft-dirty/soft-dirty.c114
214 files changed, 2820 insertions, 1690 deletions
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index 2a4cdda4828e..8cb4d7842a5f 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -129,9 +129,9 @@ On all - write a character to /proc/sysrq-trigger. e.g.:
129 129
130* Okay, so what can I use them for? 130* Okay, so what can I use them for?
131~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 131~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
132Well, un'R'aw is very handy when your X server or a svgalib program crashes. 132Well, unraw(r) is very handy when your X server or a svgalib program crashes.
133 133
134sa'K' (Secure Access Key) is useful when you want to be sure there is no 134sak(k) (Secure Access Key) is useful when you want to be sure there is no
135trojan program running at console which could grab your password 135trojan program running at console which could grab your password
136when you would try to login. It will kill all programs on given console, 136when you would try to login. It will kill all programs on given console,
137thus letting you make sure that the login prompt you see is actually 137thus letting you make sure that the login prompt you see is actually
@@ -143,20 +143,20 @@ IMPORTANT: such. :IMPORTANT
143useful when you want to exit a program that will not let you switch consoles. 143useful when you want to exit a program that will not let you switch consoles.
144(For example, X or a svgalib program.) 144(For example, X or a svgalib program.)
145 145
146re'B'oot is good when you're unable to shut down. But you should also 'S'ync 146reboot(b) is good when you're unable to shut down. But you should also
147and 'U'mount first. 147sync(s) and umount(u) first.
148 148
149'C'rash can be used to manually trigger a crashdump when the system is hung. 149crash(c) can be used to manually trigger a crashdump when the system is hung.
150Note that this just triggers a crash if there is no dump mechanism available. 150Note that this just triggers a crash if there is no dump mechanism available.
151 151
152'S'ync is great when your system is locked up, it allows you to sync your 152sync(s) is great when your system is locked up, it allows you to sync your
153disks and will certainly lessen the chance of data loss and fscking. Note 153disks and will certainly lessen the chance of data loss and fscking. Note
154that the sync hasn't taken place until you see the "OK" and "Done" appear 154that the sync hasn't taken place until you see the "OK" and "Done" appear
155on the screen. (If the kernel is really in strife, you may not ever get the 155on the screen. (If the kernel is really in strife, you may not ever get the
156OK or Done message...) 156OK or Done message...)
157 157
158'U'mount is basically useful in the same ways as 'S'ync. I generally 'S'ync, 158umount(u) is basically useful in the same ways as sync(s). I generally sync(s),
159'U'mount, then re'B'oot when my system locks. It's saved me many a fsck. 159umount(u), then reboot(b) when my system locks. It's saved me many a fsck.
160Again, the unmount (remount read-only) hasn't taken place until you see the 160Again, the unmount (remount read-only) hasn't taken place until you see the
161"OK" and "Done" message appear on the screen. 161"OK" and "Done" message appear on the screen.
162 162
@@ -165,11 +165,11 @@ kernel messages you do not want to see. Selecting '0' will prevent all but
165the most urgent kernel messages from reaching your console. (They will 165the most urgent kernel messages from reaching your console. (They will
166still be logged if syslogd/klogd are alive, though.) 166still be logged if syslogd/klogd are alive, though.)
167 167
168t'E'rm and k'I'll are useful if you have some sort of runaway process you 168term(e) and kill(i) are useful if you have some sort of runaway process you
169are unable to kill any other way, especially if it's spawning other 169are unable to kill any other way, especially if it's spawning other
170processes. 170processes.
171 171
172"'J'ust thaw it" is useful if your system becomes unresponsive due to a frozen 172"just thaw it(j)" is useful if your system becomes unresponsive due to a frozen
173(probably root) filesystem via the FIFREEZE ioctl. 173(probably root) filesystem via the FIFREEZE ioctl.
174 174
175* Sometimes SysRq seems to get 'stuck' after using it, what can I do? 175* Sometimes SysRq seems to get 'stuck' after using it, what can I do?
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index a3fd8a29ccac..ab80a80d38a2 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -175,6 +175,7 @@ machine_power_off(void)
175void 175void
176show_regs(struct pt_regs *regs) 176show_regs(struct pt_regs *regs)
177{ 177{
178 show_regs_print_info(KERN_DEFAULT);
178 dik_show_regs(regs, NULL); 179 dik_show_regs(regs, NULL);
179} 180}
180 181
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index 4037461a6493..affccb959a9e 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -169,13 +169,6 @@ void show_stack(struct task_struct *task, unsigned long *sp)
169 dik_show_trace(sp); 169 dik_show_trace(sp);
170} 170}
171 171
172void dump_stack(void)
173{
174 show_stack(NULL, NULL);
175}
176
177EXPORT_SYMBOL(dump_stack);
178
179void 172void
180die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15) 173die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
181{ 174{
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index a63ff842564b..ca0207b9d5b6 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -220,13 +220,6 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
220 show_stacktrace(tsk, NULL); 220 show_stacktrace(tsk, NULL);
221} 221}
222 222
223/* Expected by Rest of kernel code */
224void dump_stack(void)
225{
226 show_stacktrace(NULL, NULL);
227}
228EXPORT_SYMBOL(dump_stack);
229
230/* Another API expected by schedular, shows up in "ps" as Wait Channel 223/* Another API expected by schedular, shows up in "ps" as Wait Channel
231 * Ofcourse just returning schedule( ) would be pointless so unwind until 224 * Ofcourse just returning schedule( ) would be pointless so unwind until
232 * the function is not in schedular code 225 * the function is not in schedular code
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 7c10873c311f..0aec01985bf9 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -71,7 +71,7 @@ void print_task_path_n_nm(struct task_struct *tsk, char *buf)
71 } 71 }
72 72
73done: 73done:
74 pr_info("%s, TGID %u\n", path_nm, tsk->tgid); 74 pr_info("Path: %s\n", path_nm);
75} 75}
76EXPORT_SYMBOL(print_task_path_n_nm); 76EXPORT_SYMBOL(print_task_path_n_nm);
77 77
@@ -163,6 +163,7 @@ void show_regs(struct pt_regs *regs)
163 return; 163 return;
164 164
165 print_task_path_n_nm(tsk, buf); 165 print_task_path_n_nm(tsk, buf);
166 show_regs_print_info(KERN_INFO);
166 167
167 if (current->thread.cause_code) 168 if (current->thread.cause_code)
168 show_ecr_verbose(regs); 169 show_ecr_verbose(regs);
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 9b6de8c988f3..8ff0ecdc637f 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -254,7 +254,7 @@ static void sysrq_etm_dump(int key)
254 254
255static struct sysrq_key_op sysrq_etm_op = { 255static struct sysrq_key_op sysrq_etm_op = {
256 .handler = sysrq_etm_dump, 256 .handler = sysrq_etm_dump,
257 .help_msg = "ETM buffer dump", 257 .help_msg = "etm-buffer-dump(v)",
258 .action_msg = "etm", 258 .action_msg = "etm",
259}; 259};
260 260
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index c9a5e2ce8aa9..ae58d3b37d9d 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -225,11 +225,8 @@ void __show_regs(struct pt_regs *regs)
225 unsigned long flags; 225 unsigned long flags;
226 char buf[64]; 226 char buf[64];
227 227
228 printk("CPU: %d %s (%s %.*s)\n", 228 show_regs_print_info(KERN_DEFAULT);
229 raw_smp_processor_id(), print_tainted(), 229
230 init_utsname()->release,
231 (int)strcspn(init_utsname()->version, " "),
232 init_utsname()->version);
233 print_symbol("PC is at %s\n", instruction_pointer(regs)); 230 print_symbol("PC is at %s\n", instruction_pointer(regs));
234 print_symbol("LR is at %s\n", regs->ARM_lr); 231 print_symbol("LR is at %s\n", regs->ARM_lr);
235 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n" 232 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
@@ -284,7 +281,6 @@ void __show_regs(struct pt_regs *regs)
284void show_regs(struct pt_regs * regs) 281void show_regs(struct pt_regs * regs)
285{ 282{
286 printk("\n"); 283 printk("\n");
287 printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
288 __show_regs(regs); 284 __show_regs(regs);
289 dump_stack(); 285 dump_stack();
290} 286}
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 1c089119b2d7..18b32e8e4497 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -204,13 +204,6 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
204} 204}
205#endif 205#endif
206 206
207void dump_stack(void)
208{
209 dump_backtrace(NULL, NULL);
210}
211
212EXPORT_SYMBOL(dump_stack);
213
214void show_stack(struct task_struct *tsk, unsigned long *sp) 207void show_stack(struct task_struct *tsk, unsigned long *sp)
215{ 208{
216 dump_backtrace(NULL, tsk); 209 dump_backtrace(NULL, tsk);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 6f3822f98dcd..f4919721f7dd 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -145,11 +145,7 @@ void __show_regs(struct pt_regs *regs)
145{ 145{
146 int i; 146 int i;
147 147
148 printk("CPU: %d %s (%s %.*s)\n", 148 show_regs_print_info(KERN_DEFAULT);
149 raw_smp_processor_id(), print_tainted(),
150 init_utsname()->release,
151 (int)strcspn(init_utsname()->version, " "),
152 init_utsname()->version);
153 print_symbol("PC is at %s\n", instruction_pointer(regs)); 149 print_symbol("PC is at %s\n", instruction_pointer(regs));
154 print_symbol("LR is at %s\n", regs->regs[30]); 150 print_symbol("LR is at %s\n", regs->regs[30]);
155 printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n", 151 printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
@@ -166,7 +162,6 @@ void __show_regs(struct pt_regs *regs)
166void show_regs(struct pt_regs * regs) 162void show_regs(struct pt_regs * regs)
167{ 163{
168 printk("\n"); 164 printk("\n");
169 printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
170 __show_regs(regs); 165 __show_regs(regs);
171} 166}
172 167
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index b3c5f628bdb4..61d7dd29f756 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -167,13 +167,6 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
167 } 167 }
168} 168}
169 169
170void dump_stack(void)
171{
172 dump_backtrace(NULL, NULL);
173}
174
175EXPORT_SYMBOL(dump_stack);
176
177void show_stack(struct task_struct *tsk, unsigned long *sp) 170void show_stack(struct task_struct *tsk, unsigned long *sp)
178{ 171{
179 dump_backtrace(NULL, tsk); 172 dump_backtrace(NULL, tsk);
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index 073c3c2fa521..e7b61494c312 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -204,14 +204,6 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
204 show_stack_log_lvl(tsk, (unsigned long)stack, NULL, ""); 204 show_stack_log_lvl(tsk, (unsigned long)stack, NULL, "");
205} 205}
206 206
207void dump_stack(void)
208{
209 unsigned long stack;
210
211 show_trace_log_lvl(current, &stack, NULL, "");
212}
213EXPORT_SYMBOL(dump_stack);
214
215static const char *cpu_modes[] = { 207static const char *cpu_modes[] = {
216 "Application", "Supervisor", "Interrupt level 0", "Interrupt level 1", 208 "Application", "Supervisor", "Interrupt level 0", "Interrupt level 1",
217 "Interrupt level 2", "Interrupt level 3", "Exception", "NMI" 209 "Interrupt level 2", "Interrupt level 3", "Exception", "NMI"
@@ -223,6 +215,8 @@ void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl)
223 unsigned long lr = regs->lr; 215 unsigned long lr = regs->lr;
224 unsigned long mode = (regs->sr & MODE_MASK) >> MODE_SHIFT; 216 unsigned long mode = (regs->sr & MODE_MASK) >> MODE_SHIFT;
225 217
218 show_regs_print_info(log_lvl);
219
226 if (!user_mode(regs)) { 220 if (!user_mode(regs)) {
227 sp = (unsigned long)regs + FRAME_SIZE_FULL; 221 sp = (unsigned long)regs + FRAME_SIZE_FULL;
228 222
@@ -260,9 +254,6 @@ void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl)
260 regs->sr & SR_I0M ? '0' : '.', 254 regs->sr & SR_I0M ? '0' : '.',
261 regs->sr & SR_GM ? 'G' : 'g'); 255 regs->sr & SR_GM ? 'G' : 'g');
262 printk("%sCPU Mode: %s\n", log_lvl, cpu_modes[mode]); 256 printk("%sCPU Mode: %s\n", log_lvl, cpu_modes[mode]);
263 printk("%sProcess: %s [%d] (task: %p thread: %p)\n",
264 log_lvl, current->comm, current->pid, current,
265 task_thread_info(current));
266} 257}
267 258
268void show_regs(struct pt_regs *regs) 259void show_regs(struct pt_regs *regs)
diff --git a/arch/blackfin/kernel/dumpstack.c b/arch/blackfin/kernel/dumpstack.c
index 5cfbaa298211..95ba6d9e9a3d 100644
--- a/arch/blackfin/kernel/dumpstack.c
+++ b/arch/blackfin/kernel/dumpstack.c
@@ -168,6 +168,7 @@ void dump_stack(void)
168#endif 168#endif
169 trace_buffer_save(tflags); 169 trace_buffer_save(tflags);
170 dump_bfin_trace_buffer(); 170 dump_bfin_trace_buffer();
171 dump_stack_print_info(KERN_DEFAULT);
171 show_stack(current, &stack); 172 show_stack(current, &stack);
172 trace_buffer_restore(tflags); 173 trace_buffer_restore(tflags);
173} 174}
diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c
index f7f7a18abca9..c36efa0c7163 100644
--- a/arch/blackfin/kernel/trace.c
+++ b/arch/blackfin/kernel/trace.c
@@ -853,6 +853,8 @@ void show_regs(struct pt_regs *fp)
853 unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); 853 unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
854 854
855 pr_notice("\n"); 855 pr_notice("\n");
856 show_regs_print_info(KERN_NOTICE);
857
856 if (CPUID != bfin_cpuid()) 858 if (CPUID != bfin_cpuid())
857 pr_notice("Compiled for cpu family 0x%04x (Rev %d), " 859 pr_notice("Compiled for cpu family 0x%04x (Rev %d), "
858 "but running on:0x%04x (Rev %d)\n", 860 "but running on:0x%04x (Rev %d)\n",
diff --git a/arch/c6x/kernel/traps.c b/arch/c6x/kernel/traps.c
index 1be74e5b4788..dcc2c2f6d67c 100644
--- a/arch/c6x/kernel/traps.c
+++ b/arch/c6x/kernel/traps.c
@@ -31,6 +31,7 @@ void __init trap_init(void)
31void show_regs(struct pt_regs *regs) 31void show_regs(struct pt_regs *regs)
32{ 32{
33 pr_err("\n"); 33 pr_err("\n");
34 show_regs_print_info(KERN_ERR);
34 pr_err("PC: %08lx SP: %08lx\n", regs->pc, regs->sp); 35 pr_err("PC: %08lx SP: %08lx\n", regs->pc, regs->sp);
35 pr_err("Status: %08lx ORIG_A4: %08lx\n", regs->csr, regs->orig_a4); 36 pr_err("Status: %08lx ORIG_A4: %08lx\n", regs->csr, regs->orig_a4);
36 pr_err("A0: %08lx B0: %08lx\n", regs->a0, regs->b0); 37 pr_err("A0: %08lx B0: %08lx\n", regs->a0, regs->b0);
@@ -67,15 +68,6 @@ void show_regs(struct pt_regs *regs)
67 pr_err("A31: %08lx B31: %08lx\n", regs->a31, regs->b31); 68 pr_err("A31: %08lx B31: %08lx\n", regs->a31, regs->b31);
68} 69}
69 70
70void dump_stack(void)
71{
72 unsigned long stack;
73
74 show_stack(current, &stack);
75}
76EXPORT_SYMBOL(dump_stack);
77
78
79void die(char *str, struct pt_regs *fp, int nr) 71void die(char *str, struct pt_regs *fp, int nr)
80{ 72{
81 console_verbose(); 73 console_verbose();
diff --git a/arch/cris/arch-v10/kernel/process.c b/arch/cris/arch-v10/kernel/process.c
index 2ba23c13df68..753e9a03cf87 100644
--- a/arch/cris/arch-v10/kernel/process.c
+++ b/arch/cris/arch-v10/kernel/process.c
@@ -176,6 +176,9 @@ unsigned long get_wchan(struct task_struct *p)
176void show_regs(struct pt_regs * regs) 176void show_regs(struct pt_regs * regs)
177{ 177{
178 unsigned long usp = rdusp(); 178 unsigned long usp = rdusp();
179
180 show_regs_print_info(KERN_DEFAULT);
181
179 printk("IRP: %08lx SRP: %08lx DCCR: %08lx USP: %08lx MOF: %08lx\n", 182 printk("IRP: %08lx SRP: %08lx DCCR: %08lx USP: %08lx MOF: %08lx\n",
180 regs->irp, regs->srp, regs->dccr, usp, regs->mof ); 183 regs->irp, regs->srp, regs->dccr, usp, regs->mof );
181 printk(" r0: %08lx r1: %08lx r2: %08lx r3: %08lx\n", 184 printk(" r0: %08lx r1: %08lx r2: %08lx r3: %08lx\n",
diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c
index 57451faa9b20..cebd32e2a8fb 100644
--- a/arch/cris/arch-v32/kernel/process.c
+++ b/arch/cris/arch-v32/kernel/process.c
@@ -164,6 +164,9 @@ get_wchan(struct task_struct *p)
164void show_regs(struct pt_regs * regs) 164void show_regs(struct pt_regs * regs)
165{ 165{
166 unsigned long usp = rdusp(); 166 unsigned long usp = rdusp();
167
168 show_regs_print_info(KERN_DEFAULT);
169
167 printk("ERP: %08lx SRP: %08lx CCS: %08lx USP: %08lx MOF: %08lx\n", 170 printk("ERP: %08lx SRP: %08lx CCS: %08lx USP: %08lx MOF: %08lx\n",
168 regs->erp, regs->srp, regs->ccs, usp, regs->mof); 171 regs->erp, regs->srp, regs->ccs, usp, regs->mof);
169 172
diff --git a/arch/cris/kernel/traps.c b/arch/cris/kernel/traps.c
index a11ad3229f8c..0ffda73734f5 100644
--- a/arch/cris/kernel/traps.c
+++ b/arch/cris/kernel/traps.c
@@ -147,13 +147,6 @@ show_stack(void)
147#endif 147#endif
148 148
149void 149void
150dump_stack(void)
151{
152 show_stack(NULL, NULL);
153}
154EXPORT_SYMBOL(dump_stack);
155
156void
157set_nmi_handler(void (*handler)(struct pt_regs *)) 150set_nmi_handler(void (*handler)(struct pt_regs *))
158{ 151{
159 nmi_handler = handler; 152 nmi_handler = handler;
diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c
index 5cfd1420b091..4bff48c19d29 100644
--- a/arch/frv/kernel/traps.c
+++ b/arch/frv/kernel/traps.c
@@ -466,17 +466,6 @@ asmlinkage void compound_exception(unsigned long esfr1,
466 BUG(); 466 BUG();
467} /* end compound_exception() */ 467} /* end compound_exception() */
468 468
469/*****************************************************************************/
470/*
471 * The architecture-independent backtrace generator
472 */
473void dump_stack(void)
474{
475 show_stack(NULL, NULL);
476}
477
478EXPORT_SYMBOL(dump_stack);
479
480void show_stack(struct task_struct *task, unsigned long *sp) 469void show_stack(struct task_struct *task, unsigned long *sp)
481{ 470{
482} 471}
@@ -508,6 +497,7 @@ void show_regs(struct pt_regs *regs)
508 int loop; 497 int loop;
509 498
510 printk("\n"); 499 printk("\n");
500 show_regs_print_info(KERN_DEFAULT);
511 501
512 printk("Frame: @%08lx [%s]\n", 502 printk("Frame: @%08lx [%s]\n",
513 (unsigned long) regs, 503 (unsigned long) regs,
@@ -522,8 +512,6 @@ void show_regs(struct pt_regs *regs)
522 else 512 else
523 printk(" | "); 513 printk(" | ");
524 } 514 }
525
526 printk("Process %s (pid: %d)\n", current->comm, current->pid);
527} 515}
528 516
529void die_if_kernel(const char *str, ...) 517void die_if_kernel(const char *str, ...)
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index a17d2cd463d2..1a744ab7e7e5 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -83,6 +83,8 @@ void machine_power_off(void)
83 83
84void show_regs(struct pt_regs * regs) 84void show_regs(struct pt_regs * regs)
85{ 85{
86 show_regs_print_info(KERN_DEFAULT);
87
86 printk("\nPC: %08lx Status: %02x", 88 printk("\nPC: %08lx Status: %02x",
87 regs->pc, regs->ccr); 89 regs->pc, regs->ccr);
88 printk("\nORIG_ER0: %08lx ER0: %08lx ER1: %08lx", 90 printk("\nORIG_ER0: %08lx ER0: %08lx ER1: %08lx",
diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c
index 7833aa3e7c7d..cfe494dbe3da 100644
--- a/arch/h8300/kernel/traps.c
+++ b/arch/h8300/kernel/traps.c
@@ -164,10 +164,3 @@ void show_trace_task(struct task_struct *tsk)
164{ 164{
165 show_stack(tsk,(unsigned long *)tsk->thread.esp0); 165 show_stack(tsk,(unsigned long *)tsk->thread.esp0);
166} 166}
167
168void dump_stack(void)
169{
170 show_stack(NULL,NULL);
171}
172
173EXPORT_SYMBOL(dump_stack);
diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c
index be5e2dd9c9d3..cc2171b2aa04 100644
--- a/arch/hexagon/kernel/traps.c
+++ b/arch/hexagon/kernel/traps.c
@@ -191,14 +191,6 @@ void show_stack(struct task_struct *task, unsigned long *fp)
191 do_show_stack(task, fp, 0); 191 do_show_stack(task, fp, 0);
192} 192}
193 193
194void dump_stack(void)
195{
196 unsigned long *fp;
197 asm("%0 = r30" : "=r" (fp));
198 show_stack(current, fp);
199}
200EXPORT_SYMBOL(dump_stack);
201
202int die(const char *str, struct pt_regs *regs, long err) 194int die(const char *str, struct pt_regs *regs, long err)
203{ 195{
204 static struct { 196 static struct {
diff --git a/arch/hexagon/kernel/vm_events.c b/arch/hexagon/kernel/vm_events.c
index 9b5a4a295a68..f337281ebe67 100644
--- a/arch/hexagon/kernel/vm_events.c
+++ b/arch/hexagon/kernel/vm_events.c
@@ -33,6 +33,8 @@
33 */ 33 */
34void show_regs(struct pt_regs *regs) 34void show_regs(struct pt_regs *regs)
35{ 35{
36 show_regs_print_info(KERN_EMERG);
37
36 printk(KERN_EMERG "restart_r0: \t0x%08lx syscall_nr: %ld\n", 38 printk(KERN_EMERG "restart_r0: \t0x%08lx syscall_nr: %ld\n",
37 regs->restart_r0, regs->syscall_nr); 39 regs->restart_r0, regs->syscall_nr);
38 printk(KERN_EMERG "preds: \t\t0x%08lx\n", regs->preds); 40 printk(KERN_EMERG "preds: \t\t0x%08lx\n", regs->preds);
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index a26fc640e4ce..55d4ba47a907 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -96,21 +96,13 @@ show_stack (struct task_struct *task, unsigned long *sp)
96} 96}
97 97
98void 98void
99dump_stack (void)
100{
101 show_stack(NULL, NULL);
102}
103
104EXPORT_SYMBOL(dump_stack);
105
106void
107show_regs (struct pt_regs *regs) 99show_regs (struct pt_regs *regs)
108{ 100{
109 unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri; 101 unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
110 102
111 print_modules(); 103 print_modules();
112 printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current), 104 printk("\n");
113 smp_processor_id(), current->comm); 105 show_regs_print_info(KERN_DEFAULT);
114 printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s (%s)\n", 106 printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s (%s)\n",
115 regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(), 107 regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(),
116 init_utsname()->release); 108 init_utsname()->release);
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 2029cc0d2fc6..13bfdd22afc8 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -1063,6 +1063,7 @@ check_bugs (void)
1063static int __init run_dmi_scan(void) 1063static int __init run_dmi_scan(void)
1064{ 1064{
1065 dmi_scan_machine(); 1065 dmi_scan_machine();
1066 dmi_set_dump_stack_arch_desc();
1066 return 0; 1067 return 0;
1067} 1068}
1068core_initcall(run_dmi_scan); 1069core_initcall(run_dmi_scan);
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index e2d049018c3b..e69221d581d5 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -73,6 +73,8 @@ void machine_power_off(void)
73void show_regs(struct pt_regs * regs) 73void show_regs(struct pt_regs * regs)
74{ 74{
75 printk("\n"); 75 printk("\n");
76 show_regs_print_info(KERN_DEFAULT);
77
76 printk("BPC[%08lx]:PSW[%08lx]:LR [%08lx]:FP [%08lx]\n", \ 78 printk("BPC[%08lx]:PSW[%08lx]:LR [%08lx]:FP [%08lx]\n", \
77 regs->bpc, regs->psw, regs->lr, regs->fp); 79 regs->bpc, regs->psw, regs->lr, regs->fp);
78 printk("BBPC[%08lx]:BBPSW[%08lx]:SPU[%08lx]:SPI[%08lx]\n", \ 80 printk("BBPC[%08lx]:BBPSW[%08lx]:SPU[%08lx]:SPI[%08lx]\n", \
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index 9fe3467a5133..a7a424f852e4 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -167,15 +167,6 @@ void show_stack(struct task_struct *task, unsigned long *sp)
167 show_trace(task, sp); 167 show_trace(task, sp);
168} 168}
169 169
170void dump_stack(void)
171{
172 unsigned long stack;
173
174 show_trace(current, &stack);
175}
176
177EXPORT_SYMBOL(dump_stack);
178
179static void show_registers(struct pt_regs *regs) 170static void show_registers(struct pt_regs *regs)
180{ 171{
181 int i = 0; 172 int i = 0;
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index f32ab22e7ed3..88fcd8c70e7b 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -992,18 +992,6 @@ void show_stack(struct task_struct *task, unsigned long *stack)
992} 992}
993 993
994/* 994/*
995 * The architecture-independent backtrace generator
996 */
997void dump_stack(void)
998{
999 unsigned long stack;
1000
1001 show_trace(&stack);
1002}
1003
1004EXPORT_SYMBOL(dump_stack);
1005
1006/*
1007 * The vector number returned in the frame pointer may also contain 995 * The vector number returned in the frame pointer may also contain
1008 * the "fs" (Fault Status) bits on ColdFire. These are in the bottom 996 * the "fs" (Fault Status) bits on ColdFire. These are in the bottom
1009 * 2 bits, and upper 2 bits. So we need to mask out the real vector 997 * 2 bits, and upper 2 bits. So we need to mask out the real vector
diff --git a/arch/metag/kernel/process.c b/arch/metag/kernel/process.c
index dc5923544560..483dff986a23 100644
--- a/arch/metag/kernel/process.c
+++ b/arch/metag/kernel/process.c
@@ -129,6 +129,8 @@ void show_regs(struct pt_regs *regs)
129 "D1.7 " 129 "D1.7 "
130 }; 130 };
131 131
132 show_regs_print_info(KERN_INFO);
133
132 pr_info(" pt_regs @ %p\n", regs); 134 pr_info(" pt_regs @ %p\n", regs);
133 pr_info(" SaveMask = 0x%04hx\n", regs->ctx.SaveMask); 135 pr_info(" SaveMask = 0x%04hx\n", regs->ctx.SaveMask);
134 pr_info(" Flags = 0x%04hx (%c%c%c%c)\n", regs->ctx.Flags, 136 pr_info(" Flags = 0x%04hx (%c%c%c%c)\n", regs->ctx.Flags,
diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c
index 8961f247b500..2ceeaae5b199 100644
--- a/arch/metag/kernel/traps.c
+++ b/arch/metag/kernel/traps.c
@@ -987,9 +987,3 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
987 987
988 show_trace(tsk, sp, NULL); 988 show_trace(tsk, sp, NULL);
989} 989}
990
991void dump_stack(void)
992{
993 show_stack(NULL, NULL);
994}
995EXPORT_SYMBOL(dump_stack);
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 7cce2e9c1719..a55893807274 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -20,6 +20,8 @@
20 20
21void show_regs(struct pt_regs *regs) 21void show_regs(struct pt_regs *regs)
22{ 22{
23 show_regs_print_info(KERN_INFO);
24
23 pr_info(" Registers dump: mode=%X\r\n", regs->pt_mode); 25 pr_info(" Registers dump: mode=%X\r\n", regs->pt_mode);
24 pr_info(" r1=%08lX, r2=%08lX, r3=%08lX, r4=%08lX\n", 26 pr_info(" r1=%08lX, r2=%08lX, r3=%08lX, r4=%08lX\n",
25 regs->r1, regs->r2, regs->r3, regs->r4); 27 regs->r1, regs->r2, regs->r3, regs->r4);
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c
index 30e6b5004a6a..cb619533a192 100644
--- a/arch/microblaze/kernel/traps.c
+++ b/arch/microblaze/kernel/traps.c
@@ -75,9 +75,3 @@ void show_stack(struct task_struct *task, unsigned long *sp)
75 75
76 debug_show_held_locks(task); 76 debug_show_held_locks(task);
77} 77}
78
79void dump_stack(void)
80{
81 show_stack(NULL, NULL);
82}
83EXPORT_SYMBOL(dump_stack);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index c3abb88170fc..25225515451f 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -206,19 +206,6 @@ void show_stack(struct task_struct *task, unsigned long *sp)
206 show_stacktrace(task, &regs); 206 show_stacktrace(task, &regs);
207} 207}
208 208
209/*
210 * The architecture-independent dump_stack generator
211 */
212void dump_stack(void)
213{
214 struct pt_regs regs;
215
216 prepare_frametrace(&regs);
217 show_backtrace(current, &regs);
218}
219
220EXPORT_SYMBOL(dump_stack);
221
222static void show_code(unsigned int __user *pc) 209static void show_code(unsigned int __user *pc)
223{ 210{
224 long i; 211 long i;
@@ -244,7 +231,7 @@ static void __show_regs(const struct pt_regs *regs)
244 unsigned int cause = regs->cp0_cause; 231 unsigned int cause = regs->cp0_cause;
245 int i; 232 int i;
246 233
247 printk("Cpu %d\n", smp_processor_id()); 234 show_regs_print_info(KERN_DEFAULT);
248 235
249 /* 236 /*
250 * Saved main processor registers 237 * Saved main processor registers
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
index 2da39fb8b3b2..3707da583d05 100644
--- a/arch/mn10300/kernel/process.c
+++ b/arch/mn10300/kernel/process.c
@@ -97,6 +97,7 @@ void machine_power_off(void)
97 97
98void show_regs(struct pt_regs *regs) 98void show_regs(struct pt_regs *regs)
99{ 99{
100 show_regs_print_info(KERN_DEFAULT);
100} 101}
101 102
102/* 103/*
diff --git a/arch/mn10300/kernel/traps.c b/arch/mn10300/kernel/traps.c
index b900e5afa0ae..a7a987c7954f 100644
--- a/arch/mn10300/kernel/traps.c
+++ b/arch/mn10300/kernel/traps.c
@@ -294,17 +294,6 @@ void show_stack(struct task_struct *task, unsigned long *sp)
294} 294}
295 295
296/* 296/*
297 * the architecture-independent dump_stack generator
298 */
299void dump_stack(void)
300{
301 unsigned long stack;
302
303 show_stack(current, &stack);
304}
305EXPORT_SYMBOL(dump_stack);
306
307/*
308 * dump the register file in the specified exception frame 297 * dump the register file in the specified exception frame
309 */ 298 */
310void show_registers_only(struct pt_regs *regs) 299void show_registers_only(struct pt_regs *regs)
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index 00c233bf0d06..386af258591d 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -90,6 +90,7 @@ void show_regs(struct pt_regs *regs)
90{ 90{
91 extern void show_registers(struct pt_regs *regs); 91 extern void show_registers(struct pt_regs *regs);
92 92
93 show_regs_print_info(KERN_DEFAULT);
93 /* __PHX__ cleanup this mess */ 94 /* __PHX__ cleanup this mess */
94 show_registers(regs); 95 show_registers(regs);
95} 96}
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index 5cce396016d0..3d3f6062f49c 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -105,17 +105,6 @@ void show_trace_task(struct task_struct *tsk)
105 */ 105 */
106} 106}
107 107
108/*
109 * The architecture-independent backtrace generator
110 */
111void dump_stack(void)
112{
113 unsigned long stack;
114
115 show_stack(current, &stack);
116}
117EXPORT_SYMBOL(dump_stack);
118
119void show_registers(struct pt_regs *regs) 108void show_registers(struct pt_regs *regs)
120{ 109{
121 int i; 110 int i;
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 0339181bf3ac..433e75a2ee9a 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -1,5 +1,6 @@
1config PARISC 1config PARISC
2 def_bool y 2 def_bool y
3 select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
3 select HAVE_IDE 4 select HAVE_IDE
4 select HAVE_OPROFILE 5 select HAVE_OPROFILE
5 select HAVE_FUNCTION_TRACER if 64BIT 6 select HAVE_FUNCTION_TRACER if 64BIT
diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug
index 7305ac8f7f5b..bc989e522a04 100644
--- a/arch/parisc/Kconfig.debug
+++ b/arch/parisc/Kconfig.debug
@@ -12,18 +12,4 @@ config DEBUG_RODATA
12 portion of the kernel code won't be covered by a TLB anymore. 12 portion of the kernel code won't be covered by a TLB anymore.
13 If in doubt, say "N". 13 If in doubt, say "N".
14 14
15config DEBUG_STRICT_USER_COPY_CHECKS
16 bool "Strict copy size checks"
17 depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
18 ---help---
19 Enabling this option turns a certain set of sanity checks for user
20 copy operations into compile time failures.
21
22 The copy_from_user() etc checks are there to help test if there
23 are sufficient security checks on the length argument of
24 the copy operation, by having gcc prove that the argument is
25 within bounds.
26
27 If unsure, or if you run an older (pre 4.4) gcc, say N.
28
29endmenu 15endmenu
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index aeb8f8f2c07a..f702bff0bed9 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -126,6 +126,8 @@ void show_regs(struct pt_regs *regs)
126 user = user_mode(regs); 126 user = user_mode(regs);
127 level = user ? KERN_DEBUG : KERN_CRIT; 127 level = user ? KERN_DEBUG : KERN_CRIT;
128 128
129 show_regs_print_info(level);
130
129 print_gr(level, regs); 131 print_gr(level, regs);
130 132
131 for (i = 0; i < 8; i += 4) 133 for (i = 0; i < 8; i += 4)
@@ -158,14 +160,6 @@ void show_regs(struct pt_regs *regs)
158 } 160 }
159} 161}
160 162
161
162void dump_stack(void)
163{
164 show_stack(NULL, NULL);
165}
166
167EXPORT_SYMBOL(dump_stack);
168
169static void do_show_stack(struct unwind_frame_info *info) 163static void do_show_stack(struct unwind_frame_info *info)
170{ 164{
171 int i = 1; 165 int i = 1;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 16e77a81ab4f..13a8d9d0b5cb 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -831,6 +831,8 @@ void show_regs(struct pt_regs * regs)
831{ 831{
832 int i, trap; 832 int i, trap;
833 833
834 show_regs_print_info(KERN_DEFAULT);
835
834 printk("NIP: "REG" LR: "REG" CTR: "REG"\n", 836 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
835 regs->nip, regs->link, regs->ctr); 837 regs->nip, regs->link, regs->ctr);
836 printk("REGS: %p TRAP: %04lx %s (%s)\n", 838 printk("REGS: %p TRAP: %04lx %s (%s)\n",
@@ -850,12 +852,6 @@ void show_regs(struct pt_regs * regs)
850#else 852#else
851 printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr); 853 printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
852#endif 854#endif
853 printk("TASK = %p[%d] '%s' THREAD: %p",
854 current, task_pid_nr(current), current->comm, task_thread_info(current));
855
856#ifdef CONFIG_SMP
857 printk(" CPU: %d", raw_smp_processor_id());
858#endif /* CONFIG_SMP */
859 855
860 for (i = 0; i < 32; i++) { 856 for (i = 0; i < 32; i++) {
861 if ((i % REGS_PER_LINE) == 0) 857 if ((i % REGS_PER_LINE) == 0)
@@ -1362,12 +1358,6 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
1362 } while (count++ < kstack_depth_to_print); 1358 } while (count++ < kstack_depth_to_print);
1363} 1359}
1364 1360
1365void dump_stack(void)
1366{
1367 show_stack(current, NULL);
1368}
1369EXPORT_SYMBOL(dump_stack);
1370
1371#ifdef CONFIG_PPC64 1361#ifdef CONFIG_PPC64
1372/* Called with hard IRQs off */ 1362/* Called with hard IRQs off */
1373void __ppc64_runlatch_on(void) 1363void __ppc64_runlatch_on(void)
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 13f85defabed..3e34cd224b7c 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2947,7 +2947,7 @@ static void sysrq_handle_xmon(int key)
2947 2947
2948static struct sysrq_key_op sysrq_xmon_op = { 2948static struct sysrq_key_op sysrq_xmon_op = {
2949 .handler = sysrq_handle_xmon, 2949 .handler = sysrq_handle_xmon,
2950 .help_msg = "Xmon", 2950 .help_msg = "xmon(x)",
2951 .action_msg = "Entering xmon", 2951 .action_msg = "Entering xmon",
2952}; 2952};
2953 2953
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index bda6ba6f3cf5..ce640aff61a1 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -91,6 +91,7 @@ config S390
91 select ARCH_INLINE_WRITE_UNLOCK_BH 91 select ARCH_INLINE_WRITE_UNLOCK_BH
92 select ARCH_INLINE_WRITE_UNLOCK_IRQ 92 select ARCH_INLINE_WRITE_UNLOCK_IRQ
93 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE 93 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
94 select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94 select ARCH_SAVE_PAGE_KEYS if HIBERNATION 95 select ARCH_SAVE_PAGE_KEYS if HIBERNATION
95 select ARCH_WANT_IPC_PARSE_VERSION 96 select ARCH_WANT_IPC_PARSE_VERSION
96 select BUILDTIME_EXTABLE_SORT 97 select BUILDTIME_EXTABLE_SORT
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index fc32a2df4974..c56878e1245f 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -17,20 +17,6 @@ config STRICT_DEVMEM
17 17
18 If you are unsure, say Y. 18 If you are unsure, say Y.
19 19
20config DEBUG_STRICT_USER_COPY_CHECKS
21 def_bool n
22 prompt "Strict user copy size checks"
23 ---help---
24 Enabling this option turns a certain set of sanity checks for user
25 copy operations into compile time warnings.
26
27 The copy_from_user() etc checks are there to help test if there
28 are sufficient security checks on the length argument of
29 the copy operation, by having gcc prove that the argument is
30 within bounds.
31
32 If unsure, or if you run an older (pre 4.4) gcc, say N.
33
34config S390_PTDUMP 20config S390_PTDUMP
35 bool "Export kernel pagetable layout to userspace via debugfs" 21 bool "Export kernel pagetable layout to userspace via debugfs"
36 depends on DEBUG_KERNEL 22 depends on DEBUG_KERNEL
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 03dce39d01ee..298297477257 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -129,23 +129,6 @@ static void show_last_breaking_event(struct pt_regs *regs)
129#endif 129#endif
130} 130}
131 131
132/*
133 * The architecture-independent dump_stack generator
134 */
135void dump_stack(void)
136{
137 printk("CPU: %d %s %s %.*s\n",
138 task_thread_info(current)->cpu, print_tainted(),
139 init_utsname()->release,
140 (int)strcspn(init_utsname()->version, " "),
141 init_utsname()->version);
142 printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
143 current->comm, current->pid, current,
144 (void *) current->thread.ksp);
145 show_stack(NULL, NULL);
146}
147EXPORT_SYMBOL(dump_stack);
148
149static inline int mask_bits(struct pt_regs *regs, unsigned long bits) 132static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
150{ 133{
151 return (regs->psw.mask & bits) / ((~bits + 1) & bits); 134 return (regs->psw.mask & bits) / ((~bits + 1) & bits);
@@ -183,14 +166,7 @@ void show_registers(struct pt_regs *regs)
183 166
184void show_regs(struct pt_regs *regs) 167void show_regs(struct pt_regs *regs)
185{ 168{
186 printk("CPU: %d %s %s %.*s\n", 169 show_regs_print_info(KERN_DEFAULT);
187 task_thread_info(current)->cpu, print_tainted(),
188 init_utsname()->release,
189 (int)strcspn(init_utsname()->version, " "),
190 init_utsname()->version);
191 printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
192 current->comm, current->pid, current,
193 (void *) current->thread.ksp);
194 show_registers(regs); 170 show_registers(regs);
195 /* Show stack backtrace if pt_regs is from kernel mode */ 171 /* Show stack backtrace if pt_regs is from kernel mode */
196 if (!user_mode(regs)) 172 if (!user_mode(regs))
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 6ab0d0b5cec8..20b0e97a7df2 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -3,7 +3,6 @@
3# 3#
4 4
5lib-y += delay.o string.o uaccess_std.o uaccess_pt.o 5lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
6obj-y += usercopy.o
7obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o 6obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
8obj-$(CONFIG_64BIT) += mem64.o 7obj-$(CONFIG_64BIT) += mem64.o
9lib-$(CONFIG_64BIT) += uaccess_mvcos.o 8lib-$(CONFIG_64BIT) += uaccess_mvcos.o
diff --git a/arch/score/kernel/traps.c b/arch/score/kernel/traps.c
index 0e46fb19a848..1517a7dcd6d9 100644
--- a/arch/score/kernel/traps.c
+++ b/arch/score/kernel/traps.c
@@ -117,6 +117,8 @@ static void show_code(unsigned int *pc)
117 */ 117 */
118void show_regs(struct pt_regs *regs) 118void show_regs(struct pt_regs *regs)
119{ 119{
120 show_regs_print_info(KERN_DEFAULT);
121
120 printk("r0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 122 printk("r0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
121 regs->regs[0], regs->regs[1], regs->regs[2], regs->regs[3], 123 regs->regs[0], regs->regs[1], regs->regs[2], regs->regs[3],
122 regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]); 124 regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]);
@@ -149,16 +151,6 @@ static void show_registers(struct pt_regs *regs)
149 printk(KERN_NOTICE "\n"); 151 printk(KERN_NOTICE "\n");
150} 152}
151 153
152/*
153 * The architecture-independent dump_stack generator
154 */
155void dump_stack(void)
156{
157 show_stack(current_thread_info()->task,
158 (long *) get_irq_regs()->regs[0]);
159}
160EXPORT_SYMBOL(dump_stack);
161
162void __die(const char *str, struct pt_regs *regs, const char *file, 154void __die(const char *str, struct pt_regs *regs, const char *file,
163 const char *func, unsigned long line) 155 const char *func, unsigned long line)
164{ 156{
diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c
index 7617dc4129ac..b959f5592604 100644
--- a/arch/sh/kernel/dumpstack.c
+++ b/arch/sh/kernel/dumpstack.c
@@ -158,9 +158,3 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
158 (unsigned long)task_stack_page(tsk)); 158 (unsigned long)task_stack_page(tsk));
159 show_trace(tsk, sp, NULL); 159 show_trace(tsk, sp, NULL);
160} 160}
161
162void dump_stack(void)
163{
164 show_stack(NULL, NULL);
165}
166EXPORT_SYMBOL(dump_stack);
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 73eb66fc6253..ebd3933005b4 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -32,11 +32,7 @@
32void show_regs(struct pt_regs * regs) 32void show_regs(struct pt_regs * regs)
33{ 33{
34 printk("\n"); 34 printk("\n");
35 printk("Pid : %d, Comm: \t\t%s\n", task_pid_nr(current), current->comm); 35 show_regs_print_info(KERN_DEFAULT);
36 printk("CPU : %d \t\t%s (%s %.*s)\n\n",
37 smp_processor_id(), print_tainted(), init_utsname()->release,
38 (int)strcspn(init_utsname()->version, " "),
39 init_utsname()->version);
40 36
41 print_symbol("PC is at %s\n", instruction_pointer(regs)); 37 print_symbol("PC is at %s\n", instruction_pointer(regs));
42 print_symbol("PR is at %s\n", regs->pr); 38 print_symbol("PR is at %s\n", regs->pr);
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index e611c85144b1..174d124b419e 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -40,6 +40,7 @@ void show_regs(struct pt_regs *regs)
40 unsigned long long ah, al, bh, bl, ch, cl; 40 unsigned long long ah, al, bh, bl, ch, cl;
41 41
42 printk("\n"); 42 printk("\n");
43 show_regs_print_info(KERN_DEFAULT);
43 44
44 ah = (regs->pc) >> 32; 45 ah = (regs->pc) >> 32;
45 al = (regs->pc) & 0xffffffff; 46 al = (regs->pc) & 0xffffffff;
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index c85241006e32..fdd819dfdacf 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -112,6 +112,8 @@ void show_regs(struct pt_regs *r)
112{ 112{
113 struct reg_window32 *rw = (struct reg_window32 *) r->u_regs[14]; 113 struct reg_window32 *rw = (struct reg_window32 *) r->u_regs[14];
114 114
115 show_regs_print_info(KERN_DEFAULT);
116
115 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n", 117 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
116 r->psr, r->pc, r->npc, r->y, print_tainted()); 118 r->psr, r->pc, r->npc, r->y, print_tainted());
117 printk("PC: <%pS>\n", (void *) r->pc); 119 printk("PC: <%pS>\n", (void *) r->pc);
@@ -142,11 +144,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
142 struct reg_window32 *rw; 144 struct reg_window32 *rw;
143 int count = 0; 145 int count = 0;
144 146
145 if (tsk != NULL) 147 if (!tsk)
146 task_base = (unsigned long) task_stack_page(tsk); 148 tsk = current;
147 else 149
148 task_base = (unsigned long) current_thread_info(); 150 if (tsk == current && !_ksp)
151 __asm__ __volatile__("mov %%fp, %0" : "=r" (_ksp));
149 152
153 task_base = (unsigned long) task_stack_page(tsk);
150 fp = (unsigned long) _ksp; 154 fp = (unsigned long) _ksp;
151 do { 155 do {
152 /* Bogus frame pointer? */ 156 /* Bogus frame pointer? */
@@ -162,17 +166,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
162 printk("\n"); 166 printk("\n");
163} 167}
164 168
165void dump_stack(void)
166{
167 unsigned long *ksp;
168
169 __asm__ __volatile__("mov %%fp, %0"
170 : "=r" (ksp));
171 show_stack(current, ksp);
172}
173
174EXPORT_SYMBOL(dump_stack);
175
176/* 169/*
177 * Note: sparc64 has a pretty intricated thread_saved_pc, check it out. 170 * Note: sparc64 has a pretty intricated thread_saved_pc, check it out.
178 */ 171 */
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 9fbf0d14a361..baebab215492 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -163,6 +163,8 @@ static void show_regwindow(struct pt_regs *regs)
163 163
164void show_regs(struct pt_regs *regs) 164void show_regs(struct pt_regs *regs)
165{ 165{
166 show_regs_print_info(KERN_DEFAULT);
167
166 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate, 168 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
167 regs->tpc, regs->tnpc, regs->y, print_tainted()); 169 regs->tpc, regs->tnpc, regs->y, print_tainted());
168 printk("TPC: <%pS>\n", (void *) regs->tpc); 170 printk("TPC: <%pS>\n", (void *) regs->tpc);
@@ -292,7 +294,7 @@ static void sysrq_handle_globreg(int key)
292 294
293static struct sysrq_key_op sparc_globalreg_op = { 295static struct sysrq_key_op sparc_globalreg_op = {
294 .handler = sysrq_handle_globreg, 296 .handler = sysrq_handle_globreg,
295 .help_msg = "global-regs(Y)", 297 .help_msg = "global-regs(y)",
296 .action_msg = "Show Global CPU Regs", 298 .action_msg = "Show Global CPU Regs",
297}; 299};
298 300
@@ -362,7 +364,7 @@ static void sysrq_handle_globpmu(int key)
362 364
363static struct sysrq_key_op sparc_globalpmu_op = { 365static struct sysrq_key_op sparc_globalpmu_op = {
364 .handler = sysrq_handle_globpmu, 366 .handler = sysrq_handle_globpmu,
365 .help_msg = "global-pmu(X)", 367 .help_msg = "global-pmu(x)",
366 .action_msg = "Show Global PMU Regs", 368 .action_msg = "Show Global PMU Regs",
367}; 369};
368 370
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 8d38ca97aa23..b3f833ab90eb 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2350,13 +2350,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
2350 } while (++count < 16); 2350 } while (++count < 16);
2351} 2351}
2352 2352
2353void dump_stack(void)
2354{
2355 show_stack(current, NULL);
2356}
2357
2358EXPORT_SYMBOL(dump_stack);
2359
2360static inline struct reg_window *kernel_stack_up(struct reg_window *rw) 2353static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2361{ 2354{
2362 unsigned long fp = rw->ins[6]; 2355 unsigned long fp = rw->ins[6];
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 8410065f2862..dbe119b63b48 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -45,4 +45,3 @@ obj-y += iomap.o
45obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o 45obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o
46obj-y += ksyms.o 46obj-y += ksyms.o
47obj-$(CONFIG_SPARC64) += PeeCeeI.o 47obj-$(CONFIG_SPARC64) += PeeCeeI.o
48obj-y += usercopy.o
diff --git a/arch/sparc/lib/usercopy.c b/arch/sparc/lib/usercopy.c
deleted file mode 100644
index 5c4284ce1c03..000000000000
--- a/arch/sparc/lib/usercopy.c
+++ /dev/null
@@ -1,9 +0,0 @@
1#include <linux/module.h>
2#include <linux/kernel.h>
3#include <linux/bug.h>
4
5void copy_from_user_overflow(void)
6{
7 WARN(1, "Buffer overflow detected!\n");
8}
9EXPORT_SYMBOL(copy_from_user_overflow);
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 25877aebc685..0f712f4e1b33 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -19,6 +19,7 @@ config TILE
19 select HAVE_SYSCALL_WRAPPERS if TILEGX 19 select HAVE_SYSCALL_WRAPPERS if TILEGX
20 select VIRT_TO_BUS 20 select VIRT_TO_BUS
21 select SYS_HYPERVISOR 21 select SYS_HYPERVISOR
22 select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
22 select ARCH_HAVE_NMI_SAFE_CMPXCHG 23 select ARCH_HAVE_NMI_SAFE_CMPXCHG
23 select GENERIC_CLOCKEVENTS 24 select GENERIC_CLOCKEVENTS
24 select MODULES_USE_ELF_RELA 25 select MODULES_USE_ELF_RELA
@@ -114,13 +115,6 @@ config STRICT_DEVMEM
114config SMP 115config SMP
115 def_bool y 116 def_bool y
116 117
117# Allow checking for compile-time determined overflow errors in
118# copy_from_user(). There are still unprovable places in the
119# generic code as of 2.6.34, so this option is not really compatible
120# with -Werror, which is more useful in general.
121config DEBUG_COPY_FROM_USER
122 def_bool n
123
124config HVC_TILE 118config HVC_TILE
125 depends on TTY 119 depends on TTY
126 select HVC_DRIVER 120 select HVC_DRIVER
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index 9ab078a4605d..8a082bc6bca5 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -395,7 +395,12 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
395 return n; 395 return n;
396} 396}
397 397
398#ifdef CONFIG_DEBUG_COPY_FROM_USER 398#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
399/*
400 * There are still unprovable places in the generic code as of 2.6.34, so this
401 * option is not really compatible with -Werror, which is more useful in
402 * general.
403 */
399extern void copy_from_user_overflow(void) 404extern void copy_from_user_overflow(void)
400 __compiletime_warning("copy_from_user() size is not provably correct"); 405 __compiletime_warning("copy_from_user() size is not provably correct");
401 406
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 80b2a18deb87..8ac304484f98 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -573,8 +573,7 @@ void show_regs(struct pt_regs *regs)
573 int i; 573 int i;
574 574
575 pr_err("\n"); 575 pr_err("\n");
576 pr_err(" Pid: %d, comm: %20s, CPU: %d\n", 576 show_regs_print_info(KERN_ERR);
577 tsk->pid, tsk->comm, smp_processor_id());
578#ifdef __tilegx__ 577#ifdef __tilegx__
579 for (i = 0; i < 51; i += 3) 578 for (i = 0; i < 51; i += 3)
580 pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n", 579 pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
diff --git a/arch/tile/lib/uaccess.c b/arch/tile/lib/uaccess.c
index f8d398c9ee7f..030abe3ee4f1 100644
--- a/arch/tile/lib/uaccess.c
+++ b/arch/tile/lib/uaccess.c
@@ -22,11 +22,3 @@ int __range_ok(unsigned long addr, unsigned long size)
22 is_arch_mappable_range(addr, size)); 22 is_arch_mappable_range(addr, size));
23} 23}
24EXPORT_SYMBOL(__range_ok); 24EXPORT_SYMBOL(__range_ok);
25
26#ifdef CONFIG_DEBUG_COPY_FROM_USER
27void copy_from_user_overflow(void)
28{
29 WARN(1, "Buffer overflow detected!\n");
30}
31EXPORT_SYMBOL(copy_from_user_overflow);
32#endif
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
index e562ff80409a..7d101a2a1541 100644
--- a/arch/um/kernel/sysrq.c
+++ b/arch/um/kernel/sysrq.c
@@ -35,18 +35,6 @@ void show_trace(struct task_struct *task, unsigned long * stack)
35} 35}
36#endif 36#endif
37 37
38/*
39 * stack dumps generator - this is used by arch-independent code.
40 * And this is identical to i386 currently.
41 */
42void dump_stack(void)
43{
44 unsigned long stack;
45
46 show_trace(current, &stack);
47}
48EXPORT_SYMBOL(dump_stack);
49
50/*Stolen from arch/i386/kernel/traps.c */ 38/*Stolen from arch/i386/kernel/traps.c */
51static const int kstack_depth_to_print = 24; 39static const int kstack_depth_to_print = 24;
52 40
diff --git a/arch/um/sys-ppc/sysrq.c b/arch/um/sys-ppc/sysrq.c
index f889449f9285..1ff1ad7f27da 100644
--- a/arch/um/sys-ppc/sysrq.c
+++ b/arch/um/sys-ppc/sysrq.c
@@ -11,6 +11,8 @@
11void show_regs(struct pt_regs_subarch *regs) 11void show_regs(struct pt_regs_subarch *regs)
12{ 12{
13 printk("\n"); 13 printk("\n");
14 show_regs_print_info(KERN_DEFAULT);
15
14 printk("show_regs(): insert regs here.\n"); 16 printk("show_regs(): insert regs here.\n");
15#if 0 17#if 0
16 printk("\n"); 18 printk("\n");
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c
index 7fab86d7c5d4..c9447691bdac 100644
--- a/arch/unicore32/kernel/process.c
+++ b/arch/unicore32/kernel/process.c
@@ -144,11 +144,7 @@ void __show_regs(struct pt_regs *regs)
144 unsigned long flags; 144 unsigned long flags;
145 char buf[64]; 145 char buf[64];
146 146
147 printk(KERN_DEFAULT "CPU: %d %s (%s %.*s)\n", 147 show_regs_print_info(KERN_DEFAULT);
148 raw_smp_processor_id(), print_tainted(),
149 init_utsname()->release,
150 (int)strcspn(init_utsname()->version, " "),
151 init_utsname()->version);
152 print_symbol("PC is at %s\n", instruction_pointer(regs)); 148 print_symbol("PC is at %s\n", instruction_pointer(regs));
153 print_symbol("LR is at %s\n", regs->UCreg_lr); 149 print_symbol("LR is at %s\n", regs->UCreg_lr);
154 printk(KERN_DEFAULT "pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n" 150 printk(KERN_DEFAULT "pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c
index 0870b68d2ad9..c54e32410ead 100644
--- a/arch/unicore32/kernel/traps.c
+++ b/arch/unicore32/kernel/traps.c
@@ -170,12 +170,6 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
170 c_backtrace(fp, mode); 170 c_backtrace(fp, mode);
171} 171}
172 172
173void dump_stack(void)
174{
175 dump_backtrace(NULL, NULL);
176}
177EXPORT_SYMBOL(dump_stack);
178
179void show_stack(struct task_struct *tsk, unsigned long *sp) 173void show_stack(struct task_struct *tsk, unsigned long *sp)
180{ 174{
181 dump_backtrace(NULL, tsk); 175 dump_backtrace(NULL, tsk);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 05b057dca4a7..5db2117ae288 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -20,6 +20,7 @@ config X86_64
20### Arch settings 20### Arch settings
21config X86 21config X86
22 def_bool y 22 def_bool y
23 select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
23 select HAVE_AOUT if X86_32 24 select HAVE_AOUT if X86_32
24 select HAVE_UNSTABLE_SCHED_CLOCK 25 select HAVE_UNSTABLE_SCHED_CLOCK
25 select ARCH_SUPPORTS_NUMA_BALANCING 26 select ARCH_SUPPORTS_NUMA_BALANCING
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 16f738385dcb..c198b7e13e7b 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -292,20 +292,6 @@ config OPTIMIZE_INLINING
292 292
293 If unsure, say N. 293 If unsure, say N.
294 294
295config DEBUG_STRICT_USER_COPY_CHECKS
296 bool "Strict copy size checks"
297 depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
298 ---help---
299 Enabling this option turns a certain set of sanity checks for user
300 copy operations into compile time failures.
301
302 The copy_from_user() etc checks are there to help test if there
303 are sufficient security checks on the length argument of
304 the copy operation, by having gcc prove that the argument is
305 within bounds.
306
307 If unsure, or if you run an older (pre 4.4) gcc, say N.
308
309config DEBUG_NMI_SELFTEST 295config DEBUG_NMI_SELFTEST
310 bool "NMI Selftest" 296 bool "NMI Selftest"
311 depends on DEBUG_KERNEL && X86_LOCAL_APIC 297 depends on DEBUG_KERNEL && X86_LOCAL_APIC
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 03abf9b70011..81e94d972f1b 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -162,7 +162,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
162 fs = get_fs(); 162 fs = get_fs();
163 set_fs(KERNEL_DS); 163 set_fs(KERNEL_DS);
164 has_dumped = 1; 164 has_dumped = 1;
165 current->flags |= PF_DUMPCORE;
166 strncpy(dump.u_comm, current->comm, sizeof(current->comm)); 165 strncpy(dump.u_comm, current->comm, sizeof(current->comm));
167 dump.u_ar0 = offsetof(struct user32, regs); 166 dump.u_ar0 = offsetof(struct user32, regs);
168 dump.signal = signr; 167 dump.signal = signr;
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index 11e1152222d0..2f03ff018d36 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -37,7 +37,4 @@ do { \
37 37
38#include <asm-generic/bug.h> 38#include <asm-generic/bug.h>
39 39
40
41extern void show_regs_common(void);
42
43#endif /* _ASM_X86_BUG_H */ 40#endif /* _ASM_X86_BUG_H */
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index c8797d55b245..deb6421c9e69 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -176,26 +176,20 @@ void show_trace(struct task_struct *task, struct pt_regs *regs,
176 176
177void show_stack(struct task_struct *task, unsigned long *sp) 177void show_stack(struct task_struct *task, unsigned long *sp)
178{ 178{
179 show_stack_log_lvl(task, NULL, sp, 0, ""); 179 unsigned long bp = 0;
180}
181
182/*
183 * The architecture-independent dump_stack generator
184 */
185void dump_stack(void)
186{
187 unsigned long bp;
188 unsigned long stack; 180 unsigned long stack;
189 181
190 bp = stack_frame(current, NULL); 182 /*
191 printk("Pid: %d, comm: %.20s %s %s %.*s\n", 183 * Stack frames below this one aren't interesting. Don't show them
192 current->pid, current->comm, print_tainted(), 184 * if we're printing for %current.
193 init_utsname()->release, 185 */
194 (int)strcspn(init_utsname()->version, " "), 186 if (!sp && (!task || task == current)) {
195 init_utsname()->version); 187 sp = &stack;
196 show_trace(NULL, NULL, &stack, bp); 188 bp = stack_frame(current, NULL);
189 }
190
191 show_stack_log_lvl(task, NULL, sp, bp, "");
197} 192}
198EXPORT_SYMBOL(dump_stack);
199 193
200static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; 194static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
201static int die_owner = -1; 195static int die_owner = -1;
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 1038a417ea53..f2a1770ca176 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -86,11 +86,9 @@ void show_regs(struct pt_regs *regs)
86{ 86{
87 int i; 87 int i;
88 88
89 show_regs_print_info(KERN_EMERG);
89 __show_regs(regs, !user_mode_vm(regs)); 90 __show_regs(regs, !user_mode_vm(regs));
90 91
91 pr_emerg("Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
92 TASK_COMM_LEN, current->comm, task_pid_nr(current),
93 current_thread_info(), current, task_thread_info(current));
94 /* 92 /*
95 * When in-kernel, we also print out the stack and code at the 93 * When in-kernel, we also print out the stack and code at the
96 * time of the fault.. 94 * time of the fault..
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index b653675d5288..addb207dab92 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -249,14 +249,10 @@ void show_regs(struct pt_regs *regs)
249{ 249{
250 int i; 250 int i;
251 unsigned long sp; 251 unsigned long sp;
252 const int cpu = smp_processor_id();
253 struct task_struct *cur = current;
254 252
255 sp = regs->sp; 253 sp = regs->sp;
256 printk("CPU %d ", cpu); 254 show_regs_print_info(KERN_DEFAULT);
257 __show_regs(regs, 1); 255 __show_regs(regs, 1);
258 printk(KERN_DEFAULT "Process %s (pid: %d, threadinfo %p, task %p)\n",
259 cur->comm, cur->pid, task_thread_info(cur), cur);
260 256
261 /* 257 /*
262 * When in-kernel, we also print out the stack and code at the 258 * When in-kernel, we also print out the stack and code at the
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 14fcf55a5c5b..607af0d4d5ef 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -121,30 +121,6 @@ void exit_thread(void)
121 drop_fpu(me); 121 drop_fpu(me);
122} 122}
123 123
124void show_regs_common(void)
125{
126 const char *vendor, *product, *board;
127
128 vendor = dmi_get_system_info(DMI_SYS_VENDOR);
129 if (!vendor)
130 vendor = "";
131 product = dmi_get_system_info(DMI_PRODUCT_NAME);
132 if (!product)
133 product = "";
134
135 /* Board Name is optional */
136 board = dmi_get_system_info(DMI_BOARD_NAME);
137
138 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
139 current->pid, current->comm, print_tainted(),
140 init_utsname()->release,
141 (int)strcspn(init_utsname()->version, " "),
142 init_utsname()->version,
143 vendor, product,
144 board ? "/" : "",
145 board ? board : "");
146}
147
148void flush_thread(void) 124void flush_thread(void)
149{ 125{
150 struct task_struct *tsk = current; 126 struct task_struct *tsk = current;
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index b5a8905785e6..7305f7dfc7ab 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -84,8 +84,6 @@ void __show_regs(struct pt_regs *regs, int all)
84 savesegment(gs, gs); 84 savesegment(gs, gs);
85 } 85 }
86 86
87 show_regs_common();
88
89 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", 87 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
90 (u16)regs->cs, regs->ip, regs->flags, 88 (u16)regs->cs, regs->ip, regs->flags,
91 smp_processor_id()); 89 smp_processor_id());
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 0f49677da51e..355ae06dbf94 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -62,7 +62,6 @@ void __show_regs(struct pt_regs *regs, int all)
62 unsigned int fsindex, gsindex; 62 unsigned int fsindex, gsindex;
63 unsigned int ds, cs, es; 63 unsigned int ds, cs, es;
64 64
65 show_regs_common();
66 printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); 65 printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
67 printk_address(regs->ip, 1); 66 printk_address(regs->ip, 1);
68 printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, 67 printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 4689855c2f8a..56f7fcfe7fa2 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -996,6 +996,7 @@ void __init setup_arch(char **cmdline_p)
996 efi_init(); 996 efi_init();
997 997
998 dmi_scan_machine(); 998 dmi_scan_machine();
999 dmi_set_dump_stack_arch_desc();
999 1000
1000 /* 1001 /*
1001 * VMware detection requires dmi to be available, so this 1002 * VMware detection requires dmi to be available, so this
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index f0312d746402..3eb18acd0e40 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -689,9 +689,3 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
689 return n; 689 return n;
690} 690}
691EXPORT_SYMBOL(_copy_from_user); 691EXPORT_SYMBOL(_copy_from_user);
692
693void copy_from_user_overflow(void)
694{
695 WARN(1, "Buffer overflow detected!\n");
696}
697EXPORT_SYMBOL(copy_from_user_overflow);
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 923db5c15278..458186dab5dc 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -383,6 +383,8 @@ void show_regs(struct pt_regs * regs)
383{ 383{
384 int i, wmask; 384 int i, wmask;
385 385
386 show_regs_print_info(KERN_DEFAULT);
387
386 wmask = regs->wmask & ~1; 388 wmask = regs->wmask & ~1;
387 389
388 for (i = 0; i < 16; i++) { 390 for (i = 0; i < 16; i++) {
@@ -481,14 +483,6 @@ void show_stack(struct task_struct *task, unsigned long *sp)
481 show_trace(task, stack); 483 show_trace(task, stack);
482} 484}
483 485
484void dump_stack(void)
485{
486 show_stack(current, NULL);
487}
488
489EXPORT_SYMBOL(dump_stack);
490
491
492void show_code(unsigned int *pc) 486void show_code(unsigned int *pc)
493{ 487{
494 long i; 488 long i;
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 78a956e286e6..8d96238549fa 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -120,6 +120,8 @@ source "drivers/vfio/Kconfig"
120 120
121source "drivers/vlynq/Kconfig" 121source "drivers/vlynq/Kconfig"
122 122
123source "drivers/virt/Kconfig"
124
123source "drivers/virtio/Kconfig" 125source "drivers/virtio/Kconfig"
124 126
125source "drivers/hv/Kconfig" 127source "drivers/hv/Kconfig"
@@ -144,8 +146,6 @@ source "drivers/remoteproc/Kconfig"
144 146
145source "drivers/rpmsg/Kconfig" 147source "drivers/rpmsg/Kconfig"
146 148
147source "drivers/virt/Kconfig"
148
149source "drivers/devfreq/Kconfig" 149source "drivers/devfreq/Kconfig"
150 150
151source "drivers/extcon/Kconfig" 151source "drivers/extcon/Kconfig"
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 42e67ad6bd20..ab41be625a53 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -139,13 +139,12 @@ bail: spin_unlock_irqrestore(&emsgs_lock, flags);
139 return; 139 return;
140 } 140 }
141 141
142 mp = kmalloc(n, GFP_ATOMIC); 142 mp = kmemdup(msg, n, GFP_ATOMIC);
143 if (mp == NULL) { 143 if (mp == NULL) {
144 printk(KERN_ERR "aoe: allocation failure, len=%ld\n", n); 144 printk(KERN_ERR "aoe: allocation failure, len=%ld\n", n);
145 goto bail; 145 goto bail;
146 } 146 }
147 147
148 memcpy(mp, msg, n);
149 em->msg = mp; 148 em->msg = mp;
150 em->flags |= EMFL_VALID; 149 em->flags |= EMFL_VALID;
151 em->len = n; 150 em->len = n;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 7fecc784be01..037288e7874d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -856,6 +856,8 @@ static int __init nbd_init(void)
856 disk->queue->limits.discard_granularity = 512; 856 disk->queue->limits.discard_granularity = 512;
857 disk->queue->limits.max_discard_sectors = UINT_MAX; 857 disk->queue->limits.max_discard_sectors = UINT_MAX;
858 disk->queue->limits.discard_zeroes_data = 0; 858 disk->queue->limits.discard_zeroes_data = 0;
859 blk_queue_max_hw_sectors(disk->queue, 65536);
860 disk->queue->limits.max_sectors = 256;
859 } 861 }
860 862
861 if (register_blkdev(NBD_MAJOR, "nbd")) { 863 if (register_blkdev(NBD_MAJOR, "nbd")) {
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 32a6c5764950..cd9a6211dcad 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1485,6 +1485,7 @@ unsigned int get_random_int(void)
1485 1485
1486 return ret; 1486 return ret;
1487} 1487}
1488EXPORT_SYMBOL(get_random_int);
1488 1489
1489/* 1490/*
1490 * randomize_range() returns a start address such that 1491 * randomize_range() returns a start address such that
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 4cd392dbf115..b95159b33c39 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -22,6 +22,9 @@ static u16 __initdata dmi_ver;
22 */ 22 */
23static int dmi_initialized; 23static int dmi_initialized;
24 24
25/* DMI system identification string used during boot */
26static char dmi_ids_string[128] __initdata;
27
25static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s) 28static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
26{ 29{
27 const u8 *bp = ((u8 *) dm) + dm->length; 30 const u8 *bp = ((u8 *) dm) + dm->length;
@@ -376,99 +379,103 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
376 } 379 }
377} 380}
378 381
379static void __init print_filtered(const char *info) 382static int __init print_filtered(char *buf, size_t len, const char *info)
380{ 383{
384 int c = 0;
381 const char *p; 385 const char *p;
382 386
383 if (!info) 387 if (!info)
384 return; 388 return c;
385 389
386 for (p = info; *p; p++) 390 for (p = info; *p; p++)
387 if (isprint(*p)) 391 if (isprint(*p))
388 printk(KERN_CONT "%c", *p); 392 c += scnprintf(buf + c, len - c, "%c", *p);
389 else 393 else
390 printk(KERN_CONT "\\x%02x", *p & 0xff); 394 c += scnprintf(buf + c, len - c, "\\x%02x", *p & 0xff);
395 return c;
391} 396}
392 397
393static void __init dmi_dump_ids(void) 398static void __init dmi_format_ids(char *buf, size_t len)
394{ 399{
400 int c = 0;
395 const char *board; /* Board Name is optional */ 401 const char *board; /* Board Name is optional */
396 402
397 printk(KERN_DEBUG "DMI: "); 403 c += print_filtered(buf + c, len - c,
398 print_filtered(dmi_get_system_info(DMI_SYS_VENDOR)); 404 dmi_get_system_info(DMI_SYS_VENDOR));
399 printk(KERN_CONT " "); 405 c += scnprintf(buf + c, len - c, " ");
400 print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME)); 406 c += print_filtered(buf + c, len - c,
407 dmi_get_system_info(DMI_PRODUCT_NAME));
408
401 board = dmi_get_system_info(DMI_BOARD_NAME); 409 board = dmi_get_system_info(DMI_BOARD_NAME);
402 if (board) { 410 if (board) {
403 printk(KERN_CONT "/"); 411 c += scnprintf(buf + c, len - c, "/");
404 print_filtered(board); 412 c += print_filtered(buf + c, len - c, board);
405 } 413 }
406 printk(KERN_CONT ", BIOS "); 414 c += scnprintf(buf + c, len - c, ", BIOS ");
407 print_filtered(dmi_get_system_info(DMI_BIOS_VERSION)); 415 c += print_filtered(buf + c, len - c,
408 printk(KERN_CONT " "); 416 dmi_get_system_info(DMI_BIOS_VERSION));
409 print_filtered(dmi_get_system_info(DMI_BIOS_DATE)); 417 c += scnprintf(buf + c, len - c, " ");
410 printk(KERN_CONT "\n"); 418 c += print_filtered(buf + c, len - c,
419 dmi_get_system_info(DMI_BIOS_DATE));
411} 420}
412 421
413static int __init dmi_present(const char __iomem *p) 422static int __init dmi_present(const u8 *buf)
414{ 423{
415 u8 buf[15]; 424 int smbios_ver;
425
426 if (memcmp(buf, "_SM_", 4) == 0 &&
427 buf[5] < 32 && dmi_checksum(buf, buf[5])) {
428 smbios_ver = (buf[6] << 8) + buf[7];
429
430 /* Some BIOS report weird SMBIOS version, fix that up */
431 switch (smbios_ver) {
432 case 0x021F:
433 case 0x0221:
434 pr_debug("SMBIOS version fixup(2.%d->2.%d)\n",
435 smbios_ver & 0xFF, 3);
436 smbios_ver = 0x0203;
437 break;
438 case 0x0233:
439 pr_debug("SMBIOS version fixup(2.%d->2.%d)\n", 51, 6);
440 smbios_ver = 0x0206;
441 break;
442 }
443 } else {
444 smbios_ver = 0;
445 }
416 446
417 memcpy_fromio(buf, p, 15); 447 buf += 16;
418 if (dmi_checksum(buf, 15)) { 448
449 if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf, 15)) {
419 dmi_num = (buf[13] << 8) | buf[12]; 450 dmi_num = (buf[13] << 8) | buf[12];
420 dmi_len = (buf[7] << 8) | buf[6]; 451 dmi_len = (buf[7] << 8) | buf[6];
421 dmi_base = (buf[11] << 24) | (buf[10] << 16) | 452 dmi_base = (buf[11] << 24) | (buf[10] << 16) |
422 (buf[9] << 8) | buf[8]; 453 (buf[9] << 8) | buf[8];
423 454
424 if (dmi_walk_early(dmi_decode) == 0) { 455 if (dmi_walk_early(dmi_decode) == 0) {
425 if (dmi_ver) 456 if (smbios_ver) {
457 dmi_ver = smbios_ver;
426 pr_info("SMBIOS %d.%d present.\n", 458 pr_info("SMBIOS %d.%d present.\n",
427 dmi_ver >> 8, dmi_ver & 0xFF); 459 dmi_ver >> 8, dmi_ver & 0xFF);
428 else { 460 } else {
429 dmi_ver = (buf[14] & 0xF0) << 4 | 461 dmi_ver = (buf[14] & 0xF0) << 4 |
430 (buf[14] & 0x0F); 462 (buf[14] & 0x0F);
431 pr_info("Legacy DMI %d.%d present.\n", 463 pr_info("Legacy DMI %d.%d present.\n",
432 dmi_ver >> 8, dmi_ver & 0xFF); 464 dmi_ver >> 8, dmi_ver & 0xFF);
433 } 465 }
434 dmi_dump_ids(); 466 dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
467 printk(KERN_DEBUG "DMI: %s\n", dmi_ids_string);
435 return 0; 468 return 0;
436 } 469 }
437 } 470 }
438 dmi_ver = 0;
439 return 1;
440}
441
442static int __init smbios_present(const char __iomem *p)
443{
444 u8 buf[32];
445 471
446 memcpy_fromio(buf, p, 32);
447 if ((buf[5] < 32) && dmi_checksum(buf, buf[5])) {
448 dmi_ver = (buf[6] << 8) + buf[7];
449
450 /* Some BIOS report weird SMBIOS version, fix that up */
451 switch (dmi_ver) {
452 case 0x021F:
453 case 0x0221:
454 pr_debug("SMBIOS version fixup(2.%d->2.%d)\n",
455 dmi_ver & 0xFF, 3);
456 dmi_ver = 0x0203;
457 break;
458 case 0x0233:
459 pr_debug("SMBIOS version fixup(2.%d->2.%d)\n", 51, 6);
460 dmi_ver = 0x0206;
461 break;
462 }
463 return memcmp(p + 16, "_DMI_", 5) || dmi_present(p + 16);
464 }
465 return 1; 472 return 1;
466} 473}
467 474
468void __init dmi_scan_machine(void) 475void __init dmi_scan_machine(void)
469{ 476{
470 char __iomem *p, *q; 477 char __iomem *p, *q;
471 int rc; 478 char buf[32];
472 479
473 if (efi_enabled(EFI_CONFIG_TABLES)) { 480 if (efi_enabled(EFI_CONFIG_TABLES)) {
474 if (efi.smbios == EFI_INVALID_TABLE_ADDR) 481 if (efi.smbios == EFI_INVALID_TABLE_ADDR)
@@ -481,10 +488,10 @@ void __init dmi_scan_machine(void)
481 p = dmi_ioremap(efi.smbios, 32); 488 p = dmi_ioremap(efi.smbios, 32);
482 if (p == NULL) 489 if (p == NULL)
483 goto error; 490 goto error;
484 491 memcpy_fromio(buf, p, 32);
485 rc = smbios_present(p);
486 dmi_iounmap(p, 32); 492 dmi_iounmap(p, 32);
487 if (!rc) { 493
494 if (!dmi_present(buf)) {
488 dmi_available = 1; 495 dmi_available = 1;
489 goto out; 496 goto out;
490 } 497 }
@@ -499,18 +506,15 @@ void __init dmi_scan_machine(void)
499 if (p == NULL) 506 if (p == NULL)
500 goto error; 507 goto error;
501 508
509 memset(buf, 0, 16);
502 for (q = p; q < p + 0x10000; q += 16) { 510 for (q = p; q < p + 0x10000; q += 16) {
503 if (memcmp(q, "_SM_", 4) == 0 && q - p <= 0xFFE0) 511 memcpy_fromio(buf + 16, q, 16);
504 rc = smbios_present(q); 512 if (!dmi_present(buf)) {
505 else if (memcmp(q, "_DMI_", 5) == 0)
506 rc = dmi_present(q);
507 else
508 continue;
509 if (!rc) {
510 dmi_available = 1; 513 dmi_available = 1;
511 dmi_iounmap(p, 0x10000); 514 dmi_iounmap(p, 0x10000);
512 goto out; 515 goto out;
513 } 516 }
517 memcpy(buf, buf + 16, 16);
514 } 518 }
515 dmi_iounmap(p, 0x10000); 519 dmi_iounmap(p, 0x10000);
516 } 520 }
@@ -521,6 +525,19 @@ void __init dmi_scan_machine(void)
521} 525}
522 526
523/** 527/**
528 * dmi_set_dump_stack_arch_desc - set arch description for dump_stack()
529 *
530 * Invoke dump_stack_set_arch_desc() with DMI system information so that
531 * DMI identifiers are printed out on task dumps. Arch boot code should
532 * call this function after dmi_scan_machine() if it wants to print out DMI
533 * identifiers on task dumps.
534 */
535void __init dmi_set_dump_stack_arch_desc(void)
536{
537 dump_stack_set_arch_desc("%s", dmi_ids_string);
538}
539
540/**
524 * dmi_matches - check if dmi_system_id structure matches system DMI data 541 * dmi_matches - check if dmi_system_id structure matches system DMI data
525 * @dmi: pointer to the dmi_system_id structure to check 542 * @dmi: pointer to the dmi_system_id structure to check
526 */ 543 */
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index a7c5b31c0d50..9718661c1fb6 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -847,7 +847,7 @@ static void r592_remove(struct pci_dev *pdev)
847 dev->dummy_dma_page_physical_address); 847 dev->dummy_dma_page_physical_address);
848} 848}
849 849
850#ifdef CONFIG_PM 850#ifdef CONFIG_PM_SLEEP
851static int r592_suspend(struct device *core_dev) 851static int r592_suspend(struct device *core_dev)
852{ 852{
853 struct pci_dev *pdev = to_pci_dev(core_dev); 853 struct pci_dev *pdev = to_pci_dev(core_dev);
@@ -870,10 +870,10 @@ static int r592_resume(struct device *core_dev)
870 r592_update_card_detect(dev); 870 r592_update_card_detect(dev);
871 return 0; 871 return 0;
872} 872}
873
874SIMPLE_DEV_PM_OPS(r592_pm_ops, r592_suspend, r592_resume);
875#endif 873#endif
876 874
875static SIMPLE_DEV_PM_OPS(r592_pm_ops, r592_suspend, r592_resume);
876
877MODULE_DEVICE_TABLE(pci, r592_pci_id_tbl); 877MODULE_DEVICE_TABLE(pci, r592_pci_id_tbl);
878 878
879static struct pci_driver r852_pci_driver = { 879static struct pci_driver r852_pci_driver = {
@@ -881,9 +881,7 @@ static struct pci_driver r852_pci_driver = {
881 .id_table = r592_pci_id_tbl, 881 .id_table = r592_pci_id_tbl,
882 .probe = r592_probe, 882 .probe = r592_probe,
883 .remove = r592_remove, 883 .remove = r592_remove,
884#ifdef CONFIG_PM
885 .driver.pm = &r592_pm_ops, 884 .driver.pm = &r592_pm_ops,
886#endif
887}; 885};
888 886
889static __init int r592_module_init(void) 887static __init int r592_module_init(void)
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 5451beff183f..a60c188c2bd9 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -687,6 +687,11 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
687 } 687 }
688 size = size >> 16; 688 size = size >> 16;
689 size *= 4; 689 size *= 4;
690 if (size > sizeof(rmsg)) {
691 rcode = -EINVAL;
692 goto sg_list_cleanup;
693 }
694
690 /* Copy in the user's I2O command */ 695 /* Copy in the user's I2O command */
691 if (copy_from_user(rmsg, user_msg, size)) { 696 if (copy_from_user(rmsg, user_msg, size)) {
692 rcode = -EFAULT; 697 rcode = -EFAULT;
@@ -922,6 +927,11 @@ static int i2o_cfg_passthru(unsigned long arg)
922 } 927 }
923 size = size >> 16; 928 size = size >> 16;
924 size *= 4; 929 size *= 4;
930 if (size > sizeof(rmsg)) {
931 rcode = -EFAULT;
932 goto sg_list_cleanup;
933 }
934
925 /* Copy in the user's I2O command */ 935 /* Copy in the user's I2O command */
926 if (copy_from_user(rmsg, user_msg, size)) { 936 if (copy_from_user(rmsg, user_msg, size)) {
927 rcode = -EFAULT; 937 rcode = -EFAULT;
diff --git a/drivers/net/ethernet/ibm/emac/debug.c b/drivers/net/ethernet/ibm/emac/debug.c
index b16b4828b64d..a559f326bf63 100644
--- a/drivers/net/ethernet/ibm/emac/debug.c
+++ b/drivers/net/ethernet/ibm/emac/debug.c
@@ -245,7 +245,7 @@ static void emac_sysrq_handler(int key)
245 245
246static struct sysrq_key_op emac_sysrq_op = { 246static struct sysrq_key_op emac_sysrq_op = {
247 .handler = emac_sysrq_handler, 247 .handler = emac_sysrq_handler,
248 .help_msg = "emaC", 248 .help_msg = "emac(c)",
249 .action_msg = "Show EMAC(s) status", 249 .action_msg = "Show EMAC(s) status",
250}; 250};
251 251
diff --git a/drivers/pps/Kconfig b/drivers/pps/Kconfig
index 982d16b5a846..7512e98e9311 100644
--- a/drivers/pps/Kconfig
+++ b/drivers/pps/Kconfig
@@ -20,10 +20,10 @@ config PPS
20 20
21 To compile this driver as a module, choose M here: the module 21 To compile this driver as a module, choose M here: the module
22 will be called pps_core.ko. 22 will be called pps_core.ko.
23if PPS
23 24
24config PPS_DEBUG 25config PPS_DEBUG
25 bool "PPS debugging messages" 26 bool "PPS debugging messages"
26 depends on PPS
27 help 27 help
28 Say Y here if you want the PPS support to produce a bunch of debug 28 Say Y here if you want the PPS support to produce a bunch of debug
29 messages to the system log. Select this if you are having a 29 messages to the system log. Select this if you are having a
@@ -31,13 +31,15 @@ config PPS_DEBUG
31 31
32config NTP_PPS 32config NTP_PPS
33 bool "PPS kernel consumer support" 33 bool "PPS kernel consumer support"
34 depends on PPS && !NO_HZ 34 depends on !NO_HZ
35 help 35 help
36 This option adds support for direct in-kernel time 36 This option adds support for direct in-kernel time
37 synchronization using an external PPS signal. 37 synchronization using an external PPS signal.
38 38
39 It doesn't work on tickless systems at the moment. 39 It doesn't work on tickless systems at the moment.
40 40
41endif
42
41source drivers/pps/clients/Kconfig 43source drivers/pps/clients/Kconfig
42 44
43source drivers/pps/generators/Kconfig 45source drivers/pps/generators/Kconfig
diff --git a/drivers/pps/kc.c b/drivers/pps/kc.c
index 079e930b1938..e219db1f1c84 100644
--- a/drivers/pps/kc.c
+++ b/drivers/pps/kc.c
@@ -34,10 +34,10 @@
34 */ 34 */
35 35
36/* state variables to bind kernel consumer */ 36/* state variables to bind kernel consumer */
37DEFINE_SPINLOCK(pps_kc_hardpps_lock); 37static DEFINE_SPINLOCK(pps_kc_hardpps_lock);
38/* PPS API (RFC 2783): current source and mode for kernel consumer */ 38/* PPS API (RFC 2783): current source and mode for kernel consumer */
39struct pps_device *pps_kc_hardpps_dev; /* unique pointer to device */ 39static struct pps_device *pps_kc_hardpps_dev; /* unique pointer to device */
40int pps_kc_hardpps_mode; /* mode bits for kernel consumer */ 40static int pps_kc_hardpps_mode; /* mode bits for kernel consumer */
41 41
42/* pps_kc_bind - control PPS kernel consumer binding 42/* pps_kc_bind - control PPS kernel consumer binding
43 * @pps: the PPS source 43 * @pps: the PPS source
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index d6d9264e4ca7..943b6c134a22 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -15,6 +15,7 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/kobject.h> 16#include <linux/kobject.h>
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/string_helpers.h>
18#include <linux/sysfs.h> 19#include <linux/sysfs.h>
19#include <linux/ctype.h> 20#include <linux/ctype.h>
20 21
@@ -417,7 +418,7 @@ static ssize_t synth_direct_store(struct kobject *kobj,
417 bytes = min_t(size_t, len, 250); 418 bytes = min_t(size_t, len, 250);
418 strncpy(tmp, ptr, bytes); 419 strncpy(tmp, ptr, bytes);
419 tmp[bytes] = '\0'; 420 tmp[bytes] = '\0';
420 spk_xlate(tmp); 421 string_unescape_any_inplace(tmp);
421 synth_printf("%s", tmp); 422 synth_printf("%s", tmp);
422 ptr += bytes; 423 ptr += bytes;
423 len -= bytes; 424 len -= bytes;
@@ -605,7 +606,8 @@ ssize_t spk_var_store(struct kobject *kobj, struct kobj_attribute *attr,
605 if (param->data == NULL) 606 if (param->data == NULL)
606 return 0; 607 return 0;
607 ret = 0; 608 ret = 0;
608 cp = spk_xlate((char *) buf); 609 cp = (char *)buf;
610 string_unescape_any_inplace(cp);
609 611
610 spk_lock(flags); 612 spk_lock(flags);
611 switch (param->var_type) { 613 switch (param->var_type) {
diff --git a/drivers/staging/speakup/speakup.h b/drivers/staging/speakup/speakup.h
index c387a02fc1c2..0126f714821a 100644
--- a/drivers/staging/speakup/speakup.h
+++ b/drivers/staging/speakup/speakup.h
@@ -54,7 +54,6 @@ void spk_get_index_count(int *linecount, int *sentcount);
54extern int spk_set_key_info(const u_char *key_info, u_char *k_buffer); 54extern int spk_set_key_info(const u_char *key_info, u_char *k_buffer);
55extern char *spk_strlwr(char *s); 55extern char *spk_strlwr(char *s);
56extern char *spk_s2uchar(char *start, char *dest); 56extern char *spk_s2uchar(char *start, char *dest);
57extern char *spk_xlate(char *s);
58extern int speakup_kobj_init(void); 57extern int speakup_kobj_init(void);
59extern void speakup_kobj_exit(void); 58extern void speakup_kobj_exit(void);
60extern int spk_chartab_get_value(char *keyword); 59extern int spk_chartab_get_value(char *keyword);
diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c
index 0099cb12e560..7f6288fc2299 100644
--- a/drivers/staging/speakup/varhandlers.c
+++ b/drivers/staging/speakup/varhandlers.c
@@ -328,49 +328,3 @@ char *spk_s2uchar(char *start, char *dest)
328 *dest = (u_char)val; 328 *dest = (u_char)val;
329 return start; 329 return start;
330} 330}
331
332char *spk_xlate(char *s)
333{
334 static const char finds[] = "nrtvafe";
335 static const char subs[] = "\n\r\t\013\001\014\033";
336 static const char hx[] = "0123456789abcdefABCDEF";
337 char *p = s, *p1, *p2, c;
338 int num;
339 while ((p = strchr(p, '\\'))) {
340 p1 = p+1;
341 p2 = strchr(finds, *p1);
342 if (p2) {
343 *p++ = subs[p2-finds];
344 p1++;
345 } else if (*p1 >= '0' && *p1 <= '7') {
346 num = (*p1++)&7;
347 while (num < 32 && *p1 >= '0' && *p1 <= '7') {
348 num <<= 3;
349 num += (*p1++)&7;
350 }
351 *p++ = num;
352 } else if (*p1 == 'x' &&
353 strchr(hx, p1[1]) && strchr(hx, p1[2])) {
354 p1++;
355 c = *p1++;
356 if (c > '9')
357 c = (c - '7') & 0x0f;
358 else
359 c -= '0';
360 num = c << 4;
361 c = *p1++;
362 if (c > '9')
363 c = (c-'7')&0x0f;
364 else
365 c -= '0';
366 num += c;
367 *p++ = num;
368 } else
369 *p++ = *p1++;
370 p2 = p;
371 while (*p1)
372 *p2++ = *p1++;
373 *p2 = '\0';
374 }
375 return s;
376}
diff --git a/drivers/staging/zcache/Kconfig b/drivers/staging/zcache/Kconfig
index 05e87a1e5d93..2d7b2da3b9e0 100644
--- a/drivers/staging/zcache/Kconfig
+++ b/drivers/staging/zcache/Kconfig
@@ -1,5 +1,5 @@
1config ZCACHE 1config ZCACHE
2 bool "Dynamic compression of swap pages and clean pagecache pages" 2 tristate "Dynamic compression of swap pages and clean pagecache pages"
3 depends on CRYPTO=y && SWAP=y && CLEANCACHE && FRONTSWAP 3 depends on CRYPTO=y && SWAP=y && CLEANCACHE && FRONTSWAP
4 select CRYPTO_LZO 4 select CRYPTO_LZO
5 default n 5 default n
@@ -19,8 +19,8 @@ config ZCACHE_DEBUG
19 how zcache is doing. You probably want to set this to 'N'. 19 how zcache is doing. You probably want to set this to 'N'.
20 20
21config RAMSTER 21config RAMSTER
22 bool "Cross-machine RAM capacity sharing, aka peer-to-peer tmem" 22 tristate "Cross-machine RAM capacity sharing, aka peer-to-peer tmem"
23 depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE=y 23 depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE
24 depends on NET 24 depends on NET
25 # must ensure struct page is 8-byte aligned 25 # must ensure struct page is 8-byte aligned
26 select HAVE_ALIGNED_STRUCT_PAGE if !64BIT 26 select HAVE_ALIGNED_STRUCT_PAGE if !64BIT
diff --git a/drivers/staging/zcache/ramster.h b/drivers/staging/zcache/ramster.h
index 1b71aea2ff62..e1f91d5a0f6a 100644
--- a/drivers/staging/zcache/ramster.h
+++ b/drivers/staging/zcache/ramster.h
@@ -11,10 +11,14 @@
11#ifndef _ZCACHE_RAMSTER_H_ 11#ifndef _ZCACHE_RAMSTER_H_
12#define _ZCACHE_RAMSTER_H_ 12#define _ZCACHE_RAMSTER_H_
13 13
14#ifdef CONFIG_RAMSTER_MODULE
15#define CONFIG_RAMSTER
16#endif
17
14#ifdef CONFIG_RAMSTER 18#ifdef CONFIG_RAMSTER
15#include "ramster/ramster.h" 19#include "ramster/ramster.h"
16#else 20#else
17static inline void ramster_init(bool x, bool y, bool z) 21static inline void ramster_init(bool x, bool y, bool z, bool w)
18{ 22{
19} 23}
20 24
diff --git a/drivers/staging/zcache/ramster/debug.c b/drivers/staging/zcache/ramster/debug.c
index bf34133cc631..327e4f0d98e1 100644
--- a/drivers/staging/zcache/ramster/debug.c
+++ b/drivers/staging/zcache/ramster/debug.c
@@ -43,7 +43,7 @@ static struct debug_entry {
43}; 43};
44#undef ATTR 44#undef ATTR
45 45
46int __init ramster_debugfs_init(void) 46int ramster_debugfs_init(void)
47{ 47{
48 int i; 48 int i;
49 struct dentry *root = debugfs_create_dir("ramster", NULL); 49 struct dentry *root = debugfs_create_dir("ramster", NULL);
diff --git a/drivers/staging/zcache/ramster/nodemanager.c b/drivers/staging/zcache/ramster/nodemanager.c
index c0f48158735d..2cfe93342c0d 100644
--- a/drivers/staging/zcache/ramster/nodemanager.c
+++ b/drivers/staging/zcache/ramster/nodemanager.c
@@ -949,7 +949,7 @@ static void __exit exit_r2nm(void)
949 r2hb_exit(); 949 r2hb_exit();
950} 950}
951 951
952static int __init init_r2nm(void) 952int r2nm_init(void)
953{ 953{
954 int ret = -1; 954 int ret = -1;
955 955
@@ -986,10 +986,11 @@ out_r2hb:
986out: 986out:
987 return ret; 987 return ret;
988} 988}
989EXPORT_SYMBOL_GPL(r2nm_init);
989 990
990MODULE_AUTHOR("Oracle"); 991MODULE_AUTHOR("Oracle");
991MODULE_LICENSE("GPL"); 992MODULE_LICENSE("GPL");
992 993
993/* module_init(init_r2nm) */ 994#ifndef CONFIG_RAMSTER_MODULE
994late_initcall(init_r2nm); 995late_initcall(r2nm_init);
995/* module_exit(exit_r2nm) */ 996#endif
diff --git a/drivers/staging/zcache/ramster/ramster.c b/drivers/staging/zcache/ramster/ramster.c
index 87816279ce3c..b18b887db79f 100644
--- a/drivers/staging/zcache/ramster/ramster.c
+++ b/drivers/staging/zcache/ramster/ramster.c
@@ -121,6 +121,7 @@ int ramster_do_preload_flnode(struct tmem_pool *pool)
121 kmem_cache_free(ramster_flnode_cache, flnode); 121 kmem_cache_free(ramster_flnode_cache, flnode);
122 return ret; 122 return ret;
123} 123}
124EXPORT_SYMBOL_GPL(ramster_do_preload_flnode);
124 125
125/* 126/*
126 * Called by the message handler after a (still compressed) page has been 127 * Called by the message handler after a (still compressed) page has been
@@ -388,6 +389,7 @@ void *ramster_pampd_free(void *pampd, struct tmem_pool *pool,
388 } 389 }
389 return local_pampd; 390 return local_pampd;
390} 391}
392EXPORT_SYMBOL_GPL(ramster_pampd_free);
391 393
392void ramster_count_foreign_pages(bool eph, int count) 394void ramster_count_foreign_pages(bool eph, int count)
393{ 395{
@@ -408,6 +410,7 @@ void ramster_count_foreign_pages(bool eph, int count)
408 } 410 }
409 } 411 }
410} 412}
413EXPORT_SYMBOL_GPL(ramster_count_foreign_pages);
411 414
412/* 415/*
413 * For now, just push over a few pages every few seconds to 416 * For now, just push over a few pages every few seconds to
@@ -593,7 +596,7 @@ requeue:
593 ramster_remotify_queue_delayed_work(HZ); 596 ramster_remotify_queue_delayed_work(HZ);
594} 597}
595 598
596void __init ramster_remotify_init(void) 599void ramster_remotify_init(void)
597{ 600{
598 unsigned long n = 60UL; 601 unsigned long n = 60UL;
599 ramster_remotify_workqueue = 602 ramster_remotify_workqueue =
@@ -768,8 +771,10 @@ static bool frontswap_selfshrinking __read_mostly;
768static void selfshrink_process(struct work_struct *work); 771static void selfshrink_process(struct work_struct *work);
769static DECLARE_DELAYED_WORK(selfshrink_worker, selfshrink_process); 772static DECLARE_DELAYED_WORK(selfshrink_worker, selfshrink_process);
770 773
774#ifndef CONFIG_RAMSTER_MODULE
771/* Enable/disable with kernel boot option. */ 775/* Enable/disable with kernel boot option. */
772static bool use_frontswap_selfshrink __initdata = true; 776static bool use_frontswap_selfshrink = true;
777#endif
773 778
774/* 779/*
775 * The default values for the following parameters were deemed reasonable 780 * The default values for the following parameters were deemed reasonable
@@ -824,6 +829,7 @@ static void frontswap_selfshrink(void)
824 frontswap_shrink(tgt_frontswap_pages); 829 frontswap_shrink(tgt_frontswap_pages);
825} 830}
826 831
832#ifndef CONFIG_RAMSTER_MODULE
827static int __init ramster_nofrontswap_selfshrink_setup(char *s) 833static int __init ramster_nofrontswap_selfshrink_setup(char *s)
828{ 834{
829 use_frontswap_selfshrink = false; 835 use_frontswap_selfshrink = false;
@@ -831,6 +837,7 @@ static int __init ramster_nofrontswap_selfshrink_setup(char *s)
831} 837}
832 838
833__setup("noselfshrink", ramster_nofrontswap_selfshrink_setup); 839__setup("noselfshrink", ramster_nofrontswap_selfshrink_setup);
840#endif
834 841
835static void selfshrink_process(struct work_struct *work) 842static void selfshrink_process(struct work_struct *work)
836{ 843{
@@ -849,6 +856,7 @@ void ramster_cpu_up(int cpu)
849 per_cpu(ramster_remoteputmem1, cpu) = p1; 856 per_cpu(ramster_remoteputmem1, cpu) = p1;
850 per_cpu(ramster_remoteputmem2, cpu) = p2; 857 per_cpu(ramster_remoteputmem2, cpu) = p2;
851} 858}
859EXPORT_SYMBOL_GPL(ramster_cpu_up);
852 860
853void ramster_cpu_down(int cpu) 861void ramster_cpu_down(int cpu)
854{ 862{
@@ -864,6 +872,7 @@ void ramster_cpu_down(int cpu)
864 kp->flnode = NULL; 872 kp->flnode = NULL;
865 } 873 }
866} 874}
875EXPORT_SYMBOL_GPL(ramster_cpu_down);
867 876
868void ramster_register_pamops(struct tmem_pamops *pamops) 877void ramster_register_pamops(struct tmem_pamops *pamops)
869{ 878{
@@ -874,9 +883,11 @@ void ramster_register_pamops(struct tmem_pamops *pamops)
874 pamops->repatriate = ramster_pampd_repatriate; 883 pamops->repatriate = ramster_pampd_repatriate;
875 pamops->repatriate_preload = ramster_pampd_repatriate_preload; 884 pamops->repatriate_preload = ramster_pampd_repatriate_preload;
876} 885}
886EXPORT_SYMBOL_GPL(ramster_register_pamops);
877 887
878void __init ramster_init(bool cleancache, bool frontswap, 888void ramster_init(bool cleancache, bool frontswap,
879 bool frontswap_exclusive_gets) 889 bool frontswap_exclusive_gets,
890 bool frontswap_selfshrink)
880{ 891{
881 int ret = 0; 892 int ret = 0;
882 893
@@ -891,10 +902,17 @@ void __init ramster_init(bool cleancache, bool frontswap,
891 if (ret) 902 if (ret)
892 pr_err("ramster: can't create sysfs for ramster\n"); 903 pr_err("ramster: can't create sysfs for ramster\n");
893 (void)r2net_register_handlers(); 904 (void)r2net_register_handlers();
905#ifdef CONFIG_RAMSTER_MODULE
906 ret = r2nm_init();
907 if (ret)
908 pr_err("ramster: can't init r2net\n");
909 frontswap_selfshrinking = frontswap_selfshrink;
910#else
911 frontswap_selfshrinking = use_frontswap_selfshrink;
912#endif
894 INIT_LIST_HEAD(&ramster_rem_op_list); 913 INIT_LIST_HEAD(&ramster_rem_op_list);
895 ramster_flnode_cache = kmem_cache_create("ramster_flnode", 914 ramster_flnode_cache = kmem_cache_create("ramster_flnode",
896 sizeof(struct flushlist_node), 0, 0, NULL); 915 sizeof(struct flushlist_node), 0, 0, NULL);
897 frontswap_selfshrinking = use_frontswap_selfshrink;
898 if (frontswap_selfshrinking) { 916 if (frontswap_selfshrinking) {
899 pr_info("ramster: Initializing frontswap selfshrink driver.\n"); 917 pr_info("ramster: Initializing frontswap selfshrink driver.\n");
900 schedule_delayed_work(&selfshrink_worker, 918 schedule_delayed_work(&selfshrink_worker,
@@ -902,3 +920,4 @@ void __init ramster_init(bool cleancache, bool frontswap,
902 } 920 }
903 ramster_remotify_init(); 921 ramster_remotify_init();
904} 922}
923EXPORT_SYMBOL_GPL(ramster_init);
diff --git a/drivers/staging/zcache/ramster/ramster.h b/drivers/staging/zcache/ramster/ramster.h
index 12ae56f09ca4..6d41a7a772e3 100644
--- a/drivers/staging/zcache/ramster/ramster.h
+++ b/drivers/staging/zcache/ramster/ramster.h
@@ -147,7 +147,7 @@ extern int r2net_register_handlers(void);
147extern int r2net_remote_target_node_set(int); 147extern int r2net_remote_target_node_set(int);
148 148
149extern int ramster_remotify_pageframe(bool); 149extern int ramster_remotify_pageframe(bool);
150extern void ramster_init(bool, bool, bool); 150extern void ramster_init(bool, bool, bool, bool);
151extern void ramster_register_pamops(struct tmem_pamops *); 151extern void ramster_register_pamops(struct tmem_pamops *);
152extern int ramster_localify(int, struct tmem_oid *oidp, uint32_t, char *, 152extern int ramster_localify(int, struct tmem_oid *oidp, uint32_t, char *,
153 unsigned int, void *); 153 unsigned int, void *);
diff --git a/drivers/staging/zcache/ramster/ramster_nodemanager.h b/drivers/staging/zcache/ramster/ramster_nodemanager.h
index 49f879d943ab..dbaae34ea613 100644
--- a/drivers/staging/zcache/ramster/ramster_nodemanager.h
+++ b/drivers/staging/zcache/ramster/ramster_nodemanager.h
@@ -36,4 +36,6 @@
36/* host name, group name, cluster name all 64 bytes */ 36/* host name, group name, cluster name all 64 bytes */
37#define R2NM_MAX_NAME_LEN 64 /* __NEW_UTS_LEN */ 37#define R2NM_MAX_NAME_LEN 64 /* __NEW_UTS_LEN */
38 38
39extern int r2nm_init(void);
40
39#endif /* _RAMSTER_NODEMANAGER_H */ 41#endif /* _RAMSTER_NODEMANAGER_H */
diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
index a2b7e03b6062..d7e51e4152eb 100644
--- a/drivers/staging/zcache/tmem.c
+++ b/drivers/staging/zcache/tmem.c
@@ -35,7 +35,8 @@
35#include <linux/list.h> 35#include <linux/list.h>
36#include <linux/spinlock.h> 36#include <linux/spinlock.h>
37#include <linux/atomic.h> 37#include <linux/atomic.h>
38#ifdef CONFIG_RAMSTER 38#include <linux/export.h>
39#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
39#include <linux/delay.h> 40#include <linux/delay.h>
40#endif 41#endif
41 42
@@ -641,6 +642,7 @@ void *tmem_localify_get_pampd(struct tmem_pool *pool, struct tmem_oid *oidp,
641 /* note, hashbucket remains locked */ 642 /* note, hashbucket remains locked */
642 return pampd; 643 return pampd;
643} 644}
645EXPORT_SYMBOL_GPL(tmem_localify_get_pampd);
644 646
645void tmem_localify_finish(struct tmem_obj *obj, uint32_t index, 647void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
646 void *pampd, void *saved_hb, bool delete) 648 void *pampd, void *saved_hb, bool delete)
@@ -658,6 +660,7 @@ void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
658 } 660 }
659 spin_unlock(&hb->lock); 661 spin_unlock(&hb->lock);
660} 662}
663EXPORT_SYMBOL_GPL(tmem_localify_finish);
661 664
662/* 665/*
663 * For ramster only. Helper function to support asynchronous tmem_get. 666 * For ramster only. Helper function to support asynchronous tmem_get.
@@ -719,6 +722,7 @@ out:
719 spin_unlock(&hb->lock); 722 spin_unlock(&hb->lock);
720 return ret; 723 return ret;
721} 724}
725EXPORT_SYMBOL_GPL(tmem_replace);
722#endif 726#endif
723 727
724/* 728/*
diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
index adbe5a8f28aa..d128ce290f1f 100644
--- a/drivers/staging/zcache/tmem.h
+++ b/drivers/staging/zcache/tmem.h
@@ -126,7 +126,7 @@ static inline unsigned tmem_oid_hash(struct tmem_oid *oidp)
126 TMEM_HASH_BUCKET_BITS); 126 TMEM_HASH_BUCKET_BITS);
127} 127}
128 128
129#ifdef CONFIG_RAMSTER 129#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
130struct tmem_xhandle { 130struct tmem_xhandle {
131 uint8_t client_id; 131 uint8_t client_id;
132 uint8_t xh_data_cksum; 132 uint8_t xh_data_cksum;
@@ -171,7 +171,7 @@ struct tmem_obj {
171 unsigned int objnode_tree_height; 171 unsigned int objnode_tree_height;
172 unsigned long objnode_count; 172 unsigned long objnode_count;
173 long pampd_count; 173 long pampd_count;
174#ifdef CONFIG_RAMSTER 174#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
175 /* 175 /*
176 * for current design of ramster, all pages belonging to 176 * for current design of ramster, all pages belonging to
177 * an object reside on the same remotenode and extra is 177 * an object reside on the same remotenode and extra is
@@ -215,7 +215,7 @@ struct tmem_pamops {
215 uint32_t); 215 uint32_t);
216 void (*free)(void *, struct tmem_pool *, 216 void (*free)(void *, struct tmem_pool *,
217 struct tmem_oid *, uint32_t, bool); 217 struct tmem_oid *, uint32_t, bool);
218#ifdef CONFIG_RAMSTER 218#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
219 void (*new_obj)(struct tmem_obj *); 219 void (*new_obj)(struct tmem_obj *);
220 void (*free_obj)(struct tmem_pool *, struct tmem_obj *, bool); 220 void (*free_obj)(struct tmem_pool *, struct tmem_obj *, bool);
221 void *(*repatriate_preload)(void *, struct tmem_pool *, 221 void *(*repatriate_preload)(void *, struct tmem_pool *,
@@ -247,7 +247,7 @@ extern int tmem_flush_page(struct tmem_pool *, struct tmem_oid *,
247extern int tmem_flush_object(struct tmem_pool *, struct tmem_oid *); 247extern int tmem_flush_object(struct tmem_pool *, struct tmem_oid *);
248extern int tmem_destroy_pool(struct tmem_pool *); 248extern int tmem_destroy_pool(struct tmem_pool *);
249extern void tmem_new_pool(struct tmem_pool *, uint32_t); 249extern void tmem_new_pool(struct tmem_pool *, uint32_t);
250#ifdef CONFIG_RAMSTER 250#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
251extern int tmem_replace(struct tmem_pool *, struct tmem_oid *, uint32_t index, 251extern int tmem_replace(struct tmem_pool *, struct tmem_oid *, uint32_t index,
252 void *); 252 void *);
253extern void *tmem_localify_get_pampd(struct tmem_pool *, struct tmem_oid *, 253extern void *tmem_localify_get_pampd(struct tmem_pool *, struct tmem_oid *,
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index e23d814b5392..522cb8e55142 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -37,8 +37,10 @@
37#include "debug.h" 37#include "debug.h"
38#ifdef CONFIG_RAMSTER 38#ifdef CONFIG_RAMSTER
39static bool ramster_enabled __read_mostly; 39static bool ramster_enabled __read_mostly;
40static int disable_frontswap_selfshrink;
40#else 41#else
41#define ramster_enabled false 42#define ramster_enabled false
43#define disable_frontswap_selfshrink 0
42#endif 44#endif
43 45
44#ifndef __PG_WAS_ACTIVE 46#ifndef __PG_WAS_ACTIVE
@@ -81,8 +83,12 @@ static char *namestr __read_mostly = "zcache";
81 (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC) 83 (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
82 84
83/* crypto API for zcache */ 85/* crypto API for zcache */
86#ifdef CONFIG_ZCACHE_MODULE
87static char *zcache_comp_name = "lzo";
88#else
84#define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME 89#define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
85static char zcache_comp_name[ZCACHE_COMP_NAME_SZ] __read_mostly; 90static char zcache_comp_name[ZCACHE_COMP_NAME_SZ] __read_mostly;
91#endif
86static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms __read_mostly; 92static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms __read_mostly;
87 93
88enum comp_op { 94enum comp_op {
@@ -1576,9 +1582,9 @@ static struct cleancache_ops zcache_cleancache_ops = {
1576 .init_fs = zcache_cleancache_init_fs 1582 .init_fs = zcache_cleancache_init_fs
1577}; 1583};
1578 1584
1579struct cleancache_ops zcache_cleancache_register_ops(void) 1585struct cleancache_ops *zcache_cleancache_register_ops(void)
1580{ 1586{
1581 struct cleancache_ops old_ops = 1587 struct cleancache_ops *old_ops =
1582 cleancache_register_ops(&zcache_cleancache_ops); 1588 cleancache_register_ops(&zcache_cleancache_ops);
1583 1589
1584 return old_ops; 1590 return old_ops;
@@ -1707,9 +1713,9 @@ static struct frontswap_ops zcache_frontswap_ops = {
1707 .init = zcache_frontswap_init 1713 .init = zcache_frontswap_init
1708}; 1714};
1709 1715
1710struct frontswap_ops zcache_frontswap_register_ops(void) 1716struct frontswap_ops *zcache_frontswap_register_ops(void)
1711{ 1717{
1712 struct frontswap_ops old_ops = 1718 struct frontswap_ops *old_ops =
1713 frontswap_register_ops(&zcache_frontswap_ops); 1719 frontswap_register_ops(&zcache_frontswap_ops);
1714 1720
1715 return old_ops; 1721 return old_ops;
@@ -1721,6 +1727,7 @@ struct frontswap_ops zcache_frontswap_register_ops(void)
1721 * OR NOTHING HAPPENS! 1727 * OR NOTHING HAPPENS!
1722 */ 1728 */
1723 1729
1730#ifndef CONFIG_ZCACHE_MODULE
1724static int __init enable_zcache(char *s) 1731static int __init enable_zcache(char *s)
1725{ 1732{
1726 zcache_enabled = true; 1733 zcache_enabled = true;
@@ -1787,18 +1794,27 @@ static int __init enable_zcache_compressor(char *s)
1787 return 1; 1794 return 1;
1788} 1795}
1789__setup("zcache=", enable_zcache_compressor); 1796__setup("zcache=", enable_zcache_compressor);
1797#endif
1790 1798
1791 1799
1792static int __init zcache_comp_init(void) 1800static int zcache_comp_init(void)
1793{ 1801{
1794 int ret = 0; 1802 int ret = 0;
1795 1803
1796 /* check crypto algorithm */ 1804 /* check crypto algorithm */
1805#ifdef CONFIG_ZCACHE_MODULE
1806 ret = crypto_has_comp(zcache_comp_name, 0, 0);
1807 if (!ret) {
1808 ret = -1;
1809 goto out;
1810 }
1811#else
1797 if (*zcache_comp_name != '\0') { 1812 if (*zcache_comp_name != '\0') {
1798 ret = crypto_has_comp(zcache_comp_name, 0, 0); 1813 ret = crypto_has_comp(zcache_comp_name, 0, 0);
1799 if (!ret) 1814 if (!ret)
1800 pr_info("zcache: %s not supported\n", 1815 pr_info("zcache: %s not supported\n",
1801 zcache_comp_name); 1816 zcache_comp_name);
1817 goto out;
1802 } 1818 }
1803 if (!ret) 1819 if (!ret)
1804 strcpy(zcache_comp_name, "lzo"); 1820 strcpy(zcache_comp_name, "lzo");
@@ -1807,6 +1823,7 @@ static int __init zcache_comp_init(void)
1807 ret = 1; 1823 ret = 1;
1808 goto out; 1824 goto out;
1809 } 1825 }
1826#endif
1810 pr_info("zcache: using %s compressor\n", zcache_comp_name); 1827 pr_info("zcache: using %s compressor\n", zcache_comp_name);
1811 1828
1812 /* alloc percpu transforms */ 1829 /* alloc percpu transforms */
@@ -1818,10 +1835,13 @@ out:
1818 return ret; 1835 return ret;
1819} 1836}
1820 1837
1821static int __init zcache_init(void) 1838static int zcache_init(void)
1822{ 1839{
1823 int ret = 0; 1840 int ret = 0;
1824 1841
1842#ifdef CONFIG_ZCACHE_MODULE
1843 zcache_enabled = 1;
1844#endif
1825 if (ramster_enabled) { 1845 if (ramster_enabled) {
1826 namestr = "ramster"; 1846 namestr = "ramster";
1827 ramster_register_pamops(&zcache_pamops); 1847 ramster_register_pamops(&zcache_pamops);
@@ -1860,7 +1880,7 @@ static int __init zcache_init(void)
1860 } 1880 }
1861 zbud_init(); 1881 zbud_init();
1862 if (zcache_enabled && !disable_cleancache) { 1882 if (zcache_enabled && !disable_cleancache) {
1863 struct cleancache_ops old_ops; 1883 struct cleancache_ops *old_ops;
1864 1884
1865 register_shrinker(&zcache_shrinker); 1885 register_shrinker(&zcache_shrinker);
1866 old_ops = zcache_cleancache_register_ops(); 1886 old_ops = zcache_cleancache_register_ops();
@@ -1870,11 +1890,11 @@ static int __init zcache_init(void)
1870 pr_info("%s: cleancache: ignorenonactive = %d\n", 1890 pr_info("%s: cleancache: ignorenonactive = %d\n",
1871 namestr, !disable_cleancache_ignore_nonactive); 1891 namestr, !disable_cleancache_ignore_nonactive);
1872#endif 1892#endif
1873 if (old_ops.init_fs != NULL) 1893 if (old_ops != NULL)
1874 pr_warn("%s: cleancache_ops overridden\n", namestr); 1894 pr_warn("%s: cleancache_ops overridden\n", namestr);
1875 } 1895 }
1876 if (zcache_enabled && !disable_frontswap) { 1896 if (zcache_enabled && !disable_frontswap) {
1877 struct frontswap_ops old_ops; 1897 struct frontswap_ops *old_ops;
1878 1898
1879 old_ops = zcache_frontswap_register_ops(); 1899 old_ops = zcache_frontswap_register_ops();
1880 if (frontswap_has_exclusive_gets) 1900 if (frontswap_has_exclusive_gets)
@@ -1886,14 +1906,36 @@ static int __init zcache_init(void)
1886 namestr, frontswap_has_exclusive_gets, 1906 namestr, frontswap_has_exclusive_gets,
1887 !disable_frontswap_ignore_nonactive); 1907 !disable_frontswap_ignore_nonactive);
1888#endif 1908#endif
1889 if (old_ops.init != NULL) 1909 if (IS_ERR(old_ops) || old_ops) {
1910 if (IS_ERR(old_ops))
1911 return PTR_RET(old_ops);
1890 pr_warn("%s: frontswap_ops overridden\n", namestr); 1912 pr_warn("%s: frontswap_ops overridden\n", namestr);
1913 }
1891 } 1914 }
1892 if (ramster_enabled) 1915 if (ramster_enabled)
1893 ramster_init(!disable_cleancache, !disable_frontswap, 1916 ramster_init(!disable_cleancache, !disable_frontswap,
1894 frontswap_has_exclusive_gets); 1917 frontswap_has_exclusive_gets,
1918 !disable_frontswap_selfshrink);
1895out: 1919out:
1896 return ret; 1920 return ret;
1897} 1921}
1898 1922
1923#ifdef CONFIG_ZCACHE_MODULE
1924#ifdef CONFIG_RAMSTER
1925module_param(ramster_enabled, int, S_IRUGO);
1926module_param(disable_frontswap_selfshrink, int, S_IRUGO);
1927#endif
1928module_param(disable_cleancache, int, S_IRUGO);
1929module_param(disable_frontswap, int, S_IRUGO);
1930#ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS
1931module_param(frontswap_has_exclusive_gets, bool, S_IRUGO);
1932#endif
1933module_param(disable_frontswap_ignore_nonactive, int, S_IRUGO);
1934module_param(zcache_comp_name, charp, S_IRUGO);
1935module_init(zcache_init);
1936MODULE_LICENSE("GPL");
1937MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
1938MODULE_DESCRIPTION("In-kernel compression of cleancache/frontswap pages");
1939#else
1899late_initcall(zcache_init); 1940late_initcall(zcache_init);
1941#endif
diff --git a/drivers/staging/zcache/zcache.h b/drivers/staging/zcache/zcache.h
index 81722b33b087..849120095e79 100644
--- a/drivers/staging/zcache/zcache.h
+++ b/drivers/staging/zcache/zcache.h
@@ -39,7 +39,7 @@ extern int zcache_flush_page(int, int, struct tmem_oid *, uint32_t);
39extern int zcache_flush_object(int, int, struct tmem_oid *); 39extern int zcache_flush_object(int, int, struct tmem_oid *);
40extern void zcache_decompress_to_page(char *, unsigned int, struct page *); 40extern void zcache_decompress_to_page(char *, unsigned int, struct page *);
41 41
42#ifdef CONFIG_RAMSTER 42#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
43extern void *zcache_pampd_create(char *, unsigned int, bool, int, 43extern void *zcache_pampd_create(char *, unsigned int, bool, int,
44 struct tmem_handle *); 44 struct tmem_handle *);
45int zcache_autocreate_pool(unsigned int cli_id, unsigned int pool_id, bool eph); 45int zcache_autocreate_pool(unsigned int cli_id, unsigned int pool_id, bool eph);
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index 4797228747fb..8623577bbbe7 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -933,14 +933,11 @@ static int realtek_cr_autosuspend_setup(struct us_data *us)
933 933
934static void realtek_cr_destructor(void *extra) 934static void realtek_cr_destructor(void *extra)
935{ 935{
936 struct rts51x_chip *chip = (struct rts51x_chip *)extra; 936 struct rts51x_chip *chip = extra;
937 struct us_data *us;
938 937
939 if (!chip) 938 if (!chip)
940 return; 939 return;
941 940
942 us = chip->us;
943
944#ifdef CONFIG_REALTEK_AUTOPM 941#ifdef CONFIG_REALTEK_AUTOPM
945 if (ss_en) { 942 if (ss_en) {
946 del_timer(&chip->rts51x_suspend_timer); 943 del_timer(&chip->rts51x_suspend_timer);
diff --git a/drivers/w1/slaves/w1_bq27000.c b/drivers/w1/slaves/w1_bq27000.c
index 773dca5beafe..afbefed5f2c9 100644
--- a/drivers/w1/slaves/w1_bq27000.c
+++ b/drivers/w1/slaves/w1_bq27000.c
@@ -57,6 +57,8 @@ static int w1_bq27000_add_slave(struct w1_slave *sl)
57 ret = platform_device_add_data(pdev, 57 ret = platform_device_add_data(pdev,
58 &bq27000_battery_info, 58 &bq27000_battery_info,
59 sizeof(bq27000_battery_info)); 59 sizeof(bq27000_battery_info));
60 if (ret)
61 goto pdev_add_failed;
60 pdev->dev.parent = &sl->dev; 62 pdev->dev.parent = &sl->dev;
61 63
62 ret = platform_device_add(pdev); 64 ret = platform_device_add(pdev);
@@ -68,7 +70,7 @@ static int w1_bq27000_add_slave(struct w1_slave *sl)
68 goto success; 70 goto success;
69 71
70pdev_add_failed: 72pdev_add_failed:
71 platform_device_unregister(pdev); 73 platform_device_put(pdev);
72success: 74success:
73 return ret; 75 return ret;
74} 76}
diff --git a/drivers/w1/slaves/w1_ds2760.c b/drivers/w1/slaves/w1_ds2760.c
index aa7bd5fa2fa8..e86a69dc411e 100644
--- a/drivers/w1/slaves/w1_ds2760.c
+++ b/drivers/w1/slaves/w1_ds2760.c
@@ -148,8 +148,9 @@ static int w1_ds2760_add_slave(struct w1_slave *sl)
148 goto success; 148 goto success;
149 149
150bin_attr_failed: 150bin_attr_failed:
151 platform_device_del(pdev);
151pdev_add_failed: 152pdev_add_failed:
152 platform_device_unregister(pdev); 153 platform_device_put(pdev);
153pdev_alloc_failed: 154pdev_alloc_failed:
154 ida_simple_remove(&bat_ida, id); 155 ida_simple_remove(&bat_ida, id);
155noid: 156noid:
diff --git a/drivers/w1/slaves/w1_ds2780.c b/drivers/w1/slaves/w1_ds2780.c
index 7b09307de0ef..98ed9c49cf50 100644
--- a/drivers/w1/slaves/w1_ds2780.c
+++ b/drivers/w1/slaves/w1_ds2780.c
@@ -141,8 +141,9 @@ static int w1_ds2780_add_slave(struct w1_slave *sl)
141 return 0; 141 return 0;
142 142
143bin_attr_failed: 143bin_attr_failed:
144 platform_device_del(pdev);
144pdev_add_failed: 145pdev_add_failed:
145 platform_device_unregister(pdev); 146 platform_device_put(pdev);
146pdev_alloc_failed: 147pdev_alloc_failed:
147 ida_simple_remove(&bat_ida, id); 148 ida_simple_remove(&bat_ida, id);
148noid: 149noid:
diff --git a/drivers/w1/slaves/w1_ds2781.c b/drivers/w1/slaves/w1_ds2781.c
index 877daf74159c..5140d7be67ab 100644
--- a/drivers/w1/slaves/w1_ds2781.c
+++ b/drivers/w1/slaves/w1_ds2781.c
@@ -139,8 +139,9 @@ static int w1_ds2781_add_slave(struct w1_slave *sl)
139 return 0; 139 return 0;
140 140
141bin_attr_failed: 141bin_attr_failed:
142 platform_device_del(pdev);
142pdev_add_failed: 143pdev_add_failed:
143 platform_device_unregister(pdev); 144 platform_device_put(pdev);
144pdev_alloc_failed: 145pdev_alloc_failed:
145 ida_simple_remove(&bat_ida, id); 146 ida_simple_remove(&bat_ida, id);
146noid: 147noid:
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 67af155cf602..dd4d9cb86243 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -145,9 +145,9 @@ config SWIOTLB_XEN
145 select SWIOTLB 145 select SWIOTLB
146 146
147config XEN_TMEM 147config XEN_TMEM
148 bool 148 tristate
149 depends on !ARM 149 depends on !ARM
150 default y if (CLEANCACHE || FRONTSWAP) 150 default m if (CLEANCACHE || FRONTSWAP)
151 help 151 help
152 Shim to interface in-kernel Transcendent Memory hooks 152 Shim to interface in-kernel Transcendent Memory hooks
153 (e.g. cleancache and frontswap) to Xen tmem hypercalls. 153 (e.g. cleancache and frontswap) to Xen tmem hypercalls.
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index 3ee836d42581..e3600be4e7fa 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -5,6 +5,7 @@
5 * Author: Dan Magenheimer 5 * Author: Dan Magenheimer
6 */ 6 */
7 7
8#include <linux/module.h>
8#include <linux/kernel.h> 9#include <linux/kernel.h>
9#include <linux/types.h> 10#include <linux/types.h>
10#include <linux/init.h> 11#include <linux/init.h>
@@ -128,6 +129,7 @@ static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
128 return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0); 129 return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
129} 130}
130 131
132#ifndef CONFIG_XEN_TMEM_MODULE
131bool __read_mostly tmem_enabled = false; 133bool __read_mostly tmem_enabled = false;
132 134
133static int __init enable_tmem(char *s) 135static int __init enable_tmem(char *s)
@@ -136,6 +138,7 @@ static int __init enable_tmem(char *s)
136 return 1; 138 return 1;
137} 139}
138__setup("tmem", enable_tmem); 140__setup("tmem", enable_tmem);
141#endif
139 142
140#ifdef CONFIG_CLEANCACHE 143#ifdef CONFIG_CLEANCACHE
141static int xen_tmem_destroy_pool(u32 pool_id) 144static int xen_tmem_destroy_pool(u32 pool_id)
@@ -227,16 +230,21 @@ static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
227 return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize); 230 return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
228} 231}
229 232
230static bool __initdata use_cleancache = true; 233static bool disable_cleancache __read_mostly;
231 234static bool disable_selfballooning __read_mostly;
235#ifdef CONFIG_XEN_TMEM_MODULE
236module_param(disable_cleancache, bool, S_IRUGO);
237module_param(disable_selfballooning, bool, S_IRUGO);
238#else
232static int __init no_cleancache(char *s) 239static int __init no_cleancache(char *s)
233{ 240{
234 use_cleancache = false; 241 disable_cleancache = true;
235 return 1; 242 return 1;
236} 243}
237__setup("nocleancache", no_cleancache); 244__setup("nocleancache", no_cleancache);
245#endif
238 246
239static struct cleancache_ops __initdata tmem_cleancache_ops = { 247static struct cleancache_ops tmem_cleancache_ops = {
240 .put_page = tmem_cleancache_put_page, 248 .put_page = tmem_cleancache_put_page,
241 .get_page = tmem_cleancache_get_page, 249 .get_page = tmem_cleancache_get_page,
242 .invalidate_page = tmem_cleancache_flush_page, 250 .invalidate_page = tmem_cleancache_flush_page,
@@ -353,54 +361,71 @@ static void tmem_frontswap_init(unsigned ignored)
353 xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE); 361 xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
354} 362}
355 363
356static bool __initdata use_frontswap = true; 364static bool disable_frontswap __read_mostly;
357 365static bool disable_frontswap_selfshrinking __read_mostly;
366#ifdef CONFIG_XEN_TMEM_MODULE
367module_param(disable_frontswap, bool, S_IRUGO);
368module_param(disable_frontswap_selfshrinking, bool, S_IRUGO);
369#else
358static int __init no_frontswap(char *s) 370static int __init no_frontswap(char *s)
359{ 371{
360 use_frontswap = false; 372 disable_frontswap = true;
361 return 1; 373 return 1;
362} 374}
363__setup("nofrontswap", no_frontswap); 375__setup("nofrontswap", no_frontswap);
376#endif
364 377
365static struct frontswap_ops __initdata tmem_frontswap_ops = { 378static struct frontswap_ops tmem_frontswap_ops = {
366 .store = tmem_frontswap_store, 379 .store = tmem_frontswap_store,
367 .load = tmem_frontswap_load, 380 .load = tmem_frontswap_load,
368 .invalidate_page = tmem_frontswap_flush_page, 381 .invalidate_page = tmem_frontswap_flush_page,
369 .invalidate_area = tmem_frontswap_flush_area, 382 .invalidate_area = tmem_frontswap_flush_area,
370 .init = tmem_frontswap_init 383 .init = tmem_frontswap_init
371}; 384};
385#else /* CONFIG_FRONTSWAP */
386#define disable_frontswap_selfshrinking 1
372#endif 387#endif
373 388
374static int __init xen_tmem_init(void) 389static int xen_tmem_init(void)
375{ 390{
376 if (!xen_domain()) 391 if (!xen_domain())
377 return 0; 392 return 0;
378#ifdef CONFIG_FRONTSWAP 393#ifdef CONFIG_FRONTSWAP
379 if (tmem_enabled && use_frontswap) { 394 if (tmem_enabled && !disable_frontswap) {
380 char *s = ""; 395 char *s = "";
381 struct frontswap_ops old_ops = 396 struct frontswap_ops *old_ops =
382 frontswap_register_ops(&tmem_frontswap_ops); 397 frontswap_register_ops(&tmem_frontswap_ops);
383 398
384 tmem_frontswap_poolid = -1; 399 tmem_frontswap_poolid = -1;
385 if (old_ops.init != NULL) 400 if (IS_ERR(old_ops) || old_ops) {
401 if (IS_ERR(old_ops))
402 return PTR_ERR(old_ops);
386 s = " (WARNING: frontswap_ops overridden)"; 403 s = " (WARNING: frontswap_ops overridden)";
404 }
387 printk(KERN_INFO "frontswap enabled, RAM provided by " 405 printk(KERN_INFO "frontswap enabled, RAM provided by "
388 "Xen Transcendent Memory%s\n", s); 406 "Xen Transcendent Memory%s\n", s);
389 } 407 }
390#endif 408#endif
391#ifdef CONFIG_CLEANCACHE 409#ifdef CONFIG_CLEANCACHE
392 BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid)); 410 BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
393 if (tmem_enabled && use_cleancache) { 411 if (tmem_enabled && !disable_cleancache) {
394 char *s = ""; 412 char *s = "";
395 struct cleancache_ops old_ops = 413 struct cleancache_ops *old_ops =
396 cleancache_register_ops(&tmem_cleancache_ops); 414 cleancache_register_ops(&tmem_cleancache_ops);
397 if (old_ops.init_fs != NULL) 415 if (old_ops)
398 s = " (WARNING: cleancache_ops overridden)"; 416 s = " (WARNING: cleancache_ops overridden)";
399 printk(KERN_INFO "cleancache enabled, RAM provided by " 417 printk(KERN_INFO "cleancache enabled, RAM provided by "
400 "Xen Transcendent Memory%s\n", s); 418 "Xen Transcendent Memory%s\n", s);
401 } 419 }
402#endif 420#endif
421#ifdef CONFIG_XEN_SELFBALLOONING
422 xen_selfballoon_init(!disable_selfballooning,
423 !disable_frontswap_selfshrinking);
424#endif
403 return 0; 425 return 0;
404} 426}
405 427
406module_init(xen_tmem_init) 428module_init(xen_tmem_init)
429MODULE_LICENSE("GPL");
430MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
431MODULE_DESCRIPTION("Shim to Xen transcendent memory");
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 2552d3e0a70f..f2ef569c7cc1 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -121,7 +121,7 @@ static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process);
121static bool frontswap_selfshrinking __read_mostly; 121static bool frontswap_selfshrinking __read_mostly;
122 122
123/* Enable/disable with kernel boot option. */ 123/* Enable/disable with kernel boot option. */
124static bool use_frontswap_selfshrink __initdata = true; 124static bool use_frontswap_selfshrink = true;
125 125
126/* 126/*
127 * The default values for the following parameters were deemed reasonable 127 * The default values for the following parameters were deemed reasonable
@@ -185,7 +185,7 @@ static int __init xen_nofrontswap_selfshrink_setup(char *s)
185__setup("noselfshrink", xen_nofrontswap_selfshrink_setup); 185__setup("noselfshrink", xen_nofrontswap_selfshrink_setup);
186 186
187/* Disable with kernel boot option. */ 187/* Disable with kernel boot option. */
188static bool use_selfballooning __initdata = true; 188static bool use_selfballooning = true;
189 189
190static int __init xen_noselfballooning_setup(char *s) 190static int __init xen_noselfballooning_setup(char *s)
191{ 191{
@@ -196,7 +196,7 @@ static int __init xen_noselfballooning_setup(char *s)
196__setup("noselfballooning", xen_noselfballooning_setup); 196__setup("noselfballooning", xen_noselfballooning_setup);
197#else /* !CONFIG_FRONTSWAP */ 197#else /* !CONFIG_FRONTSWAP */
198/* Enable with kernel boot option. */ 198/* Enable with kernel boot option. */
199static bool use_selfballooning __initdata = false; 199static bool use_selfballooning;
200 200
201static int __init xen_selfballooning_setup(char *s) 201static int __init xen_selfballooning_setup(char *s)
202{ 202{
@@ -537,7 +537,7 @@ int register_xen_selfballooning(struct device *dev)
537} 537}
538EXPORT_SYMBOL(register_xen_selfballooning); 538EXPORT_SYMBOL(register_xen_selfballooning);
539 539
540static int __init xen_selfballoon_init(void) 540int xen_selfballoon_init(bool use_selfballooning, bool use_frontswap_selfshrink)
541{ 541{
542 bool enable = false; 542 bool enable = false;
543 543
@@ -571,7 +571,4 @@ static int __init xen_selfballoon_init(void)
571 571
572 return 0; 572 return 0;
573} 573}
574 574EXPORT_SYMBOL(xen_selfballoon_init);
575subsys_initcall(xen_selfballoon_init);
576
577MODULE_LICENSE("GPL");
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 0efd1524b977..370b24cee4d8 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -65,6 +65,20 @@ config CORE_DUMP_DEFAULT_ELF_HEADERS
65 This config option changes the default setting of coredump_filter 65 This config option changes the default setting of coredump_filter
66 seen at boot time. If unsure, say Y. 66 seen at boot time. If unsure, say Y.
67 67
68config BINFMT_SCRIPT
69 tristate "Kernel support for scripts starting with #!"
70 default y
71 help
72 Say Y here if you want to execute interpreted scripts starting with
73 #! followed by the path to an interpreter.
74
75 You can build this support as a module; however, until that module
76 gets loaded, you cannot run scripts. Thus, if you want to load this
77 module from an initramfs, the portion of the initramfs before loading
78 this module must consist of compiled binaries only.
79
80 Most systems will not boot if you say M or N here. If unsure, say Y.
81
68config BINFMT_FLAT 82config BINFMT_FLAT
69 bool "Kernel support for flat binaries" 83 bool "Kernel support for flat binaries"
70 depends on !MMU && (!FRV || BROKEN) 84 depends on !MMU && (!FRV || BROKEN)
diff --git a/fs/Makefile b/fs/Makefile
index 3b2c76759ec9..5e67e57b59dc 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -34,10 +34,7 @@ obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o
34obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o 34obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o
35obj-$(CONFIG_BINFMT_EM86) += binfmt_em86.o 35obj-$(CONFIG_BINFMT_EM86) += binfmt_em86.o
36obj-$(CONFIG_BINFMT_MISC) += binfmt_misc.o 36obj-$(CONFIG_BINFMT_MISC) += binfmt_misc.o
37 37obj-$(CONFIG_BINFMT_SCRIPT) += binfmt_script.o
38# binfmt_script is always there
39obj-y += binfmt_script.o
40
41obj-$(CONFIG_BINFMT_ELF) += binfmt_elf.o 38obj-$(CONFIG_BINFMT_ELF) += binfmt_elf.o
42obj-$(CONFIG_COMPAT_BINFMT_ELF) += compat_binfmt_elf.o 39obj-$(CONFIG_COMPAT_BINFMT_ELF) += compat_binfmt_elf.o
43obj-$(CONFIG_BINFMT_ELF_FDPIC) += binfmt_elf_fdpic.o 40obj-$(CONFIG_BINFMT_ELF_FDPIC) += binfmt_elf_fdpic.o
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index bbc8f8827eac..02fe378fc506 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -62,7 +62,6 @@ static int aout_core_dump(struct coredump_params *cprm)
62 fs = get_fs(); 62 fs = get_fs();
63 set_fs(KERNEL_DS); 63 set_fs(KERNEL_DS);
64 has_dumped = 1; 64 has_dumped = 1;
65 current->flags |= PF_DUMPCORE;
66 strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm)); 65 strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm));
67 dump.u_ar0 = offsetof(struct user, regs); 66 dump.u_ar0 = offsetof(struct user, regs);
68 dump.signal = cprm->siginfo->si_signo; 67 dump.signal = cprm->siginfo->si_signo;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 86af964c2425..34a9771eaa6c 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -803,7 +803,8 @@ static int load_elf_binary(struct linux_binprm *bprm)
803 * follow the loader, and is not movable. */ 803 * follow the loader, and is not movable. */
804#ifdef CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE 804#ifdef CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE
805 /* Memory randomization might have been switched off 805 /* Memory randomization might have been switched off
806 * in runtime via sysctl. 806 * in runtime via sysctl or explicit setting of
807 * personality flags.
807 * If that is the case, retain the original non-zero 808 * If that is the case, retain the original non-zero
808 * load_bias value in order to establish proper 809 * load_bias value in order to establish proper
809 * non-randomized mappings. 810 * non-randomized mappings.
@@ -2091,8 +2092,7 @@ static int elf_core_dump(struct coredump_params *cprm)
2091 goto cleanup; 2092 goto cleanup;
2092 2093
2093 has_dumped = 1; 2094 has_dumped = 1;
2094 current->flags |= PF_DUMPCORE; 2095
2095
2096 fs = get_fs(); 2096 fs = get_fs();
2097 set_fs(KERNEL_DS); 2097 set_fs(KERNEL_DS);
2098 2098
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 9c13e023e2b7..c1cc06aed601 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1687,8 +1687,6 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
1687 fill_elf_fdpic_header(elf, e_phnum); 1687 fill_elf_fdpic_header(elf, e_phnum);
1688 1688
1689 has_dumped = 1; 1689 has_dumped = 1;
1690 current->flags |= PF_DUMPCORE;
1691
1692 /* 1690 /*
1693 * Set up the notes in similar form to SVR4 core dumps made 1691 * Set up the notes in similar form to SVR4 core dumps made
1694 * with info from their /proc. 1692 * with info from their /proc.
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 751df5e4f61a..1c740e152f38 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -23,6 +23,7 @@
23#include <linux/binfmts.h> 23#include <linux/binfmts.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/ctype.h> 25#include <linux/ctype.h>
26#include <linux/string_helpers.h>
26#include <linux/file.h> 27#include <linux/file.h>
27#include <linux/pagemap.h> 28#include <linux/pagemap.h>
28#include <linux/namei.h> 29#include <linux/namei.h>
@@ -234,24 +235,6 @@ static char *scanarg(char *s, char del)
234 return s; 235 return s;
235} 236}
236 237
237static int unquote(char *from)
238{
239 char c = 0, *s = from, *p = from;
240
241 while ((c = *s++) != '\0') {
242 if (c == '\\' && *s == 'x') {
243 s++;
244 c = toupper(*s++);
245 *p = (c - (isdigit(c) ? '0' : 'A' - 10)) << 4;
246 c = toupper(*s++);
247 *p++ |= c - (isdigit(c) ? '0' : 'A' - 10);
248 continue;
249 }
250 *p++ = c;
251 }
252 return p - from;
253}
254
255static char * check_special_flags (char * sfs, Node * e) 238static char * check_special_flags (char * sfs, Node * e)
256{ 239{
257 char * p = sfs; 240 char * p = sfs;
@@ -354,8 +337,9 @@ static Node *create_entry(const char __user *buffer, size_t count)
354 p[-1] = '\0'; 337 p[-1] = '\0';
355 if (!e->mask[0]) 338 if (!e->mask[0])
356 e->mask = NULL; 339 e->mask = NULL;
357 e->size = unquote(e->magic); 340 e->size = string_unescape_inplace(e->magic, UNESCAPE_HEX);
358 if (e->mask && unquote(e->mask) != e->size) 341 if (e->mask &&
342 string_unescape_inplace(e->mask, UNESCAPE_HEX) != e->size)
359 goto Einval; 343 goto Einval;
360 if (e->size + e->offset > BINPRM_BUF_SIZE) 344 if (e->size + e->offset > BINPRM_BUF_SIZE)
361 goto Einval; 345 goto Einval;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index aae187a7f94a..ce08de7467a3 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -617,11 +617,9 @@ void bd_forget(struct inode *inode)
617 struct block_device *bdev = NULL; 617 struct block_device *bdev = NULL;
618 618
619 spin_lock(&bdev_lock); 619 spin_lock(&bdev_lock);
620 if (inode->i_bdev) { 620 if (!sb_is_blkdev_sb(inode->i_sb))
621 if (!sb_is_blkdev_sb(inode->i_sb)) 621 bdev = inode->i_bdev;
622 bdev = inode->i_bdev; 622 __bd_forget(inode);
623 __bd_forget(inode);
624 }
625 spin_unlock(&bdev_lock); 623 spin_unlock(&bdev_lock);
626 624
627 if (bdev) 625 if (bdev)
diff --git a/fs/coredump.c b/fs/coredump.c
index c6479658d487..ec306cc9a28a 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -263,7 +263,6 @@ static int zap_process(struct task_struct *start, int exit_code)
263 struct task_struct *t; 263 struct task_struct *t;
264 int nr = 0; 264 int nr = 0;
265 265
266 start->signal->flags = SIGNAL_GROUP_EXIT;
267 start->signal->group_exit_code = exit_code; 266 start->signal->group_exit_code = exit_code;
268 start->signal->group_stop_count = 0; 267 start->signal->group_stop_count = 0;
269 268
@@ -280,8 +279,8 @@ static int zap_process(struct task_struct *start, int exit_code)
280 return nr; 279 return nr;
281} 280}
282 281
283static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm, 282static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
284 struct core_state *core_state, int exit_code) 283 struct core_state *core_state, int exit_code)
285{ 284{
286 struct task_struct *g, *p; 285 struct task_struct *g, *p;
287 unsigned long flags; 286 unsigned long flags;
@@ -291,11 +290,16 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
291 if (!signal_group_exit(tsk->signal)) { 290 if (!signal_group_exit(tsk->signal)) {
292 mm->core_state = core_state; 291 mm->core_state = core_state;
293 nr = zap_process(tsk, exit_code); 292 nr = zap_process(tsk, exit_code);
293 tsk->signal->group_exit_task = tsk;
294 /* ignore all signals except SIGKILL, see prepare_signal() */
295 tsk->signal->flags = SIGNAL_GROUP_COREDUMP;
296 clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
294 } 297 }
295 spin_unlock_irq(&tsk->sighand->siglock); 298 spin_unlock_irq(&tsk->sighand->siglock);
296 if (unlikely(nr < 0)) 299 if (unlikely(nr < 0))
297 return nr; 300 return nr;
298 301
302 tsk->flags = PF_DUMPCORE;
299 if (atomic_read(&mm->mm_users) == nr + 1) 303 if (atomic_read(&mm->mm_users) == nr + 1)
300 goto done; 304 goto done;
301 /* 305 /*
@@ -340,6 +344,7 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
340 if (unlikely(p->mm == mm)) { 344 if (unlikely(p->mm == mm)) {
341 lock_task_sighand(p, &flags); 345 lock_task_sighand(p, &flags);
342 nr += zap_process(p, exit_code); 346 nr += zap_process(p, exit_code);
347 p->signal->flags = SIGNAL_GROUP_EXIT;
343 unlock_task_sighand(p, &flags); 348 unlock_task_sighand(p, &flags);
344 } 349 }
345 break; 350 break;
@@ -386,11 +391,18 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
386 return core_waiters; 391 return core_waiters;
387} 392}
388 393
389static void coredump_finish(struct mm_struct *mm) 394static void coredump_finish(struct mm_struct *mm, bool core_dumped)
390{ 395{
391 struct core_thread *curr, *next; 396 struct core_thread *curr, *next;
392 struct task_struct *task; 397 struct task_struct *task;
393 398
399 spin_lock_irq(&current->sighand->siglock);
400 if (core_dumped && !__fatal_signal_pending(current))
401 current->signal->group_exit_code |= 0x80;
402 current->signal->group_exit_task = NULL;
403 current->signal->flags = SIGNAL_GROUP_EXIT;
404 spin_unlock_irq(&current->sighand->siglock);
405
394 next = mm->core_state->dumper.next; 406 next = mm->core_state->dumper.next;
395 while ((curr = next) != NULL) { 407 while ((curr = next) != NULL) {
396 next = curr->next; 408 next = curr->next;
@@ -407,6 +419,17 @@ static void coredump_finish(struct mm_struct *mm)
407 mm->core_state = NULL; 419 mm->core_state = NULL;
408} 420}
409 421
422static bool dump_interrupted(void)
423{
424 /*
425 * SIGKILL or freezing() interrupt the coredumping. Perhaps we
426 * can do try_to_freeze() and check __fatal_signal_pending(),
427 * but then we need to teach dump_write() to restart and clear
428 * TIF_SIGPENDING.
429 */
430 return signal_pending(current);
431}
432
410static void wait_for_dump_helpers(struct file *file) 433static void wait_for_dump_helpers(struct file *file)
411{ 434{
412 struct pipe_inode_info *pipe; 435 struct pipe_inode_info *pipe;
@@ -416,17 +439,20 @@ static void wait_for_dump_helpers(struct file *file)
416 pipe_lock(pipe); 439 pipe_lock(pipe);
417 pipe->readers++; 440 pipe->readers++;
418 pipe->writers--; 441 pipe->writers--;
442 wake_up_interruptible_sync(&pipe->wait);
443 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
444 pipe_unlock(pipe);
419 445
420 while ((pipe->readers > 1) && (!signal_pending(current))) { 446 /*
421 wake_up_interruptible_sync(&pipe->wait); 447 * We actually want wait_event_freezable() but then we need
422 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); 448 * to clear TIF_SIGPENDING and improve dump_interrupted().
423 pipe_wait(pipe); 449 */
424 } 450 wait_event_interruptible(pipe->wait, pipe->readers == 1);
425 451
452 pipe_lock(pipe);
426 pipe->readers--; 453 pipe->readers--;
427 pipe->writers++; 454 pipe->writers++;
428 pipe_unlock(pipe); 455 pipe_unlock(pipe);
429
430} 456}
431 457
432/* 458/*
@@ -471,6 +497,7 @@ void do_coredump(siginfo_t *siginfo)
471 int ispipe; 497 int ispipe;
472 struct files_struct *displaced; 498 struct files_struct *displaced;
473 bool need_nonrelative = false; 499 bool need_nonrelative = false;
500 bool core_dumped = false;
474 static atomic_t core_dump_count = ATOMIC_INIT(0); 501 static atomic_t core_dump_count = ATOMIC_INIT(0);
475 struct coredump_params cprm = { 502 struct coredump_params cprm = {
476 .siginfo = siginfo, 503 .siginfo = siginfo,
@@ -514,17 +541,12 @@ void do_coredump(siginfo_t *siginfo)
514 541
515 old_cred = override_creds(cred); 542 old_cred = override_creds(cred);
516 543
517 /*
518 * Clear any false indication of pending signals that might
519 * be seen by the filesystem code called to write the core file.
520 */
521 clear_thread_flag(TIF_SIGPENDING);
522
523 ispipe = format_corename(&cn, &cprm); 544 ispipe = format_corename(&cn, &cprm);
524 545
525 if (ispipe) { 546 if (ispipe) {
526 int dump_count; 547 int dump_count;
527 char **helper_argv; 548 char **helper_argv;
549 struct subprocess_info *sub_info;
528 550
529 if (ispipe < 0) { 551 if (ispipe < 0) {
530 printk(KERN_WARNING "format_corename failed\n"); 552 printk(KERN_WARNING "format_corename failed\n");
@@ -571,15 +593,20 @@ void do_coredump(siginfo_t *siginfo)
571 goto fail_dropcount; 593 goto fail_dropcount;
572 } 594 }
573 595
574 retval = call_usermodehelper_fns(helper_argv[0], helper_argv, 596 retval = -ENOMEM;
575 NULL, UMH_WAIT_EXEC, umh_pipe_setup, 597 sub_info = call_usermodehelper_setup(helper_argv[0],
576 NULL, &cprm); 598 helper_argv, NULL, GFP_KERNEL,
599 umh_pipe_setup, NULL, &cprm);
600 if (sub_info)
601 retval = call_usermodehelper_exec(sub_info,
602 UMH_WAIT_EXEC);
603
577 argv_free(helper_argv); 604 argv_free(helper_argv);
578 if (retval) { 605 if (retval) {
579 printk(KERN_INFO "Core dump to %s pipe failed\n", 606 printk(KERN_INFO "Core dump to %s pipe failed\n",
580 cn.corename); 607 cn.corename);
581 goto close_fail; 608 goto close_fail;
582 } 609 }
583 } else { 610 } else {
584 struct inode *inode; 611 struct inode *inode;
585 612
@@ -629,9 +656,7 @@ void do_coredump(siginfo_t *siginfo)
629 goto close_fail; 656 goto close_fail;
630 if (displaced) 657 if (displaced)
631 put_files_struct(displaced); 658 put_files_struct(displaced);
632 retval = binfmt->core_dump(&cprm); 659 core_dumped = !dump_interrupted() && binfmt->core_dump(&cprm);
633 if (retval)
634 current->signal->group_exit_code |= 0x80;
635 660
636 if (ispipe && core_pipe_limit) 661 if (ispipe && core_pipe_limit)
637 wait_for_dump_helpers(cprm.file); 662 wait_for_dump_helpers(cprm.file);
@@ -644,7 +669,7 @@ fail_dropcount:
644fail_unlock: 669fail_unlock:
645 kfree(cn.corename); 670 kfree(cn.corename);
646fail_corename: 671fail_corename:
647 coredump_finish(mm); 672 coredump_finish(mm, core_dumped);
648 revert_creds(old_cred); 673 revert_creds(old_cred);
649fail_creds: 674fail_creds:
650 put_cred(cred); 675 put_cred(cred);
@@ -659,7 +684,9 @@ fail:
659 */ 684 */
660int dump_write(struct file *file, const void *addr, int nr) 685int dump_write(struct file *file, const void *addr, int nr)
661{ 686{
662 return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr; 687 return !dump_interrupted() &&
688 access_ok(VERIFY_READ, addr, nr) &&
689 file->f_op->write(file, addr, nr, &file->f_pos) == nr;
663} 690}
664EXPORT_SYMBOL(dump_write); 691EXPORT_SYMBOL(dump_write);
665 692
@@ -668,7 +695,8 @@ int dump_seek(struct file *file, loff_t off)
668 int ret = 1; 695 int ret = 1;
669 696
670 if (file->f_op->llseek && file->f_op->llseek != no_llseek) { 697 if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
671 if (file->f_op->llseek(file, off, SEEK_CUR) < 0) 698 if (dump_interrupted() ||
699 file->f_op->llseek(file, off, SEEK_CUR) < 0)
672 return 0; 700 return 0;
673 } else { 701 } else {
674 char *buf = (char *)get_zeroed_page(GFP_KERNEL); 702 char *buf = (char *)get_zeroed_page(GFP_KERNEL);
diff --git a/fs/dcache.c b/fs/dcache.c
index e8bc3420d63e..e689268046c3 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1230,8 +1230,10 @@ void shrink_dcache_parent(struct dentry * parent)
1230 LIST_HEAD(dispose); 1230 LIST_HEAD(dispose);
1231 int found; 1231 int found;
1232 1232
1233 while ((found = select_parent(parent, &dispose)) != 0) 1233 while ((found = select_parent(parent, &dispose)) != 0) {
1234 shrink_dentry_list(&dispose); 1234 shrink_dentry_list(&dispose);
1235 cond_resched();
1236 }
1235} 1237}
1236EXPORT_SYMBOL(shrink_dcache_parent); 1238EXPORT_SYMBOL(shrink_dcache_parent);
1237 1239
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 9fec1836057a..277cc38aeda5 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -104,7 +104,7 @@
104struct epoll_filefd { 104struct epoll_filefd {
105 struct file *file; 105 struct file *file;
106 int fd; 106 int fd;
107}; 107} __packed;
108 108
109/* 109/*
110 * Structure used to track possible nested calls, for too deep recursions 110 * Structure used to track possible nested calls, for too deep recursions
@@ -128,6 +128,8 @@ struct nested_calls {
128/* 128/*
129 * Each file descriptor added to the eventpoll interface will 129 * Each file descriptor added to the eventpoll interface will
130 * have an entry of this type linked to the "rbr" RB tree. 130 * have an entry of this type linked to the "rbr" RB tree.
131 * Avoid increasing the size of this struct, there can be many thousands
132 * of these on a server and we do not want this to take another cache line.
131 */ 133 */
132struct epitem { 134struct epitem {
133 /* RB tree node used to link this structure to the eventpoll RB tree */ 135 /* RB tree node used to link this structure to the eventpoll RB tree */
@@ -158,7 +160,7 @@ struct epitem {
158 struct list_head fllink; 160 struct list_head fllink;
159 161
160 /* wakeup_source used when EPOLLWAKEUP is set */ 162 /* wakeup_source used when EPOLLWAKEUP is set */
161 struct wakeup_source *ws; 163 struct wakeup_source __rcu *ws;
162 164
163 /* The structure that describe the interested events and the source fd */ 165 /* The structure that describe the interested events and the source fd */
164 struct epoll_event event; 166 struct epoll_event event;
@@ -536,6 +538,38 @@ static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
536 } 538 }
537} 539}
538 540
541/* call only when ep->mtx is held */
542static inline struct wakeup_source *ep_wakeup_source(struct epitem *epi)
543{
544 return rcu_dereference_check(epi->ws, lockdep_is_held(&epi->ep->mtx));
545}
546
547/* call only when ep->mtx is held */
548static inline void ep_pm_stay_awake(struct epitem *epi)
549{
550 struct wakeup_source *ws = ep_wakeup_source(epi);
551
552 if (ws)
553 __pm_stay_awake(ws);
554}
555
556static inline bool ep_has_wakeup_source(struct epitem *epi)
557{
558 return rcu_access_pointer(epi->ws) ? true : false;
559}
560
561/* call when ep->mtx cannot be held (ep_poll_callback) */
562static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
563{
564 struct wakeup_source *ws;
565
566 rcu_read_lock();
567 ws = rcu_dereference(epi->ws);
568 if (ws)
569 __pm_stay_awake(ws);
570 rcu_read_unlock();
571}
572
539/** 573/**
540 * ep_scan_ready_list - Scans the ready list in a way that makes possible for 574 * ep_scan_ready_list - Scans the ready list in a way that makes possible for
541 * the scan code, to call f_op->poll(). Also allows for 575 * the scan code, to call f_op->poll(). Also allows for
@@ -599,7 +633,7 @@ static int ep_scan_ready_list(struct eventpoll *ep,
599 */ 633 */
600 if (!ep_is_linked(&epi->rdllink)) { 634 if (!ep_is_linked(&epi->rdllink)) {
601 list_add_tail(&epi->rdllink, &ep->rdllist); 635 list_add_tail(&epi->rdllink, &ep->rdllist);
602 __pm_stay_awake(epi->ws); 636 ep_pm_stay_awake(epi);
603 } 637 }
604 } 638 }
605 /* 639 /*
@@ -668,7 +702,7 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
668 list_del_init(&epi->rdllink); 702 list_del_init(&epi->rdllink);
669 spin_unlock_irqrestore(&ep->lock, flags); 703 spin_unlock_irqrestore(&ep->lock, flags);
670 704
671 wakeup_source_unregister(epi->ws); 705 wakeup_source_unregister(ep_wakeup_source(epi));
672 706
673 /* At this point it is safe to free the eventpoll item */ 707 /* At this point it is safe to free the eventpoll item */
674 kmem_cache_free(epi_cache, epi); 708 kmem_cache_free(epi_cache, epi);
@@ -711,11 +745,15 @@ static void ep_free(struct eventpoll *ep)
711 * point we are sure no poll callbacks will be lingering around, and also by 745 * point we are sure no poll callbacks will be lingering around, and also by
712 * holding "epmutex" we can be sure that no file cleanup code will hit 746 * holding "epmutex" we can be sure that no file cleanup code will hit
713 * us during this operation. So we can avoid the lock on "ep->lock". 747 * us during this operation. So we can avoid the lock on "ep->lock".
748 * We do not need to lock ep->mtx, either, we only do it to prevent
749 * a lockdep warning.
714 */ 750 */
751 mutex_lock(&ep->mtx);
715 while ((rbp = rb_first(&ep->rbr)) != NULL) { 752 while ((rbp = rb_first(&ep->rbr)) != NULL) {
716 epi = rb_entry(rbp, struct epitem, rbn); 753 epi = rb_entry(rbp, struct epitem, rbn);
717 ep_remove(ep, epi); 754 ep_remove(ep, epi);
718 } 755 }
756 mutex_unlock(&ep->mtx);
719 757
720 mutex_unlock(&epmutex); 758 mutex_unlock(&epmutex);
721 mutex_destroy(&ep->mtx); 759 mutex_destroy(&ep->mtx);
@@ -734,6 +772,13 @@ static int ep_eventpoll_release(struct inode *inode, struct file *file)
734 return 0; 772 return 0;
735} 773}
736 774
775static inline unsigned int ep_item_poll(struct epitem *epi, poll_table *pt)
776{
777 pt->_key = epi->event.events;
778
779 return epi->ffd.file->f_op->poll(epi->ffd.file, pt) & epi->event.events;
780}
781
737static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head, 782static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
738 void *priv) 783 void *priv)
739{ 784{
@@ -741,10 +786,9 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
741 poll_table pt; 786 poll_table pt;
742 787
743 init_poll_funcptr(&pt, NULL); 788 init_poll_funcptr(&pt, NULL);
789
744 list_for_each_entry_safe(epi, tmp, head, rdllink) { 790 list_for_each_entry_safe(epi, tmp, head, rdllink) {
745 pt._key = epi->event.events; 791 if (ep_item_poll(epi, &pt))
746 if (epi->ffd.file->f_op->poll(epi->ffd.file, &pt) &
747 epi->event.events)
748 return POLLIN | POLLRDNORM; 792 return POLLIN | POLLRDNORM;
749 else { 793 else {
750 /* 794 /*
@@ -752,7 +796,7 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
752 * callback, but it's not actually ready, as far as 796 * callback, but it's not actually ready, as far as
753 * caller requested events goes. We can remove it here. 797 * caller requested events goes. We can remove it here.
754 */ 798 */
755 __pm_relax(epi->ws); 799 __pm_relax(ep_wakeup_source(epi));
756 list_del_init(&epi->rdllink); 800 list_del_init(&epi->rdllink);
757 } 801 }
758 } 802 }
@@ -984,7 +1028,7 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
984 /* If this file is already in the ready list we exit soon */ 1028 /* If this file is already in the ready list we exit soon */
985 if (!ep_is_linked(&epi->rdllink)) { 1029 if (!ep_is_linked(&epi->rdllink)) {
986 list_add_tail(&epi->rdllink, &ep->rdllist); 1030 list_add_tail(&epi->rdllink, &ep->rdllist);
987 __pm_stay_awake(epi->ws); 1031 ep_pm_stay_awake_rcu(epi);
988 } 1032 }
989 1033
990 /* 1034 /*
@@ -1146,6 +1190,7 @@ static int reverse_path_check(void)
1146static int ep_create_wakeup_source(struct epitem *epi) 1190static int ep_create_wakeup_source(struct epitem *epi)
1147{ 1191{
1148 const char *name; 1192 const char *name;
1193 struct wakeup_source *ws;
1149 1194
1150 if (!epi->ep->ws) { 1195 if (!epi->ep->ws) {
1151 epi->ep->ws = wakeup_source_register("eventpoll"); 1196 epi->ep->ws = wakeup_source_register("eventpoll");
@@ -1154,17 +1199,29 @@ static int ep_create_wakeup_source(struct epitem *epi)
1154 } 1199 }
1155 1200
1156 name = epi->ffd.file->f_path.dentry->d_name.name; 1201 name = epi->ffd.file->f_path.dentry->d_name.name;
1157 epi->ws = wakeup_source_register(name); 1202 ws = wakeup_source_register(name);
1158 if (!epi->ws) 1203
1204 if (!ws)
1159 return -ENOMEM; 1205 return -ENOMEM;
1206 rcu_assign_pointer(epi->ws, ws);
1160 1207
1161 return 0; 1208 return 0;
1162} 1209}
1163 1210
1164static void ep_destroy_wakeup_source(struct epitem *epi) 1211/* rare code path, only used when EPOLL_CTL_MOD removes a wakeup source */
1212static noinline void ep_destroy_wakeup_source(struct epitem *epi)
1165{ 1213{
1166 wakeup_source_unregister(epi->ws); 1214 struct wakeup_source *ws = ep_wakeup_source(epi);
1167 epi->ws = NULL; 1215
1216 RCU_INIT_POINTER(epi->ws, NULL);
1217
1218 /*
1219 * wait for ep_pm_stay_awake_rcu to finish, synchronize_rcu is
1220 * used internally by wakeup_source_remove, too (called by
1221 * wakeup_source_unregister), so we cannot use call_rcu
1222 */
1223 synchronize_rcu();
1224 wakeup_source_unregister(ws);
1168} 1225}
1169 1226
1170/* 1227/*
@@ -1199,13 +1256,12 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
1199 if (error) 1256 if (error)
1200 goto error_create_wakeup_source; 1257 goto error_create_wakeup_source;
1201 } else { 1258 } else {
1202 epi->ws = NULL; 1259 RCU_INIT_POINTER(epi->ws, NULL);
1203 } 1260 }
1204 1261
1205 /* Initialize the poll table using the queue callback */ 1262 /* Initialize the poll table using the queue callback */
1206 epq.epi = epi; 1263 epq.epi = epi;
1207 init_poll_funcptr(&epq.pt, ep_ptable_queue_proc); 1264 init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
1208 epq.pt._key = event->events;
1209 1265
1210 /* 1266 /*
1211 * Attach the item to the poll hooks and get current event bits. 1267 * Attach the item to the poll hooks and get current event bits.
@@ -1214,7 +1270,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
1214 * this operation completes, the poll callback can start hitting 1270 * this operation completes, the poll callback can start hitting
1215 * the new item. 1271 * the new item.
1216 */ 1272 */
1217 revents = tfile->f_op->poll(tfile, &epq.pt); 1273 revents = ep_item_poll(epi, &epq.pt);
1218 1274
1219 /* 1275 /*
1220 * We have to check if something went wrong during the poll wait queue 1276 * We have to check if something went wrong during the poll wait queue
@@ -1247,7 +1303,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
1247 /* If the file is already "ready" we drop it inside the ready list */ 1303 /* If the file is already "ready" we drop it inside the ready list */
1248 if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) { 1304 if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
1249 list_add_tail(&epi->rdllink, &ep->rdllist); 1305 list_add_tail(&epi->rdllink, &ep->rdllist);
1250 __pm_stay_awake(epi->ws); 1306 ep_pm_stay_awake(epi);
1251 1307
1252 /* Notify waiting tasks that events are available */ 1308 /* Notify waiting tasks that events are available */
1253 if (waitqueue_active(&ep->wq)) 1309 if (waitqueue_active(&ep->wq))
@@ -1288,7 +1344,7 @@ error_unregister:
1288 list_del_init(&epi->rdllink); 1344 list_del_init(&epi->rdllink);
1289 spin_unlock_irqrestore(&ep->lock, flags); 1345 spin_unlock_irqrestore(&ep->lock, flags);
1290 1346
1291 wakeup_source_unregister(epi->ws); 1347 wakeup_source_unregister(ep_wakeup_source(epi));
1292 1348
1293error_create_wakeup_source: 1349error_create_wakeup_source:
1294 kmem_cache_free(epi_cache, epi); 1350 kmem_cache_free(epi_cache, epi);
@@ -1314,12 +1370,11 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
1314 * f_op->poll() call and the new event set registering. 1370 * f_op->poll() call and the new event set registering.
1315 */ 1371 */
1316 epi->event.events = event->events; /* need barrier below */ 1372 epi->event.events = event->events; /* need barrier below */
1317 pt._key = event->events;
1318 epi->event.data = event->data; /* protected by mtx */ 1373 epi->event.data = event->data; /* protected by mtx */
1319 if (epi->event.events & EPOLLWAKEUP) { 1374 if (epi->event.events & EPOLLWAKEUP) {
1320 if (!epi->ws) 1375 if (!ep_has_wakeup_source(epi))
1321 ep_create_wakeup_source(epi); 1376 ep_create_wakeup_source(epi);
1322 } else if (epi->ws) { 1377 } else if (ep_has_wakeup_source(epi)) {
1323 ep_destroy_wakeup_source(epi); 1378 ep_destroy_wakeup_source(epi);
1324 } 1379 }
1325 1380
@@ -1347,7 +1402,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
1347 * Get current event bits. We can safely use the file* here because 1402 * Get current event bits. We can safely use the file* here because
1348 * its usage count has been increased by the caller of this function. 1403 * its usage count has been increased by the caller of this function.
1349 */ 1404 */
1350 revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt); 1405 revents = ep_item_poll(epi, &pt);
1351 1406
1352 /* 1407 /*
1353 * If the item is "hot" and it is not registered inside the ready 1408 * If the item is "hot" and it is not registered inside the ready
@@ -1357,7 +1412,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
1357 spin_lock_irq(&ep->lock); 1412 spin_lock_irq(&ep->lock);
1358 if (!ep_is_linked(&epi->rdllink)) { 1413 if (!ep_is_linked(&epi->rdllink)) {
1359 list_add_tail(&epi->rdllink, &ep->rdllist); 1414 list_add_tail(&epi->rdllink, &ep->rdllist);
1360 __pm_stay_awake(epi->ws); 1415 ep_pm_stay_awake(epi);
1361 1416
1362 /* Notify waiting tasks that events are available */ 1417 /* Notify waiting tasks that events are available */
1363 if (waitqueue_active(&ep->wq)) 1418 if (waitqueue_active(&ep->wq))
@@ -1383,6 +1438,7 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
1383 unsigned int revents; 1438 unsigned int revents;
1384 struct epitem *epi; 1439 struct epitem *epi;
1385 struct epoll_event __user *uevent; 1440 struct epoll_event __user *uevent;
1441 struct wakeup_source *ws;
1386 poll_table pt; 1442 poll_table pt;
1387 1443
1388 init_poll_funcptr(&pt, NULL); 1444 init_poll_funcptr(&pt, NULL);
@@ -1405,14 +1461,16 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
1405 * instead, but then epi->ws would temporarily be out of sync 1461 * instead, but then epi->ws would temporarily be out of sync
1406 * with ep_is_linked(). 1462 * with ep_is_linked().
1407 */ 1463 */
1408 if (epi->ws && epi->ws->active) 1464 ws = ep_wakeup_source(epi);
1409 __pm_stay_awake(ep->ws); 1465 if (ws) {
1410 __pm_relax(epi->ws); 1466 if (ws->active)
1467 __pm_stay_awake(ep->ws);
1468 __pm_relax(ws);
1469 }
1470
1411 list_del_init(&epi->rdllink); 1471 list_del_init(&epi->rdllink);
1412 1472
1413 pt._key = epi->event.events; 1473 revents = ep_item_poll(epi, &pt);
1414 revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt) &
1415 epi->event.events;
1416 1474
1417 /* 1475 /*
1418 * If the event mask intersect the caller-requested one, 1476 * If the event mask intersect the caller-requested one,
@@ -1424,7 +1482,7 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
1424 if (__put_user(revents, &uevent->events) || 1482 if (__put_user(revents, &uevent->events) ||
1425 __put_user(epi->event.data, &uevent->data)) { 1483 __put_user(epi->event.data, &uevent->data)) {
1426 list_add(&epi->rdllink, head); 1484 list_add(&epi->rdllink, head);
1427 __pm_stay_awake(epi->ws); 1485 ep_pm_stay_awake(epi);
1428 return eventcnt ? eventcnt : -EFAULT; 1486 return eventcnt ? eventcnt : -EFAULT;
1429 } 1487 }
1430 eventcnt++; 1488 eventcnt++;
@@ -1444,7 +1502,7 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
1444 * poll callback will queue them in ep->ovflist. 1502 * poll callback will queue them in ep->ovflist.
1445 */ 1503 */
1446 list_add_tail(&epi->rdllink, &ep->rdllist); 1504 list_add_tail(&epi->rdllink, &ep->rdllist);
1447 __pm_stay_awake(epi->ws); 1505 ep_pm_stay_awake(epi);
1448 } 1506 }
1449 } 1507 }
1450 } 1508 }
@@ -1964,6 +2022,12 @@ static int __init eventpoll_init(void)
1964 /* Initialize the structure used to perform file's f_op->poll() calls */ 2022 /* Initialize the structure used to perform file's f_op->poll() calls */
1965 ep_nested_calls_init(&poll_readywalk_ncalls); 2023 ep_nested_calls_init(&poll_readywalk_ncalls);
1966 2024
2025 /*
2026 * We can have many thousands of epitems, so prevent this from
2027 * using an extra cache line on 64-bit (and smaller) CPUs
2028 */
2029 BUILD_BUG_ON(sizeof(void *) <= 8 && sizeof(struct epitem) > 128);
2030
1967 /* Allocates slab cache used to allocate "struct epitem" items */ 2031 /* Allocates slab cache used to allocate "struct epitem" items */
1968 epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem), 2032 epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
1969 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); 2033 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
diff --git a/fs/exec.c b/fs/exec.c
index 87e731f020fb..963f510a25ab 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -898,11 +898,13 @@ static int de_thread(struct task_struct *tsk)
898 898
899 sig->notify_count = -1; /* for exit_notify() */ 899 sig->notify_count = -1; /* for exit_notify() */
900 for (;;) { 900 for (;;) {
901 threadgroup_change_begin(tsk);
901 write_lock_irq(&tasklist_lock); 902 write_lock_irq(&tasklist_lock);
902 if (likely(leader->exit_state)) 903 if (likely(leader->exit_state))
903 break; 904 break;
904 __set_current_state(TASK_KILLABLE); 905 __set_current_state(TASK_KILLABLE);
905 write_unlock_irq(&tasklist_lock); 906 write_unlock_irq(&tasklist_lock);
907 threadgroup_change_end(tsk);
906 schedule(); 908 schedule();
907 if (unlikely(__fatal_signal_pending(tsk))) 909 if (unlikely(__fatal_signal_pending(tsk)))
908 goto killed; 910 goto killed;
@@ -960,6 +962,7 @@ static int de_thread(struct task_struct *tsk)
960 if (unlikely(leader->ptrace)) 962 if (unlikely(leader->ptrace))
961 __wake_up_parent(leader, leader->parent); 963 __wake_up_parent(leader, leader->parent);
962 write_unlock_irq(&tasklist_lock); 964 write_unlock_irq(&tasklist_lock);
965 threadgroup_change_end(tsk);
963 966
964 release_task(leader); 967 release_task(leader);
965 } 968 }
@@ -1027,17 +1030,7 @@ EXPORT_SYMBOL_GPL(get_task_comm);
1027void set_task_comm(struct task_struct *tsk, char *buf) 1030void set_task_comm(struct task_struct *tsk, char *buf)
1028{ 1031{
1029 task_lock(tsk); 1032 task_lock(tsk);
1030
1031 trace_task_rename(tsk, buf); 1033 trace_task_rename(tsk, buf);
1032
1033 /*
1034 * Threads may access current->comm without holding
1035 * the task lock, so write the string carefully.
1036 * Readers without a lock may see incomplete new
1037 * names but are safe from non-terminating string reads.
1038 */
1039 memset(tsk->comm, 0, TASK_COMM_LEN);
1040 wmb();
1041 strlcpy(tsk->comm, buf, sizeof(tsk->comm)); 1034 strlcpy(tsk->comm, buf, sizeof(tsk->comm));
1042 task_unlock(tsk); 1035 task_unlock(tsk);
1043 perf_event_comm(tsk); 1036 perf_event_comm(tsk);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 21f46fb3a101..798d4458a4d3 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1028,6 +1028,7 @@ int bdi_writeback_thread(void *data)
1028 struct backing_dev_info *bdi = wb->bdi; 1028 struct backing_dev_info *bdi = wb->bdi;
1029 long pages_written; 1029 long pages_written;
1030 1030
1031 set_worker_desc("flush-%s", dev_name(bdi->dev));
1031 current->flags |= PF_SWAPWRITE; 1032 current->flags |= PF_SWAPWRITE;
1032 set_freezable(); 1033 set_freezable();
1033 wb->last_active = jiffies; 1034 wb->last_active = jiffies;
diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
index 571abe97b42a..de69d8a24f6d 100644
--- a/fs/hfs/bfind.c
+++ b/fs/hfs/bfind.c
@@ -22,7 +22,8 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
22 return -ENOMEM; 22 return -ENOMEM;
23 fd->search_key = ptr; 23 fd->search_key = ptr;
24 fd->key = ptr + tree->max_key_len + 2; 24 fd->key = ptr + tree->max_key_len + 2;
25 dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n", tree->cnid, __builtin_return_address(0)); 25 hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
26 tree->cnid, __builtin_return_address(0));
26 mutex_lock(&tree->tree_lock); 27 mutex_lock(&tree->tree_lock);
27 return 0; 28 return 0;
28} 29}
@@ -31,7 +32,8 @@ void hfs_find_exit(struct hfs_find_data *fd)
31{ 32{
32 hfs_bnode_put(fd->bnode); 33 hfs_bnode_put(fd->bnode);
33 kfree(fd->search_key); 34 kfree(fd->search_key);
34 dprint(DBG_BNODE_REFS, "find_exit: %d (%p)\n", fd->tree->cnid, __builtin_return_address(0)); 35 hfs_dbg(BNODE_REFS, "find_exit: %d (%p)\n",
36 fd->tree->cnid, __builtin_return_address(0));
35 mutex_unlock(&fd->tree->tree_lock); 37 mutex_unlock(&fd->tree->tree_lock);
36 fd->tree = NULL; 38 fd->tree = NULL;
37} 39}
@@ -135,8 +137,8 @@ int hfs_brec_find(struct hfs_find_data *fd)
135 return res; 137 return res;
136 138
137invalid: 139invalid:
138 printk(KERN_ERR "hfs: inconsistency in B*Tree (%d,%d,%d,%u,%u)\n", 140 pr_err("inconsistency in B*Tree (%d,%d,%d,%u,%u)\n",
139 height, bnode->height, bnode->type, nidx, parent); 141 height, bnode->height, bnode->type, nidx, parent);
140 res = -EIO; 142 res = -EIO;
141release: 143release:
142 hfs_bnode_put(bnode); 144 hfs_bnode_put(bnode);
diff --git a/fs/hfs/bitmap.c b/fs/hfs/bitmap.c
index c6e97366e8ac..28307bc9ec1e 100644
--- a/fs/hfs/bitmap.c
+++ b/fs/hfs/bitmap.c
@@ -158,7 +158,7 @@ u32 hfs_vbm_search_free(struct super_block *sb, u32 goal, u32 *num_bits)
158 } 158 }
159 } 159 }
160 160
161 dprint(DBG_BITMAP, "alloc_bits: %u,%u\n", pos, *num_bits); 161 hfs_dbg(BITMAP, "alloc_bits: %u,%u\n", pos, *num_bits);
162 HFS_SB(sb)->free_ablocks -= *num_bits; 162 HFS_SB(sb)->free_ablocks -= *num_bits;
163 hfs_bitmap_dirty(sb); 163 hfs_bitmap_dirty(sb);
164out: 164out:
@@ -200,7 +200,7 @@ int hfs_clear_vbm_bits(struct super_block *sb, u16 start, u16 count)
200 if (!count) 200 if (!count)
201 return 0; 201 return 0;
202 202
203 dprint(DBG_BITMAP, "clear_bits: %u,%u\n", start, count); 203 hfs_dbg(BITMAP, "clear_bits: %u,%u\n", start, count);
204 /* are all of the bits in range? */ 204 /* are all of the bits in range? */
205 if ((start + count) > HFS_SB(sb)->fs_ablocks) 205 if ((start + count) > HFS_SB(sb)->fs_ablocks)
206 return -2; 206 return -2;
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index cdb41a1f6a64..f3b1a15ccd59 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -100,7 +100,7 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
100 struct hfs_btree *tree; 100 struct hfs_btree *tree;
101 struct page *src_page, *dst_page; 101 struct page *src_page, *dst_page;
102 102
103 dprint(DBG_BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len); 103 hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
104 if (!len) 104 if (!len)
105 return; 105 return;
106 tree = src_node->tree; 106 tree = src_node->tree;
@@ -120,7 +120,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
120 struct page *page; 120 struct page *page;
121 void *ptr; 121 void *ptr;
122 122
123 dprint(DBG_BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len); 123 hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
124 if (!len) 124 if (!len)
125 return; 125 return;
126 src += node->page_offset; 126 src += node->page_offset;
@@ -138,16 +138,16 @@ void hfs_bnode_dump(struct hfs_bnode *node)
138 __be32 cnid; 138 __be32 cnid;
139 int i, off, key_off; 139 int i, off, key_off;
140 140
141 dprint(DBG_BNODE_MOD, "bnode: %d\n", node->this); 141 hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
142 hfs_bnode_read(node, &desc, 0, sizeof(desc)); 142 hfs_bnode_read(node, &desc, 0, sizeof(desc));
143 dprint(DBG_BNODE_MOD, "%d, %d, %d, %d, %d\n", 143 hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
144 be32_to_cpu(desc.next), be32_to_cpu(desc.prev), 144 be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
145 desc.type, desc.height, be16_to_cpu(desc.num_recs)); 145 desc.type, desc.height, be16_to_cpu(desc.num_recs));
146 146
147 off = node->tree->node_size - 2; 147 off = node->tree->node_size - 2;
148 for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) { 148 for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
149 key_off = hfs_bnode_read_u16(node, off); 149 key_off = hfs_bnode_read_u16(node, off);
150 dprint(DBG_BNODE_MOD, " %d", key_off); 150 hfs_dbg_cont(BNODE_MOD, " %d", key_off);
151 if (i && node->type == HFS_NODE_INDEX) { 151 if (i && node->type == HFS_NODE_INDEX) {
152 int tmp; 152 int tmp;
153 153
@@ -155,17 +155,18 @@ void hfs_bnode_dump(struct hfs_bnode *node)
155 tmp = (hfs_bnode_read_u8(node, key_off) | 1) + 1; 155 tmp = (hfs_bnode_read_u8(node, key_off) | 1) + 1;
156 else 156 else
157 tmp = node->tree->max_key_len + 1; 157 tmp = node->tree->max_key_len + 1;
158 dprint(DBG_BNODE_MOD, " (%d,%d", tmp, hfs_bnode_read_u8(node, key_off)); 158 hfs_dbg_cont(BNODE_MOD, " (%d,%d",
159 tmp, hfs_bnode_read_u8(node, key_off));
159 hfs_bnode_read(node, &cnid, key_off + tmp, 4); 160 hfs_bnode_read(node, &cnid, key_off + tmp, 4);
160 dprint(DBG_BNODE_MOD, ",%d)", be32_to_cpu(cnid)); 161 hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
161 } else if (i && node->type == HFS_NODE_LEAF) { 162 } else if (i && node->type == HFS_NODE_LEAF) {
162 int tmp; 163 int tmp;
163 164
164 tmp = hfs_bnode_read_u8(node, key_off); 165 tmp = hfs_bnode_read_u8(node, key_off);
165 dprint(DBG_BNODE_MOD, " (%d)", tmp); 166 hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
166 } 167 }
167 } 168 }
168 dprint(DBG_BNODE_MOD, "\n"); 169 hfs_dbg_cont(BNODE_MOD, "\n");
169} 170}
170 171
171void hfs_bnode_unlink(struct hfs_bnode *node) 172void hfs_bnode_unlink(struct hfs_bnode *node)
@@ -220,7 +221,7 @@ struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
220 struct hfs_bnode *node; 221 struct hfs_bnode *node;
221 222
222 if (cnid >= tree->node_count) { 223 if (cnid >= tree->node_count) {
223 printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid); 224 pr_err("request for non-existent node %d in B*Tree\n", cnid);
224 return NULL; 225 return NULL;
225 } 226 }
226 227
@@ -243,7 +244,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
243 loff_t off; 244 loff_t off;
244 245
245 if (cnid >= tree->node_count) { 246 if (cnid >= tree->node_count) {
246 printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid); 247 pr_err("request for non-existent node %d in B*Tree\n", cnid);
247 return NULL; 248 return NULL;
248 } 249 }
249 250
@@ -257,8 +258,8 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
257 node->this = cnid; 258 node->this = cnid;
258 set_bit(HFS_BNODE_NEW, &node->flags); 259 set_bit(HFS_BNODE_NEW, &node->flags);
259 atomic_set(&node->refcnt, 1); 260 atomic_set(&node->refcnt, 1);
260 dprint(DBG_BNODE_REFS, "new_node(%d:%d): 1\n", 261 hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
261 node->tree->cnid, node->this); 262 node->tree->cnid, node->this);
262 init_waitqueue_head(&node->lock_wq); 263 init_waitqueue_head(&node->lock_wq);
263 spin_lock(&tree->hash_lock); 264 spin_lock(&tree->hash_lock);
264 node2 = hfs_bnode_findhash(tree, cnid); 265 node2 = hfs_bnode_findhash(tree, cnid);
@@ -301,7 +302,7 @@ void hfs_bnode_unhash(struct hfs_bnode *node)
301{ 302{
302 struct hfs_bnode **p; 303 struct hfs_bnode **p;
303 304
304 dprint(DBG_BNODE_REFS, "remove_node(%d:%d): %d\n", 305 hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
305 node->tree->cnid, node->this, atomic_read(&node->refcnt)); 306 node->tree->cnid, node->this, atomic_read(&node->refcnt));
306 for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)]; 307 for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
307 *p && *p != node; p = &(*p)->next_hash) 308 *p && *p != node; p = &(*p)->next_hash)
@@ -443,8 +444,9 @@ void hfs_bnode_get(struct hfs_bnode *node)
443{ 444{
444 if (node) { 445 if (node) {
445 atomic_inc(&node->refcnt); 446 atomic_inc(&node->refcnt);
446 dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n", 447 hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
447 node->tree->cnid, node->this, atomic_read(&node->refcnt)); 448 node->tree->cnid, node->this,
449 atomic_read(&node->refcnt));
448 } 450 }
449} 451}
450 452
@@ -455,8 +457,9 @@ void hfs_bnode_put(struct hfs_bnode *node)
455 struct hfs_btree *tree = node->tree; 457 struct hfs_btree *tree = node->tree;
456 int i; 458 int i;
457 459
458 dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n", 460 hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
459 node->tree->cnid, node->this, atomic_read(&node->refcnt)); 461 node->tree->cnid, node->this,
462 atomic_read(&node->refcnt));
460 BUG_ON(!atomic_read(&node->refcnt)); 463 BUG_ON(!atomic_read(&node->refcnt));
461 if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock)) 464 if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
462 return; 465 return;
diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
index 92fb358ce824..9f4ee7f52026 100644
--- a/fs/hfs/brec.c
+++ b/fs/hfs/brec.c
@@ -47,15 +47,13 @@ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec)
47 if (node->tree->attributes & HFS_TREE_BIGKEYS) { 47 if (node->tree->attributes & HFS_TREE_BIGKEYS) {
48 retval = hfs_bnode_read_u16(node, recoff) + 2; 48 retval = hfs_bnode_read_u16(node, recoff) + 2;
49 if (retval > node->tree->max_key_len + 2) { 49 if (retval > node->tree->max_key_len + 2) {
50 printk(KERN_ERR "hfs: keylen %d too large\n", 50 pr_err("keylen %d too large\n", retval);
51 retval);
52 retval = 0; 51 retval = 0;
53 } 52 }
54 } else { 53 } else {
55 retval = (hfs_bnode_read_u8(node, recoff) | 1) + 1; 54 retval = (hfs_bnode_read_u8(node, recoff) | 1) + 1;
56 if (retval > node->tree->max_key_len + 1) { 55 if (retval > node->tree->max_key_len + 1) {
57 printk(KERN_ERR "hfs: keylen %d too large\n", 56 pr_err("keylen %d too large\n", retval);
58 retval);
59 retval = 0; 57 retval = 0;
60 } 58 }
61 } 59 }
@@ -94,7 +92,8 @@ again:
94 end_rec_off = tree->node_size - (node->num_recs + 1) * 2; 92 end_rec_off = tree->node_size - (node->num_recs + 1) * 2;
95 end_off = hfs_bnode_read_u16(node, end_rec_off); 93 end_off = hfs_bnode_read_u16(node, end_rec_off);
96 end_rec_off -= 2; 94 end_rec_off -= 2;
97 dprint(DBG_BNODE_MOD, "insert_rec: %d, %d, %d, %d\n", rec, size, end_off, end_rec_off); 95 hfs_dbg(BNODE_MOD, "insert_rec: %d, %d, %d, %d\n",
96 rec, size, end_off, end_rec_off);
98 if (size > end_rec_off - end_off) { 97 if (size > end_rec_off - end_off) {
99 if (new_node) 98 if (new_node)
100 panic("not enough room!\n"); 99 panic("not enough room!\n");
@@ -190,7 +189,8 @@ again:
190 mark_inode_dirty(tree->inode); 189 mark_inode_dirty(tree->inode);
191 } 190 }
192 hfs_bnode_dump(node); 191 hfs_bnode_dump(node);
193 dprint(DBG_BNODE_MOD, "remove_rec: %d, %d\n", fd->record, fd->keylength + fd->entrylength); 192 hfs_dbg(BNODE_MOD, "remove_rec: %d, %d\n",
193 fd->record, fd->keylength + fd->entrylength);
194 if (!--node->num_recs) { 194 if (!--node->num_recs) {
195 hfs_bnode_unlink(node); 195 hfs_bnode_unlink(node);
196 if (!node->parent) 196 if (!node->parent)
@@ -240,7 +240,7 @@ static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
240 if (IS_ERR(new_node)) 240 if (IS_ERR(new_node))
241 return new_node; 241 return new_node;
242 hfs_bnode_get(node); 242 hfs_bnode_get(node);
243 dprint(DBG_BNODE_MOD, "split_nodes: %d - %d - %d\n", 243 hfs_dbg(BNODE_MOD, "split_nodes: %d - %d - %d\n",
244 node->this, new_node->this, node->next); 244 node->this, new_node->this, node->next);
245 new_node->next = node->next; 245 new_node->next = node->next;
246 new_node->prev = node->this; 246 new_node->prev = node->this;
@@ -374,7 +374,8 @@ again:
374 newkeylen = (hfs_bnode_read_u8(node, 14) | 1) + 1; 374 newkeylen = (hfs_bnode_read_u8(node, 14) | 1) + 1;
375 else 375 else
376 fd->keylength = newkeylen = tree->max_key_len + 1; 376 fd->keylength = newkeylen = tree->max_key_len + 1;
377 dprint(DBG_BNODE_MOD, "update_rec: %d, %d, %d\n", rec, fd->keylength, newkeylen); 377 hfs_dbg(BNODE_MOD, "update_rec: %d, %d, %d\n",
378 rec, fd->keylength, newkeylen);
378 379
379 rec_off = tree->node_size - (rec + 2) * 2; 380 rec_off = tree->node_size - (rec + 2) * 2;
380 end_rec_off = tree->node_size - (parent->num_recs + 1) * 2; 381 end_rec_off = tree->node_size - (parent->num_recs + 1) * 2;
@@ -385,7 +386,7 @@ again:
385 end_off = hfs_bnode_read_u16(parent, end_rec_off); 386 end_off = hfs_bnode_read_u16(parent, end_rec_off);
386 if (end_rec_off - end_off < diff) { 387 if (end_rec_off - end_off < diff) {
387 388
388 printk(KERN_DEBUG "hfs: splitting index node...\n"); 389 printk(KERN_DEBUG "splitting index node...\n");
389 fd->bnode = parent; 390 fd->bnode = parent;
390 new_node = hfs_bnode_split(fd); 391 new_node = hfs_bnode_split(fd);
391 if (IS_ERR(new_node)) 392 if (IS_ERR(new_node))
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 1cbdeea1db44..1ab19e660e69 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -48,7 +48,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
48 mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz)); 48 mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz));
49 if (HFS_I(tree->inode)->alloc_blocks > 49 if (HFS_I(tree->inode)->alloc_blocks >
50 HFS_I(tree->inode)->first_blocks) { 50 HFS_I(tree->inode)->first_blocks) {
51 printk(KERN_ERR "hfs: invalid btree extent records\n"); 51 pr_err("invalid btree extent records\n");
52 unlock_new_inode(tree->inode); 52 unlock_new_inode(tree->inode);
53 goto free_inode; 53 goto free_inode;
54 } 54 }
@@ -60,8 +60,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
60 mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz)); 60 mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz));
61 61
62 if (!HFS_I(tree->inode)->first_blocks) { 62 if (!HFS_I(tree->inode)->first_blocks) {
63 printk(KERN_ERR "hfs: invalid btree extent records " 63 pr_err("invalid btree extent records (0 size)\n");
64 "(0 size).\n");
65 unlock_new_inode(tree->inode); 64 unlock_new_inode(tree->inode);
66 goto free_inode; 65 goto free_inode;
67 } 66 }
@@ -100,15 +99,15 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
100 switch (id) { 99 switch (id) {
101 case HFS_EXT_CNID: 100 case HFS_EXT_CNID:
102 if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) { 101 if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) {
103 printk(KERN_ERR "hfs: invalid extent max_key_len %d\n", 102 pr_err("invalid extent max_key_len %d\n",
104 tree->max_key_len); 103 tree->max_key_len);
105 goto fail_page; 104 goto fail_page;
106 } 105 }
107 break; 106 break;
108 case HFS_CAT_CNID: 107 case HFS_CAT_CNID:
109 if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) { 108 if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) {
110 printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n", 109 pr_err("invalid catalog max_key_len %d\n",
111 tree->max_key_len); 110 tree->max_key_len);
112 goto fail_page; 111 goto fail_page;
113 } 112 }
114 break; 113 break;
@@ -146,8 +145,9 @@ void hfs_btree_close(struct hfs_btree *tree)
146 while ((node = tree->node_hash[i])) { 145 while ((node = tree->node_hash[i])) {
147 tree->node_hash[i] = node->next_hash; 146 tree->node_hash[i] = node->next_hash;
148 if (atomic_read(&node->refcnt)) 147 if (atomic_read(&node->refcnt))
149 printk(KERN_ERR "hfs: node %d:%d still has %d user(s)!\n", 148 pr_err("node %d:%d still has %d user(s)!\n",
150 node->tree->cnid, node->this, atomic_read(&node->refcnt)); 149 node->tree->cnid, node->this,
150 atomic_read(&node->refcnt));
151 hfs_bnode_free(node); 151 hfs_bnode_free(node);
152 tree->node_hash_cnt--; 152 tree->node_hash_cnt--;
153 } 153 }
@@ -290,7 +290,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
290 kunmap(*pagep); 290 kunmap(*pagep);
291 nidx = node->next; 291 nidx = node->next;
292 if (!nidx) { 292 if (!nidx) {
293 printk(KERN_DEBUG "hfs: create new bmap node...\n"); 293 printk(KERN_DEBUG "create new bmap node...\n");
294 next_node = hfs_bmap_new_bmap(node, idx); 294 next_node = hfs_bmap_new_bmap(node, idx);
295 } else 295 } else
296 next_node = hfs_bnode_find(tree, nidx); 296 next_node = hfs_bnode_find(tree, nidx);
@@ -316,7 +316,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
316 u32 nidx; 316 u32 nidx;
317 u8 *data, byte, m; 317 u8 *data, byte, m;
318 318
319 dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this); 319 hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
320 tree = node->tree; 320 tree = node->tree;
321 nidx = node->this; 321 nidx = node->this;
322 node = hfs_bnode_find(tree, 0); 322 node = hfs_bnode_find(tree, 0);
@@ -331,7 +331,8 @@ void hfs_bmap_free(struct hfs_bnode *node)
331 hfs_bnode_put(node); 331 hfs_bnode_put(node);
332 if (!i) { 332 if (!i) {
333 /* panic */; 333 /* panic */;
334 printk(KERN_CRIT "hfs: unable to free bnode %u. bmap not found!\n", node->this); 334 pr_crit("unable to free bnode %u. bmap not found!\n",
335 node->this);
335 return; 336 return;
336 } 337 }
337 node = hfs_bnode_find(tree, i); 338 node = hfs_bnode_find(tree, i);
@@ -339,7 +340,8 @@ void hfs_bmap_free(struct hfs_bnode *node)
339 return; 340 return;
340 if (node->type != HFS_NODE_MAP) { 341 if (node->type != HFS_NODE_MAP) {
341 /* panic */; 342 /* panic */;
342 printk(KERN_CRIT "hfs: invalid bmap found! (%u,%d)\n", node->this, node->type); 343 pr_crit("invalid bmap found! (%u,%d)\n",
344 node->this, node->type);
343 hfs_bnode_put(node); 345 hfs_bnode_put(node);
344 return; 346 return;
345 } 347 }
@@ -352,7 +354,8 @@ void hfs_bmap_free(struct hfs_bnode *node)
352 m = 1 << (~nidx & 7); 354 m = 1 << (~nidx & 7);
353 byte = data[off]; 355 byte = data[off];
354 if (!(byte & m)) { 356 if (!(byte & m)) {
355 printk(KERN_CRIT "hfs: trying to free free bnode %u(%d)\n", node->this, node->type); 357 pr_crit("trying to free free bnode %u(%d)\n",
358 node->this, node->type);
356 kunmap(page); 359 kunmap(page);
357 hfs_bnode_put(node); 360 hfs_bnode_put(node);
358 return; 361 return;
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
index 424b0337f524..ff0316b925a5 100644
--- a/fs/hfs/catalog.c
+++ b/fs/hfs/catalog.c
@@ -87,12 +87,15 @@ int hfs_cat_create(u32 cnid, struct inode *dir, struct qstr *str, struct inode *
87 int entry_size; 87 int entry_size;
88 int err; 88 int err;
89 89
90 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink); 90 hfs_dbg(CAT_MOD, "create_cat: %s,%u(%d)\n",
91 str->name, cnid, inode->i_nlink);
91 if (dir->i_size >= HFS_MAX_VALENCE) 92 if (dir->i_size >= HFS_MAX_VALENCE)
92 return -ENOSPC; 93 return -ENOSPC;
93 94
94 sb = dir->i_sb; 95 sb = dir->i_sb;
95 hfs_find_init(HFS_SB(sb)->cat_tree, &fd); 96 err = hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
97 if (err)
98 return err;
96 99
97 hfs_cat_build_key(sb, fd.search_key, cnid, NULL); 100 hfs_cat_build_key(sb, fd.search_key, cnid, NULL);
98 entry_size = hfs_cat_build_thread(sb, &entry, S_ISDIR(inode->i_mode) ? 101 entry_size = hfs_cat_build_thread(sb, &entry, S_ISDIR(inode->i_mode) ?
@@ -184,14 +187,14 @@ int hfs_cat_find_brec(struct super_block *sb, u32 cnid,
184 187
185 type = rec.type; 188 type = rec.type;
186 if (type != HFS_CDR_THD && type != HFS_CDR_FTH) { 189 if (type != HFS_CDR_THD && type != HFS_CDR_FTH) {
187 printk(KERN_ERR "hfs: found bad thread record in catalog\n"); 190 pr_err("found bad thread record in catalog\n");
188 return -EIO; 191 return -EIO;
189 } 192 }
190 193
191 fd->search_key->cat.ParID = rec.thread.ParID; 194 fd->search_key->cat.ParID = rec.thread.ParID;
192 len = fd->search_key->cat.CName.len = rec.thread.CName.len; 195 len = fd->search_key->cat.CName.len = rec.thread.CName.len;
193 if (len > HFS_NAMELEN) { 196 if (len > HFS_NAMELEN) {
194 printk(KERN_ERR "hfs: bad catalog namelength\n"); 197 pr_err("bad catalog namelength\n");
195 return -EIO; 198 return -EIO;
196 } 199 }
197 memcpy(fd->search_key->cat.CName.name, rec.thread.CName.name, len); 200 memcpy(fd->search_key->cat.CName.name, rec.thread.CName.name, len);
@@ -212,9 +215,11 @@ int hfs_cat_delete(u32 cnid, struct inode *dir, struct qstr *str)
212 struct list_head *pos; 215 struct list_head *pos;
213 int res, type; 216 int res, type;
214 217
215 dprint(DBG_CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid); 218 hfs_dbg(CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid);
216 sb = dir->i_sb; 219 sb = dir->i_sb;
217 hfs_find_init(HFS_SB(sb)->cat_tree, &fd); 220 res = hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
221 if (res)
222 return res;
218 223
219 hfs_cat_build_key(sb, fd.search_key, dir->i_ino, str); 224 hfs_cat_build_key(sb, fd.search_key, dir->i_ino, str);
220 res = hfs_brec_find(&fd); 225 res = hfs_brec_find(&fd);
@@ -278,10 +283,13 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, struct qstr *src_name,
278 int entry_size, type; 283 int entry_size, type;
279 int err; 284 int err;
280 285
281 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name, 286 hfs_dbg(CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
287 cnid, src_dir->i_ino, src_name->name,
282 dst_dir->i_ino, dst_name->name); 288 dst_dir->i_ino, dst_name->name);
283 sb = src_dir->i_sb; 289 sb = src_dir->i_sb;
284 hfs_find_init(HFS_SB(sb)->cat_tree, &src_fd); 290 err = hfs_find_init(HFS_SB(sb)->cat_tree, &src_fd);
291 if (err)
292 return err;
285 dst_fd = src_fd; 293 dst_fd = src_fd;
286 294
287 /* find the old dir entry and read the data */ 295 /* find the old dir entry and read the data */
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index 5f7f1abd5f6d..17c22a8fd40a 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -25,7 +25,9 @@ static struct dentry *hfs_lookup(struct inode *dir, struct dentry *dentry,
25 struct inode *inode = NULL; 25 struct inode *inode = NULL;
26 int res; 26 int res;
27 27
28 hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd); 28 res = hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd);
29 if (res)
30 return ERR_PTR(res);
29 hfs_cat_build_key(dir->i_sb, fd.search_key, dir->i_ino, &dentry->d_name); 31 hfs_cat_build_key(dir->i_sb, fd.search_key, dir->i_ino, &dentry->d_name);
30 res = hfs_brec_read(&fd, &rec, sizeof(rec)); 32 res = hfs_brec_read(&fd, &rec, sizeof(rec));
31 if (res) { 33 if (res) {
@@ -63,7 +65,9 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
63 if (filp->f_pos >= inode->i_size) 65 if (filp->f_pos >= inode->i_size)
64 return 0; 66 return 0;
65 67
66 hfs_find_init(HFS_SB(sb)->cat_tree, &fd); 68 err = hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
69 if (err)
70 return err;
67 hfs_cat_build_key(sb, fd.search_key, inode->i_ino, NULL); 71 hfs_cat_build_key(sb, fd.search_key, inode->i_ino, NULL);
68 err = hfs_brec_find(&fd); 72 err = hfs_brec_find(&fd);
69 if (err) 73 if (err)
@@ -84,12 +88,12 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
84 88
85 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); 89 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
86 if (entry.type != HFS_CDR_THD) { 90 if (entry.type != HFS_CDR_THD) {
87 printk(KERN_ERR "hfs: bad catalog folder thread\n"); 91 pr_err("bad catalog folder thread\n");
88 err = -EIO; 92 err = -EIO;
89 goto out; 93 goto out;
90 } 94 }
91 //if (fd.entrylength < HFS_MIN_THREAD_SZ) { 95 //if (fd.entrylength < HFS_MIN_THREAD_SZ) {
92 // printk(KERN_ERR "hfs: truncated catalog thread\n"); 96 // pr_err("truncated catalog thread\n");
93 // err = -EIO; 97 // err = -EIO;
94 // goto out; 98 // goto out;
95 //} 99 //}
@@ -108,7 +112,7 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
108 112
109 for (;;) { 113 for (;;) {
110 if (be32_to_cpu(fd.key->cat.ParID) != inode->i_ino) { 114 if (be32_to_cpu(fd.key->cat.ParID) != inode->i_ino) {
111 printk(KERN_ERR "hfs: walked past end of dir\n"); 115 pr_err("walked past end of dir\n");
112 err = -EIO; 116 err = -EIO;
113 goto out; 117 goto out;
114 } 118 }
@@ -123,7 +127,7 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
123 len = hfs_mac2asc(sb, strbuf, &fd.key->cat.CName); 127 len = hfs_mac2asc(sb, strbuf, &fd.key->cat.CName);
124 if (type == HFS_CDR_DIR) { 128 if (type == HFS_CDR_DIR) {
125 if (fd.entrylength < sizeof(struct hfs_cat_dir)) { 129 if (fd.entrylength < sizeof(struct hfs_cat_dir)) {
126 printk(KERN_ERR "hfs: small dir entry\n"); 130 pr_err("small dir entry\n");
127 err = -EIO; 131 err = -EIO;
128 goto out; 132 goto out;
129 } 133 }
@@ -132,7 +136,7 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
132 break; 136 break;
133 } else if (type == HFS_CDR_FIL) { 137 } else if (type == HFS_CDR_FIL) {
134 if (fd.entrylength < sizeof(struct hfs_cat_file)) { 138 if (fd.entrylength < sizeof(struct hfs_cat_file)) {
135 printk(KERN_ERR "hfs: small file entry\n"); 139 pr_err("small file entry\n");
136 err = -EIO; 140 err = -EIO;
137 goto out; 141 goto out;
138 } 142 }
@@ -140,7 +144,7 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
140 be32_to_cpu(entry.file.FlNum), DT_REG)) 144 be32_to_cpu(entry.file.FlNum), DT_REG))
141 break; 145 break;
142 } else { 146 } else {
143 printk(KERN_ERR "hfs: bad catalog entry type %d\n", type); 147 pr_err("bad catalog entry type %d\n", type);
144 err = -EIO; 148 err = -EIO;
145 goto out; 149 goto out;
146 } 150 }
diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c
index a67955a0c36f..e33a0d36a93e 100644
--- a/fs/hfs/extent.c
+++ b/fs/hfs/extent.c
@@ -107,7 +107,7 @@ static u16 hfs_ext_lastblock(struct hfs_extent *ext)
107 return be16_to_cpu(ext->block) + be16_to_cpu(ext->count); 107 return be16_to_cpu(ext->block) + be16_to_cpu(ext->count);
108} 108}
109 109
110static void __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd) 110static int __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
111{ 111{
112 int res; 112 int res;
113 113
@@ -116,26 +116,31 @@ static void __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd
116 res = hfs_brec_find(fd); 116 res = hfs_brec_find(fd);
117 if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) { 117 if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) {
118 if (res != -ENOENT) 118 if (res != -ENOENT)
119 return; 119 return res;
120 hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec)); 120 hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec));
121 HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW); 121 HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
122 } else { 122 } else {
123 if (res) 123 if (res)
124 return; 124 return res;
125 hfs_bnode_write(fd->bnode, HFS_I(inode)->cached_extents, fd->entryoffset, fd->entrylength); 125 hfs_bnode_write(fd->bnode, HFS_I(inode)->cached_extents, fd->entryoffset, fd->entrylength);
126 HFS_I(inode)->flags &= ~HFS_FLG_EXT_DIRTY; 126 HFS_I(inode)->flags &= ~HFS_FLG_EXT_DIRTY;
127 } 127 }
128 return 0;
128} 129}
129 130
130void hfs_ext_write_extent(struct inode *inode) 131int hfs_ext_write_extent(struct inode *inode)
131{ 132{
132 struct hfs_find_data fd; 133 struct hfs_find_data fd;
134 int res = 0;
133 135
134 if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) { 136 if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) {
135 hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd); 137 res = hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
136 __hfs_ext_write_extent(inode, &fd); 138 if (res)
139 return res;
140 res = __hfs_ext_write_extent(inode, &fd);
137 hfs_find_exit(&fd); 141 hfs_find_exit(&fd);
138 } 142 }
143 return res;
139} 144}
140 145
141static inline int __hfs_ext_read_extent(struct hfs_find_data *fd, struct hfs_extent *extent, 146static inline int __hfs_ext_read_extent(struct hfs_find_data *fd, struct hfs_extent *extent,
@@ -161,8 +166,11 @@ static inline int __hfs_ext_cache_extent(struct hfs_find_data *fd, struct inode
161{ 166{
162 int res; 167 int res;
163 168
164 if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) 169 if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) {
165 __hfs_ext_write_extent(inode, fd); 170 res = __hfs_ext_write_extent(inode, fd);
171 if (res)
172 return res;
173 }
166 174
167 res = __hfs_ext_read_extent(fd, HFS_I(inode)->cached_extents, inode->i_ino, 175 res = __hfs_ext_read_extent(fd, HFS_I(inode)->cached_extents, inode->i_ino,
168 block, HFS_IS_RSRC(inode) ? HFS_FK_RSRC : HFS_FK_DATA); 176 block, HFS_IS_RSRC(inode) ? HFS_FK_RSRC : HFS_FK_DATA);
@@ -185,9 +193,11 @@ static int hfs_ext_read_extent(struct inode *inode, u16 block)
185 block < HFS_I(inode)->cached_start + HFS_I(inode)->cached_blocks) 193 block < HFS_I(inode)->cached_start + HFS_I(inode)->cached_blocks)
186 return 0; 194 return 0;
187 195
188 hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd); 196 res = hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
189 res = __hfs_ext_cache_extent(&fd, inode, block); 197 if (!res) {
190 hfs_find_exit(&fd); 198 res = __hfs_ext_cache_extent(&fd, inode, block);
199 hfs_find_exit(&fd);
200 }
191 return res; 201 return res;
192} 202}
193 203
@@ -195,11 +205,12 @@ static void hfs_dump_extent(struct hfs_extent *extent)
195{ 205{
196 int i; 206 int i;
197 207
198 dprint(DBG_EXTENT, " "); 208 hfs_dbg(EXTENT, " ");
199 for (i = 0; i < 3; i++) 209 for (i = 0; i < 3; i++)
200 dprint(DBG_EXTENT, " %u:%u", be16_to_cpu(extent[i].block), 210 hfs_dbg_cont(EXTENT, " %u:%u",
201 be16_to_cpu(extent[i].count)); 211 be16_to_cpu(extent[i].block),
202 dprint(DBG_EXTENT, "\n"); 212 be16_to_cpu(extent[i].count));
213 hfs_dbg_cont(EXTENT, "\n");
203} 214}
204 215
205static int hfs_add_extent(struct hfs_extent *extent, u16 offset, 216static int hfs_add_extent(struct hfs_extent *extent, u16 offset,
@@ -298,7 +309,9 @@ int hfs_free_fork(struct super_block *sb, struct hfs_cat_file *file, int type)
298 if (total_blocks == blocks) 309 if (total_blocks == blocks)
299 return 0; 310 return 0;
300 311
301 hfs_find_init(HFS_SB(sb)->ext_tree, &fd); 312 res = hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
313 if (res)
314 return res;
302 do { 315 do {
303 res = __hfs_ext_read_extent(&fd, extent, cnid, total_blocks, type); 316 res = __hfs_ext_read_extent(&fd, extent, cnid, total_blocks, type);
304 if (res) 317 if (res)
@@ -392,10 +405,10 @@ int hfs_extend_file(struct inode *inode)
392 goto out; 405 goto out;
393 } 406 }
394 407
395 dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len); 408 hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
396 if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks) { 409 if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks) {
397 if (!HFS_I(inode)->first_blocks) { 410 if (!HFS_I(inode)->first_blocks) {
398 dprint(DBG_EXTENT, "first extents\n"); 411 hfs_dbg(EXTENT, "first extents\n");
399 /* no extents yet */ 412 /* no extents yet */
400 HFS_I(inode)->first_extents[0].block = cpu_to_be16(start); 413 HFS_I(inode)->first_extents[0].block = cpu_to_be16(start);
401 HFS_I(inode)->first_extents[0].count = cpu_to_be16(len); 414 HFS_I(inode)->first_extents[0].count = cpu_to_be16(len);
@@ -437,8 +450,10 @@ out:
437 return res; 450 return res;
438 451
439insert_extent: 452insert_extent:
440 dprint(DBG_EXTENT, "insert new extent\n"); 453 hfs_dbg(EXTENT, "insert new extent\n");
441 hfs_ext_write_extent(inode); 454 res = hfs_ext_write_extent(inode);
455 if (res)
456 goto out;
442 457
443 memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec)); 458 memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec));
444 HFS_I(inode)->cached_extents[0].block = cpu_to_be16(start); 459 HFS_I(inode)->cached_extents[0].block = cpu_to_be16(start);
@@ -460,13 +475,13 @@ void hfs_file_truncate(struct inode *inode)
460 u32 size; 475 u32 size;
461 int res; 476 int res;
462 477
463 dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n", inode->i_ino, 478 hfs_dbg(INODE, "truncate: %lu, %Lu -> %Lu\n",
464 (long long)HFS_I(inode)->phys_size, inode->i_size); 479 inode->i_ino, (long long)HFS_I(inode)->phys_size,
480 inode->i_size);
465 if (inode->i_size > HFS_I(inode)->phys_size) { 481 if (inode->i_size > HFS_I(inode)->phys_size) {
466 struct address_space *mapping = inode->i_mapping; 482 struct address_space *mapping = inode->i_mapping;
467 void *fsdata; 483 void *fsdata;
468 struct page *page; 484 struct page *page;
469 int res;
470 485
471 /* XXX: Can use generic_cont_expand? */ 486 /* XXX: Can use generic_cont_expand? */
472 size = inode->i_size - 1; 487 size = inode->i_size - 1;
@@ -488,7 +503,12 @@ void hfs_file_truncate(struct inode *inode)
488 goto out; 503 goto out;
489 504
490 mutex_lock(&HFS_I(inode)->extents_lock); 505 mutex_lock(&HFS_I(inode)->extents_lock);
491 hfs_find_init(HFS_SB(sb)->ext_tree, &fd); 506 res = hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
507 if (res) {
508 mutex_unlock(&HFS_I(inode)->extents_lock);
509 /* XXX: We lack error handling of hfs_file_truncate() */
510 return;
511 }
492 while (1) { 512 while (1) {
493 if (alloc_cnt == HFS_I(inode)->first_blocks) { 513 if (alloc_cnt == HFS_I(inode)->first_blocks) {
494 hfs_free_extents(sb, HFS_I(inode)->first_extents, 514 hfs_free_extents(sb, HFS_I(inode)->first_extents,
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index 693df9fe52b2..a73b11839a41 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -9,6 +9,12 @@
9#ifndef _LINUX_HFS_FS_H 9#ifndef _LINUX_HFS_FS_H
10#define _LINUX_HFS_FS_H 10#define _LINUX_HFS_FS_H
11 11
12#ifdef pr_fmt
13#undef pr_fmt
14#endif
15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
12#include <linux/slab.h> 18#include <linux/slab.h>
13#include <linux/types.h> 19#include <linux/types.h>
14#include <linux/mutex.h> 20#include <linux/mutex.h>
@@ -34,8 +40,18 @@
34//#define DBG_MASK (DBG_CAT_MOD|DBG_BNODE_REFS|DBG_INODE|DBG_EXTENT) 40//#define DBG_MASK (DBG_CAT_MOD|DBG_BNODE_REFS|DBG_INODE|DBG_EXTENT)
35#define DBG_MASK (0) 41#define DBG_MASK (0)
36 42
37#define dprint(flg, fmt, args...) \ 43#define hfs_dbg(flg, fmt, ...) \
38 if (flg & DBG_MASK) printk(fmt , ## args) 44do { \
45 if (DBG_##flg & DBG_MASK) \
46 printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
47} while (0)
48
49#define hfs_dbg_cont(flg, fmt, ...) \
50do { \
51 if (DBG_##flg & DBG_MASK) \
52 pr_cont(fmt, ##__VA_ARGS__); \
53} while (0)
54
39 55
40/* 56/*
41 * struct hfs_inode_info 57 * struct hfs_inode_info
@@ -174,7 +190,7 @@ extern const struct inode_operations hfs_dir_inode_operations;
174/* extent.c */ 190/* extent.c */
175extern int hfs_ext_keycmp(const btree_key *, const btree_key *); 191extern int hfs_ext_keycmp(const btree_key *, const btree_key *);
176extern int hfs_free_fork(struct super_block *, struct hfs_cat_file *, int); 192extern int hfs_free_fork(struct super_block *, struct hfs_cat_file *, int);
177extern void hfs_ext_write_extent(struct inode *); 193extern int hfs_ext_write_extent(struct inode *);
178extern int hfs_extend_file(struct inode *); 194extern int hfs_extend_file(struct inode *);
179extern void hfs_file_truncate(struct inode *); 195extern void hfs_file_truncate(struct inode *);
180 196
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 3031dfdd2358..716e1aafb2e2 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -237,7 +237,7 @@ void hfs_delete_inode(struct inode *inode)
237{ 237{
238 struct super_block *sb = inode->i_sb; 238 struct super_block *sb = inode->i_sb;
239 239
240 dprint(DBG_INODE, "delete_inode: %lu\n", inode->i_ino); 240 hfs_dbg(INODE, "delete_inode: %lu\n", inode->i_ino);
241 if (S_ISDIR(inode->i_mode)) { 241 if (S_ISDIR(inode->i_mode)) {
242 HFS_SB(sb)->folder_count--; 242 HFS_SB(sb)->folder_count--;
243 if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID)) 243 if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
@@ -416,9 +416,12 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
416 struct inode *main_inode = inode; 416 struct inode *main_inode = inode;
417 struct hfs_find_data fd; 417 struct hfs_find_data fd;
418 hfs_cat_rec rec; 418 hfs_cat_rec rec;
419 int res;
419 420
420 dprint(DBG_INODE, "hfs_write_inode: %lu\n", inode->i_ino); 421 hfs_dbg(INODE, "hfs_write_inode: %lu\n", inode->i_ino);
421 hfs_ext_write_extent(inode); 422 res = hfs_ext_write_extent(inode);
423 if (res)
424 return res;
422 425
423 if (inode->i_ino < HFS_FIRSTUSER_CNID) { 426 if (inode->i_ino < HFS_FIRSTUSER_CNID) {
424 switch (inode->i_ino) { 427 switch (inode->i_ino) {
@@ -515,7 +518,11 @@ static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry,
515 if (!inode) 518 if (!inode)
516 return ERR_PTR(-ENOMEM); 519 return ERR_PTR(-ENOMEM);
517 520
518 hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd); 521 res = hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd);
522 if (res) {
523 iput(inode);
524 return ERR_PTR(res);
525 }
519 fd.search_key->cat = HFS_I(dir)->cat_key; 526 fd.search_key->cat = HFS_I(dir)->cat_key;
520 res = hfs_brec_read(&fd, &rec, sizeof(rec)); 527 res = hfs_brec_read(&fd, &rec, sizeof(rec));
521 if (!res) { 528 if (!res) {
diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c
index b7ec224910c5..aa3f0d6d043c 100644
--- a/fs/hfs/mdb.c
+++ b/fs/hfs/mdb.c
@@ -48,7 +48,7 @@ static int hfs_get_last_session(struct super_block *sb,
48 *start = (sector_t)te.cdte_addr.lba << 2; 48 *start = (sector_t)te.cdte_addr.lba << 2;
49 return 0; 49 return 0;
50 } 50 }
51 printk(KERN_ERR "hfs: invalid session number or type of track\n"); 51 pr_err("invalid session number or type of track\n");
52 return -EINVAL; 52 return -EINVAL;
53 } 53 }
54 ms_info.addr_format = CDROM_LBA; 54 ms_info.addr_format = CDROM_LBA;
@@ -101,7 +101,7 @@ int hfs_mdb_get(struct super_block *sb)
101 101
102 HFS_SB(sb)->alloc_blksz = size = be32_to_cpu(mdb->drAlBlkSiz); 102 HFS_SB(sb)->alloc_blksz = size = be32_to_cpu(mdb->drAlBlkSiz);
103 if (!size || (size & (HFS_SECTOR_SIZE - 1))) { 103 if (!size || (size & (HFS_SECTOR_SIZE - 1))) {
104 printk(KERN_ERR "hfs: bad allocation block size %d\n", size); 104 pr_err("bad allocation block size %d\n", size);
105 goto out_bh; 105 goto out_bh;
106 } 106 }
107 107
@@ -118,7 +118,7 @@ int hfs_mdb_get(struct super_block *sb)
118 size >>= 1; 118 size >>= 1;
119 brelse(bh); 119 brelse(bh);
120 if (!sb_set_blocksize(sb, size)) { 120 if (!sb_set_blocksize(sb, size)) {
121 printk(KERN_ERR "hfs: unable to set blocksize to %u\n", size); 121 pr_err("unable to set blocksize to %u\n", size);
122 goto out; 122 goto out;
123 } 123 }
124 124
@@ -162,8 +162,8 @@ int hfs_mdb_get(struct super_block *sb)
162 } 162 }
163 163
164 if (!HFS_SB(sb)->alt_mdb) { 164 if (!HFS_SB(sb)->alt_mdb) {
165 printk(KERN_WARNING "hfs: unable to locate alternate MDB\n"); 165 pr_warn("unable to locate alternate MDB\n");
166 printk(KERN_WARNING "hfs: continuing without an alternate MDB\n"); 166 pr_warn("continuing without an alternate MDB\n");
167 } 167 }
168 168
169 HFS_SB(sb)->bitmap = (__be32 *)__get_free_pages(GFP_KERNEL, PAGE_SIZE < 8192 ? 1 : 0); 169 HFS_SB(sb)->bitmap = (__be32 *)__get_free_pages(GFP_KERNEL, PAGE_SIZE < 8192 ? 1 : 0);
@@ -178,7 +178,7 @@ int hfs_mdb_get(struct super_block *sb)
178 while (size) { 178 while (size) {
179 bh = sb_bread(sb, off >> sb->s_blocksize_bits); 179 bh = sb_bread(sb, off >> sb->s_blocksize_bits);
180 if (!bh) { 180 if (!bh) {
181 printk(KERN_ERR "hfs: unable to read volume bitmap\n"); 181 pr_err("unable to read volume bitmap\n");
182 goto out; 182 goto out;
183 } 183 }
184 off2 = off & (sb->s_blocksize - 1); 184 off2 = off & (sb->s_blocksize - 1);
@@ -192,23 +192,22 @@ int hfs_mdb_get(struct super_block *sb)
192 192
193 HFS_SB(sb)->ext_tree = hfs_btree_open(sb, HFS_EXT_CNID, hfs_ext_keycmp); 193 HFS_SB(sb)->ext_tree = hfs_btree_open(sb, HFS_EXT_CNID, hfs_ext_keycmp);
194 if (!HFS_SB(sb)->ext_tree) { 194 if (!HFS_SB(sb)->ext_tree) {
195 printk(KERN_ERR "hfs: unable to open extent tree\n"); 195 pr_err("unable to open extent tree\n");
196 goto out; 196 goto out;
197 } 197 }
198 HFS_SB(sb)->cat_tree = hfs_btree_open(sb, HFS_CAT_CNID, hfs_cat_keycmp); 198 HFS_SB(sb)->cat_tree = hfs_btree_open(sb, HFS_CAT_CNID, hfs_cat_keycmp);
199 if (!HFS_SB(sb)->cat_tree) { 199 if (!HFS_SB(sb)->cat_tree) {
200 printk(KERN_ERR "hfs: unable to open catalog tree\n"); 200 pr_err("unable to open catalog tree\n");
201 goto out; 201 goto out;
202 } 202 }
203 203
204 attrib = mdb->drAtrb; 204 attrib = mdb->drAtrb;
205 if (!(attrib & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) { 205 if (!(attrib & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
206 printk(KERN_WARNING "hfs: filesystem was not cleanly unmounted, " 206 pr_warn("filesystem was not cleanly unmounted, running fsck.hfs is recommended. mounting read-only.\n");
207 "running fsck.hfs is recommended. mounting read-only.\n");
208 sb->s_flags |= MS_RDONLY; 207 sb->s_flags |= MS_RDONLY;
209 } 208 }
210 if ((attrib & cpu_to_be16(HFS_SB_ATTRIB_SLOCK))) { 209 if ((attrib & cpu_to_be16(HFS_SB_ATTRIB_SLOCK))) {
211 printk(KERN_WARNING "hfs: filesystem is marked locked, mounting read-only.\n"); 210 pr_warn("filesystem is marked locked, mounting read-only.\n");
212 sb->s_flags |= MS_RDONLY; 211 sb->s_flags |= MS_RDONLY;
213 } 212 }
214 if (!(sb->s_flags & MS_RDONLY)) { 213 if (!(sb->s_flags & MS_RDONLY)) {
@@ -312,7 +311,7 @@ void hfs_mdb_commit(struct super_block *sb)
312 while (size) { 311 while (size) {
313 bh = sb_bread(sb, block); 312 bh = sb_bread(sb, block);
314 if (!bh) { 313 if (!bh) {
315 printk(KERN_ERR "hfs: unable to read volume bitmap\n"); 314 pr_err("unable to read volume bitmap\n");
316 break; 315 break;
317 } 316 }
318 len = min((int)sb->s_blocksize - off, size); 317 len = min((int)sb->s_blocksize - off, size);
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index bbaaa8a4ee64..2d2039e754cd 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -117,12 +117,11 @@ static int hfs_remount(struct super_block *sb, int *flags, char *data)
117 return 0; 117 return 0;
118 if (!(*flags & MS_RDONLY)) { 118 if (!(*flags & MS_RDONLY)) {
119 if (!(HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) { 119 if (!(HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
120 printk(KERN_WARNING "hfs: filesystem was not cleanly unmounted, " 120 pr_warn("filesystem was not cleanly unmounted, running fsck.hfs is recommended. leaving read-only.\n");
121 "running fsck.hfs is recommended. leaving read-only.\n");
122 sb->s_flags |= MS_RDONLY; 121 sb->s_flags |= MS_RDONLY;
123 *flags |= MS_RDONLY; 122 *flags |= MS_RDONLY;
124 } else if (HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_SLOCK)) { 123 } else if (HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_SLOCK)) {
125 printk(KERN_WARNING "hfs: filesystem is marked locked, leaving read-only.\n"); 124 pr_warn("filesystem is marked locked, leaving read-only.\n");
126 sb->s_flags |= MS_RDONLY; 125 sb->s_flags |= MS_RDONLY;
127 *flags |= MS_RDONLY; 126 *flags |= MS_RDONLY;
128 } 127 }
@@ -253,29 +252,29 @@ static int parse_options(char *options, struct hfs_sb_info *hsb)
253 switch (token) { 252 switch (token) {
254 case opt_uid: 253 case opt_uid:
255 if (match_int(&args[0], &tmp)) { 254 if (match_int(&args[0], &tmp)) {
256 printk(KERN_ERR "hfs: uid requires an argument\n"); 255 pr_err("uid requires an argument\n");
257 return 0; 256 return 0;
258 } 257 }
259 hsb->s_uid = make_kuid(current_user_ns(), (uid_t)tmp); 258 hsb->s_uid = make_kuid(current_user_ns(), (uid_t)tmp);
260 if (!uid_valid(hsb->s_uid)) { 259 if (!uid_valid(hsb->s_uid)) {
261 printk(KERN_ERR "hfs: invalid uid %d\n", tmp); 260 pr_err("invalid uid %d\n", tmp);
262 return 0; 261 return 0;
263 } 262 }
264 break; 263 break;
265 case opt_gid: 264 case opt_gid:
266 if (match_int(&args[0], &tmp)) { 265 if (match_int(&args[0], &tmp)) {
267 printk(KERN_ERR "hfs: gid requires an argument\n"); 266 pr_err("gid requires an argument\n");
268 return 0; 267 return 0;
269 } 268 }
270 hsb->s_gid = make_kgid(current_user_ns(), (gid_t)tmp); 269 hsb->s_gid = make_kgid(current_user_ns(), (gid_t)tmp);
271 if (!gid_valid(hsb->s_gid)) { 270 if (!gid_valid(hsb->s_gid)) {
272 printk(KERN_ERR "hfs: invalid gid %d\n", tmp); 271 pr_err("invalid gid %d\n", tmp);
273 return 0; 272 return 0;
274 } 273 }
275 break; 274 break;
276 case opt_umask: 275 case opt_umask:
277 if (match_octal(&args[0], &tmp)) { 276 if (match_octal(&args[0], &tmp)) {
278 printk(KERN_ERR "hfs: umask requires a value\n"); 277 pr_err("umask requires a value\n");
279 return 0; 278 return 0;
280 } 279 }
281 hsb->s_file_umask = (umode_t)tmp; 280 hsb->s_file_umask = (umode_t)tmp;
@@ -283,39 +282,39 @@ static int parse_options(char *options, struct hfs_sb_info *hsb)
283 break; 282 break;
284 case opt_file_umask: 283 case opt_file_umask:
285 if (match_octal(&args[0], &tmp)) { 284 if (match_octal(&args[0], &tmp)) {
286 printk(KERN_ERR "hfs: file_umask requires a value\n"); 285 pr_err("file_umask requires a value\n");
287 return 0; 286 return 0;
288 } 287 }
289 hsb->s_file_umask = (umode_t)tmp; 288 hsb->s_file_umask = (umode_t)tmp;
290 break; 289 break;
291 case opt_dir_umask: 290 case opt_dir_umask:
292 if (match_octal(&args[0], &tmp)) { 291 if (match_octal(&args[0], &tmp)) {
293 printk(KERN_ERR "hfs: dir_umask requires a value\n"); 292 pr_err("dir_umask requires a value\n");
294 return 0; 293 return 0;
295 } 294 }
296 hsb->s_dir_umask = (umode_t)tmp; 295 hsb->s_dir_umask = (umode_t)tmp;
297 break; 296 break;
298 case opt_part: 297 case opt_part:
299 if (match_int(&args[0], &hsb->part)) { 298 if (match_int(&args[0], &hsb->part)) {
300 printk(KERN_ERR "hfs: part requires an argument\n"); 299 pr_err("part requires an argument\n");
301 return 0; 300 return 0;
302 } 301 }
303 break; 302 break;
304 case opt_session: 303 case opt_session:
305 if (match_int(&args[0], &hsb->session)) { 304 if (match_int(&args[0], &hsb->session)) {
306 printk(KERN_ERR "hfs: session requires an argument\n"); 305 pr_err("session requires an argument\n");
307 return 0; 306 return 0;
308 } 307 }
309 break; 308 break;
310 case opt_type: 309 case opt_type:
311 if (match_fourchar(&args[0], &hsb->s_type)) { 310 if (match_fourchar(&args[0], &hsb->s_type)) {
312 printk(KERN_ERR "hfs: type requires a 4 character value\n"); 311 pr_err("type requires a 4 character value\n");
313 return 0; 312 return 0;
314 } 313 }
315 break; 314 break;
316 case opt_creator: 315 case opt_creator:
317 if (match_fourchar(&args[0], &hsb->s_creator)) { 316 if (match_fourchar(&args[0], &hsb->s_creator)) {
318 printk(KERN_ERR "hfs: creator requires a 4 character value\n"); 317 pr_err("creator requires a 4 character value\n");
319 return 0; 318 return 0;
320 } 319 }
321 break; 320 break;
@@ -324,14 +323,14 @@ static int parse_options(char *options, struct hfs_sb_info *hsb)
324 break; 323 break;
325 case opt_codepage: 324 case opt_codepage:
326 if (hsb->nls_disk) { 325 if (hsb->nls_disk) {
327 printk(KERN_ERR "hfs: unable to change codepage\n"); 326 pr_err("unable to change codepage\n");
328 return 0; 327 return 0;
329 } 328 }
330 p = match_strdup(&args[0]); 329 p = match_strdup(&args[0]);
331 if (p) 330 if (p)
332 hsb->nls_disk = load_nls(p); 331 hsb->nls_disk = load_nls(p);
333 if (!hsb->nls_disk) { 332 if (!hsb->nls_disk) {
334 printk(KERN_ERR "hfs: unable to load codepage \"%s\"\n", p); 333 pr_err("unable to load codepage \"%s\"\n", p);
335 kfree(p); 334 kfree(p);
336 return 0; 335 return 0;
337 } 336 }
@@ -339,14 +338,14 @@ static int parse_options(char *options, struct hfs_sb_info *hsb)
339 break; 338 break;
340 case opt_iocharset: 339 case opt_iocharset:
341 if (hsb->nls_io) { 340 if (hsb->nls_io) {
342 printk(KERN_ERR "hfs: unable to change iocharset\n"); 341 pr_err("unable to change iocharset\n");
343 return 0; 342 return 0;
344 } 343 }
345 p = match_strdup(&args[0]); 344 p = match_strdup(&args[0]);
346 if (p) 345 if (p)
347 hsb->nls_io = load_nls(p); 346 hsb->nls_io = load_nls(p);
348 if (!hsb->nls_io) { 347 if (!hsb->nls_io) {
349 printk(KERN_ERR "hfs: unable to load iocharset \"%s\"\n", p); 348 pr_err("unable to load iocharset \"%s\"\n", p);
350 kfree(p); 349 kfree(p);
351 return 0; 350 return 0;
352 } 351 }
@@ -360,7 +359,7 @@ static int parse_options(char *options, struct hfs_sb_info *hsb)
360 if (hsb->nls_disk && !hsb->nls_io) { 359 if (hsb->nls_disk && !hsb->nls_io) {
361 hsb->nls_io = load_nls_default(); 360 hsb->nls_io = load_nls_default();
362 if (!hsb->nls_io) { 361 if (!hsb->nls_io) {
363 printk(KERN_ERR "hfs: unable to load default iocharset\n"); 362 pr_err("unable to load default iocharset\n");
364 return 0; 363 return 0;
365 } 364 }
366 } 365 }
@@ -400,7 +399,7 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
400 399
401 res = -EINVAL; 400 res = -EINVAL;
402 if (!parse_options((char *)data, sbi)) { 401 if (!parse_options((char *)data, sbi)) {
403 printk(KERN_ERR "hfs: unable to parse mount options.\n"); 402 pr_err("unable to parse mount options\n");
404 goto bail; 403 goto bail;
405 } 404 }
406 405
@@ -411,14 +410,16 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
411 res = hfs_mdb_get(sb); 410 res = hfs_mdb_get(sb);
412 if (res) { 411 if (res) {
413 if (!silent) 412 if (!silent)
414 printk(KERN_WARNING "hfs: can't find a HFS filesystem on dev %s.\n", 413 pr_warn("can't find a HFS filesystem on dev %s\n",
415 hfs_mdb_name(sb)); 414 hfs_mdb_name(sb));
416 res = -EINVAL; 415 res = -EINVAL;
417 goto bail; 416 goto bail;
418 } 417 }
419 418
420 /* try to get the root inode */ 419 /* try to get the root inode */
421 hfs_find_init(HFS_SB(sb)->cat_tree, &fd); 420 res = hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
421 if (res)
422 goto bail_no_root;
422 res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd); 423 res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd);
423 if (!res) { 424 if (!res) {
424 if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) { 425 if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
@@ -447,7 +448,7 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
447 return 0; 448 return 0;
448 449
449bail_no_root: 450bail_no_root:
450 printk(KERN_ERR "hfs: get root inode failed.\n"); 451 pr_err("get root inode failed\n");
451bail: 452bail:
452 hfs_mdb_put(sb); 453 hfs_mdb_put(sb);
453 return res; 454 return res;
diff --git a/fs/hfsplus/attributes.c b/fs/hfsplus/attributes.c
index 8d691f124714..0f47890299c4 100644
--- a/fs/hfsplus/attributes.c
+++ b/fs/hfsplus/attributes.c
@@ -56,7 +56,7 @@ int hfsplus_attr_build_key(struct super_block *sb, hfsplus_btree_key *key,
56 if (name) { 56 if (name) {
57 len = strlen(name); 57 len = strlen(name);
58 if (len > HFSPLUS_ATTR_MAX_STRLEN) { 58 if (len > HFSPLUS_ATTR_MAX_STRLEN) {
59 printk(KERN_ERR "hfs: invalid xattr name's length\n"); 59 pr_err("invalid xattr name's length\n");
60 return -EINVAL; 60 return -EINVAL;
61 } 61 }
62 hfsplus_asc2uni(sb, 62 hfsplus_asc2uni(sb,
@@ -166,10 +166,10 @@ int hfsplus_find_attr(struct super_block *sb, u32 cnid,
166{ 166{
167 int err = 0; 167 int err = 0;
168 168
169 dprint(DBG_ATTR_MOD, "find_attr: %s,%d\n", name ? name : NULL, cnid); 169 hfs_dbg(ATTR_MOD, "find_attr: %s,%d\n", name ? name : NULL, cnid);
170 170
171 if (!HFSPLUS_SB(sb)->attr_tree) { 171 if (!HFSPLUS_SB(sb)->attr_tree) {
172 printk(KERN_ERR "hfs: attributes file doesn't exist\n"); 172 pr_err("attributes file doesn't exist\n");
173 return -EINVAL; 173 return -EINVAL;
174 } 174 }
175 175
@@ -228,11 +228,11 @@ int hfsplus_create_attr(struct inode *inode,
228 int entry_size; 228 int entry_size;
229 int err; 229 int err;
230 230
231 dprint(DBG_ATTR_MOD, "create_attr: %s,%ld\n", 231 hfs_dbg(ATTR_MOD, "create_attr: %s,%ld\n",
232 name ? name : NULL, inode->i_ino); 232 name ? name : NULL, inode->i_ino);
233 233
234 if (!HFSPLUS_SB(sb)->attr_tree) { 234 if (!HFSPLUS_SB(sb)->attr_tree) {
235 printk(KERN_ERR "hfs: attributes file doesn't exist\n"); 235 pr_err("attributes file doesn't exist\n");
236 return -EINVAL; 236 return -EINVAL;
237 } 237 }
238 238
@@ -307,10 +307,10 @@ static int __hfsplus_delete_attr(struct inode *inode, u32 cnid,
307 break; 307 break;
308 case HFSPLUS_ATTR_FORK_DATA: 308 case HFSPLUS_ATTR_FORK_DATA:
309 case HFSPLUS_ATTR_EXTENTS: 309 case HFSPLUS_ATTR_EXTENTS:
310 printk(KERN_ERR "hfs: only inline data xattr are supported\n"); 310 pr_err("only inline data xattr are supported\n");
311 return -EOPNOTSUPP; 311 return -EOPNOTSUPP;
312 default: 312 default:
313 printk(KERN_ERR "hfs: invalid extended attribute record\n"); 313 pr_err("invalid extended attribute record\n");
314 return -ENOENT; 314 return -ENOENT;
315 } 315 }
316 316
@@ -328,11 +328,11 @@ int hfsplus_delete_attr(struct inode *inode, const char *name)
328 struct super_block *sb = inode->i_sb; 328 struct super_block *sb = inode->i_sb;
329 struct hfs_find_data fd; 329 struct hfs_find_data fd;
330 330
331 dprint(DBG_ATTR_MOD, "delete_attr: %s,%ld\n", 331 hfs_dbg(ATTR_MOD, "delete_attr: %s,%ld\n",
332 name ? name : NULL, inode->i_ino); 332 name ? name : NULL, inode->i_ino);
333 333
334 if (!HFSPLUS_SB(sb)->attr_tree) { 334 if (!HFSPLUS_SB(sb)->attr_tree) {
335 printk(KERN_ERR "hfs: attributes file doesn't exist\n"); 335 pr_err("attributes file doesn't exist\n");
336 return -EINVAL; 336 return -EINVAL;
337 } 337 }
338 338
@@ -346,7 +346,7 @@ int hfsplus_delete_attr(struct inode *inode, const char *name)
346 if (err) 346 if (err)
347 goto out; 347 goto out;
348 } else { 348 } else {
349 printk(KERN_ERR "hfs: invalid extended attribute name\n"); 349 pr_err("invalid extended attribute name\n");
350 err = -EINVAL; 350 err = -EINVAL;
351 goto out; 351 goto out;
352 } 352 }
@@ -369,10 +369,10 @@ int hfsplus_delete_all_attrs(struct inode *dir, u32 cnid)
369 int err = 0; 369 int err = 0;
370 struct hfs_find_data fd; 370 struct hfs_find_data fd;
371 371
372 dprint(DBG_ATTR_MOD, "delete_all_attrs: %d\n", cnid); 372 hfs_dbg(ATTR_MOD, "delete_all_attrs: %d\n", cnid);
373 373
374 if (!HFSPLUS_SB(dir->i_sb)->attr_tree) { 374 if (!HFSPLUS_SB(dir->i_sb)->attr_tree) {
375 printk(KERN_ERR "hfs: attributes file doesn't exist\n"); 375 pr_err("attributes file doesn't exist\n");
376 return -EINVAL; 376 return -EINVAL;
377 } 377 }
378 378
@@ -384,7 +384,7 @@ int hfsplus_delete_all_attrs(struct inode *dir, u32 cnid)
384 err = hfsplus_find_attr(dir->i_sb, cnid, NULL, &fd); 384 err = hfsplus_find_attr(dir->i_sb, cnid, NULL, &fd);
385 if (err) { 385 if (err) {
386 if (err != -ENOENT) 386 if (err != -ENOENT)
387 printk(KERN_ERR "hfs: xattr search failed.\n"); 387 pr_err("xattr search failed\n");
388 goto end_delete_all; 388 goto end_delete_all;
389 } 389 }
390 390
diff --git a/fs/hfsplus/bfind.c b/fs/hfsplus/bfind.c
index d73c98d1ee99..c1422d91cd36 100644
--- a/fs/hfsplus/bfind.c
+++ b/fs/hfsplus/bfind.c
@@ -22,7 +22,7 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
22 return -ENOMEM; 22 return -ENOMEM;
23 fd->search_key = ptr; 23 fd->search_key = ptr;
24 fd->key = ptr + tree->max_key_len + 2; 24 fd->key = ptr + tree->max_key_len + 2;
25 dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n", 25 hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
26 tree->cnid, __builtin_return_address(0)); 26 tree->cnid, __builtin_return_address(0));
27 switch (tree->cnid) { 27 switch (tree->cnid) {
28 case HFSPLUS_CAT_CNID: 28 case HFSPLUS_CAT_CNID:
@@ -44,7 +44,7 @@ void hfs_find_exit(struct hfs_find_data *fd)
44{ 44{
45 hfs_bnode_put(fd->bnode); 45 hfs_bnode_put(fd->bnode);
46 kfree(fd->search_key); 46 kfree(fd->search_key);
47 dprint(DBG_BNODE_REFS, "find_exit: %d (%p)\n", 47 hfs_dbg(BNODE_REFS, "find_exit: %d (%p)\n",
48 fd->tree->cnid, __builtin_return_address(0)); 48 fd->tree->cnid, __builtin_return_address(0));
49 mutex_unlock(&fd->tree->tree_lock); 49 mutex_unlock(&fd->tree->tree_lock);
50 fd->tree = NULL; 50 fd->tree = NULL;
@@ -56,7 +56,8 @@ int hfs_find_1st_rec_by_cnid(struct hfs_bnode *bnode,
56 int *end, 56 int *end,
57 int *cur_rec) 57 int *cur_rec)
58{ 58{
59 __be32 cur_cnid, search_cnid; 59 __be32 cur_cnid;
60 __be32 search_cnid;
60 61
61 if (bnode->tree->cnid == HFSPLUS_EXT_CNID) { 62 if (bnode->tree->cnid == HFSPLUS_EXT_CNID) {
62 cur_cnid = fd->key->ext.cnid; 63 cur_cnid = fd->key->ext.cnid;
@@ -67,8 +68,11 @@ int hfs_find_1st_rec_by_cnid(struct hfs_bnode *bnode,
67 } else if (bnode->tree->cnid == HFSPLUS_ATTR_CNID) { 68 } else if (bnode->tree->cnid == HFSPLUS_ATTR_CNID) {
68 cur_cnid = fd->key->attr.cnid; 69 cur_cnid = fd->key->attr.cnid;
69 search_cnid = fd->search_key->attr.cnid; 70 search_cnid = fd->search_key->attr.cnid;
70 } else 71 } else {
72 cur_cnid = 0; /* used-uninitialized warning */
73 search_cnid = 0;
71 BUG(); 74 BUG();
75 }
72 76
73 if (cur_cnid == search_cnid) { 77 if (cur_cnid == search_cnid) {
74 (*end) = (*cur_rec); 78 (*end) = (*cur_rec);
@@ -204,7 +208,7 @@ int hfs_brec_find(struct hfs_find_data *fd, search_strategy_t do_key_compare)
204 return res; 208 return res;
205 209
206invalid: 210invalid:
207 printk(KERN_ERR "hfs: inconsistency in B*Tree (%d,%d,%d,%u,%u)\n", 211 pr_err("inconsistency in B*Tree (%d,%d,%d,%u,%u)\n",
208 height, bnode->height, bnode->type, nidx, parent); 212 height, bnode->height, bnode->type, nidx, parent);
209 res = -EIO; 213 res = -EIO;
210release: 214release:
diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c
index 6feefc0cb48a..d2954451519e 100644
--- a/fs/hfsplus/bitmap.c
+++ b/fs/hfsplus/bitmap.c
@@ -30,7 +30,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
30 if (!len) 30 if (!len)
31 return size; 31 return size;
32 32
33 dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len); 33 hfs_dbg(BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
34 mutex_lock(&sbi->alloc_mutex); 34 mutex_lock(&sbi->alloc_mutex);
35 mapping = sbi->alloc_file->i_mapping; 35 mapping = sbi->alloc_file->i_mapping;
36 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); 36 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
@@ -89,14 +89,14 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
89 else 89 else
90 end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32; 90 end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
91 } 91 }
92 dprint(DBG_BITMAP, "bitmap full\n"); 92 hfs_dbg(BITMAP, "bitmap full\n");
93 start = size; 93 start = size;
94 goto out; 94 goto out;
95 95
96found: 96found:
97 start = offset + (curr - pptr) * 32 + i; 97 start = offset + (curr - pptr) * 32 + i;
98 if (start >= size) { 98 if (start >= size) {
99 dprint(DBG_BITMAP, "bitmap full\n"); 99 hfs_dbg(BITMAP, "bitmap full\n");
100 goto out; 100 goto out;
101 } 101 }
102 /* do any partial u32 at the start */ 102 /* do any partial u32 at the start */
@@ -154,7 +154,7 @@ done:
154 *max = offset + (curr - pptr) * 32 + i - start; 154 *max = offset + (curr - pptr) * 32 + i - start;
155 sbi->free_blocks -= *max; 155 sbi->free_blocks -= *max;
156 hfsplus_mark_mdb_dirty(sb); 156 hfsplus_mark_mdb_dirty(sb);
157 dprint(DBG_BITMAP, "-> %u,%u\n", start, *max); 157 hfs_dbg(BITMAP, "-> %u,%u\n", start, *max);
158out: 158out:
159 mutex_unlock(&sbi->alloc_mutex); 159 mutex_unlock(&sbi->alloc_mutex);
160 return start; 160 return start;
@@ -173,7 +173,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
173 if (!count) 173 if (!count)
174 return 0; 174 return 0;
175 175
176 dprint(DBG_BITMAP, "block_free: %u,%u\n", offset, count); 176 hfs_dbg(BITMAP, "block_free: %u,%u\n", offset, count);
177 /* are all of the bits in range? */ 177 /* are all of the bits in range? */
178 if ((offset + count) > sbi->total_blocks) 178 if ((offset + count) > sbi->total_blocks)
179 return -ENOENT; 179 return -ENOENT;
@@ -238,8 +238,7 @@ out:
238 return 0; 238 return 0;
239 239
240kaboom: 240kaboom:
241 printk(KERN_CRIT "hfsplus: unable to mark blocks free: error %ld\n", 241 pr_crit("unable to mark blocks free: error %ld\n", PTR_ERR(page));
242 PTR_ERR(page));
243 mutex_unlock(&sbi->alloc_mutex); 242 mutex_unlock(&sbi->alloc_mutex);
244 243
245 return -EIO; 244 return -EIO;
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index f31ac6f404f1..11c860204520 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -130,7 +130,7 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
130 struct page **src_page, **dst_page; 130 struct page **src_page, **dst_page;
131 int l; 131 int l;
132 132
133 dprint(DBG_BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len); 133 hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
134 if (!len) 134 if (!len)
135 return; 135 return;
136 tree = src_node->tree; 136 tree = src_node->tree;
@@ -188,7 +188,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
188 struct page **src_page, **dst_page; 188 struct page **src_page, **dst_page;
189 int l; 189 int l;
190 190
191 dprint(DBG_BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len); 191 hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
192 if (!len) 192 if (!len)
193 return; 193 return;
194 src += node->page_offset; 194 src += node->page_offset;
@@ -302,16 +302,16 @@ void hfs_bnode_dump(struct hfs_bnode *node)
302 __be32 cnid; 302 __be32 cnid;
303 int i, off, key_off; 303 int i, off, key_off;
304 304
305 dprint(DBG_BNODE_MOD, "bnode: %d\n", node->this); 305 hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
306 hfs_bnode_read(node, &desc, 0, sizeof(desc)); 306 hfs_bnode_read(node, &desc, 0, sizeof(desc));
307 dprint(DBG_BNODE_MOD, "%d, %d, %d, %d, %d\n", 307 hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
308 be32_to_cpu(desc.next), be32_to_cpu(desc.prev), 308 be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
309 desc.type, desc.height, be16_to_cpu(desc.num_recs)); 309 desc.type, desc.height, be16_to_cpu(desc.num_recs));
310 310
311 off = node->tree->node_size - 2; 311 off = node->tree->node_size - 2;
312 for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) { 312 for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
313 key_off = hfs_bnode_read_u16(node, off); 313 key_off = hfs_bnode_read_u16(node, off);
314 dprint(DBG_BNODE_MOD, " %d", key_off); 314 hfs_dbg(BNODE_MOD, " %d", key_off);
315 if (i && node->type == HFS_NODE_INDEX) { 315 if (i && node->type == HFS_NODE_INDEX) {
316 int tmp; 316 int tmp;
317 317
@@ -320,17 +320,17 @@ void hfs_bnode_dump(struct hfs_bnode *node)
320 tmp = hfs_bnode_read_u16(node, key_off) + 2; 320 tmp = hfs_bnode_read_u16(node, key_off) + 2;
321 else 321 else
322 tmp = node->tree->max_key_len + 2; 322 tmp = node->tree->max_key_len + 2;
323 dprint(DBG_BNODE_MOD, " (%d", tmp); 323 hfs_dbg_cont(BNODE_MOD, " (%d", tmp);
324 hfs_bnode_read(node, &cnid, key_off + tmp, 4); 324 hfs_bnode_read(node, &cnid, key_off + tmp, 4);
325 dprint(DBG_BNODE_MOD, ",%d)", be32_to_cpu(cnid)); 325 hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
326 } else if (i && node->type == HFS_NODE_LEAF) { 326 } else if (i && node->type == HFS_NODE_LEAF) {
327 int tmp; 327 int tmp;
328 328
329 tmp = hfs_bnode_read_u16(node, key_off); 329 tmp = hfs_bnode_read_u16(node, key_off);
330 dprint(DBG_BNODE_MOD, " (%d)", tmp); 330 hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
331 } 331 }
332 } 332 }
333 dprint(DBG_BNODE_MOD, "\n"); 333 hfs_dbg_cont(BNODE_MOD, "\n");
334} 334}
335 335
336void hfs_bnode_unlink(struct hfs_bnode *node) 336void hfs_bnode_unlink(struct hfs_bnode *node)
@@ -366,7 +366,7 @@ void hfs_bnode_unlink(struct hfs_bnode *node)
366 366
367 /* move down? */ 367 /* move down? */
368 if (!node->prev && !node->next) 368 if (!node->prev && !node->next)
369 dprint(DBG_BNODE_MOD, "hfs_btree_del_level\n"); 369 hfs_dbg(BNODE_MOD, "hfs_btree_del_level\n");
370 if (!node->parent) { 370 if (!node->parent) {
371 tree->root = 0; 371 tree->root = 0;
372 tree->depth = 0; 372 tree->depth = 0;
@@ -386,7 +386,7 @@ struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
386 struct hfs_bnode *node; 386 struct hfs_bnode *node;
387 387
388 if (cnid >= tree->node_count) { 388 if (cnid >= tree->node_count) {
389 printk(KERN_ERR "hfs: request for non-existent node " 389 pr_err("request for non-existent node "
390 "%d in B*Tree\n", 390 "%d in B*Tree\n",
391 cnid); 391 cnid);
392 return NULL; 392 return NULL;
@@ -409,7 +409,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
409 loff_t off; 409 loff_t off;
410 410
411 if (cnid >= tree->node_count) { 411 if (cnid >= tree->node_count) {
412 printk(KERN_ERR "hfs: request for non-existent node " 412 pr_err("request for non-existent node "
413 "%d in B*Tree\n", 413 "%d in B*Tree\n",
414 cnid); 414 cnid);
415 return NULL; 415 return NULL;
@@ -425,8 +425,8 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
425 node->this = cnid; 425 node->this = cnid;
426 set_bit(HFS_BNODE_NEW, &node->flags); 426 set_bit(HFS_BNODE_NEW, &node->flags);
427 atomic_set(&node->refcnt, 1); 427 atomic_set(&node->refcnt, 1);
428 dprint(DBG_BNODE_REFS, "new_node(%d:%d): 1\n", 428 hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
429 node->tree->cnid, node->this); 429 node->tree->cnid, node->this);
430 init_waitqueue_head(&node->lock_wq); 430 init_waitqueue_head(&node->lock_wq);
431 spin_lock(&tree->hash_lock); 431 spin_lock(&tree->hash_lock);
432 node2 = hfs_bnode_findhash(tree, cnid); 432 node2 = hfs_bnode_findhash(tree, cnid);
@@ -470,7 +470,7 @@ void hfs_bnode_unhash(struct hfs_bnode *node)
470{ 470{
471 struct hfs_bnode **p; 471 struct hfs_bnode **p;
472 472
473 dprint(DBG_BNODE_REFS, "remove_node(%d:%d): %d\n", 473 hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
474 node->tree->cnid, node->this, atomic_read(&node->refcnt)); 474 node->tree->cnid, node->this, atomic_read(&node->refcnt));
475 for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)]; 475 for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
476 *p && *p != node; p = &(*p)->next_hash) 476 *p && *p != node; p = &(*p)->next_hash)
@@ -588,7 +588,7 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
588 node = hfs_bnode_findhash(tree, num); 588 node = hfs_bnode_findhash(tree, num);
589 spin_unlock(&tree->hash_lock); 589 spin_unlock(&tree->hash_lock);
590 if (node) { 590 if (node) {
591 printk(KERN_CRIT "new node %u already hashed?\n", num); 591 pr_crit("new node %u already hashed?\n", num);
592 WARN_ON(1); 592 WARN_ON(1);
593 return node; 593 return node;
594 } 594 }
@@ -620,7 +620,7 @@ void hfs_bnode_get(struct hfs_bnode *node)
620{ 620{
621 if (node) { 621 if (node) {
622 atomic_inc(&node->refcnt); 622 atomic_inc(&node->refcnt);
623 dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n", 623 hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
624 node->tree->cnid, node->this, 624 node->tree->cnid, node->this,
625 atomic_read(&node->refcnt)); 625 atomic_read(&node->refcnt));
626 } 626 }
@@ -633,7 +633,7 @@ void hfs_bnode_put(struct hfs_bnode *node)
633 struct hfs_btree *tree = node->tree; 633 struct hfs_btree *tree = node->tree;
634 int i; 634 int i;
635 635
636 dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n", 636 hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
637 node->tree->cnid, node->this, 637 node->tree->cnid, node->this,
638 atomic_read(&node->refcnt)); 638 atomic_read(&node->refcnt));
639 BUG_ON(!atomic_read(&node->refcnt)); 639 BUG_ON(!atomic_read(&node->refcnt));
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
index 298d4e45604b..6e560d56094b 100644
--- a/fs/hfsplus/brec.c
+++ b/fs/hfsplus/brec.c
@@ -45,13 +45,13 @@ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec)
45 if (!recoff) 45 if (!recoff)
46 return 0; 46 return 0;
47 if (recoff > node->tree->node_size - 2) { 47 if (recoff > node->tree->node_size - 2) {
48 printk(KERN_ERR "hfs: recoff %d too large\n", recoff); 48 pr_err("recoff %d too large\n", recoff);
49 return 0; 49 return 0;
50 } 50 }
51 51
52 retval = hfs_bnode_read_u16(node, recoff) + 2; 52 retval = hfs_bnode_read_u16(node, recoff) + 2;
53 if (retval > node->tree->max_key_len + 2) { 53 if (retval > node->tree->max_key_len + 2) {
54 printk(KERN_ERR "hfs: keylen %d too large\n", 54 pr_err("keylen %d too large\n",
55 retval); 55 retval);
56 retval = 0; 56 retval = 0;
57 } 57 }
@@ -90,7 +90,7 @@ again:
90 end_rec_off = tree->node_size - (node->num_recs + 1) * 2; 90 end_rec_off = tree->node_size - (node->num_recs + 1) * 2;
91 end_off = hfs_bnode_read_u16(node, end_rec_off); 91 end_off = hfs_bnode_read_u16(node, end_rec_off);
92 end_rec_off -= 2; 92 end_rec_off -= 2;
93 dprint(DBG_BNODE_MOD, "insert_rec: %d, %d, %d, %d\n", 93 hfs_dbg(BNODE_MOD, "insert_rec: %d, %d, %d, %d\n",
94 rec, size, end_off, end_rec_off); 94 rec, size, end_off, end_rec_off);
95 if (size > end_rec_off - end_off) { 95 if (size > end_rec_off - end_off) {
96 if (new_node) 96 if (new_node)
@@ -191,7 +191,7 @@ again:
191 mark_inode_dirty(tree->inode); 191 mark_inode_dirty(tree->inode);
192 } 192 }
193 hfs_bnode_dump(node); 193 hfs_bnode_dump(node);
194 dprint(DBG_BNODE_MOD, "remove_rec: %d, %d\n", 194 hfs_dbg(BNODE_MOD, "remove_rec: %d, %d\n",
195 fd->record, fd->keylength + fd->entrylength); 195 fd->record, fd->keylength + fd->entrylength);
196 if (!--node->num_recs) { 196 if (!--node->num_recs) {
197 hfs_bnode_unlink(node); 197 hfs_bnode_unlink(node);
@@ -244,7 +244,7 @@ static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
244 if (IS_ERR(new_node)) 244 if (IS_ERR(new_node))
245 return new_node; 245 return new_node;
246 hfs_bnode_get(node); 246 hfs_bnode_get(node);
247 dprint(DBG_BNODE_MOD, "split_nodes: %d - %d - %d\n", 247 hfs_dbg(BNODE_MOD, "split_nodes: %d - %d - %d\n",
248 node->this, new_node->this, node->next); 248 node->this, new_node->this, node->next);
249 new_node->next = node->next; 249 new_node->next = node->next;
250 new_node->prev = node->this; 250 new_node->prev = node->this;
@@ -379,7 +379,7 @@ again:
379 newkeylen = hfs_bnode_read_u16(node, 14) + 2; 379 newkeylen = hfs_bnode_read_u16(node, 14) + 2;
380 else 380 else
381 fd->keylength = newkeylen = tree->max_key_len + 2; 381 fd->keylength = newkeylen = tree->max_key_len + 2;
382 dprint(DBG_BNODE_MOD, "update_rec: %d, %d, %d\n", 382 hfs_dbg(BNODE_MOD, "update_rec: %d, %d, %d\n",
383 rec, fd->keylength, newkeylen); 383 rec, fd->keylength, newkeylen);
384 384
385 rec_off = tree->node_size - (rec + 2) * 2; 385 rec_off = tree->node_size - (rec + 2) * 2;
@@ -391,7 +391,7 @@ again:
391 end_off = hfs_bnode_read_u16(parent, end_rec_off); 391 end_off = hfs_bnode_read_u16(parent, end_rec_off);
392 if (end_rec_off - end_off < diff) { 392 if (end_rec_off - end_off < diff) {
393 393
394 dprint(DBG_BNODE_MOD, "hfs: splitting index node.\n"); 394 hfs_dbg(BNODE_MOD, "splitting index node\n");
395 fd->bnode = parent; 395 fd->bnode = parent;
396 new_node = hfs_bnode_split(fd); 396 new_node = hfs_bnode_split(fd);
397 if (IS_ERR(new_node)) 397 if (IS_ERR(new_node))
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index efb689c21a95..0c6540c91167 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -40,8 +40,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
40 tree->inode = inode; 40 tree->inode = inode;
41 41
42 if (!HFSPLUS_I(tree->inode)->first_blocks) { 42 if (!HFSPLUS_I(tree->inode)->first_blocks) {
43 printk(KERN_ERR 43 pr_err("invalid btree extent records (0 size)\n");
44 "hfs: invalid btree extent records (0 size).\n");
45 goto free_inode; 44 goto free_inode;
46 } 45 }
47 46
@@ -68,12 +67,12 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
68 switch (id) { 67 switch (id) {
69 case HFSPLUS_EXT_CNID: 68 case HFSPLUS_EXT_CNID:
70 if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) { 69 if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
71 printk(KERN_ERR "hfs: invalid extent max_key_len %d\n", 70 pr_err("invalid extent max_key_len %d\n",
72 tree->max_key_len); 71 tree->max_key_len);
73 goto fail_page; 72 goto fail_page;
74 } 73 }
75 if (tree->attributes & HFS_TREE_VARIDXKEYS) { 74 if (tree->attributes & HFS_TREE_VARIDXKEYS) {
76 printk(KERN_ERR "hfs: invalid extent btree flag\n"); 75 pr_err("invalid extent btree flag\n");
77 goto fail_page; 76 goto fail_page;
78 } 77 }
79 78
@@ -81,12 +80,12 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
81 break; 80 break;
82 case HFSPLUS_CAT_CNID: 81 case HFSPLUS_CAT_CNID:
83 if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) { 82 if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
84 printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n", 83 pr_err("invalid catalog max_key_len %d\n",
85 tree->max_key_len); 84 tree->max_key_len);
86 goto fail_page; 85 goto fail_page;
87 } 86 }
88 if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) { 87 if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
89 printk(KERN_ERR "hfs: invalid catalog btree flag\n"); 88 pr_err("invalid catalog btree flag\n");
90 goto fail_page; 89 goto fail_page;
91 } 90 }
92 91
@@ -100,19 +99,19 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
100 break; 99 break;
101 case HFSPLUS_ATTR_CNID: 100 case HFSPLUS_ATTR_CNID:
102 if (tree->max_key_len != HFSPLUS_ATTR_KEYLEN - sizeof(u16)) { 101 if (tree->max_key_len != HFSPLUS_ATTR_KEYLEN - sizeof(u16)) {
103 printk(KERN_ERR "hfs: invalid attributes max_key_len %d\n", 102 pr_err("invalid attributes max_key_len %d\n",
104 tree->max_key_len); 103 tree->max_key_len);
105 goto fail_page; 104 goto fail_page;
106 } 105 }
107 tree->keycmp = hfsplus_attr_bin_cmp_key; 106 tree->keycmp = hfsplus_attr_bin_cmp_key;
108 break; 107 break;
109 default: 108 default:
110 printk(KERN_ERR "hfs: unknown B*Tree requested\n"); 109 pr_err("unknown B*Tree requested\n");
111 goto fail_page; 110 goto fail_page;
112 } 111 }
113 112
114 if (!(tree->attributes & HFS_TREE_BIGKEYS)) { 113 if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
115 printk(KERN_ERR "hfs: invalid btree flag\n"); 114 pr_err("invalid btree flag\n");
116 goto fail_page; 115 goto fail_page;
117 } 116 }
118 117
@@ -155,7 +154,7 @@ void hfs_btree_close(struct hfs_btree *tree)
155 while ((node = tree->node_hash[i])) { 154 while ((node = tree->node_hash[i])) {
156 tree->node_hash[i] = node->next_hash; 155 tree->node_hash[i] = node->next_hash;
157 if (atomic_read(&node->refcnt)) 156 if (atomic_read(&node->refcnt))
158 printk(KERN_CRIT "hfs: node %d:%d " 157 pr_crit("node %d:%d "
159 "still has %d user(s)!\n", 158 "still has %d user(s)!\n",
160 node->tree->cnid, node->this, 159 node->tree->cnid, node->this,
161 atomic_read(&node->refcnt)); 160 atomic_read(&node->refcnt));
@@ -303,7 +302,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
303 kunmap(*pagep); 302 kunmap(*pagep);
304 nidx = node->next; 303 nidx = node->next;
305 if (!nidx) { 304 if (!nidx) {
306 dprint(DBG_BNODE_MOD, "hfs: create new bmap node.\n"); 305 hfs_dbg(BNODE_MOD, "create new bmap node\n");
307 next_node = hfs_bmap_new_bmap(node, idx); 306 next_node = hfs_bmap_new_bmap(node, idx);
308 } else 307 } else
309 next_node = hfs_bnode_find(tree, nidx); 308 next_node = hfs_bnode_find(tree, nidx);
@@ -329,7 +328,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
329 u32 nidx; 328 u32 nidx;
330 u8 *data, byte, m; 329 u8 *data, byte, m;
331 330
332 dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this); 331 hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
333 BUG_ON(!node->this); 332 BUG_ON(!node->this);
334 tree = node->tree; 333 tree = node->tree;
335 nidx = node->this; 334 nidx = node->this;
@@ -345,7 +344,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
345 hfs_bnode_put(node); 344 hfs_bnode_put(node);
346 if (!i) { 345 if (!i) {
347 /* panic */; 346 /* panic */;
348 printk(KERN_CRIT "hfs: unable to free bnode %u. " 347 pr_crit("unable to free bnode %u. "
349 "bmap not found!\n", 348 "bmap not found!\n",
350 node->this); 349 node->this);
351 return; 350 return;
@@ -355,7 +354,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
355 return; 354 return;
356 if (node->type != HFS_NODE_MAP) { 355 if (node->type != HFS_NODE_MAP) {
357 /* panic */; 356 /* panic */;
358 printk(KERN_CRIT "hfs: invalid bmap found! " 357 pr_crit("invalid bmap found! "
359 "(%u,%d)\n", 358 "(%u,%d)\n",
360 node->this, node->type); 359 node->this, node->type);
361 hfs_bnode_put(node); 360 hfs_bnode_put(node);
@@ -370,7 +369,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
370 m = 1 << (~nidx & 7); 369 m = 1 << (~nidx & 7);
371 byte = data[off]; 370 byte = data[off];
372 if (!(byte & m)) { 371 if (!(byte & m)) {
373 printk(KERN_CRIT "hfs: trying to free free bnode " 372 pr_crit("trying to free free bnode "
374 "%u(%d)\n", 373 "%u(%d)\n",
375 node->this, node->type); 374 node->this, node->type);
376 kunmap(page); 375 kunmap(page);
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index 840d71edd193..968ce411db53 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -188,12 +188,12 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
188 188
189 type = be16_to_cpu(tmp.type); 189 type = be16_to_cpu(tmp.type);
190 if (type != HFSPLUS_FOLDER_THREAD && type != HFSPLUS_FILE_THREAD) { 190 if (type != HFSPLUS_FOLDER_THREAD && type != HFSPLUS_FILE_THREAD) {
191 printk(KERN_ERR "hfs: found bad thread record in catalog\n"); 191 pr_err("found bad thread record in catalog\n");
192 return -EIO; 192 return -EIO;
193 } 193 }
194 194
195 if (be16_to_cpu(tmp.thread.nodeName.length) > 255) { 195 if (be16_to_cpu(tmp.thread.nodeName.length) > 255) {
196 printk(KERN_ERR "hfs: catalog name length corrupted\n"); 196 pr_err("catalog name length corrupted\n");
197 return -EIO; 197 return -EIO;
198 } 198 }
199 199
@@ -212,7 +212,7 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
212 int entry_size; 212 int entry_size;
213 int err; 213 int err;
214 214
215 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", 215 hfs_dbg(CAT_MOD, "create_cat: %s,%u(%d)\n",
216 str->name, cnid, inode->i_nlink); 216 str->name, cnid, inode->i_nlink);
217 err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); 217 err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
218 if (err) 218 if (err)
@@ -271,8 +271,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
271 int err, off; 271 int err, off;
272 u16 type; 272 u16 type;
273 273
274 dprint(DBG_CAT_MOD, "delete_cat: %s,%u\n", 274 hfs_dbg(CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid);
275 str ? str->name : NULL, cnid);
276 err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); 275 err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
277 if (err) 276 if (err)
278 return err; 277 return err;
@@ -361,7 +360,7 @@ int hfsplus_rename_cat(u32 cnid,
361 int entry_size, type; 360 int entry_size, type;
362 int err; 361 int err;
363 362
364 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", 363 hfs_dbg(CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
365 cnid, src_dir->i_ino, src_name->name, 364 cnid, src_dir->i_ino, src_name->name,
366 dst_dir->i_ino, dst_name->name); 365 dst_dir->i_ino, dst_name->name);
367 err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &src_fd); 366 err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &src_fd);
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 031c24e50521..a37ac934732f 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -103,7 +103,7 @@ again:
103 } else if (!dentry->d_fsdata) 103 } else if (!dentry->d_fsdata)
104 dentry->d_fsdata = (void *)(unsigned long)cnid; 104 dentry->d_fsdata = (void *)(unsigned long)cnid;
105 } else { 105 } else {
106 printk(KERN_ERR "hfs: invalid catalog entry type in lookup\n"); 106 pr_err("invalid catalog entry type in lookup\n");
107 err = -EIO; 107 err = -EIO;
108 goto fail; 108 goto fail;
109 } 109 }
@@ -159,12 +159,12 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
159 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, 159 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
160 fd.entrylength); 160 fd.entrylength);
161 if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) { 161 if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) {
162 printk(KERN_ERR "hfs: bad catalog folder thread\n"); 162 pr_err("bad catalog folder thread\n");
163 err = -EIO; 163 err = -EIO;
164 goto out; 164 goto out;
165 } 165 }
166 if (fd.entrylength < HFSPLUS_MIN_THREAD_SZ) { 166 if (fd.entrylength < HFSPLUS_MIN_THREAD_SZ) {
167 printk(KERN_ERR "hfs: truncated catalog thread\n"); 167 pr_err("truncated catalog thread\n");
168 err = -EIO; 168 err = -EIO;
169 goto out; 169 goto out;
170 } 170 }
@@ -183,7 +183,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
183 183
184 for (;;) { 184 for (;;) {
185 if (be32_to_cpu(fd.key->cat.parent) != inode->i_ino) { 185 if (be32_to_cpu(fd.key->cat.parent) != inode->i_ino) {
186 printk(KERN_ERR "hfs: walked past end of dir\n"); 186 pr_err("walked past end of dir\n");
187 err = -EIO; 187 err = -EIO;
188 goto out; 188 goto out;
189 } 189 }
@@ -203,7 +203,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
203 if (type == HFSPLUS_FOLDER) { 203 if (type == HFSPLUS_FOLDER) {
204 if (fd.entrylength < 204 if (fd.entrylength <
205 sizeof(struct hfsplus_cat_folder)) { 205 sizeof(struct hfsplus_cat_folder)) {
206 printk(KERN_ERR "hfs: small dir entry\n"); 206 pr_err("small dir entry\n");
207 err = -EIO; 207 err = -EIO;
208 goto out; 208 goto out;
209 } 209 }
@@ -216,7 +216,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
216 break; 216 break;
217 } else if (type == HFSPLUS_FILE) { 217 } else if (type == HFSPLUS_FILE) {
218 if (fd.entrylength < sizeof(struct hfsplus_cat_file)) { 218 if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
219 printk(KERN_ERR "hfs: small file entry\n"); 219 pr_err("small file entry\n");
220 err = -EIO; 220 err = -EIO;
221 goto out; 221 goto out;
222 } 222 }
@@ -224,7 +224,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
224 be32_to_cpu(entry.file.id), DT_REG)) 224 be32_to_cpu(entry.file.id), DT_REG))
225 break; 225 break;
226 } else { 226 } else {
227 printk(KERN_ERR "hfs: bad catalog entry type\n"); 227 pr_err("bad catalog entry type\n");
228 err = -EIO; 228 err = -EIO;
229 goto out; 229 goto out;
230 } 230 }
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index fe0a76213d9e..fbb212fbb1ef 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -83,7 +83,7 @@ static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext)
83 return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count); 83 return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count);
84} 84}
85 85
86static void __hfsplus_ext_write_extent(struct inode *inode, 86static int __hfsplus_ext_write_extent(struct inode *inode,
87 struct hfs_find_data *fd) 87 struct hfs_find_data *fd)
88{ 88{
89 struct hfsplus_inode_info *hip = HFSPLUS_I(inode); 89 struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
@@ -98,13 +98,13 @@ static void __hfsplus_ext_write_extent(struct inode *inode,
98 res = hfs_brec_find(fd, hfs_find_rec_by_key); 98 res = hfs_brec_find(fd, hfs_find_rec_by_key);
99 if (hip->extent_state & HFSPLUS_EXT_NEW) { 99 if (hip->extent_state & HFSPLUS_EXT_NEW) {
100 if (res != -ENOENT) 100 if (res != -ENOENT)
101 return; 101 return res;
102 hfs_brec_insert(fd, hip->cached_extents, 102 hfs_brec_insert(fd, hip->cached_extents,
103 sizeof(hfsplus_extent_rec)); 103 sizeof(hfsplus_extent_rec));
104 hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW); 104 hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
105 } else { 105 } else {
106 if (res) 106 if (res)
107 return; 107 return res;
108 hfs_bnode_write(fd->bnode, hip->cached_extents, 108 hfs_bnode_write(fd->bnode, hip->cached_extents,
109 fd->entryoffset, fd->entrylength); 109 fd->entryoffset, fd->entrylength);
110 hip->extent_state &= ~HFSPLUS_EXT_DIRTY; 110 hip->extent_state &= ~HFSPLUS_EXT_DIRTY;
@@ -117,11 +117,13 @@ static void __hfsplus_ext_write_extent(struct inode *inode,
117 * to explicily mark the inode dirty, too. 117 * to explicily mark the inode dirty, too.
118 */ 118 */
119 set_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags); 119 set_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags);
120
121 return 0;
120} 122}
121 123
122static int hfsplus_ext_write_extent_locked(struct inode *inode) 124static int hfsplus_ext_write_extent_locked(struct inode *inode)
123{ 125{
124 int res; 126 int res = 0;
125 127
126 if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) { 128 if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) {
127 struct hfs_find_data fd; 129 struct hfs_find_data fd;
@@ -129,10 +131,10 @@ static int hfsplus_ext_write_extent_locked(struct inode *inode)
129 res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd); 131 res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
130 if (res) 132 if (res)
131 return res; 133 return res;
132 __hfsplus_ext_write_extent(inode, &fd); 134 res = __hfsplus_ext_write_extent(inode, &fd);
133 hfs_find_exit(&fd); 135 hfs_find_exit(&fd);
134 } 136 }
135 return 0; 137 return res;
136} 138}
137 139
138int hfsplus_ext_write_extent(struct inode *inode) 140int hfsplus_ext_write_extent(struct inode *inode)
@@ -175,8 +177,11 @@ static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd,
175 177
176 WARN_ON(!mutex_is_locked(&hip->extents_lock)); 178 WARN_ON(!mutex_is_locked(&hip->extents_lock));
177 179
178 if (hip->extent_state & HFSPLUS_EXT_DIRTY) 180 if (hip->extent_state & HFSPLUS_EXT_DIRTY) {
179 __hfsplus_ext_write_extent(inode, fd); 181 res = __hfsplus_ext_write_extent(inode, fd);
182 if (res)
183 return res;
184 }
180 185
181 res = __hfsplus_ext_read_extent(fd, hip->cached_extents, inode->i_ino, 186 res = __hfsplus_ext_read_extent(fd, hip->cached_extents, inode->i_ino,
182 block, HFSPLUS_IS_RSRC(inode) ? 187 block, HFSPLUS_IS_RSRC(inode) ?
@@ -265,7 +270,7 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock,
265 mutex_unlock(&hip->extents_lock); 270 mutex_unlock(&hip->extents_lock);
266 271
267done: 272done:
268 dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n", 273 hfs_dbg(EXTENT, "get_block(%lu): %llu - %u\n",
269 inode->i_ino, (long long)iblock, dblock); 274 inode->i_ino, (long long)iblock, dblock);
270 275
271 mask = (1 << sbi->fs_shift) - 1; 276 mask = (1 << sbi->fs_shift) - 1;
@@ -288,11 +293,12 @@ static void hfsplus_dump_extent(struct hfsplus_extent *extent)
288{ 293{
289 int i; 294 int i;
290 295
291 dprint(DBG_EXTENT, " "); 296 hfs_dbg(EXTENT, " ");
292 for (i = 0; i < 8; i++) 297 for (i = 0; i < 8; i++)
293 dprint(DBG_EXTENT, " %u:%u", be32_to_cpu(extent[i].start_block), 298 hfs_dbg_cont(EXTENT, " %u:%u",
294 be32_to_cpu(extent[i].block_count)); 299 be32_to_cpu(extent[i].start_block),
295 dprint(DBG_EXTENT, "\n"); 300 be32_to_cpu(extent[i].block_count));
301 hfs_dbg_cont(EXTENT, "\n");
296} 302}
297 303
298static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset, 304static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset,
@@ -348,8 +354,8 @@ found:
348 if (count <= block_nr) { 354 if (count <= block_nr) {
349 err = hfsplus_block_free(sb, start, count); 355 err = hfsplus_block_free(sb, start, count);
350 if (err) { 356 if (err) {
351 printk(KERN_ERR "hfs: can't free extent\n"); 357 pr_err("can't free extent\n");
352 dprint(DBG_EXTENT, " start: %u count: %u\n", 358 hfs_dbg(EXTENT, " start: %u count: %u\n",
353 start, count); 359 start, count);
354 } 360 }
355 extent->block_count = 0; 361 extent->block_count = 0;
@@ -359,8 +365,8 @@ found:
359 count -= block_nr; 365 count -= block_nr;
360 err = hfsplus_block_free(sb, start + count, block_nr); 366 err = hfsplus_block_free(sb, start + count, block_nr);
361 if (err) { 367 if (err) {
362 printk(KERN_ERR "hfs: can't free extent\n"); 368 pr_err("can't free extent\n");
363 dprint(DBG_EXTENT, " start: %u count: %u\n", 369 hfs_dbg(EXTENT, " start: %u count: %u\n",
364 start, count); 370 start, count);
365 } 371 }
366 extent->block_count = cpu_to_be32(count); 372 extent->block_count = cpu_to_be32(count);
@@ -432,7 +438,7 @@ int hfsplus_file_extend(struct inode *inode)
432 if (sbi->alloc_file->i_size * 8 < 438 if (sbi->alloc_file->i_size * 8 <
433 sbi->total_blocks - sbi->free_blocks + 8) { 439 sbi->total_blocks - sbi->free_blocks + 8) {
434 /* extend alloc file */ 440 /* extend alloc file */
435 printk(KERN_ERR "hfs: extend alloc file! " 441 pr_err("extend alloc file! "
436 "(%llu,%u,%u)\n", 442 "(%llu,%u,%u)\n",
437 sbi->alloc_file->i_size * 8, 443 sbi->alloc_file->i_size * 8,
438 sbi->total_blocks, sbi->free_blocks); 444 sbi->total_blocks, sbi->free_blocks);
@@ -459,11 +465,11 @@ int hfsplus_file_extend(struct inode *inode)
459 } 465 }
460 } 466 }
461 467
462 dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len); 468 hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
463 469
464 if (hip->alloc_blocks <= hip->first_blocks) { 470 if (hip->alloc_blocks <= hip->first_blocks) {
465 if (!hip->first_blocks) { 471 if (!hip->first_blocks) {
466 dprint(DBG_EXTENT, "first extents\n"); 472 hfs_dbg(EXTENT, "first extents\n");
467 /* no extents yet */ 473 /* no extents yet */
468 hip->first_extents[0].start_block = cpu_to_be32(start); 474 hip->first_extents[0].start_block = cpu_to_be32(start);
469 hip->first_extents[0].block_count = cpu_to_be32(len); 475 hip->first_extents[0].block_count = cpu_to_be32(len);
@@ -500,7 +506,7 @@ out:
500 return res; 506 return res;
501 507
502insert_extent: 508insert_extent:
503 dprint(DBG_EXTENT, "insert new extent\n"); 509 hfs_dbg(EXTENT, "insert new extent\n");
504 res = hfsplus_ext_write_extent_locked(inode); 510 res = hfsplus_ext_write_extent_locked(inode);
505 if (res) 511 if (res)
506 goto out; 512 goto out;
@@ -525,9 +531,8 @@ void hfsplus_file_truncate(struct inode *inode)
525 u32 alloc_cnt, blk_cnt, start; 531 u32 alloc_cnt, blk_cnt, start;
526 int res; 532 int res;
527 533
528 dprint(DBG_INODE, "truncate: %lu, %llu -> %llu\n", 534 hfs_dbg(INODE, "truncate: %lu, %llu -> %llu\n",
529 inode->i_ino, (long long)hip->phys_size, 535 inode->i_ino, (long long)hip->phys_size, inode->i_size);
530 inode->i_size);
531 536
532 if (inode->i_size > hip->phys_size) { 537 if (inode->i_size > hip->phys_size) {
533 struct address_space *mapping = inode->i_mapping; 538 struct address_space *mapping = inode->i_mapping;
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 05b11f36024c..60b0a3388b26 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -10,6 +10,12 @@
10#ifndef _LINUX_HFSPLUS_FS_H 10#ifndef _LINUX_HFSPLUS_FS_H
11#define _LINUX_HFSPLUS_FS_H 11#define _LINUX_HFSPLUS_FS_H
12 12
13#ifdef pr_fmt
14#undef pr_fmt
15#endif
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
13#include <linux/fs.h> 19#include <linux/fs.h>
14#include <linux/mutex.h> 20#include <linux/mutex.h>
15#include <linux/buffer_head.h> 21#include <linux/buffer_head.h>
@@ -32,9 +38,17 @@
32#endif 38#endif
33#define DBG_MASK (0) 39#define DBG_MASK (0)
34 40
35#define dprint(flg, fmt, args...) \ 41#define hfs_dbg(flg, fmt, ...) \
36 if (flg & DBG_MASK) \ 42do { \
37 printk(fmt , ## args) 43 if (DBG_##flg & DBG_MASK) \
44 printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
45} while (0)
46
47#define hfs_dbg_cont(flg, fmt, ...) \
48do { \
49 if (DBG_##flg & DBG_MASK) \
50 pr_cont(fmt, ##__VA_ARGS__); \
51} while (0)
38 52
39/* Runtime config options */ 53/* Runtime config options */
40#define HFSPLUS_DEF_CR_TYPE 0x3F3F3F3F /* '????' */ 54#define HFSPLUS_DEF_CR_TYPE 0x3F3F3F3F /* '????' */
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 160ccc9cdb4b..7faaa964968e 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -357,7 +357,7 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
357 if (!error) 357 if (!error)
358 error = error2; 358 error = error2;
359 } else { 359 } else {
360 printk(KERN_ERR "hfs: sync non-existent attributes tree\n"); 360 pr_err("sync non-existent attributes tree\n");
361 } 361 }
362 } 362 }
363 363
@@ -573,7 +573,7 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
573 inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date); 573 inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
574 HFSPLUS_I(inode)->create_date = file->create_date; 574 HFSPLUS_I(inode)->create_date = file->create_date;
575 } else { 575 } else {
576 printk(KERN_ERR "hfs: bad catalog entry used to create inode\n"); 576 pr_err("bad catalog entry used to create inode\n");
577 res = -EIO; 577 res = -EIO;
578 } 578 }
579 return res; 579 return res;
diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
index ed257c671615..968eab5bc1f5 100644
--- a/fs/hfsplus/options.c
+++ b/fs/hfsplus/options.c
@@ -113,67 +113,67 @@ int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi)
113 switch (token) { 113 switch (token) {
114 case opt_creator: 114 case opt_creator:
115 if (match_fourchar(&args[0], &sbi->creator)) { 115 if (match_fourchar(&args[0], &sbi->creator)) {
116 printk(KERN_ERR "hfs: creator requires a 4 character value\n"); 116 pr_err("creator requires a 4 character value\n");
117 return 0; 117 return 0;
118 } 118 }
119 break; 119 break;
120 case opt_type: 120 case opt_type:
121 if (match_fourchar(&args[0], &sbi->type)) { 121 if (match_fourchar(&args[0], &sbi->type)) {
122 printk(KERN_ERR "hfs: type requires a 4 character value\n"); 122 pr_err("type requires a 4 character value\n");
123 return 0; 123 return 0;
124 } 124 }
125 break; 125 break;
126 case opt_umask: 126 case opt_umask:
127 if (match_octal(&args[0], &tmp)) { 127 if (match_octal(&args[0], &tmp)) {
128 printk(KERN_ERR "hfs: umask requires a value\n"); 128 pr_err("umask requires a value\n");
129 return 0; 129 return 0;
130 } 130 }
131 sbi->umask = (umode_t)tmp; 131 sbi->umask = (umode_t)tmp;
132 break; 132 break;
133 case opt_uid: 133 case opt_uid:
134 if (match_int(&args[0], &tmp)) { 134 if (match_int(&args[0], &tmp)) {
135 printk(KERN_ERR "hfs: uid requires an argument\n"); 135 pr_err("uid requires an argument\n");
136 return 0; 136 return 0;
137 } 137 }
138 sbi->uid = make_kuid(current_user_ns(), (uid_t)tmp); 138 sbi->uid = make_kuid(current_user_ns(), (uid_t)tmp);
139 if (!uid_valid(sbi->uid)) { 139 if (!uid_valid(sbi->uid)) {
140 printk(KERN_ERR "hfs: invalid uid specified\n"); 140 pr_err("invalid uid specified\n");
141 return 0; 141 return 0;
142 } 142 }
143 break; 143 break;
144 case opt_gid: 144 case opt_gid:
145 if (match_int(&args[0], &tmp)) { 145 if (match_int(&args[0], &tmp)) {
146 printk(KERN_ERR "hfs: gid requires an argument\n"); 146 pr_err("gid requires an argument\n");
147 return 0; 147 return 0;
148 } 148 }
149 sbi->gid = make_kgid(current_user_ns(), (gid_t)tmp); 149 sbi->gid = make_kgid(current_user_ns(), (gid_t)tmp);
150 if (!gid_valid(sbi->gid)) { 150 if (!gid_valid(sbi->gid)) {
151 printk(KERN_ERR "hfs: invalid gid specified\n"); 151 pr_err("invalid gid specified\n");
152 return 0; 152 return 0;
153 } 153 }
154 break; 154 break;
155 case opt_part: 155 case opt_part:
156 if (match_int(&args[0], &sbi->part)) { 156 if (match_int(&args[0], &sbi->part)) {
157 printk(KERN_ERR "hfs: part requires an argument\n"); 157 pr_err("part requires an argument\n");
158 return 0; 158 return 0;
159 } 159 }
160 break; 160 break;
161 case opt_session: 161 case opt_session:
162 if (match_int(&args[0], &sbi->session)) { 162 if (match_int(&args[0], &sbi->session)) {
163 printk(KERN_ERR "hfs: session requires an argument\n"); 163 pr_err("session requires an argument\n");
164 return 0; 164 return 0;
165 } 165 }
166 break; 166 break;
167 case opt_nls: 167 case opt_nls:
168 if (sbi->nls) { 168 if (sbi->nls) {
169 printk(KERN_ERR "hfs: unable to change nls mapping\n"); 169 pr_err("unable to change nls mapping\n");
170 return 0; 170 return 0;
171 } 171 }
172 p = match_strdup(&args[0]); 172 p = match_strdup(&args[0]);
173 if (p) 173 if (p)
174 sbi->nls = load_nls(p); 174 sbi->nls = load_nls(p);
175 if (!sbi->nls) { 175 if (!sbi->nls) {
176 printk(KERN_ERR "hfs: unable to load " 176 pr_err("unable to load "
177 "nls mapping \"%s\"\n", 177 "nls mapping \"%s\"\n",
178 p); 178 p);
179 kfree(p); 179 kfree(p);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 7b87284e46dc..4c4d142cf890 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -132,7 +132,7 @@ static int hfsplus_system_write_inode(struct inode *inode)
132 if (tree) { 132 if (tree) {
133 int err = hfs_btree_write(tree); 133 int err = hfs_btree_write(tree);
134 if (err) { 134 if (err) {
135 printk(KERN_ERR "hfs: b-tree write err: %d, ino %lu\n", 135 pr_err("b-tree write err: %d, ino %lu\n",
136 err, inode->i_ino); 136 err, inode->i_ino);
137 return err; 137 return err;
138 } 138 }
@@ -145,7 +145,7 @@ static int hfsplus_write_inode(struct inode *inode,
145{ 145{
146 int err; 146 int err;
147 147
148 dprint(DBG_INODE, "hfsplus_write_inode: %lu\n", inode->i_ino); 148 hfs_dbg(INODE, "hfsplus_write_inode: %lu\n", inode->i_ino);
149 149
150 err = hfsplus_ext_write_extent(inode); 150 err = hfsplus_ext_write_extent(inode);
151 if (err) 151 if (err)
@@ -160,7 +160,7 @@ static int hfsplus_write_inode(struct inode *inode,
160 160
161static void hfsplus_evict_inode(struct inode *inode) 161static void hfsplus_evict_inode(struct inode *inode)
162{ 162{
163 dprint(DBG_INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino); 163 hfs_dbg(INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino);
164 truncate_inode_pages(&inode->i_data, 0); 164 truncate_inode_pages(&inode->i_data, 0);
165 clear_inode(inode); 165 clear_inode(inode);
166 if (HFSPLUS_IS_RSRC(inode)) { 166 if (HFSPLUS_IS_RSRC(inode)) {
@@ -179,7 +179,7 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
179 if (!wait) 179 if (!wait)
180 return 0; 180 return 0;
181 181
182 dprint(DBG_SUPER, "hfsplus_sync_fs\n"); 182 hfs_dbg(SUPER, "hfsplus_sync_fs\n");
183 183
184 /* 184 /*
185 * Explicitly write out the special metadata inodes. 185 * Explicitly write out the special metadata inodes.
@@ -251,7 +251,7 @@ static void delayed_sync_fs(struct work_struct *work)
251 251
252 err = hfsplus_sync_fs(sbi->alloc_file->i_sb, 1); 252 err = hfsplus_sync_fs(sbi->alloc_file->i_sb, 1);
253 if (err) 253 if (err)
254 printk(KERN_ERR "hfs: delayed sync fs err %d\n", err); 254 pr_err("delayed sync fs err %d\n", err);
255} 255}
256 256
257void hfsplus_mark_mdb_dirty(struct super_block *sb) 257void hfsplus_mark_mdb_dirty(struct super_block *sb)
@@ -275,7 +275,7 @@ static void hfsplus_put_super(struct super_block *sb)
275{ 275{
276 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); 276 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
277 277
278 dprint(DBG_SUPER, "hfsplus_put_super\n"); 278 hfs_dbg(SUPER, "hfsplus_put_super\n");
279 279
280 cancel_delayed_work_sync(&sbi->sync_work); 280 cancel_delayed_work_sync(&sbi->sync_work);
281 281
@@ -333,25 +333,19 @@ static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
333 return -EINVAL; 333 return -EINVAL;
334 334
335 if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) { 335 if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
336 printk(KERN_WARNING "hfs: filesystem was " 336 pr_warn("filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. leaving read-only.\n");
337 "not cleanly unmounted, "
338 "running fsck.hfsplus is recommended. "
339 "leaving read-only.\n");
340 sb->s_flags |= MS_RDONLY; 337 sb->s_flags |= MS_RDONLY;
341 *flags |= MS_RDONLY; 338 *flags |= MS_RDONLY;
342 } else if (force) { 339 } else if (force) {
343 /* nothing */ 340 /* nothing */
344 } else if (vhdr->attributes & 341 } else if (vhdr->attributes &
345 cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) { 342 cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
346 printk(KERN_WARNING "hfs: filesystem is marked locked, " 343 pr_warn("filesystem is marked locked, leaving read-only.\n");
347 "leaving read-only.\n");
348 sb->s_flags |= MS_RDONLY; 344 sb->s_flags |= MS_RDONLY;
349 *flags |= MS_RDONLY; 345 *flags |= MS_RDONLY;
350 } else if (vhdr->attributes & 346 } else if (vhdr->attributes &
351 cpu_to_be32(HFSPLUS_VOL_JOURNALED)) { 347 cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
352 printk(KERN_WARNING "hfs: filesystem is " 348 pr_warn("filesystem is marked journaled, leaving read-only.\n");
353 "marked journaled, "
354 "leaving read-only.\n");
355 sb->s_flags |= MS_RDONLY; 349 sb->s_flags |= MS_RDONLY;
356 *flags |= MS_RDONLY; 350 *flags |= MS_RDONLY;
357 } 351 }
@@ -397,7 +391,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
397 391
398 err = -EINVAL; 392 err = -EINVAL;
399 if (!hfsplus_parse_options(data, sbi)) { 393 if (!hfsplus_parse_options(data, sbi)) {
400 printk(KERN_ERR "hfs: unable to parse mount options\n"); 394 pr_err("unable to parse mount options\n");
401 goto out_unload_nls; 395 goto out_unload_nls;
402 } 396 }
403 397
@@ -405,14 +399,14 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
405 nls = sbi->nls; 399 nls = sbi->nls;
406 sbi->nls = load_nls("utf8"); 400 sbi->nls = load_nls("utf8");
407 if (!sbi->nls) { 401 if (!sbi->nls) {
408 printk(KERN_ERR "hfs: unable to load nls for utf8\n"); 402 pr_err("unable to load nls for utf8\n");
409 goto out_unload_nls; 403 goto out_unload_nls;
410 } 404 }
411 405
412 /* Grab the volume header */ 406 /* Grab the volume header */
413 if (hfsplus_read_wrapper(sb)) { 407 if (hfsplus_read_wrapper(sb)) {
414 if (!silent) 408 if (!silent)
415 printk(KERN_WARNING "hfs: unable to find HFS+ superblock\n"); 409 pr_warn("unable to find HFS+ superblock\n");
416 goto out_unload_nls; 410 goto out_unload_nls;
417 } 411 }
418 vhdr = sbi->s_vhdr; 412 vhdr = sbi->s_vhdr;
@@ -421,7 +415,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
421 sb->s_magic = HFSPLUS_VOLHEAD_SIG; 415 sb->s_magic = HFSPLUS_VOLHEAD_SIG;
422 if (be16_to_cpu(vhdr->version) < HFSPLUS_MIN_VERSION || 416 if (be16_to_cpu(vhdr->version) < HFSPLUS_MIN_VERSION ||
423 be16_to_cpu(vhdr->version) > HFSPLUS_CURRENT_VERSION) { 417 be16_to_cpu(vhdr->version) > HFSPLUS_CURRENT_VERSION) {
424 printk(KERN_ERR "hfs: wrong filesystem version\n"); 418 pr_err("wrong filesystem version\n");
425 goto out_free_vhdr; 419 goto out_free_vhdr;
426 } 420 }
427 sbi->total_blocks = be32_to_cpu(vhdr->total_blocks); 421 sbi->total_blocks = be32_to_cpu(vhdr->total_blocks);
@@ -445,7 +439,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
445 439
446 if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) || 440 if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) ||
447 (last_fs_page > (pgoff_t)(~0ULL))) { 441 (last_fs_page > (pgoff_t)(~0ULL))) {
448 printk(KERN_ERR "hfs: filesystem size too large.\n"); 442 pr_err("filesystem size too large\n");
449 goto out_free_vhdr; 443 goto out_free_vhdr;
450 } 444 }
451 445
@@ -454,22 +448,16 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
454 sb->s_maxbytes = MAX_LFS_FILESIZE; 448 sb->s_maxbytes = MAX_LFS_FILESIZE;
455 449
456 if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) { 450 if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
457 printk(KERN_WARNING "hfs: Filesystem was " 451 pr_warn("Filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. mounting read-only.\n");
458 "not cleanly unmounted, "
459 "running fsck.hfsplus is recommended. "
460 "mounting read-only.\n");
461 sb->s_flags |= MS_RDONLY; 452 sb->s_flags |= MS_RDONLY;
462 } else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) { 453 } else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
463 /* nothing */ 454 /* nothing */
464 } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) { 455 } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
465 printk(KERN_WARNING "hfs: Filesystem is marked locked, mounting read-only.\n"); 456 pr_warn("Filesystem is marked locked, mounting read-only.\n");
466 sb->s_flags |= MS_RDONLY; 457 sb->s_flags |= MS_RDONLY;
467 } else if ((vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) && 458 } else if ((vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) &&
468 !(sb->s_flags & MS_RDONLY)) { 459 !(sb->s_flags & MS_RDONLY)) {
469 printk(KERN_WARNING "hfs: write access to " 460 pr_warn("write access to a journaled filesystem is not supported, use the force option at your own risk, mounting read-only.\n");
470 "a journaled filesystem is not supported, "
471 "use the force option at your own risk, "
472 "mounting read-only.\n");
473 sb->s_flags |= MS_RDONLY; 461 sb->s_flags |= MS_RDONLY;
474 } 462 }
475 463
@@ -478,18 +466,18 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
478 /* Load metadata objects (B*Trees) */ 466 /* Load metadata objects (B*Trees) */
479 sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID); 467 sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
480 if (!sbi->ext_tree) { 468 if (!sbi->ext_tree) {
481 printk(KERN_ERR "hfs: failed to load extents file\n"); 469 pr_err("failed to load extents file\n");
482 goto out_free_vhdr; 470 goto out_free_vhdr;
483 } 471 }
484 sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID); 472 sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
485 if (!sbi->cat_tree) { 473 if (!sbi->cat_tree) {
486 printk(KERN_ERR "hfs: failed to load catalog file\n"); 474 pr_err("failed to load catalog file\n");
487 goto out_close_ext_tree; 475 goto out_close_ext_tree;
488 } 476 }
489 if (vhdr->attr_file.total_blocks != 0) { 477 if (vhdr->attr_file.total_blocks != 0) {
490 sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID); 478 sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID);
491 if (!sbi->attr_tree) { 479 if (!sbi->attr_tree) {
492 printk(KERN_ERR "hfs: failed to load attributes file\n"); 480 pr_err("failed to load attributes file\n");
493 goto out_close_cat_tree; 481 goto out_close_cat_tree;
494 } 482 }
495 } 483 }
@@ -497,7 +485,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
497 485
498 inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID); 486 inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID);
499 if (IS_ERR(inode)) { 487 if (IS_ERR(inode)) {
500 printk(KERN_ERR "hfs: failed to load allocation file\n"); 488 pr_err("failed to load allocation file\n");
501 err = PTR_ERR(inode); 489 err = PTR_ERR(inode);
502 goto out_close_attr_tree; 490 goto out_close_attr_tree;
503 } 491 }
@@ -506,7 +494,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
506 /* Load the root directory */ 494 /* Load the root directory */
507 root = hfsplus_iget(sb, HFSPLUS_ROOT_CNID); 495 root = hfsplus_iget(sb, HFSPLUS_ROOT_CNID);
508 if (IS_ERR(root)) { 496 if (IS_ERR(root)) {
509 printk(KERN_ERR "hfs: failed to load root directory\n"); 497 pr_err("failed to load root directory\n");
510 err = PTR_ERR(root); 498 err = PTR_ERR(root);
511 goto out_put_alloc_file; 499 goto out_put_alloc_file;
512 } 500 }
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 90effcccca9a..b51a6079108d 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -156,7 +156,7 @@ static int hfsplus_get_last_session(struct super_block *sb,
156 *start = (sector_t)te.cdte_addr.lba << 2; 156 *start = (sector_t)te.cdte_addr.lba << 2;
157 return 0; 157 return 0;
158 } 158 }
159 printk(KERN_ERR "hfs: invalid session number or type of track\n"); 159 pr_err("invalid session number or type of track\n");
160 return -EINVAL; 160 return -EINVAL;
161 } 161 }
162 ms_info.addr_format = CDROM_LBA; 162 ms_info.addr_format = CDROM_LBA;
@@ -234,8 +234,7 @@ reread:
234 234
235 error = -EINVAL; 235 error = -EINVAL;
236 if (sbi->s_backup_vhdr->signature != sbi->s_vhdr->signature) { 236 if (sbi->s_backup_vhdr->signature != sbi->s_vhdr->signature) {
237 printk(KERN_WARNING 237 pr_warn("invalid secondary volume header\n");
238 "hfs: invalid secondary volume header\n");
239 goto out_free_backup_vhdr; 238 goto out_free_backup_vhdr;
240 } 239 }
241 240
@@ -259,8 +258,7 @@ reread:
259 blocksize >>= 1; 258 blocksize >>= 1;
260 259
261 if (sb_set_blocksize(sb, blocksize) != blocksize) { 260 if (sb_set_blocksize(sb, blocksize) != blocksize) {
262 printk(KERN_ERR "hfs: unable to set blocksize to %u!\n", 261 pr_err("unable to set blocksize to %u!\n", blocksize);
263 blocksize);
264 goto out_free_backup_vhdr; 262 goto out_free_backup_vhdr;
265 } 263 }
266 264
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index e8a4b0815c61..f66346155df5 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -107,19 +107,19 @@ int __hfsplus_setxattr(struct inode *inode, const char *name,
107 107
108 err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &cat_fd); 108 err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &cat_fd);
109 if (err) { 109 if (err) {
110 printk(KERN_ERR "hfs: can't init xattr find struct\n"); 110 pr_err("can't init xattr find struct\n");
111 return err; 111 return err;
112 } 112 }
113 113
114 err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &cat_fd); 114 err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &cat_fd);
115 if (err) { 115 if (err) {
116 printk(KERN_ERR "hfs: catalog searching failed\n"); 116 pr_err("catalog searching failed\n");
117 goto end_setxattr; 117 goto end_setxattr;
118 } 118 }
119 119
120 if (!strcmp_xattr_finder_info(name)) { 120 if (!strcmp_xattr_finder_info(name)) {
121 if (flags & XATTR_CREATE) { 121 if (flags & XATTR_CREATE) {
122 printk(KERN_ERR "hfs: xattr exists yet\n"); 122 pr_err("xattr exists yet\n");
123 err = -EOPNOTSUPP; 123 err = -EOPNOTSUPP;
124 goto end_setxattr; 124 goto end_setxattr;
125 } 125 }
@@ -165,7 +165,7 @@ int __hfsplus_setxattr(struct inode *inode, const char *name,
165 165
166 if (hfsplus_attr_exists(inode, name)) { 166 if (hfsplus_attr_exists(inode, name)) {
167 if (flags & XATTR_CREATE) { 167 if (flags & XATTR_CREATE) {
168 printk(KERN_ERR "hfs: xattr exists yet\n"); 168 pr_err("xattr exists yet\n");
169 err = -EOPNOTSUPP; 169 err = -EOPNOTSUPP;
170 goto end_setxattr; 170 goto end_setxattr;
171 } 171 }
@@ -177,7 +177,7 @@ int __hfsplus_setxattr(struct inode *inode, const char *name,
177 goto end_setxattr; 177 goto end_setxattr;
178 } else { 178 } else {
179 if (flags & XATTR_REPLACE) { 179 if (flags & XATTR_REPLACE) {
180 printk(KERN_ERR "hfs: cannot replace xattr\n"); 180 pr_err("cannot replace xattr\n");
181 err = -EOPNOTSUPP; 181 err = -EOPNOTSUPP;
182 goto end_setxattr; 182 goto end_setxattr;
183 } 183 }
@@ -210,7 +210,7 @@ int __hfsplus_setxattr(struct inode *inode, const char *name,
210 cat_entry_flags); 210 cat_entry_flags);
211 hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY); 211 hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
212 } else { 212 } else {
213 printk(KERN_ERR "hfs: invalid catalog entry type\n"); 213 pr_err("invalid catalog entry type\n");
214 err = -EIO; 214 err = -EIO;
215 goto end_setxattr; 215 goto end_setxattr;
216 } 216 }
@@ -269,7 +269,7 @@ static ssize_t hfsplus_getxattr_finder_info(struct dentry *dentry,
269 if (size >= record_len) { 269 if (size >= record_len) {
270 res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd); 270 res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
271 if (res) { 271 if (res) {
272 printk(KERN_ERR "hfs: can't init xattr find struct\n"); 272 pr_err("can't init xattr find struct\n");
273 return res; 273 return res;
274 } 274 }
275 res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd); 275 res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
@@ -340,13 +340,13 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
340 340
341 entry = hfsplus_alloc_attr_entry(); 341 entry = hfsplus_alloc_attr_entry();
342 if (!entry) { 342 if (!entry) {
343 printk(KERN_ERR "hfs: can't allocate xattr entry\n"); 343 pr_err("can't allocate xattr entry\n");
344 return -ENOMEM; 344 return -ENOMEM;
345 } 345 }
346 346
347 res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->attr_tree, &fd); 347 res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->attr_tree, &fd);
348 if (res) { 348 if (res) {
349 printk(KERN_ERR "hfs: can't init xattr find struct\n"); 349 pr_err("can't init xattr find struct\n");
350 goto failed_getxattr_init; 350 goto failed_getxattr_init;
351 } 351 }
352 352
@@ -355,7 +355,7 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
355 if (res == -ENOENT) 355 if (res == -ENOENT)
356 res = -ENODATA; 356 res = -ENODATA;
357 else 357 else
358 printk(KERN_ERR "hfs: xattr searching failed\n"); 358 pr_err("xattr searching failed\n");
359 goto out; 359 goto out;
360 } 360 }
361 361
@@ -368,17 +368,17 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
368 offsetof(struct hfsplus_attr_inline_data, 368 offsetof(struct hfsplus_attr_inline_data,
369 length)); 369 length));
370 if (record_length > HFSPLUS_MAX_INLINE_DATA_SIZE) { 370 if (record_length > HFSPLUS_MAX_INLINE_DATA_SIZE) {
371 printk(KERN_ERR "hfs: invalid xattr record size\n"); 371 pr_err("invalid xattr record size\n");
372 res = -EIO; 372 res = -EIO;
373 goto out; 373 goto out;
374 } 374 }
375 } else if (record_type == HFSPLUS_ATTR_FORK_DATA || 375 } else if (record_type == HFSPLUS_ATTR_FORK_DATA ||
376 record_type == HFSPLUS_ATTR_EXTENTS) { 376 record_type == HFSPLUS_ATTR_EXTENTS) {
377 printk(KERN_ERR "hfs: only inline data xattr are supported\n"); 377 pr_err("only inline data xattr are supported\n");
378 res = -EOPNOTSUPP; 378 res = -EOPNOTSUPP;
379 goto out; 379 goto out;
380 } else { 380 } else {
381 printk(KERN_ERR "hfs: invalid xattr record\n"); 381 pr_err("invalid xattr record\n");
382 res = -EIO; 382 res = -EIO;
383 goto out; 383 goto out;
384 } 384 }
@@ -427,7 +427,7 @@ static ssize_t hfsplus_listxattr_finder_info(struct dentry *dentry,
427 427
428 res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd); 428 res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
429 if (res) { 429 if (res) {
430 printk(KERN_ERR "hfs: can't init xattr find struct\n"); 430 pr_err("can't init xattr find struct\n");
431 return res; 431 return res;
432 } 432 }
433 433
@@ -506,7 +506,7 @@ ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
506 506
507 err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->attr_tree, &fd); 507 err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->attr_tree, &fd);
508 if (err) { 508 if (err) {
509 printk(KERN_ERR "hfs: can't init xattr find struct\n"); 509 pr_err("can't init xattr find struct\n");
510 return err; 510 return err;
511 } 511 }
512 512
@@ -525,8 +525,7 @@ ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
525 for (;;) { 525 for (;;) {
526 key_len = hfs_bnode_read_u16(fd.bnode, fd.keyoffset); 526 key_len = hfs_bnode_read_u16(fd.bnode, fd.keyoffset);
527 if (key_len == 0 || key_len > fd.tree->max_key_len) { 527 if (key_len == 0 || key_len > fd.tree->max_key_len) {
528 printk(KERN_ERR "hfs: invalid xattr key length: %d\n", 528 pr_err("invalid xattr key length: %d\n", key_len);
529 key_len);
530 res = -EIO; 529 res = -EIO;
531 goto end_listxattr; 530 goto end_listxattr;
532 } 531 }
@@ -541,7 +540,7 @@ ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
541 if (hfsplus_uni2asc(inode->i_sb, 540 if (hfsplus_uni2asc(inode->i_sb,
542 (const struct hfsplus_unistr *)&fd.key->attr.key_name, 541 (const struct hfsplus_unistr *)&fd.key->attr.key_name,
543 strbuf, &xattr_name_len)) { 542 strbuf, &xattr_name_len)) {
544 printk(KERN_ERR "hfs: unicode conversion failed\n"); 543 pr_err("unicode conversion failed\n");
545 res = -EIO; 544 res = -EIO;
546 goto end_listxattr; 545 goto end_listxattr;
547 } 546 }
@@ -598,13 +597,13 @@ int hfsplus_removexattr(struct dentry *dentry, const char *name)
598 597
599 err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &cat_fd); 598 err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &cat_fd);
600 if (err) { 599 if (err) {
601 printk(KERN_ERR "hfs: can't init xattr find struct\n"); 600 pr_err("can't init xattr find struct\n");
602 return err; 601 return err;
603 } 602 }
604 603
605 err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &cat_fd); 604 err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &cat_fd);
606 if (err) { 605 if (err) {
607 printk(KERN_ERR "hfs: catalog searching failed\n"); 606 pr_err("catalog searching failed\n");
608 goto end_removexattr; 607 goto end_removexattr;
609 } 608 }
610 609
@@ -643,7 +642,7 @@ int hfsplus_removexattr(struct dentry *dentry, const char *name)
643 flags); 642 flags);
644 hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY); 643 hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
645 } else { 644 } else {
646 printk(KERN_ERR "hfs: invalid catalog entry type\n"); 645 pr_err("invalid catalog entry type\n");
647 err = -EIO; 646 err = -EIO;
648 goto end_removexattr; 647 goto end_removexattr;
649 } 648 }
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 6b49f14eac8c..cf02f5530713 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -175,6 +175,11 @@ static int nilfs_writepages(struct address_space *mapping,
175 struct inode *inode = mapping->host; 175 struct inode *inode = mapping->host;
176 int err = 0; 176 int err = 0;
177 177
178 if (inode->i_sb->s_flags & MS_RDONLY) {
179 nilfs_clear_dirty_pages(mapping, false);
180 return -EROFS;
181 }
182
178 if (wbc->sync_mode == WB_SYNC_ALL) 183 if (wbc->sync_mode == WB_SYNC_ALL)
179 err = nilfs_construct_dsync_segment(inode->i_sb, inode, 184 err = nilfs_construct_dsync_segment(inode->i_sb, inode,
180 wbc->range_start, 185 wbc->range_start,
@@ -187,6 +192,18 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
187 struct inode *inode = page->mapping->host; 192 struct inode *inode = page->mapping->host;
188 int err; 193 int err;
189 194
195 if (inode->i_sb->s_flags & MS_RDONLY) {
196 /*
197 * It means that filesystem was remounted in read-only
198 * mode because of error or metadata corruption. But we
199 * have dirty pages that try to be flushed in background.
200 * So, here we simply discard this dirty page.
201 */
202 nilfs_clear_dirty_page(page, false);
203 unlock_page(page);
204 return -EROFS;
205 }
206
190 redirty_page_for_writepage(wbc, page); 207 redirty_page_for_writepage(wbc, page);
191 unlock_page(page); 208 unlock_page(page);
192 209
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index f9897d09c693..c4dcd1db57ee 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -375,14 +375,25 @@ int nilfs_mdt_fetch_dirty(struct inode *inode)
375static int 375static int
376nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc) 376nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
377{ 377{
378 struct inode *inode; 378 struct inode *inode = page->mapping->host;
379 struct super_block *sb; 379 struct super_block *sb;
380 int err = 0; 380 int err = 0;
381 381
382 if (inode && (inode->i_sb->s_flags & MS_RDONLY)) {
383 /*
384 * It means that filesystem was remounted in read-only
385 * mode because of error or metadata corruption. But we
386 * have dirty pages that try to be flushed in background.
387 * So, here we simply discard this dirty page.
388 */
389 nilfs_clear_dirty_page(page, false);
390 unlock_page(page);
391 return -EROFS;
392 }
393
382 redirty_page_for_writepage(wbc, page); 394 redirty_page_for_writepage(wbc, page);
383 unlock_page(page); 395 unlock_page(page);
384 396
385 inode = page->mapping->host;
386 if (!inode) 397 if (!inode)
387 return 0; 398 return 0;
388 399
@@ -561,10 +572,10 @@ void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
561 if (mi->mi_palloc_cache) 572 if (mi->mi_palloc_cache)
562 nilfs_palloc_clear_cache(inode); 573 nilfs_palloc_clear_cache(inode);
563 574
564 nilfs_clear_dirty_pages(inode->i_mapping); 575 nilfs_clear_dirty_pages(inode->i_mapping, true);
565 nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data); 576 nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data);
566 577
567 nilfs_clear_dirty_pages(&ii->i_btnode_cache); 578 nilfs_clear_dirty_pages(&ii->i_btnode_cache, true);
568 nilfs_copy_back_pages(&ii->i_btnode_cache, &shadow->frozen_btnodes); 579 nilfs_copy_back_pages(&ii->i_btnode_cache, &shadow->frozen_btnodes);
569 580
570 nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store); 581 nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 07f76db04ec7..0ba679866e50 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -370,7 +370,12 @@ repeat:
370 goto repeat; 370 goto repeat;
371} 371}
372 372
373void nilfs_clear_dirty_pages(struct address_space *mapping) 373/**
374 * nilfs_clear_dirty_pages - discard dirty pages in address space
375 * @mapping: address space with dirty pages for discarding
376 * @silent: suppress [true] or print [false] warning messages
377 */
378void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
374{ 379{
375 struct pagevec pvec; 380 struct pagevec pvec;
376 unsigned int i; 381 unsigned int i;
@@ -382,25 +387,9 @@ void nilfs_clear_dirty_pages(struct address_space *mapping)
382 PAGEVEC_SIZE)) { 387 PAGEVEC_SIZE)) {
383 for (i = 0; i < pagevec_count(&pvec); i++) { 388 for (i = 0; i < pagevec_count(&pvec); i++) {
384 struct page *page = pvec.pages[i]; 389 struct page *page = pvec.pages[i];
385 struct buffer_head *bh, *head;
386 390
387 lock_page(page); 391 lock_page(page);
388 ClearPageUptodate(page); 392 nilfs_clear_dirty_page(page, silent);
389 ClearPageMappedToDisk(page);
390 bh = head = page_buffers(page);
391 do {
392 lock_buffer(bh);
393 clear_buffer_dirty(bh);
394 clear_buffer_nilfs_volatile(bh);
395 clear_buffer_nilfs_checked(bh);
396 clear_buffer_nilfs_redirected(bh);
397 clear_buffer_uptodate(bh);
398 clear_buffer_mapped(bh);
399 unlock_buffer(bh);
400 bh = bh->b_this_page;
401 } while (bh != head);
402
403 __nilfs_clear_page_dirty(page);
404 unlock_page(page); 393 unlock_page(page);
405 } 394 }
406 pagevec_release(&pvec); 395 pagevec_release(&pvec);
@@ -408,6 +397,51 @@ void nilfs_clear_dirty_pages(struct address_space *mapping)
408 } 397 }
409} 398}
410 399
400/**
401 * nilfs_clear_dirty_page - discard dirty page
402 * @page: dirty page that will be discarded
403 * @silent: suppress [true] or print [false] warning messages
404 */
405void nilfs_clear_dirty_page(struct page *page, bool silent)
406{
407 struct inode *inode = page->mapping->host;
408 struct super_block *sb = inode->i_sb;
409
410 BUG_ON(!PageLocked(page));
411
412 if (!silent) {
413 nilfs_warning(sb, __func__,
414 "discard page: offset %lld, ino %lu",
415 page_offset(page), inode->i_ino);
416 }
417
418 ClearPageUptodate(page);
419 ClearPageMappedToDisk(page);
420
421 if (page_has_buffers(page)) {
422 struct buffer_head *bh, *head;
423
424 bh = head = page_buffers(page);
425 do {
426 lock_buffer(bh);
427 if (!silent) {
428 nilfs_warning(sb, __func__,
429 "discard block %llu, size %zu",
430 (u64)bh->b_blocknr, bh->b_size);
431 }
432 clear_buffer_dirty(bh);
433 clear_buffer_nilfs_volatile(bh);
434 clear_buffer_nilfs_checked(bh);
435 clear_buffer_nilfs_redirected(bh);
436 clear_buffer_uptodate(bh);
437 clear_buffer_mapped(bh);
438 unlock_buffer(bh);
439 } while (bh = bh->b_this_page, bh != head);
440 }
441
442 __nilfs_clear_page_dirty(page);
443}
444
411unsigned nilfs_page_count_clean_buffers(struct page *page, 445unsigned nilfs_page_count_clean_buffers(struct page *page,
412 unsigned from, unsigned to) 446 unsigned from, unsigned to)
413{ 447{
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index fb7de71605a0..ef30c5c2426f 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -55,7 +55,8 @@ void nilfs_page_bug(struct page *);
55 55
56int nilfs_copy_dirty_pages(struct address_space *, struct address_space *); 56int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
57void nilfs_copy_back_pages(struct address_space *, struct address_space *); 57void nilfs_copy_back_pages(struct address_space *, struct address_space *);
58void nilfs_clear_dirty_pages(struct address_space *); 58void nilfs_clear_dirty_page(struct page *, bool);
59void nilfs_clear_dirty_pages(struct address_space *, bool);
59void nilfs_mapping_init(struct address_space *mapping, struct inode *inode, 60void nilfs_mapping_init(struct address_space *mapping, struct inode *inode,
60 struct backing_dev_info *bdi); 61 struct backing_dev_info *bdi);
61unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned); 62unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned);
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 8562bd3af947..c616a70e8cf9 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -570,7 +570,6 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
570 int add = (arg & IN_MASK_ADD); 570 int add = (arg & IN_MASK_ADD);
571 int ret; 571 int ret;
572 572
573 /* don't allow invalid bits: we don't want flags set */
574 mask = inotify_arg_to_mask(arg); 573 mask = inotify_arg_to_mask(arg);
575 574
576 fsn_mark = fsnotify_find_inode_mark(group, inode); 575 fsn_mark = fsnotify_find_inode_mark(group, inode);
@@ -621,7 +620,6 @@ static int inotify_new_watch(struct fsnotify_group *group,
621 struct idr *idr = &group->inotify_data.idr; 620 struct idr *idr = &group->inotify_data.idr;
622 spinlock_t *idr_lock = &group->inotify_data.idr_lock; 621 spinlock_t *idr_lock = &group->inotify_data.idr_lock;
623 622
624 /* don't allow invalid bits: we don't want flags set */
625 mask = inotify_arg_to_mask(arg); 623 mask = inotify_arg_to_mask(arg);
626 624
627 tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL); 625 tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
@@ -747,6 +745,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
747 int ret; 745 int ret;
748 unsigned flags = 0; 746 unsigned flags = 0;
749 747
748 /* don't allow invalid bits: we don't want flags set */
749 if (unlikely(!(mask & ALL_INOTIFY_BITS)))
750 return -EINVAL;
751
750 f = fdget(fd); 752 f = fdget(fd);
751 if (unlikely(!f.file)) 753 if (unlikely(!f.file))
752 return -EBADF; 754 return -EBADF;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index a19308604145..3861bcec41ff 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1348,11 +1348,10 @@ static ssize_t comm_write(struct file *file, const char __user *buf,
1348 struct inode *inode = file_inode(file); 1348 struct inode *inode = file_inode(file);
1349 struct task_struct *p; 1349 struct task_struct *p;
1350 char buffer[TASK_COMM_LEN]; 1350 char buffer[TASK_COMM_LEN];
1351 const size_t maxlen = sizeof(buffer) - 1;
1351 1352
1352 memset(buffer, 0, sizeof(buffer)); 1353 memset(buffer, 0, sizeof(buffer));
1353 if (count > sizeof(buffer) - 1) 1354 if (copy_from_user(buffer, buf, count > maxlen ? maxlen : count))
1354 count = sizeof(buffer) - 1;
1355 if (copy_from_user(buffer, buf, count))
1356 return -EFAULT; 1355 return -EFAULT;
1357 1356
1358 p = get_proc_task(inode); 1357 p = get_proc_task(inode);
diff --git a/include/Kbuild b/include/Kbuild
index 1dfd33e8d43b..bab1145bc7a7 100644
--- a/include/Kbuild
+++ b/include/Kbuild
@@ -1,5 +1,2 @@
1# Top-level Makefile calls into asm-$(ARCH) 1# Top-level Makefile calls into asm-$(ARCH)
2# List only non-arch directories below 2# List only non-arch directories below
3
4header-y += video/
5header-y += scsi/
diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
index 42e55deee757..4ce9056b31a8 100644
--- a/include/linux/cleancache.h
+++ b/include/linux/cleancache.h
@@ -33,7 +33,7 @@ struct cleancache_ops {
33 void (*invalidate_fs)(int); 33 void (*invalidate_fs)(int);
34}; 34};
35 35
36extern struct cleancache_ops 36extern struct cleancache_ops *
37 cleancache_register_ops(struct cleancache_ops *ops); 37 cleancache_register_ops(struct cleancache_ops *ops);
38extern void __cleancache_init_fs(struct super_block *); 38extern void __cleancache_init_fs(struct super_block *);
39extern void __cleancache_init_shared_fs(char *, struct super_block *); 39extern void __cleancache_init_shared_fs(char *, struct super_block *);
@@ -42,9 +42,9 @@ extern void __cleancache_put_page(struct page *);
42extern void __cleancache_invalidate_page(struct address_space *, struct page *); 42extern void __cleancache_invalidate_page(struct address_space *, struct page *);
43extern void __cleancache_invalidate_inode(struct address_space *); 43extern void __cleancache_invalidate_inode(struct address_space *);
44extern void __cleancache_invalidate_fs(struct super_block *); 44extern void __cleancache_invalidate_fs(struct super_block *);
45extern int cleancache_enabled;
46 45
47#ifdef CONFIG_CLEANCACHE 46#ifdef CONFIG_CLEANCACHE
47#define cleancache_enabled (1)
48static inline bool cleancache_fs_enabled(struct page *page) 48static inline bool cleancache_fs_enabled(struct page *page)
49{ 49{
50 return page->mapping->host->i_sb->cleancache_poolid >= 0; 50 return page->mapping->host->i_sb->cleancache_poolid >= 0;
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index f156cca25ad0..b6eb7a05d58e 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -99,6 +99,7 @@ extern const char * dmi_get_system_info(int field);
99extern const struct dmi_device * dmi_find_device(int type, const char *name, 99extern const struct dmi_device * dmi_find_device(int type, const char *name,
100 const struct dmi_device *from); 100 const struct dmi_device *from);
101extern void dmi_scan_machine(void); 101extern void dmi_scan_machine(void);
102extern void dmi_set_dump_stack_arch_desc(void);
102extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp); 103extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp);
103extern int dmi_name_in_vendors(const char *str); 104extern int dmi_name_in_vendors(const char *str);
104extern int dmi_name_in_serial(const char *str); 105extern int dmi_name_in_serial(const char *str);
@@ -114,6 +115,7 @@ static inline const char * dmi_get_system_info(int field) { return NULL; }
114static inline const struct dmi_device * dmi_find_device(int type, const char *name, 115static inline const struct dmi_device * dmi_find_device(int type, const char *name,
115 const struct dmi_device *from) { return NULL; } 116 const struct dmi_device *from) { return NULL; }
116static inline void dmi_scan_machine(void) { return; } 117static inline void dmi_scan_machine(void) { return; }
118static inline void dmi_set_dump_stack_arch_desc(void) { }
117static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp) 119static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
118{ 120{
119 if (yearp) 121 if (yearp)
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
index 30442547b9e6..8293262401de 100644
--- a/include/linux/frontswap.h
+++ b/include/linux/frontswap.h
@@ -14,7 +14,7 @@ struct frontswap_ops {
14}; 14};
15 15
16extern bool frontswap_enabled; 16extern bool frontswap_enabled;
17extern struct frontswap_ops 17extern struct frontswap_ops *
18 frontswap_register_ops(struct frontswap_ops *ops); 18 frontswap_register_ops(struct frontswap_ops *ops);
19extern void frontswap_shrink(unsigned long); 19extern void frontswap_shrink(unsigned long);
20extern unsigned long frontswap_curr_pages(void); 20extern unsigned long frontswap_curr_pages(void);
@@ -22,33 +22,19 @@ extern void frontswap_writethrough(bool);
22#define FRONTSWAP_HAS_EXCLUSIVE_GETS 22#define FRONTSWAP_HAS_EXCLUSIVE_GETS
23extern void frontswap_tmem_exclusive_gets(bool); 23extern void frontswap_tmem_exclusive_gets(bool);
24 24
25extern void __frontswap_init(unsigned type); 25extern bool __frontswap_test(struct swap_info_struct *, pgoff_t);
26extern void __frontswap_init(unsigned type, unsigned long *map);
26extern int __frontswap_store(struct page *page); 27extern int __frontswap_store(struct page *page);
27extern int __frontswap_load(struct page *page); 28extern int __frontswap_load(struct page *page);
28extern void __frontswap_invalidate_page(unsigned, pgoff_t); 29extern void __frontswap_invalidate_page(unsigned, pgoff_t);
29extern void __frontswap_invalidate_area(unsigned); 30extern void __frontswap_invalidate_area(unsigned);
30 31
31#ifdef CONFIG_FRONTSWAP 32#ifdef CONFIG_FRONTSWAP
33#define frontswap_enabled (1)
32 34
33static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset) 35static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
34{ 36{
35 bool ret = false; 37 return __frontswap_test(sis, offset);
36
37 if (frontswap_enabled && sis->frontswap_map)
38 ret = test_bit(offset, sis->frontswap_map);
39 return ret;
40}
41
42static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset)
43{
44 if (frontswap_enabled && sis->frontswap_map)
45 set_bit(offset, sis->frontswap_map);
46}
47
48static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset)
49{
50 if (frontswap_enabled && sis->frontswap_map)
51 clear_bit(offset, sis->frontswap_map);
52} 38}
53 39
54static inline void frontswap_map_set(struct swap_info_struct *p, 40static inline void frontswap_map_set(struct swap_info_struct *p,
@@ -71,14 +57,6 @@ static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
71 return false; 57 return false;
72} 58}
73 59
74static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset)
75{
76}
77
78static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset)
79{
80}
81
82static inline void frontswap_map_set(struct swap_info_struct *p, 60static inline void frontswap_map_set(struct swap_info_struct *p,
83 unsigned long *map) 61 unsigned long *map)
84{ 62{
@@ -120,10 +98,10 @@ static inline void frontswap_invalidate_area(unsigned type)
120 __frontswap_invalidate_area(type); 98 __frontswap_invalidate_area(type);
121} 99}
122 100
123static inline void frontswap_init(unsigned type) 101static inline void frontswap_init(unsigned type, unsigned long *map)
124{ 102{
125 if (frontswap_enabled) 103 if (frontswap_enabled)
126 __frontswap_init(type); 104 __frontswap_init(type, map);
127} 105}
128 106
129#endif /* _LINUX_FRONTSWAP_H */ 107#endif /* _LINUX_FRONTSWAP_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2c28271ab9d4..17d8b1596215 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -675,9 +675,11 @@ static inline loff_t i_size_read(const struct inode *inode)
675static inline void i_size_write(struct inode *inode, loff_t i_size) 675static inline void i_size_write(struct inode *inode, loff_t i_size)
676{ 676{
677#if BITS_PER_LONG==32 && defined(CONFIG_SMP) 677#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
678 preempt_disable();
678 write_seqcount_begin(&inode->i_size_seqcount); 679 write_seqcount_begin(&inode->i_size_seqcount);
679 inode->i_size = i_size; 680 inode->i_size = i_size;
680 write_seqcount_end(&inode->i_size_seqcount); 681 write_seqcount_end(&inode->i_size_seqcount);
682 preempt_enable();
681#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) 683#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
682 preempt_disable(); 684 preempt_disable();
683 inode->i_size = i_size; 685 inode->i_size = i_size;
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 2dac79c39199..6d1844f393c0 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -798,6 +798,4 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
798# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD 798# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
799#endif 799#endif
800 800
801extern int do_sysinfo(struct sysinfo *info);
802
803#endif 801#endif
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 5398d5807075..0555cc66a15b 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -67,16 +67,15 @@ struct subprocess_info {
67}; 67};
68 68
69extern int 69extern int
70call_usermodehelper_fns(char *path, char **argv, char **envp, int wait, 70call_usermodehelper(char *path, char **argv, char **envp, int wait);
71 int (*init)(struct subprocess_info *info, struct cred *new),
72 void (*cleanup)(struct subprocess_info *), void *data);
73 71
74static inline int 72extern struct subprocess_info *
75call_usermodehelper(char *path, char **argv, char **envp, int wait) 73call_usermodehelper_setup(char *path, char **argv, char **envp, gfp_t gfp_mask,
76{ 74 int (*init)(struct subprocess_info *info, struct cred *new),
77 return call_usermodehelper_fns(path, argv, envp, wait, 75 void (*cleanup)(struct subprocess_info *), void *data);
78 NULL, NULL, NULL); 76
79} 77extern int
78call_usermodehelper_exec(struct subprocess_info *info, int wait);
80 79
81extern struct ctl_table usermodehelper_table[]; 80extern struct ctl_table usermodehelper_table[];
82 81
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 8d816646f766..7dcef3317689 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -43,6 +43,7 @@ bool kthread_should_stop(void);
43bool kthread_should_park(void); 43bool kthread_should_park(void);
44bool kthread_freezable_should_stop(bool *was_frozen); 44bool kthread_freezable_should_stop(bool *was_frozen);
45void *kthread_data(struct task_struct *k); 45void *kthread_data(struct task_struct *k);
46void *probe_kthread_data(struct task_struct *k);
46int kthread_park(struct task_struct *k); 47int kthread_park(struct task_struct *k);
47void kthread_unpark(struct task_struct *k); 48void kthread_unpark(struct task_struct *k);
48void kthread_parkme(void); 49void kthread_parkme(void);
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 73817af8b480..85c31a8e2904 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -137,7 +137,7 @@ enum mem_add_context { BOOT, HOTPLUG };
137#define register_hotmemory_notifier(nb) register_memory_notifier(nb) 137#define register_hotmemory_notifier(nb) register_memory_notifier(nb)
138#define unregister_hotmemory_notifier(nb) unregister_memory_notifier(nb) 138#define unregister_hotmemory_notifier(nb) unregister_memory_notifier(nb)
139#else 139#else
140#define hotplug_memory_notifier(fn, pri) (0) 140#define hotplug_memory_notifier(fn, pri) ({ 0; })
141/* These aren't inline functions due to a GCC bug. */ 141/* These aren't inline functions due to a GCC bug. */
142#define register_hotmemory_notifier(nb) ({ (void)(nb); 0; }) 142#define register_hotmemory_notifier(nb) ({ (void)(nb); 0; })
143#define unregister_hotmemory_notifier(nb) ({ (void)(nb); }) 143#define unregister_hotmemory_notifier(nb) ({ (void)(nb); })
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 215e5e3dda10..8ac32836690e 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -13,7 +13,9 @@ struct pidmap {
13 void *page; 13 void *page;
14}; 14};
15 15
16#define PIDMAP_ENTRIES ((PID_MAX_LIMIT + 8*PAGE_SIZE - 1)/PAGE_SIZE/8) 16#define BITS_PER_PAGE (PAGE_SIZE * 8)
17#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
18#define PIDMAP_ENTRIES ((PID_MAX_LIMIT+BITS_PER_PAGE-1)/BITS_PER_PAGE)
17 19
18struct bsd_acct_struct; 20struct bsd_acct_struct;
19 21
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 4890fe62c011..6af944ab38f0 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -145,6 +145,9 @@ extern void wake_up_klogd(void);
145 145
146void log_buf_kexec_setup(void); 146void log_buf_kexec_setup(void);
147void __init setup_log_buf(int early); 147void __init setup_log_buf(int early);
148void dump_stack_set_arch_desc(const char *fmt, ...);
149void dump_stack_print_info(const char *log_lvl);
150void show_regs_print_info(const char *log_lvl);
148#else 151#else
149static inline __printf(1, 0) 152static inline __printf(1, 0)
150int vprintk(const char *s, va_list args) 153int vprintk(const char *s, va_list args)
@@ -182,6 +185,18 @@ static inline void log_buf_kexec_setup(void)
182static inline void setup_log_buf(int early) 185static inline void setup_log_buf(int early)
183{ 186{
184} 187}
188
189static inline void dump_stack_set_arch_desc(const char *fmt, ...)
190{
191}
192
193static inline void dump_stack_print_info(const char *log_lvl)
194{
195}
196
197static inline void show_regs_print_info(const char *log_lvl)
198{
199}
185#endif 200#endif
186 201
187extern void dump_stack(void) __cold; 202extern void dump_stack(void) __cold;
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 91cacc34c159..d7c8359693c6 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -20,9 +20,6 @@
20#include <linux/poll.h> 20#include <linux/poll.h>
21#include <linux/kref.h> 21#include <linux/kref.h>
22 22
23/* Needs a _much_ better name... */
24#define FIX_SIZE(x) ((((x) - 1) & PAGE_MASK) + PAGE_SIZE)
25
26/* 23/*
27 * Tracks changes to rchan/rchan_buf structs 24 * Tracks changes to rchan/rchan_buf structs
28 */ 25 */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 54ddcb82cddf..6f950048b6e9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -626,6 +626,7 @@ struct signal_struct {
626#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ 626#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */
627#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ 627#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */
628#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ 628#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */
629#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */
629/* 630/*
630 * Pending notifications to parent. 631 * Pending notifications to parent.
631 */ 632 */
@@ -2248,27 +2249,18 @@ static inline void threadgroup_change_end(struct task_struct *tsk)
2248 * 2249 *
2249 * Lock the threadgroup @tsk belongs to. No new task is allowed to enter 2250 * Lock the threadgroup @tsk belongs to. No new task is allowed to enter
2250 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or 2251 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
2251 * perform exec. This is useful for cases where the threadgroup needs to 2252 * change ->group_leader/pid. This is useful for cases where the threadgroup
2252 * stay stable across blockable operations. 2253 * needs to stay stable across blockable operations.
2253 * 2254 *
2254 * fork and exit paths explicitly call threadgroup_change_{begin|end}() for 2255 * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
2255 * synchronization. While held, no new task will be added to threadgroup 2256 * synchronization. While held, no new task will be added to threadgroup
2256 * and no existing live task will have its PF_EXITING set. 2257 * and no existing live task will have its PF_EXITING set.
2257 * 2258 *
2258 * During exec, a task goes and puts its thread group through unusual 2259 * de_thread() does threadgroup_change_{begin|end}() when a non-leader
2259 * changes. After de-threading, exclusive access is assumed to resources 2260 * sub-thread becomes a new leader.
2260 * which are usually shared by tasks in the same group - e.g. sighand may
2261 * be replaced with a new one. Also, the exec'ing task takes over group
2262 * leader role including its pid. Exclude these changes while locked by
2263 * grabbing cred_guard_mutex which is used to synchronize exec path.
2264 */ 2261 */
2265static inline void threadgroup_lock(struct task_struct *tsk) 2262static inline void threadgroup_lock(struct task_struct *tsk)
2266{ 2263{
2267 /*
2268 * exec uses exit for de-threading nesting group_rwsem inside
2269 * cred_guard_mutex. Grab cred_guard_mutex first.
2270 */
2271 mutex_lock(&tsk->signal->cred_guard_mutex);
2272 down_write(&tsk->signal->group_rwsem); 2264 down_write(&tsk->signal->group_rwsem);
2273} 2265}
2274 2266
@@ -2281,7 +2273,6 @@ static inline void threadgroup_lock(struct task_struct *tsk)
2281static inline void threadgroup_unlock(struct task_struct *tsk) 2273static inline void threadgroup_unlock(struct task_struct *tsk)
2282{ 2274{
2283 up_write(&tsk->signal->group_rwsem); 2275 up_write(&tsk->signal->group_rwsem);
2284 mutex_unlock(&tsk->signal->cred_guard_mutex);
2285} 2276}
2286#else 2277#else
2287static inline void threadgroup_change_begin(struct task_struct *tsk) {} 2278static inline void threadgroup_change_begin(struct task_struct *tsk) {}
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 3e07a7df6478..e6564c1dc552 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -20,7 +20,6 @@ struct call_single_data {
20 smp_call_func_t func; 20 smp_call_func_t func;
21 void *info; 21 void *info;
22 u16 flags; 22 u16 flags;
23 u16 priv;
24}; 23};
25 24
26/* total number of cpus in this system (may exceed NR_CPUS) */ 25/* total number of cpus in this system (may exceed NR_CPUS) */
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index a3eb2f65b656..3eeee9672a4a 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -13,4 +13,62 @@ enum string_size_units {
13int string_get_size(u64 size, enum string_size_units units, 13int string_get_size(u64 size, enum string_size_units units,
14 char *buf, int len); 14 char *buf, int len);
15 15
16#define UNESCAPE_SPACE 0x01
17#define UNESCAPE_OCTAL 0x02
18#define UNESCAPE_HEX 0x04
19#define UNESCAPE_SPECIAL 0x08
20#define UNESCAPE_ANY \
21 (UNESCAPE_SPACE | UNESCAPE_OCTAL | UNESCAPE_HEX | UNESCAPE_SPECIAL)
22
23/**
24 * string_unescape - unquote characters in the given string
25 * @src: source buffer (escaped)
26 * @dst: destination buffer (unescaped)
27 * @size: size of the destination buffer (0 to unlimit)
28 * @flags: combination of the flags (bitwise OR):
29 * %UNESCAPE_SPACE:
30 * '\f' - form feed
31 * '\n' - new line
32 * '\r' - carriage return
33 * '\t' - horizontal tab
34 * '\v' - vertical tab
35 * %UNESCAPE_OCTAL:
36 * '\NNN' - byte with octal value NNN (1 to 3 digits)
37 * %UNESCAPE_HEX:
38 * '\xHH' - byte with hexadecimal value HH (1 to 2 digits)
39 * %UNESCAPE_SPECIAL:
40 * '\"' - double quote
41 * '\\' - backslash
42 * '\a' - alert (BEL)
43 * '\e' - escape
44 * %UNESCAPE_ANY:
45 * all previous together
46 *
47 * Returns amount of characters processed to the destination buffer excluding
48 * trailing '\0'.
49 *
50 * Because the size of the output will be the same as or less than the size of
51 * the input, the transformation may be performed in place.
52 *
53 * Caller must provide valid source and destination pointers. Be aware that
54 * destination buffer will always be NULL-terminated. Source string must be
55 * NULL-terminated as well.
56 */
57int string_unescape(char *src, char *dst, size_t size, unsigned int flags);
58
59static inline int string_unescape_inplace(char *buf, unsigned int flags)
60{
61 return string_unescape(buf, buf, 0, flags);
62}
63
64static inline int string_unescape_any(char *src, char *dst, size_t size)
65{
66 return string_unescape(src, dst, size, UNESCAPE_ANY);
67}
68
69static inline int string_unescape_any_inplace(char *buf)
70{
71 return string_unescape_any(buf, buf, 0);
72}
73
16#endif 74#endif
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 717975639378..623488fdc1f5 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -92,6 +92,9 @@ enum {
92 /* bit mask for work_busy() return values */ 92 /* bit mask for work_busy() return values */
93 WORK_BUSY_PENDING = 1 << 0, 93 WORK_BUSY_PENDING = 1 << 0,
94 WORK_BUSY_RUNNING = 1 << 1, 94 WORK_BUSY_RUNNING = 1 << 1,
95
96 /* maximum string length for set_worker_desc() */
97 WORKER_DESC_LEN = 24,
95}; 98};
96 99
97struct work_struct { 100struct work_struct {
@@ -447,6 +450,8 @@ extern void workqueue_set_max_active(struct workqueue_struct *wq,
447extern bool current_is_workqueue_rescuer(void); 450extern bool current_is_workqueue_rescuer(void);
448extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); 451extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
449extern unsigned int work_busy(struct work_struct *work); 452extern unsigned int work_busy(struct work_struct *work);
453extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
454extern void print_worker_info(const char *log_lvl, struct task_struct *task);
450 455
451/** 456/**
452 * queue_work - queue work on a workqueue 457 * queue_work - queue work on a workqueue
diff --git a/include/scsi/Kbuild b/include/scsi/Kbuild
deleted file mode 100644
index 562ff9d591b8..000000000000
--- a/include/scsi/Kbuild
+++ /dev/null
@@ -1 +0,0 @@
1header-y += fc/
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h
index 022ab186a812..52ebcc89f306 100644
--- a/include/uapi/linux/ptrace.h
+++ b/include/uapi/linux/ptrace.h
@@ -5,6 +5,7 @@
5 5
6/* has the defines to get at the registers. */ 6/* has the defines to get at the registers. */
7 7
8#include <linux/types.h>
8 9
9#define PTRACE_TRACEME 0 10#define PTRACE_TRACEME 0
10#define PTRACE_PEEKTEXT 1 11#define PTRACE_PEEKTEXT 1
@@ -52,6 +53,17 @@
52#define PTRACE_INTERRUPT 0x4207 53#define PTRACE_INTERRUPT 0x4207
53#define PTRACE_LISTEN 0x4208 54#define PTRACE_LISTEN 0x4208
54 55
56#define PTRACE_PEEKSIGINFO 0x4209
57
58struct ptrace_peeksiginfo_args {
59 __u64 off; /* from which siginfo to start */
60 __u32 flags;
61 __s32 nr; /* how may siginfos to take */
62};
63
64/* Read signals from a shared (process wide) queue */
65#define PTRACE_PEEKSIGINFO_SHARED (1 << 0)
66
55/* Wait extended result codes for the above trace options. */ 67/* Wait extended result codes for the above trace options. */
56#define PTRACE_EVENT_FORK 1 68#define PTRACE_EVENT_FORK 1
57#define PTRACE_EVENT_VFORK 2 69#define PTRACE_EVENT_VFORK 2
diff --git a/include/xen/tmem.h b/include/xen/tmem.h
index 591550a22ac7..3930a90045ff 100644
--- a/include/xen/tmem.h
+++ b/include/xen/tmem.h
@@ -3,7 +3,15 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6#ifdef CONFIG_XEN_TMEM_MODULE
7#define tmem_enabled true
8#else
6/* defined in drivers/xen/tmem.c */ 9/* defined in drivers/xen/tmem.c */
7extern bool tmem_enabled; 10extern bool tmem_enabled;
11#endif
12
13#ifdef CONFIG_XEN_SELFBALLOONING
14extern int xen_selfballoon_init(bool, bool);
15#endif
8 16
9#endif /* _XEN_TMEM_H */ 17#endif /* _XEN_TMEM_H */
diff --git a/init/Kconfig b/init/Kconfig
index 4367e1379002..a76d13189e47 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1221,6 +1221,35 @@ config SYSCTL
1221config ANON_INODES 1221config ANON_INODES
1222 bool 1222 bool
1223 1223
1224config HAVE_UID16
1225 bool
1226
1227config SYSCTL_EXCEPTION_TRACE
1228 bool
1229 help
1230 Enable support for /proc/sys/debug/exception-trace.
1231
1232config SYSCTL_ARCH_UNALIGN_NO_WARN
1233 bool
1234 help
1235 Enable support for /proc/sys/kernel/ignore-unaligned-usertrap
1236 Allows arch to define/use @no_unaligned_warning to possibly warn
1237 about unaligned access emulation going on under the hood.
1238
1239config SYSCTL_ARCH_UNALIGN_ALLOW
1240 bool
1241 help
1242 Enable support for /proc/sys/kernel/unaligned-trap
1243 Allows arches to define/use @unaligned_enabled to runtime toggle
1244 the unaligned access emulation.
1245 see arch/parisc/kernel/unaligned.c for reference
1246
1247config HOTPLUG
1248 def_bool y
1249
1250config HAVE_PCSPKR_PLATFORM
1251 bool
1252
1224menuconfig EXPERT 1253menuconfig EXPERT
1225 bool "Configure standard kernel features (expert users)" 1254 bool "Configure standard kernel features (expert users)"
1226 # Unhide debug options, to make the on-by-default options visible 1255 # Unhide debug options, to make the on-by-default options visible
@@ -1231,9 +1260,6 @@ menuconfig EXPERT
1231 environments which can tolerate a "non-standard" kernel. 1260 environments which can tolerate a "non-standard" kernel.
1232 Only use this if you really know what you are doing. 1261 Only use this if you really know what you are doing.
1233 1262
1234config HAVE_UID16
1235 bool
1236
1237config UID16 1263config UID16
1238 bool "Enable 16-bit UID system calls" if EXPERT 1264 bool "Enable 16-bit UID system calls" if EXPERT
1239 depends on HAVE_UID16 1265 depends on HAVE_UID16
@@ -1258,26 +1284,6 @@ config SYSCTL_SYSCALL
1258 1284
1259 If unsure say N here. 1285 If unsure say N here.
1260 1286
1261config SYSCTL_EXCEPTION_TRACE
1262 bool
1263 help
1264 Enable support for /proc/sys/debug/exception-trace.
1265
1266config SYSCTL_ARCH_UNALIGN_NO_WARN
1267 bool
1268 help
1269 Enable support for /proc/sys/kernel/ignore-unaligned-usertrap
1270 Allows arch to define/use @no_unaligned_warning to possibly warn
1271 about unaligned access emulation going on under the hood.
1272
1273config SYSCTL_ARCH_UNALIGN_ALLOW
1274 bool
1275 help
1276 Enable support for /proc/sys/kernel/unaligned-trap
1277 Allows arches to define/use @unaligned_enabled to runtime toggle
1278 the unaligned access emulation.
1279 see arch/parisc/kernel/unaligned.c for reference
1280
1281config KALLSYMS 1287config KALLSYMS
1282 bool "Load all symbols for debugging/ksymoops" if EXPERT 1288 bool "Load all symbols for debugging/ksymoops" if EXPERT
1283 default y 1289 default y
@@ -1303,9 +1309,6 @@ config KALLSYMS_ALL
1303 1309
1304 Say N unless you really need all symbols. 1310 Say N unless you really need all symbols.
1305 1311
1306config HOTPLUG
1307 def_bool y
1308
1309config PRINTK 1312config PRINTK
1310 default y 1313 default y
1311 bool "Enable support for printk" if EXPERT 1314 bool "Enable support for printk" if EXPERT
@@ -1344,9 +1347,6 @@ config PCSPKR_PLATFORM
1344 This option allows to disable the internal PC-Speaker 1347 This option allows to disable the internal PC-Speaker
1345 support, saving some memory. 1348 support, saving some memory.
1346 1349
1347config HAVE_PCSPKR_PLATFORM
1348 bool
1349
1350config BASE_FULL 1350config BASE_FULL
1351 default y 1351 default y
1352 bool "Enable full-sized data structures for core" if EXPERT 1352 bool "Enable full-sized data structures for core" if EXPERT
@@ -1418,8 +1418,17 @@ config AIO
1418 default y 1418 default y
1419 help 1419 help
1420 This option enables POSIX asynchronous I/O which may by used 1420 This option enables POSIX asynchronous I/O which may by used
1421 by some high performance threaded applications. Disabling 1421 by some high performance threaded applications. Disabling
1422 this option saves about 7k. 1422 this option saves about 7k.
1423
1424config PCI_QUIRKS
1425 default y
1426 bool "Enable PCI quirk workarounds" if EXPERT
1427 depends on PCI
1428 help
1429 This enables workarounds for various PCI chipset
1430 bugs/quirks. Disable this only if your target machine is
1431 unaffected by PCI quirks.
1423 1432
1424config EMBEDDED 1433config EMBEDDED
1425 bool "Embedded system" 1434 bool "Embedded system"
@@ -1494,15 +1503,6 @@ config VM_EVENT_COUNTERS
1494 on EXPERT systems. /proc/vmstat will only show page counts 1503 on EXPERT systems. /proc/vmstat will only show page counts
1495 if VM event counters are disabled. 1504 if VM event counters are disabled.
1496 1505
1497config PCI_QUIRKS
1498 default y
1499 bool "Enable PCI quirk workarounds" if EXPERT
1500 depends on PCI
1501 help
1502 This enables workarounds for various PCI chipset
1503 bugs/quirks. Disable this only if your target machine is
1504 unaffected by PCI quirks.
1505
1506config SLUB_DEBUG 1506config SLUB_DEBUG
1507 default y 1507 default y
1508 bool "Enable SLUB debugging support" if EXPERT 1508 bool "Enable SLUB debugging support" if EXPERT
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index a32ec1ce882b..3e0878e8a80d 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -50,6 +50,7 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
50 50
51static void __init handle_initrd(void) 51static void __init handle_initrd(void)
52{ 52{
53 struct subprocess_info *info;
53 static char *argv[] = { "linuxrc", NULL, }; 54 static char *argv[] = { "linuxrc", NULL, };
54 extern char *envp_init[]; 55 extern char *envp_init[];
55 int error; 56 int error;
@@ -70,8 +71,11 @@ static void __init handle_initrd(void)
70 */ 71 */
71 current->flags |= PF_FREEZER_SKIP; 72 current->flags |= PF_FREEZER_SKIP;
72 73
73 call_usermodehelper_fns("/linuxrc", argv, envp_init, UMH_WAIT_PROC, 74 info = call_usermodehelper_setup("/linuxrc", argv, envp_init,
74 init_linuxrc, NULL, NULL); 75 GFP_KERNEL, init_linuxrc, NULL, NULL);
76 if (!info)
77 return;
78 call_usermodehelper_exec(info, UMH_WAIT_PROC);
75 79
76 current->flags &= ~PF_FREEZER_SKIP; 80 current->flags &= ~PF_FREEZER_SKIP;
77 81
diff --git a/kernel/compat.c b/kernel/compat.c
index 19971d8c7299..1e8f1455117a 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -1138,71 +1138,6 @@ asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
1138} 1138}
1139#endif 1139#endif
1140 1140
1141struct compat_sysinfo {
1142 s32 uptime;
1143 u32 loads[3];
1144 u32 totalram;
1145 u32 freeram;
1146 u32 sharedram;
1147 u32 bufferram;
1148 u32 totalswap;
1149 u32 freeswap;
1150 u16 procs;
1151 u16 pad;
1152 u32 totalhigh;
1153 u32 freehigh;
1154 u32 mem_unit;
1155 char _f[20-2*sizeof(u32)-sizeof(int)];
1156};
1157
1158asmlinkage long
1159compat_sys_sysinfo(struct compat_sysinfo __user *info)
1160{
1161 struct sysinfo s;
1162
1163 do_sysinfo(&s);
1164
1165 /* Check to see if any memory value is too large for 32-bit and scale
1166 * down if needed
1167 */
1168 if ((s.totalram >> 32) || (s.totalswap >> 32)) {
1169 int bitcount = 0;
1170
1171 while (s.mem_unit < PAGE_SIZE) {
1172 s.mem_unit <<= 1;
1173 bitcount++;
1174 }
1175
1176 s.totalram >>= bitcount;
1177 s.freeram >>= bitcount;
1178 s.sharedram >>= bitcount;
1179 s.bufferram >>= bitcount;
1180 s.totalswap >>= bitcount;
1181 s.freeswap >>= bitcount;
1182 s.totalhigh >>= bitcount;
1183 s.freehigh >>= bitcount;
1184 }
1185
1186 if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
1187 __put_user (s.uptime, &info->uptime) ||
1188 __put_user (s.loads[0], &info->loads[0]) ||
1189 __put_user (s.loads[1], &info->loads[1]) ||
1190 __put_user (s.loads[2], &info->loads[2]) ||
1191 __put_user (s.totalram, &info->totalram) ||
1192 __put_user (s.freeram, &info->freeram) ||
1193 __put_user (s.sharedram, &info->sharedram) ||
1194 __put_user (s.bufferram, &info->bufferram) ||
1195 __put_user (s.totalswap, &info->totalswap) ||
1196 __put_user (s.freeswap, &info->freeswap) ||
1197 __put_user (s.procs, &info->procs) ||
1198 __put_user (s.totalhigh, &info->totalhigh) ||
1199 __put_user (s.freehigh, &info->freehigh) ||
1200 __put_user (s.mem_unit, &info->mem_unit))
1201 return -EFAULT;
1202
1203 return 0;
1204}
1205
1206COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval, 1141COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
1207 compat_pid_t, pid, 1142 compat_pid_t, pid,
1208 struct compat_timespec __user *, interval) 1143 struct compat_timespec __user *, interval)
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index c26278fd4851..0506d447aed2 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -775,7 +775,7 @@ static void sysrq_handle_dbg(int key)
775 775
776static struct sysrq_key_op sysrq_dbg_op = { 776static struct sysrq_key_op sysrq_dbg_op = {
777 .handler = sysrq_handle_dbg, 777 .handler = sysrq_handle_dbg,
778 .help_msg = "debug(G)", 778 .help_msg = "debug(g)",
779 .action_msg = "DEBUG", 779 .action_msg = "DEBUG",
780}; 780};
781#endif 781#endif
diff --git a/kernel/kexec.c b/kernel/kexec.c
index b574920cbd4b..59f7b55ba745 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -786,7 +786,7 @@ static int kimage_load_normal_segment(struct kimage *image,
786 struct kexec_segment *segment) 786 struct kexec_segment *segment)
787{ 787{
788 unsigned long maddr; 788 unsigned long maddr;
789 unsigned long ubytes, mbytes; 789 size_t ubytes, mbytes;
790 int result; 790 int result;
791 unsigned char __user *buf; 791 unsigned char __user *buf;
792 792
@@ -819,13 +819,9 @@ static int kimage_load_normal_segment(struct kimage *image,
819 /* Start with a clear page */ 819 /* Start with a clear page */
820 clear_page(ptr); 820 clear_page(ptr);
821 ptr += maddr & ~PAGE_MASK; 821 ptr += maddr & ~PAGE_MASK;
822 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK); 822 mchunk = min_t(size_t, mbytes,
823 if (mchunk > mbytes) 823 PAGE_SIZE - (maddr & ~PAGE_MASK));
824 mchunk = mbytes; 824 uchunk = min(ubytes, mchunk);
825
826 uchunk = mchunk;
827 if (uchunk > ubytes)
828 uchunk = ubytes;
829 825
830 result = copy_from_user(ptr, buf, uchunk); 826 result = copy_from_user(ptr, buf, uchunk);
831 kunmap(page); 827 kunmap(page);
@@ -850,7 +846,7 @@ static int kimage_load_crash_segment(struct kimage *image,
850 * We do things a page at a time for the sake of kmap. 846 * We do things a page at a time for the sake of kmap.
851 */ 847 */
852 unsigned long maddr; 848 unsigned long maddr;
853 unsigned long ubytes, mbytes; 849 size_t ubytes, mbytes;
854 int result; 850 int result;
855 unsigned char __user *buf; 851 unsigned char __user *buf;
856 852
@@ -871,13 +867,10 @@ static int kimage_load_crash_segment(struct kimage *image,
871 } 867 }
872 ptr = kmap(page); 868 ptr = kmap(page);
873 ptr += maddr & ~PAGE_MASK; 869 ptr += maddr & ~PAGE_MASK;
874 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK); 870 mchunk = min_t(size_t, mbytes,
875 if (mchunk > mbytes) 871 PAGE_SIZE - (maddr & ~PAGE_MASK));
876 mchunk = mbytes; 872 uchunk = min(ubytes, mchunk);
877 873 if (mchunk > uchunk) {
878 uchunk = mchunk;
879 if (uchunk > ubytes) {
880 uchunk = ubytes;
881 /* Zero the trailing part of the page */ 874 /* Zero the trailing part of the page */
882 memset(ptr + uchunk, 0, mchunk - uchunk); 875 memset(ptr + uchunk, 0, mchunk - uchunk);
883 } 876 }
@@ -1540,14 +1533,13 @@ void vmcoreinfo_append_str(const char *fmt, ...)
1540{ 1533{
1541 va_list args; 1534 va_list args;
1542 char buf[0x50]; 1535 char buf[0x50];
1543 int r; 1536 size_t r;
1544 1537
1545 va_start(args, fmt); 1538 va_start(args, fmt);
1546 r = vsnprintf(buf, sizeof(buf), fmt, args); 1539 r = vsnprintf(buf, sizeof(buf), fmt, args);
1547 va_end(args); 1540 va_end(args);
1548 1541
1549 if (r + vmcoreinfo_size > vmcoreinfo_max_size) 1542 r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);
1550 r = vmcoreinfo_max_size - vmcoreinfo_size;
1551 1543
1552 memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r); 1544 memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
1553 1545
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 56dd34976d7b..1296e72e4161 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -77,6 +77,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
77 77
78static int call_modprobe(char *module_name, int wait) 78static int call_modprobe(char *module_name, int wait)
79{ 79{
80 struct subprocess_info *info;
80 static char *envp[] = { 81 static char *envp[] = {
81 "HOME=/", 82 "HOME=/",
82 "TERM=linux", 83 "TERM=linux",
@@ -98,8 +99,15 @@ static int call_modprobe(char *module_name, int wait)
98 argv[3] = module_name; /* check free_modprobe_argv() */ 99 argv[3] = module_name; /* check free_modprobe_argv() */
99 argv[4] = NULL; 100 argv[4] = NULL;
100 101
101 return call_usermodehelper_fns(modprobe_path, argv, envp, 102 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
102 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL); 103 NULL, free_modprobe_argv, NULL);
104 if (!info)
105 goto free_module_name;
106
107 return call_usermodehelper_exec(info, wait | UMH_KILLABLE);
108
109free_module_name:
110 kfree(module_name);
103free_argv: 111free_argv:
104 kfree(argv); 112 kfree(argv);
105out: 113out:
@@ -502,14 +510,28 @@ static void helper_unlock(void)
502 * @argv: arg vector for process 510 * @argv: arg vector for process
503 * @envp: environment for process 511 * @envp: environment for process
504 * @gfp_mask: gfp mask for memory allocation 512 * @gfp_mask: gfp mask for memory allocation
513 * @cleanup: a cleanup function
514 * @init: an init function
515 * @data: arbitrary context sensitive data
505 * 516 *
506 * Returns either %NULL on allocation failure, or a subprocess_info 517 * Returns either %NULL on allocation failure, or a subprocess_info
507 * structure. This should be passed to call_usermodehelper_exec to 518 * structure. This should be passed to call_usermodehelper_exec to
508 * exec the process and free the structure. 519 * exec the process and free the structure.
520 *
521 * The init function is used to customize the helper process prior to
522 * exec. A non-zero return code causes the process to error out, exit,
523 * and return the failure to the calling process
524 *
525 * The cleanup function is just before ethe subprocess_info is about to
526 * be freed. This can be used for freeing the argv and envp. The
527 * Function must be runnable in either a process context or the
528 * context in which call_usermodehelper_exec is called.
509 */ 529 */
510static
511struct subprocess_info *call_usermodehelper_setup(char *path, char **argv, 530struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
512 char **envp, gfp_t gfp_mask) 531 char **envp, gfp_t gfp_mask,
532 int (*init)(struct subprocess_info *info, struct cred *new),
533 void (*cleanup)(struct subprocess_info *info),
534 void *data)
513{ 535{
514 struct subprocess_info *sub_info; 536 struct subprocess_info *sub_info;
515 sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask); 537 sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
@@ -520,50 +542,27 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
520 sub_info->path = path; 542 sub_info->path = path;
521 sub_info->argv = argv; 543 sub_info->argv = argv;
522 sub_info->envp = envp; 544 sub_info->envp = envp;
545
546 sub_info->cleanup = cleanup;
547 sub_info->init = init;
548 sub_info->data = data;
523 out: 549 out:
524 return sub_info; 550 return sub_info;
525} 551}
526 552EXPORT_SYMBOL(call_usermodehelper_setup);
527/**
528 * call_usermodehelper_setfns - set a cleanup/init function
529 * @info: a subprocess_info returned by call_usermodehelper_setup
530 * @cleanup: a cleanup function
531 * @init: an init function
532 * @data: arbitrary context sensitive data
533 *
534 * The init function is used to customize the helper process prior to
535 * exec. A non-zero return code causes the process to error out, exit,
536 * and return the failure to the calling process
537 *
538 * The cleanup function is just before ethe subprocess_info is about to
539 * be freed. This can be used for freeing the argv and envp. The
540 * Function must be runnable in either a process context or the
541 * context in which call_usermodehelper_exec is called.
542 */
543static
544void call_usermodehelper_setfns(struct subprocess_info *info,
545 int (*init)(struct subprocess_info *info, struct cred *new),
546 void (*cleanup)(struct subprocess_info *info),
547 void *data)
548{
549 info->cleanup = cleanup;
550 info->init = init;
551 info->data = data;
552}
553 553
554/** 554/**
555 * call_usermodehelper_exec - start a usermode application 555 * call_usermodehelper_exec - start a usermode application
556 * @sub_info: information about the subprocessa 556 * @sub_info: information about the subprocessa
557 * @wait: wait for the application to finish and return status. 557 * @wait: wait for the application to finish and return status.
558 * when -1 don't wait at all, but you get no useful error back when 558 * when UMH_NO_WAIT don't wait at all, but you get no useful error back
559 * the program couldn't be exec'ed. This makes it safe to call 559 * when the program couldn't be exec'ed. This makes it safe to call
560 * from interrupt context. 560 * from interrupt context.
561 * 561 *
562 * Runs a user-space application. The application is started 562 * Runs a user-space application. The application is started
563 * asynchronously if wait is not set, and runs as a child of keventd. 563 * asynchronously if wait is not set, and runs as a child of keventd.
564 * (ie. it runs with full root capabilities). 564 * (ie. it runs with full root capabilities).
565 */ 565 */
566static
567int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait) 566int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
568{ 567{
569 DECLARE_COMPLETION_ONSTACK(done); 568 DECLARE_COMPLETION_ONSTACK(done);
@@ -615,31 +614,34 @@ unlock:
615 helper_unlock(); 614 helper_unlock();
616 return retval; 615 return retval;
617} 616}
617EXPORT_SYMBOL(call_usermodehelper_exec);
618 618
619/* 619/**
620 * call_usermodehelper_fns() will not run the caller-provided cleanup function 620 * call_usermodehelper() - prepare and start a usermode application
621 * if a memory allocation failure is experienced. So the caller might need to 621 * @path: path to usermode executable
622 * check the call_usermodehelper_fns() return value: if it is -ENOMEM, perform 622 * @argv: arg vector for process
623 * the necessaary cleanup within the caller. 623 * @envp: environment for process
624 * @wait: wait for the application to finish and return status.
625 * when UMH_NO_WAIT don't wait at all, but you get no useful error back
626 * when the program couldn't be exec'ed. This makes it safe to call
627 * from interrupt context.
628 *
629 * This function is the equivalent to use call_usermodehelper_setup() and
630 * call_usermodehelper_exec().
624 */ 631 */
625int call_usermodehelper_fns( 632int call_usermodehelper(char *path, char **argv, char **envp, int wait)
626 char *path, char **argv, char **envp, int wait,
627 int (*init)(struct subprocess_info *info, struct cred *new),
628 void (*cleanup)(struct subprocess_info *), void *data)
629{ 633{
630 struct subprocess_info *info; 634 struct subprocess_info *info;
631 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; 635 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
632 636
633 info = call_usermodehelper_setup(path, argv, envp, gfp_mask); 637 info = call_usermodehelper_setup(path, argv, envp, gfp_mask,
634 638 NULL, NULL, NULL);
635 if (info == NULL) 639 if (info == NULL)
636 return -ENOMEM; 640 return -ENOMEM;
637 641
638 call_usermodehelper_setfns(info, init, cleanup, data);
639
640 return call_usermodehelper_exec(info, wait); 642 return call_usermodehelper_exec(info, wait);
641} 643}
642EXPORT_SYMBOL(call_usermodehelper_fns); 644EXPORT_SYMBOL(call_usermodehelper);
643 645
644static int proc_cap_handler(struct ctl_table *table, int write, 646static int proc_cap_handler(struct ctl_table *table, int write,
645 void __user *buffer, size_t *lenp, loff_t *ppos) 647 void __user *buffer, size_t *lenp, loff_t *ppos)
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 16d8ddd268b1..760e86df8c20 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -17,6 +17,7 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/freezer.h> 18#include <linux/freezer.h>
19#include <linux/ptrace.h> 19#include <linux/ptrace.h>
20#include <linux/uaccess.h>
20#include <trace/events/sched.h> 21#include <trace/events/sched.h>
21 22
22static DEFINE_SPINLOCK(kthread_create_lock); 23static DEFINE_SPINLOCK(kthread_create_lock);
@@ -135,6 +136,24 @@ void *kthread_data(struct task_struct *task)
135 return to_kthread(task)->data; 136 return to_kthread(task)->data;
136} 137}
137 138
139/**
140 * probe_kthread_data - speculative version of kthread_data()
141 * @task: possible kthread task in question
142 *
143 * @task could be a kthread task. Return the data value specified when it
144 * was created if accessible. If @task isn't a kthread task or its data is
145 * inaccessible for any reason, %NULL is returned. This function requires
146 * that @task itself is safe to dereference.
147 */
148void *probe_kthread_data(struct task_struct *task)
149{
150 struct kthread *kthread = to_kthread(task);
151 void *data = NULL;
152
153 probe_kernel_read(&data, &kthread->data, sizeof(data));
154 return data;
155}
156
138static void __kthread_parkme(struct kthread *self) 157static void __kthread_parkme(struct kthread *self)
139{ 158{
140 __set_current_state(TASK_PARKED); 159 __set_current_state(TASK_PARKED);
diff --git a/kernel/panic.c b/kernel/panic.c
index 7c57cc9eee2c..167ec097ce8b 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -22,7 +22,6 @@
22#include <linux/sysrq.h> 22#include <linux/sysrq.h>
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/nmi.h> 24#include <linux/nmi.h>
25#include <linux/dmi.h>
26 25
27#define PANIC_TIMER_STEP 100 26#define PANIC_TIMER_STEP 100
28#define PANIC_BLINK_SPD 18 27#define PANIC_BLINK_SPD 18
@@ -400,13 +399,8 @@ struct slowpath_args {
400static void warn_slowpath_common(const char *file, int line, void *caller, 399static void warn_slowpath_common(const char *file, int line, void *caller,
401 unsigned taint, struct slowpath_args *args) 400 unsigned taint, struct slowpath_args *args)
402{ 401{
403 const char *board;
404
405 printk(KERN_WARNING "------------[ cut here ]------------\n"); 402 printk(KERN_WARNING "------------[ cut here ]------------\n");
406 printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller); 403 printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
407 board = dmi_get_system_info(DMI_PRODUCT_NAME);
408 if (board)
409 printk(KERN_WARNING "Hardware name: %s\n", board);
410 404
411 if (args) 405 if (args)
412 vprintk(args->fmt, args->args); 406 vprintk(args->fmt, args->args);
diff --git a/kernel/pid.c b/kernel/pid.c
index 047dc6264638..6283d6412aff 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -51,9 +51,6 @@ int pid_max = PID_MAX_DEFAULT;
51int pid_max_min = RESERVED_PIDS + 1; 51int pid_max_min = RESERVED_PIDS + 1;
52int pid_max_max = PID_MAX_LIMIT; 52int pid_max_max = PID_MAX_LIMIT;
53 53
54#define BITS_PER_PAGE (PAGE_SIZE*8)
55#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
56
57static inline int mk_pid(struct pid_namespace *pid_ns, 54static inline int mk_pid(struct pid_namespace *pid_ns,
58 struct pidmap *map, int off) 55 struct pidmap *map, int off)
59{ 56{
@@ -183,15 +180,19 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
183 break; 180 break;
184 } 181 }
185 if (likely(atomic_read(&map->nr_free))) { 182 if (likely(atomic_read(&map->nr_free))) {
186 do { 183 for ( ; ; ) {
187 if (!test_and_set_bit(offset, map->page)) { 184 if (!test_and_set_bit(offset, map->page)) {
188 atomic_dec(&map->nr_free); 185 atomic_dec(&map->nr_free);
189 set_last_pid(pid_ns, last, pid); 186 set_last_pid(pid_ns, last, pid);
190 return pid; 187 return pid;
191 } 188 }
192 offset = find_next_offset(map, offset); 189 offset = find_next_offset(map, offset);
190 if (offset >= BITS_PER_PAGE)
191 break;
193 pid = mk_pid(pid_ns, map, offset); 192 pid = mk_pid(pid_ns, map, offset);
194 } while (offset < BITS_PER_PAGE && pid < pid_max); 193 if (pid >= pid_max)
194 break;
195 }
195 } 196 }
196 if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) { 197 if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
197 ++map; 198 ++map;
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index bea15bdf82b0..69473c4a653f 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -19,8 +19,6 @@
19#include <linux/reboot.h> 19#include <linux/reboot.h>
20#include <linux/export.h> 20#include <linux/export.h>
21 21
22#define BITS_PER_PAGE (PAGE_SIZE*8)
23
24struct pid_cache { 22struct pid_cache {
25 int nr_ids; 23 int nr_ids;
26 char name[16]; 24 char name[16];
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index 68197a4e8fc9..7ef6866b521d 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -32,7 +32,7 @@ static void handle_poweroff(int key)
32 32
33static struct sysrq_key_op sysrq_poweroff_op = { 33static struct sysrq_key_op sysrq_poweroff_op = {
34 .handler = handle_poweroff, 34 .handler = handle_poweroff,
35 .help_msg = "powerOff", 35 .help_msg = "poweroff(o)",
36 .action_msg = "Power Off", 36 .action_msg = "Power Off",
37 .enable_mask = SYSRQ_ENABLE_BOOT, 37 .enable_mask = SYSRQ_ENABLE_BOOT,
38}; 38};
diff --git a/kernel/printk.c b/kernel/printk.c
index 376914e2869d..96dcfcd9a2d4 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -43,6 +43,7 @@
43#include <linux/rculist.h> 43#include <linux/rculist.h>
44#include <linux/poll.h> 44#include <linux/poll.h>
45#include <linux/irq_work.h> 45#include <linux/irq_work.h>
46#include <linux/utsname.h>
46 47
47#include <asm/uaccess.h> 48#include <asm/uaccess.h>
48 49
@@ -2849,4 +2850,65 @@ void kmsg_dump_rewind(struct kmsg_dumper *dumper)
2849 raw_spin_unlock_irqrestore(&logbuf_lock, flags); 2850 raw_spin_unlock_irqrestore(&logbuf_lock, flags);
2850} 2851}
2851EXPORT_SYMBOL_GPL(kmsg_dump_rewind); 2852EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
2853
2854static char dump_stack_arch_desc_str[128];
2855
2856/**
2857 * dump_stack_set_arch_desc - set arch-specific str to show with task dumps
2858 * @fmt: printf-style format string
2859 * @...: arguments for the format string
2860 *
2861 * The configured string will be printed right after utsname during task
2862 * dumps. Usually used to add arch-specific system identifiers. If an
2863 * arch wants to make use of such an ID string, it should initialize this
2864 * as soon as possible during boot.
2865 */
2866void __init dump_stack_set_arch_desc(const char *fmt, ...)
2867{
2868 va_list args;
2869
2870 va_start(args, fmt);
2871 vsnprintf(dump_stack_arch_desc_str, sizeof(dump_stack_arch_desc_str),
2872 fmt, args);
2873 va_end(args);
2874}
2875
2876/**
2877 * dump_stack_print_info - print generic debug info for dump_stack()
2878 * @log_lvl: log level
2879 *
2880 * Arch-specific dump_stack() implementations can use this function to
2881 * print out the same debug information as the generic dump_stack().
2882 */
2883void dump_stack_print_info(const char *log_lvl)
2884{
2885 printk("%sCPU: %d PID: %d Comm: %.20s %s %s %.*s\n",
2886 log_lvl, raw_smp_processor_id(), current->pid, current->comm,
2887 print_tainted(), init_utsname()->release,
2888 (int)strcspn(init_utsname()->version, " "),
2889 init_utsname()->version);
2890
2891 if (dump_stack_arch_desc_str[0] != '\0')
2892 printk("%sHardware name: %s\n",
2893 log_lvl, dump_stack_arch_desc_str);
2894
2895 print_worker_info(log_lvl, current);
2896}
2897
2898/**
2899 * show_regs_print_info - print generic debug info for show_regs()
2900 * @log_lvl: log level
2901 *
2902 * show_regs() implementations can use this function to print out generic
2903 * debug information.
2904 */
2905void show_regs_print_info(const char *log_lvl)
2906{
2907 dump_stack_print_info(log_lvl);
2908
2909 printk("%stask: %p ti: %p task.ti: %p\n",
2910 log_lvl, current, current_thread_info(),
2911 task_thread_info(current));
2912}
2913
2852#endif 2914#endif
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index acbd28424d81..17ae54da0ec2 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -24,6 +24,7 @@
24#include <linux/regset.h> 24#include <linux/regset.h>
25#include <linux/hw_breakpoint.h> 25#include <linux/hw_breakpoint.h>
26#include <linux/cn_proc.h> 26#include <linux/cn_proc.h>
27#include <linux/compat.h>
27 28
28 29
29static int ptrace_trapping_sleep_fn(void *flags) 30static int ptrace_trapping_sleep_fn(void *flags)
@@ -618,6 +619,81 @@ static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
618 return error; 619 return error;
619} 620}
620 621
622static int ptrace_peek_siginfo(struct task_struct *child,
623 unsigned long addr,
624 unsigned long data)
625{
626 struct ptrace_peeksiginfo_args arg;
627 struct sigpending *pending;
628 struct sigqueue *q;
629 int ret, i;
630
631 ret = copy_from_user(&arg, (void __user *) addr,
632 sizeof(struct ptrace_peeksiginfo_args));
633 if (ret)
634 return -EFAULT;
635
636 if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
637 return -EINVAL; /* unknown flags */
638
639 if (arg.nr < 0)
640 return -EINVAL;
641
642 if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
643 pending = &child->signal->shared_pending;
644 else
645 pending = &child->pending;
646
647 for (i = 0; i < arg.nr; ) {
648 siginfo_t info;
649 s32 off = arg.off + i;
650
651 spin_lock_irq(&child->sighand->siglock);
652 list_for_each_entry(q, &pending->list, list) {
653 if (!off--) {
654 copy_siginfo(&info, &q->info);
655 break;
656 }
657 }
658 spin_unlock_irq(&child->sighand->siglock);
659
660 if (off >= 0) /* beyond the end of the list */
661 break;
662
663#ifdef CONFIG_COMPAT
664 if (unlikely(is_compat_task())) {
665 compat_siginfo_t __user *uinfo = compat_ptr(data);
666
667 ret = copy_siginfo_to_user32(uinfo, &info);
668 ret |= __put_user(info.si_code, &uinfo->si_code);
669 } else
670#endif
671 {
672 siginfo_t __user *uinfo = (siginfo_t __user *) data;
673
674 ret = copy_siginfo_to_user(uinfo, &info);
675 ret |= __put_user(info.si_code, &uinfo->si_code);
676 }
677
678 if (ret) {
679 ret = -EFAULT;
680 break;
681 }
682
683 data += sizeof(siginfo_t);
684 i++;
685
686 if (signal_pending(current))
687 break;
688
689 cond_resched();
690 }
691
692 if (i > 0)
693 return i;
694
695 return ret;
696}
621 697
622#ifdef PTRACE_SINGLESTEP 698#ifdef PTRACE_SINGLESTEP
623#define is_singlestep(request) ((request) == PTRACE_SINGLESTEP) 699#define is_singlestep(request) ((request) == PTRACE_SINGLESTEP)
@@ -748,6 +824,10 @@ int ptrace_request(struct task_struct *child, long request,
748 ret = put_user(child->ptrace_message, datalp); 824 ret = put_user(child->ptrace_message, datalp);
749 break; 825 break;
750 826
827 case PTRACE_PEEKSIGINFO:
828 ret = ptrace_peek_siginfo(child, addr, data);
829 break;
830
751 case PTRACE_GETSIGINFO: 831 case PTRACE_GETSIGINFO:
752 ret = ptrace_getsiginfo(child, &siginfo); 832 ret = ptrace_getsiginfo(child, &siginfo);
753 if (!ret) 833 if (!ret)
diff --git a/kernel/range.c b/kernel/range.c
index 9b8ae2d6ed68..071b0ab455cb 100644
--- a/kernel/range.c
+++ b/kernel/range.c
@@ -97,7 +97,8 @@ void subtract_range(struct range *range, int az, u64 start, u64 end)
97 range[i].end = range[j].end; 97 range[i].end = range[j].end;
98 range[i].start = end; 98 range[i].start = end;
99 } else { 99 } else {
100 printk(KERN_ERR "run of slot in ranges\n"); 100 pr_err("%s: run out of slot in ranges\n",
101 __func__);
101 } 102 }
102 range[j].end = start; 103 range[j].end = start;
103 continue; 104 continue;
diff --git a/kernel/relay.c b/kernel/relay.c
index 01ab081ac53a..eef0d113b79e 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -588,7 +588,7 @@ struct rchan *relay_open(const char *base_filename,
588 chan->version = RELAYFS_CHANNEL_VERSION; 588 chan->version = RELAYFS_CHANNEL_VERSION;
589 chan->n_subbufs = n_subbufs; 589 chan->n_subbufs = n_subbufs;
590 chan->subbuf_size = subbuf_size; 590 chan->subbuf_size = subbuf_size;
591 chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs); 591 chan->alloc_size = PAGE_ALIGN(subbuf_size * n_subbufs);
592 chan->parent = parent; 592 chan->parent = parent;
593 chan->private_data = private_data; 593 chan->private_data = private_data;
594 if (base_filename) { 594 if (base_filename) {
@@ -1099,8 +1099,7 @@ static size_t relay_file_read_end_pos(struct rchan_buf *buf,
1099static int subbuf_read_actor(size_t read_start, 1099static int subbuf_read_actor(size_t read_start,
1100 struct rchan_buf *buf, 1100 struct rchan_buf *buf,
1101 size_t avail, 1101 size_t avail,
1102 read_descriptor_t *desc, 1102 read_descriptor_t *desc)
1103 read_actor_t actor)
1104{ 1103{
1105 void *from; 1104 void *from;
1106 int ret = 0; 1105 int ret = 0;
@@ -1121,15 +1120,13 @@ static int subbuf_read_actor(size_t read_start,
1121typedef int (*subbuf_actor_t) (size_t read_start, 1120typedef int (*subbuf_actor_t) (size_t read_start,
1122 struct rchan_buf *buf, 1121 struct rchan_buf *buf,
1123 size_t avail, 1122 size_t avail,
1124 read_descriptor_t *desc, 1123 read_descriptor_t *desc);
1125 read_actor_t actor);
1126 1124
1127/* 1125/*
1128 * relay_file_read_subbufs - read count bytes, bridging subbuf boundaries 1126 * relay_file_read_subbufs - read count bytes, bridging subbuf boundaries
1129 */ 1127 */
1130static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos, 1128static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos,
1131 subbuf_actor_t subbuf_actor, 1129 subbuf_actor_t subbuf_actor,
1132 read_actor_t actor,
1133 read_descriptor_t *desc) 1130 read_descriptor_t *desc)
1134{ 1131{
1135 struct rchan_buf *buf = filp->private_data; 1132 struct rchan_buf *buf = filp->private_data;
@@ -1150,7 +1147,7 @@ static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos,
1150 break; 1147 break;
1151 1148
1152 avail = min(desc->count, avail); 1149 avail = min(desc->count, avail);
1153 ret = subbuf_actor(read_start, buf, avail, desc, actor); 1150 ret = subbuf_actor(read_start, buf, avail, desc);
1154 if (desc->error < 0) 1151 if (desc->error < 0)
1155 break; 1152 break;
1156 1153
@@ -1174,8 +1171,7 @@ static ssize_t relay_file_read(struct file *filp,
1174 desc.count = count; 1171 desc.count = count;
1175 desc.arg.buf = buffer; 1172 desc.arg.buf = buffer;
1176 desc.error = 0; 1173 desc.error = 0;
1177 return relay_file_read_subbufs(filp, ppos, subbuf_read_actor, 1174 return relay_file_read_subbufs(filp, ppos, subbuf_read_actor, &desc);
1178 NULL, &desc);
1179} 1175}
1180 1176
1181static void relay_consume_bytes(struct rchan_buf *rbuf, int bytes_consumed) 1177static void relay_consume_bytes(struct rchan_buf *rbuf, int bytes_consumed)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c70a8814a767..5662f58f0b69 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4586,6 +4586,7 @@ void sched_show_task(struct task_struct *p)
4586 task_pid_nr(p), ppid, 4586 task_pid_nr(p), ppid,
4587 (unsigned long)task_thread_info(p)->flags); 4587 (unsigned long)task_thread_info(p)->flags);
4588 4588
4589 print_worker_info(KERN_INFO, p);
4589 show_stack(p, NULL); 4590 show_stack(p, NULL);
4590} 4591}
4591 4592
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index 4567fc020fe3..6815171a4fff 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -193,7 +193,7 @@ EXPORT_SYMBOL(up);
193struct semaphore_waiter { 193struct semaphore_waiter {
194 struct list_head list; 194 struct list_head list;
195 struct task_struct *task; 195 struct task_struct *task;
196 int up; 196 bool up;
197}; 197};
198 198
199/* 199/*
@@ -209,12 +209,12 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
209 209
210 list_add_tail(&waiter.list, &sem->wait_list); 210 list_add_tail(&waiter.list, &sem->wait_list);
211 waiter.task = task; 211 waiter.task = task;
212 waiter.up = 0; 212 waiter.up = false;
213 213
214 for (;;) { 214 for (;;) {
215 if (signal_pending_state(state, task)) 215 if (signal_pending_state(state, task))
216 goto interrupted; 216 goto interrupted;
217 if (timeout <= 0) 217 if (unlikely(timeout <= 0))
218 goto timed_out; 218 goto timed_out;
219 __set_task_state(task, state); 219 __set_task_state(task, state);
220 raw_spin_unlock_irq(&sem->lock); 220 raw_spin_unlock_irq(&sem->lock);
@@ -258,6 +258,6 @@ static noinline void __sched __up(struct semaphore *sem)
258 struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list, 258 struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
259 struct semaphore_waiter, list); 259 struct semaphore_waiter, list);
260 list_del(&waiter->list); 260 list_del(&waiter->list);
261 waiter->up = 1; 261 waiter->up = true;
262 wake_up_process(waiter->task); 262 wake_up_process(waiter->task);
263} 263}
diff --git a/kernel/signal.c b/kernel/signal.c
index 598dc06be421..cede58910f9c 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -854,12 +854,14 @@ static void ptrace_trap_notify(struct task_struct *t)
854 * Returns true if the signal should be actually delivered, otherwise 854 * Returns true if the signal should be actually delivered, otherwise
855 * it should be dropped. 855 * it should be dropped.
856 */ 856 */
857static int prepare_signal(int sig, struct task_struct *p, bool force) 857static bool prepare_signal(int sig, struct task_struct *p, bool force)
858{ 858{
859 struct signal_struct *signal = p->signal; 859 struct signal_struct *signal = p->signal;
860 struct task_struct *t; 860 struct task_struct *t;
861 861
862 if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) { 862 if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
863 if (signal->flags & SIGNAL_GROUP_COREDUMP)
864 return sig == SIGKILL;
863 /* 865 /*
864 * The process is in the middle of dying, nothing to do. 866 * The process is in the middle of dying, nothing to do.
865 */ 867 */
@@ -1160,8 +1162,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1160static void print_fatal_signal(int signr) 1162static void print_fatal_signal(int signr)
1161{ 1163{
1162 struct pt_regs *regs = signal_pt_regs(); 1164 struct pt_regs *regs = signal_pt_regs();
1163 printk(KERN_INFO "%s/%d: potentially unexpected fatal signal %d.\n", 1165 printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);
1164 current->comm, task_pid_nr(current), signr);
1165 1166
1166#if defined(__i386__) && !defined(__arch_um__) 1167#if defined(__i386__) && !defined(__arch_um__)
1167 printk(KERN_INFO "code at %08lx: ", regs->ip); 1168 printk(KERN_INFO "code at %08lx: ", regs->ip);
diff --git a/kernel/smp.c b/kernel/smp.c
index 8e451f3ff51b..4dba0f7b72ad 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -100,16 +100,16 @@ void __init call_function_init(void)
100 * previous function call. For multi-cpu calls its even more interesting 100 * previous function call. For multi-cpu calls its even more interesting
101 * as we'll have to ensure no other cpu is observing our csd. 101 * as we'll have to ensure no other cpu is observing our csd.
102 */ 102 */
103static void csd_lock_wait(struct call_single_data *data) 103static void csd_lock_wait(struct call_single_data *csd)
104{ 104{
105 while (data->flags & CSD_FLAG_LOCK) 105 while (csd->flags & CSD_FLAG_LOCK)
106 cpu_relax(); 106 cpu_relax();
107} 107}
108 108
109static void csd_lock(struct call_single_data *data) 109static void csd_lock(struct call_single_data *csd)
110{ 110{
111 csd_lock_wait(data); 111 csd_lock_wait(csd);
112 data->flags = CSD_FLAG_LOCK; 112 csd->flags |= CSD_FLAG_LOCK;
113 113
114 /* 114 /*
115 * prevent CPU from reordering the above assignment 115 * prevent CPU from reordering the above assignment
@@ -119,16 +119,16 @@ static void csd_lock(struct call_single_data *data)
119 smp_mb(); 119 smp_mb();
120} 120}
121 121
122static void csd_unlock(struct call_single_data *data) 122static void csd_unlock(struct call_single_data *csd)
123{ 123{
124 WARN_ON(!(data->flags & CSD_FLAG_LOCK)); 124 WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
125 125
126 /* 126 /*
127 * ensure we're all done before releasing data: 127 * ensure we're all done before releasing data:
128 */ 128 */
129 smp_mb(); 129 smp_mb();
130 130
131 data->flags &= ~CSD_FLAG_LOCK; 131 csd->flags &= ~CSD_FLAG_LOCK;
132} 132}
133 133
134/* 134/*
@@ -137,7 +137,7 @@ static void csd_unlock(struct call_single_data *data)
137 * ->func, ->info, and ->flags set. 137 * ->func, ->info, and ->flags set.
138 */ 138 */
139static 139static
140void generic_exec_single(int cpu, struct call_single_data *data, int wait) 140void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
141{ 141{
142 struct call_single_queue *dst = &per_cpu(call_single_queue, cpu); 142 struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
143 unsigned long flags; 143 unsigned long flags;
@@ -145,7 +145,7 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
145 145
146 raw_spin_lock_irqsave(&dst->lock, flags); 146 raw_spin_lock_irqsave(&dst->lock, flags);
147 ipi = list_empty(&dst->list); 147 ipi = list_empty(&dst->list);
148 list_add_tail(&data->list, &dst->list); 148 list_add_tail(&csd->list, &dst->list);
149 raw_spin_unlock_irqrestore(&dst->lock, flags); 149 raw_spin_unlock_irqrestore(&dst->lock, flags);
150 150
151 /* 151 /*
@@ -163,7 +163,7 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
163 arch_send_call_function_single_ipi(cpu); 163 arch_send_call_function_single_ipi(cpu);
164 164
165 if (wait) 165 if (wait)
166 csd_lock_wait(data); 166 csd_lock_wait(csd);
167} 167}
168 168
169/* 169/*
@@ -173,7 +173,6 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
173void generic_smp_call_function_single_interrupt(void) 173void generic_smp_call_function_single_interrupt(void)
174{ 174{
175 struct call_single_queue *q = &__get_cpu_var(call_single_queue); 175 struct call_single_queue *q = &__get_cpu_var(call_single_queue);
176 unsigned int data_flags;
177 LIST_HEAD(list); 176 LIST_HEAD(list);
178 177
179 /* 178 /*
@@ -186,25 +185,26 @@ void generic_smp_call_function_single_interrupt(void)
186 raw_spin_unlock(&q->lock); 185 raw_spin_unlock(&q->lock);
187 186
188 while (!list_empty(&list)) { 187 while (!list_empty(&list)) {
189 struct call_single_data *data; 188 struct call_single_data *csd;
189 unsigned int csd_flags;
190 190
191 data = list_entry(list.next, struct call_single_data, list); 191 csd = list_entry(list.next, struct call_single_data, list);
192 list_del(&data->list); 192 list_del(&csd->list);
193 193
194 /* 194 /*
195 * 'data' can be invalid after this call if flags == 0 195 * 'csd' can be invalid after this call if flags == 0
196 * (when called through generic_exec_single()), 196 * (when called through generic_exec_single()),
197 * so save them away before making the call: 197 * so save them away before making the call:
198 */ 198 */
199 data_flags = data->flags; 199 csd_flags = csd->flags;
200 200
201 data->func(data->info); 201 csd->func(csd->info);
202 202
203 /* 203 /*
204 * Unlocked CSDs are valid through generic_exec_single(): 204 * Unlocked CSDs are valid through generic_exec_single():
205 */ 205 */
206 if (data_flags & CSD_FLAG_LOCK) 206 if (csd_flags & CSD_FLAG_LOCK)
207 csd_unlock(data); 207 csd_unlock(csd);
208 } 208 }
209} 209}
210 210
@@ -249,16 +249,16 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
249 local_irq_restore(flags); 249 local_irq_restore(flags);
250 } else { 250 } else {
251 if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) { 251 if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
252 struct call_single_data *data = &d; 252 struct call_single_data *csd = &d;
253 253
254 if (!wait) 254 if (!wait)
255 data = &__get_cpu_var(csd_data); 255 csd = &__get_cpu_var(csd_data);
256 256
257 csd_lock(data); 257 csd_lock(csd);
258 258
259 data->func = func; 259 csd->func = func;
260 data->info = info; 260 csd->info = info;
261 generic_exec_single(cpu, data, wait); 261 generic_exec_single(cpu, csd, wait);
262 } else { 262 } else {
263 err = -ENXIO; /* CPU not online */ 263 err = -ENXIO; /* CPU not online */
264 } 264 }
@@ -325,7 +325,7 @@ EXPORT_SYMBOL_GPL(smp_call_function_any);
325 * pre-allocated data structure. Useful for embedding @data inside 325 * pre-allocated data structure. Useful for embedding @data inside
326 * other structures, for instance. 326 * other structures, for instance.
327 */ 327 */
328void __smp_call_function_single(int cpu, struct call_single_data *data, 328void __smp_call_function_single(int cpu, struct call_single_data *csd,
329 int wait) 329 int wait)
330{ 330{
331 unsigned int this_cpu; 331 unsigned int this_cpu;
@@ -343,11 +343,11 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
343 343
344 if (cpu == this_cpu) { 344 if (cpu == this_cpu) {
345 local_irq_save(flags); 345 local_irq_save(flags);
346 data->func(data->info); 346 csd->func(csd->info);
347 local_irq_restore(flags); 347 local_irq_restore(flags);
348 } else { 348 } else {
349 csd_lock(data); 349 csd_lock(csd);
350 generic_exec_single(cpu, data, wait); 350 generic_exec_single(cpu, csd, wait);
351 } 351 }
352 put_cpu(); 352 put_cpu();
353} 353}
@@ -369,7 +369,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
369void smp_call_function_many(const struct cpumask *mask, 369void smp_call_function_many(const struct cpumask *mask,
370 smp_call_func_t func, void *info, bool wait) 370 smp_call_func_t func, void *info, bool wait)
371{ 371{
372 struct call_function_data *data; 372 struct call_function_data *cfd;
373 int cpu, next_cpu, this_cpu = smp_processor_id(); 373 int cpu, next_cpu, this_cpu = smp_processor_id();
374 374
375 /* 375 /*
@@ -401,24 +401,24 @@ void smp_call_function_many(const struct cpumask *mask,
401 return; 401 return;
402 } 402 }
403 403
404 data = &__get_cpu_var(cfd_data); 404 cfd = &__get_cpu_var(cfd_data);
405 405
406 cpumask_and(data->cpumask, mask, cpu_online_mask); 406 cpumask_and(cfd->cpumask, mask, cpu_online_mask);
407 cpumask_clear_cpu(this_cpu, data->cpumask); 407 cpumask_clear_cpu(this_cpu, cfd->cpumask);
408 408
409 /* Some callers race with other cpus changing the passed mask */ 409 /* Some callers race with other cpus changing the passed mask */
410 if (unlikely(!cpumask_weight(data->cpumask))) 410 if (unlikely(!cpumask_weight(cfd->cpumask)))
411 return; 411 return;
412 412
413 /* 413 /*
414 * After we put an entry into the list, data->cpumask 414 * After we put an entry into the list, cfd->cpumask may be cleared
415 * may be cleared again when another CPU sends another IPI for 415 * again when another CPU sends another IPI for a SMP function call, so
416 * a SMP function call, so data->cpumask will be zero. 416 * cfd->cpumask will be zero.
417 */ 417 */
418 cpumask_copy(data->cpumask_ipi, data->cpumask); 418 cpumask_copy(cfd->cpumask_ipi, cfd->cpumask);
419 419
420 for_each_cpu(cpu, data->cpumask) { 420 for_each_cpu(cpu, cfd->cpumask) {
421 struct call_single_data *csd = per_cpu_ptr(data->csd, cpu); 421 struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
422 struct call_single_queue *dst = 422 struct call_single_queue *dst =
423 &per_cpu(call_single_queue, cpu); 423 &per_cpu(call_single_queue, cpu);
424 unsigned long flags; 424 unsigned long flags;
@@ -433,12 +433,13 @@ void smp_call_function_many(const struct cpumask *mask,
433 } 433 }
434 434
435 /* Send a message to all CPUs in the map */ 435 /* Send a message to all CPUs in the map */
436 arch_send_call_function_ipi_mask(data->cpumask_ipi); 436 arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
437 437
438 if (wait) { 438 if (wait) {
439 for_each_cpu(cpu, data->cpumask) { 439 for_each_cpu(cpu, cfd->cpumask) {
440 struct call_single_data *csd = 440 struct call_single_data *csd;
441 per_cpu_ptr(data->csd, cpu); 441
442 csd = per_cpu_ptr(cfd->csd, cpu);
442 csd_lock_wait(csd); 443 csd_lock_wait(csd);
443 } 444 }
444 } 445 }
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 14d7758074aa..aa82723c7202 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -620,8 +620,7 @@ static void remote_softirq_receive(void *data)
620 unsigned long flags; 620 unsigned long flags;
621 int softirq; 621 int softirq;
622 622
623 softirq = cp->priv; 623 softirq = *(int *)cp->info;
624
625 local_irq_save(flags); 624 local_irq_save(flags);
626 __local_trigger(cp, softirq); 625 __local_trigger(cp, softirq);
627 local_irq_restore(flags); 626 local_irq_restore(flags);
@@ -631,9 +630,8 @@ static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softir
631{ 630{
632 if (cpu_online(cpu)) { 631 if (cpu_online(cpu)) {
633 cp->func = remote_softirq_receive; 632 cp->func = remote_softirq_receive;
634 cp->info = cp; 633 cp->info = &softirq;
635 cp->flags = 0; 634 cp->flags = 0;
636 cp->priv = softirq;
637 635
638 __smp_call_function_single(cpu, cp, 0); 636 __smp_call_function_single(cpu, cp, 0);
639 return 0; 637 return 0;
diff --git a/kernel/sys.c b/kernel/sys.c
index 0da73cf73e60..afd0f7e125c9 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -49,6 +49,11 @@
49#include <linux/user_namespace.h> 49#include <linux/user_namespace.h>
50#include <linux/binfmts.h> 50#include <linux/binfmts.h>
51 51
52#include <linux/sched.h>
53#include <linux/rcupdate.h>
54#include <linux/uidgid.h>
55#include <linux/cred.h>
56
52#include <linux/kmsg_dump.h> 57#include <linux/kmsg_dump.h>
53/* Move somewhere else to avoid recompiling? */ 58/* Move somewhere else to avoid recompiling? */
54#include <generated/utsrelease.h> 59#include <generated/utsrelease.h>
@@ -1044,6 +1049,67 @@ change_okay:
1044 return old_fsgid; 1049 return old_fsgid;
1045} 1050}
1046 1051
1052/**
1053 * sys_getpid - return the thread group id of the current process
1054 *
1055 * Note, despite the name, this returns the tgid not the pid. The tgid and
1056 * the pid are identical unless CLONE_THREAD was specified on clone() in
1057 * which case the tgid is the same in all threads of the same group.
1058 *
1059 * This is SMP safe as current->tgid does not change.
1060 */
1061SYSCALL_DEFINE0(getpid)
1062{
1063 return task_tgid_vnr(current);
1064}
1065
1066/* Thread ID - the internal kernel "pid" */
1067SYSCALL_DEFINE0(gettid)
1068{
1069 return task_pid_vnr(current);
1070}
1071
1072/*
1073 * Accessing ->real_parent is not SMP-safe, it could
1074 * change from under us. However, we can use a stale
1075 * value of ->real_parent under rcu_read_lock(), see
1076 * release_task()->call_rcu(delayed_put_task_struct).
1077 */
1078SYSCALL_DEFINE0(getppid)
1079{
1080 int pid;
1081
1082 rcu_read_lock();
1083 pid = task_tgid_vnr(rcu_dereference(current->real_parent));
1084 rcu_read_unlock();
1085
1086 return pid;
1087}
1088
1089SYSCALL_DEFINE0(getuid)
1090{
1091 /* Only we change this so SMP safe */
1092 return from_kuid_munged(current_user_ns(), current_uid());
1093}
1094
1095SYSCALL_DEFINE0(geteuid)
1096{
1097 /* Only we change this so SMP safe */
1098 return from_kuid_munged(current_user_ns(), current_euid());
1099}
1100
1101SYSCALL_DEFINE0(getgid)
1102{
1103 /* Only we change this so SMP safe */
1104 return from_kgid_munged(current_user_ns(), current_gid());
1105}
1106
1107SYSCALL_DEFINE0(getegid)
1108{
1109 /* Only we change this so SMP safe */
1110 return from_kgid_munged(current_user_ns(), current_egid());
1111}
1112
1047void do_sys_times(struct tms *tms) 1113void do_sys_times(struct tms *tms)
1048{ 1114{
1049 cputime_t tgutime, tgstime, cutime, cstime; 1115 cputime_t tgutime, tgstime, cutime, cstime;
@@ -1791,7 +1857,6 @@ SYSCALL_DEFINE1(umask, int, mask)
1791 return mask; 1857 return mask;
1792} 1858}
1793 1859
1794#ifdef CONFIG_CHECKPOINT_RESTORE
1795static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) 1860static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
1796{ 1861{
1797 struct fd exe; 1862 struct fd exe;
@@ -1985,17 +2050,12 @@ out:
1985 return error; 2050 return error;
1986} 2051}
1987 2052
2053#ifdef CONFIG_CHECKPOINT_RESTORE
1988static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr) 2054static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
1989{ 2055{
1990 return put_user(me->clear_child_tid, tid_addr); 2056 return put_user(me->clear_child_tid, tid_addr);
1991} 2057}
1992 2058#else
1993#else /* CONFIG_CHECKPOINT_RESTORE */
1994static int prctl_set_mm(int opt, unsigned long addr,
1995 unsigned long arg4, unsigned long arg5)
1996{
1997 return -EINVAL;
1998}
1999static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr) 2059static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2000{ 2060{
2001 return -EINVAL; 2061 return -EINVAL;
@@ -2245,3 +2305,148 @@ int orderly_poweroff(bool force)
2245 return 0; 2305 return 0;
2246} 2306}
2247EXPORT_SYMBOL_GPL(orderly_poweroff); 2307EXPORT_SYMBOL_GPL(orderly_poweroff);
2308
2309/**
2310 * do_sysinfo - fill in sysinfo struct
2311 * @info: pointer to buffer to fill
2312 */
2313static int do_sysinfo(struct sysinfo *info)
2314{
2315 unsigned long mem_total, sav_total;
2316 unsigned int mem_unit, bitcount;
2317 struct timespec tp;
2318
2319 memset(info, 0, sizeof(struct sysinfo));
2320
2321 ktime_get_ts(&tp);
2322 monotonic_to_bootbased(&tp);
2323 info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
2324
2325 get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
2326
2327 info->procs = nr_threads;
2328
2329 si_meminfo(info);
2330 si_swapinfo(info);
2331
2332 /*
2333 * If the sum of all the available memory (i.e. ram + swap)
2334 * is less than can be stored in a 32 bit unsigned long then
2335 * we can be binary compatible with 2.2.x kernels. If not,
2336 * well, in that case 2.2.x was broken anyways...
2337 *
2338 * -Erik Andersen <andersee@debian.org>
2339 */
2340
2341 mem_total = info->totalram + info->totalswap;
2342 if (mem_total < info->totalram || mem_total < info->totalswap)
2343 goto out;
2344 bitcount = 0;
2345 mem_unit = info->mem_unit;
2346 while (mem_unit > 1) {
2347 bitcount++;
2348 mem_unit >>= 1;
2349 sav_total = mem_total;
2350 mem_total <<= 1;
2351 if (mem_total < sav_total)
2352 goto out;
2353 }
2354
2355 /*
2356 * If mem_total did not overflow, multiply all memory values by
2357 * info->mem_unit and set it to 1. This leaves things compatible
2358 * with 2.2.x, and also retains compatibility with earlier 2.4.x
2359 * kernels...
2360 */
2361
2362 info->mem_unit = 1;
2363 info->totalram <<= bitcount;
2364 info->freeram <<= bitcount;
2365 info->sharedram <<= bitcount;
2366 info->bufferram <<= bitcount;
2367 info->totalswap <<= bitcount;
2368 info->freeswap <<= bitcount;
2369 info->totalhigh <<= bitcount;
2370 info->freehigh <<= bitcount;
2371
2372out:
2373 return 0;
2374}
2375
2376SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
2377{
2378 struct sysinfo val;
2379
2380 do_sysinfo(&val);
2381
2382 if (copy_to_user(info, &val, sizeof(struct sysinfo)))
2383 return -EFAULT;
2384
2385 return 0;
2386}
2387
2388#ifdef CONFIG_COMPAT
2389struct compat_sysinfo {
2390 s32 uptime;
2391 u32 loads[3];
2392 u32 totalram;
2393 u32 freeram;
2394 u32 sharedram;
2395 u32 bufferram;
2396 u32 totalswap;
2397 u32 freeswap;
2398 u16 procs;
2399 u16 pad;
2400 u32 totalhigh;
2401 u32 freehigh;
2402 u32 mem_unit;
2403 char _f[20-2*sizeof(u32)-sizeof(int)];
2404};
2405
2406COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
2407{
2408 struct sysinfo s;
2409
2410 do_sysinfo(&s);
2411
2412 /* Check to see if any memory value is too large for 32-bit and scale
2413 * down if needed
2414 */
2415 if ((s.totalram >> 32) || (s.totalswap >> 32)) {
2416 int bitcount = 0;
2417
2418 while (s.mem_unit < PAGE_SIZE) {
2419 s.mem_unit <<= 1;
2420 bitcount++;
2421 }
2422
2423 s.totalram >>= bitcount;
2424 s.freeram >>= bitcount;
2425 s.sharedram >>= bitcount;
2426 s.bufferram >>= bitcount;
2427 s.totalswap >>= bitcount;
2428 s.freeswap >>= bitcount;
2429 s.totalhigh >>= bitcount;
2430 s.freehigh >>= bitcount;
2431 }
2432
2433 if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
2434 __put_user(s.uptime, &info->uptime) ||
2435 __put_user(s.loads[0], &info->loads[0]) ||
2436 __put_user(s.loads[1], &info->loads[1]) ||
2437 __put_user(s.loads[2], &info->loads[2]) ||
2438 __put_user(s.totalram, &info->totalram) ||
2439 __put_user(s.freeram, &info->freeram) ||
2440 __put_user(s.sharedram, &info->sharedram) ||
2441 __put_user(s.bufferram, &info->bufferram) ||
2442 __put_user(s.totalswap, &info->totalswap) ||
2443 __put_user(s.freeswap, &info->freeswap) ||
2444 __put_user(s.procs, &info->procs) ||
2445 __put_user(s.totalhigh, &info->totalhigh) ||
2446 __put_user(s.freehigh, &info->freehigh) ||
2447 __put_user(s.mem_unit, &info->mem_unit))
2448 return -EFAULT;
2449
2450 return 0;
2451}
2452#endif /* CONFIG_COMPAT */
diff --git a/kernel/timer.c b/kernel/timer.c
index dbf7a78a1ef1..09bca8ce9771 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * linux/kernel/timer.c 2 * linux/kernel/timer.c
3 * 3 *
4 * Kernel internal timers, basic process system calls 4 * Kernel internal timers
5 * 5 *
6 * Copyright (C) 1991, 1992 Linus Torvalds 6 * Copyright (C) 1991, 1992 Linus Torvalds
7 * 7 *
@@ -41,6 +41,7 @@
41#include <linux/sched.h> 41#include <linux/sched.h>
42#include <linux/sched/sysctl.h> 42#include <linux/sched/sysctl.h>
43#include <linux/slab.h> 43#include <linux/slab.h>
44#include <linux/compat.h>
44 45
45#include <asm/uaccess.h> 46#include <asm/uaccess.h>
46#include <asm/unistd.h> 47#include <asm/unistd.h>
@@ -1395,61 +1396,6 @@ SYSCALL_DEFINE1(alarm, unsigned int, seconds)
1395 1396
1396#endif 1397#endif
1397 1398
1398/**
1399 * sys_getpid - return the thread group id of the current process
1400 *
1401 * Note, despite the name, this returns the tgid not the pid. The tgid and
1402 * the pid are identical unless CLONE_THREAD was specified on clone() in
1403 * which case the tgid is the same in all threads of the same group.
1404 *
1405 * This is SMP safe as current->tgid does not change.
1406 */
1407SYSCALL_DEFINE0(getpid)
1408{
1409 return task_tgid_vnr(current);
1410}
1411
1412/*
1413 * Accessing ->real_parent is not SMP-safe, it could
1414 * change from under us. However, we can use a stale
1415 * value of ->real_parent under rcu_read_lock(), see
1416 * release_task()->call_rcu(delayed_put_task_struct).
1417 */
1418SYSCALL_DEFINE0(getppid)
1419{
1420 int pid;
1421
1422 rcu_read_lock();
1423 pid = task_tgid_vnr(rcu_dereference(current->real_parent));
1424 rcu_read_unlock();
1425
1426 return pid;
1427}
1428
1429SYSCALL_DEFINE0(getuid)
1430{
1431 /* Only we change this so SMP safe */
1432 return from_kuid_munged(current_user_ns(), current_uid());
1433}
1434
1435SYSCALL_DEFINE0(geteuid)
1436{
1437 /* Only we change this so SMP safe */
1438 return from_kuid_munged(current_user_ns(), current_euid());
1439}
1440
1441SYSCALL_DEFINE0(getgid)
1442{
1443 /* Only we change this so SMP safe */
1444 return from_kgid_munged(current_user_ns(), current_gid());
1445}
1446
1447SYSCALL_DEFINE0(getegid)
1448{
1449 /* Only we change this so SMP safe */
1450 return from_kgid_munged(current_user_ns(), current_egid());
1451}
1452
1453static void process_timeout(unsigned long __data) 1399static void process_timeout(unsigned long __data)
1454{ 1400{
1455 wake_up_process((struct task_struct *)__data); 1401 wake_up_process((struct task_struct *)__data);
@@ -1557,91 +1503,6 @@ signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1557} 1503}
1558EXPORT_SYMBOL(schedule_timeout_uninterruptible); 1504EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1559 1505
1560/* Thread ID - the internal kernel "pid" */
1561SYSCALL_DEFINE0(gettid)
1562{
1563 return task_pid_vnr(current);
1564}
1565
1566/**
1567 * do_sysinfo - fill in sysinfo struct
1568 * @info: pointer to buffer to fill
1569 */
1570int do_sysinfo(struct sysinfo *info)
1571{
1572 unsigned long mem_total, sav_total;
1573 unsigned int mem_unit, bitcount;
1574 struct timespec tp;
1575
1576 memset(info, 0, sizeof(struct sysinfo));
1577
1578 ktime_get_ts(&tp);
1579 monotonic_to_bootbased(&tp);
1580 info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
1581
1582 get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
1583
1584 info->procs = nr_threads;
1585
1586 si_meminfo(info);
1587 si_swapinfo(info);
1588
1589 /*
1590 * If the sum of all the available memory (i.e. ram + swap)
1591 * is less than can be stored in a 32 bit unsigned long then
1592 * we can be binary compatible with 2.2.x kernels. If not,
1593 * well, in that case 2.2.x was broken anyways...
1594 *
1595 * -Erik Andersen <andersee@debian.org>
1596 */
1597
1598 mem_total = info->totalram + info->totalswap;
1599 if (mem_total < info->totalram || mem_total < info->totalswap)
1600 goto out;
1601 bitcount = 0;
1602 mem_unit = info->mem_unit;
1603 while (mem_unit > 1) {
1604 bitcount++;
1605 mem_unit >>= 1;
1606 sav_total = mem_total;
1607 mem_total <<= 1;
1608 if (mem_total < sav_total)
1609 goto out;
1610 }
1611
1612 /*
1613 * If mem_total did not overflow, multiply all memory values by
1614 * info->mem_unit and set it to 1. This leaves things compatible
1615 * with 2.2.x, and also retains compatibility with earlier 2.4.x
1616 * kernels...
1617 */
1618
1619 info->mem_unit = 1;
1620 info->totalram <<= bitcount;
1621 info->freeram <<= bitcount;
1622 info->sharedram <<= bitcount;
1623 info->bufferram <<= bitcount;
1624 info->totalswap <<= bitcount;
1625 info->freeswap <<= bitcount;
1626 info->totalhigh <<= bitcount;
1627 info->freehigh <<= bitcount;
1628
1629out:
1630 return 0;
1631}
1632
1633SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
1634{
1635 struct sysinfo val;
1636
1637 do_sysinfo(&val);
1638
1639 if (copy_to_user(info, &val, sizeof(struct sysinfo)))
1640 return -EFAULT;
1641
1642 return 0;
1643}
1644
1645static int __cpuinit init_timers_cpu(int cpu) 1506static int __cpuinit init_timers_cpu(int cpu)
1646{ 1507{
1647 int j; 1508 int j;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 154aa12af48e..4aa9f5bc6b2d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -46,6 +46,7 @@
46#include <linux/rculist.h> 46#include <linux/rculist.h>
47#include <linux/nodemask.h> 47#include <linux/nodemask.h>
48#include <linux/moduleparam.h> 48#include <linux/moduleparam.h>
49#include <linux/uaccess.h>
49 50
50#include "workqueue_internal.h" 51#include "workqueue_internal.h"
51 52
@@ -2197,6 +2198,7 @@ __acquires(&pool->lock)
2197 worker->current_work = NULL; 2198 worker->current_work = NULL;
2198 worker->current_func = NULL; 2199 worker->current_func = NULL;
2199 worker->current_pwq = NULL; 2200 worker->current_pwq = NULL;
2201 worker->desc_valid = false;
2200 pwq_dec_nr_in_flight(pwq, work_color); 2202 pwq_dec_nr_in_flight(pwq, work_color);
2201} 2203}
2202 2204
@@ -4365,6 +4367,83 @@ unsigned int work_busy(struct work_struct *work)
4365} 4367}
4366EXPORT_SYMBOL_GPL(work_busy); 4368EXPORT_SYMBOL_GPL(work_busy);
4367 4369
4370/**
4371 * set_worker_desc - set description for the current work item
4372 * @fmt: printf-style format string
4373 * @...: arguments for the format string
4374 *
4375 * This function can be called by a running work function to describe what
4376 * the work item is about. If the worker task gets dumped, this
4377 * information will be printed out together to help debugging. The
4378 * description can be at most WORKER_DESC_LEN including the trailing '\0'.
4379 */
4380void set_worker_desc(const char *fmt, ...)
4381{
4382 struct worker *worker = current_wq_worker();
4383 va_list args;
4384
4385 if (worker) {
4386 va_start(args, fmt);
4387 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
4388 va_end(args);
4389 worker->desc_valid = true;
4390 }
4391}
4392
4393/**
4394 * print_worker_info - print out worker information and description
4395 * @log_lvl: the log level to use when printing
4396 * @task: target task
4397 *
4398 * If @task is a worker and currently executing a work item, print out the
4399 * name of the workqueue being serviced and worker description set with
4400 * set_worker_desc() by the currently executing work item.
4401 *
4402 * This function can be safely called on any task as long as the
4403 * task_struct itself is accessible. While safe, this function isn't
4404 * synchronized and may print out mixups or garbages of limited length.
4405 */
4406void print_worker_info(const char *log_lvl, struct task_struct *task)
4407{
4408 work_func_t *fn = NULL;
4409 char name[WQ_NAME_LEN] = { };
4410 char desc[WORKER_DESC_LEN] = { };
4411 struct pool_workqueue *pwq = NULL;
4412 struct workqueue_struct *wq = NULL;
4413 bool desc_valid = false;
4414 struct worker *worker;
4415
4416 if (!(task->flags & PF_WQ_WORKER))
4417 return;
4418
4419 /*
4420 * This function is called without any synchronization and @task
4421 * could be in any state. Be careful with dereferences.
4422 */
4423 worker = probe_kthread_data(task);
4424
4425 /*
4426 * Carefully copy the associated workqueue's workfn and name. Keep
4427 * the original last '\0' in case the original contains garbage.
4428 */
4429 probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
4430 probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
4431 probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
4432 probe_kernel_read(name, wq->name, sizeof(name) - 1);
4433
4434 /* copy worker description */
4435 probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid));
4436 if (desc_valid)
4437 probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
4438
4439 if (fn || name[0] || desc[0]) {
4440 printk("%sWorkqueue: %s %pf", log_lvl, name, fn);
4441 if (desc[0])
4442 pr_cont(" (%s)", desc);
4443 pr_cont("\n");
4444 }
4445}
4446
4368/* 4447/*
4369 * CPU hotplug. 4448 * CPU hotplug.
4370 * 4449 *
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index 84ab6e1dc6fb..ad83c96b2ece 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -29,15 +29,25 @@ struct worker {
29 struct work_struct *current_work; /* L: work being processed */ 29 struct work_struct *current_work; /* L: work being processed */
30 work_func_t current_func; /* L: current_work's fn */ 30 work_func_t current_func; /* L: current_work's fn */
31 struct pool_workqueue *current_pwq; /* L: current_work's pwq */ 31 struct pool_workqueue *current_pwq; /* L: current_work's pwq */
32 bool desc_valid; /* ->desc is valid */
32 struct list_head scheduled; /* L: scheduled works */ 33 struct list_head scheduled; /* L: scheduled works */
34
35 /* 64 bytes boundary on 64bit, 32 on 32bit */
36
33 struct task_struct *task; /* I: worker task */ 37 struct task_struct *task; /* I: worker task */
34 struct worker_pool *pool; /* I: the associated pool */ 38 struct worker_pool *pool; /* I: the associated pool */
35 /* L: for rescuers */ 39 /* L: for rescuers */
36 /* 64 bytes boundary on 64bit, 32 on 32bit */ 40
37 unsigned long last_active; /* L: last active timestamp */ 41 unsigned long last_active; /* L: last active timestamp */
38 unsigned int flags; /* X: flags */ 42 unsigned int flags; /* X: flags */
39 int id; /* I: worker id */ 43 int id; /* I: worker id */
40 44
45 /*
46 * Opaque string set with work_set_desc(). Printed out with task
47 * dump for debugging - WARN, BUG, panic or sysrq.
48 */
49 char desc[WORKER_DESC_LEN];
50
41 /* used only by rescuers to point to the target workqueue */ 51 /* used only by rescuers to point to the target workqueue */
42 struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */ 52 struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */
43}; 53};
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 28be08c09bab..566cf2bc08ea 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1192,7 +1192,7 @@ config MEMORY_NOTIFIER_ERROR_INJECT
1192 bash: echo: write error: Cannot allocate memory 1192 bash: echo: write error: Cannot allocate memory
1193 1193
1194 To compile this code as a module, choose M here: the module will 1194 To compile this code as a module, choose M here: the module will
1195 be called pSeries-reconfig-notifier-error-inject. 1195 be called memory-notifier-error-inject.
1196 1196
1197 If unsure, say N. 1197 If unsure, say N.
1198 1198
@@ -1209,7 +1209,7 @@ config OF_RECONFIG_NOTIFIER_ERROR_INJECT
1209 notified, write the error code to "actions/<notifier event>/error". 1209 notified, write the error code to "actions/<notifier event>/error".
1210 1210
1211 To compile this code as a module, choose M here: the module will 1211 To compile this code as a module, choose M here: the module will
1212 be called memory-notifier-error-inject. 1212 be called of-reconfig-notifier-error-inject.
1213 1213
1214 If unsure, say N. 1214 If unsure, say N.
1215 1215
@@ -1292,6 +1292,24 @@ config LATENCYTOP
1292 Enable this option if you want to use the LatencyTOP tool 1292 Enable this option if you want to use the LatencyTOP tool
1293 to find out which userspace is blocking on what kernel operations. 1293 to find out which userspace is blocking on what kernel operations.
1294 1294
1295config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
1296 bool
1297
1298config DEBUG_STRICT_USER_COPY_CHECKS
1299 bool "Strict user copy size checks"
1300 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
1301 depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
1302 help
1303 Enabling this option turns a certain set of sanity checks for user
1304 copy operations into compile time failures.
1305
1306 The copy_from_user() etc checks are there to help test if there
1307 are sufficient security checks on the length argument of
1308 the copy operation, by having gcc prove that the argument is
1309 within bounds.
1310
1311 If unsure, say N.
1312
1295source mm/Kconfig.debug 1313source mm/Kconfig.debug
1296source kernel/trace/Kconfig 1314source kernel/trace/Kconfig
1297 1315
@@ -1463,5 +1481,8 @@ source "lib/Kconfig.kgdb"
1463 1481
1464source "lib/Kconfig.kmemcheck" 1482source "lib/Kconfig.kmemcheck"
1465 1483
1484config TEST_STRING_HELPERS
1485 tristate "Test functions located in the string_helpers module at runtime"
1486
1466config TEST_KSTRTOX 1487config TEST_KSTRTOX
1467 tristate "Test kstrto*() family of functions at runtime" 1488 tristate "Test kstrto*() family of functions at runtime"
diff --git a/lib/Makefile b/lib/Makefile
index 6e2cc561f761..e9c52e1b853a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -15,6 +15,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
15 is_single_threaded.o plist.o decompress.o kobject_uevent.o \ 15 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
16 earlycpio.o 16 earlycpio.o
17 17
18obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
18lib-$(CONFIG_MMU) += ioremap.o 19lib-$(CONFIG_MMU) += ioremap.o
19lib-$(CONFIG_SMP) += cpumask.o 20lib-$(CONFIG_SMP) += cpumask.o
20 21
@@ -22,8 +23,10 @@ lib-y += kobject.o klist.o
22 23
23obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ 24obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
24 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ 25 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
25 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \ 26 gcd.o lcm.o list_sort.o uuid.o flex_array.o \
26 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o 27 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o
28obj-y += string_helpers.o
29obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
27obj-y += kstrtox.o 30obj-y += kstrtox.o
28obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o 31obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
29 32
diff --git a/lib/decompress.c b/lib/decompress.c
index 31a804277282..f8fdedaf7b3d 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -38,7 +38,7 @@ struct compress_format {
38 decompress_fn decompressor; 38 decompress_fn decompressor;
39}; 39};
40 40
41static const struct compress_format compressed_formats[] __initdata = { 41static const struct compress_format compressed_formats[] __initconst = {
42 { {037, 0213}, "gzip", gunzip }, 42 { {037, 0213}, "gzip", gunzip },
43 { {037, 0236}, "gzip", gunzip }, 43 { {037, 0236}, "gzip", gunzip },
44 { {0x42, 0x5a}, "bzip2", bunzip2 }, 44 { {0x42, 0x5a}, "bzip2", bunzip2 },
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 42f4f55c9458..53bad099ebd6 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -5,11 +5,16 @@
5 5
6#include <linux/kernel.h> 6#include <linux/kernel.h>
7#include <linux/export.h> 7#include <linux/export.h>
8#include <linux/sched.h>
8 9
10/**
11 * dump_stack - dump the current task information and its stack trace
12 *
13 * Architectures can override this implementation by implementing its own.
14 */
9void dump_stack(void) 15void dump_stack(void)
10{ 16{
11 printk(KERN_NOTICE 17 dump_stack_print_info(KERN_DEFAULT);
12 "This architecture does not implement dump_stack()\n"); 18 show_stack(NULL, NULL);
13} 19}
14
15EXPORT_SYMBOL(dump_stack); 20EXPORT_SYMBOL(dump_stack);
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 46032453abd5..99fec3ae405a 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -24,6 +24,7 @@
24#include <linux/sysctl.h> 24#include <linux/sysctl.h>
25#include <linux/ctype.h> 25#include <linux/ctype.h>
26#include <linux/string.h> 26#include <linux/string.h>
27#include <linux/string_helpers.h>
27#include <linux/uaccess.h> 28#include <linux/uaccess.h>
28#include <linux/dynamic_debug.h> 29#include <linux/dynamic_debug.h>
29#include <linux/debugfs.h> 30#include <linux/debugfs.h>
@@ -276,47 +277,6 @@ static inline int parse_lineno(const char *str, unsigned int *val)
276 return 0; 277 return 0;
277} 278}
278 279
279/*
280 * Undo octal escaping in a string, inplace. This is useful to
281 * allow the user to express a query which matches a format
282 * containing embedded spaces.
283 */
284static char *unescape(char *str)
285{
286 char *in = str;
287 char *out = str;
288
289 while (*in) {
290 if (*in == '\\') {
291 if (in[1] == '\\') {
292 *out++ = '\\';
293 in += 2;
294 continue;
295 } else if (in[1] == 't') {
296 *out++ = '\t';
297 in += 2;
298 continue;
299 } else if (in[1] == 'n') {
300 *out++ = '\n';
301 in += 2;
302 continue;
303 } else if (isodigit(in[1]) &&
304 isodigit(in[2]) &&
305 isodigit(in[3])) {
306 *out++ = (((in[1] - '0') << 6) |
307 ((in[2] - '0') << 3) |
308 (in[3] - '0'));
309 in += 4;
310 continue;
311 }
312 }
313 *out++ = *in++;
314 }
315 *out = '\0';
316
317 return str;
318}
319
320static int check_set(const char **dest, char *src, char *name) 280static int check_set(const char **dest, char *src, char *name)
321{ 281{
322 int rc = 0; 282 int rc = 0;
@@ -370,8 +330,10 @@ static int ddebug_parse_query(char *words[], int nwords,
370 } else if (!strcmp(words[i], "module")) { 330 } else if (!strcmp(words[i], "module")) {
371 rc = check_set(&query->module, words[i+1], "module"); 331 rc = check_set(&query->module, words[i+1], "module");
372 } else if (!strcmp(words[i], "format")) { 332 } else if (!strcmp(words[i], "format")) {
373 rc = check_set(&query->format, unescape(words[i+1]), 333 string_unescape_inplace(words[i+1], UNESCAPE_SPACE |
374 "format"); 334 UNESCAPE_OCTAL |
335 UNESCAPE_SPECIAL);
336 rc = check_set(&query->format, words[i+1], "format");
375 } else if (!strcmp(words[i], "line")) { 337 } else if (!strcmp(words[i], "line")) {
376 char *first = words[i+1]; 338 char *first = words[i+1];
377 char *last = strchr(first, '-'); 339 char *last = strchr(first, '-');
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index af38aedbd874..122f02f9941b 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -117,8 +117,7 @@ static int black_path_count(struct rb_node *rb)
117static void check(int nr_nodes) 117static void check(int nr_nodes)
118{ 118{
119 struct rb_node *rb; 119 struct rb_node *rb;
120 int count = 0; 120 int count = 0, blacks = 0;
121 int blacks = 0;
122 u32 prev_key = 0; 121 u32 prev_key = 0;
123 122
124 for (rb = rb_first(&root); rb; rb = rb_next(rb)) { 123 for (rb = rb_first(&root); rb; rb = rb_next(rb)) {
@@ -134,7 +133,9 @@ static void check(int nr_nodes)
134 prev_key = node->key; 133 prev_key = node->key;
135 count++; 134 count++;
136 } 135 }
136
137 WARN_ON_ONCE(count != nr_nodes); 137 WARN_ON_ONCE(count != nr_nodes);
138 WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1);
138} 139}
139 140
140static void check_augmented(int nr_nodes) 141static void check_augmented(int nr_nodes)
@@ -148,7 +149,7 @@ static void check_augmented(int nr_nodes)
148 } 149 }
149} 150}
150 151
151static int rbtree_test_init(void) 152static int __init rbtree_test_init(void)
152{ 153{
153 int i, j; 154 int i, j;
154 cycles_t time1, time2, time; 155 cycles_t time1, time2, time;
@@ -221,7 +222,7 @@ static int rbtree_test_init(void)
221 return -EAGAIN; /* Fail will directly unload the module */ 222 return -EAGAIN; /* Fail will directly unload the module */
222} 223}
223 224
224static void rbtree_test_exit(void) 225static void __exit rbtree_test_exit(void)
225{ 226{
226 printk(KERN_ALERT "test exit\n"); 227 printk(KERN_ALERT "test exit\n");
227} 228}
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 1cffc223bff5..ed5c1454dd62 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -2,10 +2,12 @@
2 * Helpers for formatting and printing strings 2 * Helpers for formatting and printing strings
3 * 3 *
4 * Copyright 31 August 2008 James Bottomley 4 * Copyright 31 August 2008 James Bottomley
5 * Copyright (C) 2013, Intel Corporation
5 */ 6 */
6#include <linux/kernel.h> 7#include <linux/kernel.h>
7#include <linux/math64.h> 8#include <linux/math64.h>
8#include <linux/export.h> 9#include <linux/export.h>
10#include <linux/ctype.h>
9#include <linux/string_helpers.h> 11#include <linux/string_helpers.h>
10 12
11/** 13/**
@@ -66,3 +68,134 @@ int string_get_size(u64 size, const enum string_size_units units,
66 return 0; 68 return 0;
67} 69}
68EXPORT_SYMBOL(string_get_size); 70EXPORT_SYMBOL(string_get_size);
71
72static bool unescape_space(char **src, char **dst)
73{
74 char *p = *dst, *q = *src;
75
76 switch (*q) {
77 case 'n':
78 *p = '\n';
79 break;
80 case 'r':
81 *p = '\r';
82 break;
83 case 't':
84 *p = '\t';
85 break;
86 case 'v':
87 *p = '\v';
88 break;
89 case 'f':
90 *p = '\f';
91 break;
92 default:
93 return false;
94 }
95 *dst += 1;
96 *src += 1;
97 return true;
98}
99
100static bool unescape_octal(char **src, char **dst)
101{
102 char *p = *dst, *q = *src;
103 u8 num;
104
105 if (isodigit(*q) == 0)
106 return false;
107
108 num = (*q++) & 7;
109 while (num < 32 && isodigit(*q) && (q - *src < 3)) {
110 num <<= 3;
111 num += (*q++) & 7;
112 }
113 *p = num;
114 *dst += 1;
115 *src = q;
116 return true;
117}
118
119static bool unescape_hex(char **src, char **dst)
120{
121 char *p = *dst, *q = *src;
122 int digit;
123 u8 num;
124
125 if (*q++ != 'x')
126 return false;
127
128 num = digit = hex_to_bin(*q++);
129 if (digit < 0)
130 return false;
131
132 digit = hex_to_bin(*q);
133 if (digit >= 0) {
134 q++;
135 num = (num << 4) | digit;
136 }
137 *p = num;
138 *dst += 1;
139 *src = q;
140 return true;
141}
142
143static bool unescape_special(char **src, char **dst)
144{
145 char *p = *dst, *q = *src;
146
147 switch (*q) {
148 case '\"':
149 *p = '\"';
150 break;
151 case '\\':
152 *p = '\\';
153 break;
154 case 'a':
155 *p = '\a';
156 break;
157 case 'e':
158 *p = '\e';
159 break;
160 default:
161 return false;
162 }
163 *dst += 1;
164 *src += 1;
165 return true;
166}
167
168int string_unescape(char *src, char *dst, size_t size, unsigned int flags)
169{
170 char *out = dst;
171
172 while (*src && --size) {
173 if (src[0] == '\\' && src[1] != '\0' && size > 1) {
174 src++;
175 size--;
176
177 if (flags & UNESCAPE_SPACE &&
178 unescape_space(&src, &out))
179 continue;
180
181 if (flags & UNESCAPE_OCTAL &&
182 unescape_octal(&src, &out))
183 continue;
184
185 if (flags & UNESCAPE_HEX &&
186 unescape_hex(&src, &out))
187 continue;
188
189 if (flags & UNESCAPE_SPECIAL &&
190 unescape_special(&src, &out))
191 continue;
192
193 *out++ = '\\';
194 }
195 *out++ = *src++;
196 }
197 *out = '\0';
198
199 return out - dst;
200}
201EXPORT_SYMBOL(string_unescape);
diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c
new file mode 100644
index 000000000000..6ac48de04c0e
--- /dev/null
+++ b/lib/test-string_helpers.c
@@ -0,0 +1,103 @@
1/*
2 * Test cases for lib/string_helpers.c module.
3 */
4#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5
6#include <linux/init.h>
7#include <linux/kernel.h>
8#include <linux/module.h>
9#include <linux/random.h>
10#include <linux/string.h>
11#include <linux/string_helpers.h>
12
13struct test_string {
14 const char *in;
15 const char *out;
16 unsigned int flags;
17};
18
19static const struct test_string strings[] __initconst = {
20 {
21 .in = "\\f\\ \\n\\r\\t\\v",
22 .out = "\f\\ \n\r\t\v",
23 .flags = UNESCAPE_SPACE,
24 },
25 {
26 .in = "\\40\\1\\387\\0064\\05\\040\\8a\\110\\777",
27 .out = " \001\00387\0064\005 \\8aH?7",
28 .flags = UNESCAPE_OCTAL,
29 },
30 {
31 .in = "\\xv\\xa\\x2c\\xD\\x6f2",
32 .out = "\\xv\n,\ro2",
33 .flags = UNESCAPE_HEX,
34 },
35 {
36 .in = "\\h\\\\\\\"\\a\\e\\",
37 .out = "\\h\\\"\a\e\\",
38 .flags = UNESCAPE_SPECIAL,
39 },
40};
41
42static void __init test_string_unescape(unsigned int flags, bool inplace)
43{
44 char in[256];
45 char out_test[256];
46 char out_real[256];
47 int i, p = 0, q_test = 0, q_real = sizeof(out_real);
48
49 for (i = 0; i < ARRAY_SIZE(strings); i++) {
50 const char *s = strings[i].in;
51 int len = strlen(strings[i].in);
52
53 /* Copy string to in buffer */
54 memcpy(&in[p], s, len);
55 p += len;
56
57 /* Copy expected result for given flags */
58 if (flags & strings[i].flags) {
59 s = strings[i].out;
60 len = strlen(strings[i].out);
61 }
62 memcpy(&out_test[q_test], s, len);
63 q_test += len;
64 }
65 in[p++] = '\0';
66
67 /* Call string_unescape and compare result */
68 if (inplace) {
69 memcpy(out_real, in, p);
70 if (flags == UNESCAPE_ANY)
71 q_real = string_unescape_any_inplace(out_real);
72 else
73 q_real = string_unescape_inplace(out_real, flags);
74 } else if (flags == UNESCAPE_ANY) {
75 q_real = string_unescape_any(in, out_real, q_real);
76 } else {
77 q_real = string_unescape(in, out_real, q_real, flags);
78 }
79
80 if (q_real != q_test || memcmp(out_test, out_real, q_test)) {
81 pr_warn("Test failed: flags = %u\n", flags);
82 print_hex_dump(KERN_WARNING, "Input: ",
83 DUMP_PREFIX_NONE, 16, 1, in, p - 1, true);
84 print_hex_dump(KERN_WARNING, "Expected: ",
85 DUMP_PREFIX_NONE, 16, 1, out_test, q_test, true);
86 print_hex_dump(KERN_WARNING, "Got: ",
87 DUMP_PREFIX_NONE, 16, 1, out_real, q_real, true);
88 }
89}
90
91static int __init test_string_helpers_init(void)
92{
93 unsigned int i;
94
95 pr_info("Running tests...\n");
96 for (i = 0; i < UNESCAPE_ANY + 1; i++)
97 test_string_unescape(i, false);
98 test_string_unescape(get_random_int() % (UNESCAPE_ANY + 1), true);
99
100 return -EINVAL;
101}
102module_init(test_string_helpers_init);
103MODULE_LICENSE("Dual BSD/GPL");
diff --git a/arch/s390/lib/usercopy.c b/lib/usercopy.c
index 14b363fec8a2..4f5b1ddbcd25 100644
--- a/arch/s390/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -1,5 +1,6 @@
1#include <linux/module.h> 1#include <linux/export.h>
2#include <linux/bug.h> 2#include <linux/bug.h>
3#include <linux/uaccess.h>
3 4
4void copy_from_user_overflow(void) 5void copy_from_user_overflow(void)
5{ 6{
diff --git a/mm/cleancache.c b/mm/cleancache.c
index d76ba74be2d0..5875f48ce279 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -19,20 +19,10 @@
19#include <linux/cleancache.h> 19#include <linux/cleancache.h>
20 20
21/* 21/*
22 * This global enablement flag may be read thousands of times per second
23 * by cleancache_get/put/invalidate even on systems where cleancache_ops
24 * is not claimed (e.g. cleancache is config'ed on but remains
25 * disabled), so is preferred to the slower alternative: a function
26 * call that checks a non-global.
27 */
28int cleancache_enabled __read_mostly;
29EXPORT_SYMBOL(cleancache_enabled);
30
31/*
32 * cleancache_ops is set by cleancache_ops_register to contain the pointers 22 * cleancache_ops is set by cleancache_ops_register to contain the pointers
33 * to the cleancache "backend" implementation functions. 23 * to the cleancache "backend" implementation functions.
34 */ 24 */
35static struct cleancache_ops cleancache_ops __read_mostly; 25static struct cleancache_ops *cleancache_ops __read_mostly;
36 26
37/* 27/*
38 * Counters available via /sys/kernel/debug/frontswap (if debugfs is 28 * Counters available via /sys/kernel/debug/frontswap (if debugfs is
@@ -45,15 +35,101 @@ static u64 cleancache_puts;
45static u64 cleancache_invalidates; 35static u64 cleancache_invalidates;
46 36
47/* 37/*
48 * register operations for cleancache, returning previous thus allowing 38 * When no backend is registered all calls to init_fs and init_shared_fs
49 * detection of multiple backends and possible nesting 39 * are registered and fake poolids (FAKE_FS_POOLID_OFFSET or
40 * FAKE_SHARED_FS_POOLID_OFFSET, plus offset in the respective array
41 * [shared_|]fs_poolid_map) are given to the respective super block
42 * (sb->cleancache_poolid) and no tmem_pools are created. When a backend
43 * registers with cleancache the previous calls to init_fs and init_shared_fs
44 * are executed to create tmem_pools and set the respective poolids. While no
45 * backend is registered all "puts", "gets" and "flushes" are ignored or failed.
46 */
47#define MAX_INITIALIZABLE_FS 32
48#define FAKE_FS_POOLID_OFFSET 1000
49#define FAKE_SHARED_FS_POOLID_OFFSET 2000
50
51#define FS_NO_BACKEND (-1)
52#define FS_UNKNOWN (-2)
53static int fs_poolid_map[MAX_INITIALIZABLE_FS];
54static int shared_fs_poolid_map[MAX_INITIALIZABLE_FS];
55static char *uuids[MAX_INITIALIZABLE_FS];
56/*
57 * Mutex for the [shared_|]fs_poolid_map to guard against multiple threads
58 * invoking umount (and ending in __cleancache_invalidate_fs) and also multiple
59 * threads calling mount (and ending up in __cleancache_init_[shared|]fs).
60 */
61static DEFINE_MUTEX(poolid_mutex);
62/*
63 * When set to false (default) all calls to the cleancache functions, except
64 * the __cleancache_invalidate_fs and __cleancache_init_[shared|]fs are guarded
65 * by the if (!cleancache_ops) return. This means multiple threads (from
66 * different filesystems) will be checking cleancache_ops. The usage of a
67 * bool instead of a atomic_t or a bool guarded by a spinlock is OK - we are
68 * OK if the time between the backend's have been initialized (and
69 * cleancache_ops has been set to not NULL) and when the filesystems start
70 * actually calling the backends. The inverse (when unloading) is obviously
71 * not good - but this shim does not do that (yet).
72 */
73
74/*
75 * The backends and filesystems work all asynchronously. This is b/c the
76 * backends can be built as modules.
77 * The usual sequence of events is:
78 * a) mount / -> __cleancache_init_fs is called. We set the
79 * [shared_|]fs_poolid_map and uuids for.
80 *
81 * b). user does I/Os -> we call the rest of __cleancache_* functions
82 * which return immediately as cleancache_ops is false.
83 *
84 * c). modprobe zcache -> cleancache_register_ops. We init the backend
85 * and set cleancache_ops to true, and for any fs_poolid_map
86 * (which is set by __cleancache_init_fs) we initialize the poolid.
87 *
88 * d). user does I/Os -> now that cleancache_ops is true all the
89 * __cleancache_* functions can call the backend. They all check
90 * that fs_poolid_map is valid and if so invoke the backend.
91 *
92 * e). umount / -> __cleancache_invalidate_fs, the fs_poolid_map is
93 * reset (which is the second check in the __cleancache_* ops
94 * to call the backend).
95 *
96 * The sequence of event could also be c), followed by a), and d). and e). The
97 * c) would not happen anymore. There is also the chance of c), and one thread
98 * doing a) + d), and another doing e). For that case we depend on the
99 * filesystem calling __cleancache_invalidate_fs in the proper sequence (so
100 * that it handles all I/Os before it invalidates the fs (which is last part
101 * of unmounting process).
102 *
103 * Note: The acute reader will notice that there is no "rmmod zcache" case.
104 * This is b/c the functionality for that is not yet implemented and when
105 * done, will require some extra locking not yet devised.
106 */
107
108/*
109 * Register operations for cleancache, returning previous thus allowing
110 * detection of multiple backends and possible nesting.
50 */ 111 */
51struct cleancache_ops cleancache_register_ops(struct cleancache_ops *ops) 112struct cleancache_ops *cleancache_register_ops(struct cleancache_ops *ops)
52{ 113{
53 struct cleancache_ops old = cleancache_ops; 114 struct cleancache_ops *old = cleancache_ops;
115 int i;
54 116
55 cleancache_ops = *ops; 117 mutex_lock(&poolid_mutex);
56 cleancache_enabled = 1; 118 for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
119 if (fs_poolid_map[i] == FS_NO_BACKEND)
120 fs_poolid_map[i] = ops->init_fs(PAGE_SIZE);
121 if (shared_fs_poolid_map[i] == FS_NO_BACKEND)
122 shared_fs_poolid_map[i] = ops->init_shared_fs
123 (uuids[i], PAGE_SIZE);
124 }
125 /*
126 * We MUST set cleancache_ops _after_ we have called the backends
127 * init_fs or init_shared_fs functions. Otherwise the compiler might
128 * re-order where cleancache_ops is set in this function.
129 */
130 barrier();
131 cleancache_ops = ops;
132 mutex_unlock(&poolid_mutex);
57 return old; 133 return old;
58} 134}
59EXPORT_SYMBOL(cleancache_register_ops); 135EXPORT_SYMBOL(cleancache_register_ops);
@@ -61,15 +137,42 @@ EXPORT_SYMBOL(cleancache_register_ops);
61/* Called by a cleancache-enabled filesystem at time of mount */ 137/* Called by a cleancache-enabled filesystem at time of mount */
62void __cleancache_init_fs(struct super_block *sb) 138void __cleancache_init_fs(struct super_block *sb)
63{ 139{
64 sb->cleancache_poolid = (*cleancache_ops.init_fs)(PAGE_SIZE); 140 int i;
141
142 mutex_lock(&poolid_mutex);
143 for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
144 if (fs_poolid_map[i] == FS_UNKNOWN) {
145 sb->cleancache_poolid = i + FAKE_FS_POOLID_OFFSET;
146 if (cleancache_ops)
147 fs_poolid_map[i] = cleancache_ops->init_fs(PAGE_SIZE);
148 else
149 fs_poolid_map[i] = FS_NO_BACKEND;
150 break;
151 }
152 }
153 mutex_unlock(&poolid_mutex);
65} 154}
66EXPORT_SYMBOL(__cleancache_init_fs); 155EXPORT_SYMBOL(__cleancache_init_fs);
67 156
68/* Called by a cleancache-enabled clustered filesystem at time of mount */ 157/* Called by a cleancache-enabled clustered filesystem at time of mount */
69void __cleancache_init_shared_fs(char *uuid, struct super_block *sb) 158void __cleancache_init_shared_fs(char *uuid, struct super_block *sb)
70{ 159{
71 sb->cleancache_poolid = 160 int i;
72 (*cleancache_ops.init_shared_fs)(uuid, PAGE_SIZE); 161
162 mutex_lock(&poolid_mutex);
163 for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
164 if (shared_fs_poolid_map[i] == FS_UNKNOWN) {
165 sb->cleancache_poolid = i + FAKE_SHARED_FS_POOLID_OFFSET;
166 uuids[i] = uuid;
167 if (cleancache_ops)
168 shared_fs_poolid_map[i] = cleancache_ops->init_shared_fs
169 (uuid, PAGE_SIZE);
170 else
171 shared_fs_poolid_map[i] = FS_NO_BACKEND;
172 break;
173 }
174 }
175 mutex_unlock(&poolid_mutex);
73} 176}
74EXPORT_SYMBOL(__cleancache_init_shared_fs); 177EXPORT_SYMBOL(__cleancache_init_shared_fs);
75 178
@@ -99,27 +202,53 @@ static int cleancache_get_key(struct inode *inode,
99} 202}
100 203
101/* 204/*
205 * Returns a pool_id that is associated with a given fake poolid.
206 */
207static int get_poolid_from_fake(int fake_pool_id)
208{
209 if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET)
210 return shared_fs_poolid_map[fake_pool_id -
211 FAKE_SHARED_FS_POOLID_OFFSET];
212 else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET)
213 return fs_poolid_map[fake_pool_id - FAKE_FS_POOLID_OFFSET];
214 return FS_NO_BACKEND;
215}
216
217/*
102 * "Get" data from cleancache associated with the poolid/inode/index 218 * "Get" data from cleancache associated with the poolid/inode/index
103 * that were specified when the data was put to cleanache and, if 219 * that were specified when the data was put to cleanache and, if
104 * successful, use it to fill the specified page with data and return 0. 220 * successful, use it to fill the specified page with data and return 0.
105 * The pageframe is unchanged and returns -1 if the get fails. 221 * The pageframe is unchanged and returns -1 if the get fails.
106 * Page must be locked by caller. 222 * Page must be locked by caller.
223 *
224 * The function has two checks before any action is taken - whether
225 * a backend is registered and whether the sb->cleancache_poolid
226 * is correct.
107 */ 227 */
108int __cleancache_get_page(struct page *page) 228int __cleancache_get_page(struct page *page)
109{ 229{
110 int ret = -1; 230 int ret = -1;
111 int pool_id; 231 int pool_id;
232 int fake_pool_id;
112 struct cleancache_filekey key = { .u.key = { 0 } }; 233 struct cleancache_filekey key = { .u.key = { 0 } };
113 234
235 if (!cleancache_ops) {
236 cleancache_failed_gets++;
237 goto out;
238 }
239
114 VM_BUG_ON(!PageLocked(page)); 240 VM_BUG_ON(!PageLocked(page));
115 pool_id = page->mapping->host->i_sb->cleancache_poolid; 241 fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
116 if (pool_id < 0) 242 if (fake_pool_id < 0)
117 goto out; 243 goto out;
244 pool_id = get_poolid_from_fake(fake_pool_id);
118 245
119 if (cleancache_get_key(page->mapping->host, &key) < 0) 246 if (cleancache_get_key(page->mapping->host, &key) < 0)
120 goto out; 247 goto out;
121 248
122 ret = (*cleancache_ops.get_page)(pool_id, key, page->index, page); 249 if (pool_id >= 0)
250 ret = cleancache_ops->get_page(pool_id,
251 key, page->index, page);
123 if (ret == 0) 252 if (ret == 0)
124 cleancache_succ_gets++; 253 cleancache_succ_gets++;
125 else 254 else
@@ -134,17 +263,32 @@ EXPORT_SYMBOL(__cleancache_get_page);
134 * (previously-obtained per-filesystem) poolid and the page's, 263 * (previously-obtained per-filesystem) poolid and the page's,
135 * inode and page index. Page must be locked. Note that a put_page 264 * inode and page index. Page must be locked. Note that a put_page
136 * always "succeeds", though a subsequent get_page may succeed or fail. 265 * always "succeeds", though a subsequent get_page may succeed or fail.
266 *
267 * The function has two checks before any action is taken - whether
268 * a backend is registered and whether the sb->cleancache_poolid
269 * is correct.
137 */ 270 */
138void __cleancache_put_page(struct page *page) 271void __cleancache_put_page(struct page *page)
139{ 272{
140 int pool_id; 273 int pool_id;
274 int fake_pool_id;
141 struct cleancache_filekey key = { .u.key = { 0 } }; 275 struct cleancache_filekey key = { .u.key = { 0 } };
142 276
277 if (!cleancache_ops) {
278 cleancache_puts++;
279 return;
280 }
281
143 VM_BUG_ON(!PageLocked(page)); 282 VM_BUG_ON(!PageLocked(page));
144 pool_id = page->mapping->host->i_sb->cleancache_poolid; 283 fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
284 if (fake_pool_id < 0)
285 return;
286
287 pool_id = get_poolid_from_fake(fake_pool_id);
288
145 if (pool_id >= 0 && 289 if (pool_id >= 0 &&
146 cleancache_get_key(page->mapping->host, &key) >= 0) { 290 cleancache_get_key(page->mapping->host, &key) >= 0) {
147 (*cleancache_ops.put_page)(pool_id, key, page->index, page); 291 cleancache_ops->put_page(pool_id, key, page->index, page);
148 cleancache_puts++; 292 cleancache_puts++;
149 } 293 }
150} 294}
@@ -153,19 +297,31 @@ EXPORT_SYMBOL(__cleancache_put_page);
153/* 297/*
154 * Invalidate any data from cleancache associated with the poolid and the 298 * Invalidate any data from cleancache associated with the poolid and the
155 * page's inode and page index so that a subsequent "get" will fail. 299 * page's inode and page index so that a subsequent "get" will fail.
300 *
301 * The function has two checks before any action is taken - whether
302 * a backend is registered and whether the sb->cleancache_poolid
303 * is correct.
156 */ 304 */
157void __cleancache_invalidate_page(struct address_space *mapping, 305void __cleancache_invalidate_page(struct address_space *mapping,
158 struct page *page) 306 struct page *page)
159{ 307{
160 /* careful... page->mapping is NULL sometimes when this is called */ 308 /* careful... page->mapping is NULL sometimes when this is called */
161 int pool_id = mapping->host->i_sb->cleancache_poolid; 309 int pool_id;
310 int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
162 struct cleancache_filekey key = { .u.key = { 0 } }; 311 struct cleancache_filekey key = { .u.key = { 0 } };
163 312
164 if (pool_id >= 0) { 313 if (!cleancache_ops)
314 return;
315
316 if (fake_pool_id >= 0) {
317 pool_id = get_poolid_from_fake(fake_pool_id);
318 if (pool_id < 0)
319 return;
320
165 VM_BUG_ON(!PageLocked(page)); 321 VM_BUG_ON(!PageLocked(page));
166 if (cleancache_get_key(mapping->host, &key) >= 0) { 322 if (cleancache_get_key(mapping->host, &key) >= 0) {
167 (*cleancache_ops.invalidate_page)(pool_id, 323 cleancache_ops->invalidate_page(pool_id,
168 key, page->index); 324 key, page->index);
169 cleancache_invalidates++; 325 cleancache_invalidates++;
170 } 326 }
171 } 327 }
@@ -176,34 +332,63 @@ EXPORT_SYMBOL(__cleancache_invalidate_page);
176 * Invalidate all data from cleancache associated with the poolid and the 332 * Invalidate all data from cleancache associated with the poolid and the
177 * mappings's inode so that all subsequent gets to this poolid/inode 333 * mappings's inode so that all subsequent gets to this poolid/inode
178 * will fail. 334 * will fail.
335 *
336 * The function has two checks before any action is taken - whether
337 * a backend is registered and whether the sb->cleancache_poolid
338 * is correct.
179 */ 339 */
180void __cleancache_invalidate_inode(struct address_space *mapping) 340void __cleancache_invalidate_inode(struct address_space *mapping)
181{ 341{
182 int pool_id = mapping->host->i_sb->cleancache_poolid; 342 int pool_id;
343 int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
183 struct cleancache_filekey key = { .u.key = { 0 } }; 344 struct cleancache_filekey key = { .u.key = { 0 } };
184 345
346 if (!cleancache_ops)
347 return;
348
349 if (fake_pool_id < 0)
350 return;
351
352 pool_id = get_poolid_from_fake(fake_pool_id);
353
185 if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0) 354 if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
186 (*cleancache_ops.invalidate_inode)(pool_id, key); 355 cleancache_ops->invalidate_inode(pool_id, key);
187} 356}
188EXPORT_SYMBOL(__cleancache_invalidate_inode); 357EXPORT_SYMBOL(__cleancache_invalidate_inode);
189 358
190/* 359/*
191 * Called by any cleancache-enabled filesystem at time of unmount; 360 * Called by any cleancache-enabled filesystem at time of unmount;
192 * note that pool_id is surrendered and may be reutrned by a subsequent 361 * note that pool_id is surrendered and may be returned by a subsequent
193 * cleancache_init_fs or cleancache_init_shared_fs 362 * cleancache_init_fs or cleancache_init_shared_fs.
194 */ 363 */
195void __cleancache_invalidate_fs(struct super_block *sb) 364void __cleancache_invalidate_fs(struct super_block *sb)
196{ 365{
197 if (sb->cleancache_poolid >= 0) { 366 int index;
198 int old_poolid = sb->cleancache_poolid; 367 int fake_pool_id = sb->cleancache_poolid;
199 sb->cleancache_poolid = -1; 368 int old_poolid = fake_pool_id;
200 (*cleancache_ops.invalidate_fs)(old_poolid); 369
370 mutex_lock(&poolid_mutex);
371 if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET) {
372 index = fake_pool_id - FAKE_SHARED_FS_POOLID_OFFSET;
373 old_poolid = shared_fs_poolid_map[index];
374 shared_fs_poolid_map[index] = FS_UNKNOWN;
375 uuids[index] = NULL;
376 } else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET) {
377 index = fake_pool_id - FAKE_FS_POOLID_OFFSET;
378 old_poolid = fs_poolid_map[index];
379 fs_poolid_map[index] = FS_UNKNOWN;
201 } 380 }
381 sb->cleancache_poolid = -1;
382 if (cleancache_ops)
383 cleancache_ops->invalidate_fs(old_poolid);
384 mutex_unlock(&poolid_mutex);
202} 385}
203EXPORT_SYMBOL(__cleancache_invalidate_fs); 386EXPORT_SYMBOL(__cleancache_invalidate_fs);
204 387
205static int __init init_cleancache(void) 388static int __init init_cleancache(void)
206{ 389{
390 int i;
391
207#ifdef CONFIG_DEBUG_FS 392#ifdef CONFIG_DEBUG_FS
208 struct dentry *root = debugfs_create_dir("cleancache", NULL); 393 struct dentry *root = debugfs_create_dir("cleancache", NULL);
209 if (root == NULL) 394 if (root == NULL)
@@ -215,6 +400,10 @@ static int __init init_cleancache(void)
215 debugfs_create_u64("invalidates", S_IRUGO, 400 debugfs_create_u64("invalidates", S_IRUGO,
216 root, &cleancache_invalidates); 401 root, &cleancache_invalidates);
217#endif 402#endif
403 for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
404 fs_poolid_map[i] = FS_UNKNOWN;
405 shared_fs_poolid_map[i] = FS_UNKNOWN;
406 }
218 return 0; 407 return 0;
219} 408}
220module_init(init_cleancache) 409module_init(init_cleancache)
diff --git a/mm/frontswap.c b/mm/frontswap.c
index 2890e67d6026..538367ef1372 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -24,15 +24,7 @@
24 * frontswap_ops is set by frontswap_register_ops to contain the pointers 24 * frontswap_ops is set by frontswap_register_ops to contain the pointers
25 * to the frontswap "backend" implementation functions. 25 * to the frontswap "backend" implementation functions.
26 */ 26 */
27static struct frontswap_ops frontswap_ops __read_mostly; 27static struct frontswap_ops *frontswap_ops __read_mostly;
28
29/*
30 * This global enablement flag reduces overhead on systems where frontswap_ops
31 * has not been registered, so is preferred to the slower alternative: a
32 * function call that checks a non-global.
33 */
34bool frontswap_enabled __read_mostly;
35EXPORT_SYMBOL(frontswap_enabled);
36 28
37/* 29/*
38 * If enabled, frontswap_store will return failure even on success. As 30 * If enabled, frontswap_store will return failure even on success. As
@@ -80,16 +72,70 @@ static inline void inc_frontswap_succ_stores(void) { }
80static inline void inc_frontswap_failed_stores(void) { } 72static inline void inc_frontswap_failed_stores(void) { }
81static inline void inc_frontswap_invalidates(void) { } 73static inline void inc_frontswap_invalidates(void) { }
82#endif 74#endif
75
76/*
77 * Due to the asynchronous nature of the backends loading potentially
78 * _after_ the swap system has been activated, we have chokepoints
79 * on all frontswap functions to not call the backend until the backend
80 * has registered.
81 *
82 * Specifically when no backend is registered (nobody called
83 * frontswap_register_ops) all calls to frontswap_init (which is done via
84 * swapon -> enable_swap_info -> frontswap_init) are registered and remembered
85 * (via the setting of need_init bitmap) but fail to create tmem_pools. When a
86 * backend registers with frontswap at some later point the previous
87 * calls to frontswap_init are executed (by iterating over the need_init
88 * bitmap) to create tmem_pools and set the respective poolids. All of that is
89 * guarded by us using atomic bit operations on the 'need_init' bitmap.
90 *
 91 * This would not guard us against the user deciding to call swapoff right as
92 * we are calling the backend to initialize (so swapon is in action).
 93 * Fortunately for us, the swapon_mutex has been taken by the callee so we are
94 * OK. The other scenario where calls to frontswap_store (called via
95 * swap_writepage) is racing with frontswap_invalidate_area (called via
96 * swapoff) is again guarded by the swap subsystem.
97 *
98 * While no backend is registered all calls to frontswap_[store|load|
99 * invalidate_area|invalidate_page] are ignored or fail.
100 *
101 * The time between the backend being registered and the swap file system
102 * calling the backend (via the frontswap_* functions) is indeterminate as
103 * frontswap_ops is not atomic_t (or a value guarded by a spinlock).
104 * That is OK as we are comfortable missing some of these calls to the newly
105 * registered backend.
106 *
107 * Obviously the opposite (unloading the backend) must be done after all
108 * the frontswap_[store|load|invalidate_area|invalidate_page] start
 109 * ignoring or failing the requests - at which point frontswap_ops
110 * would have to be made in some fashion atomic.
111 */
112static DECLARE_BITMAP(need_init, MAX_SWAPFILES);
113
83/* 114/*
84 * Register operations for frontswap, returning previous thus allowing 115 * Register operations for frontswap, returning previous thus allowing
85 * detection of multiple backends and possible nesting. 116 * detection of multiple backends and possible nesting.
86 */ 117 */
87struct frontswap_ops frontswap_register_ops(struct frontswap_ops *ops) 118struct frontswap_ops *frontswap_register_ops(struct frontswap_ops *ops)
88{ 119{
89 struct frontswap_ops old = frontswap_ops; 120 struct frontswap_ops *old = frontswap_ops;
90 121 int i;
91 frontswap_ops = *ops; 122
92 frontswap_enabled = true; 123 for (i = 0; i < MAX_SWAPFILES; i++) {
124 if (test_and_clear_bit(i, need_init)) {
125 struct swap_info_struct *sis = swap_info[i];
126 /* __frontswap_init _should_ have set it! */
127 if (!sis->frontswap_map)
128 return ERR_PTR(-EINVAL);
129 ops->init(i);
130 }
131 }
132 /*
133 * We MUST have frontswap_ops set _after_ the frontswap_init's
134 * have been called. Otherwise __frontswap_store might fail. Hence
135 * the barrier to make sure compiler does not re-order us.
136 */
137 barrier();
138 frontswap_ops = ops;
93 return old; 139 return old;
94} 140}
95EXPORT_SYMBOL(frontswap_register_ops); 141EXPORT_SYMBOL(frontswap_register_ops);
@@ -115,20 +161,48 @@ EXPORT_SYMBOL(frontswap_tmem_exclusive_gets);
115/* 161/*
116 * Called when a swap device is swapon'd. 162 * Called when a swap device is swapon'd.
117 */ 163 */
118void __frontswap_init(unsigned type) 164void __frontswap_init(unsigned type, unsigned long *map)
119{ 165{
120 struct swap_info_struct *sis = swap_info[type]; 166 struct swap_info_struct *sis = swap_info[type];
121 167
122 BUG_ON(sis == NULL); 168 BUG_ON(sis == NULL);
123 if (sis->frontswap_map == NULL) 169
170 /*
171 * p->frontswap is a bitmap that we MUST have to figure out which page
172 * has gone in frontswap. Without it there is no point of continuing.
173 */
174 if (WARN_ON(!map))
124 return; 175 return;
125 frontswap_ops.init(type); 176 /*
 177 * Regardless of whether the frontswap backend has been loaded
178 * before this function or it will be later, we _MUST_ have the
179 * p->frontswap set to something valid to work properly.
180 */
181 frontswap_map_set(sis, map);
182 if (frontswap_ops)
183 frontswap_ops->init(type);
184 else {
185 BUG_ON(type > MAX_SWAPFILES);
186 set_bit(type, need_init);
187 }
126} 188}
127EXPORT_SYMBOL(__frontswap_init); 189EXPORT_SYMBOL(__frontswap_init);
128 190
129static inline void __frontswap_clear(struct swap_info_struct *sis, pgoff_t offset) 191bool __frontswap_test(struct swap_info_struct *sis,
192 pgoff_t offset)
193{
194 bool ret = false;
195
196 if (frontswap_ops && sis->frontswap_map)
197 ret = test_bit(offset, sis->frontswap_map);
198 return ret;
199}
200EXPORT_SYMBOL(__frontswap_test);
201
202static inline void __frontswap_clear(struct swap_info_struct *sis,
203 pgoff_t offset)
130{ 204{
131 frontswap_clear(sis, offset); 205 clear_bit(offset, sis->frontswap_map);
132 atomic_dec(&sis->frontswap_pages); 206 atomic_dec(&sis->frontswap_pages);
133} 207}
134 208
@@ -147,13 +221,20 @@ int __frontswap_store(struct page *page)
147 struct swap_info_struct *sis = swap_info[type]; 221 struct swap_info_struct *sis = swap_info[type];
148 pgoff_t offset = swp_offset(entry); 222 pgoff_t offset = swp_offset(entry);
149 223
224 /*
 225 * Return if no backend registered.
226 * Don't need to inc frontswap_failed_stores here.
227 */
228 if (!frontswap_ops)
229 return ret;
230
150 BUG_ON(!PageLocked(page)); 231 BUG_ON(!PageLocked(page));
151 BUG_ON(sis == NULL); 232 BUG_ON(sis == NULL);
152 if (frontswap_test(sis, offset)) 233 if (__frontswap_test(sis, offset))
153 dup = 1; 234 dup = 1;
154 ret = frontswap_ops.store(type, offset, page); 235 ret = frontswap_ops->store(type, offset, page);
155 if (ret == 0) { 236 if (ret == 0) {
156 frontswap_set(sis, offset); 237 set_bit(offset, sis->frontswap_map);
157 inc_frontswap_succ_stores(); 238 inc_frontswap_succ_stores();
158 if (!dup) 239 if (!dup)
159 atomic_inc(&sis->frontswap_pages); 240 atomic_inc(&sis->frontswap_pages);
@@ -188,13 +269,16 @@ int __frontswap_load(struct page *page)
188 269
189 BUG_ON(!PageLocked(page)); 270 BUG_ON(!PageLocked(page));
190 BUG_ON(sis == NULL); 271 BUG_ON(sis == NULL);
191 if (frontswap_test(sis, offset)) 272 /*
192 ret = frontswap_ops.load(type, offset, page); 273 * __frontswap_test() will check whether there is backend registered
274 */
275 if (__frontswap_test(sis, offset))
276 ret = frontswap_ops->load(type, offset, page);
193 if (ret == 0) { 277 if (ret == 0) {
194 inc_frontswap_loads(); 278 inc_frontswap_loads();
195 if (frontswap_tmem_exclusive_gets_enabled) { 279 if (frontswap_tmem_exclusive_gets_enabled) {
196 SetPageDirty(page); 280 SetPageDirty(page);
197 frontswap_clear(sis, offset); 281 __frontswap_clear(sis, offset);
198 } 282 }
199 } 283 }
200 return ret; 284 return ret;
@@ -210,8 +294,11 @@ void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
210 struct swap_info_struct *sis = swap_info[type]; 294 struct swap_info_struct *sis = swap_info[type];
211 295
212 BUG_ON(sis == NULL); 296 BUG_ON(sis == NULL);
213 if (frontswap_test(sis, offset)) { 297 /*
214 frontswap_ops.invalidate_page(type, offset); 298 * __frontswap_test() will check whether there is backend registered
299 */
300 if (__frontswap_test(sis, offset)) {
301 frontswap_ops->invalidate_page(type, offset);
215 __frontswap_clear(sis, offset); 302 __frontswap_clear(sis, offset);
216 inc_frontswap_invalidates(); 303 inc_frontswap_invalidates();
217 } 304 }
@@ -226,12 +313,15 @@ void __frontswap_invalidate_area(unsigned type)
226{ 313{
227 struct swap_info_struct *sis = swap_info[type]; 314 struct swap_info_struct *sis = swap_info[type];
228 315
229 BUG_ON(sis == NULL); 316 if (frontswap_ops) {
230 if (sis->frontswap_map == NULL) 317 BUG_ON(sis == NULL);
231 return; 318 if (sis->frontswap_map == NULL)
232 frontswap_ops.invalidate_area(type); 319 return;
233 atomic_set(&sis->frontswap_pages, 0); 320 frontswap_ops->invalidate_area(type);
234 memset(sis->frontswap_map, 0, sis->max / sizeof(long)); 321 atomic_set(&sis->frontswap_pages, 0);
322 memset(sis->frontswap_map, 0, sis->max / sizeof(long));
323 }
324 clear_bit(type, need_init);
235} 325}
236EXPORT_SYMBOL(__frontswap_invalidate_area); 326EXPORT_SYMBOL(__frontswap_invalidate_area);
237 327
diff --git a/mm/swapfile.c b/mm/swapfile.c
index d417efddfe74..6c340d908b27 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1509,8 +1509,7 @@ static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
1509} 1509}
1510 1510
1511static void _enable_swap_info(struct swap_info_struct *p, int prio, 1511static void _enable_swap_info(struct swap_info_struct *p, int prio,
1512 unsigned char *swap_map, 1512 unsigned char *swap_map)
1513 unsigned long *frontswap_map)
1514{ 1513{
1515 int i, prev; 1514 int i, prev;
1516 1515
@@ -1519,7 +1518,6 @@ static void _enable_swap_info(struct swap_info_struct *p, int prio,
1519 else 1518 else
1520 p->prio = --least_priority; 1519 p->prio = --least_priority;
1521 p->swap_map = swap_map; 1520 p->swap_map = swap_map;
1522 frontswap_map_set(p, frontswap_map);
1523 p->flags |= SWP_WRITEOK; 1521 p->flags |= SWP_WRITEOK;
1524 atomic_long_add(p->pages, &nr_swap_pages); 1522 atomic_long_add(p->pages, &nr_swap_pages);
1525 total_swap_pages += p->pages; 1523 total_swap_pages += p->pages;
@@ -1542,10 +1540,10 @@ static void enable_swap_info(struct swap_info_struct *p, int prio,
1542 unsigned char *swap_map, 1540 unsigned char *swap_map,
1543 unsigned long *frontswap_map) 1541 unsigned long *frontswap_map)
1544{ 1542{
1543 frontswap_init(p->type, frontswap_map);
1545 spin_lock(&swap_lock); 1544 spin_lock(&swap_lock);
1546 spin_lock(&p->lock); 1545 spin_lock(&p->lock);
1547 _enable_swap_info(p, prio, swap_map, frontswap_map); 1546 _enable_swap_info(p, prio, swap_map);
1548 frontswap_init(p->type);
1549 spin_unlock(&p->lock); 1547 spin_unlock(&p->lock);
1550 spin_unlock(&swap_lock); 1548 spin_unlock(&swap_lock);
1551} 1549}
@@ -1554,7 +1552,7 @@ static void reinsert_swap_info(struct swap_info_struct *p)
1554{ 1552{
1555 spin_lock(&swap_lock); 1553 spin_lock(&swap_lock);
1556 spin_lock(&p->lock); 1554 spin_lock(&p->lock);
1557 _enable_swap_info(p, p->prio, p->swap_map, frontswap_map_get(p)); 1555 _enable_swap_info(p, p->prio, p->swap_map);
1558 spin_unlock(&p->lock); 1556 spin_unlock(&p->lock);
1559 spin_unlock(&swap_lock); 1557 spin_unlock(&swap_lock);
1560} 1558}
@@ -1563,6 +1561,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1563{ 1561{
1564 struct swap_info_struct *p = NULL; 1562 struct swap_info_struct *p = NULL;
1565 unsigned char *swap_map; 1563 unsigned char *swap_map;
1564 unsigned long *frontswap_map;
1566 struct file *swap_file, *victim; 1565 struct file *swap_file, *victim;
1567 struct address_space *mapping; 1566 struct address_space *mapping;
1568 struct inode *inode; 1567 struct inode *inode;
@@ -1662,12 +1661,14 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1662 swap_map = p->swap_map; 1661 swap_map = p->swap_map;
1663 p->swap_map = NULL; 1662 p->swap_map = NULL;
1664 p->flags = 0; 1663 p->flags = 0;
1665 frontswap_invalidate_area(type); 1664 frontswap_map = frontswap_map_get(p);
1665 frontswap_map_set(p, NULL);
1666 spin_unlock(&p->lock); 1666 spin_unlock(&p->lock);
1667 spin_unlock(&swap_lock); 1667 spin_unlock(&swap_lock);
1668 frontswap_invalidate_area(type);
1668 mutex_unlock(&swapon_mutex); 1669 mutex_unlock(&swapon_mutex);
1669 vfree(swap_map); 1670 vfree(swap_map);
1670 vfree(frontswap_map_get(p)); 1671 vfree(frontswap_map);
1671 /* Destroy swap account informatin */ 1672 /* Destroy swap account informatin */
1672 swap_cgroup_swapoff(type); 1673 swap_cgroup_swapoff(type);
1673 1674
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index f3bffa309333..826da662886d 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -515,13 +515,6 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
515 struct jump_key *jump; 515 struct jump_key *jump;
516 516
517 str_printf(r, _("Prompt: %s\n"), _(prop->text)); 517 str_printf(r, _("Prompt: %s\n"), _(prop->text));
518 str_printf(r, _(" Defined at %s:%d\n"), prop->menu->file->name,
519 prop->menu->lineno);
520 if (!expr_is_yes(prop->visible.expr)) {
521 str_append(r, _(" Depends on: "));
522 expr_gstr_print(prop->visible.expr, r);
523 str_append(r, "\n");
524 }
525 menu = prop->menu->parent; 518 menu = prop->menu->parent;
526 for (i = 0; menu != &rootmenu && i < 8; menu = menu->parent) { 519 for (i = 0; menu != &rootmenu && i < 8; menu = menu->parent) {
527 bool accessible = menu_is_visible(menu); 520 bool accessible = menu_is_visible(menu);
@@ -572,6 +565,18 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
572} 565}
573 566
574/* 567/*
 568 * get property of type P_SYMBOL
569 */
570static struct property *get_symbol_prop(struct symbol *sym)
571{
572 struct property *prop = NULL;
573
574 for_all_properties(sym, prop, P_SYMBOL)
575 break;
576 return prop;
577}
578
579/*
575 * head is optional and may be NULL 580 * head is optional and may be NULL
576 */ 581 */
577void get_symbol_str(struct gstr *r, struct symbol *sym, 582void get_symbol_str(struct gstr *r, struct symbol *sym,
@@ -595,6 +600,14 @@ void get_symbol_str(struct gstr *r, struct symbol *sym,
595 } 600 }
596 for_all_prompts(sym, prop) 601 for_all_prompts(sym, prop)
597 get_prompt_str(r, prop, head); 602 get_prompt_str(r, prop, head);
603 prop = get_symbol_prop(sym);
604 str_printf(r, _(" Defined at %s:%d\n"), prop->menu->file->name,
605 prop->menu->lineno);
606 if (!expr_is_yes(prop->visible.expr)) {
607 str_append(r, _(" Depends on: "));
608 expr_gstr_print(prop->visible.expr, r);
609 str_append(r, "\n");
610 }
598 hit = false; 611 hit = false;
599 for_all_properties(sym, prop, P_SELECT) { 612 for_all_properties(sym, prop, P_SELECT) {
600 if (!hit) { 613 if (!hit) {
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 4bd6bdb74193..c411f9bb156b 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -93,9 +93,16 @@ static void umh_keys_cleanup(struct subprocess_info *info)
93static int call_usermodehelper_keys(char *path, char **argv, char **envp, 93static int call_usermodehelper_keys(char *path, char **argv, char **envp,
94 struct key *session_keyring, int wait) 94 struct key *session_keyring, int wait)
95{ 95{
96 return call_usermodehelper_fns(path, argv, envp, wait, 96 struct subprocess_info *info;
97 umh_keys_init, umh_keys_cleanup, 97
98 key_get(session_keyring)); 98 info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL,
99 umh_keys_init, umh_keys_cleanup,
100 session_keyring);
101 if (!info)
102 return -ENOMEM;
103
104 key_get(session_keyring);
105 return call_usermodehelper_exec(info, wait);
99} 106}
100 107
101/* 108/*
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 3cc0ad7ae863..fa6ea69f2e48 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -1,10 +1,12 @@
1TARGETS = breakpoints 1TARGETS = breakpoints
2TARGETS += cpu-hotplug
3TARGETS += efivarfs
2TARGETS += kcmp 4TARGETS += kcmp
5TARGETS += memory-hotplug
3TARGETS += mqueue 6TARGETS += mqueue
7TARGETS += ptrace
8TARGETS += soft-dirty
4TARGETS += vm 9TARGETS += vm
5TARGETS += cpu-hotplug
6TARGETS += memory-hotplug
7TARGETS += efivarfs
8 10
9all: 11all:
10 for TARGET in $(TARGETS); do \ 12 for TARGET in $(TARGETS); do \
diff --git a/tools/testing/selftests/ptrace/Makefile b/tools/testing/selftests/ptrace/Makefile
new file mode 100644
index 000000000000..47ae2d385ce8
--- /dev/null
+++ b/tools/testing/selftests/ptrace/Makefile
@@ -0,0 +1,10 @@
1CFLAGS += -iquote../../../../include/uapi -Wall
2peeksiginfo: peeksiginfo.c
3
4all: peeksiginfo
5
6clean:
7 rm -f peeksiginfo
8
9run_tests: all
10 @./peeksiginfo || echo "peeksiginfo selftests: [FAIL]"
diff --git a/tools/testing/selftests/ptrace/peeksiginfo.c b/tools/testing/selftests/ptrace/peeksiginfo.c
new file mode 100644
index 000000000000..d46558b1f58d
--- /dev/null
+++ b/tools/testing/selftests/ptrace/peeksiginfo.c
@@ -0,0 +1,214 @@
1#define _GNU_SOURCE
2#include <stdio.h>
3#include <signal.h>
4#include <unistd.h>
5#include <errno.h>
6#include <linux/types.h>
7#include <sys/wait.h>
8#include <sys/syscall.h>
9#include <sys/user.h>
10#include <sys/mman.h>
11
12#include "linux/ptrace.h"
13
14static int sys_rt_sigqueueinfo(pid_t tgid, int sig, siginfo_t *uinfo)
15{
16 return syscall(SYS_rt_sigqueueinfo, tgid, sig, uinfo);
17}
18
19static int sys_rt_tgsigqueueinfo(pid_t tgid, pid_t tid,
20 int sig, siginfo_t *uinfo)
21{
22 return syscall(SYS_rt_tgsigqueueinfo, tgid, tid, sig, uinfo);
23}
24
25static int sys_ptrace(int request, pid_t pid, void *addr, void *data)
26{
27 return syscall(SYS_ptrace, request, pid, addr, data);
28}
29
30#define SIGNR 10
31#define TEST_SICODE_PRIV -1
32#define TEST_SICODE_SHARE -2
33
34#define err(fmt, ...) \
35 fprintf(stderr, \
36 "Error (%s:%d): " fmt, \
37 __FILE__, __LINE__, ##__VA_ARGS__)
38
39static int check_error_paths(pid_t child)
40{
41 struct ptrace_peeksiginfo_args arg;
42 int ret, exit_code = -1;
43 void *addr_rw, *addr_ro;
44
45 /*
46 * Allocate two contiguous pages. The first one is for read-write,
47 * another is for read-only.
48 */
49 addr_rw = mmap(NULL, 2 * PAGE_SIZE, PROT_READ | PROT_WRITE,
50 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
51 if (addr_rw == MAP_FAILED) {
52 err("mmap() failed: %m\n");
53 return 1;
54 }
55
56 addr_ro = mmap(addr_rw + PAGE_SIZE, PAGE_SIZE, PROT_READ,
57 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
58 if (addr_ro == MAP_FAILED) {
59 err("mmap() failed: %m\n");
60 goto out;
61 }
62
63 arg.nr = SIGNR;
64 arg.off = 0;
65
66 /* Unsupported flags */
67 arg.flags = ~0;
68 ret = sys_ptrace(PTRACE_PEEKSIGINFO, child, &arg, addr_rw);
69 if (ret != -1 || errno != EINVAL) {
70 err("sys_ptrace() returns %d (expected -1),"
71 " errno %d (expected %d): %m\n",
72 ret, errno, EINVAL);
73 goto out;
74 }
75 arg.flags = 0;
76
77 /* A part of the buffer is read-only */
78 ret = sys_ptrace(PTRACE_PEEKSIGINFO, child, &arg,
79 addr_ro - sizeof(siginfo_t) * 2);
80 if (ret != 2) {
81 err("sys_ptrace() returns %d (expected 2): %m\n", ret);
82 goto out;
83 }
84
85 /* Read-only buffer */
86 ret = sys_ptrace(PTRACE_PEEKSIGINFO, child, &arg, addr_ro);
87 if (ret != -1 && errno != EFAULT) {
88 err("sys_ptrace() returns %d (expected -1),"
89 " errno %d (expected %d): %m\n",
90 ret, errno, EFAULT);
91 goto out;
92 }
93
94 exit_code = 0;
95out:
96 munmap(addr_rw, 2 * PAGE_SIZE);
97 return exit_code;
98}
99
100int check_direct_path(pid_t child, int shared, int nr)
101{
102 struct ptrace_peeksiginfo_args arg = {.flags = 0, .nr = nr, .off = 0};
103 int i, j, ret, exit_code = -1;
104 siginfo_t siginfo[SIGNR];
105 int si_code;
106
107 if (shared == 1) {
108 arg.flags = PTRACE_PEEKSIGINFO_SHARED;
109 si_code = TEST_SICODE_SHARE;
110 } else {
111 arg.flags = 0;
112 si_code = TEST_SICODE_PRIV;
113 }
114
115 for (i = 0; i < SIGNR; ) {
116 arg.off = i;
117 ret = sys_ptrace(PTRACE_PEEKSIGINFO, child, &arg, siginfo);
118 if (ret == -1) {
119 err("ptrace() failed: %m\n");
120 goto out;
121 }
122
123 if (ret == 0)
124 break;
125
126 for (j = 0; j < ret; j++, i++) {
127 if (siginfo[j].si_code == si_code &&
128 siginfo[j].si_int == i)
129 continue;
130
131 err("%d: Wrong siginfo i=%d si_code=%d si_int=%d\n",
132 shared, i, siginfo[j].si_code, siginfo[j].si_int);
133 goto out;
134 }
135 }
136
137 if (i != SIGNR) {
138 err("Only %d signals were read\n", i);
139 goto out;
140 }
141
142 exit_code = 0;
143out:
144 return exit_code;
145}
146
147int main(int argc, char *argv[])
148{
149 siginfo_t siginfo[SIGNR];
150 int i, exit_code = 1;
151 sigset_t blockmask;
152 pid_t child;
153
154 sigemptyset(&blockmask);
155 sigaddset(&blockmask, SIGRTMIN);
156 sigprocmask(SIG_BLOCK, &blockmask, NULL);
157
158 child = fork();
159 if (child == -1) {
160 err("fork() failed: %m");
161 return 1;
162 } else if (child == 0) {
163 pid_t ppid = getppid();
164 while (1) {
165 if (ppid != getppid())
166 break;
167 sleep(1);
168 }
169 return 1;
170 }
171
172 /* Send signals in process-wide and per-thread queues */
173 for (i = 0; i < SIGNR; i++) {
174 siginfo->si_code = TEST_SICODE_SHARE;
175 siginfo->si_int = i;
176 sys_rt_sigqueueinfo(child, SIGRTMIN, siginfo);
177
178 siginfo->si_code = TEST_SICODE_PRIV;
179 siginfo->si_int = i;
180 sys_rt_tgsigqueueinfo(child, child, SIGRTMIN, siginfo);
181 }
182
183 if (sys_ptrace(PTRACE_ATTACH, child, NULL, NULL) == -1)
184 return 1;
185
186 waitpid(child, NULL, 0);
187
 188 /* Dump signals one by one */
189 if (check_direct_path(child, 0, 1))
190 goto out;
191 /* Dump all signals for one call */
192 if (check_direct_path(child, 0, SIGNR))
193 goto out;
194
195 /*
196 * Dump signal from the process-wide queue.
 197 * The number of signals is not a multiple of the buffer size
198 */
199 if (check_direct_path(child, 1, 3))
200 goto out;
201
202 if (check_error_paths(child))
203 goto out;
204
205 printf("PASS\n");
206 exit_code = 0;
207out:
208 if (sys_ptrace(PTRACE_KILL, child, NULL, NULL) == -1)
209 return 1;
210
211 waitpid(child, NULL, 0);
212
213 return exit_code;
214}
diff --git a/tools/testing/selftests/soft-dirty/Makefile b/tools/testing/selftests/soft-dirty/Makefile
new file mode 100644
index 000000000000..a9cdc823d6e0
--- /dev/null
+++ b/tools/testing/selftests/soft-dirty/Makefile
@@ -0,0 +1,10 @@
1CFLAGS += -iquote../../../../include/uapi -Wall
2soft-dirty: soft-dirty.c
3
4all: soft-dirty
5
6clean:
7 rm -f soft-dirty
8
9run_tests: all
10 @./soft-dirty || echo "soft-dirty selftests: [FAIL]"
diff --git a/tools/testing/selftests/soft-dirty/soft-dirty.c b/tools/testing/selftests/soft-dirty/soft-dirty.c
new file mode 100644
index 000000000000..aba4f87f87f0
--- /dev/null
+++ b/tools/testing/selftests/soft-dirty/soft-dirty.c
@@ -0,0 +1,114 @@
1#include <stdlib.h>
2#include <stdio.h>
3#include <sys/mman.h>
4#include <unistd.h>
5#include <fcntl.h>
6#include <sys/types.h>
7
8typedef unsigned long long u64;
9
10#define PME_PRESENT (1ULL << 63)
11#define PME_SOFT_DIRTY (1Ull << 55)
12
13#define PAGES_TO_TEST 3
14#ifndef PAGE_SIZE
15#define PAGE_SIZE 4096
16#endif
17
18static void get_pagemap2(char *mem, u64 *map)
19{
20 int fd;
21
22 fd = open("/proc/self/pagemap2", O_RDONLY);
23 if (fd < 0) {
24 perror("Can't open pagemap2");
25 exit(1);
26 }
27
28 lseek(fd, (unsigned long)mem / PAGE_SIZE * sizeof(u64), SEEK_SET);
29 read(fd, map, sizeof(u64) * PAGES_TO_TEST);
30 close(fd);
31}
32
33static inline char map_p(u64 map)
34{
35 return map & PME_PRESENT ? 'p' : '-';
36}
37
38static inline char map_sd(u64 map)
39{
40 return map & PME_SOFT_DIRTY ? 'd' : '-';
41}
42
43static int check_pte(int step, int page, u64 *map, u64 want)
44{
45 if ((map[page] & want) != want) {
46 printf("Step %d Page %d has %c%c, want %c%c\n",
47 step, page,
48 map_p(map[page]), map_sd(map[page]),
49 map_p(want), map_sd(want));
50 return 1;
51 }
52
53 return 0;
54}
55
56static void clear_refs(void)
57{
58 int fd;
59 char *v = "4";
60
61 fd = open("/proc/self/clear_refs", O_WRONLY);
62 if (write(fd, v, 3) < 3) {
63 perror("Can't clear soft-dirty bit");
64 exit(1);
65 }
66 close(fd);
67}
68
69int main(void)
70{
71 char *mem, x;
72 u64 map[PAGES_TO_TEST];
73
74 mem = mmap(NULL, PAGES_TO_TEST * PAGE_SIZE,
75 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, 0, 0);
76
77 x = mem[0];
78 mem[2 * PAGE_SIZE] = 'c';
79 get_pagemap2(mem, map);
80
81 if (check_pte(1, 0, map, PME_PRESENT))
82 return 1;
83 if (check_pte(1, 1, map, 0))
84 return 1;
85 if (check_pte(1, 2, map, PME_PRESENT | PME_SOFT_DIRTY))
86 return 1;
87
88 clear_refs();
89 get_pagemap2(mem, map);
90
91 if (check_pte(2, 0, map, PME_PRESENT))
92 return 1;
93 if (check_pte(2, 1, map, 0))
94 return 1;
95 if (check_pte(2, 2, map, PME_PRESENT))
96 return 1;
97
98 mem[0] = 'a';
99 mem[PAGE_SIZE] = 'b';
100 x = mem[2 * PAGE_SIZE];
101 get_pagemap2(mem, map);
102
103 if (check_pte(3, 0, map, PME_PRESENT | PME_SOFT_DIRTY))
104 return 1;
105 if (check_pte(3, 1, map, PME_PRESENT | PME_SOFT_DIRTY))
106 return 1;
107 if (check_pte(3, 2, map, PME_PRESENT))
108 return 1;
109
110 (void)x; /* gcc warn */
111
112 printf("PASS\n");
113 return 0;
114}