-rw-r--r-- Documentation/DocBook/kernel-locking.tmpl | 7
-rw-r--r-- Documentation/hwmon/f71882fg | 4
-rw-r--r-- Documentation/powerpc/booting-without-of.txt | 31
-rw-r--r-- Documentation/powerpc/hvcs.txt | 2
-rw-r--r-- MAINTAINERS | 56
-rw-r--r-- arch/arm/plat-samsung/dev-hsmmc.c | 2
-rw-r--r-- arch/arm/plat-samsung/dev-hsmmc1.c | 2
-rw-r--r-- arch/arm/plat-samsung/dev-hsmmc2.c | 2
-rw-r--r-- arch/ia64/include/asm/unistd.h | 2
-rw-r--r-- arch/m68k/include/asm/ide.h | 13
-rw-r--r-- arch/m68knommu/kernel/process.c | 10
-rw-r--r-- arch/microblaze/kernel/prom_parse.c | 2
-rw-r--r-- arch/microblaze/pci/pci-common.c | 5
-rw-r--r-- arch/microblaze/pci/xilinx_pci.c | 1
-rw-r--r-- arch/um/include/asm/dma-mapping.h | 7
-rw-r--r-- arch/x86/Kconfig | 9
-rw-r--r-- arch/x86/include/asm/pgtable_32.h | 1
-rw-r--r-- arch/x86/include/asm/trampoline.h | 5
-rw-r--r-- arch/x86/kernel/apic/io_apic.c | 2
-rw-r--r-- arch/x86/kernel/cpu/amd.c | 2
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel.c | 81
-rw-r--r-- arch/x86/kernel/head_32.S | 8
-rw-r--r-- arch/x86/kernel/kprobes.c | 25
-rw-r--r-- arch/x86/kernel/setup.c | 2
-rw-r--r-- arch/x86/kernel/smpboot.c | 51
-rw-r--r-- arch/x86/kernel/trampoline.c | 18
-rw-r--r-- drivers/ata/sata_dwc_460ex.c | 4
-rw-r--r-- drivers/block/xsysace.c | 1
-rw-r--r-- drivers/char/pty.c | 4
-rw-r--r-- drivers/char/tty_io.c | 92
-rw-r--r-- drivers/char/xilinx_hwicap/xilinx_hwicap.c | 1
-rw-r--r-- drivers/hid/hid-core.c | 1
-rw-r--r-- drivers/hid/hid-egalax.c | 9
-rw-r--r-- drivers/hid/hid-ids.h | 1
-rw-r--r-- drivers/hid/hid-picolcd.c | 4
-rw-r--r-- drivers/hid/usbhid/hiddev.c | 11
-rw-r--r-- drivers/hwmon/Kconfig | 6
-rw-r--r-- drivers/hwmon/f71882fg.c | 83
-rw-r--r-- drivers/md/md.c | 44
-rw-r--r-- drivers/md/raid1.c | 21
-rw-r--r-- drivers/md/raid10.c | 17
-rw-r--r-- drivers/md/raid5.c | 13
-rw-r--r-- drivers/mmc/core/host.c | 2
-rw-r--r-- drivers/mmc/host/Kconfig | 2
-rw-r--r-- drivers/mmc/host/sdhci-s3c.c | 6
-rw-r--r-- drivers/mmc/host/sdhci.c | 3
-rw-r--r-- drivers/mmc/host/sdhci.h | 2
-rw-r--r-- drivers/mtd/maps/physmap_of.c | 1
-rw-r--r-- drivers/mtd/nand/nand_base.c | 11
-rw-r--r-- drivers/mtd/nand/pxa3xx_nand.c | 2
-rw-r--r-- drivers/scsi/qla4xxx/ql4_glbl.h | 2
-rw-r--r-- drivers/scsi/qla4xxx/ql4_nx.c | 2
-rw-r--r-- drivers/serial/of_serial.c | 3
-rw-r--r-- drivers/spi/coldfire_qspi.c | 1
-rw-r--r-- drivers/staging/pohmelfs/path_entry.c | 8
-rw-r--r-- drivers/video/matrox/matroxfb_base.h | 4
-rw-r--r-- fs/buffer.c | 69
-rw-r--r-- fs/cramfs/inode.c | 2
-rw-r--r-- fs/dcache.c | 71
-rw-r--r-- fs/exec.c | 4
-rw-r--r-- fs/fat/misc.c | 4
-rw-r--r-- fs/file_table.c | 124
-rw-r--r-- fs/fs_struct.c | 32
-rw-r--r-- fs/generic_acl.c | 1
-rw-r--r-- fs/hostfs/hostfs_kern.c | 4
-rw-r--r-- fs/internal.h | 7
-rw-r--r-- fs/jbd/checkpoint.c | 4
-rw-r--r-- fs/jbd/commit.c | 49
-rw-r--r-- fs/jbd/journal.c | 2
-rw-r--r-- fs/jbd/revoke.c | 2
-rw-r--r-- fs/jbd2/checkpoint.c | 4
-rw-r--r-- fs/jbd2/commit.c | 39
-rw-r--r-- fs/jbd2/journal.c | 2
-rw-r--r-- fs/jbd2/revoke.c | 2
-rw-r--r-- fs/mbcache.c | 30
-rw-r--r-- fs/namei.c | 119
-rw-r--r-- fs/namespace.c | 177
-rw-r--r-- fs/nfs/Kconfig | 1
-rw-r--r-- fs/nfs/dir.c | 9
-rw-r--r-- fs/nfs/file.c | 2
-rw-r--r-- fs/nfs/nfs4proc.c | 11
-rw-r--r-- fs/nfs/super.c | 7
-rw-r--r-- fs/nfsd/Kconfig | 1
-rw-r--r-- fs/nilfs2/super.c | 28
-rw-r--r-- fs/open.c | 4
-rw-r--r-- fs/pnode.c | 11
-rw-r--r-- fs/reiserfs/inode.c | 1
-rw-r--r-- fs/reiserfs/journal.c | 2
-rw-r--r-- fs/super.c | 18
-rw-r--r-- fs/ufs/balloc.c | 24
-rw-r--r-- fs/ufs/ialloc.c | 18
-rw-r--r-- fs/ufs/truncate.c | 18
-rw-r--r-- fs/ufs/util.c | 20
-rw-r--r-- fs/ufs/util.h | 3
-rw-r--r-- include/asm-generic/syscalls.h | 6
-rw-r--r-- include/linux/buffer_head.h | 4
-rw-r--r-- include/linux/fs.h | 21
-rw-r--r-- include/linux/fs_struct.h | 14
-rw-r--r-- include/linux/kfifo.h | 2
-rw-r--r-- include/linux/lglock.h | 172
-rw-r--r-- include/linux/mm_types.h | 2
-rw-r--r-- include/linux/spi/spi.h | 3
-rw-r--r-- include/linux/tty.h | 9
-rw-r--r-- include/sound/emu10k1.h | 1
-rw-r--r-- include/trace/events/workqueue.h | 62
-rw-r--r-- kernel/fork.c | 17
-rw-r--r-- kernel/kfifo.c | 9
-rw-r--r-- kernel/trace/ring_buffer.c | 3
-rw-r--r-- kernel/trace/trace.c | 11
-rw-r--r-- kernel/trace/trace_events.c | 207
-rw-r--r-- kernel/trace/trace_functions_graph.c | 10
-rw-r--r-- kernel/workqueue.c | 9
-rw-r--r-- lib/Kconfig.debug | 5
-rw-r--r-- lib/radix-tree.c | 5
-rw-r--r-- mm/memory.c | 15
-rw-r--r-- mm/mlock.c | 21
-rw-r--r-- mm/mmap.c | 21
-rw-r--r-- mm/nommu.c | 7
-rw-r--r-- mm/oom_kill.c | 16
-rw-r--r-- mm/page-writeback.c | 3
-rw-r--r-- net/sunrpc/Kconfig | 9
-rw-r--r-- net/sunrpc/xprtrdma/rpc_rdma.c | 2
-rw-r--r-- net/sunrpc/xprtrdma/verbs.c | 22
-rw-r--r-- net/sunrpc/xprtsock.c | 28
-rw-r--r-- samples/kfifo/bytestream-example.c | 42
-rw-r--r-- samples/kfifo/dma-example.c | 111
-rw-r--r-- samples/kfifo/inttype-example.c | 43
-rw-r--r-- samples/kfifo/record-example.c | 39
-rwxr-xr-x scripts/recordmcount.pl | 7
-rw-r--r-- security/apparmor/path.c | 9
-rw-r--r-- security/selinux/hooks.c | 9
-rw-r--r-- sound/core/pcm_native.c | 4
-rw-r--r-- sound/pci/emu10k1/emu10k1.c | 4
-rw-r--r-- sound/pci/emu10k1/emupcm.c | 30
-rw-r--r-- sound/pci/emu10k1/memory.c | 4
-rw-r--r-- sound/pci/hda/patch_conexant.c | 1
-rw-r--r-- sound/pci/hda/patch_realtek.c | 176
-rw-r--r-- sound/pci/riptide/riptide.c | 11
-rw-r--r-- sound/soc/codecs/wm8776.c | 7
-rw-r--r-- tools/perf/Makefile | 30
-rw-r--r-- tools/perf/feature-tests.mak | 2
-rw-r--r-- tools/perf/util/ui/browsers/annotate.c | 3
142 files changed, 1911 insertions(+), 974 deletions(-)
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 084f6ad7b7a0..0b1a3f97f285 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -1922,9 +1922,12 @@ machines due to caching.
1922 <function>mutex_lock()</function> 1922 <function>mutex_lock()</function>
1923 </para> 1923 </para>
1924 <para> 1924 <para>
1925 There is a <function>mutex_trylock()</function> which can be 1925 There is a <function>mutex_trylock()</function> which does not
1926 used inside interrupt context, as it will not sleep. 1926 sleep. Still, it must not be used inside interrupt context since
1927 its implementation is not safe for that.
1927 <function>mutex_unlock()</function> will also never sleep. 1928 <function>mutex_unlock()</function> will also never sleep.
1929 It cannot be used in interrupt context either since a mutex
1930 must be released by the same task that acquired it.
1928 </para> 1931 </para>
1929 </listitem> 1932 </listitem>
1930 </itemizedlist> 1933 </itemizedlist>
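The documentation hunk above says that mutex_trylock() never sleeps but still may not be called from interrupt context, and that mutex_unlock() must run in the same task that took the mutex. A minimal sketch of the intended pattern, process context only; the my_lock/my_count names are made up for illustration and are not part of the patch:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(my_lock);
    static int my_count;

    /* Process context only: mutex_trylock() never sleeps, but it is still
     * not legal from an interrupt handler. */
    static int my_try_update(void)
    {
            if (!mutex_trylock(&my_lock))
                    return -EBUSY;          /* lock busy, caller retries later */
            my_count++;
            mutex_unlock(&my_lock);         /* released by the task that acquired it */
            return 0;
    }

From interrupt context the chapter's existing advice still applies: use a spinlock (for example spin_lock_irqsave()) rather than any mutex call.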
diff --git a/Documentation/hwmon/f71882fg b/Documentation/hwmon/f71882fg
index 1a07fd674cd0..a7952c2bd959 100644
--- a/Documentation/hwmon/f71882fg
+++ b/Documentation/hwmon/f71882fg
@@ -2,10 +2,6 @@ Kernel driver f71882fg
2====================== 2======================
3 3
4Supported chips: 4Supported chips:
5 * Fintek F71808E
6 Prefix: 'f71808fg'
7 Addresses scanned: none, address read from Super I/O config space
8 Datasheet: Not public
9 * Fintek F71858FG 5 * Fintek F71858FG
10 Prefix: 'f71858fg' 6 Prefix: 'f71858fg'
11 Addresses scanned: none, address read from Super I/O config space 7 Addresses scanned: none, address read from Super I/O config space
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index 568fa08e82e5..302db5da49b3 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -49,40 +49,13 @@ Table of Contents
49 f) MDIO on GPIOs 49 f) MDIO on GPIOs
50 g) SPI busses 50 g) SPI busses
51 51
52 VII - Marvell Discovery mv64[345]6x System Controller chips 52 VII - Specifying interrupt information for devices
53 1) The /system-controller node
54 2) Child nodes of /system-controller
55 a) Marvell Discovery MDIO bus
56 b) Marvell Discovery ethernet controller
57 c) Marvell Discovery PHY nodes
58 d) Marvell Discovery SDMA nodes
59 e) Marvell Discovery BRG nodes
60 f) Marvell Discovery CUNIT nodes
61 g) Marvell Discovery MPSCROUTING nodes
62 h) Marvell Discovery MPSCINTR nodes
63 i) Marvell Discovery MPSC nodes
64 j) Marvell Discovery Watch Dog Timer nodes
65 k) Marvell Discovery I2C nodes
66 l) Marvell Discovery PIC (Programmable Interrupt Controller) nodes
67 m) Marvell Discovery MPP (Multipurpose Pins) multiplexing nodes
68 n) Marvell Discovery GPP (General Purpose Pins) nodes
69 o) Marvell Discovery PCI host bridge node
70 p) Marvell Discovery CPU Error nodes
71 q) Marvell Discovery SRAM Controller nodes
72 r) Marvell Discovery PCI Error Handler nodes
73 s) Marvell Discovery Memory Controller nodes
74
75 VIII - Specifying interrupt information for devices
76 1) interrupts property 53 1) interrupts property
77 2) interrupt-parent property 54 2) interrupt-parent property
78 3) OpenPIC Interrupt Controllers 55 3) OpenPIC Interrupt Controllers
79 4) ISA Interrupt Controllers 56 4) ISA Interrupt Controllers
80 57
81 IX - Specifying GPIO information for devices 58 VIII - Specifying device power management information (sleep property)
82 1) gpios property
83 2) gpio-controller nodes
84
85 X - Specifying device power management information (sleep property)
86 59
87 Appendix A - Sample SOC node for MPC8540 60 Appendix A - Sample SOC node for MPC8540
88 61
diff --git a/Documentation/powerpc/hvcs.txt b/Documentation/powerpc/hvcs.txt
index f93462c5db25..6d8be3468d7d 100644
--- a/Documentation/powerpc/hvcs.txt
+++ b/Documentation/powerpc/hvcs.txt
@@ -560,7 +560,7 @@ The proper channel for reporting bugs is either through the Linux OS
560distribution company that provided your OS or by posting issues to the 560distribution company that provided your OS or by posting issues to the
561PowerPC development mailing list at: 561PowerPC development mailing list at:
562 562
563linuxppc-dev@ozlabs.org 563linuxppc-dev@lists.ozlabs.org
564 564
565This request is to provide a documented and searchable public exchange 565This request is to provide a documented and searchable public exchange
566of the problems and solutions surrounding this driver for the benefit of 566of the problems and solutions surrounding this driver for the benefit of
diff --git a/MAINTAINERS b/MAINTAINERS
index b5b8baa1d70e..433f35385756 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -456,7 +456,7 @@ F: drivers/infiniband/hw/amso1100/
456 456
457AOA (Apple Onboard Audio) ALSA DRIVER 457AOA (Apple Onboard Audio) ALSA DRIVER
458M: Johannes Berg <johannes@sipsolutions.net> 458M: Johannes Berg <johannes@sipsolutions.net>
459L: linuxppc-dev@ozlabs.org 459L: linuxppc-dev@lists.ozlabs.org
460L: alsa-devel@alsa-project.org (moderated for non-subscribers) 460L: alsa-devel@alsa-project.org (moderated for non-subscribers)
461S: Maintained 461S: Maintained
462F: sound/aoa/ 462F: sound/aoa/
@@ -1472,8 +1472,8 @@ F: include/linux/can/platform/
1472 1472
1473CELL BROADBAND ENGINE ARCHITECTURE 1473CELL BROADBAND ENGINE ARCHITECTURE
1474M: Arnd Bergmann <arnd@arndb.de> 1474M: Arnd Bergmann <arnd@arndb.de>
1475L: linuxppc-dev@ozlabs.org 1475L: linuxppc-dev@lists.ozlabs.org
1476L: cbe-oss-dev@ozlabs.org 1476L: cbe-oss-dev@lists.ozlabs.org
1477W: http://www.ibm.com/developerworks/power/cell/ 1477W: http://www.ibm.com/developerworks/power/cell/
1478S: Supported 1478S: Supported
1479F: arch/powerpc/include/asm/cell*.h 1479F: arch/powerpc/include/asm/cell*.h
@@ -2371,13 +2371,13 @@ F: include/linux/fb.h
2371FREESCALE DMA DRIVER 2371FREESCALE DMA DRIVER
2372M: Li Yang <leoli@freescale.com> 2372M: Li Yang <leoli@freescale.com>
2373M: Zhang Wei <zw@zh-kernel.org> 2373M: Zhang Wei <zw@zh-kernel.org>
2374L: linuxppc-dev@ozlabs.org 2374L: linuxppc-dev@lists.ozlabs.org
2375S: Maintained 2375S: Maintained
2376F: drivers/dma/fsldma.* 2376F: drivers/dma/fsldma.*
2377 2377
2378FREESCALE I2C CPM DRIVER 2378FREESCALE I2C CPM DRIVER
2379M: Jochen Friedrich <jochen@scram.de> 2379M: Jochen Friedrich <jochen@scram.de>
2380L: linuxppc-dev@ozlabs.org 2380L: linuxppc-dev@lists.ozlabs.org
2381L: linux-i2c@vger.kernel.org 2381L: linux-i2c@vger.kernel.org
2382S: Maintained 2382S: Maintained
2383F: drivers/i2c/busses/i2c-cpm.c 2383F: drivers/i2c/busses/i2c-cpm.c
@@ -2393,7 +2393,7 @@ F: drivers/video/imxfb.c
2393FREESCALE SOC FS_ENET DRIVER 2393FREESCALE SOC FS_ENET DRIVER
2394M: Pantelis Antoniou <pantelis.antoniou@gmail.com> 2394M: Pantelis Antoniou <pantelis.antoniou@gmail.com>
2395M: Vitaly Bordug <vbordug@ru.mvista.com> 2395M: Vitaly Bordug <vbordug@ru.mvista.com>
2396L: linuxppc-dev@ozlabs.org 2396L: linuxppc-dev@lists.ozlabs.org
2397L: netdev@vger.kernel.org 2397L: netdev@vger.kernel.org
2398S: Maintained 2398S: Maintained
2399F: drivers/net/fs_enet/ 2399F: drivers/net/fs_enet/
@@ -2401,7 +2401,7 @@ F: include/linux/fs_enet_pd.h
2401 2401
2402FREESCALE QUICC ENGINE LIBRARY 2402FREESCALE QUICC ENGINE LIBRARY
2403M: Timur Tabi <timur@freescale.com> 2403M: Timur Tabi <timur@freescale.com>
2404L: linuxppc-dev@ozlabs.org 2404L: linuxppc-dev@lists.ozlabs.org
2405S: Supported 2405S: Supported
2406F: arch/powerpc/sysdev/qe_lib/ 2406F: arch/powerpc/sysdev/qe_lib/
2407F: arch/powerpc/include/asm/*qe.h 2407F: arch/powerpc/include/asm/*qe.h
@@ -2409,27 +2409,27 @@ F: arch/powerpc/include/asm/*qe.h
2409FREESCALE USB PERIPHERAL DRIVERS 2409FREESCALE USB PERIPHERAL DRIVERS
2410M: Li Yang <leoli@freescale.com> 2410M: Li Yang <leoli@freescale.com>
2411L: linux-usb@vger.kernel.org 2411L: linux-usb@vger.kernel.org
2412L: linuxppc-dev@ozlabs.org 2412L: linuxppc-dev@lists.ozlabs.org
2413S: Maintained 2413S: Maintained
2414F: drivers/usb/gadget/fsl* 2414F: drivers/usb/gadget/fsl*
2415 2415
2416FREESCALE QUICC ENGINE UCC ETHERNET DRIVER 2416FREESCALE QUICC ENGINE UCC ETHERNET DRIVER
2417M: Li Yang <leoli@freescale.com> 2417M: Li Yang <leoli@freescale.com>
2418L: netdev@vger.kernel.org 2418L: netdev@vger.kernel.org
2419L: linuxppc-dev@ozlabs.org 2419L: linuxppc-dev@lists.ozlabs.org
2420S: Maintained 2420S: Maintained
2421F: drivers/net/ucc_geth* 2421F: drivers/net/ucc_geth*
2422 2422
2423FREESCALE QUICC ENGINE UCC UART DRIVER 2423FREESCALE QUICC ENGINE UCC UART DRIVER
2424M: Timur Tabi <timur@freescale.com> 2424M: Timur Tabi <timur@freescale.com>
2425L: linuxppc-dev@ozlabs.org 2425L: linuxppc-dev@lists.ozlabs.org
2426S: Supported 2426S: Supported
2427F: drivers/serial/ucc_uart.c 2427F: drivers/serial/ucc_uart.c
2428 2428
2429FREESCALE SOC SOUND DRIVERS 2429FREESCALE SOC SOUND DRIVERS
2430M: Timur Tabi <timur@freescale.com> 2430M: Timur Tabi <timur@freescale.com>
2431L: alsa-devel@alsa-project.org (moderated for non-subscribers) 2431L: alsa-devel@alsa-project.org (moderated for non-subscribers)
2432L: linuxppc-dev@ozlabs.org 2432L: linuxppc-dev@lists.ozlabs.org
2433S: Supported 2433S: Supported
2434F: sound/soc/fsl/fsl* 2434F: sound/soc/fsl/fsl*
2435F: sound/soc/fsl/mpc8610_hpcd.c 2435F: sound/soc/fsl/mpc8610_hpcd.c
@@ -2564,7 +2564,7 @@ F: mm/memory-failure.c
2564F: mm/hwpoison-inject.c 2564F: mm/hwpoison-inject.c
2565 2565
2566HYPERVISOR VIRTUAL CONSOLE DRIVER 2566HYPERVISOR VIRTUAL CONSOLE DRIVER
2567L: linuxppc-dev@ozlabs.org 2567L: linuxppc-dev@lists.ozlabs.org
2568S: Odd Fixes 2568S: Odd Fixes
2569F: drivers/char/hvc_* 2569F: drivers/char/hvc_*
2570 2570
@@ -3476,7 +3476,7 @@ F: drivers/usb/misc/legousbtower.c
3476 3476
3477LGUEST 3477LGUEST
3478M: Rusty Russell <rusty@rustcorp.com.au> 3478M: Rusty Russell <rusty@rustcorp.com.au>
3479L: lguest@ozlabs.org 3479L: lguest@lists.ozlabs.org
3480W: http://lguest.ozlabs.org/ 3480W: http://lguest.ozlabs.org/
3481S: Maintained 3481S: Maintained
3482F: Documentation/lguest/ 3482F: Documentation/lguest/
@@ -3495,7 +3495,7 @@ LINUX FOR POWERPC (32-BIT AND 64-BIT)
3495M: Benjamin Herrenschmidt <benh@kernel.crashing.org> 3495M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
3496M: Paul Mackerras <paulus@samba.org> 3496M: Paul Mackerras <paulus@samba.org>
3497W: http://www.penguinppc.org/ 3497W: http://www.penguinppc.org/
3498L: linuxppc-dev@ozlabs.org 3498L: linuxppc-dev@lists.ozlabs.org
3499Q: http://patchwork.ozlabs.org/project/linuxppc-dev/list/ 3499Q: http://patchwork.ozlabs.org/project/linuxppc-dev/list/
3500T: git git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc.git 3500T: git git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc.git
3501S: Supported 3501S: Supported
@@ -3505,14 +3505,14 @@ F: arch/powerpc/
3505LINUX FOR POWER MACINTOSH 3505LINUX FOR POWER MACINTOSH
3506M: Benjamin Herrenschmidt <benh@kernel.crashing.org> 3506M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
3507W: http://www.penguinppc.org/ 3507W: http://www.penguinppc.org/
3508L: linuxppc-dev@ozlabs.org 3508L: linuxppc-dev@lists.ozlabs.org
3509S: Maintained 3509S: Maintained
3510F: arch/powerpc/platforms/powermac/ 3510F: arch/powerpc/platforms/powermac/
3511F: drivers/macintosh/ 3511F: drivers/macintosh/
3512 3512
3513LINUX FOR POWERPC EMBEDDED MPC5XXX 3513LINUX FOR POWERPC EMBEDDED MPC5XXX
3514M: Grant Likely <grant.likely@secretlab.ca> 3514M: Grant Likely <grant.likely@secretlab.ca>
3515L: linuxppc-dev@ozlabs.org 3515L: linuxppc-dev@lists.ozlabs.org
3516T: git git://git.secretlab.ca/git/linux-2.6.git 3516T: git git://git.secretlab.ca/git/linux-2.6.git
3517S: Maintained 3517S: Maintained
3518F: arch/powerpc/platforms/512x/ 3518F: arch/powerpc/platforms/512x/
@@ -3522,7 +3522,7 @@ LINUX FOR POWERPC EMBEDDED PPC4XX
3522M: Josh Boyer <jwboyer@linux.vnet.ibm.com> 3522M: Josh Boyer <jwboyer@linux.vnet.ibm.com>
3523M: Matt Porter <mporter@kernel.crashing.org> 3523M: Matt Porter <mporter@kernel.crashing.org>
3524W: http://www.penguinppc.org/ 3524W: http://www.penguinppc.org/
3525L: linuxppc-dev@ozlabs.org 3525L: linuxppc-dev@lists.ozlabs.org
3526T: git git://git.kernel.org/pub/scm/linux/kernel/git/jwboyer/powerpc-4xx.git 3526T: git git://git.kernel.org/pub/scm/linux/kernel/git/jwboyer/powerpc-4xx.git
3527S: Maintained 3527S: Maintained
3528F: arch/powerpc/platforms/40x/ 3528F: arch/powerpc/platforms/40x/
@@ -3531,7 +3531,7 @@ F: arch/powerpc/platforms/44x/
3531LINUX FOR POWERPC EMBEDDED XILINX VIRTEX 3531LINUX FOR POWERPC EMBEDDED XILINX VIRTEX
3532M: Grant Likely <grant.likely@secretlab.ca> 3532M: Grant Likely <grant.likely@secretlab.ca>
3533W: http://wiki.secretlab.ca/index.php/Linux_on_Xilinx_Virtex 3533W: http://wiki.secretlab.ca/index.php/Linux_on_Xilinx_Virtex
3534L: linuxppc-dev@ozlabs.org 3534L: linuxppc-dev@lists.ozlabs.org
3535T: git git://git.secretlab.ca/git/linux-2.6.git 3535T: git git://git.secretlab.ca/git/linux-2.6.git
3536S: Maintained 3536S: Maintained
3537F: arch/powerpc/*/*virtex* 3537F: arch/powerpc/*/*virtex*
@@ -3541,20 +3541,20 @@ LINUX FOR POWERPC EMBEDDED PPC8XX
3541M: Vitaly Bordug <vitb@kernel.crashing.org> 3541M: Vitaly Bordug <vitb@kernel.crashing.org>
3542M: Marcelo Tosatti <marcelo@kvack.org> 3542M: Marcelo Tosatti <marcelo@kvack.org>
3543W: http://www.penguinppc.org/ 3543W: http://www.penguinppc.org/
3544L: linuxppc-dev@ozlabs.org 3544L: linuxppc-dev@lists.ozlabs.org
3545S: Maintained 3545S: Maintained
3546F: arch/powerpc/platforms/8xx/ 3546F: arch/powerpc/platforms/8xx/
3547 3547
3548LINUX FOR POWERPC EMBEDDED PPC83XX AND PPC85XX 3548LINUX FOR POWERPC EMBEDDED PPC83XX AND PPC85XX
3549M: Kumar Gala <galak@kernel.crashing.org> 3549M: Kumar Gala <galak@kernel.crashing.org>
3550W: http://www.penguinppc.org/ 3550W: http://www.penguinppc.org/
3551L: linuxppc-dev@ozlabs.org 3551L: linuxppc-dev@lists.ozlabs.org
3552S: Maintained 3552S: Maintained
3553F: arch/powerpc/platforms/83xx/ 3553F: arch/powerpc/platforms/83xx/
3554 3554
3555LINUX FOR POWERPC PA SEMI PWRFICIENT 3555LINUX FOR POWERPC PA SEMI PWRFICIENT
3556M: Olof Johansson <olof@lixom.net> 3556M: Olof Johansson <olof@lixom.net>
3557L: linuxppc-dev@ozlabs.org 3557L: linuxppc-dev@lists.ozlabs.org
3558S: Maintained 3558S: Maintained
3559F: arch/powerpc/platforms/pasemi/ 3559F: arch/powerpc/platforms/pasemi/
3560F: drivers/*/*pasemi* 3560F: drivers/*/*pasemi*
@@ -4601,14 +4601,14 @@ F: drivers/ata/sata_promise.*
4601PS3 NETWORK SUPPORT 4601PS3 NETWORK SUPPORT
4602M: Geoff Levand <geoff@infradead.org> 4602M: Geoff Levand <geoff@infradead.org>
4603L: netdev@vger.kernel.org 4603L: netdev@vger.kernel.org
4604L: cbe-oss-dev@ozlabs.org 4604L: cbe-oss-dev@lists.ozlabs.org
4605S: Maintained 4605S: Maintained
4606F: drivers/net/ps3_gelic_net.* 4606F: drivers/net/ps3_gelic_net.*
4607 4607
4608PS3 PLATFORM SUPPORT 4608PS3 PLATFORM SUPPORT
4609M: Geoff Levand <geoff@infradead.org> 4609M: Geoff Levand <geoff@infradead.org>
4610L: linuxppc-dev@ozlabs.org 4610L: linuxppc-dev@lists.ozlabs.org
4611L: cbe-oss-dev@ozlabs.org 4611L: cbe-oss-dev@lists.ozlabs.org
4612S: Maintained 4612S: Maintained
4613F: arch/powerpc/boot/ps3* 4613F: arch/powerpc/boot/ps3*
4614F: arch/powerpc/include/asm/lv1call.h 4614F: arch/powerpc/include/asm/lv1call.h
@@ -4622,7 +4622,7 @@ F: sound/ppc/snd_ps3*
4622 4622
4623PS3VRAM DRIVER 4623PS3VRAM DRIVER
4624M: Jim Paris <jim@jtan.com> 4624M: Jim Paris <jim@jtan.com>
4625L: cbe-oss-dev@ozlabs.org 4625L: cbe-oss-dev@lists.ozlabs.org
4626S: Maintained 4626S: Maintained
4627F: drivers/block/ps3vram.c 4627F: drivers/block/ps3vram.c
4628 4628
@@ -5068,7 +5068,7 @@ F: drivers/mmc/host/sdhci.*
5068 5068
5069SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF) 5069SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)
5070M: Anton Vorontsov <avorontsov@ru.mvista.com> 5070M: Anton Vorontsov <avorontsov@ru.mvista.com>
5071L: linuxppc-dev@ozlabs.org 5071L: linuxppc-dev@lists.ozlabs.org
5072L: linux-mmc@vger.kernel.org 5072L: linux-mmc@vger.kernel.org
5073S: Maintained 5073S: Maintained
5074F: drivers/mmc/host/sdhci-of.* 5074F: drivers/mmc/host/sdhci-of.*
@@ -5485,8 +5485,8 @@ F: drivers/net/spider_net*
5485 5485
5486SPU FILE SYSTEM 5486SPU FILE SYSTEM
5487M: Jeremy Kerr <jk@ozlabs.org> 5487M: Jeremy Kerr <jk@ozlabs.org>
5488L: linuxppc-dev@ozlabs.org 5488L: linuxppc-dev@lists.ozlabs.org
5489L: cbe-oss-dev@ozlabs.org 5489L: cbe-oss-dev@lists.ozlabs.org
5490W: http://www.ibm.com/developerworks/power/cell/ 5490W: http://www.ibm.com/developerworks/power/cell/
5491S: Supported 5491S: Supported
5492F: Documentation/filesystems/spufs.txt 5492F: Documentation/filesystems/spufs.txt
diff --git a/arch/arm/plat-samsung/dev-hsmmc.c b/arch/arm/plat-samsung/dev-hsmmc.c
index b0f93f11e281..9d2be0941410 100644
--- a/arch/arm/plat-samsung/dev-hsmmc.c
+++ b/arch/arm/plat-samsung/dev-hsmmc.c
@@ -70,4 +70,6 @@ void s3c_sdhci0_set_platdata(struct s3c_sdhci_platdata *pd)
70 set->cfg_gpio = pd->cfg_gpio; 70 set->cfg_gpio = pd->cfg_gpio;
71 if (pd->cfg_card) 71 if (pd->cfg_card)
72 set->cfg_card = pd->cfg_card; 72 set->cfg_card = pd->cfg_card;
73 if (pd->host_caps)
74 set->host_caps = pd->host_caps;
73} 75}
diff --git a/arch/arm/plat-samsung/dev-hsmmc1.c b/arch/arm/plat-samsung/dev-hsmmc1.c
index 1504fd802865..a6c8295840af 100644
--- a/arch/arm/plat-samsung/dev-hsmmc1.c
+++ b/arch/arm/plat-samsung/dev-hsmmc1.c
@@ -70,4 +70,6 @@ void s3c_sdhci1_set_platdata(struct s3c_sdhci_platdata *pd)
70 set->cfg_gpio = pd->cfg_gpio; 70 set->cfg_gpio = pd->cfg_gpio;
71 if (pd->cfg_card) 71 if (pd->cfg_card)
72 set->cfg_card = pd->cfg_card; 72 set->cfg_card = pd->cfg_card;
73 if (pd->host_caps)
74 set->host_caps = pd->host_caps;
73} 75}
diff --git a/arch/arm/plat-samsung/dev-hsmmc2.c b/arch/arm/plat-samsung/dev-hsmmc2.c
index b28ef173444d..cb0d7143381a 100644
--- a/arch/arm/plat-samsung/dev-hsmmc2.c
+++ b/arch/arm/plat-samsung/dev-hsmmc2.c
@@ -71,4 +71,6 @@ void s3c_sdhci2_set_platdata(struct s3c_sdhci_platdata *pd)
71 set->cfg_gpio = pd->cfg_gpio; 71 set->cfg_gpio = pd->cfg_gpio;
72 if (pd->cfg_card) 72 if (pd->cfg_card)
73 set->cfg_card = pd->cfg_card; 73 set->cfg_card = pd->cfg_card;
74 if (pd->host_caps)
75 set->host_caps = pd->host_caps;
74} 76}
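The three hunks above make s3c_sdhciN_set_platdata() copy a board-supplied host_caps value into the driver's platform data instead of dropping it. A sketch of the board-file side this enables; only s3c_sdhci0_set_platdata() and the host_caps field come from the patch, while the platdata name and the header paths (<plat/sdhci.h>, <linux/mmc/host.h>) are the customary ones and are assumptions here:

    #include <linux/init.h>
    #include <linux/mmc/host.h>
    #include <plat/sdhci.h>

    /* Hypothetical board file: request 4-bit, high-speed SD on hsmmc0. */
    static struct s3c_sdhci_platdata my_board_hsmmc0_pdata = {
            .host_caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED,
    };

    static void __init my_board_mmc_init(void)
    {
            /* Before this change, .host_caps was silently ignored here. */
            s3c_sdhci0_set_platdata(&my_board_hsmmc0_pdata);
    }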
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 87f1bd1efc82..954d398a54b4 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -356,8 +356,6 @@ asmlinkage unsigned long sys_mmap2(
356 int fd, long pgoff); 356 int fd, long pgoff);
357struct pt_regs; 357struct pt_regs;
358struct sigaction; 358struct sigaction;
359long sys_execve(const char __user *filename, char __user * __user *argv,
360 char __user * __user *envp, struct pt_regs *regs);
361asmlinkage long sys_ia64_pipe(void); 359asmlinkage long sys_ia64_pipe(void);
362asmlinkage long sys_rt_sigaction(int sig, 360asmlinkage long sys_rt_sigaction(int sig,
363 const struct sigaction __user *act, 361 const struct sigaction __user *act,
diff --git a/arch/m68k/include/asm/ide.h b/arch/m68k/include/asm/ide.h
index 3958726664ba..492fee8a1ab2 100644
--- a/arch/m68k/include/asm/ide.h
+++ b/arch/m68k/include/asm/ide.h
@@ -1,6 +1,4 @@
1/* 1/*
2 * linux/include/asm-m68k/ide.h
3 *
4 * Copyright (C) 1994-1996 Linus Torvalds & authors 2 * Copyright (C) 1994-1996 Linus Torvalds & authors
5 */ 3 */
6 4
@@ -34,6 +32,8 @@
34#include <asm/io.h> 32#include <asm/io.h>
35#include <asm/irq.h> 33#include <asm/irq.h>
36 34
35#ifdef CONFIG_MMU
36
37/* 37/*
38 * Get rid of defs from io.h - ide has its private and conflicting versions 38 * Get rid of defs from io.h - ide has its private and conflicting versions
39 * Since so far no single m68k platform uses ISA/PCI I/O space for IDE, we 39 * Since so far no single m68k platform uses ISA/PCI I/O space for IDE, we
@@ -53,5 +53,14 @@
53#define __ide_mm_outsw(port, addr, n) raw_outsw((u16 *)port, addr, n) 53#define __ide_mm_outsw(port, addr, n) raw_outsw((u16 *)port, addr, n)
54#define __ide_mm_outsl(port, addr, n) raw_outsl((u32 *)port, addr, n) 54#define __ide_mm_outsl(port, addr, n) raw_outsl((u32 *)port, addr, n)
55 55
56#else
57
58#define __ide_mm_insw(port, addr, n) io_insw((unsigned int)port, addr, n)
59#define __ide_mm_insl(port, addr, n) io_insl((unsigned int)port, addr, n)
60#define __ide_mm_outsw(port, addr, n) io_outsw((unsigned int)port, addr, n)
61#define __ide_mm_outsl(port, addr, n) io_outsl((unsigned int)port, addr, n)
62
63#endif /* CONFIG_MMU */
64
56#endif /* __KERNEL__ */ 65#endif /* __KERNEL__ */
57#endif /* _M68K_IDE_H */ 66#endif /* _M68K_IDE_H */
diff --git a/arch/m68knommu/kernel/process.c b/arch/m68knommu/kernel/process.c
index 4d090d3c0897..6d3390590e5b 100644
--- a/arch/m68knommu/kernel/process.c
+++ b/arch/m68knommu/kernel/process.c
@@ -316,14 +316,14 @@ void dump(struct pt_regs *fp)
316 fp->d0, fp->d1, fp->d2, fp->d3); 316 fp->d0, fp->d1, fp->d2, fp->d3);
317 printk(KERN_EMERG "d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n", 317 printk(KERN_EMERG "d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
318 fp->d4, fp->d5, fp->a0, fp->a1); 318 fp->d4, fp->d5, fp->a0, fp->a1);
319 printk(KERN_EMERG "\nUSP: %08x TRAPFRAME: %08x\n", 319 printk(KERN_EMERG "\nUSP: %08x TRAPFRAME: %p\n",
320 (unsigned int) rdusp(), (unsigned int) fp); 320 (unsigned int) rdusp(), fp);
321 321
322 printk(KERN_EMERG "\nCODE:"); 322 printk(KERN_EMERG "\nCODE:");
323 tp = ((unsigned char *) fp->pc) - 0x20; 323 tp = ((unsigned char *) fp->pc) - 0x20;
324 for (sp = (unsigned long *) tp, i = 0; (i < 0x40); i += 4) { 324 for (sp = (unsigned long *) tp, i = 0; (i < 0x40); i += 4) {
325 if ((i % 0x10) == 0) 325 if ((i % 0x10) == 0)
326 printk(KERN_EMERG "%08x: ", (int) (tp + i)); 326 printk(KERN_EMERG "%p: ", tp + i);
327 printk("%08x ", (int) *sp++); 327 printk("%08x ", (int) *sp++);
328 } 328 }
329 printk(KERN_EMERG "\n"); 329 printk(KERN_EMERG "\n");
@@ -332,7 +332,7 @@ void dump(struct pt_regs *fp)
332 tp = ((unsigned char *) fp) - 0x40; 332 tp = ((unsigned char *) fp) - 0x40;
333 for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) { 333 for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) {
334 if ((i % 0x10) == 0) 334 if ((i % 0x10) == 0)
335 printk(KERN_EMERG "%08x: ", (int) (tp + i)); 335 printk(KERN_EMERG "%p: ", tp + i);
336 printk("%08x ", (int) *sp++); 336 printk("%08x ", (int) *sp++);
337 } 337 }
338 printk(KERN_EMERG "\n"); 338 printk(KERN_EMERG "\n");
@@ -341,7 +341,7 @@ void dump(struct pt_regs *fp)
341 tp = (unsigned char *) (rdusp() - 0x10); 341 tp = (unsigned char *) (rdusp() - 0x10);
342 for (sp = (unsigned long *) tp, i = 0; (i < 0x80); i += 4) { 342 for (sp = (unsigned long *) tp, i = 0; (i < 0x80); i += 4) {
343 if ((i % 0x10) == 0) 343 if ((i % 0x10) == 0)
344 printk(KERN_EMERG "%08x: ", (int) (tp + i)); 344 printk(KERN_EMERG "%p: ", tp + i);
345 printk("%08x ", (int) *sp++); 345 printk("%08x ", (int) *sp++);
346 } 346 }
347 printk(KERN_EMERG "\n"); 347 printk(KERN_EMERG "\n");
diff --git a/arch/microblaze/kernel/prom_parse.c b/arch/microblaze/kernel/prom_parse.c
index d33ba17601fa..99d9b61cccb5 100644
--- a/arch/microblaze/kernel/prom_parse.c
+++ b/arch/microblaze/kernel/prom_parse.c
@@ -73,7 +73,7 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
73 /* We can only get here if we hit a P2P bridge with no node, 73 /* We can only get here if we hit a P2P bridge with no node,
74 * let's do standard swizzling and try again 74 * let's do standard swizzling and try again
75 */ 75 */
76 lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec); 76 lspec = pci_swizzle_interrupt_pin(pdev, lspec);
77 pdev = ppdev; 77 pdev = ppdev;
78 } 78 }
79 79
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 23be25fec4d6..55ef532f32be 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -27,10 +27,11 @@
27#include <linux/irq.h> 27#include <linux/irq.h>
28#include <linux/vmalloc.h> 28#include <linux/vmalloc.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/of.h>
31#include <linux/of_address.h>
30 32
31#include <asm/processor.h> 33#include <asm/processor.h>
32#include <asm/io.h> 34#include <asm/io.h>
33#include <asm/prom.h>
34#include <asm/pci-bridge.h> 35#include <asm/pci-bridge.h>
35#include <asm/byteorder.h> 36#include <asm/byteorder.h>
36 37
@@ -1077,7 +1078,7 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
1077 struct dev_archdata *sd = &dev->dev.archdata; 1078 struct dev_archdata *sd = &dev->dev.archdata;
1078 1079
1079 /* Setup OF node pointer in archdata */ 1080 /* Setup OF node pointer in archdata */
1080 sd->of_node = pci_device_to_OF_node(dev); 1081 dev->dev.of_node = pci_device_to_OF_node(dev);
1081 1082
1082 /* Fixup NUMA node as it may not be setup yet by the generic 1083 /* Fixup NUMA node as it may not be setup yet by the generic
1083 * code and is needed by the DMA init 1084 * code and is needed by the DMA init
diff --git a/arch/microblaze/pci/xilinx_pci.c b/arch/microblaze/pci/xilinx_pci.c
index 7869a41b0f94..0687a42a5bd4 100644
--- a/arch/microblaze/pci/xilinx_pci.c
+++ b/arch/microblaze/pci/xilinx_pci.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/ioport.h> 17#include <linux/ioport.h>
18#include <linux/of.h> 18#include <linux/of.h>
19#include <linux/of_address.h>
19#include <linux/pci.h> 20#include <linux/pci.h>
20#include <asm/io.h> 21#include <asm/io.h>
21 22
diff --git a/arch/um/include/asm/dma-mapping.h b/arch/um/include/asm/dma-mapping.h
index 17a2cb5a4178..1f469e80fdd3 100644
--- a/arch/um/include/asm/dma-mapping.h
+++ b/arch/um/include/asm/dma-mapping.h
@@ -95,13 +95,6 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
95#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 95#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
96#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 96#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
97 97
98static inline int
99dma_get_cache_alignment(void)
100{
101 BUG();
102 return(0);
103}
104
105static inline void 98static inline void
106dma_cache_sync(struct device *dev, void *vaddr, size_t size, 99dma_cache_sync(struct device *dev, void *vaddr, size_t size,
107 enum dma_data_direction direction) 100 enum dma_data_direction direction)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a84fc34c8f77..cea0cd9a316f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -245,6 +245,11 @@ config ARCH_HWEIGHT_CFLAGS
245 245
246config KTIME_SCALAR 246config KTIME_SCALAR
247 def_bool X86_32 247 def_bool X86_32
248
249config ARCH_CPU_PROBE_RELEASE
250 def_bool y
251 depends on HOTPLUG_CPU
252
248source "init/Kconfig" 253source "init/Kconfig"
249source "kernel/Kconfig.freezer" 254source "kernel/Kconfig.freezer"
250 255
@@ -749,11 +754,11 @@ config IOMMU_API
749 def_bool (AMD_IOMMU || DMAR) 754 def_bool (AMD_IOMMU || DMAR)
750 755
751config MAXSMP 756config MAXSMP
752 bool "Configure Maximum number of SMP Processors and NUMA Nodes" 757 bool "Enable Maximum number of SMP Processors and NUMA Nodes"
753 depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL 758 depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
754 select CPUMASK_OFFSTACK 759 select CPUMASK_OFFSTACK
755 ---help--- 760 ---help---
756 Configure maximum number of CPUS and NUMA Nodes for this architecture. 761 Enable maximum number of CPUS and NUMA Nodes for this architecture.
757 If unsure, say N. 762 If unsure, say N.
758 763
759config NR_CPUS 764config NR_CPUS
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 2984a25ff383..f686f49e8b7b 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -26,6 +26,7 @@ struct mm_struct;
26struct vm_area_struct; 26struct vm_area_struct;
27 27
28extern pgd_t swapper_pg_dir[1024]; 28extern pgd_t swapper_pg_dir[1024];
29extern pgd_t trampoline_pg_dir[1024];
29 30
30static inline void pgtable_cache_init(void) { } 31static inline void pgtable_cache_init(void) { }
31static inline void check_pgt_cache(void) { } 32static inline void check_pgt_cache(void) { }
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
index cb507bb05d79..4dde797c0578 100644
--- a/arch/x86/include/asm/trampoline.h
+++ b/arch/x86/include/asm/trampoline.h
@@ -13,14 +13,17 @@ extern unsigned char *trampoline_base;
13 13
14extern unsigned long init_rsp; 14extern unsigned long init_rsp;
15extern unsigned long initial_code; 15extern unsigned long initial_code;
16extern unsigned long initial_page_table;
16extern unsigned long initial_gs; 17extern unsigned long initial_gs;
17 18
18#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE) 19#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
19 20
20extern unsigned long setup_trampoline(void); 21extern unsigned long setup_trampoline(void);
22extern void __init setup_trampoline_page_table(void);
21extern void __init reserve_trampoline_memory(void); 23extern void __init reserve_trampoline_memory(void);
22#else 24#else
23static inline void reserve_trampoline_memory(void) {}; 25static inline void setup_trampoline_page_table(void) {}
26static inline void reserve_trampoline_memory(void) {}
24#endif /* CONFIG_X86_TRAMPOLINE */ 27#endif /* CONFIG_X86_TRAMPOLINE */
25 28
26#endif /* __ASSEMBLY__ */ 29#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 4dc0084ec1b1..f1efebaf5510 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1728,6 +1728,8 @@ __apicdebuginit(void) print_IO_APIC(void)
1728 struct irq_pin_list *entry; 1728 struct irq_pin_list *entry;
1729 1729
1730 cfg = desc->chip_data; 1730 cfg = desc->chip_data;
1731 if (!cfg)
1732 continue;
1731 entry = cfg->irq_2_pin; 1733 entry = cfg->irq_2_pin;
1732 if (!entry) 1734 if (!entry)
1733 continue; 1735 continue;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 60a57b13082d..ba5f62f45f01 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -669,7 +669,7 @@ bool cpu_has_amd_erratum(const int *erratum)
669 } 669 }
670 670
671 /* OSVW unavailable or ID unknown, match family-model-stepping range */ 671 /* OSVW unavailable or ID unknown, match family-model-stepping range */
672 ms = (cpu->x86_model << 8) | cpu->x86_mask; 672 ms = (cpu->x86_model << 4) | cpu->x86_mask;
673 while ((range = *erratum++)) 673 while ((range = *erratum++))
674 if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && 674 if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
675 (ms >= AMD_MODEL_RANGE_START(range)) && 675 (ms >= AMD_MODEL_RANGE_START(range)) &&
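The one-character fix above builds the family-model-stepping key as (model << 4) | stepping rather than (model << 8) | stepping. A standalone sketch of the arithmetic; the 12-bit bound layout (model in bits 11:4, stepping in bits 3:0) mirrors how amd.c's AMD_MODEL_RANGE() macros are generally understood to pack their start/end values, and is stated here as an assumption rather than quoted from the patch:

    #include <stdio.h>

    /* Assumed packing of a range bound: model in bits 11:4, stepping in 3:0. */
    #define BOUND(model, stepping) (((model) << 4) | (stepping))

    int main(void)
    {
            unsigned int model = 0x10, stepping = 0x2;
            unsigned int ms_fixed = (model << 4) | stepping;        /* 0x102  */
            unsigned int ms_old   = (model << 8) | stepping;        /* 0x1002 */

            /* Erratum range covering model 0x10, steppings 0x2..0xf. */
            unsigned int start = BOUND(0x10, 0x2), end = BOUND(0x10, 0xf);

            printf("fixed key in range? %d\n", ms_fixed >= start && ms_fixed <= end);
            printf("old key in range?   %d\n", ms_old   >= start && ms_old   <= end);
            return 0;
    }

With the old shift the key did not line up with the 12-bit bounds, so the range test could miss errata that do apply and, for small model numbers, match ranges that do not.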
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 214ac860ebe0..d8d86d014008 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -491,33 +491,78 @@ static void intel_pmu_enable_all(int added)
491 * Intel Errata AAP53 (model 30) 491 * Intel Errata AAP53 (model 30)
492 * Intel Errata BD53 (model 44) 492 * Intel Errata BD53 (model 44)
493 * 493 *
494 * These chips need to be 'reset' when adding counters by programming 494 * The official story:
495 * the magic three (non counting) events 0x4300D2, 0x4300B1 and 0x4300B5 495 * These chips need to be 'reset' when adding counters by programming the
496 * either in sequence on the same PMC or on different PMCs. 496 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
497 * in sequence on the same PMC or on different PMCs.
498 *
 499 * In practice it appears some of these events do in fact count, and
 500 * we need to program all 4 events.
497 */ 501 */
498static void intel_pmu_nhm_enable_all(int added) 502static void intel_pmu_nhm_workaround(void)
499{ 503{
500 if (added) { 504 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
501 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 505 static const unsigned long nhm_magic[4] = {
502 int i; 506 0x4300B5,
507 0x4300D2,
508 0x4300B1,
509 0x4300B1
510 };
511 struct perf_event *event;
512 int i;
513
514 /*
515 * The Errata requires below steps:
516 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
517 * 2) Configure 4 PERFEVTSELx with the magic events and clear
518 * the corresponding PMCx;
519 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
520 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
 521 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
522 */
523
524 /*
525 * The real steps we choose are a little different from above.
526 * A) To reduce MSR operations, we don't run step 1) as they
527 * are already cleared before this function is called;
528 * B) Call x86_perf_event_update to save PMCx before configuring
529 * PERFEVTSELx with magic number;
530 * C) With step 5), we do clear only when the PERFEVTSELx is
531 * not used currently.
532 * D) Call x86_perf_event_set_period to restore PMCx;
533 */
503 534
504 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 0, 0x4300D2); 535 /* We always operate 4 pairs of PERF Counters */
505 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x4300B1); 536 for (i = 0; i < 4; i++) {
506 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x4300B5); 537 event = cpuc->events[i];
538 if (event)
539 x86_perf_event_update(event);
540 }
507 541
508 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x3); 542 for (i = 0; i < 4; i++) {
509 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0); 543 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
544 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
545 }
510 546
511 for (i = 0; i < 3; i++) { 547 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
512 struct perf_event *event = cpuc->events[i]; 548 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
513 549
514 if (!event) 550 for (i = 0; i < 4; i++) {
515 continue; 551 event = cpuc->events[i];
516 552
553 if (event) {
554 x86_perf_event_set_period(event);
517 __x86_pmu_enable_event(&event->hw, 555 __x86_pmu_enable_event(&event->hw,
518 ARCH_PERFMON_EVENTSEL_ENABLE); 556 ARCH_PERFMON_EVENTSEL_ENABLE);
519 } 557 } else
558 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
520 } 559 }
560}
561
562static void intel_pmu_nhm_enable_all(int added)
563{
564 if (added)
565 intel_pmu_nhm_workaround();
521 intel_pmu_enable_all(added); 566 intel_pmu_enable_all(added);
522} 567}
523 568
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index ff4c453e13f3..fa8c1b8e09fb 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -334,7 +334,7 @@ ENTRY(startup_32_smp)
334/* 334/*
335 * Enable paging 335 * Enable paging
336 */ 336 */
337 movl $pa(swapper_pg_dir),%eax 337 movl pa(initial_page_table), %eax
338 movl %eax,%cr3 /* set the page table pointer.. */ 338 movl %eax,%cr3 /* set the page table pointer.. */
339 movl %cr0,%eax 339 movl %cr0,%eax
340 orl $X86_CR0_PG,%eax 340 orl $X86_CR0_PG,%eax
@@ -614,6 +614,8 @@ ignore_int:
614.align 4 614.align 4
615ENTRY(initial_code) 615ENTRY(initial_code)
616 .long i386_start_kernel 616 .long i386_start_kernel
617ENTRY(initial_page_table)
618 .long pa(swapper_pg_dir)
617 619
618/* 620/*
619 * BSS section 621 * BSS section
@@ -629,6 +631,10 @@ ENTRY(swapper_pg_dir)
629#endif 631#endif
630swapper_pg_fixmap: 632swapper_pg_fixmap:
631 .fill 1024,4,0 633 .fill 1024,4,0
634#ifdef CONFIG_X86_TRAMPOLINE
635ENTRY(trampoline_pg_dir)
636 .fill 1024,4,0
637#endif
632ENTRY(empty_zero_page) 638ENTRY(empty_zero_page)
633 .fill 4096,1,0 639 .fill 4096,1,0
634 640
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 1bfb6cf4dd55..770ebfb349e9 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -709,6 +709,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
709 struct hlist_node *node, *tmp; 709 struct hlist_node *node, *tmp;
710 unsigned long flags, orig_ret_address = 0; 710 unsigned long flags, orig_ret_address = 0;
711 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; 711 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
712 kprobe_opcode_t *correct_ret_addr = NULL;
712 713
713 INIT_HLIST_HEAD(&empty_rp); 714 INIT_HLIST_HEAD(&empty_rp);
714 kretprobe_hash_lock(current, &head, &flags); 715 kretprobe_hash_lock(current, &head, &flags);
@@ -740,14 +741,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
740 /* another task is sharing our hash bucket */ 741 /* another task is sharing our hash bucket */
741 continue; 742 continue;
742 743
744 orig_ret_address = (unsigned long)ri->ret_addr;
745
746 if (orig_ret_address != trampoline_address)
747 /*
748 * This is the real return address. Any other
749 * instances associated with this task are for
750 * other calls deeper on the call stack
751 */
752 break;
753 }
754
755 kretprobe_assert(ri, orig_ret_address, trampoline_address);
756
757 correct_ret_addr = ri->ret_addr;
758 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
759 if (ri->task != current)
760 /* another task is sharing our hash bucket */
761 continue;
762
763 orig_ret_address = (unsigned long)ri->ret_addr;
743 if (ri->rp && ri->rp->handler) { 764 if (ri->rp && ri->rp->handler) {
744 __get_cpu_var(current_kprobe) = &ri->rp->kp; 765 __get_cpu_var(current_kprobe) = &ri->rp->kp;
745 get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; 766 get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
767 ri->ret_addr = correct_ret_addr;
746 ri->rp->handler(ri, regs); 768 ri->rp->handler(ri, regs);
747 __get_cpu_var(current_kprobe) = NULL; 769 __get_cpu_var(current_kprobe) = NULL;
748 } 770 }
749 771
750 orig_ret_address = (unsigned long)ri->ret_addr;
751 recycle_rp_inst(ri, &empty_rp); 772 recycle_rp_inst(ri, &empty_rp);
752 773
753 if (orig_ret_address != trampoline_address) 774 if (orig_ret_address != trampoline_address)
@@ -759,8 +780,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
759 break; 780 break;
760 } 781 }
761 782
762 kretprobe_assert(ri, orig_ret_address, trampoline_address);
763
764 kretprobe_hash_unlock(current, &flags); 783 kretprobe_hash_unlock(current, &flags);
765 784
766 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { 785 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index b008e7883207..c3a4fbb2b996 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1014,6 +1014,8 @@ void __init setup_arch(char **cmdline_p)
1014 paging_init(); 1014 paging_init();
1015 x86_init.paging.pagetable_setup_done(swapper_pg_dir); 1015 x86_init.paging.pagetable_setup_done(swapper_pg_dir);
1016 1016
1017 setup_trampoline_page_table();
1018
1017 tboot_probe(); 1019 tboot_probe();
1018 1020
1019#ifdef CONFIG_X86_64 1021#ifdef CONFIG_X86_64
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index a5e928b0cb5f..8b3bfc4dd708 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -73,7 +73,6 @@
73 73
74#ifdef CONFIG_X86_32 74#ifdef CONFIG_X86_32
75u8 apicid_2_node[MAX_APICID]; 75u8 apicid_2_node[MAX_APICID];
76static int low_mappings;
77#endif 76#endif
78 77
79/* State of each CPU */ 78/* State of each CPU */
@@ -91,6 +90,25 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
91static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); 90static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
92#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) 91#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
93#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) 92#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
93
94/*
95 * We need this for trampoline_base protection from concurrent accesses when
96 * off- and onlining cores wildly.
97 */
98static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
99
100void cpu_hotplug_driver_lock()
101{
102 mutex_lock(&x86_cpu_hotplug_driver_mutex);
103}
104
105void cpu_hotplug_driver_unlock()
106{
107 mutex_unlock(&x86_cpu_hotplug_driver_mutex);
108}
109
110ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
111ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
94#else 112#else
95static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; 113static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
96#define get_idle_for_cpu(x) (idle_thread_array[(x)]) 114#define get_idle_for_cpu(x) (idle_thread_array[(x)])
@@ -281,6 +299,18 @@ notrace static void __cpuinit start_secondary(void *unused)
281 * fragile that we want to limit the things done here to the 299 * fragile that we want to limit the things done here to the
282 * most necessary things. 300 * most necessary things.
283 */ 301 */
302
303#ifdef CONFIG_X86_32
304 /*
305 * Switch away from the trampoline page-table
306 *
307 * Do this before cpu_init() because it needs to access per-cpu
308 * data which may not be mapped in the trampoline page-table.
309 */
310 load_cr3(swapper_pg_dir);
311 __flush_tlb_all();
312#endif
313
284 vmi_bringup(); 314 vmi_bringup();
285 cpu_init(); 315 cpu_init();
286 preempt_disable(); 316 preempt_disable();
@@ -299,12 +329,6 @@ notrace static void __cpuinit start_secondary(void *unused)
299 legacy_pic->chip->unmask(0); 329 legacy_pic->chip->unmask(0);
300 } 330 }
301 331
302#ifdef CONFIG_X86_32
303 while (low_mappings)
304 cpu_relax();
305 __flush_tlb_all();
306#endif
307
308 /* This must be done before setting cpu_online_mask */ 332 /* This must be done before setting cpu_online_mask */
309 set_cpu_sibling_map(raw_smp_processor_id()); 333 set_cpu_sibling_map(raw_smp_processor_id());
310 wmb(); 334 wmb();
@@ -750,6 +774,7 @@ do_rest:
750#ifdef CONFIG_X86_32 774#ifdef CONFIG_X86_32
751 /* Stack for startup_32 can be just as for start_secondary onwards */ 775 /* Stack for startup_32 can be just as for start_secondary onwards */
752 irq_ctx_init(cpu); 776 irq_ctx_init(cpu);
777 initial_page_table = __pa(&trampoline_pg_dir);
753#else 778#else
754 clear_tsk_thread_flag(c_idle.idle, TIF_FORK); 779 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
755 initial_gs = per_cpu_offset(cpu); 780 initial_gs = per_cpu_offset(cpu);
@@ -897,20 +922,8 @@ int __cpuinit native_cpu_up(unsigned int cpu)
897 922
898 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; 923 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
899 924
900#ifdef CONFIG_X86_32
901 /* init low mem mapping */
902 clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
903 min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
904 flush_tlb_all();
905 low_mappings = 1;
906
907 err = do_boot_cpu(apicid, cpu); 925 err = do_boot_cpu(apicid, cpu);
908 926
909 zap_low_mappings(false);
910 low_mappings = 0;
911#else
912 err = do_boot_cpu(apicid, cpu);
913#endif
914 if (err) { 927 if (err) {
915 pr_debug("do_boot_cpu failed %d\n", err); 928 pr_debug("do_boot_cpu failed %d\n", err);
916 return -EIO; 929 return -EIO;
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index c652ef62742d..a874495b3673 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -1,6 +1,7 @@
1#include <linux/io.h> 1#include <linux/io.h>
2 2
3#include <asm/trampoline.h> 3#include <asm/trampoline.h>
4#include <asm/pgtable.h>
4#include <asm/e820.h> 5#include <asm/e820.h>
5 6
6#if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP) 7#if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
@@ -37,3 +38,20 @@ unsigned long __trampinit setup_trampoline(void)
37 memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE); 38 memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
38 return virt_to_phys(trampoline_base); 39 return virt_to_phys(trampoline_base);
39} 40}
41
42void __init setup_trampoline_page_table(void)
43{
44#ifdef CONFIG_X86_32
45 /* Copy kernel address range */
46 clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
47 swapper_pg_dir + KERNEL_PGD_BOUNDARY,
48 min_t(unsigned long, KERNEL_PGD_PTRS,
49 KERNEL_PGD_BOUNDARY));
50
51 /* Initialize low mappings */
52 clone_pgd_range(trampoline_pg_dir,
53 swapper_pg_dir + KERNEL_PGD_BOUNDARY,
54 min_t(unsigned long, KERNEL_PGD_PTRS,
55 KERNEL_PGD_BOUNDARY));
56#endif
57}
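The two clone_pgd_range() calls above give the trampoline page directory the kernel mappings twice: once at their normal slots and once mirrored at the bottom, so a freshly started secondary CPU can enable paging while still executing at low (physical) addresses. A rough back-of-the-envelope for a non-PAE 32-bit kernel with PAGE_OFFSET = 0xC0000000; the PTRS_PER_PGD and pgd_index() values below are the usual ones for that configuration and are assumptions, not taken from the patch:

    #include <stdio.h>

    int main(void)
    {
            unsigned long ptrs_per_pgd = 1024;            /* non-PAE: 4 MiB per entry     */
            unsigned long page_offset  = 0xC0000000UL;
            unsigned long boundary = page_offset >> 22;   /* pgd_index(PAGE_OFFSET) = 768 */
            unsigned long kernel_ptrs = ptrs_per_pgd - boundary;                /* 256    */
            unsigned long copied = kernel_ptrs < boundary ? kernel_ptrs : boundary;

            printf("kernel entries copied at slots %lu..%lu and mirrored at 0..%lu\n",
                   boundary, boundary + copied - 1, copied - 1);
            return 0;
    }

Once the secondary CPU runs from the high mapping, start_secondary() switches to swapper_pg_dir (see the smpboot.c hunk above), which is why the old low_mappings handshake could be removed.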
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index ea24c1e51be2..2673a3d14806 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1588,7 +1588,7 @@ static const struct ata_port_info sata_dwc_port_info[] = {
1588 }, 1588 },
1589}; 1589};
1590 1590
1591static int sata_dwc_probe(struct of_device *ofdev, 1591static int sata_dwc_probe(struct platform_device *ofdev,
1592 const struct of_device_id *match) 1592 const struct of_device_id *match)
1593{ 1593{
1594 struct sata_dwc_device *hsdev; 1594 struct sata_dwc_device *hsdev;
@@ -1702,7 +1702,7 @@ error_out:
1702 return err; 1702 return err;
1703} 1703}
1704 1704
1705static int sata_dwc_remove(struct of_device *ofdev) 1705static int sata_dwc_remove(struct platform_device *ofdev)
1706{ 1706{
1707 struct device *dev = &ofdev->dev; 1707 struct device *dev = &ofdev->dev;
1708 struct ata_host *host = dev_get_drvdata(dev); 1708 struct ata_host *host = dev_get_drvdata(dev);
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 2982b3ee9465..057413bb16e2 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -94,6 +94,7 @@
94#include <linux/hdreg.h> 94#include <linux/hdreg.h>
95#include <linux/platform_device.h> 95#include <linux/platform_device.h>
96#if defined(CONFIG_OF) 96#if defined(CONFIG_OF)
97#include <linux/of_address.h>
97#include <linux/of_device.h> 98#include <linux/of_device.h>
98#include <linux/of_platform.h> 99#include <linux/of_platform.h>
99#endif 100#endif
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index ad46eae1f9bb..c350d01716bd 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -675,8 +675,8 @@ static int ptmx_open(struct inode *inode, struct file *filp)
675 } 675 }
676 676
677 set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ 677 set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
678 filp->private_data = tty; 678
679 file_move(filp, &tty->tty_files); 679 tty_add_file(tty, filp);
680 680
681 retval = devpts_pty_new(inode, tty->link); 681 retval = devpts_pty_new(inode, tty->link);
682 if (retval) 682 if (retval)
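The tty_io.c changes below hang a small per-open structure off file->private_data and dereference priv->tty, priv->file and priv->list. Its definition lives in include/linux/tty.h (changed elsewhere in this series but not shown in this section); the layout sketched here is inferred from those accesses and should be read as a reconstruction, not a quote of the header:

    /* Inferred from the accesses in the tty_io.c hunks below. */
    struct tty_file_private {
            struct tty_struct *tty;   /* tty this open file is attached to */
            struct file *file;        /* back-pointer to the open file */
            struct list_head list;    /* linked on tty->tty_files, under tty_files_lock */
    };

This replaces keeping tty files on the generic file-table list (file_move()/file_kill() under file_list_lock()), which is exactly what the pty.c hunk above converts away from.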
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 0350c42375a2..949067a0bd47 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -136,6 +136,9 @@ LIST_HEAD(tty_drivers); /* linked list of tty drivers */
136DEFINE_MUTEX(tty_mutex); 136DEFINE_MUTEX(tty_mutex);
137EXPORT_SYMBOL(tty_mutex); 137EXPORT_SYMBOL(tty_mutex);
138 138
139/* Spinlock to protect the tty->tty_files list */
140DEFINE_SPINLOCK(tty_files_lock);
141
139static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *); 142static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
140static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *); 143static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
141ssize_t redirected_tty_write(struct file *, const char __user *, 144ssize_t redirected_tty_write(struct file *, const char __user *,
@@ -185,6 +188,41 @@ void free_tty_struct(struct tty_struct *tty)
185 kfree(tty); 188 kfree(tty);
186} 189}
187 190
191static inline struct tty_struct *file_tty(struct file *file)
192{
193 return ((struct tty_file_private *)file->private_data)->tty;
194}
195
196/* Associate a new file with the tty structure */
197void tty_add_file(struct tty_struct *tty, struct file *file)
198{
199 struct tty_file_private *priv;
200
201 /* XXX: must implement proper error handling in callers */
202 priv = kmalloc(sizeof(*priv), GFP_KERNEL|__GFP_NOFAIL);
203
204 priv->tty = tty;
205 priv->file = file;
206 file->private_data = priv;
207
208 spin_lock(&tty_files_lock);
209 list_add(&priv->list, &tty->tty_files);
210 spin_unlock(&tty_files_lock);
211}
212
213/* Delete file from its tty */
214void tty_del_file(struct file *file)
215{
216 struct tty_file_private *priv = file->private_data;
217
218 spin_lock(&tty_files_lock);
219 list_del(&priv->list);
220 spin_unlock(&tty_files_lock);
221 file->private_data = NULL;
222 kfree(priv);
223}
224
225
188#define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base) 226#define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base)
189 227
190/** 228/**
@@ -235,11 +273,11 @@ static int check_tty_count(struct tty_struct *tty, const char *routine)
235 struct list_head *p; 273 struct list_head *p;
236 int count = 0; 274 int count = 0;
237 275
238 file_list_lock(); 276 spin_lock(&tty_files_lock);
239 list_for_each(p, &tty->tty_files) { 277 list_for_each(p, &tty->tty_files) {
240 count++; 278 count++;
241 } 279 }
242 file_list_unlock(); 280 spin_unlock(&tty_files_lock);
243 if (tty->driver->type == TTY_DRIVER_TYPE_PTY && 281 if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
244 tty->driver->subtype == PTY_TYPE_SLAVE && 282 tty->driver->subtype == PTY_TYPE_SLAVE &&
245 tty->link && tty->link->count) 283 tty->link && tty->link->count)
@@ -497,6 +535,7 @@ void __tty_hangup(struct tty_struct *tty)
497 struct file *cons_filp = NULL; 535 struct file *cons_filp = NULL;
498 struct file *filp, *f = NULL; 536 struct file *filp, *f = NULL;
499 struct task_struct *p; 537 struct task_struct *p;
538 struct tty_file_private *priv;
500 int closecount = 0, n; 539 int closecount = 0, n;
501 unsigned long flags; 540 unsigned long flags;
502 int refs = 0; 541 int refs = 0;
@@ -506,7 +545,7 @@ void __tty_hangup(struct tty_struct *tty)
506 545
507 546
508 spin_lock(&redirect_lock); 547 spin_lock(&redirect_lock);
509 if (redirect && redirect->private_data == tty) { 548 if (redirect && file_tty(redirect) == tty) {
510 f = redirect; 549 f = redirect;
511 redirect = NULL; 550 redirect = NULL;
512 } 551 }
@@ -519,9 +558,10 @@ void __tty_hangup(struct tty_struct *tty)
519 workqueue with the lock held */ 558 workqueue with the lock held */
520 check_tty_count(tty, "tty_hangup"); 559 check_tty_count(tty, "tty_hangup");
521 560
522 file_list_lock(); 561 spin_lock(&tty_files_lock);
523 /* This breaks for file handles being sent over AF_UNIX sockets ? */ 562 /* This breaks for file handles being sent over AF_UNIX sockets ? */
524 list_for_each_entry(filp, &tty->tty_files, f_u.fu_list) { 563 list_for_each_entry(priv, &tty->tty_files, list) {
564 filp = priv->file;
525 if (filp->f_op->write == redirected_tty_write) 565 if (filp->f_op->write == redirected_tty_write)
526 cons_filp = filp; 566 cons_filp = filp;
527 if (filp->f_op->write != tty_write) 567 if (filp->f_op->write != tty_write)
@@ -530,7 +570,7 @@ void __tty_hangup(struct tty_struct *tty)
530 __tty_fasync(-1, filp, 0); /* can't block */ 570 __tty_fasync(-1, filp, 0); /* can't block */
531 filp->f_op = &hung_up_tty_fops; 571 filp->f_op = &hung_up_tty_fops;
532 } 572 }
533 file_list_unlock(); 573 spin_unlock(&tty_files_lock);
534 574
535 tty_ldisc_hangup(tty); 575 tty_ldisc_hangup(tty);
536 576
@@ -889,12 +929,10 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
889 loff_t *ppos) 929 loff_t *ppos)
890{ 930{
891 int i; 931 int i;
892 struct tty_struct *tty; 932 struct inode *inode = file->f_path.dentry->d_inode;
893 struct inode *inode; 933 struct tty_struct *tty = file_tty(file);
894 struct tty_ldisc *ld; 934 struct tty_ldisc *ld;
895 935
896 tty = file->private_data;
897 inode = file->f_path.dentry->d_inode;
898 if (tty_paranoia_check(tty, inode, "tty_read")) 936 if (tty_paranoia_check(tty, inode, "tty_read"))
899 return -EIO; 937 return -EIO;
900 if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags))) 938 if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))
@@ -1065,12 +1103,11 @@ void tty_write_message(struct tty_struct *tty, char *msg)
1065static ssize_t tty_write(struct file *file, const char __user *buf, 1103static ssize_t tty_write(struct file *file, const char __user *buf,
1066 size_t count, loff_t *ppos) 1104 size_t count, loff_t *ppos)
1067{ 1105{
1068 struct tty_struct *tty;
1069 struct inode *inode = file->f_path.dentry->d_inode; 1106 struct inode *inode = file->f_path.dentry->d_inode;
1107 struct tty_struct *tty = file_tty(file);
1108 struct tty_ldisc *ld;
1070 ssize_t ret; 1109 ssize_t ret;
1071 struct tty_ldisc *ld;
1072 1110
1073 tty = file->private_data;
1074 if (tty_paranoia_check(tty, inode, "tty_write")) 1111 if (tty_paranoia_check(tty, inode, "tty_write"))
1075 return -EIO; 1112 return -EIO;
1076 if (!tty || !tty->ops->write || 1113 if (!tty || !tty->ops->write ||
@@ -1424,9 +1461,9 @@ static void release_one_tty(struct work_struct *work)
1424 tty_driver_kref_put(driver); 1461 tty_driver_kref_put(driver);
1425 module_put(driver->owner); 1462 module_put(driver->owner);
1426 1463
1427 file_list_lock(); 1464 spin_lock(&tty_files_lock);
1428 list_del_init(&tty->tty_files); 1465 list_del_init(&tty->tty_files);
1429 file_list_unlock(); 1466 spin_unlock(&tty_files_lock);
1430 1467
1431 put_pid(tty->pgrp); 1468 put_pid(tty->pgrp);
1432 put_pid(tty->session); 1469 put_pid(tty->session);
@@ -1507,13 +1544,13 @@ static void release_tty(struct tty_struct *tty, int idx)
1507 1544
1508int tty_release(struct inode *inode, struct file *filp) 1545int tty_release(struct inode *inode, struct file *filp)
1509{ 1546{
1510 struct tty_struct *tty, *o_tty; 1547 struct tty_struct *tty = file_tty(filp);
1548 struct tty_struct *o_tty;
1511 int pty_master, tty_closing, o_tty_closing, do_sleep; 1549 int pty_master, tty_closing, o_tty_closing, do_sleep;
1512 int devpts; 1550 int devpts;
1513 int idx; 1551 int idx;
1514 char buf[64]; 1552 char buf[64];
1515 1553
1516 tty = filp->private_data;
1517 if (tty_paranoia_check(tty, inode, "tty_release_dev")) 1554 if (tty_paranoia_check(tty, inode, "tty_release_dev"))
1518 return 0; 1555 return 0;
1519 1556
@@ -1671,8 +1708,7 @@ int tty_release(struct inode *inode, struct file *filp)
1671 * - do_tty_hangup no longer sees this file descriptor as 1708 * - do_tty_hangup no longer sees this file descriptor as
1672 * something that needs to be handled for hangups. 1709 * something that needs to be handled for hangups.
1673 */ 1710 */
1674 file_kill(filp); 1711 tty_del_file(filp);
1675 filp->private_data = NULL;
1676 1712
1677 /* 1713 /*
1678 * Perform some housekeeping before deciding whether to return. 1714 * Perform some housekeeping before deciding whether to return.
@@ -1839,8 +1875,8 @@ got_driver:
1839 return PTR_ERR(tty); 1875 return PTR_ERR(tty);
1840 } 1876 }
1841 1877
1842 filp->private_data = tty; 1878 tty_add_file(tty, filp);
1843 file_move(filp, &tty->tty_files); 1879
1844 check_tty_count(tty, "tty_open"); 1880 check_tty_count(tty, "tty_open");
1845 if (tty->driver->type == TTY_DRIVER_TYPE_PTY && 1881 if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
1846 tty->driver->subtype == PTY_TYPE_MASTER) 1882 tty->driver->subtype == PTY_TYPE_MASTER)
@@ -1916,11 +1952,10 @@ got_driver:
1916 1952
1917static unsigned int tty_poll(struct file *filp, poll_table *wait) 1953static unsigned int tty_poll(struct file *filp, poll_table *wait)
1918{ 1954{
1919 struct tty_struct *tty; 1955 struct tty_struct *tty = file_tty(filp);
1920 struct tty_ldisc *ld; 1956 struct tty_ldisc *ld;
1921 int ret = 0; 1957 int ret = 0;
1922 1958
1923 tty = filp->private_data;
1924 if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_poll")) 1959 if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_poll"))
1925 return 0; 1960 return 0;
1926 1961
@@ -1933,11 +1968,10 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait)
1933 1968
1934static int __tty_fasync(int fd, struct file *filp, int on) 1969static int __tty_fasync(int fd, struct file *filp, int on)
1935{ 1970{
1936 struct tty_struct *tty; 1971 struct tty_struct *tty = file_tty(filp);
1937 unsigned long flags; 1972 unsigned long flags;
1938 int retval = 0; 1973 int retval = 0;
1939 1974
1940 tty = filp->private_data;
1941 if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync")) 1975 if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync"))
1942 goto out; 1976 goto out;
1943 1977
@@ -2491,13 +2525,13 @@ EXPORT_SYMBOL(tty_pair_get_pty);
2491 */ 2525 */
2492long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2526long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2493{ 2527{
2494 struct tty_struct *tty, *real_tty; 2528 struct tty_struct *tty = file_tty(file);
2529 struct tty_struct *real_tty;
2495 void __user *p = (void __user *)arg; 2530 void __user *p = (void __user *)arg;
2496 int retval; 2531 int retval;
2497 struct tty_ldisc *ld; 2532 struct tty_ldisc *ld;
2498 struct inode *inode = file->f_dentry->d_inode; 2533 struct inode *inode = file->f_dentry->d_inode;
2499 2534
2500 tty = file->private_data;
2501 if (tty_paranoia_check(tty, inode, "tty_ioctl")) 2535 if (tty_paranoia_check(tty, inode, "tty_ioctl"))
2502 return -EINVAL; 2536 return -EINVAL;
2503 2537
@@ -2619,7 +2653,7 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
2619 unsigned long arg) 2653 unsigned long arg)
2620{ 2654{
2621 struct inode *inode = file->f_dentry->d_inode; 2655 struct inode *inode = file->f_dentry->d_inode;
2622 struct tty_struct *tty = file->private_data; 2656 struct tty_struct *tty = file_tty(file);
2623 struct tty_ldisc *ld; 2657 struct tty_ldisc *ld;
2624 int retval = -ENOIOCTLCMD; 2658 int retval = -ENOIOCTLCMD;
2625 2659
@@ -2711,7 +2745,7 @@ void __do_SAK(struct tty_struct *tty)
2711 if (!filp) 2745 if (!filp)
2712 continue; 2746 continue;
2713 if (filp->f_op->read == tty_read && 2747 if (filp->f_op->read == tty_read &&
2714 filp->private_data == tty) { 2748 file_tty(filp) == tty) {
2715 printk(KERN_NOTICE "SAK: killed process %d" 2749 printk(KERN_NOTICE "SAK: killed process %d"
2716 " (%s): fd#%d opened to the tty\n", 2750 " (%s): fd#%d opened to the tty\n",
2717 task_pid_nr(p), p->comm, i); 2751 task_pid_nr(p), p->comm, i);
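
Note: the tty_io.c hunks above replace the VFS-global file_list_lock with a tty-local tty_files_lock spinlock, and reach the tty through a small per-open private structure plus the file_tty() accessor rather than dereferencing file->private_data directly. The userspace C sketch below mirrors only that shape; the names (fake_tty, handle, handle_private) are invented and a pthread mutex stands in for the kernel spinlock.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fake_tty;

    struct handle_private {              /* cf. struct tty_file_private */
        struct fake_tty *tty;
        struct handle_private *next;
    };

    struct handle {                      /* stands in for struct file */
        struct handle_private *priv;     /* cf. file->private_data */
    };

    struct fake_tty {
        pthread_mutex_t files_lock;      /* cf. tty_files_lock (a spinlock there) */
        struct handle_private *files;    /* open handles on this tty */
    };

    static struct fake_tty *handle_tty(struct handle *h)    /* cf. file_tty() */
    {
        return h->priv->tty;
    }

    static void tty_add_handle(struct fake_tty *tty, struct handle *h)
    {
        struct handle_private *priv = malloc(sizeof(*priv));

        if (!priv)
            abort();                     /* keep the sketch simple */
        priv->tty = tty;
        h->priv = priv;
        pthread_mutex_lock(&tty->files_lock);
        priv->next = tty->files;         /* the list lives entirely inside the tty */
        tty->files = priv;
        pthread_mutex_unlock(&tty->files_lock);
    }

    static int tty_count_handles(struct fake_tty *tty)      /* cf. check_tty_count() */
    {
        struct handle_private *p;
        int count = 0;

        pthread_mutex_lock(&tty->files_lock);
        for (p = tty->files; p; p = p->next)
            count++;
        pthread_mutex_unlock(&tty->files_lock);
        return count;
    }

    int main(void)
    {
        struct fake_tty tty = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct handle a, b;

        tty_add_handle(&tty, &a);
        tty_add_handle(&tty, &b);
        printf("%s: %d open handles\n",
               handle_tty(&a) == &tty ? "ok" : "bug", tty_count_handles(&tty));
        return 0;
    }
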
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 0ed763cd2e77..b663d573aad9 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -94,6 +94,7 @@
94 94
95#ifdef CONFIG_OF 95#ifdef CONFIG_OF
96/* For open firmware. */ 96/* For open firmware. */
97#include <linux/of_address.h>
97#include <linux/of_device.h> 98#include <linux/of_device.h>
98#include <linux/of_platform.h> 99#include <linux/of_platform.h>
99#endif 100#endif
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index e635199a0cd2..0c52899be964 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1299,6 +1299,7 @@ static const struct hid_device_id hid_blacklist[] = {
1299 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) }, 1299 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
1300 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) }, 1300 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
1301 { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) }, 1301 { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
1302 { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
1302 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, 1303 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
1303 { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) }, 1304 { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
1304 { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) }, 1305 { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c
index f44bdc084cb2..8ca7f65cf2f8 100644
--- a/drivers/hid/hid-egalax.c
+++ b/drivers/hid/hid-egalax.c
@@ -159,6 +159,13 @@ static int egalax_event(struct hid_device *hid, struct hid_field *field,
159{ 159{
160 struct egalax_data *td = hid_get_drvdata(hid); 160 struct egalax_data *td = hid_get_drvdata(hid);
161 161
162 /* Note, eGalax has two product lines: the first is resistive and
163 * uses a standard parallel multitouch protocol (product ID ==
164 * 48xx). The second is capacitive and uses an unusual "serial"
165 * protocol with a different message for each multitouch finger
166 * (product ID == 72xx). We do not yet generate a correct event
167 * sequence for the capacitive/serial protocol.
168 */
162 if (hid->claimed & HID_CLAIMED_INPUT) { 169 if (hid->claimed & HID_CLAIMED_INPUT) {
163 struct input_dev *input = field->hidinput->input; 170 struct input_dev *input = field->hidinput->input;
164 171
@@ -246,6 +253,8 @@ static void egalax_remove(struct hid_device *hdev)
246static const struct hid_device_id egalax_devices[] = { 253static const struct hid_device_id egalax_devices[] = {
247 { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, 254 { HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
248 USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) }, 255 USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
256 { HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
257 USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
249 { } 258 { }
250}; 259};
251MODULE_DEVICE_TABLE(hid, egalax_devices); 260MODULE_DEVICE_TABLE(hid, egalax_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index d3fc13ae094d..85c6d13c9ffa 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -188,6 +188,7 @@
188#define USB_VENDOR_ID_DWAV 0x0eef 188#define USB_VENDOR_ID_DWAV 0x0eef
189#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001 189#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
190#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d 190#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d
191#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1 0x720c
191 192
192#define USB_VENDOR_ID_ELECOM 0x056e 193#define USB_VENDOR_ID_ELECOM 0x056e
193#define USB_DEVICE_ID_ELECOM_BM084 0x0061 194#define USB_DEVICE_ID_ELECOM_BM084 0x0061
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
index 346f0e34987e..bc2e07740628 100644
--- a/drivers/hid/hid-picolcd.c
+++ b/drivers/hid/hid-picolcd.c
@@ -547,11 +547,11 @@ static void picolcd_fb_destroy(struct fb_info *info)
547 ref_cnt--; 547 ref_cnt--;
548 mutex_lock(&info->lock); 548 mutex_lock(&info->lock);
549 (*ref_cnt)--; 549 (*ref_cnt)--;
550 may_release = !ref_cnt; 550 may_release = !*ref_cnt;
551 mutex_unlock(&info->lock); 551 mutex_unlock(&info->lock);
552 if (may_release) { 552 if (may_release) {
553 framebuffer_release(info);
554 vfree((u8 *)info->fix.smem_start); 553 vfree((u8 *)info->fix.smem_start);
554 framebuffer_release(info);
555 } 555 }
556} 556}
557 557
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 254a003af048..0a29c51114aa 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -266,13 +266,15 @@ static int hiddev_open(struct inode *inode, struct file *file)
266{ 266{
267 struct hiddev_list *list; 267 struct hiddev_list *list;
268 struct usb_interface *intf; 268 struct usb_interface *intf;
269 struct hid_device *hid;
269 struct hiddev *hiddev; 270 struct hiddev *hiddev;
270 int res; 271 int res;
271 272
272 intf = usb_find_interface(&hiddev_driver, iminor(inode)); 273 intf = usb_find_interface(&hiddev_driver, iminor(inode));
273 if (!intf) 274 if (!intf)
274 return -ENODEV; 275 return -ENODEV;
275 hiddev = usb_get_intfdata(intf); 276 hid = usb_get_intfdata(intf);
277 hiddev = hid->hiddev;
276 278
277 if (!(list = kzalloc(sizeof(struct hiddev_list), GFP_KERNEL))) 279 if (!(list = kzalloc(sizeof(struct hiddev_list), GFP_KERNEL)))
278 return -ENOMEM; 280 return -ENOMEM;
@@ -587,7 +589,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
587 struct hiddev_list *list = file->private_data; 589 struct hiddev_list *list = file->private_data;
588 struct hiddev *hiddev = list->hiddev; 590 struct hiddev *hiddev = list->hiddev;
589 struct hid_device *hid = hiddev->hid; 591 struct hid_device *hid = hiddev->hid;
590 struct usb_device *dev = hid_to_usb_dev(hid); 592 struct usb_device *dev;
591 struct hiddev_collection_info cinfo; 593 struct hiddev_collection_info cinfo;
592 struct hiddev_report_info rinfo; 594 struct hiddev_report_info rinfo;
593 struct hiddev_field_info finfo; 595 struct hiddev_field_info finfo;
@@ -601,9 +603,11 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
601 /* Called without BKL by compat methods so no BKL taken */ 603 /* Called without BKL by compat methods so no BKL taken */
602 604
603 /* FIXME: Who or what stop this racing with a disconnect ?? */ 605 /* FIXME: Who or what stop this racing with a disconnect ?? */
604 if (!hiddev->exist) 606 if (!hiddev->exist || !hid)
605 return -EIO; 607 return -EIO;
606 608
609 dev = hid_to_usb_dev(hid);
610
607 switch (cmd) { 611 switch (cmd) {
608 612
609 case HIDIOCGVERSION: 613 case HIDIOCGVERSION:
@@ -888,7 +892,6 @@ int hiddev_connect(struct hid_device *hid, unsigned int force)
888 hid->hiddev = hiddev; 892 hid->hiddev = hiddev;
889 hiddev->hid = hid; 893 hiddev->hid = hid;
890 hiddev->exist = 1; 894 hiddev->exist = 1;
891 usb_set_intfdata(usbhid->intf, usbhid);
892 retval = usb_register_dev(usbhid->intf, &hiddev_class); 895 retval = usb_register_dev(usbhid->intf, &hiddev_class);
893 if (retval) { 896 if (retval) {
894 err_hid("Not able to get a minor for this device."); 897 err_hid("Not able to get a minor for this device.");
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 0fba82943125..4d4d09bdec0a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -332,11 +332,11 @@ config SENSORS_F71805F
332 will be called f71805f. 332 will be called f71805f.
333 333
334config SENSORS_F71882FG 334config SENSORS_F71882FG
335 tristate "Fintek F71808E, F71858FG, F71862FG, F71882FG, F71889FG and F8000" 335 tristate "Fintek F71858FG, F71862FG, F71882FG, F71889FG and F8000"
336 depends on EXPERIMENTAL 336 depends on EXPERIMENTAL
337 help 337 help
338 If you say yes here you get support for hardware monitoring features 338 If you say yes here you get support for hardware monitoring
339 of the Fintek F71808E, F71858FG, F71862FG/71863FG, F71882FG/F71883FG, 339 features of the Fintek F71858FG, F71862FG/71863FG, F71882FG/F71883FG,
340 F71889FG and F8000 Super-I/O chips. 340 F71889FG and F8000 Super-I/O chips.
341 341
342 This driver can also be built as a module. If so, the module 342 This driver can also be built as a module. If so, the module
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 6207120dcd4d..537841ef44b9 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -45,7 +45,6 @@
45#define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */ 45#define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */
46 46
47#define SIO_FINTEK_ID 0x1934 /* Manufacturers ID */ 47#define SIO_FINTEK_ID 0x1934 /* Manufacturers ID */
48#define SIO_F71808_ID 0x0901 /* Chipset ID */
49#define SIO_F71858_ID 0x0507 /* Chipset ID */ 48#define SIO_F71858_ID 0x0507 /* Chipset ID */
50#define SIO_F71862_ID 0x0601 /* Chipset ID */ 49#define SIO_F71862_ID 0x0601 /* Chipset ID */
51#define SIO_F71882_ID 0x0541 /* Chipset ID */ 50#define SIO_F71882_ID 0x0541 /* Chipset ID */
@@ -97,10 +96,9 @@ static unsigned short force_id;
97module_param(force_id, ushort, 0); 96module_param(force_id, ushort, 0);
98MODULE_PARM_DESC(force_id, "Override the detected device ID"); 97MODULE_PARM_DESC(force_id, "Override the detected device ID");
99 98
100enum chips { f71808fg, f71858fg, f71862fg, f71882fg, f71889fg, f8000 }; 99enum chips { f71858fg, f71862fg, f71882fg, f71889fg, f8000 };
101 100
102static const char *f71882fg_names[] = { 101static const char *f71882fg_names[] = {
103 "f71808fg",
104 "f71858fg", 102 "f71858fg",
105 "f71862fg", 103 "f71862fg",
106 "f71882fg", 104 "f71882fg",
@@ -308,8 +306,8 @@ static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = {
308 SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2), 306 SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
309}; 307};
310 308
311/* In attr common to the f71862fg, f71882fg and f71889fg */ 309/* Temp and in attr common to the f71862fg, f71882fg and f71889fg */
312static struct sensor_device_attribute_2 fxxxx_in_attr[] = { 310static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = {
313 SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0), 311 SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
314 SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1), 312 SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
315 SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2), 313 SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
@@ -319,22 +317,6 @@ static struct sensor_device_attribute_2 fxxxx_in_attr[] = {
319 SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6), 317 SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6),
320 SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7), 318 SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7),
321 SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8), 319 SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8),
322};
323
324/* In attr for the f71808fg */
325static struct sensor_device_attribute_2 f71808_in_attr[] = {
326 SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
327 SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
328 SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
329 SENSOR_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 0, 3),
330 SENSOR_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 0, 4),
331 SENSOR_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 0, 5),
332 SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 7),
333 SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 8),
334};
335
336/* Temp attr common to the f71808fg, f71862fg, f71882fg and f71889fg */
337static struct sensor_device_attribute_2 fxxxx_temp_attr[] = {
338 SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 1), 320 SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 1),
339 SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max, 321 SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max,
340 store_temp_max, 0, 1), 322 store_temp_max, 0, 1),
@@ -373,10 +355,6 @@ static struct sensor_device_attribute_2 fxxxx_temp_attr[] = {
373 store_temp_beep, 0, 6), 355 store_temp_beep, 0, 6),
374 SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 2), 356 SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 2),
375 SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 2), 357 SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
376};
377
378/* Temp and in attr common to the f71862fg, f71882fg and f71889fg */
379static struct sensor_device_attribute_2 f71862_temp_attr[] = {
380 SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 3), 358 SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 3),
381 SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max, 359 SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max,
382 store_temp_max, 0, 3), 360 store_temp_max, 0, 3),
@@ -1011,11 +989,6 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
1011 data->temp_type[1] = 6; 989 data->temp_type[1] = 6;
1012 break; 990 break;
1013 } 991 }
1014 } else if (data->type == f71808fg) {
1015 reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE);
1016 data->temp_type[1] = (reg & 0x02) ? 2 : 4;
1017 data->temp_type[2] = (reg & 0x04) ? 2 : 4;
1018
1019 } else { 992 } else {
1020 reg2 = f71882fg_read8(data, F71882FG_REG_PECI); 993 reg2 = f71882fg_read8(data, F71882FG_REG_PECI);
1021 if ((reg2 & 0x03) == 0x01) 994 if ((reg2 & 0x03) == 0x01)
@@ -1898,8 +1871,7 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
1898 1871
1899 val /= 1000; 1872 val /= 1000;
1900 1873
1901 if (data->type == f71889fg 1874 if (data->type == f71889fg)
1902 || data->type == f71808fg)
1903 val = SENSORS_LIMIT(val, -128, 127); 1875 val = SENSORS_LIMIT(val, -128, 127);
1904 else 1876 else
1905 val = SENSORS_LIMIT(val, 0, 127); 1877 val = SENSORS_LIMIT(val, 0, 127);
@@ -2002,28 +1974,8 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
2002 /* fall through! */ 1974 /* fall through! */
2003 case f71862fg: 1975 case f71862fg:
2004 err = f71882fg_create_sysfs_files(pdev, 1976 err = f71882fg_create_sysfs_files(pdev,
2005 f71862_temp_attr, 1977 fxxxx_in_temp_attr,
2006 ARRAY_SIZE(f71862_temp_attr)); 1978 ARRAY_SIZE(fxxxx_in_temp_attr));
2007 if (err)
2008 goto exit_unregister_sysfs;
2009 err = f71882fg_create_sysfs_files(pdev,
2010 fxxxx_in_attr,
2011 ARRAY_SIZE(fxxxx_in_attr));
2012 if (err)
2013 goto exit_unregister_sysfs;
2014 err = f71882fg_create_sysfs_files(pdev,
2015 fxxxx_temp_attr,
2016 ARRAY_SIZE(fxxxx_temp_attr));
2017 break;
2018 case f71808fg:
2019 err = f71882fg_create_sysfs_files(pdev,
2020 f71808_in_attr,
2021 ARRAY_SIZE(f71808_in_attr));
2022 if (err)
2023 goto exit_unregister_sysfs;
2024 err = f71882fg_create_sysfs_files(pdev,
2025 fxxxx_temp_attr,
2026 ARRAY_SIZE(fxxxx_temp_attr));
2027 break; 1979 break;
2028 case f8000: 1980 case f8000:
2029 err = f71882fg_create_sysfs_files(pdev, 1981 err = f71882fg_create_sysfs_files(pdev,
@@ -2050,7 +2002,6 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
2050 case f71862fg: 2002 case f71862fg:
2051 err = (data->pwm_enable & 0x15) != 0x15; 2003 err = (data->pwm_enable & 0x15) != 0x15;
2052 break; 2004 break;
2053 case f71808fg:
2054 case f71882fg: 2005 case f71882fg:
2055 case f71889fg: 2006 case f71889fg:
2056 err = 0; 2007 err = 0;
@@ -2096,7 +2047,6 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
2096 f8000_auto_pwm_attr, 2047 f8000_auto_pwm_attr,
2097 ARRAY_SIZE(f8000_auto_pwm_attr)); 2048 ARRAY_SIZE(f8000_auto_pwm_attr));
2098 break; 2049 break;
2099 case f71808fg:
2100 case f71889fg: 2050 case f71889fg:
2101 for (i = 0; i < nr_fans; i++) { 2051 for (i = 0; i < nr_fans; i++) {
2102 data->pwm_auto_point_mapping[i] = 2052 data->pwm_auto_point_mapping[i] =
@@ -2176,22 +2126,8 @@ static int f71882fg_remove(struct platform_device *pdev)
2176 /* fall through! */ 2126 /* fall through! */
2177 case f71862fg: 2127 case f71862fg:
2178 f71882fg_remove_sysfs_files(pdev, 2128 f71882fg_remove_sysfs_files(pdev,
2179 f71862_temp_attr, 2129 fxxxx_in_temp_attr,
2180 ARRAY_SIZE(f71862_temp_attr)); 2130 ARRAY_SIZE(fxxxx_in_temp_attr));
2181 f71882fg_remove_sysfs_files(pdev,
2182 fxxxx_in_attr,
2183 ARRAY_SIZE(fxxxx_in_attr));
2184 f71882fg_remove_sysfs_files(pdev,
2185 fxxxx_temp_attr,
2186 ARRAY_SIZE(fxxxx_temp_attr));
2187 break;
2188 case f71808fg:
2189 f71882fg_remove_sysfs_files(pdev,
2190 f71808_in_attr,
2191 ARRAY_SIZE(f71808_in_attr));
2192 f71882fg_remove_sysfs_files(pdev,
2193 fxxxx_temp_attr,
2194 ARRAY_SIZE(fxxxx_temp_attr));
2195 break; 2131 break;
2196 case f8000: 2132 case f8000:
2197 f71882fg_remove_sysfs_files(pdev, 2133 f71882fg_remove_sysfs_files(pdev,
@@ -2259,9 +2195,6 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
2259 2195
2260 devid = force_id ? force_id : superio_inw(sioaddr, SIO_REG_DEVID); 2196 devid = force_id ? force_id : superio_inw(sioaddr, SIO_REG_DEVID);
2261 switch (devid) { 2197 switch (devid) {
2262 case SIO_F71808_ID:
2263 sio_data->type = f71808fg;
2264 break;
2265 case SIO_F71858_ID: 2198 case SIO_F71858_ID:
2266 sio_data->type = f71858fg; 2199 sio_data->type = f71858fg;
2267 break; 2200 break;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 11567c7999a2..c148b6302154 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2136,16 +2136,6 @@ static void sync_sbs(mddev_t * mddev, int nospares)
2136 * with the rest of the array) 2136 * with the rest of the array)
2137 */ 2137 */
2138 mdk_rdev_t *rdev; 2138 mdk_rdev_t *rdev;
2139
2140 /* First make sure individual recovery_offsets are correct */
2141 list_for_each_entry(rdev, &mddev->disks, same_set) {
2142 if (rdev->raid_disk >= 0 &&
2143 mddev->delta_disks >= 0 &&
2144 !test_bit(In_sync, &rdev->flags) &&
2145 mddev->curr_resync_completed > rdev->recovery_offset)
2146 rdev->recovery_offset = mddev->curr_resync_completed;
2147
2148 }
2149 list_for_each_entry(rdev, &mddev->disks, same_set) { 2139 list_for_each_entry(rdev, &mddev->disks, same_set) {
2150 if (rdev->sb_events == mddev->events || 2140 if (rdev->sb_events == mddev->events ||
2151 (nospares && 2141 (nospares &&
@@ -2167,12 +2157,27 @@ static void md_update_sb(mddev_t * mddev, int force_change)
2167 int sync_req; 2157 int sync_req;
2168 int nospares = 0; 2158 int nospares = 0;
2169 2159
2170 mddev->utime = get_seconds();
2171 if (mddev->external)
2172 return;
2173repeat: 2160repeat:
2161 /* First make sure individual recovery_offsets are correct */
2162 list_for_each_entry(rdev, &mddev->disks, same_set) {
2163 if (rdev->raid_disk >= 0 &&
2164 mddev->delta_disks >= 0 &&
2165 !test_bit(In_sync, &rdev->flags) &&
2166 mddev->curr_resync_completed > rdev->recovery_offset)
2167 rdev->recovery_offset = mddev->curr_resync_completed;
2168
2169 }
2170 if (mddev->external || !mddev->persistent) {
2171 clear_bit(MD_CHANGE_DEVS, &mddev->flags);
2172 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2173 wake_up(&mddev->sb_wait);
2174 return;
2175 }
2176
2174 spin_lock_irq(&mddev->write_lock); 2177 spin_lock_irq(&mddev->write_lock);
2175 2178
2179 mddev->utime = get_seconds();
2180
2176 set_bit(MD_CHANGE_PENDING, &mddev->flags); 2181 set_bit(MD_CHANGE_PENDING, &mddev->flags);
2177 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) 2182 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
2178 force_change = 1; 2183 force_change = 1;
@@ -2221,19 +2226,6 @@ repeat:
2221 MD_BUG(); 2226 MD_BUG();
2222 mddev->events --; 2227 mddev->events --;
2223 } 2228 }
2224
2225 /*
2226 * do not write anything to disk if using
2227 * nonpersistent superblocks
2228 */
2229 if (!mddev->persistent) {
2230 if (!mddev->external)
2231 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2232
2233 spin_unlock_irq(&mddev->write_lock);
2234 wake_up(&mddev->sb_wait);
2235 return;
2236 }
2237 sync_sbs(mddev, nospares); 2229 sync_sbs(mddev, nospares);
2238 spin_unlock_irq(&mddev->write_lock); 2230 spin_unlock_irq(&mddev->write_lock);
2239 2231
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 73cc74ffc26b..ad83a4dcadc3 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -787,8 +787,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
787 struct bio_list bl; 787 struct bio_list bl;
788 struct page **behind_pages = NULL; 788 struct page **behind_pages = NULL;
789 const int rw = bio_data_dir(bio); 789 const int rw = bio_data_dir(bio);
790 const bool do_sync = (bio->bi_rw & REQ_SYNC); 790 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
791 bool do_barriers; 791 unsigned long do_barriers;
792 mdk_rdev_t *blocked_rdev; 792 mdk_rdev_t *blocked_rdev;
793 793
794 /* 794 /*
@@ -1120,6 +1120,8 @@ static int raid1_spare_active(mddev_t *mddev)
1120{ 1120{
1121 int i; 1121 int i;
1122 conf_t *conf = mddev->private; 1122 conf_t *conf = mddev->private;
1123 int count = 0;
1124 unsigned long flags;
1123 1125
1124 /* 1126 /*
1125 * Find all failed disks within the RAID1 configuration 1127 * Find all failed disks within the RAID1 configuration
@@ -1131,15 +1133,16 @@ static int raid1_spare_active(mddev_t *mddev)
1131 if (rdev 1133 if (rdev
1132 && !test_bit(Faulty, &rdev->flags) 1134 && !test_bit(Faulty, &rdev->flags)
1133 && !test_and_set_bit(In_sync, &rdev->flags)) { 1135 && !test_and_set_bit(In_sync, &rdev->flags)) {
1134 unsigned long flags; 1136 count++;
1135 spin_lock_irqsave(&conf->device_lock, flags); 1137 sysfs_notify_dirent(rdev->sysfs_state);
1136 mddev->degraded--;
1137 spin_unlock_irqrestore(&conf->device_lock, flags);
1138 } 1138 }
1139 } 1139 }
1140 spin_lock_irqsave(&conf->device_lock, flags);
1141 mddev->degraded -= count;
1142 spin_unlock_irqrestore(&conf->device_lock, flags);
1140 1143
1141 print_conf(conf); 1144 print_conf(conf);
1142 return 0; 1145 return count;
1143} 1146}
1144 1147
1145 1148
@@ -1640,7 +1643,7 @@ static void raid1d(mddev_t *mddev)
1640 * We already have a nr_pending reference on these rdevs. 1643 * We already have a nr_pending reference on these rdevs.
1641 */ 1644 */
1642 int i; 1645 int i;
1643 const bool do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC); 1646 const unsigned long do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
1644 clear_bit(R1BIO_BarrierRetry, &r1_bio->state); 1647 clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
1645 clear_bit(R1BIO_Barrier, &r1_bio->state); 1648 clear_bit(R1BIO_Barrier, &r1_bio->state);
1646 for (i=0; i < conf->raid_disks; i++) 1649 for (i=0; i < conf->raid_disks; i++)
@@ -1696,7 +1699,7 @@ static void raid1d(mddev_t *mddev)
1696 (unsigned long long)r1_bio->sector); 1699 (unsigned long long)r1_bio->sector);
1697 raid_end_bio_io(r1_bio); 1700 raid_end_bio_io(r1_bio);
1698 } else { 1701 } else {
1699 const bool do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC; 1702 const unsigned long do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
1700 r1_bio->bios[r1_bio->read_disk] = 1703 r1_bio->bios[r1_bio->read_disk] =
1701 mddev->ro ? IO_BLOCKED : NULL; 1704 mddev->ro ? IO_BLOCKED : NULL;
1702 r1_bio->read_disk = disk; 1705 r1_bio->read_disk = disk;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a88aeb5198c7..84718383124d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -799,7 +799,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
799 int i; 799 int i;
800 int chunk_sects = conf->chunk_mask + 1; 800 int chunk_sects = conf->chunk_mask + 1;
801 const int rw = bio_data_dir(bio); 801 const int rw = bio_data_dir(bio);
802 const bool do_sync = (bio->bi_rw & REQ_SYNC); 802 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
803 struct bio_list bl; 803 struct bio_list bl;
804 unsigned long flags; 804 unsigned long flags;
805 mdk_rdev_t *blocked_rdev; 805 mdk_rdev_t *blocked_rdev;
@@ -1116,6 +1116,8 @@ static int raid10_spare_active(mddev_t *mddev)
1116 int i; 1116 int i;
1117 conf_t *conf = mddev->private; 1117 conf_t *conf = mddev->private;
1118 mirror_info_t *tmp; 1118 mirror_info_t *tmp;
1119 int count = 0;
1120 unsigned long flags;
1119 1121
1120 /* 1122 /*
1121 * Find all non-in_sync disks within the RAID10 configuration 1123 * Find all non-in_sync disks within the RAID10 configuration
@@ -1126,15 +1128,16 @@ static int raid10_spare_active(mddev_t *mddev)
1126 if (tmp->rdev 1128 if (tmp->rdev
1127 && !test_bit(Faulty, &tmp->rdev->flags) 1129 && !test_bit(Faulty, &tmp->rdev->flags)
1128 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 1130 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1129 unsigned long flags; 1131 count++;
1130 spin_lock_irqsave(&conf->device_lock, flags); 1132 sysfs_notify_dirent(tmp->rdev->sysfs_state);
1131 mddev->degraded--;
1132 spin_unlock_irqrestore(&conf->device_lock, flags);
1133 } 1133 }
1134 } 1134 }
1135 spin_lock_irqsave(&conf->device_lock, flags);
1136 mddev->degraded -= count;
1137 spin_unlock_irqrestore(&conf->device_lock, flags);
1135 1138
1136 print_conf(conf); 1139 print_conf(conf);
1137 return 0; 1140 return count;
1138} 1141}
1139 1142
1140 1143
@@ -1734,7 +1737,7 @@ static void raid10d(mddev_t *mddev)
1734 raid_end_bio_io(r10_bio); 1737 raid_end_bio_io(r10_bio);
1735 bio_put(bio); 1738 bio_put(bio);
1736 } else { 1739 } else {
1737 const bool do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC); 1740 const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
1738 bio_put(bio); 1741 bio_put(bio);
1739 rdev = conf->mirrors[mirror].rdev; 1742 rdev = conf->mirrors[mirror].rdev;
1740 if (printk_ratelimit()) 1743 if (printk_ratelimit())
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 866d4b5a144c..69b0a169e43d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5330,6 +5330,8 @@ static int raid5_spare_active(mddev_t *mddev)
5330 int i; 5330 int i;
5331 raid5_conf_t *conf = mddev->private; 5331 raid5_conf_t *conf = mddev->private;
5332 struct disk_info *tmp; 5332 struct disk_info *tmp;
5333 int count = 0;
5334 unsigned long flags;
5333 5335
5334 for (i = 0; i < conf->raid_disks; i++) { 5336 for (i = 0; i < conf->raid_disks; i++) {
5335 tmp = conf->disks + i; 5337 tmp = conf->disks + i;
@@ -5337,14 +5339,15 @@ static int raid5_spare_active(mddev_t *mddev)
5337 && tmp->rdev->recovery_offset == MaxSector 5339 && tmp->rdev->recovery_offset == MaxSector
5338 && !test_bit(Faulty, &tmp->rdev->flags) 5340 && !test_bit(Faulty, &tmp->rdev->flags)
5339 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 5341 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
5340 unsigned long flags; 5342 count++;
5341 spin_lock_irqsave(&conf->device_lock, flags); 5343 sysfs_notify_dirent(tmp->rdev->sysfs_state);
5342 mddev->degraded--;
5343 spin_unlock_irqrestore(&conf->device_lock, flags);
5344 } 5344 }
5345 } 5345 }
5346 spin_lock_irqsave(&conf->device_lock, flags);
5347 mddev->degraded -= count;
5348 spin_unlock_irqrestore(&conf->device_lock, flags);
5346 print_raid5_conf(conf); 5349 print_raid5_conf(conf);
5347 return 0; 5350 return count;
5348} 5351}
5349 5352
5350static int raid5_remove_disk(mddev_t *mddev, int number) 5353static int raid5_remove_disk(mddev_t *mddev, int number)
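
Note: the raid1, raid10 and raid5 spare_active() hunks all make the same change: instead of taking conf->device_lock once per recovered device to decrement mddev->degraded, they count the newly in-sync members during the scan, fold the total into the counter under a single lock acquisition, and return that count. A minimal userspace sketch of the pattern, with invented names and a pthread mutex in place of the irq-safe spinlock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NDISKS 8

    struct array_state {
        pthread_mutex_t lock;        /* plays the role of conf->device_lock */
        int degraded;                /* shared counter, cf. mddev->degraded */
        bool in_sync[NDISKS];
        bool faulty[NDISKS];
    };

    /* Returns how many members were newly marked in-sync. */
    static int spare_active(struct array_state *st)
    {
        int count = 0;
        int i;

        for (i = 0; i < NDISKS; i++) {
            if (!st->faulty[i] && !st->in_sync[i]) {
                st->in_sync[i] = true;   /* cf. test_and_set_bit(In_sync, ...) */
                count++;
            }
        }
        pthread_mutex_lock(&st->lock);   /* one lock round-trip, not one per disk */
        st->degraded -= count;
        pthread_mutex_unlock(&st->lock);
        return count;
    }

    int main(void)
    {
        struct array_state st = { PTHREAD_MUTEX_INITIALIZER, 3 };
        int recovered;
        int i;

        for (i = 3; i < NDISKS; i++)
            st.in_sync[i] = true;        /* disks 0-2 start out of sync */
        st.faulty[0] = true;             /* disk 0 stays failed */

        recovered = spare_active(&st);
        printf("recovered %d, degraded now %d\n", recovered, st.degraded);
        return 0;
    }
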
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 0efe631e50ca..d80cfdc8edd2 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -86,7 +86,9 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
86 init_waitqueue_head(&host->wq); 86 init_waitqueue_head(&host->wq);
87 INIT_DELAYED_WORK(&host->detect, mmc_rescan); 87 INIT_DELAYED_WORK(&host->detect, mmc_rescan);
88 INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable); 88 INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
89#ifdef CONFIG_PM
89 host->pm_notify.notifier_call = mmc_pm_notify; 90 host->pm_notify.notifier_call = mmc_pm_notify;
91#endif
90 92
91 /* 93 /*
92 * By default, hosts do not support SGIO or large requests. 94 * By default, hosts do not support SGIO or large requests.
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 283190bc2a40..68d12794cfd9 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -132,7 +132,7 @@ config MMC_SDHCI_CNS3XXX
132 132
133config MMC_SDHCI_S3C 133config MMC_SDHCI_S3C
134 tristate "SDHCI support on Samsung S3C SoC" 134 tristate "SDHCI support on Samsung S3C SoC"
135 depends on MMC_SDHCI && (PLAT_S3C24XX || PLAT_S3C64XX) 135 depends on MMC_SDHCI && PLAT_SAMSUNG
136 help 136 help
137 This selects the Secure Digital Host Controller Interface (SDHCI) 137 This selects the Secure Digital Host Controller Interface (SDHCI)
138 often referrered to as the HSMMC block in some of the Samsung S3C 138 often referrered to as the HSMMC block in some of the Samsung S3C
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 0a7f2614c6f0..71ad4163b95e 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -242,7 +242,7 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
242{ 242{
243 struct sdhci_host *host = platform_get_drvdata(dev); 243 struct sdhci_host *host = platform_get_drvdata(dev);
244 if (host) { 244 if (host) {
245 mutex_lock(&host->lock); 245 spin_lock(&host->lock);
246 if (state) { 246 if (state) {
247 dev_dbg(&dev->dev, "card inserted.\n"); 247 dev_dbg(&dev->dev, "card inserted.\n");
248 host->flags &= ~SDHCI_DEVICE_DEAD; 248 host->flags &= ~SDHCI_DEVICE_DEAD;
@@ -252,8 +252,8 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
252 host->flags |= SDHCI_DEVICE_DEAD; 252 host->flags |= SDHCI_DEVICE_DEAD;
253 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 253 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
254 } 254 }
255 sdhci_card_detect(host); 255 tasklet_schedule(&host->card_tasklet);
256 mutex_unlock(&host->lock); 256 spin_unlock(&host->lock);
257 } 257 }
258} 258}
259 259
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 785512133b50..401527d273b5 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1180,7 +1180,8 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1180 else 1180 else
1181 ctrl &= ~SDHCI_CTRL_4BITBUS; 1181 ctrl &= ~SDHCI_CTRL_4BITBUS;
1182 1182
1183 if (ios->timing == MMC_TIMING_SD_HS) 1183 if (ios->timing == MMC_TIMING_SD_HS &&
1184 !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
1184 ctrl |= SDHCI_CTRL_HISPD; 1185 ctrl |= SDHCI_CTRL_HISPD;
1185 else 1186 else
1186 ctrl &= ~SDHCI_CTRL_HISPD; 1187 ctrl &= ~SDHCI_CTRL_HISPD;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 036cfae76368..d316bc79b636 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -245,6 +245,8 @@ struct sdhci_host {
245#define SDHCI_QUIRK_MISSING_CAPS (1<<27) 245#define SDHCI_QUIRK_MISSING_CAPS (1<<27)
246/* Controller uses Auto CMD12 command to stop the transfer */ 246/* Controller uses Auto CMD12 command to stop the transfer */
247#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28) 247#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28)
248/* Controller doesn't have HISPD bit field in HI-SPEED SD card */
249#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29)
248 250
249 int irq; /* Device IRQ */ 251 int irq; /* Device IRQ */
250 void __iomem * ioaddr; /* Mapped address */ 252 void __iomem * ioaddr; /* Mapped address */
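
Note: the sdhci.c/sdhci.h pair above introduces SDHCI_QUIRK_NO_HISPD_BIT so the generic driver keeps programming the high-speed control bit on conformant controllers while skipping it on hardware that lacks that field. A self-contained illustration of the same gating; the constant values here are made up for the example:

    #include <stdint.h>
    #include <stdio.h>

    #define CTRL_HISPD          0x04u       /* illustrative, not the real register layout */
    #define QUIRK_NO_HISPD_BIT  (1u << 29)
    #define TIMING_SD_HS        1

    static uint8_t set_bus_timing(uint8_t ctrl, int timing, unsigned int quirks)
    {
        if (timing == TIMING_SD_HS && !(quirks & QUIRK_NO_HISPD_BIT))
            ctrl |= CTRL_HISPD;
        else
            ctrl &= (uint8_t)~CTRL_HISPD;   /* quirky controllers keep the bit clear */
        return ctrl;
    }

    int main(void)
    {
        printf("%#x %#x\n",
               (unsigned int)set_bus_timing(0, TIMING_SD_HS, 0),
               (unsigned int)set_bus_timing(0, TIMING_SD_HS, QUIRK_NO_HISPD_BIT));
        return 0;
    }
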
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 00af55d7afba..fe63f6bd663c 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -22,6 +22,7 @@
22#include <linux/mtd/partitions.h> 22#include <linux/mtd/partitions.h>
23#include <linux/mtd/concat.h> 23#include <linux/mtd/concat.h>
24#include <linux/of.h> 24#include <linux/of.h>
25#include <linux/of_address.h>
25#include <linux/of_platform.h> 26#include <linux/of_platform.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
27 28
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index a3c7473dd409..d551ddd9537a 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2866,6 +2866,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2866 */ 2866 */
2867 if (id_data[0] == id_data[6] && id_data[1] == id_data[7] && 2867 if (id_data[0] == id_data[6] && id_data[1] == id_data[7] &&
2868 id_data[0] == NAND_MFR_SAMSUNG && 2868 id_data[0] == NAND_MFR_SAMSUNG &&
2869 (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
2869 id_data[5] != 0x00) { 2870 id_data[5] != 0x00) {
2870 /* Calc pagesize */ 2871 /* Calc pagesize */
2871 mtd->writesize = 2048 << (extid & 0x03); 2872 mtd->writesize = 2048 << (extid & 0x03);
@@ -2934,14 +2935,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2934 chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1; 2935 chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1;
2935 2936
2936 /* Set the bad block position */ 2937 /* Set the bad block position */
2937 if (!(busw & NAND_BUSWIDTH_16) && (*maf_id == NAND_MFR_STMICRO || 2938 if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
2938 (*maf_id == NAND_MFR_SAMSUNG &&
2939 mtd->writesize == 512) ||
2940 *maf_id == NAND_MFR_AMD))
2941 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
2942 else
2943 chip->badblockpos = NAND_LARGE_BADBLOCK_POS; 2939 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
2944 2940 else
2941 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
2945 2942
2946 /* Get chip options, preserve non chip based options */ 2943 /* Get chip options, preserve non chip based options */
2947 chip->options &= ~NAND_CHIPOPTIONS_MSK; 2944 chip->options &= ~NAND_CHIPOPTIONS_MSK;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index e02fa4f0e3c9..4d89f3780207 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -363,7 +363,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = {
363#define tAR_NDTR1(r) (((r) >> 0) & 0xf) 363#define tAR_NDTR1(r) (((r) >> 0) & 0xf)
364 364
365/* convert nano-seconds to nand flash controller clock cycles */ 365/* convert nano-seconds to nand flash controller clock cycles */
366#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) - 1) 366#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
367 367
368/* convert nand flash controller clock cycles to nano-seconds */ 368/* convert nand flash controller clock cycles to nano-seconds */
369#define cycle2ns(c, clk) ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000)) 369#define cycle2ns(c, clk) ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000))
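
Note: the pxa3xx_nand change above drops the trailing "- 1" from ns2cycle(), which had been shaving one clock cycle off every converted timing and could program an interval shorter than what the flash chip asks for. A small standalone comparison, assuming a 100 MHz controller clock purely for illustration (the driver reads the real rate from its clock source at runtime):

    #include <stdio.h>

    #define ns2cycle_old(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) - 1)
    #define ns2cycle_new(ns, clk) (int)((ns) * (clk / 1000000) / 1000)

    int main(void)
    {
        unsigned long clk = 100000000UL;   /* 100 MHz, assumed for the example */
        int ns = 35;                       /* a 35 ns chip timing requirement */

        /* old: 2 cycles = 20 ns (too short); new: 3 cycles = 30 ns */
        printf("old=%d cycles, new=%d cycles\n",
               ns2cycle_old(ns, clk), ns2cycle_new(ns, clk));
        return 0;
    }
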
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index f065204e401b..95a26fb1626c 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -132,7 +132,7 @@ void qla4_8xxx_idc_unlock(struct scsi_qla_host *ha);
132int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha); 132int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha);
133void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha); 133void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha);
134void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha); 134void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha);
135inline void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha); 135void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha);
136 136
137extern int ql4xextended_error_logging; 137extern int ql4xextended_error_logging;
138extern int ql4xdiscoverywait; 138extern int ql4xdiscoverywait;
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index e031a734836e..5d4a3822382d 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1418,7 +1418,7 @@ static int qla4_8xxx_rcvpeg_ready(struct scsi_qla_host *ha)
1418 return QLA_SUCCESS; 1418 return QLA_SUCCESS;
1419} 1419}
1420 1420
1421inline void 1421void
1422qla4_8xxx_set_drv_active(struct scsi_qla_host *ha) 1422qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
1423{ 1423{
1424 uint32_t drv_active; 1424 uint32_t drv_active;
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c
index 659a695bdad6..2af8fd113123 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/serial/of_serial.c
@@ -14,11 +14,10 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/serial_core.h> 15#include <linux/serial_core.h>
16#include <linux/serial_8250.h> 16#include <linux/serial_8250.h>
17#include <linux/of_address.h>
17#include <linux/of_platform.h> 18#include <linux/of_platform.h>
18#include <linux/nwpserial.h> 19#include <linux/nwpserial.h>
19 20
20#include <asm/prom.h>
21
22struct of_serial_info { 21struct of_serial_info {
23 int type; 22 int type;
24 int line; 23 int line;
diff --git a/drivers/spi/coldfire_qspi.c b/drivers/spi/coldfire_qspi.c
index 59be3efe0636..052b3c7fa6a0 100644
--- a/drivers/spi/coldfire_qspi.c
+++ b/drivers/spi/coldfire_qspi.c
@@ -24,6 +24,7 @@
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/errno.h> 25#include <linux/errno.h>
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <linux/sched.h>
27#include <linux/workqueue.h> 28#include <linux/workqueue.h>
28#include <linux/delay.h> 29#include <linux/delay.h>
29#include <linux/io.h> 30#include <linux/io.h>
diff --git a/drivers/staging/pohmelfs/path_entry.c b/drivers/staging/pohmelfs/path_entry.c
index cdc4dd50d638..8ec83d2dffb7 100644
--- a/drivers/staging/pohmelfs/path_entry.c
+++ b/drivers/staging/pohmelfs/path_entry.c
@@ -44,9 +44,9 @@ int pohmelfs_construct_path_string(struct pohmelfs_inode *pi, void *data, int le
44 return -ENOENT; 44 return -ENOENT;
45 } 45 }
46 46
47 read_lock(&current->fs->lock); 47 spin_lock(&current->fs->lock);
48 path.mnt = mntget(current->fs->root.mnt); 48 path.mnt = mntget(current->fs->root.mnt);
49 read_unlock(&current->fs->lock); 49 spin_unlock(&current->fs->lock);
50 50
51 path.dentry = d; 51 path.dentry = d;
52 52
@@ -91,9 +91,9 @@ int pohmelfs_path_length(struct pohmelfs_inode *pi)
91 return -ENOENT; 91 return -ENOENT;
92 } 92 }
93 93
94 read_lock(&current->fs->lock); 94 spin_lock(&current->fs->lock);
95 root = dget(current->fs->root.dentry); 95 root = dget(current->fs->root.dentry);
96 read_unlock(&current->fs->lock); 96 spin_unlock(&current->fs->lock);
97 97
98 spin_lock(&dcache_lock); 98 spin_lock(&dcache_lock);
99 99
diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
index f3a4e15672d9..f96a471cb1a8 100644
--- a/drivers/video/matrox/matroxfb_base.h
+++ b/drivers/video/matrox/matroxfb_base.h
@@ -151,13 +151,13 @@ static inline void mga_writel(vaddr_t va, unsigned int offs, u_int32_t value) {
151static inline void mga_memcpy_toio(vaddr_t va, const void* src, int len) { 151static inline void mga_memcpy_toio(vaddr_t va, const void* src, int len) {
152#if defined(__alpha__) || defined(__i386__) || defined(__x86_64__) 152#if defined(__alpha__) || defined(__i386__) || defined(__x86_64__)
153 /* 153 /*
154 * memcpy_toio works for us if: 154 * iowrite32_rep works for us if:
155 * (1) Copies data as 32bit quantities, not byte after byte, 155 * (1) Copies data as 32bit quantities, not byte after byte,
156 * (2) Performs LE ordered stores, and 156 * (2) Performs LE ordered stores, and
157 * (3) It copes with unaligned source (destination is guaranteed to be page 157 * (3) It copes with unaligned source (destination is guaranteed to be page
158 * aligned and length is guaranteed to be multiple of 4). 158 * aligned and length is guaranteed to be multiple of 4).
159 */ 159 */
160 memcpy_toio(va.vaddr, src, len); 160 iowrite32_rep(va.vaddr, src, len >> 2);
161#else 161#else
162 u_int32_t __iomem* addr = va.vaddr; 162 u_int32_t __iomem* addr = va.vaddr;
163 163
diff --git a/fs/buffer.c b/fs/buffer.c
index 50efa339e051..3e7dca279d1c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -770,11 +770,12 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
770 spin_unlock(lock); 770 spin_unlock(lock);
771 /* 771 /*
772 * Ensure any pending I/O completes so that 772 * Ensure any pending I/O completes so that
773 * ll_rw_block() actually writes the current 773 * write_dirty_buffer() actually writes the
774 * contents - it is a noop if I/O is still in 774 * current contents - it is a noop if I/O is
775 * flight on potentially older contents. 775 * still in flight on potentially older
776 * contents.
776 */ 777 */
777 ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh); 778 write_dirty_buffer(bh, WRITE_SYNC_PLUG);
778 779
779 /* 780 /*
780 * Kick off IO for the previous mapping. Note 781 * Kick off IO for the previous mapping. Note
@@ -2912,13 +2913,6 @@ int submit_bh(int rw, struct buffer_head * bh)
2912 BUG_ON(buffer_unwritten(bh)); 2913 BUG_ON(buffer_unwritten(bh));
2913 2914
2914 /* 2915 /*
2915 * Mask in barrier bit for a write (could be either a WRITE or a
2916 * WRITE_SYNC
2917 */
2918 if (buffer_ordered(bh) && (rw & WRITE))
2919 rw |= WRITE_BARRIER;
2920
2921 /*
2922 * Only clear out a write error when rewriting 2916 * Only clear out a write error when rewriting
2923 */ 2917 */
2924 if (test_set_buffer_req(bh) && (rw & WRITE)) 2918 if (test_set_buffer_req(bh) && (rw & WRITE))
@@ -2956,22 +2950,21 @@ EXPORT_SYMBOL(submit_bh);
2956 2950
2957/** 2951/**
2958 * ll_rw_block: low-level access to block devices (DEPRECATED) 2952 * ll_rw_block: low-level access to block devices (DEPRECATED)
2959 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead) 2953 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2960 * @nr: number of &struct buffer_heads in the array 2954 * @nr: number of &struct buffer_heads in the array
2961 * @bhs: array of pointers to &struct buffer_head 2955 * @bhs: array of pointers to &struct buffer_head
2962 * 2956 *
2963 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and 2957 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2964 * requests an I/O operation on them, either a %READ or a %WRITE. The third 2958 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2965 * %SWRITE is like %WRITE only we make sure that the *current* data in buffers 2959 * %READA option is described in the documentation for generic_make_request()
2966 * are sent to disk. The fourth %READA option is described in the documentation 2960 * which ll_rw_block() calls.
2967 * for generic_make_request() which ll_rw_block() calls.
2968 * 2961 *
2969 * This function drops any buffer that it cannot get a lock on (with the 2962 * This function drops any buffer that it cannot get a lock on (with the
2970 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be 2963 * BH_Lock state bit), any buffer that appears to be clean when doing a write
2971 * clean when doing a write request, and any buffer that appears to be 2964 * request, and any buffer that appears to be up-to-date when doing read
2972 * up-to-date when doing read request. Further it marks as clean buffers that 2965 * request. Further it marks as clean buffers that are processed for
2973 * are processed for writing (the buffer cache won't assume that they are 2966 * writing (the buffer cache won't assume that they are actually clean
2974 * actually clean until the buffer gets unlocked). 2967 * until the buffer gets unlocked).
2975 * 2968 *
2976 * ll_rw_block sets b_end_io to simple completion handler that marks 2969 * ll_rw_block sets b_end_io to simple completion handler that marks
2977 * the buffer up-to-date (if approriate), unlocks the buffer and wakes 2970 * the buffer up-to-date (if approriate), unlocks the buffer and wakes
@@ -2987,20 +2980,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2987 for (i = 0; i < nr; i++) { 2980 for (i = 0; i < nr; i++) {
2988 struct buffer_head *bh = bhs[i]; 2981 struct buffer_head *bh = bhs[i];
2989 2982
2990 if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG) 2983 if (!trylock_buffer(bh))
2991 lock_buffer(bh);
2992 else if (!trylock_buffer(bh))
2993 continue; 2984 continue;
2994 2985 if (rw == WRITE) {
2995 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
2996 rw == SWRITE_SYNC_PLUG) {
2997 if (test_clear_buffer_dirty(bh)) { 2986 if (test_clear_buffer_dirty(bh)) {
2998 bh->b_end_io = end_buffer_write_sync; 2987 bh->b_end_io = end_buffer_write_sync;
2999 get_bh(bh); 2988 get_bh(bh);
3000 if (rw == SWRITE_SYNC) 2989 submit_bh(WRITE, bh);
3001 submit_bh(WRITE_SYNC, bh);
3002 else
3003 submit_bh(WRITE, bh);
3004 continue; 2990 continue;
3005 } 2991 }
3006 } else { 2992 } else {
@@ -3016,12 +3002,25 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
3016} 3002}
3017EXPORT_SYMBOL(ll_rw_block); 3003EXPORT_SYMBOL(ll_rw_block);
3018 3004
3005void write_dirty_buffer(struct buffer_head *bh, int rw)
3006{
3007 lock_buffer(bh);
3008 if (!test_clear_buffer_dirty(bh)) {
3009 unlock_buffer(bh);
3010 return;
3011 }
3012 bh->b_end_io = end_buffer_write_sync;
3013 get_bh(bh);
3014 submit_bh(rw, bh);
3015}
3016EXPORT_SYMBOL(write_dirty_buffer);
3017
3019/* 3018/*
3020 * For a data-integrity writeout, we need to wait upon any in-progress I/O 3019 * For a data-integrity writeout, we need to wait upon any in-progress I/O
3021 * and then start new I/O and then wait upon it. The caller must have a ref on 3020 * and then start new I/O and then wait upon it. The caller must have a ref on
3022 * the buffer_head. 3021 * the buffer_head.
3023 */ 3022 */
3024int sync_dirty_buffer(struct buffer_head *bh) 3023int __sync_dirty_buffer(struct buffer_head *bh, int rw)
3025{ 3024{
3026 int ret = 0; 3025 int ret = 0;
3027 3026
@@ -3030,7 +3029,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
3030 if (test_clear_buffer_dirty(bh)) { 3029 if (test_clear_buffer_dirty(bh)) {
3031 get_bh(bh); 3030 get_bh(bh);
3032 bh->b_end_io = end_buffer_write_sync; 3031 bh->b_end_io = end_buffer_write_sync;
3033 ret = submit_bh(WRITE_SYNC, bh); 3032 ret = submit_bh(rw, bh);
3034 wait_on_buffer(bh); 3033 wait_on_buffer(bh);
3035 if (buffer_eopnotsupp(bh)) { 3034 if (buffer_eopnotsupp(bh)) {
3036 clear_buffer_eopnotsupp(bh); 3035 clear_buffer_eopnotsupp(bh);
@@ -3043,6 +3042,12 @@ int sync_dirty_buffer(struct buffer_head *bh)
3043 } 3042 }
3044 return ret; 3043 return ret;
3045} 3044}
3045EXPORT_SYMBOL(__sync_dirty_buffer);
3046
3047int sync_dirty_buffer(struct buffer_head *bh)
3048{
3049 return __sync_dirty_buffer(bh, WRITE_SYNC);
3050}
3046EXPORT_SYMBOL(sync_dirty_buffer); 3051EXPORT_SYMBOL(sync_dirty_buffer);
3047 3052
3048/* 3053/*
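
Note: the buffer.c hunks retire the SWRITE* modes of ll_rw_block() in favour of write_dirty_buffer(), which always takes the buffer lock and submits the buffer only if its dirty bit was set, and they split sync_dirty_buffer() into a thin wrapper around __sync_dirty_buffer() so callers can pick the write flag. The userspace sketch below imitates only that lock / test-and-clear-dirty / submit-or-unlock contract; the types and the pthread mutex are stand-ins, not kernel API.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_bh {
        pthread_mutex_t lock;   /* cf. lock_buffer()/unlock_buffer() */
        bool dirty;             /* cf. the BH_Dirty bit */
        int blocknr;
    };

    static void submit_write(struct fake_bh *bh, int flags)
    {
        /* stands in for submit_bh(rw, bh); pretend the I/O completes here */
        printf("writing block %d (flags %#x)\n", bh->blocknr, (unsigned int)flags);
        pthread_mutex_unlock(&bh->lock);   /* the completion handler would unlock */
    }

    static void write_dirty_buffer_sketch(struct fake_bh *bh, int flags)
    {
        pthread_mutex_lock(&bh->lock);
        if (!bh->dirty) {                  /* cf. !test_clear_buffer_dirty(bh) */
            pthread_mutex_unlock(&bh->lock);
            return;                        /* clean buffer: nothing to do */
        }
        bh->dirty = false;
        submit_write(bh, flags);
    }

    int main(void)
    {
        struct fake_bh dirty = { PTHREAD_MUTEX_INITIALIZER, true, 7 };
        struct fake_bh clean = { PTHREAD_MUTEX_INITIALIZER, false, 8 };

        write_dirty_buffer_sketch(&dirty, 0x10);  /* issues a write */
        write_dirty_buffer_sketch(&clean, 0x10);  /* silently returns */
        return 0;
    }
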
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index a53b130b366c..1e7a33028d33 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -80,7 +80,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
80 } 80 }
81 } else { 81 } else {
82 inode = iget_locked(sb, CRAMINO(cramfs_inode)); 82 inode = iget_locked(sb, CRAMINO(cramfs_inode));
83 if (inode) { 83 if (inode && (inode->i_state & I_NEW)) {
84 setup_inode(inode, cramfs_inode); 84 setup_inode(inode, cramfs_inode);
85 unlock_new_inode(inode); 85 unlock_new_inode(inode);
86 } 86 }
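
Note: the one-line cramfs fix above relies on the iget_locked() contract: the returned inode only needs, and only tolerates, initialisation when I_NEW is set, i.e. when it was freshly allocated rather than found already live in the inode cache. A userspace get-or-create sketch of that rule, with invented names and collisions ignored for brevity:

    #include <stdbool.h>
    #include <stdio.h>

    struct node { unsigned long ino; int data; bool present; };

    static struct node cache[8];

    static struct node *get_locked(unsigned long ino, bool *is_new)
    {
        struct node *n = &cache[ino % 8];   /* toy hash, no collision handling */

        *is_new = !n->present;              /* cf. inode->i_state & I_NEW */
        if (*is_new) {
            n->present = true;
            n->ino = ino;
        }
        return n;
    }

    int main(void)
    {
        bool is_new;
        struct node *n = get_locked(42, &is_new);

        if (is_new)                  /* set up only freshly created nodes */
            n->data = 1234;          /* cf. setup_inode() + unlock_new_inode() */

        n = get_locked(42, &is_new); /* second lookup reuses; must not re-init */
        printf("ino %lu new=%d data=%d\n", n->ino, (int)is_new, n->data);
        return 0;
    }
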
diff --git a/fs/dcache.c b/fs/dcache.c
index 4d13bf50b7b1..83293be48149 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1332,31 +1332,13 @@ EXPORT_SYMBOL(d_add_ci);
1332 * d_lookup - search for a dentry 1332 * d_lookup - search for a dentry
1333 * @parent: parent dentry 1333 * @parent: parent dentry
1334 * @name: qstr of name we wish to find 1334 * @name: qstr of name we wish to find
1335 * Returns: dentry, or NULL
1335 * 1336 *
1336 * Searches the children of the parent dentry for the name in question. If 1337 * d_lookup searches the children of the parent dentry for the name in
1337 * the dentry is found its reference count is incremented and the dentry 1338 * question. If the dentry is found its reference count is incremented and the
1338 * is returned. The caller must use dput to free the entry when it has 1339 * dentry is returned. The caller must use dput to free the entry when it has
1339 * finished using it. %NULL is returned on failure. 1340 * finished using it. %NULL is returned if the dentry does not exist.
1340 *
1341 * __d_lookup is dcache_lock free. The hash list is protected using RCU.
1342 * Memory barriers are used while updating and doing lockless traversal.
1343 * To avoid races with d_move while rename is happening, d_lock is used.
1344 *
1345 * Overflows in memcmp(), while d_move, are avoided by keeping the length
1346 * and name pointer in one structure pointed by d_qstr.
1347 *
1348 * rcu_read_lock() and rcu_read_unlock() are used to disable preemption while
1349 * lookup is going on.
1350 *
1351 * The dentry unused LRU is not updated even if lookup finds the required dentry
1352 * in there. It is updated in places such as prune_dcache, shrink_dcache_sb,
1353 * select_parent and __dget_locked. This laziness saves lookup from dcache_lock
1354 * acquisition.
1355 *
1356 * d_lookup() is protected against the concurrent renames in some unrelated
1357 * directory using the seqlockt_t rename_lock.
1358 */ 1341 */
1359
1360struct dentry * d_lookup(struct dentry * parent, struct qstr * name) 1342struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
1361{ 1343{
1362 struct dentry * dentry = NULL; 1344 struct dentry * dentry = NULL;
@@ -1372,6 +1354,21 @@ struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
1372} 1354}
1373EXPORT_SYMBOL(d_lookup); 1355EXPORT_SYMBOL(d_lookup);
1374 1356
1357/*
1358 * __d_lookup - search for a dentry (racy)
1359 * @parent: parent dentry
1360 * @name: qstr of name we wish to find
1361 * Returns: dentry, or NULL
1362 *
1363 * __d_lookup is like d_lookup, however it may (rarely) return a
1364 * false-negative result due to unrelated rename activity.
1365 *
1366 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
1367 * however it must be used carefully, eg. with a following d_lookup in
1368 * the case of failure.
1369 *
1370 * __d_lookup callers must be commented.
1371 */
1375struct dentry * __d_lookup(struct dentry * parent, struct qstr * name) 1372struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
1376{ 1373{
1377 unsigned int len = name->len; 1374 unsigned int len = name->len;
@@ -1382,6 +1379,19 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
1382 struct hlist_node *node; 1379 struct hlist_node *node;
1383 struct dentry *dentry; 1380 struct dentry *dentry;
1384 1381
1382 /*
1383 * The hash list is protected using RCU.
1384 *
1385 * Take d_lock when comparing a candidate dentry, to avoid races
1386 * with d_move().
1387 *
1388 * It is possible that concurrent renames can mess up our list
1389 * walk here and result in missing our dentry, resulting in the
1390 * false-negative result. d_lookup() protects against concurrent
1391 * renames using rename_lock seqlock.
1392 *
1393 * See Documentation/vfs/dcache-locking.txt for more details.
1394 */
1385 rcu_read_lock(); 1395 rcu_read_lock();
1386 1396
1387 hlist_for_each_entry_rcu(dentry, node, head, d_hash) { 1397 hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
@@ -1396,8 +1406,8 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
1396 1406
1397 /* 1407 /*
1398 * Recheck the dentry after taking the lock - d_move may have 1408 * Recheck the dentry after taking the lock - d_move may have
1399 * changed things. Don't bother checking the hash because we're 1409 * changed things. Don't bother checking the hash because
1400 * about to compare the whole name anyway. 1410 * we're about to compare the whole name anyway.
1401 */ 1411 */
1402 if (dentry->d_parent != parent) 1412 if (dentry->d_parent != parent)
1403 goto next; 1413 goto next;
@@ -1925,7 +1935,7 @@ static int prepend_path(const struct path *path, struct path *root,
1925 bool slash = false; 1935 bool slash = false;
1926 int error = 0; 1936 int error = 0;
1927 1937
1928 spin_lock(&vfsmount_lock); 1938 br_read_lock(vfsmount_lock);
1929 while (dentry != root->dentry || vfsmnt != root->mnt) { 1939 while (dentry != root->dentry || vfsmnt != root->mnt) {
1930 struct dentry * parent; 1940 struct dentry * parent;
1931 1941
@@ -1954,7 +1964,7 @@ out:
1954 if (!error && !slash) 1964 if (!error && !slash)
1955 error = prepend(buffer, buflen, "/", 1); 1965 error = prepend(buffer, buflen, "/", 1);
1956 1966
1957 spin_unlock(&vfsmount_lock); 1967 br_read_unlock(vfsmount_lock);
1958 return error; 1968 return error;
1959 1969
1960global_root: 1970global_root:
@@ -2292,11 +2302,12 @@ int path_is_under(struct path *path1, struct path *path2)
2292 struct vfsmount *mnt = path1->mnt; 2302 struct vfsmount *mnt = path1->mnt;
2293 struct dentry *dentry = path1->dentry; 2303 struct dentry *dentry = path1->dentry;
2294 int res; 2304 int res;
2295 spin_lock(&vfsmount_lock); 2305
2306 br_read_lock(vfsmount_lock);
2296 if (mnt != path2->mnt) { 2307 if (mnt != path2->mnt) {
2297 for (;;) { 2308 for (;;) {
2298 if (mnt->mnt_parent == mnt) { 2309 if (mnt->mnt_parent == mnt) {
2299 spin_unlock(&vfsmount_lock); 2310 br_read_unlock(vfsmount_lock);
2300 return 0; 2311 return 0;
2301 } 2312 }
2302 if (mnt->mnt_parent == path2->mnt) 2313 if (mnt->mnt_parent == path2->mnt)
@@ -2306,7 +2317,7 @@ int path_is_under(struct path *path1, struct path *path2)
2306 dentry = mnt->mnt_mountpoint; 2317 dentry = mnt->mnt_mountpoint;
2307 } 2318 }
2308 res = is_subdir(dentry, path2->dentry); 2319 res = is_subdir(dentry, path2->dentry);
2309 spin_unlock(&vfsmount_lock); 2320 br_read_unlock(vfsmount_lock);
2310 return res; 2321 return res;
2311} 2322}
2312EXPORT_SYMBOL(path_is_under); 2323EXPORT_SYMBOL(path_is_under);
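The prepend_path() and path_is_under() hunks above only walk the mount tree, so they switch from the old global spinlock to the read side of the new vfsmount brlock; all writers take br_write_lock in fs/namespace.c further down. A minimal sketch of that read-side pattern, assuming the 2.6.36-era lglock macros added by this series (the function name and walk are illustrative only):

    #include <linux/lglock.h>
    #include <linux/mount.h>

    DECLARE_BRLOCK(vfsmount_lock);      /* the patch declares this once, in fs/internal.h */

    /* Read-mostly traversal: take only the local CPU's lock, walk, drop it. */
    static int example_walk_mounts(struct vfsmount *mnt, struct vfsmount *root)
    {
            int depth = 0;

            br_read_lock(vfsmount_lock);        /* cheap: no cross-CPU cacheline traffic */
            while (mnt != root && mnt->mnt_parent != mnt) {
                    mnt = mnt->mnt_parent;
                    depth++;
            }
            br_read_unlock(vfsmount_lock);
            return depth;
    }

The point of the brlock is that readers like this scale per-CPU, while the (rare) writers pay for locking every CPU.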
diff --git a/fs/exec.c b/fs/exec.c
index 05c7d6b84df7..2d9455282744 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1118,7 +1118,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
1118 bprm->unsafe = tracehook_unsafe_exec(p); 1118 bprm->unsafe = tracehook_unsafe_exec(p);
1119 1119
1120 n_fs = 1; 1120 n_fs = 1;
1121 write_lock(&p->fs->lock); 1121 spin_lock(&p->fs->lock);
1122 rcu_read_lock(); 1122 rcu_read_lock();
1123 for (t = next_thread(p); t != p; t = next_thread(t)) { 1123 for (t = next_thread(p); t != p; t = next_thread(t)) {
1124 if (t->fs == p->fs) 1124 if (t->fs == p->fs)
@@ -1135,7 +1135,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
1135 res = 1; 1135 res = 1;
1136 } 1136 }
1137 } 1137 }
1138 write_unlock(&p->fs->lock); 1138 spin_unlock(&p->fs->lock);
1139 1139
1140 return res; 1140 return res;
1141} 1141}
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 1fa23f6ffba5..1736f2356388 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -250,7 +250,9 @@ int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
250{ 250{
251 int i, err = 0; 251 int i, err = 0;
252 252
253 ll_rw_block(SWRITE, nr_bhs, bhs); 253 for (i = 0; i < nr_bhs; i++)
254 write_dirty_buffer(bhs[i], WRITE);
255
254 for (i = 0; i < nr_bhs; i++) { 256 for (i = 0; i < nr_bhs; i++) {
255 wait_on_buffer(bhs[i]); 257 wait_on_buffer(bhs[i]);
256 if (buffer_eopnotsupp(bhs[i])) { 258 if (buffer_eopnotsupp(bhs[i])) {
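This hunk shows the pattern repeated throughout the series: the SWRITE-based ll_rw_block() call is replaced by an explicit write_dirty_buffer() per buffer, followed by the existing wait loop. A hedged sketch of the new calling convention (error handling trimmed; the function name is illustrative):

    #include <linux/buffer_head.h>
    #include <linux/errno.h>
    #include <linux/fs.h>

    /* Write a batch of dirty buffers and wait for them; returns -EIO on failure. */
    static int example_sync_bhs(struct buffer_head **bhs, int nr)
    {
            int i, err = 0;

            for (i = 0; i < nr; i++)
                    write_dirty_buffer(bhs[i], WRITE); /* locks bh, clears dirty, submits */

            for (i = 0; i < nr; i++) {
                    wait_on_buffer(bhs[i]);
                    if (!err && !buffer_uptodate(bhs[i]))
                            err = -EIO;
            }
            return err;
    }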
diff --git a/fs/file_table.c b/fs/file_table.c
index edecd36fed9b..a04bdd81c11c 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -20,7 +20,9 @@
20#include <linux/cdev.h> 20#include <linux/cdev.h>
21#include <linux/fsnotify.h> 21#include <linux/fsnotify.h>
22#include <linux/sysctl.h> 22#include <linux/sysctl.h>
23#include <linux/lglock.h>
23#include <linux/percpu_counter.h> 24#include <linux/percpu_counter.h>
25#include <linux/percpu.h>
24#include <linux/ima.h> 26#include <linux/ima.h>
25 27
26#include <asm/atomic.h> 28#include <asm/atomic.h>
@@ -32,8 +34,8 @@ struct files_stat_struct files_stat = {
32 .max_files = NR_FILE 34 .max_files = NR_FILE
33}; 35};
34 36
35/* public. Not pretty! */ 37DECLARE_LGLOCK(files_lglock);
36__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock); 38DEFINE_LGLOCK(files_lglock);
37 39
38/* SLAB cache for file structures */ 40/* SLAB cache for file structures */
39static struct kmem_cache *filp_cachep __read_mostly; 41static struct kmem_cache *filp_cachep __read_mostly;
@@ -249,7 +251,7 @@ static void __fput(struct file *file)
249 cdev_put(inode->i_cdev); 251 cdev_put(inode->i_cdev);
250 fops_put(file->f_op); 252 fops_put(file->f_op);
251 put_pid(file->f_owner.pid); 253 put_pid(file->f_owner.pid);
252 file_kill(file); 254 file_sb_list_del(file);
253 if (file->f_mode & FMODE_WRITE) 255 if (file->f_mode & FMODE_WRITE)
254 drop_file_write_access(file); 256 drop_file_write_access(file);
255 file->f_path.dentry = NULL; 257 file->f_path.dentry = NULL;
@@ -328,41 +330,107 @@ struct file *fget_light(unsigned int fd, int *fput_needed)
328 return file; 330 return file;
329} 331}
330 332
331
332void put_filp(struct file *file) 333void put_filp(struct file *file)
333{ 334{
334 if (atomic_long_dec_and_test(&file->f_count)) { 335 if (atomic_long_dec_and_test(&file->f_count)) {
335 security_file_free(file); 336 security_file_free(file);
336 file_kill(file); 337 file_sb_list_del(file);
337 file_free(file); 338 file_free(file);
338 } 339 }
339} 340}
340 341
341void file_move(struct file *file, struct list_head *list) 342static inline int file_list_cpu(struct file *file)
342{ 343{
343 if (!list) 344#ifdef CONFIG_SMP
344 return; 345 return file->f_sb_list_cpu;
345 file_list_lock(); 346#else
346 list_move(&file->f_u.fu_list, list); 347 return smp_processor_id();
347 file_list_unlock(); 348#endif
349}
350
351/* helper for file_sb_list_add to reduce ifdefs */
352static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
353{
354 struct list_head *list;
355#ifdef CONFIG_SMP
356 int cpu;
357 cpu = smp_processor_id();
358 file->f_sb_list_cpu = cpu;
359 list = per_cpu_ptr(sb->s_files, cpu);
360#else
361 list = &sb->s_files;
362#endif
363 list_add(&file->f_u.fu_list, list);
348} 364}
349 365
350void file_kill(struct file *file) 366/**
367 * file_sb_list_add - add a file to the sb's file list
368 * @file: file to add
369 * @sb: sb to add it to
370 *
371 * Use this function to associate a file with the superblock of the inode it
372 * refers to.
373 */
374void file_sb_list_add(struct file *file, struct super_block *sb)
375{
376 lg_local_lock(files_lglock);
377 __file_sb_list_add(file, sb);
378 lg_local_unlock(files_lglock);
379}
380
381/**
382 * file_sb_list_del - remove a file from the sb's file list
383 * @file: file to remove
384 * @sb: sb to remove it from
385 *
386 * Use this function to remove a file from its superblock.
387 */
388void file_sb_list_del(struct file *file)
351{ 389{
352 if (!list_empty(&file->f_u.fu_list)) { 390 if (!list_empty(&file->f_u.fu_list)) {
353 file_list_lock(); 391 lg_local_lock_cpu(files_lglock, file_list_cpu(file));
354 list_del_init(&file->f_u.fu_list); 392 list_del_init(&file->f_u.fu_list);
355 file_list_unlock(); 393 lg_local_unlock_cpu(files_lglock, file_list_cpu(file));
356 } 394 }
357} 395}
358 396
397#ifdef CONFIG_SMP
398
399/*
400 * These macros iterate all files on all CPUs for a given superblock.
401 * files_lglock must be held globally.
402 */
403#define do_file_list_for_each_entry(__sb, __file) \
404{ \
405 int i; \
406 for_each_possible_cpu(i) { \
407 struct list_head *list; \
408 list = per_cpu_ptr((__sb)->s_files, i); \
409 list_for_each_entry((__file), list, f_u.fu_list)
410
411#define while_file_list_for_each_entry \
412 } \
413}
414
415#else
416
417#define do_file_list_for_each_entry(__sb, __file) \
418{ \
419 struct list_head *list; \
420 list = &(sb)->s_files; \
421 list_for_each_entry((__file), list, f_u.fu_list)
422
423#define while_file_list_for_each_entry \
424}
425
426#endif
427
359int fs_may_remount_ro(struct super_block *sb) 428int fs_may_remount_ro(struct super_block *sb)
360{ 429{
361 struct file *file; 430 struct file *file;
362
363 /* Check that no files are currently opened for writing. */ 431 /* Check that no files are currently opened for writing. */
364 file_list_lock(); 432 lg_global_lock(files_lglock);
365 list_for_each_entry(file, &sb->s_files, f_u.fu_list) { 433 do_file_list_for_each_entry(sb, file) {
366 struct inode *inode = file->f_path.dentry->d_inode; 434 struct inode *inode = file->f_path.dentry->d_inode;
367 435
368 /* File with pending delete? */ 436 /* File with pending delete? */
@@ -372,11 +440,11 @@ int fs_may_remount_ro(struct super_block *sb)
372 /* Writeable file? */ 440 /* Writeable file? */
373 if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE)) 441 if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
374 goto too_bad; 442 goto too_bad;
375 } 443 } while_file_list_for_each_entry;
376 file_list_unlock(); 444 lg_global_unlock(files_lglock);
377 return 1; /* Tis' cool bro. */ 445 return 1; /* Tis' cool bro. */
378too_bad: 446too_bad:
379 file_list_unlock(); 447 lg_global_unlock(files_lglock);
380 return 0; 448 return 0;
381} 449}
382 450
@@ -392,8 +460,8 @@ void mark_files_ro(struct super_block *sb)
392 struct file *f; 460 struct file *f;
393 461
394retry: 462retry:
395 file_list_lock(); 463 lg_global_lock(files_lglock);
396 list_for_each_entry(f, &sb->s_files, f_u.fu_list) { 464 do_file_list_for_each_entry(sb, f) {
397 struct vfsmount *mnt; 465 struct vfsmount *mnt;
398 if (!S_ISREG(f->f_path.dentry->d_inode->i_mode)) 466 if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
399 continue; 467 continue;
@@ -408,16 +476,13 @@ retry:
408 continue; 476 continue;
409 file_release_write(f); 477 file_release_write(f);
410 mnt = mntget(f->f_path.mnt); 478 mnt = mntget(f->f_path.mnt);
411 file_list_unlock(); 479 /* This can sleep, so we can't hold the spinlock. */
412 /* 480 lg_global_unlock(files_lglock);
413 * This can sleep, so we can't hold
414 * the file_list_lock() spinlock.
415 */
416 mnt_drop_write(mnt); 481 mnt_drop_write(mnt);
417 mntput(mnt); 482 mntput(mnt);
418 goto retry; 483 goto retry;
419 } 484 } while_file_list_for_each_entry;
420 file_list_unlock(); 485 lg_global_unlock(files_lglock);
421} 486}
422 487
423void __init files_init(unsigned long mempages) 488void __init files_init(unsigned long mempages)
@@ -437,5 +502,6 @@ void __init files_init(unsigned long mempages)
437 if (files_stat.max_files < NR_FILE) 502 if (files_stat.max_files < NR_FILE)
438 files_stat.max_files = NR_FILE; 503 files_stat.max_files = NR_FILE;
439 files_defer_init(); 504 files_defer_init();
505 lg_lock_init(files_lglock);
440 percpu_counter_init(&nr_files, 0); 506 percpu_counter_init(&nr_files, 0);
441} 507}
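The file_table.c hunks above move the superblock file list from the single files_lock spinlock to an lglock with per-CPU lists: file_sb_list_add() takes only the local CPU's lock, file_sb_list_del() takes the lock of the CPU recorded in f_sb_list_cpu, and the rare whole-list walkers (fs_may_remount_ro, mark_files_ro) take every CPU's lock with lg_global_lock. A condensed sketch of that fast/slow split, using the macros as they appear in the patch (the example names are illustrative):

    #include <linux/lglock.h>

    DECLARE_LGLOCK(example_lglock);
    DEFINE_LGLOCK(example_lglock);
    /* lg_lock_init(example_lglock) must run once at init, as files_init() does. */

    static void example_fast_path(void)
    {
            lg_local_lock(example_lglock);      /* per-CPU: scales with CPU count */
            /* ... add to this CPU's list ... */
            lg_local_unlock(example_lglock);
    }

    static void example_slow_path(void)
    {
            lg_global_lock(example_lglock);     /* locks every CPU: rare and slow */
            /* ... walk all per-CPU lists ... */
            lg_global_unlock(example_lglock);
    }

The trade-off is deliberate: open/close get cheaper at the cost of making remount-ro style scans more expensive.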
diff --git a/fs/fs_struct.c b/fs/fs_struct.c
index 1ee40eb9a2c0..ed45a9cf5f3d 100644
--- a/fs/fs_struct.c
+++ b/fs/fs_struct.c
@@ -13,11 +13,11 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
13{ 13{
14 struct path old_root; 14 struct path old_root;
15 15
16 write_lock(&fs->lock); 16 spin_lock(&fs->lock);
17 old_root = fs->root; 17 old_root = fs->root;
18 fs->root = *path; 18 fs->root = *path;
19 path_get(path); 19 path_get(path);
20 write_unlock(&fs->lock); 20 spin_unlock(&fs->lock);
21 if (old_root.dentry) 21 if (old_root.dentry)
22 path_put(&old_root); 22 path_put(&old_root);
23} 23}
@@ -30,11 +30,11 @@ void set_fs_pwd(struct fs_struct *fs, struct path *path)
30{ 30{
31 struct path old_pwd; 31 struct path old_pwd;
32 32
33 write_lock(&fs->lock); 33 spin_lock(&fs->lock);
34 old_pwd = fs->pwd; 34 old_pwd = fs->pwd;
35 fs->pwd = *path; 35 fs->pwd = *path;
36 path_get(path); 36 path_get(path);
37 write_unlock(&fs->lock); 37 spin_unlock(&fs->lock);
38 38
39 if (old_pwd.dentry) 39 if (old_pwd.dentry)
40 path_put(&old_pwd); 40 path_put(&old_pwd);
@@ -51,7 +51,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
51 task_lock(p); 51 task_lock(p);
52 fs = p->fs; 52 fs = p->fs;
53 if (fs) { 53 if (fs) {
54 write_lock(&fs->lock); 54 spin_lock(&fs->lock);
55 if (fs->root.dentry == old_root->dentry 55 if (fs->root.dentry == old_root->dentry
56 && fs->root.mnt == old_root->mnt) { 56 && fs->root.mnt == old_root->mnt) {
57 path_get(new_root); 57 path_get(new_root);
@@ -64,7 +64,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
64 fs->pwd = *new_root; 64 fs->pwd = *new_root;
65 count++; 65 count++;
66 } 66 }
67 write_unlock(&fs->lock); 67 spin_unlock(&fs->lock);
68 } 68 }
69 task_unlock(p); 69 task_unlock(p);
70 } while_each_thread(g, p); 70 } while_each_thread(g, p);
@@ -87,10 +87,10 @@ void exit_fs(struct task_struct *tsk)
87 if (fs) { 87 if (fs) {
88 int kill; 88 int kill;
89 task_lock(tsk); 89 task_lock(tsk);
90 write_lock(&fs->lock); 90 spin_lock(&fs->lock);
91 tsk->fs = NULL; 91 tsk->fs = NULL;
92 kill = !--fs->users; 92 kill = !--fs->users;
93 write_unlock(&fs->lock); 93 spin_unlock(&fs->lock);
94 task_unlock(tsk); 94 task_unlock(tsk);
95 if (kill) 95 if (kill)
96 free_fs_struct(fs); 96 free_fs_struct(fs);
@@ -104,7 +104,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
104 if (fs) { 104 if (fs) {
105 fs->users = 1; 105 fs->users = 1;
106 fs->in_exec = 0; 106 fs->in_exec = 0;
107 rwlock_init(&fs->lock); 107 spin_lock_init(&fs->lock);
108 fs->umask = old->umask; 108 fs->umask = old->umask;
109 get_fs_root_and_pwd(old, &fs->root, &fs->pwd); 109 get_fs_root_and_pwd(old, &fs->root, &fs->pwd);
110 } 110 }
@@ -121,10 +121,10 @@ int unshare_fs_struct(void)
121 return -ENOMEM; 121 return -ENOMEM;
122 122
123 task_lock(current); 123 task_lock(current);
124 write_lock(&fs->lock); 124 spin_lock(&fs->lock);
125 kill = !--fs->users; 125 kill = !--fs->users;
126 current->fs = new_fs; 126 current->fs = new_fs;
127 write_unlock(&fs->lock); 127 spin_unlock(&fs->lock);
128 task_unlock(current); 128 task_unlock(current);
129 129
130 if (kill) 130 if (kill)
@@ -143,7 +143,7 @@ EXPORT_SYMBOL(current_umask);
143/* to be mentioned only in INIT_TASK */ 143/* to be mentioned only in INIT_TASK */
144struct fs_struct init_fs = { 144struct fs_struct init_fs = {
145 .users = 1, 145 .users = 1,
146 .lock = __RW_LOCK_UNLOCKED(init_fs.lock), 146 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
147 .umask = 0022, 147 .umask = 0022,
148}; 148};
149 149
@@ -156,14 +156,14 @@ void daemonize_fs_struct(void)
156 156
157 task_lock(current); 157 task_lock(current);
158 158
159 write_lock(&init_fs.lock); 159 spin_lock(&init_fs.lock);
160 init_fs.users++; 160 init_fs.users++;
161 write_unlock(&init_fs.lock); 161 spin_unlock(&init_fs.lock);
162 162
163 write_lock(&fs->lock); 163 spin_lock(&fs->lock);
164 current->fs = &init_fs; 164 current->fs = &init_fs;
165 kill = !--fs->users; 165 kill = !--fs->users;
166 write_unlock(&fs->lock); 166 spin_unlock(&fs->lock);
167 167
168 task_unlock(current); 168 task_unlock(current);
169 if (kill) 169 if (kill)
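Here (and in check_unsafe_exec above) fs->lock becomes a plain spinlock: almost every user writes the structure, so the rwlock bought nothing over a spinlock. The update pattern is unchanged apart from the lock type; a small sketch assuming the post-patch fs_struct layout:

    #include <linux/fs_struct.h>
    #include <linux/path.h>
    #include <linux/spinlock.h>

    /* Swap in a new root under fs->lock; drop the old reference outside it. */
    static void example_set_root(struct fs_struct *fs, struct path *path)
    {
            struct path old_root;

            spin_lock(&fs->lock);
            old_root = fs->root;
            fs->root = *path;
            path_get(path);
            spin_unlock(&fs->lock);

            if (old_root.dentry)
                    path_put(&old_root);    /* may do real work, so not under the lock */
    }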
diff --git a/fs/generic_acl.c b/fs/generic_acl.c
index 99800e564157..6bc9e3a5a693 100644
--- a/fs/generic_acl.c
+++ b/fs/generic_acl.c
@@ -94,6 +94,7 @@ generic_acl_set(struct dentry *dentry, const char *name, const void *value,
94 if (error < 0) 94 if (error < 0)
95 goto failed; 95 goto failed;
96 inode->i_mode = mode; 96 inode->i_mode = mode;
97 inode->i_ctime = CURRENT_TIME;
97 if (error == 0) { 98 if (error == 0) {
98 posix_acl_release(acl); 99 posix_acl_release(acl);
99 acl = NULL; 100 acl = NULL;
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index dd1e55535a4e..f7dc9b5f9ef8 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -104,7 +104,7 @@ static char *__dentry_name(struct dentry *dentry, char *name)
104 __putname(name); 104 __putname(name);
105 return NULL; 105 return NULL;
106 } 106 }
107 strncpy(name, root, PATH_MAX); 107 strlcpy(name, root, PATH_MAX);
108 if (len > p - name) { 108 if (len > p - name) {
109 __putname(name); 109 __putname(name);
110 return NULL; 110 return NULL;
@@ -876,7 +876,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
876 char *path = dentry_name(dentry); 876 char *path = dentry_name(dentry);
877 int err = -ENOMEM; 877 int err = -ENOMEM;
878 if (path) { 878 if (path) {
879 int err = hostfs_do_readlink(path, link, PATH_MAX); 879 err = hostfs_do_readlink(path, link, PATH_MAX);
880 if (err == PATH_MAX) 880 if (err == PATH_MAX)
881 err = -E2BIG; 881 err = -E2BIG;
882 __putname(path); 882 __putname(path);
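Two small hostfs fixes: strncpy() becomes strlcpy() so the copied root path is always NUL-terminated, and the redeclared "int err" inside the if-block (which shadowed the outer err and left it stuck at -ENOMEM) becomes a plain assignment. The shadowing pattern in isolation, as a hypothetical miniature:

    #include <linux/errno.h>
    #include <linux/string.h>

    /* Returns the real status; with the shadowed declaration the outer err never changed. */
    static int example_shadow_fix(char *dst, const char *src, size_t len)
    {
            int err = -ENOMEM;

            if (dst) {
                    /* fixed form: assign to the existing err, don't declare a new one */
                    err = (strlcpy(dst, src, len) >= len) ? -E2BIG : 0;
            }
            return err;
    }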
diff --git a/fs/internal.h b/fs/internal.h
index 6b706bc60a66..a6910e91cee8 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -9,6 +9,8 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#include <linux/lglock.h>
13
12struct super_block; 14struct super_block;
13struct linux_binprm; 15struct linux_binprm;
14struct path; 16struct path;
@@ -70,7 +72,8 @@ extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
70 72
71extern void __init mnt_init(void); 73extern void __init mnt_init(void);
72 74
73extern spinlock_t vfsmount_lock; 75DECLARE_BRLOCK(vfsmount_lock);
76
74 77
75/* 78/*
76 * fs_struct.c 79 * fs_struct.c
@@ -80,6 +83,8 @@ extern void chroot_fs_refs(struct path *, struct path *);
80/* 83/*
81 * file_table.c 84 * file_table.c
82 */ 85 */
86extern void file_sb_list_add(struct file *f, struct super_block *sb);
87extern void file_sb_list_del(struct file *f);
83extern void mark_files_ro(struct super_block *); 88extern void mark_files_ro(struct super_block *);
84extern struct file *get_empty_filp(void); 89extern struct file *get_empty_filp(void);
85 90
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index b0435dd0654d..05a38b9c4c0e 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -254,7 +254,9 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
254{ 254{
255 int i; 255 int i;
256 256
257 ll_rw_block(SWRITE, *batch_count, bhs); 257 for (i = 0; i < *batch_count; i++)
258 write_dirty_buffer(bhs[i], WRITE);
259
258 for (i = 0; i < *batch_count; i++) { 260 for (i = 0; i < *batch_count; i++) {
259 struct buffer_head *bh = bhs[i]; 261 struct buffer_head *bh = bhs[i];
260 clear_buffer_jwrite(bh); 262 clear_buffer_jwrite(bh);
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 28a9ddaa0c49..95d8c11c929e 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -119,7 +119,6 @@ static int journal_write_commit_record(journal_t *journal,
119 struct buffer_head *bh; 119 struct buffer_head *bh;
120 journal_header_t *header; 120 journal_header_t *header;
121 int ret; 121 int ret;
122 int barrier_done = 0;
123 122
124 if (is_journal_aborted(journal)) 123 if (is_journal_aborted(journal))
125 return 0; 124 return 0;
@@ -137,34 +136,36 @@ static int journal_write_commit_record(journal_t *journal,
137 136
138 JBUFFER_TRACE(descriptor, "write commit block"); 137 JBUFFER_TRACE(descriptor, "write commit block");
139 set_buffer_dirty(bh); 138 set_buffer_dirty(bh);
139
140 if (journal->j_flags & JFS_BARRIER) { 140 if (journal->j_flags & JFS_BARRIER) {
141 set_buffer_ordered(bh); 141 ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_BARRIER);
142 barrier_done = 1;
143 }
144 ret = sync_dirty_buffer(bh);
145 if (barrier_done)
146 clear_buffer_ordered(bh);
147 /* is it possible for another commit to fail at roughly
148 * the same time as this one? If so, we don't want to
149 * trust the barrier flag in the super, but instead want
150 * to remember if we sent a barrier request
151 */
152 if (ret == -EOPNOTSUPP && barrier_done) {
153 char b[BDEVNAME_SIZE];
154 142
155 printk(KERN_WARNING 143 /*
156 "JBD: barrier-based sync failed on %s - " 144 * Is it possible for another commit to fail at roughly
157 "disabling barriers\n", 145 * the same time as this one? If so, we don't want to
158 bdevname(journal->j_dev, b)); 146 * trust the barrier flag in the super, but instead want
159 spin_lock(&journal->j_state_lock); 147 * to remember if we sent a barrier request
160 journal->j_flags &= ~JFS_BARRIER; 148 */
161 spin_unlock(&journal->j_state_lock); 149 if (ret == -EOPNOTSUPP) {
150 char b[BDEVNAME_SIZE];
162 151
163 /* And try again, without the barrier */ 152 printk(KERN_WARNING
164 set_buffer_uptodate(bh); 153 "JBD: barrier-based sync failed on %s - "
165 set_buffer_dirty(bh); 154 "disabling barriers\n",
155 bdevname(journal->j_dev, b));
156 spin_lock(&journal->j_state_lock);
157 journal->j_flags &= ~JFS_BARRIER;
158 spin_unlock(&journal->j_state_lock);
159
160 /* And try again, without the barrier */
161 set_buffer_uptodate(bh);
162 set_buffer_dirty(bh);
163 ret = sync_dirty_buffer(bh);
164 }
165 } else {
166 ret = sync_dirty_buffer(bh); 166 ret = sync_dirty_buffer(bh);
167 } 167 }
168
168 put_bh(bh); /* One for getblk() */ 169 put_bh(bh); /* One for getblk() */
169 journal_put_journal_head(descriptor); 170 journal_put_journal_head(descriptor);
170 171
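journal_write_commit_record() loses the set_buffer_ordered()/barrier_done dance: the barrier request is now expressed directly in the submission flags, and -EOPNOTSUPP triggers a one-time fallback that clears JFS_BARRIER and resubmits without the barrier (the jbd2 hunk below does the same with submit_bh). A hedged sketch of that fallback shape, with the journal bookkeeping left out:

    #include <linux/buffer_head.h>
    #include <linux/errno.h>
    #include <linux/fs.h>

    /* Issue a commit block, falling back to a plain write if barriers are unsupported. */
    static int example_write_commit(struct buffer_head *bh, int use_barrier)
    {
            int ret;

            if (use_barrier) {
                    ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_BARRIER);
                    if (ret == -EOPNOTSUPP) {
                            /* device rejected the barrier: mark it dirty again and retry plain */
                            set_buffer_uptodate(bh);
                            set_buffer_dirty(bh);
                            ret = sync_dirty_buffer(bh);
                    }
            } else {
                    ret = sync_dirty_buffer(bh);
            }
            return ret;
    }

In the real code the fallback also clears the journal's barrier flag so later commits skip the barrier path entirely.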
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index f19ce94693d8..2c4b1f109da9 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -1024,7 +1024,7 @@ void journal_update_superblock(journal_t *journal, int wait)
1024 if (wait) 1024 if (wait)
1025 sync_dirty_buffer(bh); 1025 sync_dirty_buffer(bh);
1026 else 1026 else
1027 ll_rw_block(SWRITE, 1, &bh); 1027 write_dirty_buffer(bh, WRITE);
1028 1028
1029out: 1029out:
1030 /* If we have just flushed the log (by marking s_start==0), then 1030 /* If we have just flushed the log (by marking s_start==0), then
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index ad717328343a..d29018307e2e 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -617,7 +617,7 @@ static void flush_descriptor(journal_t *journal,
617 set_buffer_jwrite(bh); 617 set_buffer_jwrite(bh);
618 BUFFER_TRACE(bh, "write"); 618 BUFFER_TRACE(bh, "write");
619 set_buffer_dirty(bh); 619 set_buffer_dirty(bh);
620 ll_rw_block((write_op == WRITE) ? SWRITE : SWRITE_SYNC_PLUG, 1, &bh); 620 write_dirty_buffer(bh, write_op);
621} 621}
622#endif 622#endif
623 623
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 1c23a0f4e8a3..5247e7ffdcb4 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -255,7 +255,9 @@ __flush_batch(journal_t *journal, int *batch_count)
255{ 255{
256 int i; 256 int i;
257 257
258 ll_rw_block(SWRITE, *batch_count, journal->j_chkpt_bhs); 258 for (i = 0; i < *batch_count; i++)
259 write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE);
260
259 for (i = 0; i < *batch_count; i++) { 261 for (i = 0; i < *batch_count; i++) {
260 struct buffer_head *bh = journal->j_chkpt_bhs[i]; 262 struct buffer_head *bh = journal->j_chkpt_bhs[i];
261 clear_buffer_jwrite(bh); 263 clear_buffer_jwrite(bh);
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index f52e5e8049f1..7c068c189d80 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -101,7 +101,6 @@ static int journal_submit_commit_record(journal_t *journal,
101 struct commit_header *tmp; 101 struct commit_header *tmp;
102 struct buffer_head *bh; 102 struct buffer_head *bh;
103 int ret; 103 int ret;
104 int barrier_done = 0;
105 struct timespec now = current_kernel_time(); 104 struct timespec now = current_kernel_time();
106 105
107 if (is_journal_aborted(journal)) 106 if (is_journal_aborted(journal))
@@ -136,30 +135,22 @@ static int journal_submit_commit_record(journal_t *journal,
136 if (journal->j_flags & JBD2_BARRIER && 135 if (journal->j_flags & JBD2_BARRIER &&
137 !JBD2_HAS_INCOMPAT_FEATURE(journal, 136 !JBD2_HAS_INCOMPAT_FEATURE(journal,
138 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) { 137 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
139 set_buffer_ordered(bh); 138 ret = submit_bh(WRITE_SYNC_PLUG | WRITE_BARRIER, bh);
140 barrier_done = 1; 139 if (ret == -EOPNOTSUPP) {
141 } 140 printk(KERN_WARNING
142 ret = submit_bh(WRITE_SYNC_PLUG, bh); 141 "JBD2: Disabling barriers on %s, "
143 if (barrier_done) 142 "not supported by device\n", journal->j_devname);
144 clear_buffer_ordered(bh); 143 write_lock(&journal->j_state_lock);
145 144 journal->j_flags &= ~JBD2_BARRIER;
146 /* is it possible for another commit to fail at roughly 145 write_unlock(&journal->j_state_lock);
147 * the same time as this one? If so, we don't want to
148 * trust the barrier flag in the super, but instead want
149 * to remember if we sent a barrier request
150 */
151 if (ret == -EOPNOTSUPP && barrier_done) {
152 printk(KERN_WARNING
153 "JBD2: Disabling barriers on %s, "
154 "not supported by device\n", journal->j_devname);
155 write_lock(&journal->j_state_lock);
156 journal->j_flags &= ~JBD2_BARRIER;
157 write_unlock(&journal->j_state_lock);
158 146
159 /* And try again, without the barrier */ 147 /* And try again, without the barrier */
160 lock_buffer(bh); 148 lock_buffer(bh);
161 set_buffer_uptodate(bh); 149 set_buffer_uptodate(bh);
162 clear_buffer_dirty(bh); 150 clear_buffer_dirty(bh);
151 ret = submit_bh(WRITE_SYNC_PLUG, bh);
152 }
153 } else {
163 ret = submit_bh(WRITE_SYNC_PLUG, bh); 154 ret = submit_bh(WRITE_SYNC_PLUG, bh);
164 } 155 }
165 *cbh = bh; 156 *cbh = bh;
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index ad5866aaf0f9..0e8014ea6b94 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1124,7 +1124,7 @@ void jbd2_journal_update_superblock(journal_t *journal, int wait)
1124 set_buffer_uptodate(bh); 1124 set_buffer_uptodate(bh);
1125 } 1125 }
1126 } else 1126 } else
1127 ll_rw_block(SWRITE, 1, &bh); 1127 write_dirty_buffer(bh, WRITE);
1128 1128
1129out: 1129out:
1130 /* If we have just flushed the log (by marking s_start==0), then 1130 /* If we have just flushed the log (by marking s_start==0), then
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index a360b06af2e3..9ad321fd63fd 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -625,7 +625,7 @@ static void flush_descriptor(journal_t *journal,
625 set_buffer_jwrite(bh); 625 set_buffer_jwrite(bh);
626 BUFFER_TRACE(bh, "write"); 626 BUFFER_TRACE(bh, "write");
627 set_buffer_dirty(bh); 627 set_buffer_dirty(bh);
628 ll_rw_block((write_op == WRITE) ? SWRITE : SWRITE_SYNC_PLUG, 1, &bh); 628 write_dirty_buffer(bh, write_op);
629} 629}
630#endif 630#endif
631 631
diff --git a/fs/mbcache.c b/fs/mbcache.c
index cf4e6cdfd15b..93444747237b 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -80,6 +80,7 @@ struct mb_cache {
80 struct list_head c_cache_list; 80 struct list_head c_cache_list;
81 const char *c_name; 81 const char *c_name;
82 atomic_t c_entry_count; 82 atomic_t c_entry_count;
83 int c_max_entries;
83 int c_bucket_bits; 84 int c_bucket_bits;
84 struct kmem_cache *c_entry_cache; 85 struct kmem_cache *c_entry_cache;
85 struct list_head *c_block_hash; 86 struct list_head *c_block_hash;
@@ -243,6 +244,12 @@ mb_cache_create(const char *name, int bucket_bits)
243 if (!cache->c_entry_cache) 244 if (!cache->c_entry_cache)
244 goto fail2; 245 goto fail2;
245 246
247 /*
248 * Set an upper limit on the number of cache entries so that the hash
249 * chains won't grow too long.
250 */
251 cache->c_max_entries = bucket_count << 4;
252
246 spin_lock(&mb_cache_spinlock); 253 spin_lock(&mb_cache_spinlock);
247 list_add(&cache->c_cache_list, &mb_cache_list); 254 list_add(&cache->c_cache_list, &mb_cache_list);
248 spin_unlock(&mb_cache_spinlock); 255 spin_unlock(&mb_cache_spinlock);
@@ -333,7 +340,6 @@ mb_cache_destroy(struct mb_cache *cache)
333 kfree(cache); 340 kfree(cache);
334} 341}
335 342
336
337/* 343/*
338 * mb_cache_entry_alloc() 344 * mb_cache_entry_alloc()
339 * 345 *
@@ -345,17 +351,29 @@ mb_cache_destroy(struct mb_cache *cache)
345struct mb_cache_entry * 351struct mb_cache_entry *
346mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags) 352mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
347{ 353{
348 struct mb_cache_entry *ce; 354 struct mb_cache_entry *ce = NULL;
349 355
350 ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags); 356 if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
351 if (ce) { 357 spin_lock(&mb_cache_spinlock);
358 if (!list_empty(&mb_cache_lru_list)) {
359 ce = list_entry(mb_cache_lru_list.next,
360 struct mb_cache_entry, e_lru_list);
361 list_del_init(&ce->e_lru_list);
362 __mb_cache_entry_unhash(ce);
363 }
364 spin_unlock(&mb_cache_spinlock);
365 }
366 if (!ce) {
367 ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
368 if (!ce)
369 return NULL;
352 atomic_inc(&cache->c_entry_count); 370 atomic_inc(&cache->c_entry_count);
353 INIT_LIST_HEAD(&ce->e_lru_list); 371 INIT_LIST_HEAD(&ce->e_lru_list);
354 INIT_LIST_HEAD(&ce->e_block_list); 372 INIT_LIST_HEAD(&ce->e_block_list);
355 ce->e_cache = cache; 373 ce->e_cache = cache;
356 ce->e_used = 1 + MB_CACHE_WRITER;
357 ce->e_queued = 0; 374 ce->e_queued = 0;
358 } 375 }
376 ce->e_used = 1 + MB_CACHE_WRITER;
359 return ce; 377 return ce;
360} 378}
361 379
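mb_cache_entry_alloc() now enforces c_max_entries (sixteen entries per hash bucket): once at the limit it recycles the oldest entry from the global LRU instead of growing the cache. The control flow reduced to its shape, as an illustrative generic sketch (the entry type and parameters are invented for the example; the patch uses mb_cache_spinlock and mb_cache_lru_list):

    #include <asm/atomic.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct example_entry {
            struct list_head lru;
            /* ... payload ... */
    };

    static struct example_entry *example_alloc(struct kmem_cache *cachep,
                                               struct list_head *lru_list,
                                               atomic_t *count, int max,
                                               spinlock_t *lock, gfp_t gfp)
    {
            struct example_entry *e = NULL;

            if (atomic_read(count) >= max) {
                    spin_lock(lock);
                    if (!list_empty(lru_list)) {        /* steal the coldest entry */
                            e = list_entry(lru_list->next, struct example_entry, lru);
                            list_del_init(&e->lru);
                    }
                    spin_unlock(lock);
            }
            if (!e) {
                    e = kmem_cache_alloc(cachep, gfp);
                    if (!e)
                            return NULL;
                    atomic_inc(count);
                    INIT_LIST_HEAD(&e->lru);
            }
            return e;
    }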
diff --git a/fs/namei.c b/fs/namei.c
index 17ea76bf2fbe..24896e833565 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -595,15 +595,16 @@ int follow_up(struct path *path)
595{ 595{
596 struct vfsmount *parent; 596 struct vfsmount *parent;
597 struct dentry *mountpoint; 597 struct dentry *mountpoint;
598 spin_lock(&vfsmount_lock); 598
599 br_read_lock(vfsmount_lock);
599 parent = path->mnt->mnt_parent; 600 parent = path->mnt->mnt_parent;
600 if (parent == path->mnt) { 601 if (parent == path->mnt) {
601 spin_unlock(&vfsmount_lock); 602 br_read_unlock(vfsmount_lock);
602 return 0; 603 return 0;
603 } 604 }
604 mntget(parent); 605 mntget(parent);
605 mountpoint = dget(path->mnt->mnt_mountpoint); 606 mountpoint = dget(path->mnt->mnt_mountpoint);
606 spin_unlock(&vfsmount_lock); 607 br_read_unlock(vfsmount_lock);
607 dput(path->dentry); 608 dput(path->dentry);
608 path->dentry = mountpoint; 609 path->dentry = mountpoint;
609 mntput(path->mnt); 610 mntput(path->mnt);
@@ -686,6 +687,35 @@ static __always_inline void follow_dotdot(struct nameidata *nd)
686} 687}
687 688
688/* 689/*
690 * Allocate a dentry with name and parent, and perform a parent
691 * directory ->lookup on it. Returns the new dentry, or ERR_PTR
692 * on error. parent->d_inode->i_mutex must be held. d_lookup must
693 * have verified that no child exists while under i_mutex.
694 */
695static struct dentry *d_alloc_and_lookup(struct dentry *parent,
696 struct qstr *name, struct nameidata *nd)
697{
698 struct inode *inode = parent->d_inode;
699 struct dentry *dentry;
700 struct dentry *old;
701
702 /* Don't create child dentry for a dead directory. */
703 if (unlikely(IS_DEADDIR(inode)))
704 return ERR_PTR(-ENOENT);
705
706 dentry = d_alloc(parent, name);
707 if (unlikely(!dentry))
708 return ERR_PTR(-ENOMEM);
709
710 old = inode->i_op->lookup(inode, dentry, nd);
711 if (unlikely(old)) {
712 dput(dentry);
713 dentry = old;
714 }
715 return dentry;
716}
717
718/*
689 * It's more convoluted than I'd like it to be, but... it's still fairly 719 * It's more convoluted than I'd like it to be, but... it's still fairly
690 * small and for now I'd prefer to have fast path as straight as possible. 720 * small and for now I'd prefer to have fast path as straight as possible.
691 * It _is_ time-critical. 721 * It _is_ time-critical.
@@ -706,9 +736,15 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
706 return err; 736 return err;
707 } 737 }
708 738
739 /*
740 * Rename seqlock is not required here because in the off chance
741 * of a false negative due to a concurrent rename, we're going to
742 * do the non-racy lookup, below.
743 */
709 dentry = __d_lookup(nd->path.dentry, name); 744 dentry = __d_lookup(nd->path.dentry, name);
710 if (!dentry) 745 if (!dentry)
711 goto need_lookup; 746 goto need_lookup;
747found:
712 if (dentry->d_op && dentry->d_op->d_revalidate) 748 if (dentry->d_op && dentry->d_op->d_revalidate)
713 goto need_revalidate; 749 goto need_revalidate;
714done: 750done:
@@ -724,56 +760,28 @@ need_lookup:
724 mutex_lock(&dir->i_mutex); 760 mutex_lock(&dir->i_mutex);
725 /* 761 /*
726 * First re-do the cached lookup just in case it was created 762 * First re-do the cached lookup just in case it was created
727 * while we waited for the directory semaphore.. 763 * while we waited for the directory semaphore, or the first
764 * lookup failed due to an unrelated rename.
728 * 765 *
729 * FIXME! This could use version numbering or similar to 766 * This could use version numbering or similar to avoid unnecessary
730 * avoid unnecessary cache lookups. 767 * cache lookups, but then we'd have to do the first lookup in the
731 * 768 * non-racy way. However in the common case here, everything should
732 * The "dcache_lock" is purely to protect the RCU list walker 769 * be hot in cache, so would it be a big win?
733 * from concurrent renames at this point (we mustn't get false
734 * negatives from the RCU list walk here, unlike the optimistic
735 * fast walk).
736 *
737 * so doing d_lookup() (with seqlock), instead of lockfree __d_lookup
738 */ 770 */
739 dentry = d_lookup(parent, name); 771 dentry = d_lookup(parent, name);
740 if (!dentry) { 772 if (likely(!dentry)) {
741 struct dentry *new; 773 dentry = d_alloc_and_lookup(parent, name, nd);
742
743 /* Don't create child dentry for a dead directory. */
744 dentry = ERR_PTR(-ENOENT);
745 if (IS_DEADDIR(dir))
746 goto out_unlock;
747
748 new = d_alloc(parent, name);
749 dentry = ERR_PTR(-ENOMEM);
750 if (new) {
751 dentry = dir->i_op->lookup(dir, new, nd);
752 if (dentry)
753 dput(new);
754 else
755 dentry = new;
756 }
757out_unlock:
758 mutex_unlock(&dir->i_mutex); 774 mutex_unlock(&dir->i_mutex);
759 if (IS_ERR(dentry)) 775 if (IS_ERR(dentry))
760 goto fail; 776 goto fail;
761 goto done; 777 goto done;
762 } 778 }
763
764 /* 779 /*
765 * Uhhuh! Nasty case: the cache was re-populated while 780 * Uhhuh! Nasty case: the cache was re-populated while
766 * we waited on the semaphore. Need to revalidate. 781 * we waited on the semaphore. Need to revalidate.
767 */ 782 */
768 mutex_unlock(&dir->i_mutex); 783 mutex_unlock(&dir->i_mutex);
769 if (dentry->d_op && dentry->d_op->d_revalidate) { 784 goto found;
770 dentry = do_revalidate(dentry, nd);
771 if (!dentry)
772 dentry = ERR_PTR(-ENOENT);
773 }
774 if (IS_ERR(dentry))
775 goto fail;
776 goto done;
777 785
778need_revalidate: 786need_revalidate:
779 dentry = do_revalidate(dentry, nd); 787 dentry = do_revalidate(dentry, nd);
@@ -1130,35 +1138,18 @@ static struct dentry *__lookup_hash(struct qstr *name,
1130 goto out; 1138 goto out;
1131 } 1139 }
1132 1140
1133 dentry = __d_lookup(base, name); 1141 /*
1134 1142 * Don't bother with __d_lookup: callers are for creat as
1135 /* lockess __d_lookup may fail due to concurrent d_move() 1143 * well as unlink, so a lot of the time it would cost
1136 * in some unrelated directory, so try with d_lookup 1144 * a double lookup.
1137 */ 1145 */
1138 if (!dentry) 1146 dentry = d_lookup(base, name);
1139 dentry = d_lookup(base, name);
1140 1147
1141 if (dentry && dentry->d_op && dentry->d_op->d_revalidate) 1148 if (dentry && dentry->d_op && dentry->d_op->d_revalidate)
1142 dentry = do_revalidate(dentry, nd); 1149 dentry = do_revalidate(dentry, nd);
1143 1150
1144 if (!dentry) { 1151 if (!dentry)
1145 struct dentry *new; 1152 dentry = d_alloc_and_lookup(base, name, nd);
1146
1147 /* Don't create child dentry for a dead directory. */
1148 dentry = ERR_PTR(-ENOENT);
1149 if (IS_DEADDIR(inode))
1150 goto out;
1151
1152 new = d_alloc(base, name);
1153 dentry = ERR_PTR(-ENOMEM);
1154 if (!new)
1155 goto out;
1156 dentry = inode->i_op->lookup(inode, new, nd);
1157 if (!dentry)
1158 dentry = new;
1159 else
1160 dput(new);
1161 }
1162out: 1153out:
1163 return dentry; 1154 return dentry;
1164} 1155}
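do_lookup() and __lookup_hash() now funnel the "not in the dcache" case through the new d_alloc_and_lookup() helper, and do_lookup() documents its two-step strategy: try the lock-free __d_lookup() first, and only on a miss (which may be a false negative during a concurrent rename) take i_mutex and repeat with the rename_lock-protected d_lookup(). Roughly, as if it lived next to the helper in fs/namei.c (revalidation and error paths omitted):

    #include <linux/dcache.h>
    #include <linux/fs.h>
    #include <linux/mutex.h>
    #include <linux/namei.h>

    /* Sketch of the fast/slow lookup split; dir->i_mutex is held on the slow path. */
    static struct dentry *example_lookup(struct dentry *parent, struct qstr *name,
                                         struct nameidata *nd)
    {
            struct dentry *dentry;

            dentry = __d_lookup(parent, name);  /* lock-free; can miss under rename */
            if (dentry)
                    return dentry;

            mutex_lock(&parent->d_inode->i_mutex);
            dentry = d_lookup(parent, name);    /* non-racy: uses the rename_lock seqlock */
            if (!dentry)
                    dentry = d_alloc_and_lookup(parent, name, nd);  /* helper added above */
            mutex_unlock(&parent->d_inode->i_mutex);
            return dentry;
    }

__lookup_hash(), by contrast, drops the __d_lookup() fast path entirely, since its callers already hold i_mutex and would otherwise pay for two lookups.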
diff --git a/fs/namespace.c b/fs/namespace.c
index 2e10cb19c5b0..de402eb6eafb 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -11,6 +11,8 @@
11#include <linux/syscalls.h> 11#include <linux/syscalls.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/spinlock.h>
15#include <linux/percpu.h>
14#include <linux/smp_lock.h> 16#include <linux/smp_lock.h>
15#include <linux/init.h> 17#include <linux/init.h>
16#include <linux/kernel.h> 18#include <linux/kernel.h>
@@ -38,12 +40,10 @@
38#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head)) 40#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
39#define HASH_SIZE (1UL << HASH_SHIFT) 41#define HASH_SIZE (1UL << HASH_SHIFT)
40 42
41/* spinlock for vfsmount related operations, inplace of dcache_lock */
42__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
43
44static int event; 43static int event;
45static DEFINE_IDA(mnt_id_ida); 44static DEFINE_IDA(mnt_id_ida);
46static DEFINE_IDA(mnt_group_ida); 45static DEFINE_IDA(mnt_group_ida);
46static DEFINE_SPINLOCK(mnt_id_lock);
47static int mnt_id_start = 0; 47static int mnt_id_start = 0;
48static int mnt_group_start = 1; 48static int mnt_group_start = 1;
49 49
@@ -55,6 +55,16 @@ static struct rw_semaphore namespace_sem;
55struct kobject *fs_kobj; 55struct kobject *fs_kobj;
56EXPORT_SYMBOL_GPL(fs_kobj); 56EXPORT_SYMBOL_GPL(fs_kobj);
57 57
58/*
59 * vfsmount lock may be taken for read to prevent changes to the
60 * vfsmount hash, ie. during mountpoint lookups or walking back
61 * up the tree.
62 *
63 * It should be taken for write in all cases where the vfsmount
64 * tree or hash is modified or when a vfsmount structure is modified.
65 */
66DEFINE_BRLOCK(vfsmount_lock);
67
58static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry) 68static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
59{ 69{
60 unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); 70 unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
@@ -65,18 +75,21 @@ static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
65 75
66#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16) 76#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)
67 77
68/* allocation is serialized by namespace_sem */ 78/*
79 * allocation is serialized by namespace_sem, but we need the spinlock to
80 * serialize with freeing.
81 */
69static int mnt_alloc_id(struct vfsmount *mnt) 82static int mnt_alloc_id(struct vfsmount *mnt)
70{ 83{
71 int res; 84 int res;
72 85
73retry: 86retry:
74 ida_pre_get(&mnt_id_ida, GFP_KERNEL); 87 ida_pre_get(&mnt_id_ida, GFP_KERNEL);
75 spin_lock(&vfsmount_lock); 88 spin_lock(&mnt_id_lock);
76 res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id); 89 res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
77 if (!res) 90 if (!res)
78 mnt_id_start = mnt->mnt_id + 1; 91 mnt_id_start = mnt->mnt_id + 1;
79 spin_unlock(&vfsmount_lock); 92 spin_unlock(&mnt_id_lock);
80 if (res == -EAGAIN) 93 if (res == -EAGAIN)
81 goto retry; 94 goto retry;
82 95
@@ -86,11 +99,11 @@ retry:
86static void mnt_free_id(struct vfsmount *mnt) 99static void mnt_free_id(struct vfsmount *mnt)
87{ 100{
88 int id = mnt->mnt_id; 101 int id = mnt->mnt_id;
89 spin_lock(&vfsmount_lock); 102 spin_lock(&mnt_id_lock);
90 ida_remove(&mnt_id_ida, id); 103 ida_remove(&mnt_id_ida, id);
91 if (mnt_id_start > id) 104 if (mnt_id_start > id)
92 mnt_id_start = id; 105 mnt_id_start = id;
93 spin_unlock(&vfsmount_lock); 106 spin_unlock(&mnt_id_lock);
94} 107}
95 108
96/* 109/*
@@ -348,7 +361,7 @@ static int mnt_make_readonly(struct vfsmount *mnt)
348{ 361{
349 int ret = 0; 362 int ret = 0;
350 363
351 spin_lock(&vfsmount_lock); 364 br_write_lock(vfsmount_lock);
352 mnt->mnt_flags |= MNT_WRITE_HOLD; 365 mnt->mnt_flags |= MNT_WRITE_HOLD;
353 /* 366 /*
354 * After storing MNT_WRITE_HOLD, we'll read the counters. This store 367 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
@@ -382,15 +395,15 @@ static int mnt_make_readonly(struct vfsmount *mnt)
382 */ 395 */
383 smp_wmb(); 396 smp_wmb();
384 mnt->mnt_flags &= ~MNT_WRITE_HOLD; 397 mnt->mnt_flags &= ~MNT_WRITE_HOLD;
385 spin_unlock(&vfsmount_lock); 398 br_write_unlock(vfsmount_lock);
386 return ret; 399 return ret;
387} 400}
388 401
389static void __mnt_unmake_readonly(struct vfsmount *mnt) 402static void __mnt_unmake_readonly(struct vfsmount *mnt)
390{ 403{
391 spin_lock(&vfsmount_lock); 404 br_write_lock(vfsmount_lock);
392 mnt->mnt_flags &= ~MNT_READONLY; 405 mnt->mnt_flags &= ~MNT_READONLY;
393 spin_unlock(&vfsmount_lock); 406 br_write_unlock(vfsmount_lock);
394} 407}
395 408
396void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb) 409void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
@@ -414,6 +427,7 @@ void free_vfsmnt(struct vfsmount *mnt)
414/* 427/*
415 * find the first or last mount at @dentry on vfsmount @mnt depending on 428 * find the first or last mount at @dentry on vfsmount @mnt depending on
416 * @dir. If @dir is set return the first mount else return the last mount. 429 * @dir. If @dir is set return the first mount else return the last mount.
430 * vfsmount_lock must be held for read or write.
417 */ 431 */
418struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry, 432struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
419 int dir) 433 int dir)
@@ -443,10 +457,11 @@ struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
443struct vfsmount *lookup_mnt(struct path *path) 457struct vfsmount *lookup_mnt(struct path *path)
444{ 458{
445 struct vfsmount *child_mnt; 459 struct vfsmount *child_mnt;
446 spin_lock(&vfsmount_lock); 460
461 br_read_lock(vfsmount_lock);
447 if ((child_mnt = __lookup_mnt(path->mnt, path->dentry, 1))) 462 if ((child_mnt = __lookup_mnt(path->mnt, path->dentry, 1)))
448 mntget(child_mnt); 463 mntget(child_mnt);
449 spin_unlock(&vfsmount_lock); 464 br_read_unlock(vfsmount_lock);
450 return child_mnt; 465 return child_mnt;
451} 466}
452 467
@@ -455,6 +470,9 @@ static inline int check_mnt(struct vfsmount *mnt)
455 return mnt->mnt_ns == current->nsproxy->mnt_ns; 470 return mnt->mnt_ns == current->nsproxy->mnt_ns;
456} 471}
457 472
473/*
474 * vfsmount lock must be held for write
475 */
458static void touch_mnt_namespace(struct mnt_namespace *ns) 476static void touch_mnt_namespace(struct mnt_namespace *ns)
459{ 477{
460 if (ns) { 478 if (ns) {
@@ -463,6 +481,9 @@ static void touch_mnt_namespace(struct mnt_namespace *ns)
463 } 481 }
464} 482}
465 483
484/*
485 * vfsmount lock must be held for write
486 */
466static void __touch_mnt_namespace(struct mnt_namespace *ns) 487static void __touch_mnt_namespace(struct mnt_namespace *ns)
467{ 488{
468 if (ns && ns->event != event) { 489 if (ns && ns->event != event) {
@@ -471,6 +492,9 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns)
471 } 492 }
472} 493}
473 494
495/*
496 * vfsmount lock must be held for write
497 */
474static void detach_mnt(struct vfsmount *mnt, struct path *old_path) 498static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
475{ 499{
476 old_path->dentry = mnt->mnt_mountpoint; 500 old_path->dentry = mnt->mnt_mountpoint;
@@ -482,6 +506,9 @@ static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
482 old_path->dentry->d_mounted--; 506 old_path->dentry->d_mounted--;
483} 507}
484 508
509/*
510 * vfsmount lock must be held for write
511 */
485void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry, 512void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
486 struct vfsmount *child_mnt) 513 struct vfsmount *child_mnt)
487{ 514{
@@ -490,6 +517,9 @@ void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
490 dentry->d_mounted++; 517 dentry->d_mounted++;
491} 518}
492 519
520/*
521 * vfsmount lock must be held for write
522 */
493static void attach_mnt(struct vfsmount *mnt, struct path *path) 523static void attach_mnt(struct vfsmount *mnt, struct path *path)
494{ 524{
495 mnt_set_mountpoint(path->mnt, path->dentry, mnt); 525 mnt_set_mountpoint(path->mnt, path->dentry, mnt);
@@ -499,7 +529,7 @@ static void attach_mnt(struct vfsmount *mnt, struct path *path)
499} 529}
500 530
501/* 531/*
502 * the caller must hold vfsmount_lock 532 * vfsmount lock must be held for write
503 */ 533 */
504static void commit_tree(struct vfsmount *mnt) 534static void commit_tree(struct vfsmount *mnt)
505{ 535{
@@ -623,39 +653,43 @@ static inline void __mntput(struct vfsmount *mnt)
623void mntput_no_expire(struct vfsmount *mnt) 653void mntput_no_expire(struct vfsmount *mnt)
624{ 654{
625repeat: 655repeat:
626 if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) { 656 if (atomic_add_unless(&mnt->mnt_count, -1, 1))
627 if (likely(!mnt->mnt_pinned)) { 657 return;
628 spin_unlock(&vfsmount_lock); 658 br_write_lock(vfsmount_lock);
629 __mntput(mnt); 659 if (!atomic_dec_and_test(&mnt->mnt_count)) {
630 return; 660 br_write_unlock(vfsmount_lock);
631 } 661 return;
632 atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count); 662 }
633 mnt->mnt_pinned = 0; 663 if (likely(!mnt->mnt_pinned)) {
634 spin_unlock(&vfsmount_lock); 664 br_write_unlock(vfsmount_lock);
635 acct_auto_close_mnt(mnt); 665 __mntput(mnt);
636 goto repeat; 666 return;
637 } 667 }
668 atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
669 mnt->mnt_pinned = 0;
670 br_write_unlock(vfsmount_lock);
671 acct_auto_close_mnt(mnt);
672 goto repeat;
638} 673}
639
640EXPORT_SYMBOL(mntput_no_expire); 674EXPORT_SYMBOL(mntput_no_expire);
641 675
642void mnt_pin(struct vfsmount *mnt) 676void mnt_pin(struct vfsmount *mnt)
643{ 677{
644 spin_lock(&vfsmount_lock); 678 br_write_lock(vfsmount_lock);
645 mnt->mnt_pinned++; 679 mnt->mnt_pinned++;
646 spin_unlock(&vfsmount_lock); 680 br_write_unlock(vfsmount_lock);
647} 681}
648 682
649EXPORT_SYMBOL(mnt_pin); 683EXPORT_SYMBOL(mnt_pin);
650 684
651void mnt_unpin(struct vfsmount *mnt) 685void mnt_unpin(struct vfsmount *mnt)
652{ 686{
653 spin_lock(&vfsmount_lock); 687 br_write_lock(vfsmount_lock);
654 if (mnt->mnt_pinned) { 688 if (mnt->mnt_pinned) {
655 atomic_inc(&mnt->mnt_count); 689 atomic_inc(&mnt->mnt_count);
656 mnt->mnt_pinned--; 690 mnt->mnt_pinned--;
657 } 691 }
658 spin_unlock(&vfsmount_lock); 692 br_write_unlock(vfsmount_lock);
659} 693}
660 694
661EXPORT_SYMBOL(mnt_unpin); 695EXPORT_SYMBOL(mnt_unpin);
@@ -746,12 +780,12 @@ int mnt_had_events(struct proc_mounts *p)
746 struct mnt_namespace *ns = p->ns; 780 struct mnt_namespace *ns = p->ns;
747 int res = 0; 781 int res = 0;
748 782
749 spin_lock(&vfsmount_lock); 783 br_read_lock(vfsmount_lock);
750 if (p->event != ns->event) { 784 if (p->event != ns->event) {
751 p->event = ns->event; 785 p->event = ns->event;
752 res = 1; 786 res = 1;
753 } 787 }
754 spin_unlock(&vfsmount_lock); 788 br_read_unlock(vfsmount_lock);
755 789
756 return res; 790 return res;
757} 791}
@@ -952,12 +986,12 @@ int may_umount_tree(struct vfsmount *mnt)
952 int minimum_refs = 0; 986 int minimum_refs = 0;
953 struct vfsmount *p; 987 struct vfsmount *p;
954 988
955 spin_lock(&vfsmount_lock); 989 br_read_lock(vfsmount_lock);
956 for (p = mnt; p; p = next_mnt(p, mnt)) { 990 for (p = mnt; p; p = next_mnt(p, mnt)) {
957 actual_refs += atomic_read(&p->mnt_count); 991 actual_refs += atomic_read(&p->mnt_count);
958 minimum_refs += 2; 992 minimum_refs += 2;
959 } 993 }
960 spin_unlock(&vfsmount_lock); 994 br_read_unlock(vfsmount_lock);
961 995
962 if (actual_refs > minimum_refs) 996 if (actual_refs > minimum_refs)
963 return 0; 997 return 0;
@@ -984,10 +1018,10 @@ int may_umount(struct vfsmount *mnt)
984{ 1018{
985 int ret = 1; 1019 int ret = 1;
986 down_read(&namespace_sem); 1020 down_read(&namespace_sem);
987 spin_lock(&vfsmount_lock); 1021 br_read_lock(vfsmount_lock);
988 if (propagate_mount_busy(mnt, 2)) 1022 if (propagate_mount_busy(mnt, 2))
989 ret = 0; 1023 ret = 0;
990 spin_unlock(&vfsmount_lock); 1024 br_read_unlock(vfsmount_lock);
991 up_read(&namespace_sem); 1025 up_read(&namespace_sem);
992 return ret; 1026 return ret;
993} 1027}
@@ -1003,13 +1037,14 @@ void release_mounts(struct list_head *head)
1003 if (mnt->mnt_parent != mnt) { 1037 if (mnt->mnt_parent != mnt) {
1004 struct dentry *dentry; 1038 struct dentry *dentry;
1005 struct vfsmount *m; 1039 struct vfsmount *m;
1006 spin_lock(&vfsmount_lock); 1040
1041 br_write_lock(vfsmount_lock);
1007 dentry = mnt->mnt_mountpoint; 1042 dentry = mnt->mnt_mountpoint;
1008 m = mnt->mnt_parent; 1043 m = mnt->mnt_parent;
1009 mnt->mnt_mountpoint = mnt->mnt_root; 1044 mnt->mnt_mountpoint = mnt->mnt_root;
1010 mnt->mnt_parent = mnt; 1045 mnt->mnt_parent = mnt;
1011 m->mnt_ghosts--; 1046 m->mnt_ghosts--;
1012 spin_unlock(&vfsmount_lock); 1047 br_write_unlock(vfsmount_lock);
1013 dput(dentry); 1048 dput(dentry);
1014 mntput(m); 1049 mntput(m);
1015 } 1050 }
@@ -1017,6 +1052,10 @@ void release_mounts(struct list_head *head)
1017 } 1052 }
1018} 1053}
1019 1054
1055/*
1056 * vfsmount lock must be held for write
1057 * namespace_sem must be held for write
1058 */
1020void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill) 1059void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
1021{ 1060{
1022 struct vfsmount *p; 1061 struct vfsmount *p;
@@ -1107,7 +1146,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
1107 } 1146 }
1108 1147
1109 down_write(&namespace_sem); 1148 down_write(&namespace_sem);
1110 spin_lock(&vfsmount_lock); 1149 br_write_lock(vfsmount_lock);
1111 event++; 1150 event++;
1112 1151
1113 if (!(flags & MNT_DETACH)) 1152 if (!(flags & MNT_DETACH))
@@ -1119,7 +1158,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
1119 umount_tree(mnt, 1, &umount_list); 1158 umount_tree(mnt, 1, &umount_list);
1120 retval = 0; 1159 retval = 0;
1121 } 1160 }
1122 spin_unlock(&vfsmount_lock); 1161 br_write_unlock(vfsmount_lock);
1123 up_write(&namespace_sem); 1162 up_write(&namespace_sem);
1124 release_mounts(&umount_list); 1163 release_mounts(&umount_list);
1125 return retval; 1164 return retval;
@@ -1231,19 +1270,19 @@ struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
1231 q = clone_mnt(p, p->mnt_root, flag); 1270 q = clone_mnt(p, p->mnt_root, flag);
1232 if (!q) 1271 if (!q)
1233 goto Enomem; 1272 goto Enomem;
1234 spin_lock(&vfsmount_lock); 1273 br_write_lock(vfsmount_lock);
1235 list_add_tail(&q->mnt_list, &res->mnt_list); 1274 list_add_tail(&q->mnt_list, &res->mnt_list);
1236 attach_mnt(q, &path); 1275 attach_mnt(q, &path);
1237 spin_unlock(&vfsmount_lock); 1276 br_write_unlock(vfsmount_lock);
1238 } 1277 }
1239 } 1278 }
1240 return res; 1279 return res;
1241Enomem: 1280Enomem:
1242 if (res) { 1281 if (res) {
1243 LIST_HEAD(umount_list); 1282 LIST_HEAD(umount_list);
1244 spin_lock(&vfsmount_lock); 1283 br_write_lock(vfsmount_lock);
1245 umount_tree(res, 0, &umount_list); 1284 umount_tree(res, 0, &umount_list);
1246 spin_unlock(&vfsmount_lock); 1285 br_write_unlock(vfsmount_lock);
1247 release_mounts(&umount_list); 1286 release_mounts(&umount_list);
1248 } 1287 }
1249 return NULL; 1288 return NULL;
@@ -1262,9 +1301,9 @@ void drop_collected_mounts(struct vfsmount *mnt)
1262{ 1301{
1263 LIST_HEAD(umount_list); 1302 LIST_HEAD(umount_list);
1264 down_write(&namespace_sem); 1303 down_write(&namespace_sem);
1265 spin_lock(&vfsmount_lock); 1304 br_write_lock(vfsmount_lock);
1266 umount_tree(mnt, 0, &umount_list); 1305 umount_tree(mnt, 0, &umount_list);
1267 spin_unlock(&vfsmount_lock); 1306 br_write_unlock(vfsmount_lock);
1268 up_write(&namespace_sem); 1307 up_write(&namespace_sem);
1269 release_mounts(&umount_list); 1308 release_mounts(&umount_list);
1270} 1309}
@@ -1392,7 +1431,7 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
1392 if (err) 1431 if (err)
1393 goto out_cleanup_ids; 1432 goto out_cleanup_ids;
1394 1433
1395 spin_lock(&vfsmount_lock); 1434 br_write_lock(vfsmount_lock);
1396 1435
1397 if (IS_MNT_SHARED(dest_mnt)) { 1436 if (IS_MNT_SHARED(dest_mnt)) {
1398 for (p = source_mnt; p; p = next_mnt(p, source_mnt)) 1437 for (p = source_mnt; p; p = next_mnt(p, source_mnt))
@@ -1411,7 +1450,8 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
1411 list_del_init(&child->mnt_hash); 1450 list_del_init(&child->mnt_hash);
1412 commit_tree(child); 1451 commit_tree(child);
1413 } 1452 }
1414 spin_unlock(&vfsmount_lock); 1453 br_write_unlock(vfsmount_lock);
1454
1415 return 0; 1455 return 0;
1416 1456
1417 out_cleanup_ids: 1457 out_cleanup_ids:
@@ -1466,10 +1506,10 @@ static int do_change_type(struct path *path, int flag)
1466 goto out_unlock; 1506 goto out_unlock;
1467 } 1507 }
1468 1508
1469 spin_lock(&vfsmount_lock); 1509 br_write_lock(vfsmount_lock);
1470 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL)) 1510 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
1471 change_mnt_propagation(m, type); 1511 change_mnt_propagation(m, type);
1472 spin_unlock(&vfsmount_lock); 1512 br_write_unlock(vfsmount_lock);
1473 1513
1474 out_unlock: 1514 out_unlock:
1475 up_write(&namespace_sem); 1515 up_write(&namespace_sem);
@@ -1513,9 +1553,10 @@ static int do_loopback(struct path *path, char *old_name,
1513 err = graft_tree(mnt, path); 1553 err = graft_tree(mnt, path);
1514 if (err) { 1554 if (err) {
1515 LIST_HEAD(umount_list); 1555 LIST_HEAD(umount_list);
1516 spin_lock(&vfsmount_lock); 1556
1557 br_write_lock(vfsmount_lock);
1517 umount_tree(mnt, 0, &umount_list); 1558 umount_tree(mnt, 0, &umount_list);
1518 spin_unlock(&vfsmount_lock); 1559 br_write_unlock(vfsmount_lock);
1519 release_mounts(&umount_list); 1560 release_mounts(&umount_list);
1520 } 1561 }
1521 1562
@@ -1568,16 +1609,16 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
1568 else 1609 else
1569 err = do_remount_sb(sb, flags, data, 0); 1610 err = do_remount_sb(sb, flags, data, 0);
1570 if (!err) { 1611 if (!err) {
1571 spin_lock(&vfsmount_lock); 1612 br_write_lock(vfsmount_lock);
1572 mnt_flags |= path->mnt->mnt_flags & MNT_PROPAGATION_MASK; 1613 mnt_flags |= path->mnt->mnt_flags & MNT_PROPAGATION_MASK;
1573 path->mnt->mnt_flags = mnt_flags; 1614 path->mnt->mnt_flags = mnt_flags;
1574 spin_unlock(&vfsmount_lock); 1615 br_write_unlock(vfsmount_lock);
1575 } 1616 }
1576 up_write(&sb->s_umount); 1617 up_write(&sb->s_umount);
1577 if (!err) { 1618 if (!err) {
1578 spin_lock(&vfsmount_lock); 1619 br_write_lock(vfsmount_lock);
1579 touch_mnt_namespace(path->mnt->mnt_ns); 1620 touch_mnt_namespace(path->mnt->mnt_ns);
1580 spin_unlock(&vfsmount_lock); 1621 br_write_unlock(vfsmount_lock);
1581 } 1622 }
1582 return err; 1623 return err;
1583} 1624}
@@ -1754,7 +1795,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
1754 return; 1795 return;
1755 1796
1756 down_write(&namespace_sem); 1797 down_write(&namespace_sem);
1757 spin_lock(&vfsmount_lock); 1798 br_write_lock(vfsmount_lock);
1758 1799
1759 /* extract from the expiration list every vfsmount that matches the 1800 /* extract from the expiration list every vfsmount that matches the
1760 * following criteria: 1801 * following criteria:
@@ -1773,7 +1814,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
1773 touch_mnt_namespace(mnt->mnt_ns); 1814 touch_mnt_namespace(mnt->mnt_ns);
1774 umount_tree(mnt, 1, &umounts); 1815 umount_tree(mnt, 1, &umounts);
1775 } 1816 }
1776 spin_unlock(&vfsmount_lock); 1817 br_write_unlock(vfsmount_lock);
1777 up_write(&namespace_sem); 1818 up_write(&namespace_sem);
1778 1819
1779 release_mounts(&umounts); 1820 release_mounts(&umounts);
@@ -1830,6 +1871,8 @@ resume:
1830/* 1871/*
1831 * process a list of expirable mountpoints with the intent of discarding any 1872 * process a list of expirable mountpoints with the intent of discarding any
1832 * submounts of a specific parent mountpoint 1873 * submounts of a specific parent mountpoint
1874 *
1875 * vfsmount_lock must be held for write
1833 */ 1876 */
1834static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts) 1877static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts)
1835{ 1878{
@@ -2048,9 +2091,9 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
2048 kfree(new_ns); 2091 kfree(new_ns);
2049 return ERR_PTR(-ENOMEM); 2092 return ERR_PTR(-ENOMEM);
2050 } 2093 }
2051 spin_lock(&vfsmount_lock); 2094 br_write_lock(vfsmount_lock);
2052 list_add_tail(&new_ns->list, &new_ns->root->mnt_list); 2095 list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
2053 spin_unlock(&vfsmount_lock); 2096 br_write_unlock(vfsmount_lock);
2054 2097
2055 /* 2098 /*
2056 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts 2099 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
@@ -2244,7 +2287,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
2244 goto out2; /* not attached */ 2287 goto out2; /* not attached */
2245 /* make sure we can reach put_old from new_root */ 2288 /* make sure we can reach put_old from new_root */
2246 tmp = old.mnt; 2289 tmp = old.mnt;
2247 spin_lock(&vfsmount_lock); 2290 br_write_lock(vfsmount_lock);
2248 if (tmp != new.mnt) { 2291 if (tmp != new.mnt) {
2249 for (;;) { 2292 for (;;) {
2250 if (tmp->mnt_parent == tmp) 2293 if (tmp->mnt_parent == tmp)
@@ -2264,7 +2307,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
2264 /* mount new_root on / */ 2307 /* mount new_root on / */
2265 attach_mnt(new.mnt, &root_parent); 2308 attach_mnt(new.mnt, &root_parent);
2266 touch_mnt_namespace(current->nsproxy->mnt_ns); 2309 touch_mnt_namespace(current->nsproxy->mnt_ns);
2267 spin_unlock(&vfsmount_lock); 2310 br_write_unlock(vfsmount_lock);
2268 chroot_fs_refs(&root, &new); 2311 chroot_fs_refs(&root, &new);
2269 error = 0; 2312 error = 0;
2270 path_put(&root_parent); 2313 path_put(&root_parent);
@@ -2279,7 +2322,7 @@ out1:
2279out0: 2322out0:
2280 return error; 2323 return error;
2281out3: 2324out3:
2282 spin_unlock(&vfsmount_lock); 2325 br_write_unlock(vfsmount_lock);
2283 goto out2; 2326 goto out2;
2284} 2327}
2285 2328
@@ -2326,6 +2369,8 @@ void __init mnt_init(void)
2326 for (u = 0; u < HASH_SIZE; u++) 2369 for (u = 0; u < HASH_SIZE; u++)
2327 INIT_LIST_HEAD(&mount_hashtable[u]); 2370 INIT_LIST_HEAD(&mount_hashtable[u]);
2328 2371
2372 br_lock_init(vfsmount_lock);
2373
2329 err = sysfs_init(); 2374 err = sysfs_init();
2330 if (err) 2375 if (err)
2331 printk(KERN_WARNING "%s: sysfs_init error: %d\n", 2376 printk(KERN_WARNING "%s: sysfs_init error: %d\n",
@@ -2344,9 +2389,9 @@ void put_mnt_ns(struct mnt_namespace *ns)
2344 if (!atomic_dec_and_test(&ns->count)) 2389 if (!atomic_dec_and_test(&ns->count))
2345 return; 2390 return;
2346 down_write(&namespace_sem); 2391 down_write(&namespace_sem);
2347 spin_lock(&vfsmount_lock); 2392 br_write_lock(vfsmount_lock);
2348 umount_tree(ns->root, 0, &umount_list); 2393 umount_tree(ns->root, 0, &umount_list);
2349 spin_unlock(&vfsmount_lock); 2394 br_write_unlock(vfsmount_lock);
2350 up_write(&namespace_sem); 2395 up_write(&namespace_sem);
2351 release_mounts(&umount_list); 2396 release_mounts(&umount_list);
2352 kfree(ns); 2397 kfree(ns);
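All of the fs/namespace.c conversions above follow one shape: namespace_sem still serializes the slow path, and the new vfsmount_lock brlock is taken on its write side only around the actual mount-tree surgery, exactly as in put_mnt_ns() at the end of the hunk. A minimal sketch of that pattern; umount_whole_tree() is an illustrative name, not a function added by this patch:

static void umount_whole_tree(struct vfsmount *mnt)
{
	LIST_HEAD(umount_list);

	down_write(&namespace_sem);
	br_write_lock(vfsmount_lock);	/* exclusive: takes every per-CPU lock */
	umount_tree(mnt, 0, &umount_list);
	br_write_unlock(vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);	/* the dropped mounts are freed unlocked */
}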
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 26a510a7be09..6c2aad49d731 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -63,7 +63,6 @@ config NFS_V3_ACL
63config NFS_V4 63config NFS_V4
64 bool "NFS client support for NFS version 4" 64 bool "NFS client support for NFS version 4"
65 depends on NFS_FS 65 depends on NFS_FS
66 select RPCSEC_GSS_KRB5
67 help 66 help
68 This option enables support for version 4 of the NFS protocol 67 This option enables support for version 4 of the NFS protocol
69 (RFC 3530) in the kernel's NFS client. 68 (RFC 3530) in the kernel's NFS client.
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 29539ceeb745..e257172d438c 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -140,6 +140,13 @@ nfs_opendir(struct inode *inode, struct file *filp)
140 140
141 /* Call generic open code in order to cache credentials */ 141 /* Call generic open code in order to cache credentials */
142 res = nfs_open(inode, filp); 142 res = nfs_open(inode, filp);
143 if (filp->f_path.dentry == filp->f_path.mnt->mnt_root) {
144 /* This is a mountpoint, so d_revalidate will never
145 * have been called, so we need to refresh the
146 * inode (for close-open consistency) ourselves.
147 */
148 __nfs_revalidate_inode(NFS_SERVER(inode), inode);
149 }
143 return res; 150 return res;
144} 151}
145 152
@@ -1103,7 +1110,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
1103 if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL)) 1110 if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
1104 goto no_open_dput; 1111 goto no_open_dput;
1105 /* We can't create new files, or truncate existing ones here */ 1112 /* We can't create new files, or truncate existing ones here */
1106 openflags &= ~(O_CREAT|O_TRUNC); 1113 openflags &= ~(O_CREAT|O_EXCL|O_TRUNC);
1107 1114
1108 /* 1115 /*
1109 * Note: we're not holding inode->i_mutex and so may be racing with 1116 * Note: we're not holding inode->i_mutex and so may be racing with
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 2d141a74ae82..eb51bd6201da 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -323,7 +323,7 @@ nfs_file_fsync(struct file *file, int datasync)
323 have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); 323 have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
324 if (have_error) 324 if (have_error)
325 ret = xchg(&ctx->error, 0); 325 ret = xchg(&ctx->error, 0);
326 if (!ret) 326 if (!ret && status < 0)
327 ret = status; 327 ret = status;
328 return ret; 328 return ret;
329} 329}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 7ffbb98ddec3..089da5b5d20a 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2036,7 +2036,8 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
2036 struct rpc_cred *cred; 2036 struct rpc_cred *cred;
2037 struct nfs4_state *state; 2037 struct nfs4_state *state;
2038 struct dentry *res; 2038 struct dentry *res;
2039 fmode_t fmode = nd->intent.open.flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC); 2039 int open_flags = nd->intent.open.flags;
2040 fmode_t fmode = open_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
2040 2041
2041 if (nd->flags & LOOKUP_CREATE) { 2042 if (nd->flags & LOOKUP_CREATE) {
2042 attr.ia_mode = nd->intent.open.create_mode; 2043 attr.ia_mode = nd->intent.open.create_mode;
@@ -2044,8 +2045,9 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
2044 if (!IS_POSIXACL(dir)) 2045 if (!IS_POSIXACL(dir))
2045 attr.ia_mode &= ~current_umask(); 2046 attr.ia_mode &= ~current_umask();
2046 } else { 2047 } else {
2048 open_flags &= ~O_EXCL;
2047 attr.ia_valid = 0; 2049 attr.ia_valid = 0;
2048 BUG_ON(nd->intent.open.flags & O_CREAT); 2050 BUG_ON(open_flags & O_CREAT);
2049 } 2051 }
2050 2052
2051 cred = rpc_lookup_cred(); 2053 cred = rpc_lookup_cred();
@@ -2054,7 +2056,7 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
2054 parent = dentry->d_parent; 2056 parent = dentry->d_parent;
2055 /* Protect against concurrent sillydeletes */ 2057 /* Protect against concurrent sillydeletes */
2056 nfs_block_sillyrename(parent); 2058 nfs_block_sillyrename(parent);
2057 state = nfs4_do_open(dir, &path, fmode, nd->intent.open.flags, &attr, cred); 2059 state = nfs4_do_open(dir, &path, fmode, open_flags, &attr, cred);
2058 put_rpccred(cred); 2060 put_rpccred(cred);
2059 if (IS_ERR(state)) { 2061 if (IS_ERR(state)) {
2060 if (PTR_ERR(state) == -ENOENT) { 2062 if (PTR_ERR(state) == -ENOENT) {
@@ -2273,8 +2275,7 @@ static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct
2273out: 2275out:
2274 if (page) 2276 if (page)
2275 __free_page(page); 2277 __free_page(page);
2276 if (locations) 2278 kfree(locations);
2277 kfree(locations);
2278 return status; 2279 return status;
2279} 2280}
2280 2281
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ee26316ad1f4..ec3966e4706b 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -655,6 +655,13 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
655 655
656 if (nfss->options & NFS_OPTION_FSCACHE) 656 if (nfss->options & NFS_OPTION_FSCACHE)
657 seq_printf(m, ",fsc"); 657 seq_printf(m, ",fsc");
658
659 if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG) {
660 if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
661 seq_printf(m, ",lookupcache=none");
662 else
663 seq_printf(m, ",lookupcache=pos");
664 }
658} 665}
659 666
660/* 667/*
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 503b9da159a3..95932f523aef 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -69,7 +69,6 @@ config NFSD_V4
69 depends on NFSD && PROC_FS && EXPERIMENTAL 69 depends on NFSD && PROC_FS && EXPERIMENTAL
70 select NFSD_V3 70 select NFSD_V3
71 select FS_POSIX_ACL 71 select FS_POSIX_ACL
72 select RPCSEC_GSS_KRB5
73 help 72 help
74 This option enables support in your system's NFS server for 73 This option enables support in your system's NFS server for
75 version 4 of the NFS protocol (RFC 3530). 74 version 4 of the NFS protocol (RFC 3530).
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index bee60c04109a..922263393c76 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -175,24 +175,24 @@ static int nilfs_sync_super(struct nilfs_sb_info *sbi, int flag)
175{ 175{
176 struct the_nilfs *nilfs = sbi->s_nilfs; 176 struct the_nilfs *nilfs = sbi->s_nilfs;
177 int err; 177 int err;
178 int barrier_done = 0;
179 178
180 if (nilfs_test_opt(sbi, BARRIER)) {
181 set_buffer_ordered(nilfs->ns_sbh[0]);
182 barrier_done = 1;
183 }
184 retry: 179 retry:
185 set_buffer_dirty(nilfs->ns_sbh[0]); 180 set_buffer_dirty(nilfs->ns_sbh[0]);
186 err = sync_dirty_buffer(nilfs->ns_sbh[0]); 181
187 if (err == -EOPNOTSUPP && barrier_done) { 182 if (nilfs_test_opt(sbi, BARRIER)) {
188 nilfs_warning(sbi->s_super, __func__, 183 err = __sync_dirty_buffer(nilfs->ns_sbh[0],
189 "barrier-based sync failed. " 184 WRITE_SYNC | WRITE_BARRIER);
190 "disabling barriers\n"); 185 if (err == -EOPNOTSUPP) {
191 nilfs_clear_opt(sbi, BARRIER); 186 nilfs_warning(sbi->s_super, __func__,
192 barrier_done = 0; 187 "barrier-based sync failed. "
193 clear_buffer_ordered(nilfs->ns_sbh[0]); 188 "disabling barriers\n");
194 goto retry; 189 nilfs_clear_opt(sbi, BARRIER);
190 goto retry;
191 }
192 } else {
193 err = sync_dirty_buffer(nilfs->ns_sbh[0]);
195 } 194 }
195
196 if (unlikely(err)) { 196 if (unlikely(err)) {
197 printk(KERN_ERR 197 printk(KERN_ERR
198 "NILFS: unable to write superblock (err=%d)\n", err); 198 "NILFS: unable to write superblock (err=%d)\n", err);
diff --git a/fs/open.c b/fs/open.c
index 630715f9f73d..d74e1983e8dc 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -675,7 +675,7 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
675 f->f_path.mnt = mnt; 675 f->f_path.mnt = mnt;
676 f->f_pos = 0; 676 f->f_pos = 0;
677 f->f_op = fops_get(inode->i_fop); 677 f->f_op = fops_get(inode->i_fop);
678 file_move(f, &inode->i_sb->s_files); 678 file_sb_list_add(f, inode->i_sb);
679 679
680 error = security_dentry_open(f, cred); 680 error = security_dentry_open(f, cred);
681 if (error) 681 if (error)
@@ -721,7 +721,7 @@ cleanup_all:
721 mnt_drop_write(mnt); 721 mnt_drop_write(mnt);
722 } 722 }
723 } 723 }
724 file_kill(f); 724 file_sb_list_del(f);
725 f->f_path.dentry = NULL; 725 f->f_path.dentry = NULL;
726 f->f_path.mnt = NULL; 726 f->f_path.mnt = NULL;
727cleanup_file: 727cleanup_file:
diff --git a/fs/pnode.c b/fs/pnode.c
index 5cc564a83149..8066b8dd748f 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -126,6 +126,9 @@ static int do_make_slave(struct vfsmount *mnt)
126 return 0; 126 return 0;
127} 127}
128 128
129/*
130 * vfsmount lock must be held for write
131 */
129void change_mnt_propagation(struct vfsmount *mnt, int type) 132void change_mnt_propagation(struct vfsmount *mnt, int type)
130{ 133{
131 if (type == MS_SHARED) { 134 if (type == MS_SHARED) {
@@ -270,12 +273,12 @@ int propagate_mnt(struct vfsmount *dest_mnt, struct dentry *dest_dentry,
270 prev_src_mnt = child; 273 prev_src_mnt = child;
271 } 274 }
272out: 275out:
273 spin_lock(&vfsmount_lock); 276 br_write_lock(vfsmount_lock);
274 while (!list_empty(&tmp_list)) { 277 while (!list_empty(&tmp_list)) {
275 child = list_first_entry(&tmp_list, struct vfsmount, mnt_hash); 278 child = list_first_entry(&tmp_list, struct vfsmount, mnt_hash);
276 umount_tree(child, 0, &umount_list); 279 umount_tree(child, 0, &umount_list);
277 } 280 }
278 spin_unlock(&vfsmount_lock); 281 br_write_unlock(vfsmount_lock);
279 release_mounts(&umount_list); 282 release_mounts(&umount_list);
280 return ret; 283 return ret;
281} 284}
@@ -296,6 +299,8 @@ static inline int do_refcount_check(struct vfsmount *mnt, int count)
296 * other mounts its parent propagates to. 299 * other mounts its parent propagates to.
297 * Check if any of these mounts that **do not have submounts** 300 * Check if any of these mounts that **do not have submounts**
298 * have more references than 'refcnt'. If so return busy. 301 * have more references than 'refcnt'. If so return busy.
302 *
303 * vfsmount lock must be held for read or write
299 */ 304 */
300int propagate_mount_busy(struct vfsmount *mnt, int refcnt) 305int propagate_mount_busy(struct vfsmount *mnt, int refcnt)
301{ 306{
@@ -353,6 +358,8 @@ static void __propagate_umount(struct vfsmount *mnt)
353 * collect all mounts that receive propagation from the mount in @list, 358 * collect all mounts that receive propagation from the mount in @list,
354 * and return these additional mounts in the same list. 359 * and return these additional mounts in the same list.
355 * @list: the list of mounts to be unmounted. 360 * @list: the list of mounts to be unmounted.
361 *
362 * vfsmount lock must be held for write
356 */ 363 */
357int propagate_umount(struct list_head *list) 364int propagate_umount(struct list_head *list)
358{ 365{
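The comments added above only record the locking rule; the callers themselves sit in fs/namespace.c. A hedged sketch of a read-side caller of propagate_mount_busy(), where br_read_lock() (the cheap, this-CPU-only side of the brlock) is sufficient; mnt_tree_is_busy() is an illustrative name:

static int mnt_tree_is_busy(struct vfsmount *mnt)
{
	int busy;

	br_read_lock(vfsmount_lock);	/* read side: local per-CPU lock only */
	busy = propagate_mount_busy(mnt, 2);
	br_read_unlock(vfsmount_lock);
	return busy;
}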
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index ae35413dcbe1..caa758377d66 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -83,6 +83,7 @@ void reiserfs_evict_inode(struct inode *inode)
83 dquot_drop(inode); 83 dquot_drop(inode);
84 inode->i_blocks = 0; 84 inode->i_blocks = 0;
85 reiserfs_write_unlock_once(inode->i_sb, depth); 85 reiserfs_write_unlock_once(inode->i_sb, depth);
86 return;
86 87
87no_delete: 88no_delete:
88 end_writeback(inode); 89 end_writeback(inode);
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 1ec952b1f036..812e2c05aa29 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -2311,7 +2311,7 @@ static int journal_read_transaction(struct super_block *sb,
2311 /* flush out the real blocks */ 2311 /* flush out the real blocks */
2312 for (i = 0; i < get_desc_trans_len(desc); i++) { 2312 for (i = 0; i < get_desc_trans_len(desc); i++) {
2313 set_buffer_dirty(real_blocks[i]); 2313 set_buffer_dirty(real_blocks[i]);
2314 ll_rw_block(SWRITE, 1, real_blocks + i); 2314 write_dirty_buffer(real_blocks[i], WRITE);
2315 } 2315 }
2316 for (i = 0; i < get_desc_trans_len(desc); i++) { 2316 for (i = 0; i < get_desc_trans_len(desc); i++) {
2317 wait_on_buffer(real_blocks[i]); 2317 wait_on_buffer(real_blocks[i]);
diff --git a/fs/super.c b/fs/super.c
index 9674ab2c8718..8819e3a7ff20 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -54,7 +54,22 @@ static struct super_block *alloc_super(struct file_system_type *type)
54 s = NULL; 54 s = NULL;
55 goto out; 55 goto out;
56 } 56 }
57#ifdef CONFIG_SMP
58 s->s_files = alloc_percpu(struct list_head);
59 if (!s->s_files) {
60 security_sb_free(s);
61 kfree(s);
62 s = NULL;
63 goto out;
64 } else {
65 int i;
66
67 for_each_possible_cpu(i)
68 INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i));
69 }
70#else
57 INIT_LIST_HEAD(&s->s_files); 71 INIT_LIST_HEAD(&s->s_files);
72#endif
58 INIT_LIST_HEAD(&s->s_instances); 73 INIT_LIST_HEAD(&s->s_instances);
59 INIT_HLIST_HEAD(&s->s_anon); 74 INIT_HLIST_HEAD(&s->s_anon);
60 INIT_LIST_HEAD(&s->s_inodes); 75 INIT_LIST_HEAD(&s->s_inodes);
@@ -108,6 +123,9 @@ out:
108 */ 123 */
109static inline void destroy_super(struct super_block *s) 124static inline void destroy_super(struct super_block *s)
110{ 125{
126#ifdef CONFIG_SMP
127 free_percpu(s->s_files);
128#endif
111 security_sb_free(s); 129 security_sb_free(s);
112 kfree(s->s_subtype); 130 kfree(s->s_subtype);
113 kfree(s->s_options); 131 kfree(s->s_options);
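With s_files made per-CPU on SMP, any code that visits every open file of a superblock has to walk each CPU's list; the real walkers are the ones converted in fs/file_table.c, outside this hunk. Sketch only, assuming files are still chained through f_u.fu_list as in this kernel and that the caller holds whatever file-list locking file_table.c now requires:

#ifdef CONFIG_SMP
	int cpu;
	struct file *f;

	for_each_possible_cpu(cpu) {
		struct list_head *head = per_cpu_ptr(sb->s_files, cpu);

		list_for_each_entry(f, head, f_u.fu_list) {
			/* inspect f */
		}
	}
#endif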
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 048484fb10d2..46f7a807bbc1 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -114,10 +114,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
114 114
115 ubh_mark_buffer_dirty (USPI_UBH(uspi)); 115 ubh_mark_buffer_dirty (USPI_UBH(uspi));
116 ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); 116 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
117 if (sb->s_flags & MS_SYNCHRONOUS) { 117 if (sb->s_flags & MS_SYNCHRONOUS)
118 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); 118 ubh_sync_block(UCPI_UBH(ucpi));
119 ubh_wait_on_buffer (UCPI_UBH(ucpi));
120 }
121 sb->s_dirt = 1; 119 sb->s_dirt = 1;
122 120
123 unlock_super (sb); 121 unlock_super (sb);
@@ -207,10 +205,8 @@ do_more:
207 205
208 ubh_mark_buffer_dirty (USPI_UBH(uspi)); 206 ubh_mark_buffer_dirty (USPI_UBH(uspi));
209 ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); 207 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
210 if (sb->s_flags & MS_SYNCHRONOUS) { 208 if (sb->s_flags & MS_SYNCHRONOUS)
211 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); 209 ubh_sync_block(UCPI_UBH(ucpi));
212 ubh_wait_on_buffer (UCPI_UBH(ucpi));
213 }
214 210
215 if (overflow) { 211 if (overflow) {
216 fragment += count; 212 fragment += count;
@@ -558,10 +554,8 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
558 554
559 ubh_mark_buffer_dirty (USPI_UBH(uspi)); 555 ubh_mark_buffer_dirty (USPI_UBH(uspi));
560 ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); 556 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
561 if (sb->s_flags & MS_SYNCHRONOUS) { 557 if (sb->s_flags & MS_SYNCHRONOUS)
562 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); 558 ubh_sync_block(UCPI_UBH(ucpi));
563 ubh_wait_on_buffer (UCPI_UBH(ucpi));
564 }
565 sb->s_dirt = 1; 559 sb->s_dirt = 1;
566 560
567 UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment); 561 UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
@@ -680,10 +674,8 @@ cg_found:
680succed: 674succed:
681 ubh_mark_buffer_dirty (USPI_UBH(uspi)); 675 ubh_mark_buffer_dirty (USPI_UBH(uspi));
682 ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); 676 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
683 if (sb->s_flags & MS_SYNCHRONOUS) { 677 if (sb->s_flags & MS_SYNCHRONOUS)
684 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); 678 ubh_sync_block(UCPI_UBH(ucpi));
685 ubh_wait_on_buffer (UCPI_UBH(ucpi));
686 }
687 sb->s_dirt = 1; 679 sb->s_dirt = 1;
688 680
689 result += cgno * uspi->s_fpg; 681 result += cgno * uspi->s_fpg;
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 428017e018fe..2eabf04af3de 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -113,10 +113,8 @@ void ufs_free_inode (struct inode * inode)
113 113
114 ubh_mark_buffer_dirty (USPI_UBH(uspi)); 114 ubh_mark_buffer_dirty (USPI_UBH(uspi));
115 ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); 115 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
116 if (sb->s_flags & MS_SYNCHRONOUS) { 116 if (sb->s_flags & MS_SYNCHRONOUS)
117 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); 117 ubh_sync_block(UCPI_UBH(ucpi));
118 ubh_wait_on_buffer (UCPI_UBH(ucpi));
119 }
120 118
121 sb->s_dirt = 1; 119 sb->s_dirt = 1;
122 unlock_super (sb); 120 unlock_super (sb);
@@ -156,10 +154,8 @@ static void ufs2_init_inodes_chunk(struct super_block *sb,
156 154
157 fs32_add(sb, &ucg->cg_u.cg_u2.cg_initediblk, uspi->s_inopb); 155 fs32_add(sb, &ucg->cg_u.cg_u2.cg_initediblk, uspi->s_inopb);
158 ubh_mark_buffer_dirty(UCPI_UBH(ucpi)); 156 ubh_mark_buffer_dirty(UCPI_UBH(ucpi));
159 if (sb->s_flags & MS_SYNCHRONOUS) { 157 if (sb->s_flags & MS_SYNCHRONOUS)
160 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); 158 ubh_sync_block(UCPI_UBH(ucpi));
161 ubh_wait_on_buffer(UCPI_UBH(ucpi));
162 }
163 159
164 UFSD("EXIT\n"); 160 UFSD("EXIT\n");
165} 161}
@@ -290,10 +286,8 @@ cg_found:
290 } 286 }
291 ubh_mark_buffer_dirty (USPI_UBH(uspi)); 287 ubh_mark_buffer_dirty (USPI_UBH(uspi));
292 ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); 288 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
293 if (sb->s_flags & MS_SYNCHRONOUS) { 289 if (sb->s_flags & MS_SYNCHRONOUS)
294 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); 290 ubh_sync_block(UCPI_UBH(ucpi));
295 ubh_wait_on_buffer (UCPI_UBH(ucpi));
296 }
297 sb->s_dirt = 1; 291 sb->s_dirt = 1;
298 292
299 inode->i_ino = cg * uspi->s_ipg + bit; 293 inode->i_ino = cg * uspi->s_ipg + bit;
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index 34d5cb135320..a58f9155fc9a 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -243,10 +243,8 @@ static int ufs_trunc_indirect(struct inode *inode, u64 offset, void *p)
243 ubh_bforget(ind_ubh); 243 ubh_bforget(ind_ubh);
244 ind_ubh = NULL; 244 ind_ubh = NULL;
245 } 245 }
246 if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) { 246 if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh))
247 ubh_ll_rw_block(SWRITE, ind_ubh); 247 ubh_sync_block(ind_ubh);
248 ubh_wait_on_buffer (ind_ubh);
249 }
250 ubh_brelse (ind_ubh); 248 ubh_brelse (ind_ubh);
251 249
252 UFSD("EXIT: ino %lu\n", inode->i_ino); 250 UFSD("EXIT: ino %lu\n", inode->i_ino);
@@ -307,10 +305,8 @@ static int ufs_trunc_dindirect(struct inode *inode, u64 offset, void *p)
307 ubh_bforget(dind_bh); 305 ubh_bforget(dind_bh);
308 dind_bh = NULL; 306 dind_bh = NULL;
309 } 307 }
310 if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) { 308 if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh))
311 ubh_ll_rw_block(SWRITE, dind_bh); 309 ubh_sync_block(dind_bh);
312 ubh_wait_on_buffer (dind_bh);
313 }
314 ubh_brelse (dind_bh); 310 ubh_brelse (dind_bh);
315 311
316 UFSD("EXIT: ino %lu\n", inode->i_ino); 312 UFSD("EXIT: ino %lu\n", inode->i_ino);
@@ -367,10 +363,8 @@ static int ufs_trunc_tindirect(struct inode *inode)
367 ubh_bforget(tind_bh); 363 ubh_bforget(tind_bh);
368 tind_bh = NULL; 364 tind_bh = NULL;
369 } 365 }
370 if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) { 366 if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh))
371 ubh_ll_rw_block(SWRITE, tind_bh); 367 ubh_sync_block(tind_bh);
372 ubh_wait_on_buffer (tind_bh);
373 }
374 ubh_brelse (tind_bh); 368 ubh_brelse (tind_bh);
375 369
376 UFSD("EXIT: ino %lu\n", inode->i_ino); 370 UFSD("EXIT: ino %lu\n", inode->i_ino);
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
index 85a7fc9e4a4e..d2c36d53fe66 100644
--- a/fs/ufs/util.c
+++ b/fs/ufs/util.c
@@ -113,21 +113,17 @@ void ubh_mark_buffer_uptodate (struct ufs_buffer_head * ubh, int flag)
113 } 113 }
114} 114}
115 115
116void ubh_ll_rw_block(int rw, struct ufs_buffer_head *ubh) 116void ubh_sync_block(struct ufs_buffer_head *ubh)
117{ 117{
118 if (!ubh) 118 if (ubh) {
119 return; 119 unsigned i;
120 120
121 ll_rw_block(rw, ubh->count, ubh->bh); 121 for (i = 0; i < ubh->count; i++)
122} 122 write_dirty_buffer(ubh->bh[i], WRITE);
123 123
124void ubh_wait_on_buffer (struct ufs_buffer_head * ubh) 124 for (i = 0; i < ubh->count; i++)
125{ 125 wait_on_buffer(ubh->bh[i]);
126 unsigned i; 126 }
127 if (!ubh)
128 return;
129 for ( i = 0; i < ubh->count; i++ )
130 wait_on_buffer (ubh->bh[i]);
131} 127}
132 128
133void ubh_bforget (struct ufs_buffer_head * ubh) 129void ubh_bforget (struct ufs_buffer_head * ubh)
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 0466036912f1..9f8775ce381c 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -269,8 +269,7 @@ extern void ubh_brelse (struct ufs_buffer_head *);
269extern void ubh_brelse_uspi (struct ufs_sb_private_info *); 269extern void ubh_brelse_uspi (struct ufs_sb_private_info *);
270extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *); 270extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *);
271extern void ubh_mark_buffer_uptodate (struct ufs_buffer_head *, int); 271extern void ubh_mark_buffer_uptodate (struct ufs_buffer_head *, int);
272extern void ubh_ll_rw_block(int, struct ufs_buffer_head *); 272extern void ubh_sync_block(struct ufs_buffer_head *);
273extern void ubh_wait_on_buffer (struct ufs_buffer_head *);
274extern void ubh_bforget (struct ufs_buffer_head *); 273extern void ubh_bforget (struct ufs_buffer_head *);
275extern int ubh_buffer_dirty (struct ufs_buffer_head *); 274extern int ubh_buffer_dirty (struct ufs_buffer_head *);
276#define ubh_ubhcpymem(mem,ubh,size) _ubh_ubhcpymem_(uspi,mem,ubh,size) 275#define ubh_ubhcpymem(mem,ubh,size) _ubh_ubhcpymem_(uspi,mem,ubh,size)
diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h
index df84e3b04555..d89dec864d42 100644
--- a/include/asm-generic/syscalls.h
+++ b/include/asm-generic/syscalls.h
@@ -23,8 +23,10 @@ asmlinkage long sys_vfork(struct pt_regs *regs);
23#endif 23#endif
24 24
25#ifndef sys_execve 25#ifndef sys_execve
26asmlinkage long sys_execve(char __user *filename, char __user * __user *argv, 26asmlinkage long sys_execve(const char __user *filename,
27 char __user * __user *envp, struct pt_regs *regs); 27 const char __user *const __user *argv,
28 const char __user *const __user *envp,
29 struct pt_regs *regs);
28#endif 30#endif
29 31
30#ifndef sys_mmap2 32#ifndef sys_mmap2
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 43e649a72529..ec94c12f21da 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -32,7 +32,6 @@ enum bh_state_bits {
32 BH_Delay, /* Buffer is not yet allocated on disk */ 32 BH_Delay, /* Buffer is not yet allocated on disk */
33 BH_Boundary, /* Block is followed by a discontiguity */ 33 BH_Boundary, /* Block is followed by a discontiguity */
34 BH_Write_EIO, /* I/O error on write */ 34 BH_Write_EIO, /* I/O error on write */
35 BH_Ordered, /* ordered write */
36 BH_Eopnotsupp, /* operation not supported (barrier) */ 35 BH_Eopnotsupp, /* operation not supported (barrier) */
37 BH_Unwritten, /* Buffer is allocated on disk but not written */ 36 BH_Unwritten, /* Buffer is allocated on disk but not written */
38 BH_Quiet, /* Buffer Error Printks to be quiet */ 37 BH_Quiet, /* Buffer Error Printks to be quiet */
@@ -125,7 +124,6 @@ BUFFER_FNS(Async_Write, async_write)
125BUFFER_FNS(Delay, delay) 124BUFFER_FNS(Delay, delay)
126BUFFER_FNS(Boundary, boundary) 125BUFFER_FNS(Boundary, boundary)
127BUFFER_FNS(Write_EIO, write_io_error) 126BUFFER_FNS(Write_EIO, write_io_error)
128BUFFER_FNS(Ordered, ordered)
129BUFFER_FNS(Eopnotsupp, eopnotsupp) 127BUFFER_FNS(Eopnotsupp, eopnotsupp)
130BUFFER_FNS(Unwritten, unwritten) 128BUFFER_FNS(Unwritten, unwritten)
131 129
@@ -183,6 +181,8 @@ void unlock_buffer(struct buffer_head *bh);
183void __lock_buffer(struct buffer_head *bh); 181void __lock_buffer(struct buffer_head *bh);
184void ll_rw_block(int, int, struct buffer_head * bh[]); 182void ll_rw_block(int, int, struct buffer_head * bh[]);
185int sync_dirty_buffer(struct buffer_head *bh); 183int sync_dirty_buffer(struct buffer_head *bh);
184int __sync_dirty_buffer(struct buffer_head *bh, int rw);
185void write_dirty_buffer(struct buffer_head *bh, int rw);
186int submit_bh(int, struct buffer_head *); 186int submit_bh(int, struct buffer_head *);
187void write_boundary_block(struct block_device *bdev, 187void write_boundary_block(struct block_device *bdev,
188 sector_t bblock, unsigned blocksize); 188 sector_t bblock, unsigned blocksize);
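write_dirty_buffer() is the replacement for the removed ll_rw_block(SWRITE, ...) idiom: it locks the buffer, clears the dirty bit and submits it, but does not wait for completion. The callers converted above (reiserfs, ufs) therefore add an explicit wait when they need the result; the condensed idiom, with the -EIO check as a hedged example of how a caller might report failure:

	int err = 0;

	set_buffer_dirty(bh);
	write_dirty_buffer(bh, WRITE);	/* lock, clear dirty, submit_bh() */
	wait_on_buffer(bh);		/* only when completion matters */
	if (!buffer_uptodate(bh))
		err = -EIO;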
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9a96b4d83fc1..76041b614758 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -125,9 +125,6 @@ struct inodes_stat_t {
125 * block layer could (in theory) choose to ignore this 125 * block layer could (in theory) choose to ignore this
126 * request if it runs into resource problems. 126 * request if it runs into resource problems.
127 * WRITE A normal async write. Device will be plugged. 127 * WRITE A normal async write. Device will be plugged.
128 * SWRITE Like WRITE, but a special case for ll_rw_block() that
129 * tells it to lock the buffer first. Normally a buffer
130 * must be locked before doing IO.
131 * WRITE_SYNC_PLUG Synchronous write. Identical to WRITE, but passes down 128 * WRITE_SYNC_PLUG Synchronous write. Identical to WRITE, but passes down
132 * the hint that someone will be waiting on this IO 129 * the hint that someone will be waiting on this IO
133 * shortly. The device must still be unplugged explicitly, 130 * shortly. The device must still be unplugged explicitly,
@@ -138,9 +135,6 @@ struct inodes_stat_t {
138 * immediately after submission. The write equivalent 135 * immediately after submission. The write equivalent
139 * of READ_SYNC. 136 * of READ_SYNC.
140 * WRITE_ODIRECT_PLUG Special case write for O_DIRECT only. 137 * WRITE_ODIRECT_PLUG Special case write for O_DIRECT only.
141 * SWRITE_SYNC
142 * SWRITE_SYNC_PLUG Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer.
143 * See SWRITE.
144 * WRITE_BARRIER Like WRITE_SYNC, but tells the block layer that all 138 * WRITE_BARRIER Like WRITE_SYNC, but tells the block layer that all
145 * previously submitted writes must be safely on storage 139 * previously submitted writes must be safely on storage
146 * before this one is started. Also guarantees that when 140 * before this one is started. Also guarantees that when
@@ -155,7 +149,6 @@ struct inodes_stat_t {
155#define READ 0 149#define READ 0
156#define WRITE RW_MASK 150#define WRITE RW_MASK
157#define READA RWA_MASK 151#define READA RWA_MASK
158#define SWRITE (WRITE | READA)
159 152
160#define READ_SYNC (READ | REQ_SYNC | REQ_UNPLUG) 153#define READ_SYNC (READ | REQ_SYNC | REQ_UNPLUG)
161#define READ_META (READ | REQ_META) 154#define READ_META (READ | REQ_META)
@@ -165,8 +158,6 @@ struct inodes_stat_t {
165#define WRITE_META (WRITE | REQ_META) 158#define WRITE_META (WRITE | REQ_META)
166#define WRITE_BARRIER (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \ 159#define WRITE_BARRIER (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
167 REQ_HARDBARRIER) 160 REQ_HARDBARRIER)
168#define SWRITE_SYNC_PLUG (SWRITE | REQ_SYNC | REQ_NOIDLE)
169#define SWRITE_SYNC (SWRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)
170 161
171/* 162/*
172 * These aren't really reads or writes, they pass down information about 163 * These aren't really reads or writes, they pass down information about
@@ -929,6 +920,9 @@ struct file {
929#define f_vfsmnt f_path.mnt 920#define f_vfsmnt f_path.mnt
930 const struct file_operations *f_op; 921 const struct file_operations *f_op;
931 spinlock_t f_lock; /* f_ep_links, f_flags, no IRQ */ 922 spinlock_t f_lock; /* f_ep_links, f_flags, no IRQ */
923#ifdef CONFIG_SMP
924 int f_sb_list_cpu;
925#endif
932 atomic_long_t f_count; 926 atomic_long_t f_count;
933 unsigned int f_flags; 927 unsigned int f_flags;
934 fmode_t f_mode; 928 fmode_t f_mode;
@@ -953,9 +947,6 @@ struct file {
953 unsigned long f_mnt_write_state; 947 unsigned long f_mnt_write_state;
954#endif 948#endif
955}; 949};
956extern spinlock_t files_lock;
957#define file_list_lock() spin_lock(&files_lock);
958#define file_list_unlock() spin_unlock(&files_lock);
959 950
960#define get_file(x) atomic_long_inc(&(x)->f_count) 951#define get_file(x) atomic_long_inc(&(x)->f_count)
961#define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1) 952#define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
@@ -1346,7 +1337,11 @@ struct super_block {
1346 1337
1347 struct list_head s_inodes; /* all inodes */ 1338 struct list_head s_inodes; /* all inodes */
1348 struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */ 1339 struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */
1340#ifdef CONFIG_SMP
1341 struct list_head __percpu *s_files;
1342#else
1349 struct list_head s_files; 1343 struct list_head s_files;
1344#endif
1350 /* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */ 1345 /* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */
1351 struct list_head s_dentry_lru; /* unused dentry lru */ 1346 struct list_head s_dentry_lru; /* unused dentry lru */
1352 int s_nr_dentry_unused; /* # of dentry on lru */ 1347 int s_nr_dentry_unused; /* # of dentry on lru */
@@ -2197,8 +2192,6 @@ static inline void insert_inode_hash(struct inode *inode) {
2197 __insert_inode_hash(inode, inode->i_ino); 2192 __insert_inode_hash(inode, inode->i_ino);
2198} 2193}
2199 2194
2200extern void file_move(struct file *f, struct list_head *list);
2201extern void file_kill(struct file *f);
2202#ifdef CONFIG_BLOCK 2195#ifdef CONFIG_BLOCK
2203extern void submit_bio(int, struct bio *); 2196extern void submit_bio(int, struct bio *);
2204extern int bdev_read_only(struct block_device *); 2197extern int bdev_read_only(struct block_device *);
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index eca3d5202138..a42b5bf02f8b 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -5,7 +5,7 @@
5 5
6struct fs_struct { 6struct fs_struct {
7 int users; 7 int users;
8 rwlock_t lock; 8 spinlock_t lock;
9 int umask; 9 int umask;
10 int in_exec; 10 int in_exec;
11 struct path root, pwd; 11 struct path root, pwd;
@@ -23,29 +23,29 @@ extern int unshare_fs_struct(void);
23 23
24static inline void get_fs_root(struct fs_struct *fs, struct path *root) 24static inline void get_fs_root(struct fs_struct *fs, struct path *root)
25{ 25{
26 read_lock(&fs->lock); 26 spin_lock(&fs->lock);
27 *root = fs->root; 27 *root = fs->root;
28 path_get(root); 28 path_get(root);
29 read_unlock(&fs->lock); 29 spin_unlock(&fs->lock);
30} 30}
31 31
32static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd) 32static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd)
33{ 33{
34 read_lock(&fs->lock); 34 spin_lock(&fs->lock);
35 *pwd = fs->pwd; 35 *pwd = fs->pwd;
36 path_get(pwd); 36 path_get(pwd);
37 read_unlock(&fs->lock); 37 spin_unlock(&fs->lock);
38} 38}
39 39
40static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root, 40static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root,
41 struct path *pwd) 41 struct path *pwd)
42{ 42{
43 read_lock(&fs->lock); 43 spin_lock(&fs->lock);
44 *root = fs->root; 44 *root = fs->root;
45 path_get(root); 45 path_get(root);
46 *pwd = fs->pwd; 46 *pwd = fs->pwd;
47 path_get(pwd); 47 path_get(pwd);
48 read_unlock(&fs->lock); 48 spin_unlock(&fs->lock);
49} 49}
50 50
51#endif /* _LINUX_FS_STRUCT_H */ 51#endif /* _LINUX_FS_STRUCT_H */
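Only the readers are visible in this header; the write side (set_fs_root(), set_fs_pwd() and friends in fs/fs_struct.c, converted elsewhere in this series) takes the same spinlock. An illustrative sketch of that write side under the new lock, not a verbatim copy of the converted code:

	struct path old_root;

	spin_lock(&fs->lock);
	old_root = fs->root;
	fs->root = *path;
	path_get(path);
	spin_unlock(&fs->lock);
	if (old_root.dentry)
		path_put(&old_root);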
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 311f8753d713..4aa95f203f3e 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -836,6 +836,8 @@ extern void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize);
836 836
837extern unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize); 837extern unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize);
838 838
839extern void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize);
840
839extern unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, 841extern unsigned int __kfifo_out_peek_r(struct __kfifo *fifo,
840 void *buf, unsigned int len, size_t recsize); 842 void *buf, unsigned int len, size_t recsize);
841 843
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
new file mode 100644
index 000000000000..b288cb713b90
--- /dev/null
+++ b/include/linux/lglock.h
@@ -0,0 +1,172 @@
1/*
2 * Specialised local-global spinlock. Can only be declared as global variables
3 * to avoid overhead and keep things simple (and we don't want to start using
4 * these inside dynamically allocated structures).
5 *
6 * "local/global locks" (lglocks) can be used to:
7 *
8 * - Provide fast exclusive access to per-CPU data, with exclusive access to
9 * another CPU's data allowed but possibly subject to contention, and to
10 * provide very slow exclusive access to all per-CPU data.
11 * - Or to provide very fast and scalable read serialisation, and to provide
12 * very slow exclusive serialisation of data (not necessarily per-CPU data).
13 *
14 * Brlocks are also implemented as a short-hand notation for the latter use
15 * case.
16 *
17 * Copyright 2009, 2010, Nick Piggin, Novell Inc.
18 */
19#ifndef __LINUX_LGLOCK_H
20#define __LINUX_LGLOCK_H
21
22#include <linux/spinlock.h>
23#include <linux/lockdep.h>
24#include <linux/percpu.h>
25
26/* can make br locks by using local lock for read side, global lock for write */
27#define br_lock_init(name) name##_lock_init()
28#define br_read_lock(name) name##_local_lock()
29#define br_read_unlock(name) name##_local_unlock()
30#define br_write_lock(name) name##_global_lock_online()
31#define br_write_unlock(name) name##_global_unlock_online()
32
33#define DECLARE_BRLOCK(name) DECLARE_LGLOCK(name)
34#define DEFINE_BRLOCK(name) DEFINE_LGLOCK(name)
35
36
37#define lg_lock_init(name) name##_lock_init()
38#define lg_local_lock(name) name##_local_lock()
39#define lg_local_unlock(name) name##_local_unlock()
40#define lg_local_lock_cpu(name, cpu) name##_local_lock_cpu(cpu)
41#define lg_local_unlock_cpu(name, cpu) name##_local_unlock_cpu(cpu)
42#define lg_global_lock(name) name##_global_lock()
43#define lg_global_unlock(name) name##_global_unlock()
44#define lg_global_lock_online(name) name##_global_lock_online()
45#define lg_global_unlock_online(name) name##_global_unlock_online()
46
47#ifdef CONFIG_DEBUG_LOCK_ALLOC
48#define LOCKDEP_INIT_MAP lockdep_init_map
49
50#define DEFINE_LGLOCK_LOCKDEP(name) \
51 struct lock_class_key name##_lock_key; \
52 struct lockdep_map name##_lock_dep_map; \
53 EXPORT_SYMBOL(name##_lock_dep_map)
54
55#else
56#define LOCKDEP_INIT_MAP(a, b, c, d)
57
58#define DEFINE_LGLOCK_LOCKDEP(name)
59#endif
60
61
62#define DECLARE_LGLOCK(name) \
63 extern void name##_lock_init(void); \
64 extern void name##_local_lock(void); \
65 extern void name##_local_unlock(void); \
66 extern void name##_local_lock_cpu(int cpu); \
67 extern void name##_local_unlock_cpu(int cpu); \
68 extern void name##_global_lock(void); \
69 extern void name##_global_unlock(void); \
70 extern void name##_global_lock_online(void); \
71 extern void name##_global_unlock_online(void); \
72
73#define DEFINE_LGLOCK(name) \
74 \
75 DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
76 DEFINE_LGLOCK_LOCKDEP(name); \
77 \
78 void name##_lock_init(void) { \
79 int i; \
80 LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
81 for_each_possible_cpu(i) { \
82 arch_spinlock_t *lock; \
83 lock = &per_cpu(name##_lock, i); \
84 *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
85 } \
86 } \
87 EXPORT_SYMBOL(name##_lock_init); \
88 \
89 void name##_local_lock(void) { \
90 arch_spinlock_t *lock; \
91 preempt_disable(); \
92 rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
93 lock = &__get_cpu_var(name##_lock); \
94 arch_spin_lock(lock); \
95 } \
96 EXPORT_SYMBOL(name##_local_lock); \
97 \
98 void name##_local_unlock(void) { \
99 arch_spinlock_t *lock; \
100 rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
101 lock = &__get_cpu_var(name##_lock); \
102 arch_spin_unlock(lock); \
103 preempt_enable(); \
104 } \
105 EXPORT_SYMBOL(name##_local_unlock); \
106 \
107 void name##_local_lock_cpu(int cpu) { \
108 arch_spinlock_t *lock; \
109 preempt_disable(); \
110 rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
111 lock = &per_cpu(name##_lock, cpu); \
112 arch_spin_lock(lock); \
113 } \
114 EXPORT_SYMBOL(name##_local_lock_cpu); \
115 \
116 void name##_local_unlock_cpu(int cpu) { \
117 arch_spinlock_t *lock; \
118 rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
119 lock = &per_cpu(name##_lock, cpu); \
120 arch_spin_unlock(lock); \
121 preempt_enable(); \
122 } \
123 EXPORT_SYMBOL(name##_local_unlock_cpu); \
124 \
125 void name##_global_lock_online(void) { \
126 int i; \
127 preempt_disable(); \
128 rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
129 for_each_online_cpu(i) { \
130 arch_spinlock_t *lock; \
131 lock = &per_cpu(name##_lock, i); \
132 arch_spin_lock(lock); \
133 } \
134 } \
135 EXPORT_SYMBOL(name##_global_lock_online); \
136 \
137 void name##_global_unlock_online(void) { \
138 int i; \
139 rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
140 for_each_online_cpu(i) { \
141 arch_spinlock_t *lock; \
142 lock = &per_cpu(name##_lock, i); \
143 arch_spin_unlock(lock); \
144 } \
145 preempt_enable(); \
146 } \
147 EXPORT_SYMBOL(name##_global_unlock_online); \
148 \
149 void name##_global_lock(void) { \
150 int i; \
151 preempt_disable(); \
152 rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
153 for_each_online_cpu(i) { \
154 arch_spinlock_t *lock; \
155 lock = &per_cpu(name##_lock, i); \
156 arch_spin_lock(lock); \
157 } \
158 } \
159 EXPORT_SYMBOL(name##_global_lock); \
160 \
161 void name##_global_unlock(void) { \
162 int i; \
163 rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
164 for_each_online_cpu(i) { \
165 arch_spinlock_t *lock; \
166 lock = &per_cpu(name##_lock, i); \
167 arch_spin_unlock(lock); \
168 } \
169 preempt_enable(); \
170 } \
171 EXPORT_SYMBOL(name##_global_unlock);
172#endif
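Everything above is macro-generated, so a user never touches the per-CPU arch_spinlock_t directly. A usage sketch built only from the macros defined in this header; "example_lock" is an arbitrary name:

/* shared header */
DECLARE_BRLOCK(example_lock);

/* one .c file */
DEFINE_BRLOCK(example_lock);

static int __init example_init(void)
{
	br_lock_init(example_lock);	/* once, before first use */
	return 0;
}

static void example_reader(void)
{
	br_read_lock(example_lock);	/* this CPU's lock only: fast, scalable */
	/* ... read-mostly work ... */
	br_read_unlock(example_lock);
}

static void example_writer(void)
{
	br_write_lock(example_lock);	/* takes every online CPU's lock */
	/* ... rare global update ... */
	br_write_unlock(example_lock);
}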
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index b8bb9a6a1f37..ee7e258627f9 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -134,7 +134,7 @@ struct vm_area_struct {
134 within vm_mm. */ 134 within vm_mm. */
135 135
136 /* linked list of VM areas per task, sorted by address */ 136 /* linked list of VM areas per task, sorted by address */
137 struct vm_area_struct *vm_next; 137 struct vm_area_struct *vm_next, *vm_prev;
138 138
139 pgprot_t vm_page_prot; /* Access permissions of this VMA. */ 139 pgprot_t vm_page_prot; /* Access permissions of this VMA. */
140 unsigned long vm_flags; /* Flags, see mm.h. */ 140 unsigned long vm_flags; /* Flags, see mm.h. */
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index ae0a5286f558..92e52a1e6af3 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -213,6 +213,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
213 * @dma_alignment: SPI controller constraint on DMA buffers alignment. 213 * @dma_alignment: SPI controller constraint on DMA buffers alignment.
214 * @mode_bits: flags understood by this controller driver 214 * @mode_bits: flags understood by this controller driver
215 * @flags: other constraints relevant to this driver 215 * @flags: other constraints relevant to this driver
216 * @bus_lock_spinlock: spinlock for SPI bus locking
217 * @bus_lock_mutex: mutex for SPI bus locking
218 * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
216 * @setup: updates the device mode and clocking records used by a 219 * @setup: updates the device mode and clocking records used by a
217 * device's SPI controller; protocol code may call this. This 220 * device's SPI controller; protocol code may call this. This
218 * must fail if an unrecognized or unsupported mode is requested. 221 * must fail if an unrecognized or unsupported mode is requested.
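The three fields documented above back the spi_bus_lock()/spi_bus_unlock() API already present in this tree; a typical exclusive-access sequence in a protocol driver looks roughly like the sketch below (message setup omitted, and the _locked transfer variant is used while the bus lock is held):

	struct spi_message msg;
	int ret;

	/* spi_message_init(&msg) and spi_message_add_tail(...) as usual */
	spi_bus_lock(spi->master);		/* keep other devices off the bus */
	ret = spi_sync_locked(spi, &msg);
	spi_bus_unlock(spi->master);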
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 1437da3ddc62..67d64e6efe7a 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -329,6 +329,13 @@ struct tty_struct {
329 struct tty_port *port; 329 struct tty_port *port;
330}; 330};
331 331
332/* Each of a tty's open files has private_data pointing to tty_file_private */
333struct tty_file_private {
334 struct tty_struct *tty;
335 struct file *file;
336 struct list_head list;
337};
338
332/* tty magic number */ 339/* tty magic number */
333#define TTY_MAGIC 0x5401 340#define TTY_MAGIC 0x5401
334 341
@@ -458,6 +465,7 @@ extern void proc_clear_tty(struct task_struct *p);
458extern struct tty_struct *get_current_tty(void); 465extern struct tty_struct *get_current_tty(void);
459extern void tty_default_fops(struct file_operations *fops); 466extern void tty_default_fops(struct file_operations *fops);
460extern struct tty_struct *alloc_tty_struct(void); 467extern struct tty_struct *alloc_tty_struct(void);
468extern void tty_add_file(struct tty_struct *tty, struct file *file);
461extern void free_tty_struct(struct tty_struct *tty); 469extern void free_tty_struct(struct tty_struct *tty);
462extern void initialize_tty_struct(struct tty_struct *tty, 470extern void initialize_tty_struct(struct tty_struct *tty,
463 struct tty_driver *driver, int idx); 471 struct tty_driver *driver, int idx);
@@ -470,6 +478,7 @@ extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty);
470extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty); 478extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty);
471 479
472extern struct mutex tty_mutex; 480extern struct mutex tty_mutex;
481extern spinlock_t tty_files_lock;
473 482
474extern void tty_write_unlock(struct tty_struct *tty); 483extern void tty_write_unlock(struct tty_struct *tty);
475extern int tty_write_lock(struct tty_struct *tty, int ndelay); 484extern int tty_write_lock(struct tty_struct *tty, int ndelay);
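struct tty_file_private hangs off file->private_data and is what lets the tty core enumerate every open file of a tty; the list head and the tty_files_lock users live in drivers/char/tty_io.c earlier in this diff. Roughly how a file gets back to its tty with the new indirection (sketch):

	struct tty_file_private *priv = file->private_data;	/* set up by tty_add_file() */
	struct tty_struct *tty = priv->tty;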
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index 6a664c3f7c1e..7dc97d12253c 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -1707,6 +1707,7 @@ struct snd_emu10k1 {
1707 unsigned int card_type; /* EMU10K1_CARD_* */ 1707 unsigned int card_type; /* EMU10K1_CARD_* */
1708 unsigned int ecard_ctrl; /* ecard control bits */ 1708 unsigned int ecard_ctrl; /* ecard control bits */
1709 unsigned long dma_mask; /* PCI DMA mask */ 1709 unsigned long dma_mask; /* PCI DMA mask */
1710 unsigned int delay_pcm_irq; /* in samples */
1710 int max_cache_pages; /* max memory size / PAGE_SIZE */ 1711 int max_cache_pages; /* max memory size / PAGE_SIZE */
1711 struct snd_dma_buffer silent_page; /* silent page */ 1712 struct snd_dma_buffer silent_page; /* silent page */
1712 struct snd_dma_buffer ptb_pages; /* page table pages */ 1713 struct snd_dma_buffer ptb_pages; /* page table pages */
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
new file mode 100644
index 000000000000..49682d7e9d60
--- /dev/null
+++ b/include/trace/events/workqueue.h
@@ -0,0 +1,62 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM workqueue
3
4#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_WORKQUEUE_H
6
7#include <linux/tracepoint.h>
8#include <linux/workqueue.h>
9
10/**
11 * workqueue_execute_start - called immediately before the workqueue callback
12 * @work: pointer to struct work_struct
13 *
14 * Allows tracking of workqueue execution.
15 */
16TRACE_EVENT(workqueue_execute_start,
17
18 TP_PROTO(struct work_struct *work),
19
20 TP_ARGS(work),
21
22 TP_STRUCT__entry(
23 __field( void *, work )
24 __field( void *, function)
25 ),
26
27 TP_fast_assign(
28 __entry->work = work;
29 __entry->function = work->func;
30 ),
31
32 TP_printk("work struct %p: function %pf", __entry->work, __entry->function)
33);
34
35/**
36 * workqueue_execute_end - called immediately after the workqueue callback
37 * @work: pointer to struct work_struct
38 *
39 * Allows tracking of workqueue execution.
40 */
41TRACE_EVENT(workqueue_execute_end,
42
43 TP_PROTO(struct work_struct *work),
44
45 TP_ARGS(work),
46
47 TP_STRUCT__entry(
48 __field( void *, work )
49 ),
50
51 TP_fast_assign(
52 __entry->work = work;
53 ),
54
55 TP_printk("work struct %p", __entry->work)
56);
57
58
59#endif /* _TRACE_WORKQUEUE_H */
60
61/* This part must be outside protection */
62#include <trace/define_trace.h>
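The two events are meant to bracket the callback invocation; the matching kernel/workqueue.c hunk (truncated below) fires them around the work function, roughly as in this sketch:

	trace_workqueue_execute_start(work);
	f(work);				/* the work item's callback */
	trace_workqueue_execute_end(work);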
diff --git a/kernel/fork.c b/kernel/fork.c
index 98b450876f93..b7e9d60a675d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -300,7 +300,7 @@ out:
300#ifdef CONFIG_MMU 300#ifdef CONFIG_MMU
301static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) 301static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
302{ 302{
303 struct vm_area_struct *mpnt, *tmp, **pprev; 303 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
304 struct rb_node **rb_link, *rb_parent; 304 struct rb_node **rb_link, *rb_parent;
305 int retval; 305 int retval;
306 unsigned long charge; 306 unsigned long charge;
@@ -328,6 +328,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
328 if (retval) 328 if (retval)
329 goto out; 329 goto out;
330 330
331 prev = NULL;
331 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { 332 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
332 struct file *file; 333 struct file *file;
333 334
@@ -359,7 +360,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
359 goto fail_nomem_anon_vma_fork; 360 goto fail_nomem_anon_vma_fork;
360 tmp->vm_flags &= ~VM_LOCKED; 361 tmp->vm_flags &= ~VM_LOCKED;
361 tmp->vm_mm = mm; 362 tmp->vm_mm = mm;
362 tmp->vm_next = NULL; 363 tmp->vm_next = tmp->vm_prev = NULL;
363 file = tmp->vm_file; 364 file = tmp->vm_file;
364 if (file) { 365 if (file) {
365 struct inode *inode = file->f_path.dentry->d_inode; 366 struct inode *inode = file->f_path.dentry->d_inode;
@@ -392,6 +393,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
392 */ 393 */
393 *pprev = tmp; 394 *pprev = tmp;
394 pprev = &tmp->vm_next; 395 pprev = &tmp->vm_next;
396 tmp->vm_prev = prev;
397 prev = tmp;
395 398
396 __vma_link_rb(mm, tmp, rb_link, rb_parent); 399 __vma_link_rb(mm, tmp, rb_link, rb_parent);
397 rb_link = &tmp->vm_rb.rb_right; 400 rb_link = &tmp->vm_rb.rb_right;
@@ -752,13 +755,13 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
752 struct fs_struct *fs = current->fs; 755 struct fs_struct *fs = current->fs;
753 if (clone_flags & CLONE_FS) { 756 if (clone_flags & CLONE_FS) {
754 /* tsk->fs is already what we want */ 757 /* tsk->fs is already what we want */
755 write_lock(&fs->lock); 758 spin_lock(&fs->lock);
756 if (fs->in_exec) { 759 if (fs->in_exec) {
757 write_unlock(&fs->lock); 760 spin_unlock(&fs->lock);
758 return -EAGAIN; 761 return -EAGAIN;
759 } 762 }
760 fs->users++; 763 fs->users++;
761 write_unlock(&fs->lock); 764 spin_unlock(&fs->lock);
762 return 0; 765 return 0;
763 } 766 }
764 tsk->fs = copy_fs_struct(fs); 767 tsk->fs = copy_fs_struct(fs);
@@ -1676,13 +1679,13 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
1676 1679
1677 if (new_fs) { 1680 if (new_fs) {
1678 fs = current->fs; 1681 fs = current->fs;
1679 write_lock(&fs->lock); 1682 spin_lock(&fs->lock);
1680 current->fs = new_fs; 1683 current->fs = new_fs;
1681 if (--fs->users) 1684 if (--fs->users)
1682 new_fs = NULL; 1685 new_fs = NULL;
1683 else 1686 else
1684 new_fs = fs; 1687 new_fs = fs;
1685 write_unlock(&fs->lock); 1688 spin_unlock(&fs->lock);
1686 } 1689 }
1687 1690
1688 if (new_mm) { 1691 if (new_mm) {
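Together with the vm_prev pointer added to struct vm_area_struct above, dup_mmap() now keeps the per-mm VMA list doubly linked. The payoff for other code is direct access to the predecessor instead of a rescan from mm->mmap; illustrative fragment only:

	struct vm_area_struct *prev = vma->vm_prev;	/* NULL for the first VMA */

	if (prev && prev->vm_end == vma->vm_start) {
		/* the two areas are adjacent; finding this out previously
		 * meant find_vma_prev() or a walk from mm->mmap */
	}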
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 4502604ecadf..6b5580c57644 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -503,6 +503,15 @@ unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf,
503} 503}
504EXPORT_SYMBOL(__kfifo_out_r); 504EXPORT_SYMBOL(__kfifo_out_r);
505 505
506void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize)
507{
508 unsigned int n;
509
510 n = __kfifo_peek_n(fifo, recsize);
511 fifo->out += n + recsize;
512}
513EXPORT_SYMBOL(__kfifo_skip_r);
514
506int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from, 515int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from,
507 unsigned long len, unsigned int *copied, size_t recsize) 516 unsigned long len, unsigned int *copied, size_t recsize)
508{ 517{
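__kfifo_skip_r() drops the oldest record by advancing 'out' past the record-length header (recsize bytes) plus the payload it describes, without copying anything out. At the __kfifo level the sketch below does that for a fifo whose length field is one byte; get_record_fifo() is a hypothetical accessor:

	struct __kfifo *kf = get_record_fifo();

	if (kf->in != kf->out)		/* not empty */
		__kfifo_skip_r(kf, 1);	/* skip length byte + payload */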
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3632ce87674f..19cccc3c3028 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3846,6 +3846,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
3846 rpos = reader->read; 3846 rpos = reader->read;
3847 pos += size; 3847 pos += size;
3848 3848
3849 if (rpos >= commit)
3850 break;
3851
3849 event = rb_reader_event(cpu_buffer); 3852 event = rb_reader_event(cpu_buffer);
3850 size = rb_event_length(event); 3853 size = rb_event_length(event);
3851 } while (len > size); 3854 } while (len > size);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ba14a22be4cc..9ec59f541156 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3463,6 +3463,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3463 size_t cnt, loff_t *fpos) 3463 size_t cnt, loff_t *fpos)
3464{ 3464{
3465 char *buf; 3465 char *buf;
3466 size_t written;
3466 3467
3467 if (tracing_disabled) 3468 if (tracing_disabled)
3468 return -EINVAL; 3469 return -EINVAL;
@@ -3484,11 +3485,15 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3484 } else 3485 } else
3485 buf[cnt] = '\0'; 3486 buf[cnt] = '\0';
3486 3487
3487 cnt = mark_printk("%s", buf); 3488 written = mark_printk("%s", buf);
3488 kfree(buf); 3489 kfree(buf);
3489 *fpos += cnt; 3490 *fpos += written;
3490 3491
3491 return cnt; 3492 /* don't tell userspace we wrote more - it might confuse them */
3493 if (written > cnt)
3494 written = cnt;
3495
3496 return written;
3492} 3497}
3493 3498
3494static int tracing_clock_show(struct seq_file *m, void *v) 3499static int tracing_clock_show(struct seq_file *m, void *v)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 09b4fa6e4d3b..4c758f146328 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -598,88 +598,165 @@ out:
598 return ret; 598 return ret;
599} 599}
600 600
601static void print_event_fields(struct trace_seq *s, struct list_head *head) 601enum {
602 FORMAT_HEADER = 1,
603 FORMAT_PRINTFMT = 2,
604};
605
606static void *f_next(struct seq_file *m, void *v, loff_t *pos)
602{ 607{
608 struct ftrace_event_call *call = m->private;
 	struct ftrace_event_field *field;
+	struct list_head *head;
 
-	list_for_each_entry_reverse(field, head, link) {
-		/*
-		 * Smartly shows the array type(except dynamic array).
-		 * Normal:
-		 *	field:TYPE VAR
-		 * If TYPE := TYPE[LEN], it is shown:
-		 *	field:TYPE VAR[LEN]
-		 */
-		const char *array_descriptor = strchr(field->type, '[');
-
-		if (!strncmp(field->type, "__data_loc", 10))
-			array_descriptor = NULL;
-
-		if (!array_descriptor) {
-			trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;"
-					"\tsize:%u;\tsigned:%d;\n",
-					field->type, field->name, field->offset,
-					field->size, !!field->is_signed);
-		} else {
-			trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;"
-					"\tsize:%u;\tsigned:%d;\n",
-					(int)(array_descriptor - field->type),
-					field->type, field->name,
-					array_descriptor, field->offset,
-					field->size, !!field->is_signed);
-		}
-	}
-}
-
-static ssize_t
-event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
-		  loff_t *ppos)
-{
-	struct ftrace_event_call *call = filp->private_data;
-	struct list_head *head;
-	struct trace_seq *s;
-	char *buf;
-	int r;
-
-	if (*ppos)
-		return 0;
-
-	s = kmalloc(sizeof(*s), GFP_KERNEL);
-	if (!s)
-		return -ENOMEM;
-
-	trace_seq_init(s);
-
-	trace_seq_printf(s, "name: %s\n", call->name);
-	trace_seq_printf(s, "ID: %d\n", call->event.type);
-	trace_seq_printf(s, "format:\n");
-
-	/* print common fields */
-	print_event_fields(s, &ftrace_common_fields);
-
-	trace_seq_putc(s, '\n');
-
-	/* print event specific fields */
-	head = trace_get_fields(call);
-	print_event_fields(s, head);
-
-	r = trace_seq_printf(s, "\nprint fmt: %s\n", call->print_fmt);
-
-	if (!r) {
-		/*
-		 * ug! The format output is bigger than a PAGE!!
-		 */
-		buf = "FORMAT TOO BIG\n";
-		r = simple_read_from_buffer(ubuf, cnt, ppos,
-					    buf, strlen(buf));
-		goto out;
-	}
-
-	r = simple_read_from_buffer(ubuf, cnt, ppos,
-				    s->buffer, s->len);
- out:
-	kfree(s);
-	return r;
+	(*pos)++;
+
+	switch ((unsigned long)v) {
+	case FORMAT_HEADER:
+		head = &ftrace_common_fields;
+
+		if (unlikely(list_empty(head)))
+			return NULL;
+
+		field = list_entry(head->prev, struct ftrace_event_field, link);
+		return field;
+
+	case FORMAT_PRINTFMT:
+		/* all done */
+		return NULL;
+	}
+
+	head = trace_get_fields(call);
+
+	/*
+	 * To separate common fields from event fields, the
+	 * LSB is set on the first event field. Clear it in case.
+	 */
+	v = (void *)((unsigned long)v & ~1L);
+
+	field = v;
+	/*
+	 * If this is a common field, and at the end of the list, then
+	 * continue with main list.
+	 */
+	if (field->link.prev == &ftrace_common_fields) {
+		if (unlikely(list_empty(head)))
+			return NULL;
+		field = list_entry(head->prev, struct ftrace_event_field, link);
+		/* Set the LSB to notify f_show to print an extra newline */
+		field = (struct ftrace_event_field *)
+				((unsigned long)field | 1);
+		return field;
+	}
+
+	/* If we are done tell f_show to print the format */
+	if (field->link.prev == head)
+		return (void *)FORMAT_PRINTFMT;
+
+	field = list_entry(field->link.prev, struct ftrace_event_field, link);
+
+	return field;
+}
+
+static void *f_start(struct seq_file *m, loff_t *pos)
+{
+	loff_t l = 0;
+	void *p;
+
+	/* Start by showing the header */
+	if (!*pos)
+		return (void *)FORMAT_HEADER;
+
+	p = (void *)FORMAT_HEADER;
+	do {
+		p = f_next(m, p, &l);
+	} while (p && l < *pos);
+
+	return p;
+}
+
+static int f_show(struct seq_file *m, void *v)
+{
+	struct ftrace_event_call *call = m->private;
+	struct ftrace_event_field *field;
+	const char *array_descriptor;
+
+	switch ((unsigned long)v) {
+	case FORMAT_HEADER:
+		seq_printf(m, "name: %s\n", call->name);
+		seq_printf(m, "ID: %d\n", call->event.type);
+		seq_printf(m, "format:\n");
+		return 0;
+
+	case FORMAT_PRINTFMT:
+		seq_printf(m, "\nprint fmt: %s\n",
+			   call->print_fmt);
+		return 0;
+	}
+
+	/*
+	 * To separate common fields from event fields, the
+	 * LSB is set on the first event field. Clear it and
+	 * print a newline if it is set.
+	 */
+	if ((unsigned long)v & 1) {
+		seq_putc(m, '\n');
+		v = (void *)((unsigned long)v & ~1L);
+	}
+
+	field = v;
+
+	/*
+	 * Smartly shows the array type(except dynamic array).
+	 * Normal:
+	 *	field:TYPE VAR
+	 * If TYPE := TYPE[LEN], it is shown:
+	 *	field:TYPE VAR[LEN]
+	 */
+	array_descriptor = strchr(field->type, '[');
+
+	if (!strncmp(field->type, "__data_loc", 10))
+		array_descriptor = NULL;
+
+	if (!array_descriptor)
+		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+			   field->type, field->name, field->offset,
+			   field->size, !!field->is_signed);
+	else
+		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+			   (int)(array_descriptor - field->type),
+			   field->type, field->name,
+			   array_descriptor, field->offset,
+			   field->size, !!field->is_signed);
+
+	return 0;
+}
+
+static void f_stop(struct seq_file *m, void *p)
+{
+}
+
+static const struct seq_operations trace_format_seq_ops = {
+	.start		= f_start,
+	.next		= f_next,
+	.stop		= f_stop,
+	.show		= f_show,
+};
+
+static int trace_format_open(struct inode *inode, struct file *file)
+{
+	struct ftrace_event_call *call = inode->i_private;
+	struct seq_file *m;
+	int ret;
+
+	ret = seq_open(file, &trace_format_seq_ops);
+	if (ret < 0)
+		return ret;
+
+	m = file->private_data;
+	m->private = call;
+
+	return 0;
 }
 
 static ssize_t
@@ -877,8 +954,10 @@ static const struct file_operations ftrace_enable_fops = {
 };
 
 static const struct file_operations ftrace_event_format_fops = {
-	.open = tracing_open_generic,
-	.read = event_format_read,
+	.open = trace_format_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
 };
 
 static const struct file_operations ftrace_event_id_fops = {
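The seq_file conversion above threads its position through the iterator pointer itself: FORMAT_HEADER and FORMAT_PRINTFMT are sentinel values, and the low bit of an otherwise aligned field pointer carries a "print a blank line first" flag that f_show() clears before dereferencing. A minimal userspace sketch of that low-bit tagging idea, assuming only that the tagged structure is at least 2-byte aligned (names below are illustrative, not kernel API):

#include <stdint.h>
#include <stdio.h>

struct field { const char *name; };

/* Borrow the LSB of an aligned pointer as a one-bit flag. */
static void *tag(void *p)       { return (void *)((uintptr_t)p | 1UL); }
static void *untag(void *p)     { return (void *)((uintptr_t)p & ~1UL); }
static int   is_tagged(void *p) { return (int)((uintptr_t)p & 1UL); }

int main(void)
{
	struct field f = { "common_pid" };
	void *v = tag(&f);

	if (is_tagged(v)) {
		puts("");                /* separator, like seq_putc(m, '\n') */
		v = untag(v);
	}
	printf("field: %s\n", ((struct field *)v)->name);
	return 0;
}

The trick only works because the structure never lands on an odd address; the kernel code above clears the bit before use in exactly the same way.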
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 6bff23625781..6f233698518e 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -507,7 +507,15 @@ get_return_for_leaf(struct trace_iterator *iter,
 	 * if the output fails.
 	 */
 	data->ent = *curr;
-	data->ret = *next;
+	/*
+	 * If the next event is not a return type, then
+	 * we only care about what type it is. Otherwise we can
+	 * safely copy the entire event.
+	 */
+	if (next->ent.type == TRACE_GRAPH_RET)
+		data->ret = *next;
+	else
+		data->ret.ent.type = next->ent.type;
 	}
 }
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2994a0e3a61c..8bd600c020e5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -35,6 +35,9 @@
35#include <linux/lockdep.h> 35#include <linux/lockdep.h>
36#include <linux/idr.h> 36#include <linux/idr.h>
37 37
38#define CREATE_TRACE_POINTS
39#include <trace/events/workqueue.h>
40
38#include "workqueue_sched.h" 41#include "workqueue_sched.h"
39 42
40enum { 43enum {
@@ -1790,7 +1793,13 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
1790 work_clear_pending(work); 1793 work_clear_pending(work);
1791 lock_map_acquire(&cwq->wq->lockdep_map); 1794 lock_map_acquire(&cwq->wq->lockdep_map);
1792 lock_map_acquire(&lockdep_map); 1795 lock_map_acquire(&lockdep_map);
1796 trace_workqueue_execute_start(work);
1793 f(work); 1797 f(work);
1798 /*
1799 * While we must be careful to not use "work" after this, the trace
1800 * point will only record its address.
1801 */
1802 trace_workqueue_execute_end(work);
1794 lock_map_release(&lockdep_map); 1803 lock_map_release(&lockdep_map);
1795 lock_map_release(&cwq->wq->lockdep_map); 1804 lock_map_release(&cwq->wq->lockdep_map);
1796 1805
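The end tracepoint fires after f(work) even though the callback may already have freed the work item; that is safe only because the tracepoint records the pointer value and never dereferences it. A userspace sketch of the same discipline around a callback (the trace_* names below are plain printf stand-ins, not the kernel tracepoints):

#include <stdio.h>
#include <stdlib.h>

struct work { void (*fn)(struct work *); };

static void trace_execute_start(struct work *w) { printf("start %p\n", (void *)w); }
static void trace_execute_end(struct work *w)   { printf("end   %p\n", (void *)w); }

static void self_freeing_fn(struct work *w)
{
	free(w);        /* a callback is allowed to free its own work item */
}

static void process_one_work(struct work *w)
{
	trace_execute_start(w);
	w->fn(w);
	/* 'w' may be gone now: only its address is recorded, never *w. */
	trace_execute_end(w);
}

int main(void)
{
	struct work *w = malloc(sizeof(*w));

	if (!w)
		return 1;
	w->fn = self_freeing_fn;
	process_one_work(w);
	return 0;
}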
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 9e06b7f5ecf1..1b4afd2e6ca0 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -994,13 +994,16 @@ config FAULT_INJECTION_STACKTRACE_FILTER
994 994
995config LATENCYTOP 995config LATENCYTOP
996 bool "Latency measuring infrastructure" 996 bool "Latency measuring infrastructure"
997 depends on HAVE_LATENCYTOP_SUPPORT
998 depends on DEBUG_KERNEL
999 depends on STACKTRACE_SUPPORT
1000 depends on PROC_FS
997 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE 1001 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
998 select KALLSYMS 1002 select KALLSYMS
999 select KALLSYMS_ALL 1003 select KALLSYMS_ALL
1000 select STACKTRACE 1004 select STACKTRACE
1001 select SCHEDSTATS 1005 select SCHEDSTATS
1002 select SCHED_DEBUG 1006 select SCHED_DEBUG
1003 depends on HAVE_LATENCYTOP_SUPPORT
1004 help 1007 help
1005 Enable this option if you want to use the LatencyTOP tool 1008 Enable this option if you want to use the LatencyTOP tool
1006 to find out which userspace is blocking on what kernel operations. 1009 to find out which userspace is blocking on what kernel operations.
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index e907858498a6..5b7d4623f0b7 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -625,6 +625,8 @@ EXPORT_SYMBOL(radix_tree_tag_get);
625 * 625 *
626 * The function returns number of leaves where the tag was set and sets 626 * The function returns number of leaves where the tag was set and sets
627 * *first_indexp to the first unscanned index. 627 * *first_indexp to the first unscanned index.
628 * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must
629 * be prepared to handle that.
628 */ 630 */
629unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, 631unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
630 unsigned long *first_indexp, unsigned long last_index, 632 unsigned long *first_indexp, unsigned long last_index,
@@ -675,7 +677,8 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
 next:
 	/* Go to next item at level determined by 'shift' */
 	index = ((index >> shift) + 1) << shift;
-	if (index > last_index)
+	/* Overflow can happen when last_index is ~0UL... */
+	if (index > last_index || !index)
 		break;
 	if (tagged >= nr_to_tag)
 		break;
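The added !index test guards against the step expression wrapping to zero once index already sits in the topmost slot and last_index is ~0UL. A small standalone C illustration of the wrap (the shift value is picked arbitrarily for the demo):

#include <stdio.h>

int main(void)
{
	unsigned long last_index = ~0UL;
	unsigned long index = last_index;   /* already at the final slot */
	unsigned int shift = 6;             /* e.g. one radix-tree level */

	index = ((index >> shift) + 1) << shift;    /* wraps around to 0 */
	printf("stepped index = %lu\n", index);

	/* Without the !index check this scan would never terminate. */
	if (index > last_index || !index)
		puts("stop: range exhausted (index wrapped)");
	return 0;
}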
diff --git a/mm/memory.c b/mm/memory.c
index b6e5fd23cc5a..2ed2267439df 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2770,11 +2770,18 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
 {
 	address &= PAGE_MASK;
 	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
-		address -= PAGE_SIZE;
-		if (find_vma(vma->vm_mm, address) != vma)
-			return -ENOMEM;
+		struct vm_area_struct *prev = vma->vm_prev;
+
+		/*
+		 * Is there a mapping abutting this one below?
+		 *
+		 * That's only ok if it's the same stack mapping
+		 * that has gotten split..
+		 */
+		if (prev && prev->vm_end == address)
+			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
 
-		expand_stack(vma, address);
+		expand_stack(vma, address - PAGE_SIZE);
 	}
 	return 0;
 }
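The new check asks whether some mapping ends exactly where the faulting stack VMA starts; if that abutting mapping is itself a grown-down stack piece the guard page is waived, otherwise the fault is refused. A simplified userspace model of just that predicate (the expand_stack() side is omitted; the cut-down vma struct and values are illustrative):

#include <stdio.h>

#define VM_GROWSDOWN 0x0100
#define PAGE_SIZE    4096UL

struct vma {
	unsigned long vm_start, vm_end, vm_flags;
	struct vma *vm_prev;
};

/* 0: ok to grow past the guard page, -1: refuse (ENOMEM in the kernel) */
static int check_stack_guard(struct vma *vma, unsigned long address)
{
	address &= ~(PAGE_SIZE - 1);
	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
		struct vma *prev = vma->vm_prev;

		/* A mapping abutting from below is only acceptable if it
		 * is the same stack that got split. */
		if (prev && prev->vm_end == address)
			return (prev->vm_flags & VM_GROWSDOWN) ? 0 : -1;
	}
	return 0;
}

int main(void)
{
	struct vma heap  = { 0x10000, 0x20000, 0, NULL };
	struct vma stack = { 0x20000, 0x30000, VM_GROWSDOWN, &heap };

	printf("fault at stack base next to heap -> %d (refused)\n",
	       check_stack_guard(&stack, 0x20000));
	return 0;
}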
diff --git a/mm/mlock.c b/mm/mlock.c
index 49e5e4cb8232..cbae7c5b9568 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -135,6 +135,19 @@ void munlock_vma_page(struct page *page)
135 } 135 }
136} 136}
137 137
138/* Is the vma a continuation of the stack vma above it? */
139static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
140{
141 return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
142}
143
144static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
145{
146 return (vma->vm_flags & VM_GROWSDOWN) &&
147 (vma->vm_start == addr) &&
148 !vma_stack_continue(vma->vm_prev, addr);
149}
150
138/** 151/**
139 * __mlock_vma_pages_range() - mlock a range of pages in the vma. 152 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
140 * @vma: target vma 153 * @vma: target vma
@@ -168,11 +181,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	gup_flags |= FOLL_WRITE;
 
 	/* We don't try to access the guard page of a stack vma */
-	if (vma->vm_flags & VM_GROWSDOWN) {
-		if (start == vma->vm_start) {
-			start += PAGE_SIZE;
-			nr_pages--;
-		}
+	if (stack_guard_page(vma, start)) {
+		addr += PAGE_SIZE;
+		nr_pages--;
 	}
 
 	while (nr_pages > 0) {
diff --git a/mm/mmap.c b/mm/mmap.c
index 31003338b978..331e51af38c9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -388,17 +388,23 @@ static inline void
 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct vm_area_struct *prev, struct rb_node *rb_parent)
 {
+	struct vm_area_struct *next;
+
+	vma->vm_prev = prev;
 	if (prev) {
-		vma->vm_next = prev->vm_next;
+		next = prev->vm_next;
 		prev->vm_next = vma;
 	} else {
 		mm->mmap = vma;
 		if (rb_parent)
-			vma->vm_next = rb_entry(rb_parent,
+			next = rb_entry(rb_parent,
 					struct vm_area_struct, vm_rb);
 		else
-			vma->vm_next = NULL;
+			next = NULL;
 	}
+	vma->vm_next = next;
+	if (next)
+		next->vm_prev = vma;
 }
403 409
404void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, 410void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -483,7 +489,11 @@ static inline void
 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct vm_area_struct *prev)
 {
-	prev->vm_next = vma->vm_next;
+	struct vm_area_struct *next = vma->vm_next;
+
+	prev->vm_next = next;
+	if (next)
+		next->vm_prev = prev;
 	rb_erase(&vma->vm_rb, &mm->mm_rb);
 	if (mm->mmap_cache == vma)
 		mm->mmap_cache = prev;
@@ -1915,6 +1925,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
1915 unsigned long addr; 1925 unsigned long addr;
1916 1926
1917 insertion_point = (prev ? &prev->vm_next : &mm->mmap); 1927 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
1928 vma->vm_prev = NULL;
1918 do { 1929 do {
1919 rb_erase(&vma->vm_rb, &mm->mm_rb); 1930 rb_erase(&vma->vm_rb, &mm->mm_rb);
1920 mm->map_count--; 1931 mm->map_count--;
@@ -1922,6 +1933,8 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
1922 vma = vma->vm_next; 1933 vma = vma->vm_next;
1923 } while (vma && vma->vm_start < end); 1934 } while (vma && vma->vm_start < end);
1924 *insertion_point = vma; 1935 *insertion_point = vma;
1936 if (vma)
1937 vma->vm_prev = prev;
1925 tail_vma->vm_next = NULL; 1938 tail_vma->vm_next = NULL;
1926 if (mm->unmap_area == arch_unmap_area) 1939 if (mm->unmap_area == arch_unmap_area)
1927 addr = prev ? prev->vm_end : mm->mmap_base; 1940 addr = prev ? prev->vm_end : mm->mmap_base;
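All the mmap.c hunks serve one purpose: every place that links or unlinks a VMA in the singly linked mm->mmap list now also keeps the new vm_prev back-pointer consistent. A standalone sketch of the same insert/unlink bookkeeping on a doubly linked list (simplified node type, no rb-tree, names are illustrative):

#include <stdio.h>
#include <stddef.h>

struct node {
	int start;
	struct node *next, *prev;
};

/* Insert vma after prev (or at the head when prev is NULL). */
static void link_node(struct node **head, struct node *prev, struct node *vma)
{
	struct node *next;

	vma->prev = prev;
	if (prev) {
		next = prev->next;
		prev->next = vma;
	} else {
		next = *head;
		*head = vma;
	}
	vma->next = next;
	if (next)
		next->prev = vma;       /* the back-pointer the patch adds */
}

static void unlink_node(struct node **head, struct node *vma)
{
	struct node *next = vma->next;

	if (vma->prev)
		vma->prev->next = next;
	else
		*head = next;
	if (next)
		next->prev = vma->prev;
}

int main(void)
{
	struct node a = { 1, NULL, NULL }, b = { 2, NULL, NULL };
	struct node *head = NULL;

	link_node(&head, NULL, &a);
	link_node(&head, &a, &b);
	unlink_node(&head, &a);
	printf("head=%d, head->prev=%s\n", head->start,
	       head->prev ? "set" : "NULL");
	return 0;
}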
diff --git a/mm/nommu.c b/mm/nommu.c
index efa9a380335e..88ff091eb07a 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -604,7 +604,7 @@ static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
604 */ 604 */
605static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) 605static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
606{ 606{
-	struct vm_area_struct *pvma, **pp;
+	struct vm_area_struct *pvma, **pp, *next;
608 struct address_space *mapping; 608 struct address_space *mapping;
609 struct rb_node **p, *parent; 609 struct rb_node **p, *parent;
610 610
@@ -664,8 +664,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
664 break; 664 break;
665 } 665 }
666 666
-	vma->vm_next = *pp;
+	next = *pp;
 	*pp = vma;
669 vma->vm_next = next;
670 if (next)
671 next->vm_prev = vma;
669} 672}
670 673
671/* 674/*
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 5014e50644d1..fc81cb22869e 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -372,7 +372,7 @@ static void dump_tasks(const struct mem_cgroup *mem)
372 } 372 }
373 373
374 pr_info("[%5d] %5d %5d %8lu %8lu %3u %3d %5d %s\n", 374 pr_info("[%5d] %5d %5d %8lu %8lu %3u %3d %5d %s\n",
375 task->pid, __task_cred(task)->uid, task->tgid, 375 task->pid, task_uid(task), task->tgid,
376 task->mm->total_vm, get_mm_rss(task->mm), 376 task->mm->total_vm, get_mm_rss(task->mm),
377 task_cpu(task), task->signal->oom_adj, 377 task_cpu(task), task->signal->oom_adj,
378 task->signal->oom_score_adj, task->comm); 378 task->signal->oom_score_adj, task->comm);
@@ -401,10 +401,9 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
401static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem) 401static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
402{ 402{
 	p = find_lock_task_mm(p);
-	if (!p) {
-		task_unlock(p);
+	if (!p)
 		return 1;
-	}
+
 	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
409 task_pid_nr(p), p->comm, K(p->mm->total_vm), 408 task_pid_nr(p), p->comm, K(p->mm->total_vm),
410 K(get_mm_counter(p->mm, MM_ANONPAGES)), 409 K(get_mm_counter(p->mm, MM_ANONPAGES)),
@@ -647,6 +646,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
647 unsigned long freed = 0; 646 unsigned long freed = 0;
648 unsigned int points; 647 unsigned int points;
649 enum oom_constraint constraint = CONSTRAINT_NONE; 648 enum oom_constraint constraint = CONSTRAINT_NONE;
649 int killed = 0;
650 650
651 blocking_notifier_call_chain(&oom_notify_list, 0, &freed); 651 blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
652 if (freed > 0) 652 if (freed > 0)
@@ -684,7 +684,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
684 if (!oom_kill_process(current, gfp_mask, order, 0, totalpages, 684 if (!oom_kill_process(current, gfp_mask, order, 0, totalpages,
685 NULL, nodemask, 685 NULL, nodemask,
686 "Out of memory (oom_kill_allocating_task)")) 686 "Out of memory (oom_kill_allocating_task)"))
-			return;
+			goto out;
688 } 688 }
689 689
690retry: 690retry:
@@ -692,7 +692,7 @@ retry:
692 constraint == CONSTRAINT_MEMORY_POLICY ? nodemask : 692 constraint == CONSTRAINT_MEMORY_POLICY ? nodemask :
693 NULL); 693 NULL);
694 if (PTR_ERR(p) == -1UL) 694 if (PTR_ERR(p) == -1UL)
-		return;
+		goto out;
696 696
697 /* Found nothing?!?! Either we hang forever, or we panic. */ 697 /* Found nothing?!?! Either we hang forever, or we panic. */
698 if (!p) { 698 if (!p) {
@@ -704,13 +704,15 @@ retry:
704 if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL, 704 if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
705 nodemask, "Out of memory")) 705 nodemask, "Out of memory"))
706 goto retry; 706 goto retry;
707 killed = 1;
708out:
707 read_unlock(&tasklist_lock); 709 read_unlock(&tasklist_lock);
708 710
709 /* 711 /*
710 * Give "p" a good chance of killing itself before we 712 * Give "p" a good chance of killing itself before we
711 * retry to allocate memory unless "p" is current 713 * retry to allocate memory unless "p" is current
712 */ 714 */
-	if (!test_thread_flag(TIF_MEMDIE))
+	if (killed && !test_thread_flag(TIF_MEMDIE))
714 schedule_timeout_uninterruptible(1); 716 schedule_timeout_uninterruptible(1);
715} 717}
716 718
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7262aacea8a2..c09ef5219cbe 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -836,7 +836,8 @@ void tag_pages_for_writeback(struct address_space *mapping,
836 spin_unlock_irq(&mapping->tree_lock); 836 spin_unlock_irq(&mapping->tree_lock);
837 WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH); 837 WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
838 cond_resched(); 838 cond_resched();
-	} while (tagged >= WRITEBACK_TAG_BATCH);
+		/* We check 'start' to handle wrapping when end == ~0UL */
+	} while (tagged >= WRITEBACK_TAG_BATCH && start);
840} 841}
841EXPORT_SYMBOL(tag_pages_for_writeback); 842EXPORT_SYMBOL(tag_pages_for_writeback);
842 843
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index 443c161eb8bd..3376d7657185 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -18,10 +18,11 @@ config SUNRPC_XPRT_RDMA
18 If unsure, say N. 18 If unsure, say N.
19 19
20config RPCSEC_GSS_KRB5 20config RPCSEC_GSS_KRB5
-	tristate "Secure RPC: Kerberos V mechanism (EXPERIMENTAL)"
-	depends on SUNRPC && EXPERIMENTAL
+	tristate
+	depends on SUNRPC && CRYPTO
+	prompt "Secure RPC: Kerberos V mechanism" if !(NFS_V4 || NFSD_V4)
+	default y
 	select SUNRPC_GSS
-	select CRYPTO
25 select CRYPTO_MD5 26 select CRYPTO_MD5
26 select CRYPTO_DES 27 select CRYPTO_DES
27 select CRYPTO_CBC 28 select CRYPTO_CBC
@@ -34,7 +35,7 @@ config RPCSEC_GSS_KRB5
34 available from http://linux-nfs.org/. In addition, user-space 35 available from http://linux-nfs.org/. In addition, user-space
35 Kerberos support should be installed. 36 Kerberos support should be installed.
36 37
-	  If unsure, say N.
+	  If unsure, say Y.
38 39
39config RPCSEC_GSS_SPKM3 40config RPCSEC_GSS_SPKM3
40 tristate "Secure RPC: SPKM3 mechanism (EXPERIMENTAL)" 41 tristate "Secure RPC: SPKM3 mechanism (EXPERIMENTAL)"
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index e5e28d1946a4..2ac3f6e8adff 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -249,6 +249,8 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
249 req->rl_nchunks = nchunks; 249 req->rl_nchunks = nchunks;
250 250
251 BUG_ON(nchunks == 0); 251 BUG_ON(nchunks == 0);
252 BUG_ON((r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR)
253 && (nchunks > 3));
252 254
253 /* 255 /*
254 * finish off header. If write, marshal discrim and nchunks. 256 * finish off header. If write, marshal discrim and nchunks.
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 27015c6d8eb5..5f4c7b3bc711 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -650,10 +650,22 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
650 ep->rep_attr.cap.max_send_wr = cdata->max_requests; 650 ep->rep_attr.cap.max_send_wr = cdata->max_requests;
651 switch (ia->ri_memreg_strategy) { 651 switch (ia->ri_memreg_strategy) {
652 case RPCRDMA_FRMR: 652 case RPCRDMA_FRMR:
-		/* Add room for frmr register and invalidate WRs */
-		ep->rep_attr.cap.max_send_wr *= 3;
-		if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr)
-			return -EINVAL;
+		/* Add room for frmr register and invalidate WRs.
+		 * 1. FRMR reg WR for head
+		 * 2. FRMR invalidate WR for head
+		 * 3. FRMR reg WR for pagelist
+		 * 4. FRMR invalidate WR for pagelist
+		 * 5. FRMR reg WR for tail
+		 * 6. FRMR invalidate WR for tail
+		 * 7. The RDMA_SEND WR
+		 */
+		ep->rep_attr.cap.max_send_wr *= 7;
+		if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr) {
+			cdata->max_requests = devattr.max_qp_wr / 7;
+			if (!cdata->max_requests)
+				return -EINVAL;
+			ep->rep_attr.cap.max_send_wr = cdata->max_requests * 7;
+		}
657 break; 669 break;
658 case RPCRDMA_MEMWINDOWS_ASYNC: 670 case RPCRDMA_MEMWINDOWS_ASYNC:
659 case RPCRDMA_MEMWINDOWS: 671 case RPCRDMA_MEMWINDOWS:
@@ -1490,7 +1502,7 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
1490 memset(&frmr_wr, 0, sizeof frmr_wr); 1502 memset(&frmr_wr, 0, sizeof frmr_wr);
1491 frmr_wr.opcode = IB_WR_FAST_REG_MR; 1503 frmr_wr.opcode = IB_WR_FAST_REG_MR;
1492 frmr_wr.send_flags = 0; /* unsignaled */ 1504 frmr_wr.send_flags = 0; /* unsignaled */
-	frmr_wr.wr.fast_reg.iova_start = (unsigned long)seg1->mr_dma;
+	frmr_wr.wr.fast_reg.iova_start = seg1->mr_dma;
1494 frmr_wr.wr.fast_reg.page_list = seg1->mr_chunk.rl_mw->r.frmr.fr_pgl; 1506 frmr_wr.wr.fast_reg.page_list = seg1->mr_chunk.rl_mw->r.frmr.fr_pgl;
1495 frmr_wr.wr.fast_reg.page_list_len = i; 1507 frmr_wr.wr.fast_reg.page_list_len = i;
1496 frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT; 1508 frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
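The comment enumerates why a single RPC may now consume up to seven work requests (register/invalidate for head, pagelist and tail, plus the send itself), so the send queue is sized as max_requests * 7 and, when that exceeds what the device advertises, max_requests is scaled back instead of failing outright. A small arithmetic sketch of that clamping with a made-up device limit:

#include <stdio.h>

#define WRS_PER_REQUEST 7   /* 3 x (reg + invalidate) + 1 send */

int main(void)
{
	unsigned int max_requests = 32;
	unsigned int max_qp_wr = 100;       /* pretend device limit */
	unsigned int max_send_wr = max_requests * WRS_PER_REQUEST;

	if (max_send_wr > max_qp_wr) {
		max_requests = max_qp_wr / WRS_PER_REQUEST;
		if (!max_requests) {
			puts("device cannot hold even one request");
			return 1;
		}
		max_send_wr = max_requests * WRS_PER_REQUEST;
	}
	printf("max_requests=%u max_send_wr=%u\n", max_requests, max_send_wr);
	return 0;
}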
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 49a62f0c4b87..b6309db56226 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1305,10 +1305,11 @@ static void xs_tcp_state_change(struct sock *sk)
1305 if (!(xprt = xprt_from_sock(sk))) 1305 if (!(xprt = xprt_from_sock(sk)))
1306 goto out; 1306 goto out;
1307 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); 1307 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
-	dprintk("RPC: state %x conn %d dead %d zapped %d\n",
+	dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
 			sk->sk_state, xprt_connected(xprt),
 			sock_flag(sk, SOCK_DEAD),
-			sock_flag(sk, SOCK_ZAPPED));
+			sock_flag(sk, SOCK_ZAPPED),
+			sk->sk_shutdown);
1312 1313
1313 switch (sk->sk_state) { 1314 switch (sk->sk_state) {
1314 case TCP_ESTABLISHED: 1315 case TCP_ESTABLISHED:
@@ -1779,10 +1780,25 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt, struct sock_xprt *tra
1779{ 1780{
1780 unsigned int state = transport->inet->sk_state; 1781 unsigned int state = transport->inet->sk_state;
1781 1782
-	if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED)
-		return;
-	if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT))
-		return;
+	if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED) {
+		/* we don't need to abort the connection if the socket
+		 * hasn't undergone a shutdown
+		 */
+		if (transport->inet->sk_shutdown == 0)
+			return;
+		dprintk("RPC: %s: TCP_CLOSEd and sk_shutdown set to %d\n",
+			__func__, transport->inet->sk_shutdown);
+	}
+	if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT)) {
+		/* we don't need to abort the connection if the socket
+		 * hasn't undergone a shutdown
+		 */
+		if (transport->inet->sk_shutdown == 0)
+			return;
+		dprintk("RPC: %s: ESTABLISHED/SYN_SENT "
+			"sk_shutdown set to %d\n",
+			__func__, transport->inet->sk_shutdown);
+	}
1786 xs_abort_connection(xprt, transport); 1802 xs_abort_connection(xprt, transport);
1787} 1803}
1788 1804
diff --git a/samples/kfifo/bytestream-example.c b/samples/kfifo/bytestream-example.c
index 642eef3f6336..178061e87ffe 100644
--- a/samples/kfifo/bytestream-example.c
+++ b/samples/kfifo/bytestream-example.c
@@ -44,10 +44,17 @@ static struct kfifo test;
44static DECLARE_KFIFO(test, unsigned char, FIFO_SIZE); 44static DECLARE_KFIFO(test, unsigned char, FIFO_SIZE);
45#endif 45#endif
46 46
47static const unsigned char expected_result[FIFO_SIZE] = {
48 3, 4, 5, 6, 7, 8, 9, 0,
49 1, 20, 21, 22, 23, 24, 25, 26,
50 27, 28, 29, 30, 31, 32, 33, 34,
51 35, 36, 37, 38, 39, 40, 41, 42,
52};
53
47static int __init testfunc(void) 54static int __init testfunc(void)
48{ 55{
49 unsigned char buf[6]; 56 unsigned char buf[6];
-	unsigned char	i;
+	unsigned char	i, j;
51 unsigned int ret; 58 unsigned int ret;
52 59
53 printk(KERN_INFO "byte stream fifo test start\n"); 60 printk(KERN_INFO "byte stream fifo test start\n");
@@ -73,16 +80,34 @@ static int __init testfunc(void)
73 ret = kfifo_in(&test, buf, ret); 80 ret = kfifo_in(&test, buf, ret);
74 printk(KERN_INFO "ret: %d\n", ret); 81 printk(KERN_INFO "ret: %d\n", ret);
75 82
83 /* skip first element of the fifo */
84 printk(KERN_INFO "skip 1st element\n");
85 kfifo_skip(&test);
86
76 /* put values into the fifo until is full */ 87 /* put values into the fifo until is full */
77 for (i = 20; kfifo_put(&test, &i); i++) 88 for (i = 20; kfifo_put(&test, &i); i++)
78 ; 89 ;
79 90
80 printk(KERN_INFO "queue len: %u\n", kfifo_len(&test)); 91 printk(KERN_INFO "queue len: %u\n", kfifo_len(&test));
81 92
-	/* print out all values in the fifo */
-	while (kfifo_get(&test, &i))
-		printk("%d ", i);
-	printk("\n");
+	/* show the first value without removing from the fifo */
+	if (kfifo_peek(&test, &i))
+		printk(KERN_INFO "%d\n", i);
+
97 /* check the correctness of all values in the fifo */
98 j = 0;
99 while (kfifo_get(&test, &i)) {
100 printk(KERN_INFO "item = %d\n", i);
101 if (i != expected_result[j++]) {
102 printk(KERN_WARNING "value mismatch: test failed\n");
103 return -EIO;
104 }
105 }
106 if (j != ARRAY_SIZE(expected_result)) {
107 printk(KERN_WARNING "size mismatch: test failed\n");
108 return -EIO;
109 }
110 printk(KERN_INFO "test passed\n");
86 111
87 return 0; 112 return 0;
88} 113}
@@ -138,7 +163,12 @@ static int __init example_init(void)
138#else 163#else
139 INIT_KFIFO(test); 164 INIT_KFIFO(test);
140#endif 165#endif
-	testfunc();
+	if (testfunc() < 0) {
167#ifdef DYNAMIC
168 kfifo_free(&test);
169#endif
170 return -EIO;
171 }
142 172
143 if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) { 173 if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) {
144#ifdef DYNAMIC 174#ifdef DYNAMIC
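The sample drives kfifo_put()/kfifo_get()/kfifo_skip() against a 32-byte fifo and then checks what comes out against expected_result. For readers without the kfifo headers at hand, here is a tiny self-contained ring buffer with the same put/get/skip shape; this is only a sketch of the idea, not the kfifo implementation, and the index masking works because the size is a power of two:

#include <stdio.h>

#define FIFO_SIZE 32            /* power of two, like the sample above */

static unsigned char buf[FIFO_SIZE];
static unsigned int in, out;    /* free-running indices, masked on access */

static unsigned int fifo_len(void) { return in - out; }

static int fifo_put(unsigned char c)
{
	if (fifo_len() == FIFO_SIZE)
		return 0;
	buf[in++ & (FIFO_SIZE - 1)] = c;
	return 1;
}

static int fifo_get(unsigned char *c)
{
	if (!fifo_len())
		return 0;
	*c = buf[out++ & (FIFO_SIZE - 1)];
	return 1;
}

static void fifo_skip(void) { if (fifo_len()) out++; }

int main(void)
{
	unsigned char c;
	unsigned int i;

	for (i = 0; i < 10; i++)
		fifo_put((unsigned char)i);
	fifo_skip();            /* drop the oldest element, like kfifo_skip() */
	while (fifo_get(&c))
		printf("%u ", c);
	putchar('\n');          /* prints 1 2 3 ... 9 */
	return 0;
}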
diff --git a/samples/kfifo/dma-example.c b/samples/kfifo/dma-example.c
index b9482c28b41a..ee03a4f0b64f 100644
--- a/samples/kfifo/dma-example.c
+++ b/samples/kfifo/dma-example.c
@@ -29,8 +29,8 @@ static int __init example_init(void)
29 printk(KERN_INFO "DMA fifo test start\n"); 29 printk(KERN_INFO "DMA fifo test start\n");
30 30
31 if (kfifo_alloc(&fifo, FIFO_SIZE, GFP_KERNEL)) { 31 if (kfifo_alloc(&fifo, FIFO_SIZE, GFP_KERNEL)) {
-		printk(KERN_ERR "error kfifo_alloc\n");
-		return 1;
+		printk(KERN_WARNING "error kfifo_alloc\n");
+		return -ENOMEM;
34 } 34 }
35 35
36 printk(KERN_INFO "queue size: %u\n", kfifo_size(&fifo)); 36 printk(KERN_INFO "queue size: %u\n", kfifo_size(&fifo));
@@ -41,72 +41,99 @@ static int __init example_init(void)
41 kfifo_put(&fifo, &i); 41 kfifo_put(&fifo, &i);
42 42
43 /* kick away first byte */ 43 /* kick away first byte */
-	ret = kfifo_get(&fifo, &i);
+	kfifo_skip(&fifo);
45 45
46 printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo)); 46 printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo));
47 47
48 /*
49 * Configure the kfifo buffer to receive data from DMA input.
50 *
51 * .--------------------------------------.
52 * | 0 | 1 | 2 | ... | 12 | 13 | ... | 31 |
53 * |---|------------------|---------------|
54 * \_/ \________________/ \_____________/
55 * \ \ \
56 * \ \_allocated data \
57 * \_*free space* \_*free space*
58 *
59 * We need two different SG entries: one for the free space area at the
60 * end of the kfifo buffer (19 bytes) and another for the first free
61 * byte at the beginning, after the kfifo_skip().
62 */
63 sg_init_table(sg, ARRAY_SIZE(sg));
48 ret = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE); 64 ret = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE);
49 printk(KERN_INFO "DMA sgl entries: %d\n", ret); 65 printk(KERN_INFO "DMA sgl entries: %d\n", ret);
66 if (!ret) {
67 /* fifo is full and no sgl was created */
68 printk(KERN_WARNING "error kfifo_dma_in_prepare\n");
69 return -EIO;
70 }
50 71
-	/* if 0 was returned, fifo is full and no sgl was created */
-	if (ret) {
-		printk(KERN_INFO "scatterlist for receive:\n");
-		for (i = 0; i < ARRAY_SIZE(sg); i++) {
-			printk(KERN_INFO
-				"sg[%d] -> "
-				"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
-				i, sg[i].page_link, sg[i].offset, sg[i].length);
+	/* receive data */
+	printk(KERN_INFO "scatterlist for receive:\n");
+	for (i = 0; i < ARRAY_SIZE(sg); i++) {
+		printk(KERN_INFO
+			"sg[%d] -> "
+			"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
+			i, sg[i].page_link, sg[i].offset, sg[i].length);
59 79
60 if (sg_is_last(&sg[i])) 80 if (sg_is_last(&sg[i]))
61 break; 81 break;
62 } 82 }
63 83
-		/* but here your code to setup and exectute the dma operation */
+	/* put here your code to setup and exectute the dma operation */
65 /* ... */ 85 /* ... */
66 86
67 /* example: zero bytes received */ 87 /* example: zero bytes received */
68 ret = 0; 88 ret = 0;
69 89
70 /* finish the dma operation and update the received data */ 90 /* finish the dma operation and update the received data */
71 kfifo_dma_in_finish(&fifo, ret); 91 kfifo_dma_in_finish(&fifo, ret);
72 }
73 92
93 /* Prepare to transmit data, example: 8 bytes */
74 ret = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8); 94 ret = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
75 printk(KERN_INFO "DMA sgl entries: %d\n", ret); 95 printk(KERN_INFO "DMA sgl entries: %d\n", ret);
96 if (!ret) {
97 /* no data was available and no sgl was created */
98 printk(KERN_WARNING "error kfifo_dma_out_prepare\n");
99 return -EIO;
100 }
76 101
-	/* if 0 was returned, no data was available and no sgl was created */
-	if (ret) {
-		printk(KERN_INFO "scatterlist for transmit:\n");
-		for (i = 0; i < ARRAY_SIZE(sg); i++) {
-			printk(KERN_INFO
-				"sg[%d] -> "
-				"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
-				i, sg[i].page_link, sg[i].offset, sg[i].length);
+	printk(KERN_INFO "scatterlist for transmit:\n");
+	for (i = 0; i < ARRAY_SIZE(sg); i++) {
+		printk(KERN_INFO
+			"sg[%d] -> "
+			"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
+			i, sg[i].page_link, sg[i].offset, sg[i].length);
85 108
86 if (sg_is_last(&sg[i])) 109 if (sg_is_last(&sg[i]))
87 break; 110 break;
88 } 111 }
89 112
-		/* but here your code to setup and exectute the dma operation */
+	/* put here your code to setup and exectute the dma operation */
91 /* ... */ 114 /* ... */
92 115
93 /* example: 5 bytes transmitted */ 116 /* example: 5 bytes transmitted */
94 ret = 5; 117 ret = 5;
95 118
96 /* finish the dma operation and update the transmitted data */ 119 /* finish the dma operation and update the transmitted data */
97 kfifo_dma_out_finish(&fifo, ret); 120 kfifo_dma_out_finish(&fifo, ret);
98 }
99 121
122 ret = kfifo_len(&fifo);
100 printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo)); 123 printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo));
101 124
125 if (ret != 7) {
126 printk(KERN_WARNING "size mismatch: test failed");
127 return -EIO;
128 }
129 printk(KERN_INFO "test passed\n");
130
102 return 0; 131 return 0;
103} 132}
104 133
 static void __exit example_exit(void)
 {
-#ifdef DYNAMIC
-	kfifo_free(&test);
-#endif
+	kfifo_free(&fifo);
 }
111 138
112module_init(example_init); 139module_init(example_init);
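The ASCII diagram added above is the whole point of the two scatterlist entries: once the kfifo's data (or free space) wraps past the end of the buffer, one contiguous region becomes two. A standalone sketch of splitting such a wrapped range into at most two (offset, length) chunks, assuming only a power-of-two buffer size (numbers chosen to mirror the 19 + 1 byte split in the diagram):

#include <stdio.h>

#define BUF_SIZE 32U    /* power of two */

struct chunk { unsigned int off, len; };

/* Describe 'len' bytes starting at free-running index 'from' as 1 or 2
 * linear chunks inside a BUF_SIZE ring buffer. Returns the chunk count. */
static int linearize(unsigned int from, unsigned int len, struct chunk c[2])
{
	unsigned int off = from & (BUF_SIZE - 1);
	unsigned int first = BUF_SIZE - off;

	if (len <= first) {
		c[0].off = off;  c[0].len = len;
		return len ? 1 : 0;
	}
	c[0].off = off;  c[0].len = first;
	c[1].off = 0;    c[1].len = len - first;
	return 2;
}

int main(void)
{
	struct chunk c[2];
	int n = linearize(13, 20, c);   /* 20 bytes of free space from offset 13 */
	int i;

	for (i = 0; i < n; i++)
		printf("chunk %d: offset %u length %u\n", i, c[i].off, c[i].len);
	return 0;
}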
diff --git a/samples/kfifo/inttype-example.c b/samples/kfifo/inttype-example.c
index d6c5b7d9df64..71b2aabca96a 100644
--- a/samples/kfifo/inttype-example.c
+++ b/samples/kfifo/inttype-example.c
@@ -44,10 +44,17 @@ static DECLARE_KFIFO_PTR(test, int);
44static DEFINE_KFIFO(test, int, FIFO_SIZE); 44static DEFINE_KFIFO(test, int, FIFO_SIZE);
45#endif 45#endif
46 46
47static const int expected_result[FIFO_SIZE] = {
48 3, 4, 5, 6, 7, 8, 9, 0,
49 1, 20, 21, 22, 23, 24, 25, 26,
50 27, 28, 29, 30, 31, 32, 33, 34,
51 35, 36, 37, 38, 39, 40, 41, 42,
52};
53
47static int __init testfunc(void) 54static int __init testfunc(void)
48{ 55{
49 int buf[6]; 56 int buf[6];
-	int		i;
+	int		i, j;
51 unsigned int ret; 58 unsigned int ret;
52 59
53 printk(KERN_INFO "int fifo test start\n"); 60 printk(KERN_INFO "int fifo test start\n");
@@ -66,8 +73,13 @@ static int __init testfunc(void)
66 ret = kfifo_in(&test, buf, ret); 73 ret = kfifo_in(&test, buf, ret);
67 printk(KERN_INFO "ret: %d\n", ret); 74 printk(KERN_INFO "ret: %d\n", ret);
68 75
-	for (i = 20; i != 30; i++)
-		kfifo_put(&test, &i);
+	/* skip first element of the fifo */
+	printk(KERN_INFO "skip 1st element\n");
78 kfifo_skip(&test);
79
80 /* put values into the fifo until is full */
81 for (i = 20; kfifo_put(&test, &i); i++)
82 ;
71 83
72 printk(KERN_INFO "queue len: %u\n", kfifo_len(&test)); 84 printk(KERN_INFO "queue len: %u\n", kfifo_len(&test));
73 85
@@ -75,10 +87,20 @@ static int __init testfunc(void)
75 if (kfifo_peek(&test, &i)) 87 if (kfifo_peek(&test, &i))
76 printk(KERN_INFO "%d\n", i); 88 printk(KERN_INFO "%d\n", i);
77 89
-	/* print out all values in the fifo */
-	while (kfifo_get(&test, &i))
-		printk("%d ", i);
-	printk("\n");
+	/* check the correctness of all values in the fifo */
+	j = 0;
+	while (kfifo_get(&test, &i)) {
+		printk(KERN_INFO "item = %d\n", i);
94 if (i != expected_result[j++]) {
95 printk(KERN_WARNING "value mismatch: test failed\n");
96 return -EIO;
97 }
98 }
99 if (j != ARRAY_SIZE(expected_result)) {
100 printk(KERN_WARNING "size mismatch: test failed\n");
101 return -EIO;
102 }
103 printk(KERN_INFO "test passed\n");
82 104
83 return 0; 105 return 0;
84} 106}
@@ -132,7 +154,12 @@ static int __init example_init(void)
132 return ret; 154 return ret;
133 } 155 }
134#endif 156#endif
-	testfunc();
+	if (testfunc() < 0) {
158#ifdef DYNAMIC
159 kfifo_free(&test);
160#endif
161 return -EIO;
162 }
136 163
137 if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) { 164 if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) {
138#ifdef DYNAMIC 165#ifdef DYNAMIC
diff --git a/samples/kfifo/record-example.c b/samples/kfifo/record-example.c
index 32c6e0bda744..e68bd16a5da4 100644
--- a/samples/kfifo/record-example.c
+++ b/samples/kfifo/record-example.c
@@ -55,6 +55,19 @@ typedef STRUCT_KFIFO_REC_1(FIFO_SIZE) mytest;
55static mytest test; 55static mytest test;
56#endif 56#endif
57 57
58static const char *expected_result[] = {
59 "a",
60 "bb",
61 "ccc",
62 "dddd",
63 "eeeee",
64 "ffffff",
65 "ggggggg",
66 "hhhhhhhh",
67 "iiiiiiiii",
68 "jjjjjjjjjj",
69};
70
58static int __init testfunc(void) 71static int __init testfunc(void)
59{ 72{
60 char buf[100]; 73 char buf[100];
@@ -75,6 +88,10 @@ static int __init testfunc(void)
75 kfifo_in(&test, buf, i + 1); 88 kfifo_in(&test, buf, i + 1);
76 } 89 }
77 90
91 /* skip first element of the fifo */
92 printk(KERN_INFO "skip 1st element\n");
93 kfifo_skip(&test);
94
78 printk(KERN_INFO "fifo len: %u\n", kfifo_len(&test)); 95 printk(KERN_INFO "fifo len: %u\n", kfifo_len(&test));
79 96
80 /* show the first record without removing from the fifo */ 97 /* show the first record without removing from the fifo */
@@ -82,11 +99,22 @@ static int __init testfunc(void)
82 if (ret) 99 if (ret)
83 printk(KERN_INFO "%.*s\n", ret, buf); 100 printk(KERN_INFO "%.*s\n", ret, buf);
84 101
-	/* print out all records in the fifo */
+	/* check the correctness of all values in the fifo */
+	i = 0;
 	while (!kfifo_is_empty(&test)) {
 		ret = kfifo_out(&test, buf, sizeof(buf));
-		printk(KERN_INFO "%.*s\n", ret, buf);
+		buf[ret] = '\0';
+		printk(KERN_INFO "item = %.*s\n", ret, buf);
108 if (strcmp(buf, expected_result[i++])) {
109 printk(KERN_WARNING "value mismatch: test failed\n");
110 return -EIO;
111 }
112 }
113 if (i != ARRAY_SIZE(expected_result)) {
114 printk(KERN_WARNING "size mismatch: test failed\n");
115 return -EIO;
89 } 116 }
117 printk(KERN_INFO "test passed\n");
90 118
91 return 0; 119 return 0;
92} 120}
@@ -142,7 +170,12 @@ static int __init example_init(void)
142#else 170#else
143 INIT_KFIFO(test); 171 INIT_KFIFO(test);
144#endif 172#endif
-	testfunc();
+	if (testfunc() < 0) {
174#ifdef DYNAMIC
175 kfifo_free(&test);
176#endif
177 return -EIO;
178 }
146 179
147 if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) { 180 if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) {
148#ifdef DYNAMIC 181#ifdef DYNAMIC
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 0171060b5fd6..e67f05486087 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -159,6 +159,7 @@ my $section_regex; # Find the start of a section
159my $function_regex; # Find the name of a function 159my $function_regex; # Find the name of a function
160 # (return offset and func name) 160 # (return offset and func name)
161my $mcount_regex; # Find the call site to mcount (return offset) 161my $mcount_regex; # Find the call site to mcount (return offset)
162my $mcount_adjust; # Address adjustment to mcount offset
162my $alignment; # The .align value to use for $mcount_section 163my $alignment; # The .align value to use for $mcount_section
163my $section_type; # Section header plus possible alignment command 164my $section_type; # Section header plus possible alignment command
164my $can_use_local = 0; # If we can use local function references 165my $can_use_local = 0; # If we can use local function references
@@ -213,6 +214,7 @@ $section_regex = "Disassembly of section\\s+(\\S+):";
213$function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:"; 214$function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
214$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$"; 215$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$";
215$section_type = '@progbits'; 216$section_type = '@progbits';
217$mcount_adjust = 0;
216$type = ".long"; 218$type = ".long";
217 219
218if ($arch eq "x86_64") { 220if ($arch eq "x86_64") {
@@ -351,6 +353,9 @@ if ($arch eq "x86_64") {
351} elsif ($arch eq "microblaze") { 353} elsif ($arch eq "microblaze") {
352 # Microblaze calls '_mcount' instead of plain 'mcount'. 354 # Microblaze calls '_mcount' instead of plain 'mcount'.
353 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$"; 355 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
356} elsif ($arch eq "blackfin") {
357 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s__mcount\$";
358 $mcount_adjust = -4;
354} else { 359} else {
355 die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD"; 360 die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
356} 361}
@@ -511,7 +516,7 @@ while (<IN>) {
511 } 516 }
512 # is this a call site to mcount? If so, record it to print later 517 # is this a call site to mcount? If so, record it to print later
513 if ($text_found && /$mcount_regex/) { 518 if ($text_found && /$mcount_regex/) {
-	    push(@offsets, hex $1);
+	    push(@offsets, (hex $1) + $mcount_adjust);
515 } 520 }
516} 521}
517 522
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
index 96bab9469d48..19358dc14605 100644
--- a/security/apparmor/path.c
+++ b/security/apparmor/path.c
@@ -62,19 +62,14 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
62 int deleted, connected; 62 int deleted, connected;
63 int error = 0; 63 int error = 0;
64 64
-	/* Get the root we want to resolve too */
+	/* Get the root we want to resolve too, released below */
 	if (flags & PATH_CHROOT_REL) {
 		/* resolve paths relative to chroot */
-		read_lock(&current->fs->lock);
-		root = current->fs->root;
-		/* released below */
-		path_get(&root);
-		read_unlock(&current->fs->lock);
+		get_fs_root(current->fs, &root);
 	} else {
 		/* resolve paths relative to namespace */
 		root.mnt = current->nsproxy->mnt_ns->root;
 		root.dentry = root.mnt->mnt_root;
-		/* released below */
 		path_get(&root);
79 } 74 }
80 75
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 42043f96e54f..4796ddd4e721 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2170,8 +2170,9 @@ static inline void flush_unauthorized_files(const struct cred *cred,
2170 2170
2171 tty = get_current_tty(); 2171 tty = get_current_tty();
2172 if (tty) { 2172 if (tty) {
-		file_list_lock();
+		spin_lock(&tty_files_lock);
 		if (!list_empty(&tty->tty_files)) {
+			struct tty_file_private *file_priv;
2175 struct inode *inode; 2176 struct inode *inode;
2176 2177
2177 /* Revalidate access to controlling tty. 2178 /* Revalidate access to controlling tty.
@@ -2179,14 +2180,16 @@ static inline void flush_unauthorized_files(const struct cred *cred,
2179 than using file_has_perm, as this particular open 2180 than using file_has_perm, as this particular open
2180 file may belong to another process and we are only 2181 file may belong to another process and we are only
2181 interested in the inode-based check here. */ 2182 interested in the inode-based check here. */
-			file = list_first_entry(&tty->tty_files, struct file, f_u.fu_list);
+			file_priv = list_first_entry(&tty->tty_files,
+					struct tty_file_private, list);
+			file = file_priv->file;
2183 inode = file->f_path.dentry->d_inode; 2186 inode = file->f_path.dentry->d_inode;
2184 if (inode_has_perm(cred, inode, 2187 if (inode_has_perm(cred, inode,
2185 FILE__READ | FILE__WRITE, NULL)) { 2188 FILE__READ | FILE__WRITE, NULL)) {
2186 drop_tty = 1; 2189 drop_tty = 1;
2187 } 2190 }
2188 } 2191 }
-		file_list_unlock();
+		spin_unlock(&tty_files_lock);
2190 tty_kref_put(tty); 2193 tty_kref_put(tty);
2191 } 2194 }
2192 /* Reset controlling tty. */ 2195 /* Reset controlling tty. */
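The fix stops treating tty->tty_files as a list of struct file and instead pulls out the surrounding struct tty_file_private with list_first_entry(), then follows its ->file member. list_first_entry() is the usual container_of() arithmetic; a standalone C sketch of that pattern with simplified list and struct definitions:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

struct file { const char *name; };

struct tty_file_private {
	struct file *file;
	struct list_head list;      /* linked into tty->tty_files */
};

int main(void)
{
	struct file f = { "/dev/tty1" };
	struct tty_file_private priv = { &f, { NULL, NULL } };
	struct list_head head = { &priv.list, &priv.list };  /* one-entry list */

	/* list_first_entry(&head, struct tty_file_private, list) */
	struct tty_file_private *first =
		container_of(head.next, struct tty_file_private, list);

	printf("first file on the tty: %s\n", first->file->name);
	return 0;
}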
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index a3b2a6479246..134fc6c2e08d 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -978,6 +978,10 @@ static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
978{ 978{
979 if (substream->runtime->trigger_master != substream) 979 if (substream->runtime->trigger_master != substream)
980 return 0; 980 return 0;
981 /* some drivers might use hw_ptr to recover from the pause -
982 update the hw_ptr now */
983 if (push)
984 snd_pcm_update_hw_ptr(substream);
981 /* The jiffies check in snd_pcm_update_hw_ptr*() is done by 985 /* The jiffies check in snd_pcm_update_hw_ptr*() is done by
982 * a delta betwen the current jiffies, this gives a large enough 986 * a delta betwen the current jiffies, this gives a large enough
983 * delta, effectively to skip the check once. 987 * delta, effectively to skip the check once.
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
index 4203782d7cb7..aff8387c45cf 100644
--- a/sound/pci/emu10k1/emu10k1.c
+++ b/sound/pci/emu10k1/emu10k1.c
@@ -52,6 +52,7 @@ static int max_synth_voices[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 64};
52static int max_buffer_size[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 128}; 52static int max_buffer_size[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 128};
53static int enable_ir[SNDRV_CARDS]; 53static int enable_ir[SNDRV_CARDS];
54static uint subsystem[SNDRV_CARDS]; /* Force card subsystem model */ 54static uint subsystem[SNDRV_CARDS]; /* Force card subsystem model */
55static uint delay_pcm_irq[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 2};
55 56
56module_param_array(index, int, NULL, 0444); 57module_param_array(index, int, NULL, 0444);
57MODULE_PARM_DESC(index, "Index value for the EMU10K1 soundcard."); 58MODULE_PARM_DESC(index, "Index value for the EMU10K1 soundcard.");
@@ -73,6 +74,8 @@ module_param_array(enable_ir, bool, NULL, 0444);
73MODULE_PARM_DESC(enable_ir, "Enable IR."); 74MODULE_PARM_DESC(enable_ir, "Enable IR.");
74module_param_array(subsystem, uint, NULL, 0444); 75module_param_array(subsystem, uint, NULL, 0444);
75MODULE_PARM_DESC(subsystem, "Force card subsystem model."); 76MODULE_PARM_DESC(subsystem, "Force card subsystem model.");
77module_param_array(delay_pcm_irq, uint, NULL, 0444);
78MODULE_PARM_DESC(delay_pcm_irq, "Delay PCM interrupt by specified number of samples (default 0).");
76/* 79/*
77 * Class 0401: 1102:0008 (rev 00) Subsystem: 1102:1001 -> Audigy2 Value Model:SB0400 80 * Class 0401: 1102:0008 (rev 00) Subsystem: 1102:1001 -> Audigy2 Value Model:SB0400
78 */ 81 */
@@ -127,6 +130,7 @@ static int __devinit snd_card_emu10k1_probe(struct pci_dev *pci,
127 &emu)) < 0) 130 &emu)) < 0)
128 goto error; 131 goto error;
129 card->private_data = emu; 132 card->private_data = emu;
133 emu->delay_pcm_irq = delay_pcm_irq[dev] & 0x1f;
130 if ((err = snd_emu10k1_pcm(emu, 0, NULL)) < 0) 134 if ((err = snd_emu10k1_pcm(emu, 0, NULL)) < 0)
131 goto error; 135 goto error;
132 if ((err = snd_emu10k1_pcm_mic(emu, 1, NULL)) < 0) 136 if ((err = snd_emu10k1_pcm_mic(emu, 1, NULL)) < 0)
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index 55b83ef73c63..622bace148e3 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -332,7 +332,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
332 evoice->epcm->ccca_start_addr = start_addr + ccis; 332 evoice->epcm->ccca_start_addr = start_addr + ccis;
333 if (extra) { 333 if (extra) {
334 start_addr += ccis; 334 start_addr += ccis;
-		end_addr += ccis;
+		end_addr += ccis + emu->delay_pcm_irq;
336 } 336 }
337 if (stereo && !extra) { 337 if (stereo && !extra) {
338 snd_emu10k1_ptr_write(emu, CPF, voice, CPF_STEREO_MASK); 338 snd_emu10k1_ptr_write(emu, CPF, voice, CPF_STEREO_MASK);
@@ -360,7 +360,9 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
360 /* Assumption that PT is already 0 so no harm overwriting */ 360 /* Assumption that PT is already 0 so no harm overwriting */
361 snd_emu10k1_ptr_write(emu, PTRX, voice, (send_amount[0] << 8) | send_amount[1]); 361 snd_emu10k1_ptr_write(emu, PTRX, voice, (send_amount[0] << 8) | send_amount[1]);
362 snd_emu10k1_ptr_write(emu, DSL, voice, end_addr | (send_amount[3] << 24)); 362 snd_emu10k1_ptr_write(emu, DSL, voice, end_addr | (send_amount[3] << 24));
-	snd_emu10k1_ptr_write(emu, PSST, voice, start_addr | (send_amount[2] << 24));
+	snd_emu10k1_ptr_write(emu, PSST, voice,
+			      (start_addr + (extra ? emu->delay_pcm_irq : 0)) |
+			      (send_amount[2] << 24));
364 if (emu->card_capabilities->emu_model) 366 if (emu->card_capabilities->emu_model)
365 pitch_target = PITCH_48000; /* Disable interpolators on emu1010 card */ 367 pitch_target = PITCH_48000; /* Disable interpolators on emu1010 card */
366 else 368 else
@@ -732,6 +734,23 @@ static void snd_emu10k1_playback_stop_voice(struct snd_emu10k1 *emu, struct snd_
732 snd_emu10k1_ptr_write(emu, IP, voice, 0); 734 snd_emu10k1_ptr_write(emu, IP, voice, 0);
733} 735}
734 736
737static inline void snd_emu10k1_playback_mangle_extra(struct snd_emu10k1 *emu,
738 struct snd_emu10k1_pcm *epcm,
739 struct snd_pcm_substream *substream,
740 struct snd_pcm_runtime *runtime)
741{
742 unsigned int ptr, period_pos;
743
744 /* try to sychronize the current position for the interrupt
745 source voice */
746 period_pos = runtime->status->hw_ptr - runtime->hw_ptr_interrupt;
747 period_pos %= runtime->period_size;
748 ptr = snd_emu10k1_ptr_read(emu, CCCA, epcm->extra->number);
749 ptr &= ~0x00ffffff;
750 ptr |= epcm->ccca_start_addr + period_pos;
751 snd_emu10k1_ptr_write(emu, CCCA, epcm->extra->number, ptr);
752}
753
735static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream, 754static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream,
736 int cmd) 755 int cmd)
737{ 756{
@@ -753,6 +772,8 @@ static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream,
753 /* follow thru */ 772 /* follow thru */
754 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 773 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
755 case SNDRV_PCM_TRIGGER_RESUME: 774 case SNDRV_PCM_TRIGGER_RESUME:
775 if (cmd == SNDRV_PCM_TRIGGER_PAUSE_RELEASE)
776 snd_emu10k1_playback_mangle_extra(emu, epcm, substream, runtime);
756 mix = &emu->pcm_mixer[substream->number]; 777 mix = &emu->pcm_mixer[substream->number];
757 snd_emu10k1_playback_prepare_voice(emu, epcm->voices[0], 1, 0, mix); 778 snd_emu10k1_playback_prepare_voice(emu, epcm->voices[0], 1, 0, mix);
758 snd_emu10k1_playback_prepare_voice(emu, epcm->voices[1], 0, 0, mix); 779 snd_emu10k1_playback_prepare_voice(emu, epcm->voices[1], 0, 0, mix);
@@ -869,8 +890,9 @@ static snd_pcm_uframes_t snd_emu10k1_playback_pointer(struct snd_pcm_substream *
869#endif 890#endif
870 /* 891 /*
871 printk(KERN_DEBUG 892 printk(KERN_DEBUG
-	       "ptr = 0x%x, buffer_size = 0x%x, period_size = 0x%x\n",
-	       ptr, runtime->buffer_size, runtime->period_size);
+	       "ptr = 0x%lx, buffer_size = 0x%lx, period_size = 0x%lx\n",
+	       (long)ptr, (long)runtime->buffer_size,
+	       (long)runtime->period_size);
874 */ 896 */
875 return ptr; 897 return ptr;
876} 898}
diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
index ffb1ddb8dc28..957a311514c8 100644
--- a/sound/pci/emu10k1/memory.c
+++ b/sound/pci/emu10k1/memory.c
@@ -310,8 +310,10 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
310 if (snd_BUG_ON(!hdr)) 310 if (snd_BUG_ON(!hdr))
311 return NULL; 311 return NULL;
312 312
313 idx = runtime->period_size >= runtime->buffer_size ?
314 (emu->delay_pcm_irq * 2) : 0;
313 mutex_lock(&hdr->block_mutex); 315 mutex_lock(&hdr->block_mutex);
-	blk = search_empty(emu, runtime->dma_bytes);
+	blk = search_empty(emu, runtime->dma_bytes + idx);
315 if (blk == NULL) { 317 if (blk == NULL) {
316 mutex_unlock(&hdr->block_mutex); 318 mutex_unlock(&hdr->block_mutex);
317 return NULL; 319 return NULL;
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 31b5d9eeba68..c424952a734e 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3049,6 +3049,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
3049 SND_PCI_QUIRK(0x1028, 0x02f5, "Dell", 3049 SND_PCI_QUIRK(0x1028, 0x02f5, "Dell",
3050 CXT5066_DELL_LAPTOP), 3050 CXT5066_DELL_LAPTOP),
3051 SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5), 3051 SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
3052 SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO),
3052 SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO), 3053 SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO),
3053 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), 3054 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
3054 SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), 3055 SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 2cd1ae809e46..a4dd04524e43 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -19030,6 +19030,7 @@ static int patch_alc888(struct hda_codec *codec)
19030/* 19030/*
19031 * ALC680 support 19031 * ALC680 support
19032 */ 19032 */
19033#define ALC680_DIGIN_NID ALC880_DIGIN_NID
19033#define ALC680_DIGOUT_NID ALC880_DIGOUT_NID 19034#define ALC680_DIGOUT_NID ALC880_DIGOUT_NID
19034#define alc680_modes alc260_modes 19035#define alc680_modes alc260_modes
19035 19036
@@ -19044,23 +19045,93 @@ static hda_nid_t alc680_adc_nids[3] = {
19044 0x07, 0x08, 0x09 19045 0x07, 0x08, 0x09
19045}; 19046};
19046 19047
19048/*
19049 * Analog capture ADC cgange
19050 */
19051static int alc680_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
19052 struct hda_codec *codec,
19053 unsigned int stream_tag,
19054 unsigned int format,
19055 struct snd_pcm_substream *substream)
19056{
19057 struct alc_spec *spec = codec->spec;
19058 struct auto_pin_cfg *cfg = &spec->autocfg;
19059 unsigned int pre_mic, pre_line;
19060
19061 pre_mic = snd_hda_jack_detect(codec, cfg->input_pins[AUTO_PIN_MIC]);
19062 pre_line = snd_hda_jack_detect(codec, cfg->input_pins[AUTO_PIN_LINE]);
19063
19064 spec->cur_adc_stream_tag = stream_tag;
19065 spec->cur_adc_format = format;
19066
19067 if (pre_mic || pre_line) {
19068 if (pre_mic)
19069 snd_hda_codec_setup_stream(codec, 0x08, stream_tag, 0,
19070 format);
19071 else
19072 snd_hda_codec_setup_stream(codec, 0x09, stream_tag, 0,
19073 format);
19074 } else
19075 snd_hda_codec_setup_stream(codec, 0x07, stream_tag, 0, format);
19076 return 0;
19077}
19078
19079static int alc680_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
19080 struct hda_codec *codec,
19081 struct snd_pcm_substream *substream)
19082{
19083 snd_hda_codec_cleanup_stream(codec, 0x07);
19084 snd_hda_codec_cleanup_stream(codec, 0x08);
19085 snd_hda_codec_cleanup_stream(codec, 0x09);
19086 return 0;
19087}
19088
19089static struct hda_pcm_stream alc680_pcm_analog_auto_capture = {
19090 .substreams = 1, /* can be overridden */
19091 .channels_min = 2,
19092 .channels_max = 2,
19093 /* NID is set in alc_build_pcms */
19094 .ops = {
19095 .prepare = alc680_capture_pcm_prepare,
19096 .cleanup = alc680_capture_pcm_cleanup
19097 },
19098};
19099
19047static struct snd_kcontrol_new alc680_base_mixer[] = { 19100static struct snd_kcontrol_new alc680_base_mixer[] = {
19048 /* output mixer control */ 19101 /* output mixer control */
19049 HDA_CODEC_VOLUME("Front Playback Volume", 0x2, 0x0, HDA_OUTPUT), 19102 HDA_CODEC_VOLUME("Front Playback Volume", 0x2, 0x0, HDA_OUTPUT),
19050 HDA_CODEC_MUTE("Front Playback Switch", 0x14, 0x0, HDA_OUTPUT), 19103 HDA_CODEC_MUTE("Front Playback Switch", 0x14, 0x0, HDA_OUTPUT),
19051 HDA_CODEC_VOLUME("Headphone Playback Volume", 0x4, 0x0, HDA_OUTPUT), 19104 HDA_CODEC_VOLUME("Headphone Playback Volume", 0x4, 0x0, HDA_OUTPUT),
19052 HDA_CODEC_MUTE("Headphone Playback Switch", 0x16, 0x0, HDA_OUTPUT), 19105 HDA_CODEC_MUTE("Headphone Playback Switch", 0x16, 0x0, HDA_OUTPUT),
19106 HDA_CODEC_VOLUME("Int Mic Boost", 0x12, 0, HDA_INPUT),
19053 HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), 19107 HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
19108 HDA_CODEC_VOLUME("Line In Boost", 0x19, 0, HDA_INPUT),
19054 { } 19109 { }
19055}; 19110};
19056 19111
-static struct snd_kcontrol_new alc680_capture_mixer[] = {
-	HDA_CODEC_VOLUME("Capture Volume", 0x07, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("Capture Switch", 0x07, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x08, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x08, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME_IDX("Capture Volume", 2, 0x09, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE_IDX("Capture Switch", 2, 0x09, 0x0, HDA_INPUT),
+static struct hda_bind_ctls alc680_bind_cap_vol = {
+	.ops = &snd_hda_bind_vol,
+	.values = {
+		HDA_COMPOSE_AMP_VAL(0x07, 3, 0, HDA_INPUT),
+		HDA_COMPOSE_AMP_VAL(0x08, 3, 0, HDA_INPUT),
+		HDA_COMPOSE_AMP_VAL(0x09, 3, 0, HDA_INPUT),
+		0
+	},
+};
+
+static struct hda_bind_ctls alc680_bind_cap_switch = {
+	.ops = &snd_hda_bind_sw,
+	.values = {
+		HDA_COMPOSE_AMP_VAL(0x07, 3, 0, HDA_INPUT),
+		HDA_COMPOSE_AMP_VAL(0x08, 3, 0, HDA_INPUT),
+		HDA_COMPOSE_AMP_VAL(0x09, 3, 0, HDA_INPUT),
+		0
+	},
+};
+
+static struct snd_kcontrol_new alc680_master_capture_mixer[] = {
+	HDA_BIND_VOL("Capture Volume", &alc680_bind_cap_vol),
+	HDA_BIND_SW("Capture Switch", &alc680_bind_cap_switch),
19064 { } /* end */ 19135 { } /* end */
19065}; 19136};
19066 19137
@@ -19068,25 +19139,73 @@ static struct snd_kcontrol_new alc680_capture_mixer[] = {
19068 * generic initialization of ADC, input mixers and output mixers 19139 * generic initialization of ADC, input mixers and output mixers
19069 */ 19140 */
19070static struct hda_verb alc680_init_verbs[] = { 19141static struct hda_verb alc680_init_verbs[] = {
-	/* Unmute DAC0-1 and set vol = 0 */
-	{0x02, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-	{0x03, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-	{0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
+	{0x02, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+	{0x03, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+	{0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
 
-	{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40},
-	{0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40},
-	{0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0},
-	{0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24},
-	{0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20},
+	{0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
+	{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+	{0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+	{0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
+	{0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
+	{0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
19081 19152
19082 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, 19153 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
19083 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, 19154 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
19084 {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, 19155 {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
19085 {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, 19156 {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
19086 {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, 19157 {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
19158
19159 {0x16, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
19160 {0x18, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_MIC_EVENT | AC_USRSP_EN},
19161
19087 { } 19162 { }
19088}; 19163};
19089 19164
19165/* toggle speaker-output according to the hp-jack state */
19166static void alc680_base_setup(struct hda_codec *codec)
19167{
19168 struct alc_spec *spec = codec->spec;
19169
19170 spec->autocfg.hp_pins[0] = 0x16;
19171 spec->autocfg.speaker_pins[0] = 0x14;
19172 spec->autocfg.speaker_pins[1] = 0x15;
19173 spec->autocfg.input_pins[AUTO_PIN_MIC] = 0x18;
19174 spec->autocfg.input_pins[AUTO_PIN_LINE] = 0x19;
19175}
19176
19177static void alc680_rec_autoswitch(struct hda_codec *codec)
19178{
19179 struct alc_spec *spec = codec->spec;
19180 struct auto_pin_cfg *cfg = &spec->autocfg;
19181 unsigned int present;
19182 hda_nid_t new_adc;
19183
19184 present = snd_hda_jack_detect(codec, cfg->input_pins[AUTO_PIN_MIC]);
19185
19186 new_adc = present ? 0x8 : 0x7;
19187 __snd_hda_codec_cleanup_stream(codec, !present ? 0x8 : 0x7, 1);
19188 snd_hda_codec_setup_stream(codec, new_adc,
19189 spec->cur_adc_stream_tag, 0,
19190 spec->cur_adc_format);
19191
19192}
19193
19194static void alc680_unsol_event(struct hda_codec *codec,
19195 unsigned int res)
19196{
19197 if ((res >> 26) == ALC880_HP_EVENT)
19198 alc_automute_amp(codec);
19199 if ((res >> 26) == ALC880_MIC_EVENT)
19200 alc680_rec_autoswitch(codec);
19201}
19202
19203static void alc680_inithook(struct hda_codec *codec)
19204{
19205 alc_automute_amp(codec);
19206 alc680_rec_autoswitch(codec);
19207}
19208
19090/* create input playback/capture controls for the given pin */ 19209/* create input playback/capture controls for the given pin */
19091static int alc680_new_analog_output(struct alc_spec *spec, hda_nid_t nid, 19210static int alc680_new_analog_output(struct alc_spec *spec, hda_nid_t nid,
19092 const char *ctlname, int idx) 19211 const char *ctlname, int idx)
@@ -19197,13 +19316,7 @@ static void alc680_auto_init_hp_out(struct hda_codec *codec)
 #define alc680_pcm_analog_capture	alc880_pcm_analog_capture
 #define alc680_pcm_analog_alt_capture	alc880_pcm_analog_alt_capture
 #define alc680_pcm_digital_playback	alc880_pcm_digital_playback
-
-static struct hda_input_mux alc680_capture_source = {
-	.num_items = 1,
-	.items = {
-		{ "Mic", 0x0 },
-	},
-};
+#define alc680_pcm_digital_capture	alc880_pcm_digital_capture
 
 /*
  * BIOS auto configuration
@@ -19218,6 +19331,7 @@ static int alc680_parse_auto_config(struct hda_codec *codec)
 					   alc680_ignore);
 	if (err < 0)
 		return err;
+
 	if (!spec->autocfg.line_outs) {
 		if (spec->autocfg.dig_outs || spec->autocfg.dig_in_pin) {
 			spec->multiout.max_channels = 2;
@@ -19239,8 +19353,6 @@ static int alc680_parse_auto_config(struct hda_codec *codec)
 		add_mixer(spec, spec->kctls.list);
 
 	add_verb(spec, alc680_init_verbs);
-	spec->num_mux_defs = 1;
-	spec->input_mux = &alc680_capture_source;
 
 	err = alc_auto_add_mic_boost(codec);
 	if (err < 0)
@@ -19279,17 +19391,17 @@ static struct snd_pci_quirk alc680_cfg_tbl[] = {
 static struct alc_config_preset alc680_presets[] = {
 	[ALC680_BASE] = {
 		.mixers = { alc680_base_mixer },
-		.cap_mixer = alc680_capture_mixer,
+		.cap_mixer = alc680_master_capture_mixer,
 		.init_verbs = { alc680_init_verbs },
 		.num_dacs = ARRAY_SIZE(alc680_dac_nids),
 		.dac_nids = alc680_dac_nids,
-		.num_adc_nids = ARRAY_SIZE(alc680_adc_nids),
-		.adc_nids = alc680_adc_nids,
-		.hp_nid = 0x04,
 		.dig_out_nid = ALC680_DIGOUT_NID,
 		.num_channel_mode = ARRAY_SIZE(alc680_modes),
 		.channel_mode = alc680_modes,
-		.input_mux = &alc680_capture_source,
+		.unsol_event = alc680_unsol_event,
+		.setup = alc680_base_setup,
+		.init_hook = alc680_inithook,
+
 	},
 };
 
@@ -19333,9 +19445,9 @@ static int patch_alc680(struct hda_codec *codec)
 	setup_preset(codec, &alc680_presets[board_config]);
 
 	spec->stream_analog_playback = &alc680_pcm_analog_playback;
-	spec->stream_analog_capture = &alc680_pcm_analog_capture;
-	spec->stream_analog_alt_capture = &alc680_pcm_analog_alt_capture;
+	spec->stream_analog_capture = &alc680_pcm_analog_auto_capture;
 	spec->stream_digital_playback = &alc680_pcm_digital_playback;
+	spec->stream_digital_capture = &alc680_pcm_digital_capture;
 
 	if (!spec->adc_nids) {
 		spec->adc_nids = alc680_adc_nids;
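
The ALC680 hunks above replace the fixed three-ADC capture mixer and the static input mux with bound capture controls plus jack-driven switching: pins 0x16 (headphone) and 0x18 (external mic) now raise unsolicited events, the HP event drives alc_automute_amp(), and the MIC event moves the capture stream to ADC 0x08 while the mic jack is present and back to ADC 0x07 otherwise. A minimal stand-alone sketch of that dispatch, using placeholder tag values rather than the driver's ALC880_* constants, is:

/*
 * Stand-alone model (an illustration, not the kernel code): bits 31:26 of an
 * unsolicited response carry the tag programmed with
 * AC_VERB_SET_UNSOLICITED_ENABLE; the HP tag triggers automute, the MIC tag
 * re-routes capture.  HP_EVENT_TAG/MIC_EVENT_TAG are placeholder values.
 */
#include <stdbool.h>
#include <stdio.h>

#define HP_EVENT_TAG	0x01u	/* placeholder for ALC880_HP_EVENT */
#define MIC_EVENT_TAG	0x08u	/* placeholder for ALC880_MIC_EVENT */

/* same choice as alc680_rec_autoswitch(): external mic present -> ADC 0x08 */
static unsigned int pick_capture_adc(bool mic_present)
{
	return mic_present ? 0x08 : 0x07;
}

static void handle_unsol_event(unsigned int res, bool hp_present, bool mic_present)
{
	unsigned int tag = res >> 26;

	if (tag == HP_EVENT_TAG)
		printf("HP jack %s: %s speakers\n",
		       hp_present ? "plugged" : "unplugged",
		       hp_present ? "mute" : "unmute");
	if (tag == MIC_EVENT_TAG)
		printf("Mic jack event: capture now on ADC 0x%02x\n",
		       pick_capture_adc(mic_present));
}

int main(void)
{
	handle_unsol_event(HP_EVENT_TAG << 26, true, false);
	handle_unsol_event(MIC_EVENT_TAG << 26, false, true);
	return 0;
}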
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
index f64fb7d988cb..ad5202efd7a9 100644
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -1224,15 +1224,14 @@ static int try_to_load_firmware(struct cmdif *cif, struct snd_riptide *chip)
 		    firmware.firmware.ASIC, firmware.firmware.CODEC,
 		    firmware.firmware.AUXDSP, firmware.firmware.PROG);
 
+	if (!chip)
+		return 1;
+
 	for (i = 0; i < FIRMWARE_VERSIONS; i++) {
 		if (!memcmp(&firmware_versions[i], &firmware, sizeof(firmware)))
-			break;
-	}
-	if (i >= FIRMWARE_VERSIONS)
-		return 0; /* no match */
+			return 1; /* OK */
 
-	if (!chip)
-		return 1; /* OK */
+	}
 
 	snd_printdd("Writing Firmware\n");
 	if (!chip->fw_entry) {
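
The riptide hunk reorders try_to_load_firmware(): a call without a chip instance now succeeds as soon as the firmware header has been read, and an image whose version is already listed in firmware_versions[] returns success straight from the match loop, so only unknown versions fall through to the firmware write. A rough user-space model of that decision, with an invented version table, might be:

/*
 * User-space model of the reordered firmware check -- not the driver code.
 * The version table is invented; only the control flow mirrors the hunk.
 */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct fw_version { unsigned char asic, codec, auxdsp, prog; };

static const struct fw_version known_versions[] = {
	{ 3, 2, 1, 0 },		/* illustrative entries only */
	{ 4, 2, 1, 0 },
};

/* true when the caller still has to write a firmware image */
static bool needs_firmware_write(const struct fw_version *fw, const void *chip)
{
	size_t i;

	if (!chip)
		return false;	/* probe-only call: reading the header was enough */

	for (i = 0; i < sizeof(known_versions) / sizeof(known_versions[0]); i++)
		if (!memcmp(&known_versions[i], fw, sizeof(*fw)))
			return false;	/* already running a known-good version */

	return true;		/* unknown version: go on to "Writing Firmware" */
}

int main(void)
{
	const struct fw_version current = { 3, 2, 1, 0 };
	int chip_token = 1;	/* any non-NULL pointer stands in for the chip */

	return needs_firmware_write(&current, &chip_token) ? 1 : 0;
}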
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index 4e212ed62ea6..f8154e661524 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -178,13 +178,6 @@ static int wm8776_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 	case SND_SOC_DAIFMT_LEFT_J:
 		iface |= 0x0001;
 		break;
-		/* FIXME: CHECK A/B */
-	case SND_SOC_DAIFMT_DSP_A:
-		iface |= 0x0003;
-		break;
-	case SND_SOC_DAIFMT_DSP_B:
-		iface |= 0x0007;
-		break;
 	default:
 		return -EINVAL;
 	}
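
With the DSP_A/DSP_B cases (marked "FIXME: CHECK A/B") removed, wm8776_set_fmt() now rejects those formats with -EINVAL instead of programming unverified interface bits. A trimmed sketch of the resulting switch shape, using assumed names and register values rather than the codec's, is:

/* Sketch of the resulting switch shape; enum and bit values are assumptions. */
#include <errno.h>

enum dai_fmt { FMT_I2S, FMT_RIGHT_J, FMT_LEFT_J, FMT_DSP_A, FMT_DSP_B };

static int set_iface_format(enum dai_fmt fmt, unsigned int *iface)
{
	switch (fmt) {
	case FMT_I2S:
		*iface |= 0x0002;	/* assumed I2S bit */
		break;
	case FMT_RIGHT_J:
		break;			/* assumed to need no extra bits */
	case FMT_LEFT_J:
		*iface |= 0x0001;	/* value taken from the hunk */
		break;
	default:
		return -EINVAL;		/* DSP_A/DSP_B and anything else are rejected */
	}
	return 0;
}

int main(void)
{
	unsigned int iface = 0;

	return set_iface_format(FMT_DSP_A, &iface) == -EINVAL ? 0 : 1;
}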
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 41abb90df50d..4f1fa77c1feb 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -5,6 +5,12 @@ endif
 # The default target of this Makefile is...
 all::
 
+ifneq ($(OUTPUT),)
+# check that the output directory actually exists
+OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
+$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
+endif
+
 # Define V=1 to have a more verbose compile.
 # Define V=2 to have an even more verbose compile.
 #
@@ -157,10 +163,6 @@ all::
 #
 # Define NO_DWARF if you do not want debug-info analysis feature at all.
 
-$(shell sh -c 'mkdir -p $(OUTPUT)scripts/{perl,python}/Perf-Trace-Util/' 2> /dev/null)
-$(shell sh -c 'mkdir -p $(OUTPUT)util/{ui/browsers,scripting-engines}/' 2> /dev/null)
-$(shell sh -c 'mkdir $(OUTPUT)bench' 2> /dev/null)
-
 $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
 	@$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
 -include $(OUTPUT)PERF-VERSION-FILE
@@ -186,8 +188,6 @@ ifeq ($(ARCH),x86_64)
 	ARCH := x86
 endif
 
-$(shell sh -c 'mkdir -p $(OUTPUT)arch/$(ARCH)/util/' 2> /dev/null)
-
 # CFLAGS and LDFLAGS are for the users to override from the command line.
 
 #
@@ -268,6 +268,7 @@ export prefix bindir sharedir sysconfdir
 CC = $(CROSS_COMPILE)gcc
 AR = $(CROSS_COMPILE)ar
 RM = rm -f
+MKDIR = mkdir
 TAR = tar
 FIND = find
 INSTALL = install
@@ -838,6 +839,7 @@ ifndef V
 	QUIET_CC       = @echo '   ' CC $@;
 	QUIET_AR       = @echo '   ' AR $@;
 	QUIET_LINK     = @echo '   ' LINK $@;
+	QUIET_MKDIR    = @echo '   ' MKDIR $@;
 	QUIET_BUILT_IN = @echo '   ' BUILTIN $@;
 	QUIET_GEN      = @echo '   ' GEN $@;
 	QUIET_SUBDIR0  = +@subdir=
@@ -935,15 +937,15 @@ $(OUTPUT)common-cmds.h: $(wildcard Documentation/perf-*.txt)
 	$(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@
 
 $(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh
-	$(QUIET_GEN)$(RM) $@ $@+ && \
+	$(QUIET_GEN)$(RM) $(OUTPUT)$@ $(OUTPUT)$@+ && \
 	sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \
 	    -e 's|@SHELL_PATH@|$(SHELL_PATH_SQ)|' \
 	    -e 's|@@PERL@@|$(PERL_PATH_SQ)|g' \
 	    -e 's/@@PERF_VERSION@@/$(PERF_VERSION)/g' \
 	    -e 's/@@NO_CURL@@/$(NO_CURL)/g' \
-	    $@.sh >$@+ && \
-	chmod +x $@+ && \
-	mv $@+ $(OUTPUT)$@
+	    $@.sh > $(OUTPUT)$@+ && \
+	chmod +x $(OUTPUT)$@+ && \
+	mv $(OUTPUT)$@+ $(OUTPUT)$@
 
 configure: configure.ac
 	$(QUIET_GEN)$(RM) $@ $<+ && \
@@ -1012,6 +1014,14 @@ $(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H)
 $(patsubst perf-%$X,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h)
 builtin-revert.o wt-status.o: wt-status.h
 
+# we compile into subdirectories. if the target directory is not the source directory, they might not exists. So
+# we depend the various files onto their directories.
+DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h
+$(DIRECTORY_DEPS): $(sort $(dir $(DIRECTORY_DEPS)))
+# In the second step, we make a rule to actually create these directories
+$(sort $(dir $(DIRECTORY_DEPS))):
+	$(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null
+
 $(LIB_FILE): $(LIB_OBJS)
 	$(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS)
 
diff --git a/tools/perf/feature-tests.mak b/tools/perf/feature-tests.mak
index ddb68e601f0e..7a7b60859053 100644
--- a/tools/perf/feature-tests.mak
+++ b/tools/perf/feature-tests.mak
@@ -113,7 +113,7 @@ endef
 # try-cc
 # Usage: option = $(call try-cc, source-to-build, cc-options)
 try-cc = $(shell sh -c \
-	'TMP="$(TMPOUT).$$$$"; \
+	'TMP="$(OUTPUT)$(TMPOUT).$$$$"; \
 	 echo "$(1)" | \
 	 $(CC) -x c - $(2) -o "$$TMP" > /dev/null 2>&1 && echo y; \
 	 rm -f "$$TMP"')
diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c
index 55ff792459ac..a90273e63f4f 100644
--- a/tools/perf/util/ui/browsers/annotate.c
+++ b/tools/perf/util/ui/browsers/annotate.c
@@ -146,6 +146,7 @@ static int annotate_browser__run(struct annotate_browser *self,
 		return -1;
 
 	newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT);
+	newtFormAddHotKey(self->b.form, NEWT_KEY_RIGHT);
 
 	nd = self->curr_hot;
 	if (nd) {
@@ -178,7 +179,7 @@ static int annotate_browser__run(struct annotate_browser *self,
 	}
 out:
 	ui_browser__hide(&self->b);
-	return 0;
+	return es->u.key;
 }
 
 int hist_entry__tui_annotate(struct hist_entry *self)
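
The annotate-browser change registers NEWT_KEY_RIGHT as an additional hot key and returns the key that closed the form (es->u.key) instead of a constant 0, presumably so the caller can act on which key ended the session. A hypothetical caller, with placeholder key codes and a stand-in for annotate_browser__run(), might look like:

/*
 * Hypothetical caller -- illustrates why returning the exit key is useful.
 * Key codes and run_annotate_browser() are placeholders, not the perf code.
 */
#include <stdio.h>

enum { EXIT_KEY_LEFT = 1, EXIT_KEY_RIGHT = 2, EXIT_KEY_OTHER = 0 };

static int run_annotate_browser(int simulated_key)
{
	return simulated_key;	/* stands in for annotate_browser__run() */
}

int main(void)
{
	switch (run_annotate_browser(EXIT_KEY_RIGHT)) {
	case EXIT_KEY_LEFT:
		puts("caller: move to the previous entry and annotate it");
		break;
	case EXIT_KEY_RIGHT:
		puts("caller: move to the next entry and annotate it");
		break;
	default:
		puts("caller: return to the histogram view");
		break;
	}
	return 0;
}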