-rw-r--r--  Documentation/filesystems/proc.txt | 1
-rw-r--r--  Documentation/power/devices.txt | 67
-rw-r--r--  Documentation/power/runtime_pm.txt | 5
-rw-r--r--  MAINTAINERS | 10
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/alpha/include/asm/mmzone.h | 1
-rw-r--r--  arch/arm/boot/compressed/head.S | 14
-rw-r--r--  arch/arm/include/asm/assembler.h | 4
-rw-r--r--  arch/arm/include/asm/entry-macro-multi.S | 2
-rw-r--r--  arch/arm/kernel/module.c | 13
-rw-r--r--  arch/arm/kernel/smp.c | 6
-rw-r--r--  arch/arm/mach-h720x/Kconfig | 2
-rw-r--r--  arch/arm/mach-msm/timer.c | 14
-rw-r--r--  arch/arm/mm/proc-v7.S | 16
-rw-r--r--  arch/arm/plat-iop/cp6.c | 1
-rw-r--r--  arch/m32r/include/asm/mmzone.h | 8
-rw-r--r--  arch/mn10300/include/asm/uaccess.h | 1
-rw-r--r--  arch/parisc/include/asm/mmzone.h | 7
-rw-r--r--  arch/powerpc/include/asm/mmzone.h | 7
-rw-r--r--  arch/s390/Kconfig | 1
-rw-r--r--  arch/s390/kernel/smp.c | 4
-rw-r--r--  arch/s390/oprofile/init.c | 8
-rw-r--r--  arch/sh/include/asm/mmzone.h | 4
-rw-r--r--  arch/sparc/include/asm/mmzone.h | 2
-rw-r--r--  arch/tile/include/asm/mmzone.h | 11
-rw-r--r--  arch/x86/include/asm/memblock.h | 2
-rw-r--r--  arch/x86/include/asm/mmzone_32.h | 11
-rw-r--r--  arch/x86/include/asm/mmzone_64.h | 3
-rw-r--r--  arch/x86/include/asm/pvclock.h | 9
-rw-r--r--  arch/x86/kvm/mmu.c | 2
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 2
-rw-r--r--  arch/x86/kvm/vmx.c | 3
-rw-r--r--  arch/x86/mm/memblock.c | 4
-rw-r--r--  arch/x86/pci/acpi.c | 2
-rw-r--r--  arch/x86/platform/efi/efi.c | 29
-rw-r--r--  arch/x86/xen/enlighten.c | 9
-rw-r--r--  arch/x86/xen/mmu.c | 12
-rw-r--r--  arch/x86/xen/setup.c | 10
-rw-r--r--  arch/x86/xen/smp.c | 7
-rw-r--r--  block/blk-throttle.c | 4
-rw-r--r--  block/cfq-iosched.c | 16
-rw-r--r--  block/genhd.c | 79
-rw-r--r--  drivers/ata/libata-core.c | 6
-rw-r--r--  drivers/ata/libata-scsi.c | 6
-rw-r--r--  drivers/ata/pata_marvell.c | 3
-rw-r--r--  drivers/ata/sata_dwc_460ex.c | 2
-rw-r--r--  drivers/base/power/clock_ops.c | 4
-rw-r--r--  drivers/base/power/main.c | 28
-rw-r--r--  drivers/bluetooth/btmrvl_debugfs.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 59
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_perf.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 40
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 3
-rw-r--r--  drivers/hid/hid-core.c | 1
-rw-r--r--  drivers/hid/hid-ids.h | 1
-rw-r--r--  drivers/hid/hid-multitouch.c | 12
-rw-r--r--  drivers/hwmon/asus_atk0110.c | 5
-rw-r--r--  drivers/hwmon/coretemp.c | 4
-rw-r--r--  drivers/hwmon/ibmaem.c | 2
-rw-r--r--  drivers/hwmon/ibmpex.c | 1
-rw-r--r--  drivers/hwmon/s3c-hwmon.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 46
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c | 4
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 5
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c | 25
-rw-r--r--  drivers/infiniband/hw/qib/qib_intr.c | 6
-rw-r--r--  drivers/input/evdev.c | 3
-rw-r--r--  drivers/input/input.c | 2
-rw-r--r--  drivers/input/keyboard/omap-keypad.c | 1
-rw-r--r--  drivers/input/keyboard/sh_keysc.c | 2
-rw-r--r--  drivers/input/mousedev.c | 4
-rw-r--r--  drivers/isdn/gigaset/interface.c | 4
-rw-r--r--  drivers/misc/sgi-xp/xpnet.c | 6
-rw-r--r--  drivers/net/3c503.c | 3
-rw-r--r--  drivers/net/bfin_mac.c | 20
-rw-r--r--  drivers/net/bonding/bond_main.c | 1
-rw-r--r--  drivers/net/fs_enet/mac-fcc.c | 2
-rw-r--r--  drivers/net/hp100.c | 4
-rw-r--r--  drivers/net/hplance.c | 2
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 4
-rw-r--r--  drivers/net/phy/Kconfig | 1
-rw-r--r--  drivers/net/phy/dp83640.c | 24
-rw-r--r--  drivers/net/ppp_async.c | 4
-rw-r--r--  drivers/net/pxa168_eth.c | 2
-rw-r--r--  drivers/net/r8169.c | 10
-rw-r--r--  drivers/net/tun.c | 24
-rw-r--r--  drivers/net/usb/Kconfig | 10
-rw-r--r--  drivers/net/usb/Makefile | 1
-rw-r--r--  drivers/net/usb/kalmia.c | 384
-rw-r--r--  drivers/net/wan/farsync.c | 4
-rw-r--r--  drivers/net/wireless/mwifiex/cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/mwl8k.c | 4
-rw-r--r--  drivers/pci/pci-driver.c | 4
-rw-r--r--  drivers/pci/pci.c | 2
-rw-r--r--  drivers/pci/probe.c | 2
-rw-r--r--  drivers/rtc/rtc-vt8500.c | 45
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 13
-rw-r--r--  drivers/target/target_core_configfs.c | 24
-rw-r--r--  drivers/target/target_core_device.c | 5
-rw-r--r--  drivers/target/target_core_pr.c | 6
-rw-r--r--  drivers/target/target_core_tmr.c | 8
-rw-r--r--  drivers/target/target_core_transport.c | 6
-rw-r--r--  drivers/target/tcm_fc/tcm_fc.h | 2
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 64
-rw-r--r--  drivers/target/tcm_fc/tfc_io.c | 2
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c | 4
-rw-r--r--  drivers/tty/serial/mrst_max3110.c | 5
-rw-r--r--  drivers/usb/core/driver.c | 6
-rw-r--r--  drivers/xen/events.c | 2
-rw-r--r--  fs/bad_inode.c | 3
-rw-r--r--  fs/block_dev.c | 14
-rw-r--r--  fs/btrfs/ctree.h | 16
-rw-r--r--  fs/btrfs/delayed-inode.c | 136
-rw-r--r--  fs/btrfs/delayed-inode.h | 6
-rw-r--r--  fs/btrfs/disk-io.c | 12
-rw-r--r--  fs/btrfs/extent-tree.c | 8
-rw-r--r--  fs/btrfs/free-space-cache.c | 9
-rw-r--r--  fs/btrfs/inode.c | 14
-rw-r--r--  fs/btrfs/ioctl.c | 2
-rw-r--r--  fs/btrfs/relocation.c | 30
-rw-r--r--  fs/btrfs/sysfs.c | 146
-rw-r--r--  fs/btrfs/transaction.c | 114
-rw-r--r--  fs/btrfs/tree-log.c | 2
-rw-r--r--  fs/cifs/Kconfig | 2
-rw-r--r--  fs/cifs/cifs_fs_sb.h | 1
-rw-r--r--  fs/cifs/cifsfs.c | 159
-rw-r--r--  fs/cifs/cifsproto.h | 8
-rw-r--r--  fs/cifs/connect.c | 88
-rw-r--r--  fs/cifs/smbencrypt.c | 6
-rw-r--r--  fs/coda/pioctl.c | 2
-rw-r--r--  fs/ext4/ext4_extents.h | 9
-rw-r--r--  fs/ext4/extents.c | 42
-rw-r--r--  fs/ext4/inode.c | 2
-rw-r--r--  fs/ext4/mballoc.c | 8
-rw-r--r--  fs/ext4/move_extent.c | 10
-rw-r--r--  fs/ext4/super.c | 15
-rw-r--r--  fs/jbd2/checkpoint.c | 28
-rw-r--r--  fs/jbd2/commit.c | 33
-rw-r--r--  fs/jbd2/journal.c | 91
-rw-r--r--  fs/jbd2/transaction.c | 69
-rw-r--r--  fs/jfs/file.c | 6
-rw-r--r--  fs/jfs/jfs_imap.c | 12
-rw-r--r--  fs/jfs/jfs_incore.h | 3
-rw-r--r--  fs/jfs/resize.c | 2
-rw-r--r--  fs/lockd/clntproc.c | 8
-rw-r--r--  fs/logfs/dir.c | 8
-rw-r--r--  fs/namei.c | 6
-rw-r--r--  fs/nfs/inode.c | 6
-rw-r--r--  fs/nfs/internal.h | 11
-rw-r--r--  fs/nfs/nfs4filelayout.c | 21
-rw-r--r--  fs/nfs/nfs4proc.c | 45
-rw-r--r--  fs/nfs/nfs4xdr.c | 26
-rw-r--r--  fs/nfs/objlayout/objio_osd.c | 4
-rw-r--r--  fs/nfs/objlayout/objlayout.c | 2
-rw-r--r--  fs/nfs/pagelist.c | 3
-rw-r--r--  fs/nfs/pnfs.c | 44
-rw-r--r--  fs/nfs/pnfs.h | 1
-rw-r--r--  fs/nfs/pnfs_dev.c | 17
-rw-r--r--  fs/nfsd/Kconfig | 1
-rw-r--r--  fs/nfsd/nfsctl.c | 19
-rw-r--r--  fs/nfsd/vfs.c | 19
-rw-r--r--  fs/nilfs2/inode.c | 7
-rw-r--r--  fs/omfs/file.c | 1
-rw-r--r--  fs/proc/base.c | 6
-rw-r--r--  fs/proc/proc_sysctl.c | 3
-rw-r--r--  fs/reiserfs/xattr.c | 2
-rw-r--r--  fs/timerfd.c | 5
-rw-r--r--  fs/ubifs/super.c | 1
-rw-r--r--  fs/xfs/xfs_attr.c | 7
-rw-r--r--  fs/xfs/xfs_iget.c | 13
-rw-r--r--  fs/xfs/xfs_inode.h | 10
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 7
-rw-r--r--  include/linux/blk_types.h | 2
-rw-r--r--  include/linux/blktrace_api.h | 3
-rw-r--r--  include/linux/clocksource.h | 1
-rw-r--r--  include/linux/device.h | 4
-rw-r--r--  include/linux/device_cgroup.h | 10
-rw-r--r--  include/linux/fs.h | 2
-rw-r--r--  include/linux/input/sh_keysc.h | 2
-rw-r--r--  include/linux/interrupt.h | 1
-rw-r--r--  include/linux/jbd2.h | 2
-rw-r--r--  include/linux/mmzone.h | 7
-rw-r--r--  include/linux/nfs_page.h | 3
-rw-r--r--  include/linux/nfs_xdr.h | 1
-rw-r--r--  include/linux/pm.h | 3
-rw-r--r--  include/linux/smp.h | 5
-rw-r--r--  include/linux/sunrpc/gss_krb5_enctypes.h | 4
-rw-r--r--  include/linux/sunrpc/sched.h | 3
-rw-r--r--  include/net/netfilter/nf_conntrack.h | 6
-rw-r--r--  include/sound/soc.h | 3
-rw-r--r--  include/trace/events/ext4.h | 179
-rw-r--r--  include/trace/events/irq.h | 3
-rw-r--r--  init/calibrate.c | 14
-rw-r--r--  init/main.c | 1
-rw-r--r--  kernel/power/user.c | 4
-rw-r--r--  kernel/rcutree.c | 398
-rw-r--r--  kernel/rcutree.h | 12
-rw-r--r--  kernel/rcutree_plugin.h | 419
-rw-r--r--  kernel/rcutree_trace.c | 32
-rw-r--r--  kernel/smp.c | 5
-rw-r--r--  kernel/softirq.c | 2
-rw-r--r--  kernel/time/alarmtimer.c | 158
-rw-r--r--  kernel/time/clocksource.c | 24
-rw-r--r--  kernel/trace/trace_printk.c | 5
-rw-r--r--  mm/memory_hotplug.c | 4
-rw-r--r--  net/8021q/vlan.c | 2
-rw-r--r--  net/bluetooth/hci_event.c | 18
-rw-r--r--  net/bluetooth/l2cap_sock.c | 1
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 1
-rw-r--r--  net/bluetooth/sco.c | 13
-rw-r--r--  net/bridge/br_device.c | 1
-rw-r--r--  net/bridge/br_multicast.c | 4
-rw-r--r--  net/caif/cfmuxl.c | 2
-rw-r--r--  net/ieee802154/nl-phy.c | 3
-rw-r--r--  net/ipv4/af_inet.c | 1
-rw-r--r--  net/ipv4/inet_diag.c | 14
-rw-r--r--  net/ipv4/netfilter/ip_queue.c | 3
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_ecn.c | 7
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 4
-rw-r--r--  net/ipv4/ping.c | 1
-rw-r--r--  net/ipv4/route.c | 4
-rw-r--r--  net/ipv4/tcp_ipv4.c | 1
-rw-r--r--  net/ipv6/netfilter/ip6_queue.c | 3
-rw-r--r--  net/ipv6/tcp_ipv6.c | 1
-rw-r--r--  net/netfilter/ipvs/ip_vs_conn.c | 10
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 1
-rw-r--r--  net/netfilter/nfnetlink_log.c | 3
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 3
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 4
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_mech.c | 3
-rw-r--r--  net/sunrpc/clnt.c | 5
-rw-r--r--  net/sunrpc/sched.c | 1
-rw-r--r--  security/device_cgroup.c | 8
-rw-r--r--  security/keys/request_key.c | 3
-rw-r--r--  sound/pci/asihpi/asihpi.c | 1
-rw-r--r--  sound/pci/hda/patch_realtek.c | 13
-rw-r--r--  sound/pci/hda/patch_via.c | 35
-rw-r--r--  sound/soc/codecs/wm8991.c | 1
-rw-r--r--  sound/soc/imx/Kconfig | 7
-rw-r--r--  sound/soc/imx/imx-pcm-dma-mx2.c | 2
-rw-r--r--  sound/soc/imx/imx-ssi.c | 2
-rw-r--r--  sound/soc/pxa/pxa2xx-pcm.c | 4
-rw-r--r--  sound/soc/soc-cache.c | 3
-rw-r--r--  tools/perf/Makefile | 2
-rw-r--r--  tools/perf/util/trace-event-parse.c | 1
260 files changed, 2644 insertions, 1817 deletions
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index f48178024067..db3b1aba32a3 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -843,6 +843,7 @@ Provides counts of softirq handlers serviced since boot time, for each cpu.
   TASKLET:          0          0          0        290
     SCHED:      27035      26983      26971      26746
   HRTIMER:          0          0          0          0
+      RCU:       1678       1769       2178       2250


 1.3 IDE devices in /proc/ide
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 88880839ece4..64565aac6e40 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -520,59 +520,20 @@ Support for power domains is provided through the pwr_domain field of struct
 device. This field is a pointer to an object of type struct dev_power_domain,
 defined in include/linux/pm.h, providing a set of power management callbacks
 analogous to the subsystem-level and device driver callbacks that are executed
-for the given device during all power transitions, in addition to the respective
-subsystem-level callbacks. Specifically, the power domain "suspend" callbacks
-(i.e. ->runtime_suspend(), ->suspend(), ->freeze(), ->poweroff(), etc.) are
-executed after the analogous subsystem-level callbacks, while the power domain
-"resume" callbacks (i.e. ->runtime_resume(), ->resume(), ->thaw(), ->restore,
-etc.) are executed before the analogous subsystem-level callbacks. Error codes
-returned by the "suspend" and "resume" power domain callbacks are ignored.
+for the given device during all power transitions, instead of the respective
+subsystem-level callbacks. Specifically, if a device's pm_domain pointer is
+not NULL, the ->suspend() callback from the object pointed to by it will be
+executed instead of its subsystem's (e.g. bus type's) ->suspend() callback and
+anlogously for all of the remaining callbacks. In other words, power management
+domain callbacks, if defined for the given device, always take precedence over
+the callbacks provided by the device's subsystem (e.g. bus type).

-Power domain ->runtime_idle() callback is executed before the subsystem-level
--->runtime_idle() callback and the result returned by it is not ignored. Namely,
-if it returns error code, the subsystem-level ->runtime_idle() callback will not
-be called and the helper function rpm_idle() executing it will return error
-code. This mechanism is intended to help platforms where saving device state
-is a time consuming operation and should only be carried out if all devices
-in the power domain are idle, before turning off the shared power resource(s).
-Namely, the power domain ->runtime_idle() callback may return error code until
-the pm_runtime_idle() helper (or its asychronous version) has been called for
-all devices in the power domain (it is recommended that the returned error code
-be -EBUSY in those cases), preventing the subsystem-level ->runtime_idle()
-callback from being run prematurely.
-
-The support for device power domains is only relevant to platforms needing to
-use the same subsystem-level (e.g. platform bus type) and device driver power
-management callbacks in many different power domain configurations and wanting
-to avoid incorporating the support for power domains into the subsystem-level
-callbacks. The other platforms need not implement it or take it into account
-in any way.
-
-
-System Devices
---------------
-System devices (sysdevs) follow a slightly different API, which can be found in
-
-	include/linux/sysdev.h
-	drivers/base/sys.c
-
-System devices will be suspended with interrupts disabled, and after all other
-devices have been suspended. On resume, they will be resumed before any other
-devices, and also with interrupts disabled. These things occur in special
-"sysdev_driver" phases, which affect only system devices.
-
-Thus, after the suspend_noirq (or freeze_noirq or poweroff_noirq) phase, when
-the non-boot CPUs are all offline and IRQs are disabled on the remaining online
-CPU, then a sysdev_driver.suspend phase is carried out, and the system enters a
-sleep state (or a system image is created). During resume (or after the image
-has been created or loaded) a sysdev_driver.resume phase is carried out, IRQs
-are enabled on the only online CPU, the non-boot CPUs are enabled, and the
-resume_noirq (or thaw_noirq or restore_noirq) phase begins.
-
-Code to actually enter and exit the system-wide low power state sometimes
-involves hardware details that are only known to the boot firmware, and
-may leave a CPU running software (from SRAM or flash memory) that monitors
-the system and manages its wakeup sequence.
+The support for device power management domains is only relevant to platforms
+needing to use the same device driver power management callbacks in many
+different power domain configurations and wanting to avoid incorporating the
+support for power domains into subsystem-level callbacks, for example by
+modifying the platform bus type. Other platforms need not implement it or take
+it into account in any way.


 Device Low Power (suspend) States
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 654097b130b4..22accb3eb40e 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -566,11 +566,6 @@ to do this is:
 	pm_runtime_set_active(dev);
 	pm_runtime_enable(dev);

-The PM core always increments the run-time usage counter before calling the
-->prepare() callback and decrements it after calling the ->complete() callback.
-Hence disabling run-time PM temporarily like this will not cause any run-time
-suspend callbacks to be lost.
-
 7. Generic subsystem callbacks

 Subsystems may wish to conserve code space by using the set of generic power
diff --git a/MAINTAINERS b/MAINTAINERS
index 502f2dd761eb..f0358cd91de3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2291,8 +2291,7 @@ F: drivers/scsi/eata_pio.*
 
 EBTABLES
 M:	Bart De Schuymer <bart.de.schuymer@pandora.be>
-L:	ebtables-user@lists.sourceforge.net
-L:	ebtables-devel@lists.sourceforge.net
+L:	netfilter-devel@vger.kernel.org
 W:	http://ebtables.sourceforge.net/
 S:	Maintained
 F:	include/linux/netfilter_bridge/ebt_*.h
@@ -7007,6 +7006,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.
 S:	Maintained
 F:	drivers/platform/x86

+X86 MCE INFRASTRUCTURE
+M:	Tony Luck <tony.luck@intel.com>
+M:	Borislav Petkov <bp@amd64.org>
+L:	linux-edac@vger.kernel.org
+S:	Maintained
+F:	arch/x86/kernel/cpu/mcheck/*
+
 XEN HYPERVISOR INTERFACE
 M:	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
diff --git a/Makefile b/Makefile
index 0499c2ee8541..41330a06e4ec 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Sneaky Weasel

 # *DOCUMENTATION*
diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h
index 8af56ce346ad..445dc42e0334 100644
--- a/arch/alpha/include/asm/mmzone.h
+++ b/arch/alpha/include/asm/mmzone.h
@@ -56,7 +56,6 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
  * Given a kernel address, find the home node of the underlying memory.
  */
 #define kvaddr_to_nid(kaddr)	pa_to_nid(__pa(kaddr))
-#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)

 /*
  * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 942fad97e447..940b20178107 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -597,6 +597,8 @@ __common_mmu_cache_on:
 		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
 #endif

+#define PROC_ENTRY_SIZE (4*5)
+
 /*
  * Here follow the relocatable cache support functions for the
  * various processors.  This is a generic hook for locating an
@@ -624,7 +626,7 @@ call_cache_fn: adr r12, proc_types
  ARM(		addeq	pc, r12, r3		) @ call cache function
  THUMB(		addeq	r12, r3			)
  THUMB(		moveq	pc, r12			) @ call cache function
-		add	r12, r12, #4*5
+		add	r12, r12, #PROC_ENTRY_SIZE
 		b	1b

 /*
@@ -794,6 +796,16 @@ proc_types:
 
 		.size	proc_types, . - proc_types

+		/*
+		 * If you get a "non-constant expression in ".if" statement"
+		 * error from the assembler on this line, check that you have
+		 * not accidentally written a "b" instruction where you should
+		 * have written W(b).
+		 */
+		.if (. - proc_types) % PROC_ENTRY_SIZE != 0
+		.error "The size of one or more proc_types entries is wrong."
+		.endif
+
 /*
  * Turn off the Cache and MMU.  ARMv3 does not support
  * reading the control register, but ARMv4 does.
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index bc2d2d75f706..65c3f2474f5e 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -13,6 +13,9 @@
  * Do not include any C declarations in this file - it is included by
  * assembler source.
  */
+#ifndef __ASM_ASSEMBLER_H__
+#define __ASM_ASSEMBLER_H__
+
 #ifndef __ASSEMBLY__
 #error "Only include this from assembly code"
 #endif
@@ -290,3 +293,4 @@
 	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
 	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
 	.endm
+#endif /* __ASM_ASSEMBLER_H__ */
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S
index ec0bbf79c71f..2da8547de6d6 100644
--- a/arch/arm/include/asm/entry-macro-multi.S
+++ b/arch/arm/include/asm/entry-macro-multi.S
@@ -1,3 +1,5 @@
+#include <asm/assembler.h>
+
 /*
  * Interrupt handling.  Preserves r7, r8, r9
  */
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index fee7c36349eb..016d6a0830a3 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -193,8 +193,17 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			offset -= 0x02000000;
 		offset += sym->st_value - loc;

-		/* only Thumb addresses allowed (no interworking) */
-		if (!(offset & 1) ||
+		/*
+		 * For function symbols, only Thumb addresses are
+		 * allowed (no interworking).
+		 *
+		 * For non-function symbols, the destination
+		 * has no specific ARM/Thumb disposition, so
+		 * the branch is resolved under the assumption
+		 * that interworking is not required.
+		 */
+		if ((ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
+			!(offset & 1)) ||
 		    offset <= (s32)0xff000000 ||
 		    offset >= (s32)0x01000000) {
 			pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 344e52b16c8c..e7f92a4321f3 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -318,9 +318,13 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	smp_store_cpu_info(cpu);

 	/*
-	 * OK, now it's safe to let the boot CPU continue
+	 * OK, now it's safe to let the boot CPU continue.  Wait for
+	 * the CPU migration code to notice that the CPU is online
+	 * before we continue.
 	 */
 	set_cpu_online(cpu, true);
+	while (!cpu_active(cpu))
+		cpu_relax();

 	/*
 	 * OK, it's off to the idle thread for us
diff --git a/arch/arm/mach-h720x/Kconfig b/arch/arm/mach-h720x/Kconfig
index 9b6982efbd22..abf356c02343 100644
--- a/arch/arm/mach-h720x/Kconfig
+++ b/arch/arm/mach-h720x/Kconfig
@@ -6,12 +6,14 @@ config ARCH_H7201
 	bool "gms30c7201"
 	depends on ARCH_H720X
 	select CPU_H7201
+	select ZONE_DMA
 	help
 	  Say Y here if you are using the Hynix GMS30C7201 Reference Board

 config ARCH_H7202
 	bool "hms30c7202"
 	select CPU_H7202
+	select ZONE_DMA
 	depends on ARCH_H720X
 	help
 	  Say Y here if you are using the Hynix HMS30C7202 Reference Board
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 38b95e949d13..63621f152c98 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -23,6 +23,8 @@
 #include <linux/io.h>

 #include <asm/mach/time.h>
+#include <asm/hardware/gic.h>
+
 #include <mach/msm_iomap.h>
 #include <mach/cpu.h>

@@ -55,10 +57,12 @@ enum timer_location {
 #if defined(CONFIG_ARCH_QSD8X50)
 #define DGT_HZ (19200000 / 4) /* 19.2 MHz / 4 by default */
 #define MSM_DGT_SHIFT (0)
-#elif defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) || \
-	defined(CONFIG_ARCH_MSM8960)
+#elif defined(CONFIG_ARCH_MSM7X30)
 #define DGT_HZ (24576000 / 4) /* 24.576 MHz (LPXO) / 4 by default */
 #define MSM_DGT_SHIFT (0)
+#elif defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960)
+#define DGT_HZ (27000000 / 4) /* 27 MHz (PXO) / 4 by default */
+#define MSM_DGT_SHIFT (0)
 #else
 #define DGT_HZ 19200000 /* 19.2 MHz or 600 KHz after shift */
 #define MSM_DGT_SHIFT (5)
@@ -100,7 +104,11 @@ static cycle_t msm_read_timer_count(struct clocksource *cs)
 {
 	struct msm_clock *clk = container_of(cs, struct msm_clock, clocksource);

-	return readl(clk->global_counter);
+	/*
+	 * Shift timer count down by a constant due to unreliable lower bits
+	 * on some targets.
+	 */
+	return readl(clk->global_counter) >> clk->shift;
 }

 static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt)
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 3c3867850a30..089c0b5e454f 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -210,19 +210,21 @@ cpu_v7_name:
 
 /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
 .globl	cpu_v7_suspend_size
-.equ	cpu_v7_suspend_size, 4 * 8
+.equ	cpu_v7_suspend_size, 4 * 9
 #ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_v7_do_suspend)
 	stmfd	sp!, {r4 - r11, lr}
 	mrc	p15, 0, r4, c13, c0, 0	@ FCSE/PID
 	mrc	p15, 0, r5, c13, c0, 1	@ Context ID
+	mrc	p15, 0, r6, c13, c0, 3	@ User r/o thread ID
+	stmia	r0!, {r4 - r6}
 	mrc	p15, 0, r6, c3, c0, 0	@ Domain ID
 	mrc	p15, 0, r7, c2, c0, 0	@ TTB 0
 	mrc	p15, 0, r8, c2, c0, 1	@ TTB 1
 	mrc	p15, 0, r9, c1, c0, 0	@ Control register
 	mrc	p15, 0, r10, c1, c0, 1	@ Auxiliary control register
 	mrc	p15, 0, r11, c1, c0, 2	@ Co-processor access control
-	stmia	r0, {r4 - r11}
+	stmia	r0, {r6 - r11}
 	ldmfd	sp!, {r4 - r11, pc}
 ENDPROC(cpu_v7_do_suspend)

@@ -230,9 +232,11 @@ ENTRY(cpu_v7_do_resume)
 	mov	ip, #0
 	mcr	p15, 0, ip, c8, c7, 0	@ invalidate TLBs
 	mcr	p15, 0, ip, c7, c5, 0	@ invalidate I cache
-	ldmia	r0, {r4 - r11}
+	ldmia	r0!, {r4 - r6}
 	mcr	p15, 0, r4, c13, c0, 0	@ FCSE/PID
 	mcr	p15, 0, r5, c13, c0, 1	@ Context ID
+	mcr	p15, 0, r6, c13, c0, 3	@ User r/o thread ID
+	ldmia	r0, {r6 - r11}
 	mcr	p15, 0, r6, c3, c0, 0	@ Domain ID
 	mcr	p15, 0, r7, c2, c0, 0	@ TTB 0
 	mcr	p15, 0, r8, c2, c0, 1	@ TTB 1
@@ -418,9 +422,9 @@ ENTRY(v7_processor_functions)
 	.word	cpu_v7_dcache_clean_area
 	.word	cpu_v7_switch_mm
 	.word	cpu_v7_set_pte_ext
-	.word	0
-	.word	0
-	.word	0
+	.word	cpu_v7_suspend_size
+	.word	cpu_v7_do_suspend
+	.word	cpu_v7_do_resume
 	.size	v7_processor_functions, . - v7_processor_functions

 	.section ".rodata"
diff --git a/arch/arm/plat-iop/cp6.c b/arch/arm/plat-iop/cp6.c
index 9612a87e2a88..bab73e2c79db 100644
--- a/arch/arm/plat-iop/cp6.c
+++ b/arch/arm/plat-iop/cp6.c
@@ -18,6 +18,7 @@
  */
 #include <linux/init.h>
 #include <asm/traps.h>
+#include <asm/ptrace.h>

 static int cp6_trap(struct pt_regs *regs, unsigned int instr)
 {
diff --git a/arch/m32r/include/asm/mmzone.h b/arch/m32r/include/asm/mmzone.h
index 9f3b5accda88..115ced33febd 100644
--- a/arch/m32r/include/asm/mmzone.h
+++ b/arch/m32r/include/asm/mmzone.h
@@ -14,12 +14,6 @@ extern struct pglist_data *node_data[];
 #define NODE_DATA(nid)		(node_data[nid])

 #define node_localnr(pfn, nid)	((pfn) - NODE_DATA(nid)->node_start_pfn)
-#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)						\
-({									\
-	pg_data_t *__pgdat = NODE_DATA(nid);				\
-	__pgdat->node_start_pfn + __pgdat->node_spanned_pages - 1;	\
-})

 #define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
 /*
@@ -44,7 +38,7 @@ static __inline__ int pfn_to_nid(unsigned long pfn)
 	int node;

 	for (node = 0 ; node < MAX_NUMNODES ; node++)
-		if (pfn >= node_start_pfn(node) && pfn <= node_end_pfn(node))
+		if (pfn >= node_start_pfn(node) && pfn < node_end_pfn(node))
 			break;

 	return node;
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 3d6e60dad9d9..780560b330d9 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -15,6 +15,7 @@
  * User space memory access functions
  */
 #include <linux/thread_info.h>
+#include <linux/kernel.h>
 #include <asm/page.h>
 #include <asm/errno.h>

diff --git a/arch/parisc/include/asm/mmzone.h b/arch/parisc/include/asm/mmzone.h
index 9608d2cf214a..e67eb9c3d1bf 100644
--- a/arch/parisc/include/asm/mmzone.h
+++ b/arch/parisc/include/asm/mmzone.h
@@ -14,13 +14,6 @@ extern struct node_map_data node_data[];
 
 #define NODE_DATA(nid)		(&node_data[nid].pg_data)

-#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)						\
-({									\
-	pg_data_t *__pgdat = NODE_DATA(nid);				\
-	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;		\
-})
-
 /* We have these possible memory map layouts:
  * Astro: 0-3.75, 67.75-68, 4-64
  * zx1: 0-1, 257-260, 4-256
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index fd3fd58bad84..7b589178be46 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -38,13 +38,6 @@ u64 memory_hotplug_max(void);
 #define memory_hotplug_max() memblock_end_of_DRAM()
 #endif

-/*
- * Following are macros that each numa implmentation must define.
- */
-
-#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)	(NODE_DATA(nid)->node_end_pfn)
-
 #else
 #define memory_hotplug_max() memblock_end_of_DRAM()
 #endif /* CONFIG_NEED_MULTIPLE_NODES */
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 90d77bd078f5..c03fef7a9c22 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -579,6 +579,7 @@ config S390_GUEST
 	def_bool y
 	prompt "s390 guest support for KVM (EXPERIMENTAL)"
 	depends on 64BIT && EXPERIMENTAL
+	select VIRTUALIZATION
 	select VIRTIO
 	select VIRTIO_RING
 	select VIRTIO_CONSOLE
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 52420d2785b3..1d55c95f617c 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -262,7 +262,7 @@ void smp_ctl_set_bit(int cr, int bit)
 
 	memset(&parms.orvals, 0, sizeof(parms.orvals));
 	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
-	parms.orvals[cr] = 1 << bit;
+	parms.orvals[cr] = 1UL << bit;
 	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_set_bit);
@@ -276,7 +276,7 @@ void smp_ctl_clear_bit(int cr, int bit)
 
 	memset(&parms.orvals, 0, sizeof(parms.orvals));
 	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
-	parms.andvals[cr] = ~(1L << bit);
+	parms.andvals[cr] = ~(1UL << bit);
 	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_clear_bit);
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 5995e9bc72d9..0e358c2cffeb 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -25,7 +25,7 @@ extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
 
 #include "hwsampler.h"

-#define DEFAULT_INTERVAL	4096
+#define DEFAULT_INTERVAL	4127518

 #define DEFAULT_SDBT_BLOCKS	1
 #define DEFAULT_SDB_BLOCKS	511
@@ -151,6 +151,12 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
 	if (oprofile_max_interval == 0)
 		return -ENODEV;

+	/* The initial value should be sane */
+	if (oprofile_hw_interval < oprofile_min_interval)
+		oprofile_hw_interval = oprofile_min_interval;
+	if (oprofile_hw_interval > oprofile_max_interval)
+		oprofile_hw_interval = oprofile_max_interval;
+
 	if (oprofile_timer_init(ops))
 		return -ENODEV;

diff --git a/arch/sh/include/asm/mmzone.h b/arch/sh/include/asm/mmzone.h
index 8887baff5eff..15a8496960e6 100644
--- a/arch/sh/include/asm/mmzone.h
+++ b/arch/sh/include/asm/mmzone.h
@@ -9,10 +9,6 @@
 extern struct pglist_data *node_data[];
 #define NODE_DATA(nid)		(node_data[nid])

-#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)	(NODE_DATA(nid)->node_start_pfn + \
-				 NODE_DATA(nid)->node_spanned_pages)
-
 static inline int pfn_to_nid(unsigned long pfn)
 {
 	int nid;
diff --git a/arch/sparc/include/asm/mmzone.h b/arch/sparc/include/asm/mmzone.h
index e8c648741ed4..99d9b9f577bf 100644
--- a/arch/sparc/include/asm/mmzone.h
+++ b/arch/sparc/include/asm/mmzone.h
@@ -8,8 +8,6 @@
 extern struct pglist_data *node_data[];

 #define NODE_DATA(nid)		(node_data[nid])
-#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)	(NODE_DATA(nid)->node_end_pfn)

 extern int numa_cpu_lookup_table[];
 extern cpumask_t numa_cpumask_lookup_table[];
diff --git a/arch/tile/include/asm/mmzone.h b/arch/tile/include/asm/mmzone.h
index c6344c4f32ac..9d3dbce8f953 100644
--- a/arch/tile/include/asm/mmzone.h
+++ b/arch/tile/include/asm/mmzone.h
@@ -40,17 +40,6 @@ static inline int pfn_to_nid(unsigned long pfn)
 	return highbits_to_node[__pfn_to_highbits(pfn)];
 }

-/*
- * Following are macros that each numa implmentation must define.
- */
-
-#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)						\
-({									\
-	pg_data_t *__pgdat = NODE_DATA(nid);				\
-	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;		\
-})
-
 #define kern_addr_valid(kaddr)	virt_addr_valid((void *)kaddr)

 static inline int pfn_valid(int pfn)
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h
index 19ae14ba6978..0cd3800f33b9 100644
--- a/arch/x86/include/asm/memblock.h
+++ b/arch/x86/include/asm/memblock.h
@@ -4,7 +4,6 @@
 #define ARCH_DISCARD_MEMBLOCK

 u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);
-void memblock_x86_to_bootmem(u64 start, u64 end);

 void memblock_x86_reserve_range(u64 start, u64 end, char *name);
 void memblock_x86_free_range(u64 start, u64 end);
@@ -19,5 +18,6 @@ u64 memblock_x86_hole_size(u64 start, u64 end);
 u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
 u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
 u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
+bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);

 #endif
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 5e83a416eca8..224e8c5eb307 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -48,17 +48,6 @@ static inline int pfn_to_nid(unsigned long pfn)
 #endif
 }

-/*
- * Following are macros that each numa implmentation must define.
- */
-
-#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)						\
-({									\
-	pg_data_t *__pgdat = NODE_DATA(nid);				\
-	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;		\
-})
-
 static inline int pfn_valid(int pfn)
 {
 	int nid = pfn_to_nid(pfn);
diff --git a/arch/x86/include/asm/mmzone_64.h b/arch/x86/include/asm/mmzone_64.h
index b3f88d7867c7..129d9aa3ceb3 100644
--- a/arch/x86/include/asm/mmzone_64.h
+++ b/arch/x86/include/asm/mmzone_64.h
@@ -13,8 +13,5 @@ extern struct pglist_data *node_data[];
 
 #define NODE_DATA(nid)		(node_data[nid])

-#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)	(NODE_DATA(nid)->node_start_pfn +	\
-				 NODE_DATA(nid)->node_spanned_pages)
 #endif
 #endif /* _ASM_X86_MMZONE_64_H */
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 31d84acc1512..a518c0a45044 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -22,6 +22,8 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 	u64 product;
 #ifdef __i386__
 	u32 tmp1, tmp2;
+#else
+	ulong tmp;
 #endif

 	if (shift < 0)
@@ -42,8 +44,11 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
42 : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) ); 44 : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
43#elif defined(__x86_64__) 45#elif defined(__x86_64__)
44 __asm__ ( 46 __asm__ (
45 "mul %%rdx ; shrd $32,%%rdx,%%rax" 47 "mul %[mul_frac] ; shrd $32, %[hi], %[lo]"
46 : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) ); 48 : [lo]"=a"(product),
49 [hi]"=d"(tmp)
50 : "0"(delta),
51 [mul_frac]"rm"((u64)mul_frac));
47#else 52#else
48#error implement me! 53#error implement me!
49#endif 54#endif
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index bd14bb4c8594..aee38623b768 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -565,7 +565,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
 
 static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 {
-	return gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
+	return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
 }

 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6c4dc010c4cb..9d03ad4dd5ec 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -121,7 +121,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 				    gva_t addr, u32 access)
 {
 	pt_element_t pte;
-	pt_element_t __user *ptep_user;
+	pt_element_t __user *uninitialized_var(ptep_user);
 	gfn_t table_gfn;
 	unsigned index, pt_access, uninitialized_var(pte_access);
 	gpa_t pte_gpa;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4c3fa0f67469..d48ec60ea421 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2047,7 +2047,8 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
 					unsigned long cr0,
 					struct kvm_vcpu *vcpu)
 {
-	vmx_decache_cr3(vcpu);
+	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+		vmx_decache_cr3(vcpu);
 	if (!(cr0 & X86_CR0_PG)) {
 		/* From paging/starting to nonpaging */
 		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
index aa1169392b83..992da5ec5a64 100644
--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -8,7 +8,7 @@
 #include <linux/range.h>

 /* Check for already reserved areas */
-static bool __init check_with_memblock_reserved_size(u64 *addrp, u64 *sizep, u64 align)
+bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
 {
 	struct memblock_region *r;
 	u64 addr = *addrp, last;
@@ -59,7 +59,7 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
 		if (addr >= ei_last)
 			continue;
 		*sizep = ei_last - addr;
-		while (check_with_memblock_reserved_size(&addr, sizep, align))
+		while (memblock_x86_check_reserved_size(&addr, sizep, align))
 			;

 		if (*sizep)
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 0972315c3860..68c3c1395202 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -188,7 +188,7 @@ static bool resource_contains(struct resource *res, resource_size_t point)
 	return false;
 }

-static void coalesce_windows(struct pci_root_info *info, int type)
+static void coalesce_windows(struct pci_root_info *info, unsigned long type)
 {
 	int i, j;
 	struct resource *res1, *res2;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 0d3a4fa34560..474356b98ede 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -310,14 +310,31 @@ void __init efi_reserve_boot_services(void)
 
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		efi_memory_desc_t *md = p;
-		unsigned long long start = md->phys_addr;
-		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+		u64 start = md->phys_addr;
+		u64 size = md->num_pages << EFI_PAGE_SHIFT;

 		if (md->type != EFI_BOOT_SERVICES_CODE &&
 		    md->type != EFI_BOOT_SERVICES_DATA)
 			continue;
-
-		memblock_x86_reserve_range(start, start + size, "EFI Boot");
+		/* Only reserve where possible:
+		 *  - Not within any already allocated areas
+		 *  - Not over any memory area (really needed, if above?)
+		 *  - Not within any part of the kernel
+		 *  - Not the bios reserved area
+		 */
+		if ((start+size >= virt_to_phys(_text)
+				&& start <= virt_to_phys(_end)) ||
+			!e820_all_mapped(start, start+size, E820_RAM) ||
+			memblock_x86_check_reserved_size(&start, &size,
+							1<<EFI_PAGE_SHIFT)) {
+			/* Could not reserve, skip it */
+			md->num_pages = 0;
+			memblock_dbg(PFX "Could not reserve boot range "
+					"[0x%010llx-0x%010llx]\n",
+						start, start+size-1);
+		} else
+			memblock_x86_reserve_range(start, start+size,
+							"EFI Boot");
 	}
 }

@@ -334,6 +351,10 @@ static void __init efi_free_boot_services(void)
 		    md->type != EFI_BOOT_SERVICES_DATA)
 			continue;

+		/* Could not reserve boot area */
+		if (!size)
+			continue;
+
 		free_bootmem_late(start, size);
 	}
 }
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index dd7b88f2ec7a..5525163a0398 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1033,6 +1033,13 @@ static void xen_machine_halt(void)
 	xen_reboot(SHUTDOWN_poweroff);
 }

+static void xen_machine_power_off(void)
+{
+	if (pm_power_off)
+		pm_power_off();
+	xen_reboot(SHUTDOWN_poweroff);
+}
+
 static void xen_crash_shutdown(struct pt_regs *regs)
 {
 	xen_reboot(SHUTDOWN_crash);
@@ -1058,7 +1065,7 @@ int xen_panic_handler_init(void)
 static const struct machine_ops xen_machine_ops __initconst = {
 	.restart = xen_restart,
 	.halt = xen_machine_halt,
-	.power_off = xen_machine_halt,
+	.power_off = xen_machine_power_off,
 	.shutdown = xen_machine_halt,
 	.crash_shutdown = xen_crash_shutdown,
 	.emergency_restart = xen_emergency_restart,
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index dc708dcc62f1..673e968df3cf 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -59,6 +59,7 @@
 #include <asm/page.h>
 #include <asm/init.h>
 #include <asm/pat.h>
+#include <asm/smp.h>

 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -1231,7 +1232,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 {
 	struct {
 		struct mmuext_op op;
-		DECLARE_BITMAP(mask, NR_CPUS);
+		DECLARE_BITMAP(mask, num_processors);
 	} *args;
 	struct multicall_space mcs;

@@ -1599,6 +1600,11 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 	for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
 		pte_t pte;

+#ifdef CONFIG_X86_32
+		if (pfn > max_pfn_mapped)
+			max_pfn_mapped = pfn;
+#endif
+
 		if (!pte_none(pte_page[pteidx]))
 			continue;

@@ -1766,7 +1772,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
 	initial_kernel_pmd =
 		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);

-	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
+	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
+				  xen_start_info->nr_pt_frames * PAGE_SIZE +
+				  512*1024);

 	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
 	memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index be1a464f6d66..60aeeb56948f 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -227,11 +227,7 @@ char * __init xen_memory_setup(void)
 
 	memcpy(map_raw, map, sizeof(map));
 	e820.nr_map = 0;
-#ifdef CONFIG_X86_32
 	xen_extra_mem_start = mem_end;
-#else
-	xen_extra_mem_start = max((1ULL << 32), mem_end);
-#endif
 	for (i = 0; i < memmap.nr_entries; i++) {
 		unsigned long long end;

@@ -266,6 +262,12 @@ char * __init xen_memory_setup(void)
 		if (map[i].size > 0)
 			e820_add_region(map[i].addr, map[i].size, map[i].type);
 	}
+	/* Align the balloon area so that max_low_pfn does not get set
+	 * to be at the _end_ of the PCI gap at the far end (fee01000).
+	 * Note that xen_extra_mem_start gets set in the loop above to be
+	 * past the last E820 region. */
+	if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32)))
+		xen_extra_mem_start = (1ULL<<32);

 	/*
 	 * In domU, the ISA region is normal, usable memory, but we
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 41038c01de40..b4533a86d7e4 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -205,11 +205,18 @@ static void __init xen_smp_prepare_boot_cpu(void)
 static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned cpu;
+	unsigned int i;

 	xen_init_lock_cpu(0);

 	smp_store_cpu_info(0);
 	cpu_data(0).x86_max_cores = 1;
+
+	for_each_possible_cpu(i) {
+		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
+	}
 	set_cpu_sibling_map(0);

 	if (xen_smp_intr_init(0))
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a62be8d0dc1b..3689f833afdc 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -927,7 +927,7 @@ static int throtl_dispatch(struct request_queue *q)
 
 	bio_list_init(&bio_list_on_stack);

-	throtl_log(td, "dispatch nr_queued=%lu read=%u write=%u",
+	throtl_log(td, "dispatch nr_queued=%d read=%u write=%u",
 			total_nr_queued(td), td->nr_queued[READ],
 			td->nr_queued[WRITE]);

@@ -1204,7 +1204,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
 	}

 queue_bio:
-	throtl_log_tg(td, tg, "[%c] bio. bdisp=%u sz=%u bps=%llu"
+	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
 			" iodisp=%u iops=%u queued=%d/%d",
 			rw == READ ? 'R' : 'W',
 			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3c7b537bf908..f3799432676d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -988,9 +988,10 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 
 	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
 			st->min_vdisktime);
-	cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
-			" sect=%u", used_sl, cfqq->slice_dispatch, charge,
-			iops_mode(cfqd), cfqq->nr_sectors);
+	cfq_log_cfqq(cfqq->cfqd, cfqq,
+		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
+		     used_sl, cfqq->slice_dispatch, charge,
+		     iops_mode(cfqd), cfqq->nr_sectors);
 	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
 					  unaccounted_sl);
 	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
@@ -2023,8 +2024,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	 */
 	if (sample_valid(cic->ttime_samples) &&
 	    (cfqq->slice_end - jiffies < cic->ttime_mean)) {
-		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
-			     cic->ttime_mean);
+		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
+			     cic->ttime_mean);
 		return;
 	}

@@ -2772,8 +2773,11 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
 	smp_wmb();
 	cic->key = cfqd_dead_key(cfqd);

-	if (ioc->ioc_data == cic)
+	if (rcu_dereference(ioc->ioc_data) == cic) {
+		spin_lock(&ioc->lock);
 		rcu_assign_pointer(ioc->ioc_data, NULL);
+		spin_unlock(&ioc->lock);
+	}

 	if (cic->cfqq[BLK_RW_ASYNC]) {
 		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
diff --git a/block/genhd.c b/block/genhd.c
index 95822ae25cfe..3608289c8ecd 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1371,6 +1371,7 @@ struct disk_events {
 	struct gendisk		*disk;		/* the associated disk */
 	spinlock_t		lock;
 
+	struct mutex		block_mutex;	/* protects blocking */
 	int			block;		/* event blocking depth */
 	unsigned int		pending;	/* events already sent out */
 	unsigned int		clearing;	/* events being cleared */
@@ -1414,22 +1415,44 @@ static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
 	return msecs_to_jiffies(intv_msecs);
 }
 
-static void __disk_block_events(struct gendisk *disk, bool sync)
+/**
+ * disk_block_events - block and flush disk event checking
+ * @disk: disk to block events for
+ *
+ * On return from this function, it is guaranteed that event checking
+ * isn't in progress and won't happen until unblocked by
+ * disk_unblock_events().  Events blocking is counted and the actual
+ * unblocking happens after the matching number of unblocks are done.
+ *
+ * Note that this intentionally does not block event checking from
+ * disk_clear_events().
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void disk_block_events(struct gendisk *disk)
 {
 	struct disk_events *ev = disk->ev;
 	unsigned long flags;
 	bool cancel;
 
+	if (!ev)
+		return;
+
+	/*
+	 * Outer mutex ensures that the first blocker completes canceling
+	 * the event work before further blockers are allowed to finish.
+	 */
+	mutex_lock(&ev->block_mutex);
+
 	spin_lock_irqsave(&ev->lock, flags);
 	cancel = !ev->block++;
 	spin_unlock_irqrestore(&ev->lock, flags);
 
-	if (cancel) {
-		if (sync)
-			cancel_delayed_work_sync(&disk->ev->dwork);
-		else
-			cancel_delayed_work(&disk->ev->dwork);
-	}
+	if (cancel)
+		cancel_delayed_work_sync(&disk->ev->dwork);
+
+	mutex_unlock(&ev->block_mutex);
 }
 
 static void __disk_unblock_events(struct gendisk *disk, bool check_now)
@@ -1461,27 +1484,6 @@ out_unlock:
 }
 
 /**
- * disk_block_events - block and flush disk event checking
- * @disk: disk to block events for
- *
- * On return from this function, it is guaranteed that event checking
- * isn't in progress and won't happen until unblocked by
- * disk_unblock_events().  Events blocking is counted and the actual
- * unblocking happens after the matching number of unblocks are done.
- *
- * Note that this intentionally does not block event checking from
- * disk_clear_events().
- *
- * CONTEXT:
- * Might sleep.
- */
-void disk_block_events(struct gendisk *disk)
-{
-	if (disk->ev)
-		__disk_block_events(disk, true);
-}
-
-/**
  * disk_unblock_events - unblock disk event checking
  * @disk: disk to unblock events for
  *
@@ -1508,10 +1510,18 @@ void disk_unblock_events(struct gendisk *disk)
  */
 void disk_check_events(struct gendisk *disk)
 {
-	if (disk->ev) {
-		__disk_block_events(disk, false);
-		__disk_unblock_events(disk, true);
+	struct disk_events *ev = disk->ev;
+	unsigned long flags;
+
+	if (!ev)
+		return;
+
+	spin_lock_irqsave(&ev->lock, flags);
+	if (!ev->block) {
+		cancel_delayed_work(&ev->dwork);
+		queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
 	}
+	spin_unlock_irqrestore(&ev->lock, flags);
 }
 EXPORT_SYMBOL_GPL(disk_check_events);
 
@@ -1546,7 +1556,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
 	spin_unlock_irq(&ev->lock);
 
 	/* uncondtionally schedule event check and wait for it to finish */
-	__disk_block_events(disk, true);
+	disk_block_events(disk);
 	queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
 	flush_delayed_work(&ev->dwork);
 	__disk_unblock_events(disk, false);
@@ -1664,7 +1674,7 @@ static ssize_t disk_events_poll_msecs_store(struct device *dev,
 	if (intv < 0 && intv != -1)
 		return -EINVAL;
 
-	__disk_block_events(disk, true);
+	disk_block_events(disk);
 	disk->ev->poll_msecs = intv;
 	__disk_unblock_events(disk, true);
 
@@ -1750,6 +1760,7 @@ static void disk_add_events(struct gendisk *disk)
 	INIT_LIST_HEAD(&ev->node);
 	ev->disk = disk;
 	spin_lock_init(&ev->lock);
+	mutex_init(&ev->block_mutex);
 	ev->block = 1;
 	ev->poll_msecs = -1;
 	INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
@@ -1770,7 +1781,7 @@ static void disk_del_events(struct gendisk *disk)
 	if (!disk->ev)
 		return;
 
-	__disk_block_events(disk, true);
+	disk_block_events(disk);
 
 	mutex_lock(&disk_events_mutex);
 	list_del_init(&disk->ev->node);
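
The genhd.c changes converge on one scheme: a counted block where only the 0 -> 1 transition cancels the poll work, now always synchronously, with an outer mutex so later blockers wait for the first blocker's cancel to finish. A minimal pthread sketch of that shape (the workqueue cancel is a stand-in):

/* Illustrative only -- userspace analogue of disk_block_events(). */
#include <pthread.h>
#include <stdio.h>

struct events {
	pthread_mutex_t block_mutex;	/* outer: serializes first-block */
	pthread_mutex_t lock;		/* inner: protects the counter */
	int block;			/* blocking depth */
};

static void cancel_poll_work_sync(void) { /* stand-in for the cancel */ }

static void block_events(struct events *ev)
{
	int cancel;

	pthread_mutex_lock(&ev->block_mutex);
	pthread_mutex_lock(&ev->lock);
	cancel = !ev->block++;		/* only 0 -> 1 cancels */
	pthread_mutex_unlock(&ev->lock);
	if (cancel)
		cancel_poll_work_sync();
	pthread_mutex_unlock(&ev->block_mutex);
}

int main(void)
{
	struct events ev = { PTHREAD_MUTEX_INITIALIZER,
			     PTHREAD_MUTEX_INITIALIZER, 0 };

	block_events(&ev);
	printf("depth=%d\n", ev.block);
	return 0;
}
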
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 736bee5dafeb..000d03ae6653 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4143,9 +4143,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	 * Devices which choke on SETXFER.  Applies only if both the
 	 * device and controller are SATA.
 	 */
-	{ "PIONEER DVD-RW  DVRTD08",	"1.00",	ATA_HORKAGE_NOSETXFER },
-	{ "PIONEER DVD-RW  DVR-212D",	"1.28",	ATA_HORKAGE_NOSETXFER },
-	{ "PIONEER DVD-RW  DVR-216D",	"1.08",	ATA_HORKAGE_NOSETXFER },
+	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
+	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
+	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
 
 	/* End Marker */
 	{ }
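
In the blacklist, a NULL revision string acts as a wildcard, so the hunk above widens the NOSETXFER workaround from specific firmware revisions to every revision of those drives. A small sketch of that matching convention (names illustrative):

/* Illustrative only: NULL-as-wildcard matching for blacklist entries. */
#include <stdio.h>
#include <string.h>

struct blacklist_entry {
	const char *model;
	const char *rev;	/* NULL matches any revision */
};

static int entry_matches(const struct blacklist_entry *e,
			 const char *model, const char *rev)
{
	if (strcmp(e->model, model))
		return 0;
	return !e->rev || !strcmp(e->rev, rev);
}

int main(void)
{
	struct blacklist_entry e = { "PIONEER DVD-RW  DVRTD08", NULL };

	printf("%d\n", entry_matches(&e, "PIONEER DVD-RW  DVRTD08", "1.02"));
	return 0;
}
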
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d51f9795c064..927f968e99d9 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3797,6 +3797,12 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
  */
 int ata_sas_port_start(struct ata_port *ap)
 {
+	/*
+	 * the port is marked as frozen at allocation time, but if we don't
+	 * have new eh, we won't thaw it
+	 */
+	if (!ap->ops->error_handler)
+		ap->pflags &= ~ATA_PFLAG_FROZEN;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ata_sas_port_start);
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 75a6a0c0094f..5d7f58a7e34d 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -161,6 +161,9 @@ static const struct pci_device_id marvell_pci_tbl[] = {
 	{ PCI_DEVICE(0x11AB, 0x6121), },
 	{ PCI_DEVICE(0x11AB, 0x6123), },
 	{ PCI_DEVICE(0x11AB, 0x6145), },
+	{ PCI_DEVICE(0x1B4B, 0x91A0), },
+	{ PCI_DEVICE(0x1B4B, 0x91A4), },
+
 	{ }	/* terminate list */
 };
 
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 1c4b3aa4c7c4..dc88a39e7db8 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -389,7 +389,7 @@ static void sata_dwc_tf_dump(struct ata_taskfile *tf)
 /*
  * Function: get_burst_length_encode
  * arguments: datalength: length in bytes of data
- * returns value to be programmed in register corrresponding to data length
+ * returns value to be programmed in register corresponding to data length
  * This value is effectively the log(base 2) of the length
  */
 static int get_burst_length_encode(int datalength)
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index eaa8a854af03..ad367c4139b1 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -387,7 +387,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
 	clknb = container_of(nb, struct pm_clk_notifier_block, nb);
 
 	switch (action) {
-	case BUS_NOTIFY_ADD_DEVICE:
+	case BUS_NOTIFY_BIND_DRIVER:
 		if (clknb->con_ids[0]) {
 			for (con_id = clknb->con_ids; *con_id; con_id++)
 				enable_clock(dev, *con_id);
@@ -395,7 +395,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
 			enable_clock(dev, NULL);
 		}
 		break;
-	case BUS_NOTIFY_DEL_DEVICE:
+	case BUS_NOTIFY_UNBOUND_DRIVER:
 		if (clknb->con_ids[0]) {
 			for (con_id = clknb->con_ids; *con_id; con_id++)
 				disable_clock(dev, *con_id);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index aa6320207745..06f09bf89cb2 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -57,7 +57,8 @@ static int async_error;
  */
 void device_pm_init(struct device *dev)
 {
-	dev->power.in_suspend = false;
+	dev->power.is_prepared = false;
+	dev->power.is_suspended = false;
 	init_completion(&dev->power.completion);
 	complete_all(&dev->power.completion);
 	dev->power.wakeup = NULL;
@@ -91,7 +92,7 @@ void device_pm_add(struct device *dev)
 	pr_debug("PM: Adding info for %s:%s\n",
 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	mutex_lock(&dpm_list_mtx);
-	if (dev->parent && dev->parent->power.in_suspend)
+	if (dev->parent && dev->parent->power.is_prepared)
 		dev_warn(dev, "parent %s should not be sleeping\n",
 			dev_name(dev->parent));
 	list_add_tail(&dev->power.entry, &dpm_list);
@@ -511,7 +512,14 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 	dpm_wait(dev->parent, async);
 	device_lock(dev);
 
-	dev->power.in_suspend = false;
+	/*
+	 * This is a fib.  But we'll allow new children to be added below
+	 * a resumed device, even if the device hasn't been completed yet.
+	 */
+	dev->power.is_prepared = false;
+
+	if (!dev->power.is_suspended)
+		goto Unlock;
 
 	if (dev->pwr_domain) {
 		pm_dev_dbg(dev, state, "power domain ");
@@ -548,6 +556,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 	}
 
  End:
+	dev->power.is_suspended = false;
+
+ Unlock:
 	device_unlock(dev);
 	complete_all(&dev->power.completion);
 
@@ -670,7 +681,7 @@ void dpm_complete(pm_message_t state)
 		struct device *dev = to_device(dpm_prepared_list.prev);
 
 		get_device(dev);
-		dev->power.in_suspend = false;
+		dev->power.is_prepared = false;
 		list_move(&dev->power.entry, &list);
 		mutex_unlock(&dpm_list_mtx);
 
@@ -835,11 +846,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	device_lock(dev);
 
 	if (async_error)
-		goto End;
+		goto Unlock;
 
 	if (pm_wakeup_pending()) {
 		async_error = -EBUSY;
-		goto End;
+		goto Unlock;
 	}
 
 	if (dev->pwr_domain) {
@@ -877,6 +888,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	}
 
  End:
+	dev->power.is_suspended = !error;
+
+ Unlock:
 	device_unlock(dev);
 	complete_all(&dev->power.completion);
 
@@ -1042,7 +1056,7 @@ int dpm_prepare(pm_message_t state)
 			put_device(dev);
 			break;
 		}
-		dev->power.in_suspend = true;
+		dev->power.is_prepared = true;
 		if (!list_empty(&dev->power.entry))
 			list_move_tail(&dev->power.entry, &dpm_prepared_list);
 		put_device(dev);
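
The main.c hunks split one in_suspend bit into is_prepared and is_suspended so that resume can skip callbacks for a device whose suspend never actually ran (the suspend path aborted at the new Unlock labels). A compact sketch of why one bit was not enough — names mimic the patch, the device state is faked:

/* Illustrative only: record what suspend actually did, consult it on resume. */
#include <stdbool.h>
#include <stdio.h>

struct dev_pm {
	bool is_prepared;	/* prepare() done, complete() pending */
	bool is_suspended;	/* suspend callbacks actually ran */
};

static int device_suspend(struct dev_pm *pm, bool abort_early)
{
	int error = abort_early ? -1 : 0;	/* e.g. wakeup pending */

	if (!error) {
		/* ... real suspend callbacks would run here ... */
	}
	pm->is_suspended = !error;		/* record the outcome */
	return error;
}

static void device_resume(struct dev_pm *pm)
{
	pm->is_prepared = false;	/* allow new children again */
	if (!pm->is_suspended)
		return;			/* nothing to undo: skip callbacks */
	/* ... real resume callbacks would run here ... */
	pm->is_suspended = false;
}

int main(void)
{
	struct dev_pm pm = { .is_prepared = true };

	device_suspend(&pm, true);	/* suspend aborted */
	device_resume(&pm);		/* resume safely does nothing */
	printf("prepared=%d suspended=%d\n", pm.is_prepared, pm.is_suspended);
	return 0;
}
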
diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c
index fd6305bf953e..8ecf4c6c2874 100644
--- a/drivers/bluetooth/btmrvl_debugfs.c
+++ b/drivers/bluetooth/btmrvl_debugfs.c
@@ -64,6 +64,8 @@ static ssize_t btmrvl_hscfgcmd_write(struct file *file,
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 10, &result);
+	if (ret)
+		return ret;
 
 	priv->btmrvl_dev.hscfgcmd = result;
 
@@ -108,6 +110,8 @@ static ssize_t btmrvl_psmode_write(struct file *file, const char __user *ubuf,
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 10, &result);
+	if (ret)
+		return ret;
 
 	priv->btmrvl_dev.psmode = result;
 
@@ -147,6 +151,8 @@ static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf,
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 10, &result);
+	if (ret)
+		return ret;
 
 	priv->btmrvl_dev.pscmd = result;
 
@@ -191,6 +197,8 @@ static ssize_t btmrvl_gpiogap_write(struct file *file, const char __user *ubuf,
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 16, &result);
+	if (ret)
+		return ret;
 
 	priv->btmrvl_dev.gpio_gap = result;
 
@@ -230,6 +238,8 @@ static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf,
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 10, &result);
+	if (ret)
+		return ret;
 
 	priv->btmrvl_dev.hscmd = result;
 	if (priv->btmrvl_dev.hscmd) {
@@ -272,6 +282,8 @@ static ssize_t btmrvl_hsmode_write(struct file *file, const char __user *ubuf,
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 10, &result);
+	if (ret)
+		return ret;
 
 	priv->btmrvl_dev.hsmode = result;
 
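
All six btmrvl hunks fix the same bug class: using a parsed value without checking the parse result, so garbage input silently became a driver setting. The userspace equivalent with strtol() looks like this (strict_strtol() in the driver signals failure the same way, via a negative return):

/* Illustrative only: reject unparseable input instead of storing garbage. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_mode(const char *buf, long *result)
{
	char *end;

	errno = 0;
	*result = strtol(buf, &end, 10);
	if (errno || end == buf || *end != '\0')
		return -EINVAL;
	return 0;
}

int main(void)
{
	long mode;

	if (parse_mode("42", &mode) == 0)
		printf("mode=%ld\n", mode);
	if (parse_mode("bogus", &mode) != 0)
		printf("rejected\n");
	return 0;
}
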
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 94c84d744100..c6389de53161 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1219,11 +1219,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
 		if (ret)
 			goto unlock;
-	}
 
-	ret = i915_gem_object_set_to_gtt_domain(obj, write);
-	if (ret)
-		goto unlock;
+		ret = i915_gem_object_set_to_gtt_domain(obj, write);
+		if (ret)
+			goto unlock;
+	}
 
 	if (obj->tiling_mode == I915_TILING_NONE)
 		ret = i915_gem_object_put_fence(obj);
@@ -2926,8 +2926,6 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
 	 */
 	wmb();
 
-	i915_gem_release_mmap(obj);
-
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 20a4cc5b818f..4934cf84c320 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -187,10 +187,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
 	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
 		i915_gem_clflush_object(obj);
 
-	/* blow away mappings if mapped through GTT */
-	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
-		i915_gem_release_mmap(obj);
-
 	if (obj->base.pending_write_domain)
 		cd->flips |= atomic_read(&obj->pending_flip);
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9e34a1abeb61..ae2b49969b99 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1749,6 +1749,7 @@ void ironlake_irq_preinstall(struct drm_device *dev)
 	 * happens.
 	 */
 	I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
+	I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT);
 	}
 
 	/* XXX hotplug from PCH */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2f967af8e62e..5d5def756c9e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -531,6 +531,7 @@
 #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE	0
 #define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR		(1 << 3)
 
+#define GEN6_BSD_HWSTAM			0x12098
 #define GEN6_BSD_IMR			0x120a8
 #define GEN6_BSD_USER_INTERRUPT		(1 << 12)
 
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 60a94d2b5264..e8152d23d5b6 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -678,6 +678,7 @@ void i915_save_display(struct drm_device *dev)
 	}
 
 	/* VGA state */
+	mutex_lock(&dev->struct_mutex);
 	dev_priv->saveVGA0 = I915_READ(VGA0);
 	dev_priv->saveVGA1 = I915_READ(VGA1);
 	dev_priv->saveVGA_PD = I915_READ(VGA_PD);
@@ -687,6 +688,7 @@ void i915_save_display(struct drm_device *dev)
 	dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
 
 	i915_save_vga(dev);
+	mutex_unlock(&dev->struct_mutex);
 }
 
 void i915_restore_display(struct drm_device *dev)
@@ -780,6 +782,8 @@ void i915_restore_display(struct drm_device *dev)
 		I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
 	else
 		I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+
+	mutex_lock(&dev->struct_mutex);
 	I915_WRITE(VGA0, dev_priv->saveVGA0);
 	I915_WRITE(VGA1, dev_priv->saveVGA1);
 	I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
@@ -787,6 +791,7 @@ void i915_restore_display(struct drm_device *dev)
 	udelay(150);
 
 	i915_restore_vga(dev);
+	mutex_unlock(&dev->struct_mutex);
 }
 
 int i915_save_state(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 81a9059b6a94..aa43e7be6053 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4687,6 +4687,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 
 	I915_WRITE(DSPCNTR(plane), dspcntr);
 	POSTING_READ(DSPCNTR(plane));
+	intel_enable_plane(dev_priv, plane, pipe);
 
 	ret = intel_pipe_set_base(crtc, x, y, old_fb);
 
@@ -5217,8 +5218,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 
 	I915_WRITE(DSPCNTR(plane), dspcntr);
 	POSTING_READ(DSPCNTR(plane));
-	if (!HAS_PCH_SPLIT(dev))
-		intel_enable_plane(dev_priv, plane, pipe);
 
 	ret = intel_pipe_set_base(crtc, x, y, old_fb);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index f0d459bb46e4..525744d593c1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -262,7 +262,6 @@ static bool nouveau_dsm_detect(void)
 		vga_count++;
 
 		retval = nouveau_dsm_pci_probe(pdev);
-		printk("ret val is %d\n", retval);
 		if (retval & NOUVEAU_DSM_HAS_MUX)
 			has_dsm |= 1;
 		if (retval & NOUVEAU_DSM_HAS_OPT)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 4b9f4493c9f9..7347075ca5b8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -339,11 +339,12 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
 	int ret;
 
 	if (dev_priv->chipset < 0x84) {
-		ret = RING_SPACE(chan, 3);
+		ret = RING_SPACE(chan, 4);
 		if (ret)
 			return ret;
 
-		BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 2);
+		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3);
+		OUT_RING  (chan, NvSema);
 		OUT_RING  (chan, sema->mem->start);
 		OUT_RING  (chan, 1);
 	} else
@@ -351,10 +352,12 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
 		struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
 		u64 offset = vma->offset + sema->mem->start;
 
-		ret = RING_SPACE(chan, 5);
+		ret = RING_SPACE(chan, 7);
 		if (ret)
 			return ret;
 
+		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+		OUT_RING  (chan, chan->vram_handle);
 		BEGIN_RING(chan, NvSubSw, 0x0010, 4);
 		OUT_RING  (chan, upper_32_bits(offset));
 		OUT_RING  (chan, lower_32_bits(offset));
@@ -394,11 +397,12 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
 	int ret;
 
 	if (dev_priv->chipset < 0x84) {
-		ret = RING_SPACE(chan, 4);
+		ret = RING_SPACE(chan, 5);
 		if (ret)
 			return ret;
 
-		BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
+		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2);
+		OUT_RING  (chan, NvSema);
 		OUT_RING  (chan, sema->mem->start);
 		BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
 		OUT_RING  (chan, 1);
@@ -407,10 +411,12 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
 		struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
 		u64 offset = vma->offset + sema->mem->start;
 
-		ret = RING_SPACE(chan, 5);
+		ret = RING_SPACE(chan, 7);
 		if (ret)
 			return ret;
 
+		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+		OUT_RING  (chan, chan->vram_handle);
 		BEGIN_RING(chan, NvSubSw, 0x0010, 4);
 		OUT_RING  (chan, upper_32_bits(offset));
 		OUT_RING  (chan, lower_32_bits(offset));
@@ -504,22 +510,22 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
 	struct nouveau_gpuobj *obj = NULL;
 	int ret;
 
-	if (dev_priv->card_type >= NV_C0)
-		goto out_initialised;
+	if (dev_priv->card_type < NV_C0) {
+		/* Create an NV_SW object for various sync purposes */
+		ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
+		if (ret)
+			return ret;
 
-	/* Create an NV_SW object for various sync purposes */
-	ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
-	if (ret)
-		return ret;
+		ret = RING_SPACE(chan, 2);
+		if (ret)
+			return ret;
 
-	/* we leave subchannel empty for nvc0 */
-	ret = RING_SPACE(chan, 2);
-	if (ret)
-		return ret;
-	BEGIN_RING(chan, NvSubSw, 0, 1);
-	OUT_RING(chan, NvSw);
+		BEGIN_RING(chan, NvSubSw, 0, 1);
+		OUT_RING  (chan, NvSw);
+		FIRE_RING (chan);
+	}
 
-	/* Create a DMA object for the shared cross-channel sync area. */
+	/* Setup area of memory shared between all channels for x-chan sync */
 	if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
 		struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
 
@@ -534,23 +540,8 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
 		nouveau_gpuobj_ref(NULL, &obj);
 		if (ret)
 			return ret;
-
-		ret = RING_SPACE(chan, 2);
-		if (ret)
-			return ret;
-		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
-		OUT_RING(chan, NvSema);
-	} else {
-		ret = RING_SPACE(chan, 2);
-		if (ret)
-			return ret;
-		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
-		OUT_RING  (chan, chan->vram_handle); /* whole VM */
 	}
 
-	FIRE_RING(chan);
-
-out_initialised:
 	INIT_LIST_HEAD(&chan->fence.pending);
 	spin_lock_init(&chan->fence.lock);
 	atomic_set(&chan->fence.last_sequence_irq, 0);
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 922fb6b664ed..ef9dec0e6f8b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -182,6 +182,11 @@ nouveau_perf_init(struct drm_device *dev)
 		entries   = perf[2];
 	}
 
+	if (entries > NOUVEAU_PM_MAX_LEVEL) {
+		NV_DEBUG(dev, "perf table has too many entries - buggy vbios?\n");
+		entries = NOUVEAU_PM_MAX_LEVEL;
+	}
+
 	entry = perf + headerlen;
 	for (i = 0; i < entries; i++) {
 		struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
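
The nouveau_perf hunk clamps an entry count read from the video BIOS before it is used to walk a fixed-size perflvl array, so a buggy table cannot drive the loop past the end. The pattern, reduced to a runnable sketch (constants made up):

/* Illustrative only: clamp untrusted firmware counts before indexing. */
#include <stdio.h>

#define MAX_LEVELS 8

int main(void)
{
	unsigned char vbios[] = { 0, 0, 200 };	/* header, bogus count */
	unsigned int entries = vbios[2];
	int levels[MAX_LEVELS];

	if (entries > MAX_LEVELS) {
		fprintf(stderr, "perf table too large - buggy vbios?\n");
		entries = MAX_LEVELS;	/* never walk past the array */
	}
	for (unsigned int i = 0; i < entries; i++)
		levels[i] = (int)i;
	printf("parsed %u levels (first=%d)\n", entries, levels[0]);
	return 0;
}
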
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 80218887e0a0..144f79a350ae 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -881,8 +881,8 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
 
 #ifdef __BIG_ENDIAN
 	/* Put the card in BE mode if it's not */
-	if (nv_rd32(dev, NV03_PMC_BOOT_1))
-		nv_wr32(dev, NV03_PMC_BOOT_1, 0x00000001);
+	if (nv_rd32(dev, NV03_PMC_BOOT_1) != 0x01000001)
+		nv_wr32(dev, NV03_PMC_BOOT_1, 0x01000001);
 
 	DRM_MEMORYBARRIER();
 #endif
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 74a3f6872701..08da478ba544 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -409,7 +409,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	struct nouveau_channel *evo = dispc->sync;
 	int ret;
 
-	ret = RING_SPACE(evo, 24);
+	ret = RING_SPACE(evo, chan ? 25 : 27);
 	if (unlikely(ret))
 		return ret;
 
@@ -458,8 +458,19 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	/* queue the flip on the crtc's "display sync" channel */
 	BEGIN_RING(evo, 0, 0x0100, 1);
 	OUT_RING  (evo, 0xfffe0000);
-	BEGIN_RING(evo, 0, 0x0084, 5);
-	OUT_RING  (evo, chan ? 0x00000100 : 0x00000010);
+	if (chan) {
+		BEGIN_RING(evo, 0, 0x0084, 1);
+		OUT_RING  (evo, 0x00000100);
+	} else {
+		BEGIN_RING(evo, 0, 0x0084, 1);
+		OUT_RING  (evo, 0x00000010);
+		/* allows gamma somehow, PDISP will bitch at you if
+		 * you don't wait for vblank before changing this..
+		 */
+		BEGIN_RING(evo, 0, 0x00e0, 1);
+		OUT_RING  (evo, 0x40000000);
+	}
+	BEGIN_RING(evo, 0, 0x0088, 4);
 	OUT_RING  (evo, dispc->sem.offset);
 	OUT_RING  (evo, 0xf00d0000 | dispc->sem.value);
 	OUT_RING  (evo, 0x74b1e000);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7e3d96e7ac04..12d2fdc52414 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -140,11 +140,17 @@ void evergreen_pm_misc(struct radeon_device *rdev)
 	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
 
 	if (voltage->type == VOLTAGE_SW) {
+		/* 0xff01 is a flag rather then an actual voltage */
+		if (voltage->voltage == 0xff01)
+			return;
 		if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
 			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
 			rdev->pm.current_vddc = voltage->voltage;
 			DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
 		}
+		/* 0xff01 is a flag rather then an actual voltage */
+		if (voltage->vddci == 0xff01)
+			return;
 		if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
 			radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
 			rdev->pm.current_vddci = voltage->vddci;
@@ -2007,9 +2013,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		rdev->config.evergreen.tile_config |= (3 << 0);
 		break;
 	}
-	/* num banks is 8 on all fusion asics */
+	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
 	if (rdev->flags & RADEON_IS_IGP)
-		rdev->config.evergreen.tile_config |= 8 << 4;
+		rdev->config.evergreen.tile_config |= 1 << 4;
 	else
 		rdev->config.evergreen.tile_config |=
 			((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
@@ -2695,28 +2701,25 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
 
 int evergreen_irq_process(struct radeon_device *rdev)
 {
-	u32 wptr = evergreen_get_ih_wptr(rdev);
-	u32 rptr = rdev->ih.rptr;
+	u32 wptr;
+	u32 rptr;
 	u32 src_id, src_data;
 	u32 ring_index;
 	unsigned long flags;
 	bool queue_hotplug = false;
 
-	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
-	if (!rdev->ih.enabled)
+	if (!rdev->ih.enabled || rdev->shutdown)
 		return IRQ_NONE;
 
-	spin_lock_irqsave(&rdev->ih.lock, flags);
+	wptr = evergreen_get_ih_wptr(rdev);
+	rptr = rdev->ih.rptr;
+	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
 
+	spin_lock_irqsave(&rdev->ih.lock, flags);
 	if (rptr == wptr) {
 		spin_unlock_irqrestore(&rdev->ih.lock, flags);
 		return IRQ_NONE;
 	}
-	if (rdev->shutdown) {
-		spin_unlock_irqrestore(&rdev->ih.lock, flags);
-		return IRQ_NONE;
-	}
-
 restart_ih:
 	/* display interrupts */
 	evergreen_irq_ack(rdev);
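
The irq_process rework (mirrored below for r600) is an ordering fix: the old code fetched the hardware write pointer in the variable declarations, before checking whether the interrupt ring was enabled or the device was shutting down. The shape of the fix, as a sketch with a faked register read:

/* Illustrative only: guard first, then touch the hardware. */
#include <stdbool.h>
#include <stdio.h>

struct ring { bool enabled; bool shutdown; unsigned int hw_wptr; };

static unsigned int read_hw_wptr(struct ring *r)
{
	/* stand-in for an MMIO read that is only valid when enabled */
	return r->hw_wptr;
}

static int irq_process(struct ring *r)
{
	unsigned int wptr, rptr = 0;

	if (!r->enabled || r->shutdown)
		return 0;		/* IRQ_NONE: bail before touching hw */

	wptr = read_hw_wptr(r);		/* safe only after the guard */
	return wptr != rptr;
}

int main(void)
{
	struct ring r = { .enabled = false, .hw_wptr = 5 };

	printf("handled=%d\n", irq_process(&r));
	return 0;
}
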
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 7dd45ca64e29..f79d2ccb6755 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -590,6 +590,9 @@ void r600_pm_misc(struct radeon_device *rdev)
 	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
 
 	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+		/* 0xff01 is a flag rather then an actual voltage */
+		if (voltage->voltage == 0xff01)
+			return;
 		if (voltage->voltage != rdev->pm.current_vddc) {
 			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
 			rdev->pm.current_vddc = voltage->voltage;
@@ -3294,27 +3297,26 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
 
 int r600_irq_process(struct radeon_device *rdev)
 {
-	u32 wptr = r600_get_ih_wptr(rdev);
-	u32 rptr = rdev->ih.rptr;
+	u32 wptr;
+	u32 rptr;
 	u32 src_id, src_data;
 	u32 ring_index;
 	unsigned long flags;
 	bool queue_hotplug = false;
 
-	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
-	if (!rdev->ih.enabled)
+	if (!rdev->ih.enabled || rdev->shutdown)
 		return IRQ_NONE;
 
+	wptr = r600_get_ih_wptr(rdev);
+	rptr = rdev->ih.rptr;
+	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
 	spin_lock_irqsave(&rdev->ih.lock, flags);
 
 	if (rptr == wptr) {
 		spin_unlock_irqrestore(&rdev->ih.lock, flags);
 		return IRQ_NONE;
 	}
-	if (rdev->shutdown) {
-		spin_unlock_irqrestore(&rdev->ih.lock, flags);
-		return IRQ_NONE;
-	}
 
 restart_ih:
 	/* display interrupts */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 27f45579e64b..ef0e0e016914 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -179,6 +179,7 @@ void radeon_pm_resume(struct radeon_device *rdev);
 void radeon_combios_get_power_modes(struct radeon_device *rdev);
 void radeon_atombios_get_power_modes(struct radeon_device *rdev);
 void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
+int radeon_atom_get_max_vddc(struct radeon_device *rdev, u16 *voltage);
 void rs690_pm_info(struct radeon_device *rdev);
 extern int rv6xx_get_temp(struct radeon_device *rdev);
 extern int rv770_get_temp(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index fa62a503ae70..bf2b61584cdb 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2320,6 +2320,14 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
 			le16_to_cpu(clock_info->r600.usVDDC);
 	}
 
+	/* patch up vddc if necessary */
+	if (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage == 0xff01) {
+		u16 vddc;
+
+		if (radeon_atom_get_max_vddc(rdev, &vddc) == 0)
+			rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = vddc;
+	}
+
 	if (rdev->flags & RADEON_IS_IGP) {
 		/* skip invalid modes */
 		if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
@@ -2607,6 +2615,10 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
 	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
 		return;
 
+	/* 0xff01 is a flag rather then an actual voltage */
+	if (voltage_level == 0xff01)
+		return;
+
 	switch (crev) {
 	case 1:
 		args.v1.ucVoltageType = voltage_type;
@@ -2626,7 +2638,35 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
 
+int radeon_atom_get_max_vddc(struct radeon_device *rdev,
+			     u16 *voltage)
+{
+	union set_voltage args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+	u8 frev, crev;
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return -EINVAL;
+
+	switch (crev) {
+	case 1:
+		return -EINVAL;
+	case 2:
+		args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
+		args.v2.ucVoltageMode = 0;
+		args.v2.usVoltageLevel = 0;
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
+		*voltage = le16_to_cpu(args.v2.usVoltageLevel);
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		return -EINVAL;
+	}
+
+	return 0;
+}
 
 void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
 {
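
Across the radeon hunks, 0xff01 in a BIOS voltage field is a sentinel meaning "ask the SetVoltage table for the board maximum", so every consumer must either translate it (the new radeon_atom_get_max_vddc() path) or refuse to program it as a literal value. The convention, distilled into a sketch with a stubbed table query:

/* Illustrative only: translate a sentinel instead of treating it as data. */
#include <stdint.h>
#include <stdio.h>

#define VOLTAGE_QUERY_MAX 0xff01	/* flag, not an actual voltage */

static int get_max_vddc(uint16_t *mv) { *mv = 1100; return 0; }	/* stub */

static unsigned int patch_vddc(uint16_t raw)
{
	uint16_t max;

	if (raw == VOLTAGE_QUERY_MAX && get_max_vddc(&max) == 0)
		return max;		/* translate the sentinel */
	return raw;
}

int main(void)
{
	printf("vddc=%u\n", patch_vddc(VOLTAGE_QUERY_MAX));
	printf("vddc=%u\n", patch_vddc(950));
	return 0;
}
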
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index f55b64cb59d1..b293487e5aa3 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1090,9 +1090,10 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 		break;
 	}
 
-	if (is_dp)
+	if (is_dp) {
 		args.v2.acConfig.fCoherentMode = 1;
-	else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+		args.v2.acConfig.fDPConnector = 1;
+	} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
 		if (dig->coherent_mode)
 			args.v2.acConfig.fCoherentMode = 1;
 		if (radeon_encoder->pixel_clock > 165000)
@@ -1431,7 +1432,11 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
 	if (is_dig) {
 		switch (mode) {
 		case DRM_MODE_DPMS_ON:
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+			/* some early dce3.2 boards have a bug in their transmitter control table */
+			if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+			else
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
 			if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
 				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index ef8a5babe9f7..6f508ffd1035 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -105,6 +105,9 @@ void rv770_pm_misc(struct radeon_device *rdev)
 	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
 
 	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+		/* 0xff01 is a flag rather then an actual voltage */
+		if (voltage->voltage == 0xff01)
+			return;
 		if (voltage->voltage != rdev->pm.current_vddc) {
 			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
 			rdev->pm.current_vddc = voltage->voltage;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f7440e8ce3e7..6f3289a57888 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1423,6 +1423,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH_DUAL) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index aecb5a4b8d6d..a756ee6c7df5 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -449,6 +449,7 @@
 
 #define USB_VENDOR_ID_LUMIO		0x202e
 #define USB_DEVICE_ID_CRYSTALTOUCH	0x0006
+#define USB_DEVICE_ID_CRYSTALTOUCH_DUAL	0x0007
 
 #define USB_VENDOR_ID_MCC		0x09db
 #define USB_DEVICE_ID_MCC_PMD1024LS	0x0076
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 0b2dcd0ee591..62cac4dc3b62 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -271,6 +271,8 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
 		}
 		return 1;
 	case HID_DG_CONTACTID:
+		if (!td->maxcontacts)
+			td->maxcontacts = MT_DEFAULT_MAXCONTACT;
 		input_mt_init_slots(hi->input, td->maxcontacts);
 		td->last_slot_field = usage->hid;
 		td->last_field_index = field->index;
@@ -547,9 +549,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
 	if (ret)
 		goto fail;
 
-	if (!td->maxcontacts)
-		td->maxcontacts = MT_DEFAULT_MAXCONTACT;
-
 	td->slots = kzalloc(td->maxcontacts * sizeof(struct mt_slot),
 				GFP_KERNEL);
 	if (!td->slots) {
@@ -677,6 +676,9 @@ static const struct hid_device_id mt_devices[] = {
 	{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
 		HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
 			USB_DEVICE_ID_CRYSTALTOUCH) },
+	{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
+		HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
+			USB_DEVICE_ID_CRYSTALTOUCH_DUAL) },
 
 	/* MosArt panels */
 	{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
@@ -707,10 +709,10 @@ static const struct hid_device_id mt_devices[] = {
 		HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
 			USB_DEVICE_ID_MTP)},
 	{ .driver_data = MT_CLS_CONFIDENCE,
-		HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
+		HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM,
 			USB_DEVICE_ID_MTP_STM)},
 	{ .driver_data = MT_CLS_CONFIDENCE,
-		HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
+		HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_SITRONIX,
 			USB_DEVICE_ID_MTP_SITRONIX)},
 
 	/* Touch International panels */
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index b5e892017e0c..dcb78a7a8047 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -268,6 +268,7 @@ static struct device_attribute atk_name_attr =
 static void atk_init_attribute(struct device_attribute *attr, char *name,
 		sysfs_show_func show)
 {
+	sysfs_attr_init(&attr->attr);
 	attr->attr.name = name;
 	attr->attr.mode = 0444;
 	attr->show = show;
@@ -1188,19 +1189,15 @@ static int atk_create_files(struct atk_data *data)
 	int err;
 
 	list_for_each_entry(s, &data->sensor_list, list) {
-		sysfs_attr_init(&s->input_attr.attr);
 		err = device_create_file(data->hwmon_dev, &s->input_attr);
 		if (err)
 			return err;
-		sysfs_attr_init(&s->label_attr.attr);
 		err = device_create_file(data->hwmon_dev, &s->label_attr);
 		if (err)
 			return err;
-		sysfs_attr_init(&s->limit1_attr.attr);
 		err = device_create_file(data->hwmon_dev, &s->limit1_attr);
 		if (err)
 			return err;
-		sysfs_attr_init(&s->limit2_attr.attr);
 		err = device_create_file(data->hwmon_dev, &s->limit2_attr);
 		if (err)
 			return err;
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 85e937984ff7..0070d5476dd0 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -97,9 +97,7 @@ struct platform_data {
 struct pdev_entry {
 	struct list_head list;
 	struct platform_device *pdev;
-	unsigned int cpu;
 	u16 phys_proc_id;
-	u16 cpu_core_id;
 };
 
 static LIST_HEAD(pdev_list);
@@ -653,9 +651,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
 	}
 
 	pdev_entry->pdev = pdev;
-	pdev_entry->cpu = cpu;
 	pdev_entry->phys_proc_id = TO_PHYS_ID(cpu);
-	pdev_entry->cpu_core_id = TO_CORE_ID(cpu);
 
 	list_add_tail(&pdev_entry->list, &pdev_list);
 	mutex_unlock(&pdev_list_mutex);
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 537409d07ee7..1a409c5bc9bc 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -947,6 +947,7 @@ static int aem_register_sensors(struct aem_data *data,
 
 	/* Set up read-only sensors */
 	while (ro->label) {
+		sysfs_attr_init(&sensors->dev_attr.attr);
 		sensors->dev_attr.attr.name = ro->label;
 		sensors->dev_attr.attr.mode = S_IRUGO;
 		sensors->dev_attr.show = ro->show;
@@ -963,6 +964,7 @@ static int aem_register_sensors(struct aem_data *data,
 
 	/* Set up read-write sensors */
 	while (rw->label) {
+		sysfs_attr_init(&sensors->dev_attr.attr);
 		sensors->dev_attr.attr.name = rw->label;
 		sensors->dev_attr.attr.mode = S_IRUGO | S_IWUSR;
 		sensors->dev_attr.show = rw->show;
diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
index 06d4eafcf76b..41dbf8161ed7 100644
--- a/drivers/hwmon/ibmpex.c
+++ b/drivers/hwmon/ibmpex.c
@@ -358,6 +358,7 @@ static int create_sensor(struct ibmpex_bmc_data *data, int type,
 	else if (type == POWER_SENSOR)
 		sprintf(n, power_sensor_name_templates[func], "power", counter);
 
+	sysfs_attr_init(&data->sensors[sensor].attr[func].dev_attr.attr);
 	data->sensors[sensor].attr[func].dev_attr.attr.name = n;
 	data->sensors[sensor].attr[func].dev_attr.attr.mode = S_IRUGO;
 	data->sensors[sensor].attr[func].dev_attr.show = ibmpex_show_sensor;
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index 92b42db43bcf..b39f52e2752a 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -232,6 +232,7 @@ static int s3c_hwmon_create_attr(struct device *dev,
 
 	attr = &attrs->in;
 	attr->index = channel;
+	sysfs_attr_init(&attr->dev_attr.attr);
 	attr->dev_attr.attr.name = attrs->in_name;
 	attr->dev_attr.attr.mode = S_IRUGO;
 	attr->dev_attr.show = s3c_hwmon_ch_show;
@@ -249,6 +250,7 @@ static int s3c_hwmon_create_attr(struct device *dev,
 
 	attr = &attrs->label;
 	attr->index = channel;
+	sysfs_attr_init(&attr->dev_attr.attr);
 	attr->dev_attr.attr.name = attrs->label_name;
 	attr->dev_attr.attr.mode = S_IRUGO;
 	attr->dev_attr.show = s3c_hwmon_label_show;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index f660cd04ec2f..31fb44085c9b 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1463,9 +1463,9 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_qp_attributes attrs;
 	int disconnect = 1;
 	int release = 0;
-	int abort = 0;
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(hdr);
+	int ret;
 
 	ep = lookup_tid(t, tid);
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
@@ -1501,10 +1501,12 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
1501 start_ep_timer(ep); 1501 start_ep_timer(ep);
1502 __state_set(&ep->com, CLOSING); 1502 __state_set(&ep->com, CLOSING);
1503 attrs.next_state = C4IW_QP_STATE_CLOSING; 1503 attrs.next_state = C4IW_QP_STATE_CLOSING;
1504 abort = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1504 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1505 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1505 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1506 peer_close_upcall(ep); 1506 if (ret != -ECONNRESET) {
1507 disconnect = 1; 1507 peer_close_upcall(ep);
1508 disconnect = 1;
1509 }
1508 break; 1510 break;
1509 case ABORTING: 1511 case ABORTING:
1510 disconnect = 0; 1512 disconnect = 0;
@@ -2109,15 +2111,16 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2109 break; 2111 break;
2110 } 2112 }
2111 2113
2112 mutex_unlock(&ep->com.mutex);
2113 if (close) { 2114 if (close) {
2114 if (abrupt) 2115 if (abrupt) {
2115 ret = abort_connection(ep, NULL, gfp); 2116 close_complete_upcall(ep);
2116 else 2117 ret = send_abort(ep, NULL, gfp);
2118 } else
2117 ret = send_halfclose(ep, gfp); 2119 ret = send_halfclose(ep, gfp);
2118 if (ret) 2120 if (ret)
2119 fatal = 1; 2121 fatal = 1;
2120 } 2122 }
2123 mutex_unlock(&ep->com.mutex);
2121 if (fatal) 2124 if (fatal)
2122 release_ep_resources(ep); 2125 release_ep_resources(ep);
2123 return ret; 2126 return ret;
@@ -2301,6 +2304,31 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
2301 return 0; 2304 return 0;
2302} 2305}
2303 2306
2307static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
2308{
2309 struct cpl_abort_req_rss *req = cplhdr(skb);
2310 struct c4iw_ep *ep;
2311 struct tid_info *t = dev->rdev.lldi.tids;
2312 unsigned int tid = GET_TID(req);
2313
2314 ep = lookup_tid(t, tid);
2315 if (is_neg_adv_abort(req->status)) {
2316 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
2317 ep->hwtid);
2318 kfree_skb(skb);
2319 return 0;
2320 }
2321 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
2322 ep->com.state);
2323
2324 /*
2325 * Wake up any threads in rdma_init() or rdma_fini().
2326 */
2327 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
2328 sched(dev, skb);
2329 return 0;
2330}
2331
2304/* 2332/*
2305 * Most upcalls from the T4 Core go to sched() to 2333 * Most upcalls from the T4 Core go to sched() to
2306 * schedule the processing on a work queue. 2334 * schedule the processing on a work queue.
@@ -2317,7 +2345,7 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
2317 [CPL_PASS_ESTABLISH] = sched, 2345 [CPL_PASS_ESTABLISH] = sched,
2318 [CPL_PEER_CLOSE] = sched, 2346 [CPL_PEER_CLOSE] = sched,
2319 [CPL_CLOSE_CON_RPL] = sched, 2347 [CPL_CLOSE_CON_RPL] = sched,
2320 [CPL_ABORT_REQ_RSS] = sched, 2348 [CPL_ABORT_REQ_RSS] = peer_abort_intr,
2321 [CPL_RDMA_TERMINATE] = sched, 2349 [CPL_RDMA_TERMINATE] = sched,
2322 [CPL_FW4_ACK] = sched, 2350 [CPL_FW4_ACK] = sched,
2323 [CPL_SET_TCB_RPL] = set_tcb_rpl, 2351 [CPL_SET_TCB_RPL] = set_tcb_rpl,
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 8d8f8add6fcd..1720dc790d13 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -801,6 +801,10 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
801 if (ucontext) { 801 if (ucontext) {
802 memsize = roundup(memsize, PAGE_SIZE); 802 memsize = roundup(memsize, PAGE_SIZE);
803 hwentries = memsize / sizeof *chp->cq.queue; 803 hwentries = memsize / sizeof *chp->cq.queue;
804 while (hwentries > T4_MAX_IQ_SIZE) {
805 memsize -= PAGE_SIZE;
806 hwentries = memsize / sizeof *chp->cq.queue;
807 }
804 } 808 }
805 chp->cq.size = hwentries; 809 chp->cq.size = hwentries;
806 chp->cq.memsize = memsize; 810 chp->cq.memsize = memsize;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 273ffe49525a..0347eed4a167 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -625,7 +625,7 @@ pbl_done:
625 mhp->attr.perms = c4iw_ib_to_tpt_access(acc); 625 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
626 mhp->attr.va_fbo = virt; 626 mhp->attr.va_fbo = virt;
627 mhp->attr.page_size = shift - 12; 627 mhp->attr.page_size = shift - 12;
628 mhp->attr.len = (u32) length; 628 mhp->attr.len = length;
629 629
630 err = register_mem(rhp, php, mhp, shift); 630 err = register_mem(rhp, php, mhp, shift);
631 if (err) 631 if (err)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 3b773b05a898..a41578e48c7b 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1207,11 +1207,8 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1207 c4iw_get_ep(&qhp->ep->com); 1207 c4iw_get_ep(&qhp->ep->com);
1208 } 1208 }
1209 ret = rdma_fini(rhp, qhp, ep); 1209 ret = rdma_fini(rhp, qhp, ep);
1210 if (ret) { 1210 if (ret)
1211 if (internal)
1212 c4iw_get_ep(&qhp->ep->com);
1213 goto err; 1211 goto err;
1214 }
1215 break; 1212 break;
1216 case C4IW_QP_STATE_TERMINATE: 1213 case C4IW_QP_STATE_TERMINATE:
1217 set_state(qhp, C4IW_QP_STATE_TERMINATE); 1214 set_state(qhp, C4IW_QP_STATE_TERMINATE);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 9f53e68a096a..8ec5237031a0 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -469,6 +469,8 @@ static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
469#define IB_7322_LT_STATE_RECOVERIDLE 0x0f 469#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
470#define IB_7322_LT_STATE_CFGENH 0x10 470#define IB_7322_LT_STATE_CFGENH 0x10
471#define IB_7322_LT_STATE_CFGTEST 0x11 471#define IB_7322_LT_STATE_CFGTEST 0x11
472#define IB_7322_LT_STATE_CFGWAITRMTTEST 0x12
473#define IB_7322_LT_STATE_CFGWAITENH 0x13
472 474
473/* link state machine states from IBC */ 475/* link state machine states from IBC */
474#define IB_7322_L_STATE_DOWN 0x0 476#define IB_7322_L_STATE_DOWN 0x0
@@ -498,8 +500,10 @@ static const u8 qib_7322_physportstate[0x20] = {
498 IB_PHYSPORTSTATE_LINK_ERR_RECOVER, 500 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
499 [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH, 501 [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
500 [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN, 502 [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
501 [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN, 503 [IB_7322_LT_STATE_CFGWAITRMTTEST] =
502 [0x13] = IB_PHYSPORTSTATE_CFG_WAIT_ENH, 504 IB_PHYSPORTSTATE_CFG_TRAIN,
505 [IB_7322_LT_STATE_CFGWAITENH] =
506 IB_PHYSPORTSTATE_CFG_WAIT_ENH,
503 [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN, 507 [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
504 [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN, 508 [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
505 [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN, 509 [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
@@ -1692,7 +1696,9 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1692 break; 1696 break;
1693 } 1697 }
1694 1698
1695 if (ibclt == IB_7322_LT_STATE_CFGTEST && 1699 if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
1700 ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
1701 ibclt == IB_7322_LT_STATE_LINKUP) &&
1696 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) { 1702 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1697 force_h1(ppd); 1703 force_h1(ppd);
1698 ppd->cpspec->qdr_reforce = 1; 1704 ppd->cpspec->qdr_reforce = 1;
@@ -7301,12 +7307,17 @@ static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7301static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable) 7307static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7302{ 7308{
7303 u64 data = qib_read_kreg_port(ppd, krp_serdesctrl); 7309 u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7304 printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n", 7310 u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
7305 ppd->dd->unit, ppd->port, (enable ? "on" : "off")); 7311
7306 if (enable) 7312 if (enable && !state) {
7313 printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS on\n",
7314 ppd->dd->unit, ppd->port);
7307 data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN); 7315 data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7308 else 7316 } else if (!enable && state) {
7317 printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS off\n",
7318 ppd->dd->unit, ppd->port);
7309 data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN); 7319 data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7320 }
7310 qib_write_kreg_port(ppd, krp_serdesctrl, data); 7321 qib_write_kreg_port(ppd, krp_serdesctrl, data);
7311} 7322}
7312 7323
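
The serdes_7322_los_enable() rework above reads the current RXLOSEN state first and only logs when the requested state actually differs, which stops log spam from repeated no-op calls. A slightly tighter variant of the same idea, which also skips the redundant register write (the merged code still writes unconditionally), would be:

    u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
    bool cur = !!(data & SYM_MASK(IBSerdesCtrl_0, RXLOSEN));

    if (!enable == !cur)
            return;                      /* already in the requested state */
    printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n",
           ppd->dd->unit, ppd->port, enable ? "on" : "off");
    if (enable)
            data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
    else
            data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
    qib_write_kreg_port(ppd, krp_serdesctrl, data);
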
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index a693c56ec8a6..6ae57d23004a 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -96,8 +96,12 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
96 * states, or if it transitions from any of the up (INIT or better) 96 * states, or if it transitions from any of the up (INIT or better)
97 * states into any of the down states (except link recovery), then 97 * states into any of the down states (except link recovery), then
98 * call the chip-specific code to take appropriate actions. 98 * call the chip-specific code to take appropriate actions.
99 *
100 * ppd->lflags could be 0 if this is the first time the interrupt
 101 * handler has been called but the link is already up.
99 */ 102 */
100 if (lstate >= IB_PORT_INIT && (ppd->lflags & QIBL_LINKDOWN) && 103 if (lstate >= IB_PORT_INIT &&
104 (!ppd->lflags || (ppd->lflags & QIBL_LINKDOWN)) &&
101 ltstate == IB_PHYSPORTSTATE_LINKUP) { 105 ltstate == IB_PHYSPORTSTATE_LINKUP) {
102 /* transitioned to UP */ 106 /* transitioned to UP */
103 if (dd->f_ib_updown(ppd, 1, ibcs)) 107 if (dd->f_ib_updown(ppd, 1, ibcs))
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index be0921ef6b52..4cf25347b015 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -111,7 +111,8 @@ static void evdev_event(struct input_handle *handle,
111 111
112 rcu_read_unlock(); 112 rcu_read_unlock();
113 113
114 wake_up_interruptible(&evdev->wait); 114 if (type == EV_SYN && code == SYN_REPORT)
115 wake_up_interruptible(&evdev->wait);
115} 116}
116 117
117static int evdev_fasync(int fd, struct file *file, int on) 118static int evdev_fasync(int fd, struct file *file, int on)
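
The evdev change above defers the reader wakeup until EV_SYN/SYN_REPORT, so sleeping readers are woken once per input packet instead of once per event. From user space the packet boundary is exactly that terminator event; a small, self-contained reader that counts events per packet (the device path is only an example):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <linux/input.h>

    int main(void)
    {
            struct input_event ev;
            int fd = open("/dev/input/event0", O_RDONLY);
            int n = 0;

            if (fd < 0)
                    return 1;
            while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
                    n++;
                    if (ev.type == EV_SYN && ev.code == SYN_REPORT) {
                            printf("packet of %d events\n", n);
                            n = 0;
                    }
            }
            return 0;
    }
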
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 75e11c7b70fd..da38d97a51b1 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1756,7 +1756,7 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
1756 } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) { 1756 } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
1757 mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum - 1757 mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
1758 dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1, 1758 dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1,
1759 clamp(mt_slots, 2, 32); 1759 mt_slots = clamp(mt_slots, 2, 32);
1760 } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { 1760 } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
1761 mt_slots = 2; 1761 mt_slots = 2;
1762 } else { 1762 } else {
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index f23a743817db..33d0bdc837c0 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -209,6 +209,7 @@ static void omap_kp_tasklet(unsigned long data)
209#endif 209#endif
210 } 210 }
211 } 211 }
212 input_sync(omap_kp_data->input);
212 memcpy(keypad_state, new_state, sizeof(keypad_state)); 213 memcpy(keypad_state, new_state, sizeof(keypad_state));
213 214
214 if (key_down) { 215 if (key_down) {
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 834cf98e7efb..6876700a4469 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -32,7 +32,7 @@ static const struct {
32 [SH_KEYSC_MODE_3] = { 2, 4, 7 }, 32 [SH_KEYSC_MODE_3] = { 2, 4, 7 },
33 [SH_KEYSC_MODE_4] = { 3, 6, 6 }, 33 [SH_KEYSC_MODE_4] = { 3, 6, 6 },
34 [SH_KEYSC_MODE_5] = { 4, 6, 7 }, 34 [SH_KEYSC_MODE_5] = { 4, 6, 7 },
35 [SH_KEYSC_MODE_6] = { 5, 7, 7 }, 35 [SH_KEYSC_MODE_6] = { 5, 8, 8 },
36}; 36};
37 37
38struct sh_keysc_priv { 38struct sh_keysc_priv {
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 257e033986e4..0110b5a3a167 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -187,7 +187,7 @@ static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
187 if (size == 0) 187 if (size == 0)
188 size = xres ? : 1; 188 size = xres ? : 1;
189 189
190 clamp(value, min, max); 190 value = clamp(value, min, max);
191 191
192 mousedev->packet.x = ((value - min) * xres) / size; 192 mousedev->packet.x = ((value - min) * xres) / size;
193 mousedev->packet.abs_event = 1; 193 mousedev->packet.abs_event = 1;
@@ -201,7 +201,7 @@ static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
201 if (size == 0) 201 if (size == 0)
202 size = yres ? : 1; 202 size = yres ? : 1;
203 203
204 clamp(value, min, max); 204 value = clamp(value, min, max);
205 205
206 mousedev->packet.y = yres - ((value - min) * yres) / size; 206 mousedev->packet.y = yres - ((value - min) * yres) / size;
207 mousedev->packet.abs_event = 1; 207 mousedev->packet.abs_event = 1;
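
Both mousedev hunks and the input.c hunk above fix the same misuse: clamp() is an expression macro that yields the bounded value and never writes through its argument, so a bare clamp(value, min, max); is a no-op. A plain-C stand-in (the kernel macro additionally type-checks its arguments) makes the difference visible:

    #include <stdio.h>

    #define clamp(val, lo, hi) \
            ((val) < (lo) ? (lo) : (val) > (hi) ? (hi) : (val))

    int main(void)
    {
            int value = 100;

            clamp(value, 0, 32);            /* result discarded: still 100 */
            printf("%d\n", value);
            value = clamp(value, 0, 32);    /* correct: value becomes 32 */
            printf("%d\n", value);
            return 0;
    }
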
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index 59de638225fe..e35058bcd7b9 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -156,8 +156,10 @@ static int if_open(struct tty_struct *tty, struct file *filp)
156 if (!cs || !try_module_get(cs->driver->owner)) 156 if (!cs || !try_module_get(cs->driver->owner))
157 return -ENODEV; 157 return -ENODEV;
158 158
159 if (mutex_lock_interruptible(&cs->mutex)) 159 if (mutex_lock_interruptible(&cs->mutex)) {
160 module_put(cs->driver->owner);
160 return -ERESTARTSYS; 161 return -ERESTARTSYS;
162 }
161 tty->driver_data = cs; 163 tty->driver_data = cs;
162 164
163 ++cs->open_count; 165 ++cs->open_count;
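
The gigaset fix above restores the invariant that every early return after a successful try_module_get() drops the reference again; the interrupted mutex acquisition had been leaking one, pinning the driver module forever. The farsync hunk further down applies the same rule after a failed hdlc_open(). The generic shape, with illustrative names:

    static int open_sketch(struct module *owner, struct mutex *lock)
    {
            int err;

            if (!try_module_get(owner))
                    return -ENODEV;
            err = mutex_lock_interruptible(lock);
            if (err) {
                    module_put(owner);      /* balance try_module_get() */
                    return err;
            }
            /* ... device setup under the mutex ... */
            mutex_unlock(lock);
            return 0;
    }
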
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index ee5109a3cd98..42f067347bc7 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -495,14 +495,14 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
495 } 495 }
496 } 496 }
497 497
498 dev->stats.tx_packets++;
499 dev->stats.tx_bytes += skb->len;
500
498 if (atomic_dec_return(&queued_msg->use_count) == 0) { 501 if (atomic_dec_return(&queued_msg->use_count) == 0) {
499 dev_kfree_skb(skb); 502 dev_kfree_skb(skb);
500 kfree(queued_msg); 503 kfree(queued_msg);
501 } 504 }
502 505
503 dev->stats.tx_packets++;
504 dev->stats.tx_bytes += skb->len;
505
506 return NETDEV_TX_OK; 506 return NETDEV_TX_OK;
507} 507}
508 508
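
In xpnet, dropping the last use_count reference frees the skb, so the old code's later reads of skb->len were a use-after-free. The fix moves the stats update ahead of the release; the hp100, netxen and pxa168 hunks below reorder their stats updates for the same family of reasons (before the lock is dropped, respectively before the descriptor is handed to hardware). A defensive variant snapshots the length first:

    unsigned int len = skb->len;        /* read while the skb is still ours */

    dev->stats.tx_packets++;
    dev->stats.tx_bytes += len;

    if (atomic_dec_return(&queued_msg->use_count) == 0) {
            dev_kfree_skb(skb);         /* skb is invalid past this point */
            kfree(queued_msg);
    }
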
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index d84f6e8903a5..5b732988d493 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -412,7 +412,7 @@ el2_open(struct net_device *dev)
412 outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR); 412 outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
413 outb_p(0x00, E33G_IDCFR); 413 outb_p(0x00, E33G_IDCFR);
414 msleep(1); 414 msleep(1);
415 free_irq(*irqp, el2_probe_interrupt); 415 free_irq(*irqp, &seen);
416 if (!seen) 416 if (!seen)
417 continue; 417 continue;
418 418
@@ -422,6 +422,7 @@ el2_open(struct net_device *dev)
422 continue; 422 continue;
423 if (retval < 0) 423 if (retval < 0)
424 goto err_disable; 424 goto err_disable;
425 break;
425 } while (*++irqp); 426 } while (*++irqp);
426 427
427 if (*irqp == 0) { 428 if (*irqp == 0) {
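
The 3c503 probe requested its test interrupt with &seen as the dev_id cookie but freed it with the handler address, so free_irq() could never find the action to remove; the added break also stops the probe loop from continuing past a successful registration. The pairing rule in isolation:

    unsigned long seen = 0;

    /* free_irq() must receive the same dev_id that request_irq() got,
     * otherwise the registered action is never matched and released. */
    if (request_irq(irq, el2_probe_interrupt, 0, "3c503-probe", &seen) == 0) {
            /* ... provoke the interrupt, msleep(1) ... */
            free_irq(irq, &seen);
    }
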
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 68d45ba2d9b9..6c019e148546 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -52,13 +52,13 @@ MODULE_DESCRIPTION(DRV_DESC);
52MODULE_ALIAS("platform:bfin_mac"); 52MODULE_ALIAS("platform:bfin_mac");
53 53
54#if defined(CONFIG_BFIN_MAC_USE_L1) 54#if defined(CONFIG_BFIN_MAC_USE_L1)
55# define bfin_mac_alloc(dma_handle, size) l1_data_sram_zalloc(size) 55# define bfin_mac_alloc(dma_handle, size, num) l1_data_sram_zalloc(size*num)
56# define bfin_mac_free(dma_handle, ptr) l1_data_sram_free(ptr) 56# define bfin_mac_free(dma_handle, ptr, num) l1_data_sram_free(ptr)
57#else 57#else
58# define bfin_mac_alloc(dma_handle, size) \ 58# define bfin_mac_alloc(dma_handle, size, num) \
59 dma_alloc_coherent(NULL, size, dma_handle, GFP_KERNEL) 59 dma_alloc_coherent(NULL, size*num, dma_handle, GFP_KERNEL)
60# define bfin_mac_free(dma_handle, ptr) \ 60# define bfin_mac_free(dma_handle, ptr, num) \
61 dma_free_coherent(NULL, sizeof(*ptr), ptr, dma_handle) 61 dma_free_coherent(NULL, sizeof(*ptr)*num, ptr, dma_handle)
62#endif 62#endif
63 63
64#define PKT_BUF_SZ 1580 64#define PKT_BUF_SZ 1580
@@ -95,7 +95,7 @@ static void desc_list_free(void)
95 t = t->next; 95 t = t->next;
96 } 96 }
97 } 97 }
98 bfin_mac_free(dma_handle, tx_desc); 98 bfin_mac_free(dma_handle, tx_desc, CONFIG_BFIN_TX_DESC_NUM);
99 } 99 }
100 100
101 if (rx_desc) { 101 if (rx_desc) {
@@ -109,7 +109,7 @@ static void desc_list_free(void)
109 r = r->next; 109 r = r->next;
110 } 110 }
111 } 111 }
112 bfin_mac_free(dma_handle, rx_desc); 112 bfin_mac_free(dma_handle, rx_desc, CONFIG_BFIN_RX_DESC_NUM);
113 } 113 }
114} 114}
115 115
@@ -126,13 +126,13 @@ static int desc_list_init(void)
126#endif 126#endif
127 127
128 tx_desc = bfin_mac_alloc(&dma_handle, 128 tx_desc = bfin_mac_alloc(&dma_handle,
129 sizeof(struct net_dma_desc_tx) * 129 sizeof(struct net_dma_desc_tx),
130 CONFIG_BFIN_TX_DESC_NUM); 130 CONFIG_BFIN_TX_DESC_NUM);
131 if (tx_desc == NULL) 131 if (tx_desc == NULL)
132 goto init_error; 132 goto init_error;
133 133
134 rx_desc = bfin_mac_alloc(&dma_handle, 134 rx_desc = bfin_mac_alloc(&dma_handle,
135 sizeof(struct net_dma_desc_rx) * 135 sizeof(struct net_dma_desc_rx),
136 CONFIG_BFIN_RX_DESC_NUM); 136 CONFIG_BFIN_RX_DESC_NUM);
137 if (rx_desc == NULL) 137 if (rx_desc == NULL)
138 goto init_error; 138 goto init_error;
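
The bfin_mac macros previously allocated a whole descriptor ring but released only sizeof(*ptr) bytes of it, because dma_free_coherent() was handed a single element's size. Threading the element count through both macros keeps allocation and free symmetric. The invariant, stated without the macros (NULL as the device matches this driver's own usage):

    size_t sz = sizeof(struct net_dma_desc_tx) * CONFIG_BFIN_TX_DESC_NUM;
    dma_addr_t handle;
    void *ring;

    ring = dma_alloc_coherent(NULL, sz, &handle, GFP_KERNEL);
    if (!ring)
            return -ENOMEM;
    /* ... use the ring ... */
    dma_free_coherent(NULL, sz, ring, handle);  /* same sz as allocated */
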
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 652b30e525d0..eafe44a528ac 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1297,6 +1297,7 @@ static inline int slave_enable_netpoll(struct slave *slave)
1297 goto out; 1297 goto out;
1298 1298
1299 np->dev = slave->dev; 1299 np->dev = slave->dev;
1300 strlcpy(np->dev_name, slave->dev->name, IFNAMSIZ);
1300 err = __netpoll_setup(np); 1301 err = __netpoll_setup(np);
1301 if (err) { 1302 if (err) {
1302 kfree(np); 1303 kfree(np);
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 7a84e45487e8..7583a9572bcc 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -105,7 +105,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
105 goto out_ep; 105 goto out_ep;
106 106
107 fep->fcc.mem = (void __iomem *)cpm2_immr; 107 fep->fcc.mem = (void __iomem *)cpm2_immr;
108 fpi->dpram_offset = cpm_dpalloc(128, 8); 108 fpi->dpram_offset = cpm_dpalloc(128, 32);
109 if (IS_ERR_VALUE(fpi->dpram_offset)) { 109 if (IS_ERR_VALUE(fpi->dpram_offset)) {
110 ret = fpi->dpram_offset; 110 ret = fpi->dpram_offset;
111 goto out_fcccp; 111 goto out_fcccp;
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 8e10d2f6a5ad..c3ecb118c1df 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -1580,12 +1580,12 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
1580 hp100_outl(ringptr->pdl_paddr, TX_PDA_L); /* Low Prio. Queue */ 1580 hp100_outl(ringptr->pdl_paddr, TX_PDA_L); /* Low Prio. Queue */
1581 1581
1582 lp->txrcommit++; 1582 lp->txrcommit++;
1583 spin_unlock_irqrestore(&lp->lock, flags);
1584 1583
1585 /* Update statistics */
1586 dev->stats.tx_packets++; 1584 dev->stats.tx_packets++;
1587 dev->stats.tx_bytes += skb->len; 1585 dev->stats.tx_bytes += skb->len;
1588 1586
1587 spin_unlock_irqrestore(&lp->lock, flags);
1588
1589 return NETDEV_TX_OK; 1589 return NETDEV_TX_OK;
1590 1590
1591drop: 1591drop:
diff --git a/drivers/net/hplance.c b/drivers/net/hplance.c
index b6060f7538df..a900d5bf2948 100644
--- a/drivers/net/hplance.c
+++ b/drivers/net/hplance.c
@@ -135,7 +135,7 @@ static void __devexit hplance_remove_one(struct dio_dev *d)
135} 135}
136 136
137/* Initialise a single lance board at the given DIO device */ 137/* Initialise a single lance board at the given DIO device */
138static void __init hplance_init(struct net_device *dev, struct dio_dev *d) 138static void __devinit hplance_init(struct net_device *dev, struct dio_dev *d)
139{ 139{
140 unsigned long va = (d->resource.start + DIO_VIRADDRBASE); 140 unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
141 struct hplance_private *lp; 141 struct hplance_private *lp;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index b644383017f9..c0788a31ff0f 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1965,11 +1965,11 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1965 1965
1966 netxen_tso_check(netdev, tx_ring, first_desc, skb); 1966 netxen_tso_check(netdev, tx_ring, first_desc, skb);
1967 1967
1968 netxen_nic_update_cmd_producer(adapter, tx_ring);
1969
1970 adapter->stats.txbytes += skb->len; 1968 adapter->stats.txbytes += skb->len;
1971 adapter->stats.xmitcalled++; 1969 adapter->stats.xmitcalled++;
1972 1970
1971 netxen_nic_update_cmd_producer(adapter, tx_ring);
1972
1973 return NETDEV_TX_OK; 1973 return NETDEV_TX_OK;
1974 1974
1975drop_packet: 1975drop_packet:
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 392a6c4b72e5..a70244306c94 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -58,6 +58,7 @@ config BROADCOM_PHY
58 58
59config BCM63XX_PHY 59config BCM63XX_PHY
60 tristate "Drivers for Broadcom 63xx SOCs internal PHY" 60 tristate "Drivers for Broadcom 63xx SOCs internal PHY"
61 depends on BCM63XX
61 ---help--- 62 ---help---
62 Currently supports the 6348 and 6358 PHYs. 63 Currently supports the 6348 and 6358 PHYs.
63 64
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index b0c9522bb535..2cd8dc5847b4 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -543,11 +543,20 @@ static void recalibrate(struct dp83640_clock *clock)
543 543
544/* time stamping methods */ 544/* time stamping methods */
545 545
546static void decode_evnt(struct dp83640_private *dp83640, 546static int decode_evnt(struct dp83640_private *dp83640,
547 struct phy_txts *phy_txts, u16 ests) 547 void *data, u16 ests)
548{ 548{
549 struct phy_txts *phy_txts;
549 struct ptp_clock_event event; 550 struct ptp_clock_event event;
550 int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK; 551 int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK;
552 u16 ext_status = 0;
553
554 if (ests & MULT_EVNT) {
555 ext_status = *(u16 *) data;
556 data += sizeof(ext_status);
557 }
558
559 phy_txts = data;
551 560
552 switch (words) { /* fall through in every case */ 561 switch (words) { /* fall through in every case */
553 case 3: 562 case 3:
@@ -565,6 +574,9 @@ static void decode_evnt(struct dp83640_private *dp83640,
565 event.timestamp = phy2txts(&dp83640->edata); 574 event.timestamp = phy2txts(&dp83640->edata);
566 575
567 ptp_clock_event(dp83640->clock->ptp_clock, &event); 576 ptp_clock_event(dp83640->clock->ptp_clock, &event);
577
578 words = ext_status ? words + 2 : words + 1;
579 return words * sizeof(u16);
568} 580}
569 581
570static void decode_rxts(struct dp83640_private *dp83640, 582static void decode_rxts(struct dp83640_private *dp83640,
@@ -643,9 +655,7 @@ static void decode_status_frame(struct dp83640_private *dp83640,
643 655
644 } else if (PSF_EVNT == type && len >= sizeof(*phy_txts)) { 656 } else if (PSF_EVNT == type && len >= sizeof(*phy_txts)) {
645 657
646 phy_txts = (struct phy_txts *) ptr; 658 size = decode_evnt(dp83640, ptr, ests);
647 decode_evnt(dp83640, phy_txts, ests);
648 size = sizeof(*phy_txts);
649 659
650 } else { 660 } else {
651 size = 0; 661 size = 0;
@@ -1034,8 +1044,8 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
1034 1044
1035 if (is_status_frame(skb, type)) { 1045 if (is_status_frame(skb, type)) {
1036 decode_status_frame(dp83640, skb); 1046 decode_status_frame(dp83640, skb);
1037 /* Let the stack drop this frame. */ 1047 kfree_skb(skb);
1038 return false; 1048 return true;
1039 } 1049 }
1040 1050
1041 SKB_PTP_TYPE(skb) = type; 1051 SKB_PTP_TYPE(skb) = type;
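
decode_evnt() now returns the number of bytes it consumed (an optional 16-bit extended-status word plus the timestamp words announced in ests), so decode_status_frame() can step over back-to-back event records inside one status frame. The caller side reduces to a standard cursor walk (decode_one is a hypothetical stand-in for the per-record decoder):

    while (len >= sizeof(u16)) {
            int size = decode_one(priv, ptr, len);  /* bytes consumed */
            if (size <= 0)
                    break;                          /* unknown record */
            ptr += size;
            len -= size;
    }
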
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index a1b82c9c67d2..c554a397e558 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -523,7 +523,7 @@ static void ppp_async_process(unsigned long arg)
523#define PUT_BYTE(ap, buf, c, islcp) do { \ 523#define PUT_BYTE(ap, buf, c, islcp) do { \
524 if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\ 524 if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
525 *buf++ = PPP_ESCAPE; \ 525 *buf++ = PPP_ESCAPE; \
526 *buf++ = c ^ 0x20; \ 526 *buf++ = c ^ PPP_TRANS; \
527 } else \ 527 } else \
528 *buf++ = c; \ 528 *buf++ = c; \
529} while (0) 529} while (0)
@@ -896,7 +896,7 @@ ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
896 sp = skb_put(skb, n); 896 sp = skb_put(skb, n);
897 memcpy(sp, buf, n); 897 memcpy(sp, buf, n);
898 if (ap->state & SC_ESCAPE) { 898 if (ap->state & SC_ESCAPE) {
899 sp[0] ^= 0x20; 899 sp[0] ^= PPP_TRANS;
900 ap->state &= ~SC_ESCAPE; 900 ap->state &= ~SC_ESCAPE;
901 } 901 }
902 } 902 }
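
Replacing the literal 0x20 with PPP_TRANS documents what the XOR actually is: RFC 1662 async framing escapes a flagged octet by emitting PPP_ESCAPE (0x7d) followed by the octet XORed with 0x20, and the receiver undoes the same transform. A self-contained illustration:

    #include <stdio.h>

    #define PPP_ESCAPE 0x7d
    #define PPP_TRANS  0x20     /* XOR applied to escaped octets */

    static int stuff_byte(unsigned char c, unsigned char *out, int escape)
    {
            if (escape) {
                    out[0] = PPP_ESCAPE;
                    out[1] = c ^ PPP_TRANS;
                    return 2;
            }
            out[0] = c;
            return 1;
    }

    int main(void)
    {
            unsigned char buf[2];
            int n = stuff_byte(0x7e, buf, 1);   /* flag byte: must escape */

            printf("%d bytes: %02x %02x\n", n, buf[0], buf[1]);  /* 7d 5e */
            return 0;
    }
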
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index 89f7540d90f9..5f597ca592bb 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -1273,7 +1273,7 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1273 wmb(); 1273 wmb();
1274 wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD); 1274 wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
1275 1275
1276 stats->tx_bytes += skb->len; 1276 stats->tx_bytes += length;
1277 stats->tx_packets++; 1277 stats->tx_packets++;
1278 dev->trans_start = jiffies; 1278 dev->trans_start = jiffies;
1279 if (pep->tx_ring_size - pep->tx_desc_count <= 1) { 1279 if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index ef1ce2ebeb4a..05d81780d1fd 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1621,7 +1621,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1621 * 1621 *
1622 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec 1622 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
1623 */ 1623 */
1624 static const struct { 1624 static const struct rtl_mac_info {
1625 u32 mask; 1625 u32 mask;
1626 u32 val; 1626 u32 val;
1627 int mac_version; 1627 int mac_version;
@@ -1689,7 +1689,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1689 1689
1690 /* Catch-all */ 1690 /* Catch-all */
1691 { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE } 1691 { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
1692 }, *p = mac_info; 1692 };
1693 const struct rtl_mac_info *p = mac_info;
1693 u32 reg; 1694 u32 reg;
1694 1695
1695 reg = RTL_R32(TxConfig); 1696 reg = RTL_R32(TxConfig);
@@ -3681,7 +3682,7 @@ static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
3681 3682
3682static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) 3683static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
3683{ 3684{
3684 static const struct { 3685 static const struct rtl_cfg2_info {
3685 u32 mac_version; 3686 u32 mac_version;
3686 u32 clk; 3687 u32 clk;
3687 u32 val; 3688 u32 val;
@@ -3690,7 +3691,8 @@ static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
3690 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff }, 3691 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
3691 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe 3692 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
3692 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff } 3693 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
3693 }, *p = cfg2_info; 3694 };
3695 const struct rtl_cfg2_info *p = cfg2_info;
3694 unsigned int i; 3696 unsigned int i;
3695 u32 clk; 3697 u32 clk;
3696 3698
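
Both r8169 hunks exist because the tables were anonymous struct types: once the declarator list ends, C offers no way to name that type again, so the iterator had to ride along in the same declaration (..., *p = mac_info;). Giving the struct a tag lets the cursor be declared as an ordinary separate statement:

    static const struct rtl_mac_info_sketch {   /* hypothetical tag */
            unsigned int mask, val;
            int mac_version;
    } mac_info[] = {
            { 0x7c800000, 0x28000000, 1 },
            { 0x00000000, 0x00000000, 0 },      /* catch-all */
    };
    const struct rtl_mac_info_sketch *p = mac_info;  /* now expressible */
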
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 74e94054ab1a..5235f48be1be 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -460,7 +460,23 @@ static u32 tun_net_fix_features(struct net_device *dev, u32 features)
460 460
461 return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); 461 return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
462} 462}
463 463#ifdef CONFIG_NET_POLL_CONTROLLER
464static void tun_poll_controller(struct net_device *dev)
465{
466 /*
467 * Tun only receives frames when:
468 * 1) the char device endpoint gets data from user space
469 * 2) the tun socket gets a sendmsg call from user space
 470 * Since both of those are synchronous operations, we are guaranteed
 471 * never to have pending data when we poll for it,
 472 * so there's nothing to do here but return.
473 * We need this though so netpoll recognizes us as an interface that
474 * supports polling, which enables bridge devices in virt setups to
475 * still use netconsole
476 */
477 return;
478}
479#endif
464static const struct net_device_ops tun_netdev_ops = { 480static const struct net_device_ops tun_netdev_ops = {
465 .ndo_uninit = tun_net_uninit, 481 .ndo_uninit = tun_net_uninit,
466 .ndo_open = tun_net_open, 482 .ndo_open = tun_net_open,
@@ -468,6 +484,9 @@ static const struct net_device_ops tun_netdev_ops = {
468 .ndo_start_xmit = tun_net_xmit, 484 .ndo_start_xmit = tun_net_xmit,
469 .ndo_change_mtu = tun_net_change_mtu, 485 .ndo_change_mtu = tun_net_change_mtu,
470 .ndo_fix_features = tun_net_fix_features, 486 .ndo_fix_features = tun_net_fix_features,
487#ifdef CONFIG_NET_POLL_CONTROLLER
488 .ndo_poll_controller = tun_poll_controller,
489#endif
471}; 490};
472 491
473static const struct net_device_ops tap_netdev_ops = { 492static const struct net_device_ops tap_netdev_ops = {
@@ -480,6 +499,9 @@ static const struct net_device_ops tap_netdev_ops = {
480 .ndo_set_multicast_list = tun_net_mclist, 499 .ndo_set_multicast_list = tun_net_mclist,
481 .ndo_set_mac_address = eth_mac_addr, 500 .ndo_set_mac_address = eth_mac_addr,
482 .ndo_validate_addr = eth_validate_addr, 501 .ndo_validate_addr = eth_validate_addr,
502#ifdef CONFIG_NET_POLL_CONTROLLER
503 .ndo_poll_controller = tun_poll_controller,
504#endif
483}; 505};
484 506
485/* Initialize net device. */ 507/* Initialize net device. */
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 9d4f9117260f..84d4608153c9 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -385,6 +385,16 @@ config USB_NET_CX82310_ETH
385 router with USB ethernet port. This driver is for routers only, 385 router with USB ethernet port. This driver is for routers only,
386 it will not work with ADSL modems (use cxacru driver instead). 386 it will not work with ADSL modems (use cxacru driver instead).
387 387
388config USB_NET_KALMIA
389 tristate "Samsung Kalmia based LTE USB modem"
390 depends on USB_USBNET
391 help
392 Choose this option if you have a Samsung Kalmia based USB modem
393 as Samsung GT-B3730.
394
395 To compile this driver as a module, choose M here: the
396 module will be called kalmia.
397
388config USB_HSO 398config USB_HSO
389 tristate "Option USB High Speed Mobile Devices" 399 tristate "Option USB High Speed Mobile Devices"
390 depends on USB && RFKILL 400 depends on USB && RFKILL
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index c7ec8a5f0a90..c203fa21f6b1 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
23obj-$(CONFIG_USB_USBNET) += usbnet.o 23obj-$(CONFIG_USB_USBNET) += usbnet.o
24obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o 24obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o
25obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o 25obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
26obj-$(CONFIG_USB_NET_KALMIA) += kalmia.o
26obj-$(CONFIG_USB_IPHETH) += ipheth.o 27obj-$(CONFIG_USB_IPHETH) += ipheth.o
27obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o 28obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
28obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o 29obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
new file mode 100644
index 000000000000..d965fb1e013e
--- /dev/null
+++ b/drivers/net/usb/kalmia.c
@@ -0,0 +1,384 @@
1/*
 2 * USB network interface driver for Samsung Kalmia based LTE USB modems like the
3 * Samsung GT-B3730 and GT-B3710.
4 *
5 * Copyright (C) 2011 Marius Bjoernstad Kotsbak <marius@kotsbak.com>
6 *
7 * Sponsored by Quicklink Video Distribution Services Ltd.
8 *
9 * Based on the cdc_eem module.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 */
16
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/ctype.h>
22#include <linux/ethtool.h>
23#include <linux/workqueue.h>
24#include <linux/mii.h>
25#include <linux/usb.h>
26#include <linux/crc32.h>
27#include <linux/usb/cdc.h>
28#include <linux/usb/usbnet.h>
29#include <linux/gfp.h>
30
31/*
32 * The Samsung Kalmia based LTE USB modems have a CDC ACM port for modem control
33 * handled by the "option" module and an ethernet data port handled by this
34 * module.
35 *
36 * The stick must first be switched into modem mode by usb_modeswitch
37 * or similar tool. Then the modem gets sent two initialization packets by
 38 * this module; the reply carries the MAC address of the device. User space can then
39 * connect the modem using AT commands through the ACM port and then use
40 * DHCP on the network interface exposed by this module. Network packets are
41 * sent to and from the modem in a proprietary format discovered after watching
 42 * the behavior of the Windows driver for the modem.
43 *
44 * More information about the use of the modem is available in usb_modeswitch
45 * forum and the project page:
46 *
47 * http://www.draisberghof.de/usb_modeswitch/bb/viewtopic.php?t=465
48 * https://github.com/mkotsbak/Samsung-GT-B3730-linux-driver
49 */
50
51/* #define DEBUG */
52/* #define VERBOSE */
53
54#define KALMIA_HEADER_LENGTH 6
55#define KALMIA_ALIGN_SIZE 4
56#define KALMIA_USB_TIMEOUT 10000
57
58/*-------------------------------------------------------------------------*/
59
60static int
61kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
62 u8 *buffer, u8 expected_len)
63{
64 int act_len;
65 int status;
66
67 netdev_dbg(dev->net, "Sending init packet");
68
69 status = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 0x02),
70 init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT);
71 if (status != 0) {
72 netdev_err(dev->net,
73 "Error sending init packet. Status %i, length %i\n",
74 status, act_len);
75 return status;
76 }
77 else if (act_len != init_msg_len) {
78 netdev_err(dev->net,
79 "Did not send all of init packet. Bytes sent: %i",
80 act_len);
81 }
82 else {
83 netdev_dbg(dev->net, "Successfully sent init packet.");
84 }
85
86 status = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, 0x81),
87 buffer, expected_len, &act_len, KALMIA_USB_TIMEOUT);
88
89 if (status != 0)
90 netdev_err(dev->net,
91 "Error receiving init result. Status %i, length %i\n",
92 status, act_len);
93 else if (act_len != expected_len)
94 netdev_err(dev->net, "Unexpected init result length: %i\n",
95 act_len);
96
97 return status;
98}
99
100static int
101kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
102{
103 char init_msg_1[] =
104 { 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
105 0x00, 0x00 };
106 char init_msg_2[] =
107 { 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0xf4,
108 0x00, 0x00 };
109 char receive_buf[28];
110 int status;
111
112 status = kalmia_send_init_packet(dev, init_msg_1, sizeof(init_msg_1)
113 / sizeof(init_msg_1[0]), receive_buf, 24);
114 if (status != 0)
115 return status;
116
117 status = kalmia_send_init_packet(dev, init_msg_2, sizeof(init_msg_2)
118 / sizeof(init_msg_2[0]), receive_buf, 28);
119 if (status != 0)
120 return status;
121
122 memcpy(ethernet_addr, receive_buf + 10, ETH_ALEN);
123
124 return status;
125}
126
127static int
128kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
129{
 130 int status;
131 u8 ethernet_addr[ETH_ALEN];
132
133 /* Don't bind to AT command interface */
134 if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
135 return -EINVAL;
136
137 dev->in = usb_rcvbulkpipe(dev->udev, 0x81 & USB_ENDPOINT_NUMBER_MASK);
138 dev->out = usb_sndbulkpipe(dev->udev, 0x02 & USB_ENDPOINT_NUMBER_MASK);
139 dev->status = NULL;
140
141 dev->net->hard_header_len += KALMIA_HEADER_LENGTH;
142 dev->hard_mtu = 1400;
143 dev->rx_urb_size = dev->hard_mtu * 10; // Found as optimal after testing
144
145 status = kalmia_init_and_get_ethernet_addr(dev, ethernet_addr);
146
147 if (status < 0) {
148 usb_set_intfdata(intf, NULL);
149 usb_driver_release_interface(driver_of(intf), intf);
150 return status;
151 }
152
153 memcpy(dev->net->dev_addr, ethernet_addr, ETH_ALEN);
154 memcpy(dev->net->perm_addr, ethernet_addr, ETH_ALEN);
155
156 return status;
157}
158
159static struct sk_buff *
160kalmia_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
161{
162 struct sk_buff *skb2 = NULL;
163 u16 content_len;
164 unsigned char *header_start;
165 unsigned char ether_type_1, ether_type_2;
166 u8 remainder, padlen = 0;
167
168 if (!skb_cloned(skb)) {
169 int headroom = skb_headroom(skb);
170 int tailroom = skb_tailroom(skb);
171
172 if ((tailroom >= KALMIA_ALIGN_SIZE) && (headroom
173 >= KALMIA_HEADER_LENGTH))
174 goto done;
175
176 if ((headroom + tailroom) > (KALMIA_HEADER_LENGTH
177 + KALMIA_ALIGN_SIZE)) {
178 skb->data = memmove(skb->head + KALMIA_HEADER_LENGTH,
179 skb->data, skb->len);
180 skb_set_tail_pointer(skb, skb->len);
181 goto done;
182 }
183 }
184
185 skb2 = skb_copy_expand(skb, KALMIA_HEADER_LENGTH,
186 KALMIA_ALIGN_SIZE, flags);
187 if (!skb2)
188 return NULL;
189
190 dev_kfree_skb_any(skb);
191 skb = skb2;
192
193 done: header_start = skb_push(skb, KALMIA_HEADER_LENGTH);
194 ether_type_1 = header_start[KALMIA_HEADER_LENGTH + 12];
195 ether_type_2 = header_start[KALMIA_HEADER_LENGTH + 13];
196
197 netdev_dbg(dev->net, "Sending etherType: %02x%02x", ether_type_1,
198 ether_type_2);
199
 200 /* According to empirical data for data packets */
201 header_start[0] = 0x57;
202 header_start[1] = 0x44;
203 content_len = skb->len - KALMIA_HEADER_LENGTH;
204 header_start[2] = (content_len & 0xff); /* low byte */
205 header_start[3] = (content_len >> 8); /* high byte */
206
207 header_start[4] = ether_type_1;
208 header_start[5] = ether_type_2;
209
210 /* Align to 4 bytes by padding with zeros */
211 remainder = skb->len % KALMIA_ALIGN_SIZE;
212 if (remainder > 0) {
213 padlen = KALMIA_ALIGN_SIZE - remainder;
214 memset(skb_put(skb, padlen), 0, padlen);
215 }
216
217 netdev_dbg(
218 dev->net,
219 "Sending package with length %i and padding %i. Header: %02x:%02x:%02x:%02x:%02x:%02x.",
220 content_len, padlen, header_start[0], header_start[1],
221 header_start[2], header_start[3], header_start[4],
222 header_start[5]);
223
224 return skb;
225}
226
227static int
228kalmia_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
229{
230 /*
231 * Our task here is to strip off framing, leaving skb with one
232 * data frame for the usbnet framework code to process.
233 */
234 const u8 HEADER_END_OF_USB_PACKET[] =
235 { 0x57, 0x5a, 0x00, 0x00, 0x08, 0x00 };
236 const u8 EXPECTED_UNKNOWN_HEADER_1[] =
237 { 0x57, 0x43, 0x1e, 0x00, 0x15, 0x02 };
238 const u8 EXPECTED_UNKNOWN_HEADER_2[] =
239 { 0x57, 0x50, 0x0e, 0x00, 0x00, 0x00 };
240 u8 i = 0;
241
242 /* incomplete header? */
243 if (skb->len < KALMIA_HEADER_LENGTH)
244 return 0;
245
246 do {
247 struct sk_buff *skb2 = NULL;
248 u8 *header_start;
249 u16 usb_packet_length, ether_packet_length;
250 int is_last;
251
252 header_start = skb->data;
253
254 if (unlikely(header_start[0] != 0x57 || header_start[1] != 0x44)) {
255 if (!memcmp(header_start, EXPECTED_UNKNOWN_HEADER_1,
256 sizeof(EXPECTED_UNKNOWN_HEADER_1)) || !memcmp(
257 header_start, EXPECTED_UNKNOWN_HEADER_2,
258 sizeof(EXPECTED_UNKNOWN_HEADER_2))) {
259 netdev_dbg(
260 dev->net,
261 "Received expected unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
262 header_start[0], header_start[1],
263 header_start[2], header_start[3],
264 header_start[4], header_start[5],
265 skb->len - KALMIA_HEADER_LENGTH);
266 }
267 else {
268 netdev_err(
269 dev->net,
270 "Received unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
271 header_start[0], header_start[1],
272 header_start[2], header_start[3],
273 header_start[4], header_start[5],
274 skb->len - KALMIA_HEADER_LENGTH);
275 return 0;
276 }
277 }
278 else
279 netdev_dbg(
280 dev->net,
281 "Received header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
282 header_start[0], header_start[1], header_start[2],
283 header_start[3], header_start[4], header_start[5],
284 skb->len - KALMIA_HEADER_LENGTH);
285
286 /* subtract start header and end header */
287 usb_packet_length = skb->len - (2 * KALMIA_HEADER_LENGTH);
288 ether_packet_length = header_start[2] + (header_start[3] << 8);
289 skb_pull(skb, KALMIA_HEADER_LENGTH);
290
 291 /* Some small packets miss the end marker */
292 if (usb_packet_length < ether_packet_length) {
293 ether_packet_length = usb_packet_length
294 + KALMIA_HEADER_LENGTH;
295 is_last = true;
296 }
297 else {
298 netdev_dbg(dev->net, "Correct package length #%i", i
299 + 1);
300
301 is_last = (memcmp(skb->data + ether_packet_length,
302 HEADER_END_OF_USB_PACKET,
303 sizeof(HEADER_END_OF_USB_PACKET)) == 0);
304 if (!is_last) {
305 header_start = skb->data + ether_packet_length;
306 netdev_dbg(
307 dev->net,
308 "End header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
309 header_start[0], header_start[1],
310 header_start[2], header_start[3],
311 header_start[4], header_start[5],
312 skb->len - KALMIA_HEADER_LENGTH);
313 }
314 }
315
316 if (is_last) {
317 skb2 = skb;
318 }
319 else {
320 skb2 = skb_clone(skb, GFP_ATOMIC);
321 if (unlikely(!skb2))
322 return 0;
323 }
324
325 skb_trim(skb2, ether_packet_length);
326
327 if (is_last) {
328 return 1;
329 }
330 else {
331 usbnet_skb_return(dev, skb2);
332 skb_pull(skb, ether_packet_length);
333 }
334
335 i++;
336 }
337 while (skb->len);
338
339 return 1;
340}
341
342static const struct driver_info kalmia_info = {
343 .description = "Samsung Kalmia LTE USB dongle",
344 .flags = FLAG_WWAN,
345 .bind = kalmia_bind,
346 .rx_fixup = kalmia_rx_fixup,
347 .tx_fixup = kalmia_tx_fixup
348};
349
350/*-------------------------------------------------------------------------*/
351
352static const struct usb_device_id products[] = {
353 /* The unswitched USB ID, to get the module auto loaded: */
354 { USB_DEVICE(0x04e8, 0x689a) },
 355 /* The stick switched into modem mode (by e.g. usb_modeswitch): */
356 { USB_DEVICE(0x04e8, 0x6889),
357 .driver_info = (unsigned long) &kalmia_info, },
358 { /* EMPTY == end of list */} };
 359MODULE_DEVICE_TABLE(usb, products);
360
361static struct usb_driver kalmia_driver = {
362 .name = "kalmia",
363 .id_table = products,
364 .probe = usbnet_probe,
365 .disconnect = usbnet_disconnect,
366 .suspend = usbnet_suspend,
367 .resume = usbnet_resume
368};
369
370static int __init kalmia_init(void)
371{
372 return usb_register(&kalmia_driver);
373}
 374module_init(kalmia_init);
375
376static void __exit kalmia_exit(void)
377{
378 usb_deregister(&kalmia_driver);
379}
 380module_exit(kalmia_exit);
381
382MODULE_AUTHOR("Marius Bjoernstad Kotsbak <marius@kotsbak.com>");
383MODULE_DESCRIPTION("Samsung Kalmia USB network driver");
384MODULE_LICENSE("GPL");
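
For reference, the TX framing kalmia_tx_fixup() builds can be restated compactly: a 6-byte header carrying the magic 0x57 0x44, the payload length in little-endian order, and a copy of the frame's EtherType, with the whole buffer then zero-padded to a 4-byte boundary. A standalone sketch of just the header (buf points at the 6 header bytes; the Ethernet frame of content_len bytes follows directly):

    static void kalmia_put_header(unsigned char *buf,
                                  unsigned short content_len)
    {
            buf[0] = 0x57;                  /* framing magic */
            buf[1] = 0x44;
            buf[2] = content_len & 0xff;    /* length, low byte first */
            buf[3] = content_len >> 8;
            buf[4] = buf[6 + 12];           /* EtherType bytes, copied from */
            buf[5] = buf[6 + 13];           /*   the Ethernet header after it */
    }
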
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index e050bd65e037..777d1a4e81b2 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2203,8 +2203,10 @@ fst_open(struct net_device *dev)
2203 2203
2204 if (port->mode != FST_RAW) { 2204 if (port->mode != FST_RAW) {
2205 err = hdlc_open(dev); 2205 err = hdlc_open(dev);
2206 if (err) 2206 if (err) {
2207 module_put(THIS_MODULE);
2207 return err; 2208 return err;
2209 }
2208 } 2210 }
2209 2211
2210 fst_openport(port); 2212 fst_openport(port);
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 660831ce293c..687c1f223497 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1288,6 +1288,8 @@ int mwifiex_register_cfg80211(struct net_device *dev, u8 *mac,
1288 1288
1289 *(unsigned long *) wdev_priv = (unsigned long) priv; 1289 *(unsigned long *) wdev_priv = (unsigned long) priv;
1290 1290
1291 set_wiphy_dev(wdev->wiphy, (struct device *) priv->adapter->dev);
1292
1291 ret = wiphy_register(wdev->wiphy); 1293 ret = wiphy_register(wdev->wiphy);
1292 if (ret < 0) { 1294 if (ret < 0) {
1293 dev_err(priv->adapter->dev, "%s: registering cfg80211 device\n", 1295 dev_err(priv->adapter->dev, "%s: registering cfg80211 device\n",
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 32261189bcef..aeac3cc4dbe4 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -2474,6 +2474,7 @@ struct mwl8k_cmd_set_hw_spec {
2474 * faster client. 2474 * faster client.
2475 */ 2475 */
2476#define MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY 0x00000400 2476#define MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY 0x00000400
2477#define MWL8K_SET_HW_SPEC_FLAG_GENERATE_CCMP_HDR 0x00000200
2477#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080 2478#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
2478#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020 2479#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020
2479#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010 2480#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010
@@ -2510,7 +2511,8 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
2510 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT | 2511 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
2511 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP | 2512 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
2512 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON | 2513 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON |
2513 MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY); 2514 MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY |
2515 MWL8K_SET_HW_SPEC_FLAG_GENERATE_CCMP_HDR);
2514 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS); 2516 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
2515 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS); 2517 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
2516 2518
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 135df164a4c1..46767c53917a 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -624,7 +624,7 @@ static int pci_pm_prepare(struct device *dev)
624 * system from the sleep state, we'll have to prevent it from signaling 624 * system from the sleep state, we'll have to prevent it from signaling
625 * wake-up. 625 * wake-up.
626 */ 626 */
627 pm_runtime_resume(dev); 627 pm_runtime_get_sync(dev);
628 628
629 if (drv && drv->pm && drv->pm->prepare) 629 if (drv && drv->pm && drv->pm->prepare)
630 error = drv->pm->prepare(dev); 630 error = drv->pm->prepare(dev);
@@ -638,6 +638,8 @@ static void pci_pm_complete(struct device *dev)
638 638
639 if (drv && drv->pm && drv->pm->complete) 639 if (drv && drv->pm && drv->pm->complete)
640 drv->pm->complete(dev); 640 drv->pm->complete(dev);
641
642 pm_runtime_put_sync(dev);
641} 643}
642 644
643#else /* !CONFIG_PM_SLEEP */ 645#else /* !CONFIG_PM_SLEEP */
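
The PCI PM change swaps a transient pm_runtime_resume() for a proper reference: pm_runtime_get_sync() in prepare() resumes the device and pins it awake, and the new pm_runtime_put_sync() in complete() releases it once the sleep transition is over, so the device cannot runtime-suspend midway through system suspend. The pairing in isolation:

    static int my_prepare(struct device *dev)
    {
            pm_runtime_get_sync(dev);   /* resume and hold the device */
            return 0;
    }

    static void my_complete(struct device *dev)
    {
            pm_runtime_put_sync(dev);   /* balance my_prepare() */
    }
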
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 5f10c23dff94..2c5b9b991279 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3284,7 +3284,7 @@ static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3284 * @dev: the PCI device 3284 * @dev: the PCI device
3285 * @decode: true = enable decoding, false = disable decoding 3285 * @decode: true = enable decoding, false = disable decoding
3286 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY 3286 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3287 * @change_bridge_flags: traverse ancestors and change bridges 3287 * @flags: traverse ancestors and change bridges
3288 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE 3288 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
3289 */ 3289 */
3290int pci_set_vga_state(struct pci_dev *dev, bool decode, 3290int pci_set_vga_state(struct pci_dev *dev, bool decode,
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 48849ffdd672..bafb3c3d4a89 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -168,7 +168,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
168 res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN; 168 res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
169 if (type == pci_bar_io) { 169 if (type == pci_bar_io) {
170 l &= PCI_BASE_ADDRESS_IO_MASK; 170 l &= PCI_BASE_ADDRESS_IO_MASK;
171 mask = PCI_BASE_ADDRESS_IO_MASK & IO_SPACE_LIMIT; 171 mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
172 } else { 172 } else {
173 l &= PCI_BASE_ADDRESS_MEM_MASK; 173 l &= PCI_BASE_ADDRESS_MEM_MASK;
174 mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; 174 mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index b8bc862903ae..efd6066b5cd2 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -78,7 +78,6 @@ struct vt8500_rtc {
78 void __iomem *regbase; 78 void __iomem *regbase;
79 struct resource *res; 79 struct resource *res;
80 int irq_alarm; 80 int irq_alarm;
81 int irq_hz;
82 struct rtc_device *rtc; 81 struct rtc_device *rtc;
83 spinlock_t lock; /* Protects this structure */ 82 spinlock_t lock; /* Protects this structure */
84}; 83};
@@ -100,10 +99,6 @@ static irqreturn_t vt8500_rtc_irq(int irq, void *dev_id)
100 if (isr & 1) 99 if (isr & 1)
101 events |= RTC_AF | RTC_IRQF; 100 events |= RTC_AF | RTC_IRQF;
102 101
103 /* Only second/minute interrupts are supported */
104 if (isr & 2)
105 events |= RTC_UF | RTC_IRQF;
106
107 rtc_update_irq(vt8500_rtc->rtc, 1, events); 102 rtc_update_irq(vt8500_rtc->rtc, 1, events);
108 103
109 return IRQ_HANDLED; 104 return IRQ_HANDLED;
@@ -199,27 +194,12 @@ static int vt8500_alarm_irq_enable(struct device *dev, unsigned int enabled)
199 return 0; 194 return 0;
200} 195}
201 196
202static int vt8500_update_irq_enable(struct device *dev, unsigned int enabled)
203{
204 struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
205 unsigned long tmp = readl(vt8500_rtc->regbase + VT8500_RTC_CR);
206
207 if (enabled)
208 tmp |= VT8500_RTC_CR_SM_SEC | VT8500_RTC_CR_SM_ENABLE;
209 else
210 tmp &= ~VT8500_RTC_CR_SM_ENABLE;
211
212 writel(tmp, vt8500_rtc->regbase + VT8500_RTC_CR);
213 return 0;
214}
215
216static const struct rtc_class_ops vt8500_rtc_ops = { 197static const struct rtc_class_ops vt8500_rtc_ops = {
217 .read_time = vt8500_rtc_read_time, 198 .read_time = vt8500_rtc_read_time,
218 .set_time = vt8500_rtc_set_time, 199 .set_time = vt8500_rtc_set_time,
219 .read_alarm = vt8500_rtc_read_alarm, 200 .read_alarm = vt8500_rtc_read_alarm,
220 .set_alarm = vt8500_rtc_set_alarm, 201 .set_alarm = vt8500_rtc_set_alarm,
221 .alarm_irq_enable = vt8500_alarm_irq_enable, 202 .alarm_irq_enable = vt8500_alarm_irq_enable,
222 .update_irq_enable = vt8500_update_irq_enable,
223}; 203};
224 204
225static int __devinit vt8500_rtc_probe(struct platform_device *pdev) 205static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
@@ -248,13 +228,6 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
248 goto err_free; 228 goto err_free;
249 } 229 }
250 230
251 vt8500_rtc->irq_hz = platform_get_irq(pdev, 1);
252 if (vt8500_rtc->irq_hz < 0) {
253 dev_err(&pdev->dev, "No 1Hz IRQ resource defined\n");
254 ret = -ENXIO;
255 goto err_free;
256 }
257
258 vt8500_rtc->res = request_mem_region(vt8500_rtc->res->start, 231 vt8500_rtc->res = request_mem_region(vt8500_rtc->res->start,
259 resource_size(vt8500_rtc->res), 232 resource_size(vt8500_rtc->res),
260 "vt8500-rtc"); 233 "vt8500-rtc");
@@ -272,9 +245,8 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
272 goto err_release; 245 goto err_release;
273 } 246 }
274 247
275 /* Enable the second/minute interrupt generation and enable RTC */ 248 /* Enable RTC and set it to 24-hour mode */
276 writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H 249 writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H,
277 | VT8500_RTC_CR_SM_ENABLE | VT8500_RTC_CR_SM_SEC,
278 vt8500_rtc->regbase + VT8500_RTC_CR); 250 vt8500_rtc->regbase + VT8500_RTC_CR);
279 251
280 vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev, 252 vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev,
@@ -286,26 +258,16 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
286 goto err_unmap; 258 goto err_unmap;
287 } 259 }
288 260
289 ret = request_irq(vt8500_rtc->irq_hz, vt8500_rtc_irq, 0,
290 "rtc 1Hz", vt8500_rtc);
291 if (ret < 0) {
292 dev_err(&pdev->dev, "can't get irq %i, err %d\n",
293 vt8500_rtc->irq_hz, ret);
294 goto err_unreg;
295 }
296
297 ret = request_irq(vt8500_rtc->irq_alarm, vt8500_rtc_irq, 0, 261 ret = request_irq(vt8500_rtc->irq_alarm, vt8500_rtc_irq, 0,
298 "rtc alarm", vt8500_rtc); 262 "rtc alarm", vt8500_rtc);
299 if (ret < 0) { 263 if (ret < 0) {
300 dev_err(&pdev->dev, "can't get irq %i, err %d\n", 264 dev_err(&pdev->dev, "can't get irq %i, err %d\n",
301 vt8500_rtc->irq_alarm, ret); 265 vt8500_rtc->irq_alarm, ret);
302 goto err_free_hz; 266 goto err_unreg;
303 } 267 }
304 268
305 return 0; 269 return 0;
306 270
307err_free_hz:
308 free_irq(vt8500_rtc->irq_hz, vt8500_rtc);
309err_unreg: 271err_unreg:
310 rtc_device_unregister(vt8500_rtc->rtc); 272 rtc_device_unregister(vt8500_rtc->rtc);
311err_unmap: 273err_unmap:
@@ -323,7 +285,6 @@ static int __devexit vt8500_rtc_remove(struct platform_device *pdev)
323 struct vt8500_rtc *vt8500_rtc = platform_get_drvdata(pdev); 285 struct vt8500_rtc *vt8500_rtc = platform_get_drvdata(pdev);
324 286
325 free_irq(vt8500_rtc->irq_alarm, vt8500_rtc); 287 free_irq(vt8500_rtc->irq_alarm, vt8500_rtc);
326 free_irq(vt8500_rtc->irq_hz, vt8500_rtc);
327 288
328 rtc_device_unregister(vt8500_rtc->rtc); 289 rtc_device_unregister(vt8500_rtc->rtc);
329 290
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index dee2a2c909f5..70c2e7fa6664 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -386,7 +386,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
386 */ 386 */
387 se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr, 387 se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr,
388 TMR_LUN_RESET); 388 TMR_LUN_RESET);
389 if (!se_cmd->se_tmr_req) 389 if (IS_ERR(se_cmd->se_tmr_req))
390 goto release; 390 goto release;
391 /* 391 /*
392 * Locate the underlying TCM struct se_lun from sc->device->lun 392 * Locate the underlying TCM struct se_lun from sc->device->lun
@@ -1017,6 +1017,7 @@ static int tcm_loop_make_nexus(
1017 struct se_portal_group *se_tpg; 1017 struct se_portal_group *se_tpg;
1018 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 1018 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
1019 struct tcm_loop_nexus *tl_nexus; 1019 struct tcm_loop_nexus *tl_nexus;
1020 int ret = -ENOMEM;
1020 1021
1021 if (tl_tpg->tl_hba->tl_nexus) { 1022 if (tl_tpg->tl_hba->tl_nexus) {
1022 printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n"); 1023 printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n");
@@ -1033,8 +1034,10 @@ static int tcm_loop_make_nexus(
1033 * Initialize the struct se_session pointer 1034 * Initialize the struct se_session pointer
1034 */ 1035 */
1035 tl_nexus->se_sess = transport_init_session(); 1036 tl_nexus->se_sess = transport_init_session();
1036 if (!tl_nexus->se_sess) 1037 if (IS_ERR(tl_nexus->se_sess)) {
1038 ret = PTR_ERR(tl_nexus->se_sess);
1037 goto out; 1039 goto out;
1040 }
1038 /* 1041 /*
 1039 * Since we are running in 'demo mode' this call will generate a 1042 * Since we are running in 'demo mode' this call will generate a
1040 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI 1043 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
@@ -1060,7 +1063,7 @@ static int tcm_loop_make_nexus(
1060 1063
1061out: 1064out:
1062 kfree(tl_nexus); 1065 kfree(tl_nexus);
1063 return -ENOMEM; 1066 return ret;
1064} 1067}
1065 1068
1066static int tcm_loop_drop_nexus( 1069static int tcm_loop_drop_nexus(
@@ -1140,7 +1143,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
1140 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call 1143 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
1141 * tcm_loop_make_nexus() 1144 * tcm_loop_make_nexus()
1142 */ 1145 */
1143 if (strlen(page) > TL_WWN_ADDR_LEN) { 1146 if (strlen(page) >= TL_WWN_ADDR_LEN) {
1144 printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds" 1147 printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds"
1145 " max: %d\n", page, TL_WWN_ADDR_LEN); 1148 " max: %d\n", page, TL_WWN_ADDR_LEN);
1146 return -EINVAL; 1149 return -EINVAL;
@@ -1321,7 +1324,7 @@ struct se_wwn *tcm_loop_make_scsi_hba(
1321 return ERR_PTR(-EINVAL); 1324 return ERR_PTR(-EINVAL);
1322 1325
1323check_len: 1326check_len:
1324 if (strlen(name) > TL_WWN_ADDR_LEN) { 1327 if (strlen(name) >= TL_WWN_ADDR_LEN) {
1325 printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds" 1328 printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds"
1326 " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba), 1329 " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
1327 TL_WWN_ADDR_LEN); 1330 TL_WWN_ADDR_LEN);
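
Two of the hunks above switch callers of core_tmr_alloc_req() and transport_init_session() from NULL tests to IS_ERR()/PTR_ERR(), because those functions now return ERR_PTR-encoded errnos. A self-contained sketch of the encoding, mirroring the definitions in include/linux/err.h: an errno in -1..-4095 is stored as an address in the top page of the address space, which no valid pointer can occupy, so a plain NULL check silently passes an error pointer through.

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); }

static void *init_session(int fail)
{
	static int dummy;

	if (fail)
		return ERR_PTR(-ENOMEM);  /* not NULL: it carries the errno */
	return &dummy;
}

int main(void)
{
	void *s = init_session(1);

	if (IS_ERR(s))                    /* "if (!s)" would miss this */
		printf("error: %ld\n", PTR_ERR(s));
	return 0;
}

That is also why tcm_loop_make_nexus() now threads a real ret through its out: label instead of hard-coding -ENOMEM.
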
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index ee6fad979b50..25c1f49a7d8b 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -304,7 +304,7 @@ struct target_fabric_configfs *target_fabric_configfs_init(
304 printk(KERN_ERR "Unable to locate passed fabric name\n"); 304 printk(KERN_ERR "Unable to locate passed fabric name\n");
305 return NULL; 305 return NULL;
306 } 306 }
307 if (strlen(name) > TARGET_FABRIC_NAME_SIZE) { 307 if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) {
308 printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC" 308 printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC"
309 "_NAME_SIZE\n", name); 309 "_NAME_SIZE\n", name);
310 return NULL; 310 return NULL;
@@ -312,7 +312,7 @@ struct target_fabric_configfs *target_fabric_configfs_init(
312 312
313 tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); 313 tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
314 if (!(tf)) 314 if (!(tf))
315 return ERR_PTR(-ENOMEM); 315 return NULL;
316 316
317 INIT_LIST_HEAD(&tf->tf_list); 317 INIT_LIST_HEAD(&tf->tf_list);
318 atomic_set(&tf->tf_access_cnt, 0); 318 atomic_set(&tf->tf_access_cnt, 0);
@@ -851,7 +851,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
851 return -EOPNOTSUPP; 851 return -EOPNOTSUPP;
852 } 852 }
853 853
854 if ((strlen(page) + 1) > INQUIRY_VPD_SERIAL_LEN) { 854 if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
855 printk(KERN_ERR "Emulated VPD Unit Serial exceeds" 855 printk(KERN_ERR "Emulated VPD Unit Serial exceeds"
856 " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN); 856 " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
857 return -EOVERFLOW; 857 return -EOVERFLOW;
@@ -917,7 +917,7 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
917 917
918 transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE); 918 transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
919 919
920 if ((len + strlen(buf) > PAGE_SIZE)) 920 if ((len + strlen(buf) >= PAGE_SIZE))
921 break; 921 break;
922 922
923 len += sprintf(page+len, "%s", buf); 923 len += sprintf(page+len, "%s", buf);
@@ -962,19 +962,19 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \
962 \ 962 \
963 memset(buf, 0, VPD_TMP_BUF_SIZE); \ 963 memset(buf, 0, VPD_TMP_BUF_SIZE); \
964 transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \ 964 transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \
965 if ((len + strlen(buf) > PAGE_SIZE)) \ 965 if ((len + strlen(buf) >= PAGE_SIZE)) \
966 break; \ 966 break; \
967 len += sprintf(page+len, "%s", buf); \ 967 len += sprintf(page+len, "%s", buf); \
968 \ 968 \
969 memset(buf, 0, VPD_TMP_BUF_SIZE); \ 969 memset(buf, 0, VPD_TMP_BUF_SIZE); \
970 transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \ 970 transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
971 if ((len + strlen(buf) > PAGE_SIZE)) \ 971 if ((len + strlen(buf) >= PAGE_SIZE)) \
972 break; \ 972 break; \
973 len += sprintf(page+len, "%s", buf); \ 973 len += sprintf(page+len, "%s", buf); \
974 \ 974 \
975 memset(buf, 0, VPD_TMP_BUF_SIZE); \ 975 memset(buf, 0, VPD_TMP_BUF_SIZE); \
976 transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \ 976 transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
977 if ((len + strlen(buf) > PAGE_SIZE)) \ 977 if ((len + strlen(buf) >= PAGE_SIZE)) \
978 break; \ 978 break; \
979 len += sprintf(page+len, "%s", buf); \ 979 len += sprintf(page+len, "%s", buf); \
980 } \ 980 } \
@@ -1299,7 +1299,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
1299 &i_buf[0] : "", pr_reg->pr_res_key, 1299 &i_buf[0] : "", pr_reg->pr_res_key,
1300 pr_reg->pr_res_generation); 1300 pr_reg->pr_res_generation);
1301 1301
1302 if ((len + strlen(buf) > PAGE_SIZE)) 1302 if ((len + strlen(buf) >= PAGE_SIZE))
1303 break; 1303 break;
1304 1304
1305 len += sprintf(page+len, "%s", buf); 1305 len += sprintf(page+len, "%s", buf);
@@ -1496,7 +1496,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1496 ret = -ENOMEM; 1496 ret = -ENOMEM;
1497 goto out; 1497 goto out;
1498 } 1498 }
1499 if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) { 1499 if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
1500 printk(KERN_ERR "APTPL metadata initiator_node=" 1500 printk(KERN_ERR "APTPL metadata initiator_node="
1501 " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", 1501 " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
1502 PR_APTPL_MAX_IPORT_LEN); 1502 PR_APTPL_MAX_IPORT_LEN);
@@ -1510,7 +1510,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1510 ret = -ENOMEM; 1510 ret = -ENOMEM;
1511 goto out; 1511 goto out;
1512 } 1512 }
1513 if (strlen(isid) > PR_REG_ISID_LEN) { 1513 if (strlen(isid) >= PR_REG_ISID_LEN) {
1514 printk(KERN_ERR "APTPL metadata initiator_isid" 1514 printk(KERN_ERR "APTPL metadata initiator_isid"
1515 "= exceeds PR_REG_ISID_LEN: %d\n", 1515 "= exceeds PR_REG_ISID_LEN: %d\n",
1516 PR_REG_ISID_LEN); 1516 PR_REG_ISID_LEN);
@@ -1571,7 +1571,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1571 ret = -ENOMEM; 1571 ret = -ENOMEM;
1572 goto out; 1572 goto out;
1573 } 1573 }
1574 if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) { 1574 if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
1575 printk(KERN_ERR "APTPL metadata target_node=" 1575 printk(KERN_ERR "APTPL metadata target_node="
1576 " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", 1576 " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
1577 PR_APTPL_MAX_TPORT_LEN); 1577 PR_APTPL_MAX_TPORT_LEN);
@@ -3052,7 +3052,7 @@ static struct config_group *target_core_call_addhbatotarget(
3052 int ret; 3052 int ret;
3053 3053
3054 memset(buf, 0, TARGET_CORE_NAME_MAX_LEN); 3054 memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
3055 if (strlen(name) > TARGET_CORE_NAME_MAX_LEN) { 3055 if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
3056 printk(KERN_ERR "Passed *name strlen(): %d exceeds" 3056 printk(KERN_ERR "Passed *name strlen(): %d exceeds"
3057 " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name), 3057 " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
3058 TARGET_CORE_NAME_MAX_LEN); 3058 TARGET_CORE_NAME_MAX_LEN);
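
Every ">" to ">=" change in this file (and in tcm_loop.c above) is the same off-by-one fix: strlen() does not count the terminating NUL, so a string whose length equals the destination size still needs one byte more than the buffer holds. A self-contained illustration:

#include <stdio.h>
#include <string.h>

#define LEN 8

int main(void)
{
	char buf[LEN];
	const char *name = "12345678";  /* strlen(name) == 8 == LEN */

	if (strlen(name) >= LEN) {      /* the old "> LEN" test let this through */
		fprintf(stderr, "too long for a %d-byte buffer\n", LEN);
		return 1;
	}
	strcpy(buf, name);              /* needs strlen(name) + 1 bytes */
	puts(buf);
	return 0;
}
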
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 8407f9ca2b31..ba698ea62bb2 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -192,7 +192,7 @@ int transport_get_lun_for_tmr(
192 &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; 192 &SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
193 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { 193 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
194 se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun; 194 se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
195 dev = se_tmr->tmr_dev = se_lun->lun_se_dev; 195 dev = se_lun->lun_se_dev;
196 se_cmd->pr_res_key = deve->pr_res_key; 196 se_cmd->pr_res_key = deve->pr_res_key;
197 se_cmd->orig_fe_lun = unpacked_lun; 197 se_cmd->orig_fe_lun = unpacked_lun;
198 se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; 198 se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
@@ -216,6 +216,7 @@ int transport_get_lun_for_tmr(
216 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 216 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
217 return -1; 217 return -1;
218 } 218 }
219 se_tmr->tmr_dev = dev;
219 220
220 spin_lock(&dev->se_tmr_lock); 221 spin_lock(&dev->se_tmr_lock);
221 list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list); 222 list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
@@ -1430,7 +1431,7 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
1430 struct se_lun_acl *lacl; 1431 struct se_lun_acl *lacl;
1431 struct se_node_acl *nacl; 1432 struct se_node_acl *nacl;
1432 1433
1433 if (strlen(initiatorname) > TRANSPORT_IQN_LEN) { 1434 if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
1434 printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n", 1435 printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
1435 TPG_TFO(tpg)->get_fabric_name()); 1436 TPG_TFO(tpg)->get_fabric_name());
1436 *ret = -EOVERFLOW; 1437 *ret = -EOVERFLOW;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index a79f518ca6e2..b662db3a320b 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1916,7 +1916,7 @@ static int __core_scsi3_update_aptpl_buf(
1916 pr_reg->pr_res_mapped_lun); 1916 pr_reg->pr_res_mapped_lun);
1917 } 1917 }
1918 1918
1919 if ((len + strlen(tmp) > pr_aptpl_buf_len)) { 1919 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
1920 printk(KERN_ERR "Unable to update renaming" 1920 printk(KERN_ERR "Unable to update renaming"
1921 " APTPL metadata\n"); 1921 " APTPL metadata\n");
1922 spin_unlock(&T10_RES(su_dev)->registration_lock); 1922 spin_unlock(&T10_RES(su_dev)->registration_lock);
@@ -1934,7 +1934,7 @@ static int __core_scsi3_update_aptpl_buf(
1934 TPG_TFO(tpg)->tpg_get_tag(tpg), 1934 TPG_TFO(tpg)->tpg_get_tag(tpg),
1935 lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count); 1935 lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
1936 1936
1937 if ((len + strlen(tmp) > pr_aptpl_buf_len)) { 1937 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
1938 printk(KERN_ERR "Unable to update renaming" 1938 printk(KERN_ERR "Unable to update renaming"
1939 " APTPL metadata\n"); 1939 " APTPL metadata\n");
1940 spin_unlock(&T10_RES(su_dev)->registration_lock); 1940 spin_unlock(&T10_RES(su_dev)->registration_lock);
@@ -1986,7 +1986,7 @@ static int __core_scsi3_write_aptpl_to_file(
1986 memset(iov, 0, sizeof(struct iovec)); 1986 memset(iov, 0, sizeof(struct iovec));
1987 memset(path, 0, 512); 1987 memset(path, 0, 512);
1988 1988
1989 if (strlen(&wwn->unit_serial[0]) > 512) { 1989 if (strlen(&wwn->unit_serial[0]) >= 512) {
1990 printk(KERN_ERR "WWN value for struct se_device does not fit" 1990 printk(KERN_ERR "WWN value for struct se_device does not fit"
1991 " into path buffer\n"); 1991 " into path buffer\n");
1992 return -1; 1992 return -1;
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 59b8b9c5ad72..179063d81cdd 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -75,10 +75,16 @@ void core_tmr_release_req(
75{ 75{
76 struct se_device *dev = tmr->tmr_dev; 76 struct se_device *dev = tmr->tmr_dev;
77 77
78 if (!dev) {
79 kmem_cache_free(se_tmr_req_cache, tmr);
80 return;
81 }
82
78 spin_lock(&dev->se_tmr_lock); 83 spin_lock(&dev->se_tmr_lock);
79 list_del(&tmr->tmr_list); 84 list_del(&tmr->tmr_list);
80 kmem_cache_free(se_tmr_req_cache, tmr);
81 spin_unlock(&dev->se_tmr_lock); 85 spin_unlock(&dev->se_tmr_lock);
86
87 kmem_cache_free(se_tmr_req_cache, tmr);
82} 88}
83 89
84static void core_tmr_handle_tas_abort( 90static void core_tmr_handle_tas_abort(
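
The core_tmr_release_req() hunk makes two changes: it tolerates a TMR that was never attached to a device (tmr_dev may now stay unset until transport_get_lun_for_tmr() succeeds, per the target_core_device.c hunk above), and it moves the kmem_cache_free() out from under dev->se_tmr_lock so the allocator is never entered with a spinlock held. A runnable pthread sketch of the second point, unlink under the lock and free after it:

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static void release_node(struct node *n)
{
	struct node **pp;

	pthread_mutex_lock(&list_lock);
	for (pp = &head; *pp; pp = &(*pp)->next)
		if (*pp == n) {
			*pp = n->next;   /* unlink while the lock is held */
			break;
		}
	pthread_mutex_unlock(&list_lock);

	free(n);                         /* free only after unlocking */
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	n->next = head;
	head = n;
	release_node(n);
	return 0;
}
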
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 4dafeb8b5638..4b9b7169bdd9 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -536,13 +536,13 @@ EXPORT_SYMBOL(transport_register_session);
536void transport_deregister_session_configfs(struct se_session *se_sess) 536void transport_deregister_session_configfs(struct se_session *se_sess)
537{ 537{
538 struct se_node_acl *se_nacl; 538 struct se_node_acl *se_nacl;
539 539 unsigned long flags;
540 /* 540 /*
541 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 541 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
542 */ 542 */
543 se_nacl = se_sess->se_node_acl; 543 se_nacl = se_sess->se_node_acl;
544 if ((se_nacl)) { 544 if ((se_nacl)) {
545 spin_lock_irq(&se_nacl->nacl_sess_lock); 545 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
546 list_del(&se_sess->sess_acl_list); 546 list_del(&se_sess->sess_acl_list);
547 /* 547 /*
548 * If the session list is empty, then clear the pointer. 548 * If the session list is empty, then clear the pointer.
@@ -556,7 +556,7 @@ void transport_deregister_session_configfs(struct se_session *se_sess)
556 se_nacl->acl_sess_list.prev, 556 se_nacl->acl_sess_list.prev,
557 struct se_session, sess_acl_list); 557 struct se_session, sess_acl_list);
558 } 558 }
559 spin_unlock_irq(&se_nacl->nacl_sess_lock); 559 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
560 } 560 }
561} 561}
562EXPORT_SYMBOL(transport_deregister_session_configfs); 562EXPORT_SYMBOL(transport_deregister_session_configfs);
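
The irqsave/irqrestore conversion matters because transport_deregister_session_configfs() can be reached from contexts where interrupts are already disabled: spin_unlock_irq() would unconditionally re-enable them, while the save/restore pair puts back whatever state the caller had. The nearest self-contained userspace analogue is saving and restoring the signal mask instead of unconditionally unblocking on the way out:

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static void critical_section(void)
{
	sigset_t block, saved;

	sigfillset(&block);
	pthread_sigmask(SIG_BLOCK, &block, &saved);   /* "irqsave" */

	/* ... work that must not be interrupted by handlers ... */

	pthread_sigmask(SIG_SETMASK, &saved, NULL);   /* "irqrestore" */
}

int main(void)
{
	critical_section();
	puts("signal mask restored to caller's state");
	return 0;
}
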
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index defff32b7880..7b82f1b7fef8 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -144,7 +144,7 @@ enum ft_cmd_state {
144 */ 144 */
145struct ft_cmd { 145struct ft_cmd {
146 enum ft_cmd_state state; 146 enum ft_cmd_state state;
147 u16 lun; /* LUN from request */ 147 u32 lun; /* LUN from request */
148 struct ft_sess *sess; /* session held for cmd */ 148 struct ft_sess *sess; /* session held for cmd */
149 struct fc_seq *seq; /* sequence in exchange mgr */ 149 struct fc_seq *seq; /* sequence in exchange mgr */
150 struct se_cmd se_cmd; /* Local TCM I/O descriptor */ 150 struct se_cmd se_cmd; /* Local TCM I/O descriptor */
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index c056a1132ae1..b2a106729d49 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -94,29 +94,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
94 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0); 94 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
95} 95}
96 96
97/*
98 * Get LUN from CDB.
99 */
100static int ft_get_lun_for_cmd(struct ft_cmd *cmd, u8 *lunp)
101{
102 u64 lun;
103
104 lun = lunp[1];
105 switch (lunp[0] >> 6) {
106 case 0:
107 break;
108 case 1:
109 lun |= (lunp[0] & 0x3f) << 8;
110 break;
111 default:
112 return -1;
113 }
114 if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
115 return -1;
116 cmd->lun = lun;
117 return transport_get_lun_for_cmd(&cmd->se_cmd, NULL, lun);
118}
119
120static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd) 97static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
121{ 98{
122 struct se_queue_obj *qobj; 99 struct se_queue_obj *qobj;
@@ -418,6 +395,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
418{ 395{
419 struct se_tmr_req *tmr; 396 struct se_tmr_req *tmr;
420 struct fcp_cmnd *fcp; 397 struct fcp_cmnd *fcp;
398 struct ft_sess *sess;
421 u8 tm_func; 399 u8 tm_func;
422 400
423 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); 401 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
@@ -425,13 +403,6 @@ static void ft_send_tm(struct ft_cmd *cmd)
425 switch (fcp->fc_tm_flags) { 403 switch (fcp->fc_tm_flags) {
426 case FCP_TMF_LUN_RESET: 404 case FCP_TMF_LUN_RESET:
427 tm_func = TMR_LUN_RESET; 405 tm_func = TMR_LUN_RESET;
428 if (ft_get_lun_for_cmd(cmd, fcp->fc_lun) < 0) {
429 ft_dump_cmd(cmd, __func__);
430 transport_send_check_condition_and_sense(&cmd->se_cmd,
431 cmd->se_cmd.scsi_sense_reason, 0);
432 ft_sess_put(cmd->sess);
433 return;
434 }
435 break; 406 break;
436 case FCP_TMF_TGT_RESET: 407 case FCP_TMF_TGT_RESET:
437 tm_func = TMR_TARGET_WARM_RESET; 408 tm_func = TMR_TARGET_WARM_RESET;
@@ -463,6 +434,36 @@ static void ft_send_tm(struct ft_cmd *cmd)
463 return; 434 return;
464 } 435 }
465 cmd->se_cmd.se_tmr_req = tmr; 436 cmd->se_cmd.se_tmr_req = tmr;
437
438 switch (fcp->fc_tm_flags) {
439 case FCP_TMF_LUN_RESET:
440 cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
441 if (transport_get_lun_for_tmr(&cmd->se_cmd, cmd->lun) < 0) {
442 /*
443 * Make sure to clean up newly allocated TMR request
444 * since "unable to handle TMR request because failed
445 * to get to LUN"
446 */
447 FT_TM_DBG("Failed to get LUN for TMR func %d, "
448 "se_cmd %p, unpacked_lun %d\n",
449 tm_func, &cmd->se_cmd, cmd->lun);
450 ft_dump_cmd(cmd, __func__);
451 sess = cmd->sess;
452 transport_send_check_condition_and_sense(&cmd->se_cmd,
453 cmd->se_cmd.scsi_sense_reason, 0);
454 transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
455 ft_sess_put(sess);
456 return;
457 }
458 break;
459 case FCP_TMF_TGT_RESET:
460 case FCP_TMF_CLR_TASK_SET:
461 case FCP_TMF_ABT_TASK_SET:
462 case FCP_TMF_CLR_ACA:
463 break;
464 default:
465 return;
466 }
466 transport_generic_handle_tmr(&cmd->se_cmd); 467 transport_generic_handle_tmr(&cmd->se_cmd);
467} 468}
468 469
@@ -635,7 +636,8 @@ static void ft_send_cmd(struct ft_cmd *cmd)
635 636
636 fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); 637 fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
637 638
638 ret = ft_get_lun_for_cmd(cmd, fcp->fc_lun); 639 cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
640 ret = transport_get_lun_for_cmd(&cmd->se_cmd, NULL, cmd->lun);
639 if (ret < 0) { 641 if (ret < 0) {
640 ft_dump_cmd(cmd, __func__); 642 ft_dump_cmd(cmd, __func__);
641 transport_send_check_condition_and_sense(&cmd->se_cmd, 643 transport_send_check_condition_and_sense(&cmd->se_cmd,
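
The removed ft_get_lun_for_cmd() hand-decoded only the first two LUN addressing methods; it is replaced by the SCSI core's generic scsilun_to_int(), which folds all four 2-byte levels of the SAM eight-byte LUN. A userspace copy of the decode loop as the SCSI core implements it, applied to a single-level LUN:

#include <stdio.h>
#include <stdint.h>

struct scsi_lun { uint8_t scsi_lun[8]; };

static uint64_t scsilun_to_int(const struct scsi_lun *scsilun)
{
	uint64_t lun = 0;
	int i;

	/* fold each 2-byte level in big-endian order */
	for (i = 0; i < (int)sizeof(lun); i += 2)
		lun |= ((uint64_t)scsilun->scsi_lun[i] << ((i + 1) * 8)) |
		       ((uint64_t)scsilun->scsi_lun[i + 1] << (i * 8));
	return lun;
}

int main(void)
{
	/* peripheral device addressing, bus 0, LUN 5 */
	struct scsi_lun l = { { 0x00, 0x05 } };

	printf("lun = %llu\n", (unsigned long long)scsilun_to_int(&l));
	return 0;
}

This is also why ft_cmd's lun field grows from u16 to u32 in tcm_fc.h above: the generic decode can produce values the two-level version never could.
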
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 4c3c0efbe13f..8c4a24077d9d 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -203,7 +203,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
203 /* XXX For now, initiator will retry */ 203 /* XXX For now, initiator will retry */
204 if (printk_ratelimit()) 204 if (printk_ratelimit())
205 printk(KERN_ERR "%s: Failed to send frame %p, " 205 printk(KERN_ERR "%s: Failed to send frame %p, "
206 "xid <0x%x>, remaining <0x%x>, " 206 "xid <0x%x>, remaining %zu, "
207 "lso_max <0x%x>\n", 207 "lso_max <0x%x>\n",
208 __func__, fp, ep->xid, 208 __func__, fp, ep->xid,
209 remaining, lport->lso_max); 209 remaining, lport->lso_max);
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index a3bd57f2ea32..7491e21cc6ae 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -229,7 +229,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
229 return NULL; 229 return NULL;
230 230
231 sess->se_sess = transport_init_session(); 231 sess->se_sess = transport_init_session();
232 if (!sess->se_sess) { 232 if (IS_ERR(sess->se_sess)) {
233 kfree(sess); 233 kfree(sess);
234 return NULL; 234 return NULL;
235 } 235 }
@@ -332,7 +332,7 @@ void ft_sess_close(struct se_session *se_sess)
332 lport = sess->tport->lport; 332 lport = sess->tport->lport;
333 port_id = sess->port_id; 333 port_id = sess->port_id;
334 if (port_id == -1) { 334 if (port_id == -1) {
335 mutex_lock(&ft_lport_lock); 335 mutex_unlock(&ft_lport_lock);
336 return; 336 return;
337 } 337 }
338 FT_SESS_DBG("port_id %x\n", port_id); 338 FT_SESS_DBG("port_id %x\n", port_id);
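
The tfc_sess.c fix is a classic copy-paste deadlock: the early-return leg of ft_sess_close() re-acquired ft_lport_lock, which the function already holds at that point, instead of dropping it. A minimal pthread sketch of the corrected shape, with every exit path unlocking exactly once:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lport_lock = PTHREAD_MUTEX_INITIALIZER;

static void sess_close(int port_id)
{
	pthread_mutex_lock(&lport_lock);

	if (port_id == -1) {
		/* the bug: this leg called lock() again and self-deadlocked */
		pthread_mutex_unlock(&lport_lock);
		return;
	}

	/* ... tear the session down under the lock ... */
	pthread_mutex_unlock(&lport_lock);
}

int main(void)
{
	sess_close(-1);
	sess_close(7);
	puts("both paths unlocked cleanly");
	return 0;
}
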
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index 1bd28450ca40..a764bf99743b 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -421,7 +421,6 @@ static int max3110_main_thread(void *_max)
421 int ret = 0; 421 int ret = 0;
422 struct circ_buf *xmit = &max->con_xmit; 422 struct circ_buf *xmit = &max->con_xmit;
423 423
424 init_waitqueue_head(wq);
425 pr_info(PR_FMT "start main thread\n"); 424 pr_info(PR_FMT "start main thread\n");
426 425
427 do { 426 do {
@@ -823,7 +822,7 @@ static int __devinit serial_m3110_probe(struct spi_device *spi)
823 res = RC_TAG; 822 res = RC_TAG;
824 ret = max3110_write_then_read(max, (u8 *)&res, (u8 *)&res, 2, 0); 823 ret = max3110_write_then_read(max, (u8 *)&res, (u8 *)&res, 2, 0);
825 if (ret < 0 || res == 0 || res == 0xffff) { 824 if (ret < 0 || res == 0 || res == 0xffff) {
826 printk(KERN_ERR "MAX3111 deemed not present (conf reg %04x)", 825 dev_dbg(&spi->dev, "MAX3111 deemed not present (conf reg %04x)",
827 res); 826 res);
828 ret = -ENODEV; 827 ret = -ENODEV;
829 goto err_get_page; 828 goto err_get_page;
@@ -838,6 +837,8 @@ static int __devinit serial_m3110_probe(struct spi_device *spi)
838 max->con_xmit.head = 0; 837 max->con_xmit.head = 0;
839 max->con_xmit.tail = 0; 838 max->con_xmit.tail = 0;
840 839
840 init_waitqueue_head(&max->wq);
841
841 max->main_thread = kthread_run(max3110_main_thread, 842 max->main_thread = kthread_run(max3110_main_thread,
842 max, "max3110_main"); 843 max, "max3110_main");
843 if (IS_ERR(max->main_thread)) { 844 if (IS_ERR(max->main_thread)) {
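
The max3110 change closes an init-order race: the wait queue was initialized inside the worker thread itself, so a wakeup arriving between kthread_run() and the thread's first instructions hit an uninitialized queue. Moving init_waitqueue_head() before kthread_run() follows the general rule, initialize synchronization state before spawning its user, sketched here with pthreads:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m;
static pthread_cond_t c;
static int ready;

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&m);
	while (!ready)
		pthread_cond_wait(&c, &m);  /* safe: c and m already exist */
	pthread_mutex_unlock(&m);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_mutex_init(&m, NULL);            /* initialize first ...    */
	pthread_cond_init(&c, NULL);
	pthread_create(&t, NULL, worker, NULL);  /* ... then spawn the user */

	pthread_mutex_lock(&m);
	ready = 1;
	pthread_cond_signal(&c);
	pthread_mutex_unlock(&m);

	pthread_join(t, NULL);
	puts("worker woke on an initialized wait object");
	return 0;
}
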
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index e35a17687c05..aa3cc465a601 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -375,7 +375,7 @@ static int usb_unbind_interface(struct device *dev)
375 * Just re-enable it without affecting the endpoint toggles. 375 * Just re-enable it without affecting the endpoint toggles.
376 */ 376 */
377 usb_enable_interface(udev, intf, false); 377 usb_enable_interface(udev, intf, false);
378 } else if (!error && !intf->dev.power.in_suspend) { 378 } else if (!error && !intf->dev.power.is_prepared) {
379 r = usb_set_interface(udev, intf->altsetting[0]. 379 r = usb_set_interface(udev, intf->altsetting[0].
380 desc.bInterfaceNumber, 0); 380 desc.bInterfaceNumber, 0);
381 if (r < 0) 381 if (r < 0)
@@ -960,7 +960,7 @@ void usb_rebind_intf(struct usb_interface *intf)
960 } 960 }
961 961
962 /* Try to rebind the interface */ 962 /* Try to rebind the interface */
963 if (!intf->dev.power.in_suspend) { 963 if (!intf->dev.power.is_prepared) {
964 intf->needs_binding = 0; 964 intf->needs_binding = 0;
965 rc = device_attach(&intf->dev); 965 rc = device_attach(&intf->dev);
966 if (rc < 0) 966 if (rc < 0)
@@ -1107,7 +1107,7 @@ static int usb_resume_interface(struct usb_device *udev,
1107 if (intf->condition == USB_INTERFACE_UNBOUND) { 1107 if (intf->condition == USB_INTERFACE_UNBOUND) {
1108 1108
1109 /* Carry out a deferred switch to altsetting 0 */ 1109 /* Carry out a deferred switch to altsetting 0 */
1110 if (intf->needs_altsetting0 && !intf->dev.power.in_suspend) { 1110 if (intf->needs_altsetting0 && !intf->dev.power.is_prepared) {
1111 usb_set_interface(udev, intf->altsetting[0]. 1111 usb_set_interface(udev, intf->altsetting[0].
1112 desc.bInterfaceNumber, 0); 1112 desc.bInterfaceNumber, 0);
1113 intf->needs_altsetting0 = 0; 1113 intf->needs_altsetting0 = 0;
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 553da68bd510..30df85d8fca8 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -395,9 +395,9 @@ static void unmask_evtchn(int port)
395static void xen_irq_init(unsigned irq) 395static void xen_irq_init(unsigned irq)
396{ 396{
397 struct irq_info *info; 397 struct irq_info *info;
398#ifdef CONFIG_SMP
398 struct irq_desc *desc = irq_to_desc(irq); 399 struct irq_desc *desc = irq_to_desc(irq);
399 400
400#ifdef CONFIG_SMP
401 /* By default all event channels notify CPU#0. */ 401 /* By default all event channels notify CPU#0. */
402 cpumask_copy(desc->irq_data.affinity, cpumask_of(0)); 402 cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
403#endif 403#endif
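
The xen/events.c tweak moves the irq_to_desc() lookup and the desc local under #ifdef CONFIG_SMP, since desc is only consumed by the SMP-only affinity setup; the old placement left an unused variable on UP builds. The same shape in a self-contained example:

#include <stdio.h>

#define CONFIG_SMP   /* comment out to check the UP build stays warning-free */

static void irq_init(unsigned int irq)
{
#ifdef CONFIG_SMP
	unsigned int desc = irq * 2;   /* stands in for irq_to_desc(irq) */

	printf("SMP-only setup, desc %u\n", desc);
#endif
	printf("common setup, irq %u\n", irq);
}

int main(void)
{
	irq_init(3);
	return 0;
}
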
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 9ad2369d9e35..bfcb18feb1df 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -231,9 +231,6 @@ static int bad_inode_readlink(struct dentry *dentry, char __user *buffer,
231 231
232static int bad_inode_permission(struct inode *inode, int mask, unsigned int flags) 232static int bad_inode_permission(struct inode *inode, int mask, unsigned int flags)
233{ 233{
234 if (flags & IPERM_FLAG_RCU)
235 return -ECHILD;
236
237 return -EIO; 234 return -EIO;
238} 235}
239 236
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 1a2421f908f0..610e8e0b04b8 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -762,7 +762,19 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
762 if (!disk) 762 if (!disk)
763 return ERR_PTR(-ENXIO); 763 return ERR_PTR(-ENXIO);
764 764
765 whole = bdget_disk(disk, 0); 765 /*
766 * Normally, @bdev should equal what's returned from bdget_disk()
767 * if partno is 0; however, some drivers (floppy) use multiple
768 * bdev's for the same physical device and @bdev may be one of the
769 * aliases. Keep @bdev if partno is 0. This means claimer
770 * tracking is broken for those devices but it has always been that
771 * way.
772 */
773 if (partno)
774 whole = bdget_disk(disk, 0);
775 else
776 whole = bdgrab(bdev);
777
766 module_put(disk->fops->owner); 778 module_put(disk->fops->owner);
767 put_disk(disk); 779 put_disk(disk);
768 if (!whole) 780 if (!whole)
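
The bd_start_claiming() fix follows directly from the comment added above: when partno is 0 and a driver keeps multiple bdev aliases for one physical device, a fresh bdget_disk() lookup can hand back a different struct block_device than the one the caller is claiming, so the code now pins the bdev it was handed with bdgrab(). A self-contained toy contrasting "grab the handle you hold" with "re-lookup by key"; all names here are hypothetical:

#include <stdio.h>

struct handle { int refs; int key; };

/* two handles aliasing the same underlying object (key 0) */
static struct handle table[2] = { { 1, 0 }, { 1, 0 } };

static struct handle *lookup(int key)   /* always resolves to alias 0 */
{
	(void)key;
	table[0].refs++;
	return &table[0];
}

static struct handle *grab(struct handle *h)  /* pin the caller's alias */
{
	h->refs++;
	return h;
}

int main(void)
{
	struct handle *mine = &table[1];

	printf("lookup returns my alias? %s\n",
	       lookup(0) == mine ? "yes" : "no");   /* no  */
	printf("grab returns my alias?   %s\n",
	       grab(mine) == mine ? "yes" : "no");  /* yes */
	return 0;
}
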
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 378b5b4443f3..f30ac05dbda7 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -19,7 +19,6 @@
19#ifndef __BTRFS_CTREE__ 19#ifndef __BTRFS_CTREE__
20#define __BTRFS_CTREE__ 20#define __BTRFS_CTREE__
21 21
22#include <linux/version.h>
23#include <linux/mm.h> 22#include <linux/mm.h>
24#include <linux/highmem.h> 23#include <linux/highmem.h>
25#include <linux/fs.h> 24#include <linux/fs.h>
@@ -967,6 +966,12 @@ struct btrfs_fs_info {
967 struct srcu_struct subvol_srcu; 966 struct srcu_struct subvol_srcu;
968 967
969 spinlock_t trans_lock; 968 spinlock_t trans_lock;
969 /*
970 * the reloc mutex goes with the trans lock, it is taken
971 * during commit to protect us from the relocation code
972 */
973 struct mutex reloc_mutex;
974
970 struct list_head trans_list; 975 struct list_head trans_list;
971 struct list_head hashers; 976 struct list_head hashers;
972 struct list_head dead_roots; 977 struct list_head dead_roots;
@@ -1172,6 +1177,14 @@ struct btrfs_root {
1172 u32 type; 1177 u32 type;
1173 1178
1174 u64 highest_objectid; 1179 u64 highest_objectid;
1180
1181 /* btrfs_record_root_in_trans is a multi-step process,
1182 * and it can race with the balancing code. But the
1183 * race is very small, and only the first time the root
1184 * is added to each transaction. So in_trans_setup
1185 * is used to tell us when more checks are required
1186 */
1187 unsigned long in_trans_setup;
1175 int ref_cows; 1188 int ref_cows;
1176 int track_dirty; 1189 int track_dirty;
1177 int in_radix; 1190 int in_radix;
@@ -1181,7 +1194,6 @@ struct btrfs_root {
1181 struct btrfs_key defrag_max; 1194 struct btrfs_key defrag_max;
1182 int defrag_running; 1195 int defrag_running;
1183 char *name; 1196 char *name;
1184 int in_sysfs;
1185 1197
1186 /* the dirty list is only used by non-reference counted roots */ 1198 /* the dirty list is only used by non-reference counted roots */
1187 struct list_head dirty_list; 1199 struct list_head dirty_list;
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 6462c29d2d37..98c68e658a9b 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -82,19 +82,16 @@ static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
82 return root->fs_info->delayed_root; 82 return root->fs_info->delayed_root;
83} 83}
84 84
85static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node( 85static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
86 struct inode *inode)
87{ 86{
88 struct btrfs_delayed_node *node;
89 struct btrfs_inode *btrfs_inode = BTRFS_I(inode); 87 struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
90 struct btrfs_root *root = btrfs_inode->root; 88 struct btrfs_root *root = btrfs_inode->root;
91 u64 ino = btrfs_ino(inode); 89 u64 ino = btrfs_ino(inode);
92 int ret; 90 struct btrfs_delayed_node *node;
93 91
94again:
95 node = ACCESS_ONCE(btrfs_inode->delayed_node); 92 node = ACCESS_ONCE(btrfs_inode->delayed_node);
96 if (node) { 93 if (node) {
97 atomic_inc(&node->refs); /* can be accessed */ 94 atomic_inc(&node->refs);
98 return node; 95 return node;
99 } 96 }
100 97
@@ -102,8 +99,10 @@ again:
102 node = radix_tree_lookup(&root->delayed_nodes_tree, ino); 99 node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
103 if (node) { 100 if (node) {
104 if (btrfs_inode->delayed_node) { 101 if (btrfs_inode->delayed_node) {
102 atomic_inc(&node->refs); /* can be accessed */
103 BUG_ON(btrfs_inode->delayed_node != node);
105 spin_unlock(&root->inode_lock); 104 spin_unlock(&root->inode_lock);
106 goto again; 105 return node;
107 } 106 }
108 btrfs_inode->delayed_node = node; 107 btrfs_inode->delayed_node = node;
109 atomic_inc(&node->refs); /* can be accessed */ 108 atomic_inc(&node->refs); /* can be accessed */
@@ -113,6 +112,23 @@ again:
113 } 112 }
114 spin_unlock(&root->inode_lock); 113 spin_unlock(&root->inode_lock);
115 114
115 return NULL;
116}
117
118static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
119 struct inode *inode)
120{
121 struct btrfs_delayed_node *node;
122 struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
123 struct btrfs_root *root = btrfs_inode->root;
124 u64 ino = btrfs_ino(inode);
125 int ret;
126
127again:
128 node = btrfs_get_delayed_node(inode);
129 if (node)
130 return node;
131
116 node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS); 132 node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
117 if (!node) 133 if (!node)
118 return ERR_PTR(-ENOMEM); 134 return ERR_PTR(-ENOMEM);
@@ -297,7 +313,6 @@ struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
297 item->data_len = data_len; 313 item->data_len = data_len;
298 item->ins_or_del = 0; 314 item->ins_or_del = 0;
299 item->bytes_reserved = 0; 315 item->bytes_reserved = 0;
300 item->block_rsv = NULL;
301 item->delayed_node = NULL; 316 item->delayed_node = NULL;
302 atomic_set(&item->refs, 1); 317 atomic_set(&item->refs, 1);
303 } 318 }
@@ -549,19 +564,6 @@ struct btrfs_delayed_item *__btrfs_next_delayed_item(
549 return next; 564 return next;
550} 565}
551 566
552static inline struct btrfs_delayed_node *btrfs_get_delayed_node(
553 struct inode *inode)
554{
555 struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
556 struct btrfs_delayed_node *delayed_node;
557
558 delayed_node = btrfs_inode->delayed_node;
559 if (delayed_node)
560 atomic_inc(&delayed_node->refs);
561
562 return delayed_node;
563}
564
565static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root, 567static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
566 u64 root_id) 568 u64 root_id)
567{ 569{
@@ -593,10 +595,8 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
593 595
594 num_bytes = btrfs_calc_trans_metadata_size(root, 1); 596 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
595 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); 597 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
596 if (!ret) { 598 if (!ret)
597 item->bytes_reserved = num_bytes; 599 item->bytes_reserved = num_bytes;
598 item->block_rsv = dst_rsv;
599 }
600 600
601 return ret; 601 return ret;
602} 602}
@@ -604,10 +604,13 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
604static void btrfs_delayed_item_release_metadata(struct btrfs_root *root, 604static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
605 struct btrfs_delayed_item *item) 605 struct btrfs_delayed_item *item)
606{ 606{
607 struct btrfs_block_rsv *rsv;
608
607 if (!item->bytes_reserved) 609 if (!item->bytes_reserved)
608 return; 610 return;
609 611
610 btrfs_block_rsv_release(root, item->block_rsv, 612 rsv = &root->fs_info->global_block_rsv;
613 btrfs_block_rsv_release(root, rsv,
611 item->bytes_reserved); 614 item->bytes_reserved);
612} 615}
613 616
@@ -1014,6 +1017,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1014 struct btrfs_delayed_root *delayed_root; 1017 struct btrfs_delayed_root *delayed_root;
1015 struct btrfs_delayed_node *curr_node, *prev_node; 1018 struct btrfs_delayed_node *curr_node, *prev_node;
1016 struct btrfs_path *path; 1019 struct btrfs_path *path;
1020 struct btrfs_block_rsv *block_rsv;
1017 int ret = 0; 1021 int ret = 0;
1018 1022
1019 path = btrfs_alloc_path(); 1023 path = btrfs_alloc_path();
@@ -1021,6 +1025,9 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1021 return -ENOMEM; 1025 return -ENOMEM;
1022 path->leave_spinning = 1; 1026 path->leave_spinning = 1;
1023 1027
1028 block_rsv = trans->block_rsv;
1029 trans->block_rsv = &root->fs_info->global_block_rsv;
1030
1024 delayed_root = btrfs_get_delayed_root(root); 1031 delayed_root = btrfs_get_delayed_root(root);
1025 1032
1026 curr_node = btrfs_first_delayed_node(delayed_root); 1033 curr_node = btrfs_first_delayed_node(delayed_root);
@@ -1045,6 +1052,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1045 } 1052 }
1046 1053
1047 btrfs_free_path(path); 1054 btrfs_free_path(path);
1055 trans->block_rsv = block_rsv;
1048 return ret; 1056 return ret;
1049} 1057}
1050 1058
@@ -1052,6 +1060,7 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1052 struct btrfs_delayed_node *node) 1060 struct btrfs_delayed_node *node)
1053{ 1061{
1054 struct btrfs_path *path; 1062 struct btrfs_path *path;
1063 struct btrfs_block_rsv *block_rsv;
1055 int ret; 1064 int ret;
1056 1065
1057 path = btrfs_alloc_path(); 1066 path = btrfs_alloc_path();
@@ -1059,6 +1068,9 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1059 return -ENOMEM; 1068 return -ENOMEM;
1060 path->leave_spinning = 1; 1069 path->leave_spinning = 1;
1061 1070
1071 block_rsv = trans->block_rsv;
1072 trans->block_rsv = &node->root->fs_info->global_block_rsv;
1073
1062 ret = btrfs_insert_delayed_items(trans, path, node->root, node); 1074 ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1063 if (!ret) 1075 if (!ret)
1064 ret = btrfs_delete_delayed_items(trans, path, node->root, node); 1076 ret = btrfs_delete_delayed_items(trans, path, node->root, node);
@@ -1066,6 +1078,7 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1066 ret = btrfs_update_delayed_inode(trans, node->root, path, node); 1078 ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1067 btrfs_free_path(path); 1079 btrfs_free_path(path);
1068 1080
1081 trans->block_rsv = block_rsv;
1069 return ret; 1082 return ret;
1070} 1083}
1071 1084
@@ -1116,6 +1129,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
1116 struct btrfs_path *path; 1129 struct btrfs_path *path;
1117 struct btrfs_delayed_node *delayed_node = NULL; 1130 struct btrfs_delayed_node *delayed_node = NULL;
1118 struct btrfs_root *root; 1131 struct btrfs_root *root;
1132 struct btrfs_block_rsv *block_rsv;
1119 unsigned long nr = 0; 1133 unsigned long nr = 0;
1120 int need_requeue = 0; 1134 int need_requeue = 0;
1121 int ret; 1135 int ret;
@@ -1134,6 +1148,9 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
1134 if (IS_ERR(trans)) 1148 if (IS_ERR(trans))
1135 goto free_path; 1149 goto free_path;
1136 1150
1151 block_rsv = trans->block_rsv;
1152 trans->block_rsv = &root->fs_info->global_block_rsv;
1153
1137 ret = btrfs_insert_delayed_items(trans, path, root, delayed_node); 1154 ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
1138 if (!ret) 1155 if (!ret)
1139 ret = btrfs_delete_delayed_items(trans, path, root, 1156 ret = btrfs_delete_delayed_items(trans, path, root,
@@ -1176,6 +1193,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
1176 1193
1177 nr = trans->blocks_used; 1194 nr = trans->blocks_used;
1178 1195
1196 trans->block_rsv = block_rsv;
1179 btrfs_end_transaction_dmeta(trans, root); 1197 btrfs_end_transaction_dmeta(trans, root);
1180 __btrfs_btree_balance_dirty(root, nr); 1198 __btrfs_btree_balance_dirty(root, nr);
1181free_path: 1199free_path:
@@ -1222,6 +1240,13 @@ again:
1222 return 0; 1240 return 0;
1223} 1241}
1224 1242
1243void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
1244{
1245 struct btrfs_delayed_root *delayed_root;
1246 delayed_root = btrfs_get_delayed_root(root);
1247 WARN_ON(btrfs_first_delayed_node(delayed_root));
1248}
1249
1225void btrfs_balance_delayed_items(struct btrfs_root *root) 1250void btrfs_balance_delayed_items(struct btrfs_root *root)
1226{ 1251{
1227 struct btrfs_delayed_root *delayed_root; 1252 struct btrfs_delayed_root *delayed_root;
@@ -1382,8 +1407,7 @@ end:
1382 1407
1383int btrfs_inode_delayed_dir_index_count(struct inode *inode) 1408int btrfs_inode_delayed_dir_index_count(struct inode *inode)
1384{ 1409{
1385 struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node; 1410 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1386 int ret = 0;
1387 1411
1388 if (!delayed_node) 1412 if (!delayed_node)
1389 return -ENOENT; 1413 return -ENOENT;
@@ -1393,11 +1417,14 @@ int btrfs_inode_delayed_dir_index_count(struct inode *inode)
1393 * a new directory index is added into the delayed node and index_cnt 1417 * a new directory index is added into the delayed node and index_cnt
1394 * is updated now. So we needn't lock the delayed node. 1418 * is updated now. So we needn't lock the delayed node.
1395 */ 1419 */
1396 if (!delayed_node->index_cnt) 1420 if (!delayed_node->index_cnt) {
1421 btrfs_release_delayed_node(delayed_node);
1397 return -EINVAL; 1422 return -EINVAL;
1423 }
1398 1424
1399 BTRFS_I(inode)->index_cnt = delayed_node->index_cnt; 1425 BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
1400 return ret; 1426 btrfs_release_delayed_node(delayed_node);
1427 return 0;
1401} 1428}
1402 1429
1403void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, 1430void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
@@ -1591,6 +1618,57 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1591 inode->i_ctime.tv_nsec); 1618 inode->i_ctime.tv_nsec);
1592} 1619}
1593 1620
1621int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1622{
1623 struct btrfs_delayed_node *delayed_node;
1624 struct btrfs_inode_item *inode_item;
1625 struct btrfs_timespec *tspec;
1626
1627 delayed_node = btrfs_get_delayed_node(inode);
1628 if (!delayed_node)
1629 return -ENOENT;
1630
1631 mutex_lock(&delayed_node->mutex);
1632 if (!delayed_node->inode_dirty) {
1633 mutex_unlock(&delayed_node->mutex);
1634 btrfs_release_delayed_node(delayed_node);
1635 return -ENOENT;
1636 }
1637
1638 inode_item = &delayed_node->inode_item;
1639
1640 inode->i_uid = btrfs_stack_inode_uid(inode_item);
1641 inode->i_gid = btrfs_stack_inode_gid(inode_item);
1642 btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
1643 inode->i_mode = btrfs_stack_inode_mode(inode_item);
1644 inode->i_nlink = btrfs_stack_inode_nlink(inode_item);
1645 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1646 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1647 BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item);
1648 inode->i_rdev = 0;
1649 *rdev = btrfs_stack_inode_rdev(inode_item);
1650 BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1651
1652 tspec = btrfs_inode_atime(inode_item);
1653 inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
1654 inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
1655
1656 tspec = btrfs_inode_mtime(inode_item);
1657 inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
1658 inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
1659
1660 tspec = btrfs_inode_ctime(inode_item);
1661 inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
1662 inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
1663
1664 inode->i_generation = BTRFS_I(inode)->generation;
1665 BTRFS_I(inode)->index_cnt = (u64)-1;
1666
1667 mutex_unlock(&delayed_node->mutex);
1668 btrfs_release_delayed_node(delayed_node);
1669 return 0;
1670}
1671
1594int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, 1672int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1595 struct btrfs_root *root, struct inode *inode) 1673 struct btrfs_root *root, struct inode *inode)
1596{ 1674{
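
The delayed-inode refactor promotes btrfs_get_delayed_node() to the single lookup helper (now also consulting the per-root radix tree) and has it always return the node with its refcount raised; in exchange, every caller, including the early-error legs of btrfs_inode_delayed_dir_index_count() and the new btrfs_fill_inode(), must pair the get with btrfs_release_delayed_node(). A self-contained sketch of that get/release discipline, with hypothetical names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int refs; int index_cnt; };

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *cached;   /* the cache owns one reference itself */

static struct node *get_node(void)
{
	struct node *n;

	pthread_mutex_lock(&tree_lock);
	n = cached;
	if (n)
		n->refs++;            /* pinned before the lock drops */
	pthread_mutex_unlock(&tree_lock);
	return n;
}

static void release_node(struct node *n)
{
	if (--n->refs == 0)
		free(n);
}

static int index_count(void)
{
	struct node *n = get_node();

	if (!n)
		return -1;
	if (!n->index_cnt) {
		release_node(n);      /* the leak the hunk above plugs */
		return -1;
	}
	release_node(n);
	return 0;
}

int main(void)
{
	cached = calloc(1, sizeof(*cached));
	cached->refs = 1;
	printf("index_count() = %d\n", index_count());
	return 0;
}
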
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index eb7d240aa648..8d27af4bd8b9 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -75,7 +75,6 @@ struct btrfs_delayed_item {
75 struct list_head tree_list; /* used for batch insert/delete items */ 75 struct list_head tree_list; /* used for batch insert/delete items */
76 struct list_head readdir_list; /* used for readdir items */ 76 struct list_head readdir_list; /* used for readdir items */
77 u64 bytes_reserved; 77 u64 bytes_reserved;
78 struct btrfs_block_rsv *block_rsv;
79 struct btrfs_delayed_node *delayed_node; 78 struct btrfs_delayed_node *delayed_node;
80 atomic_t refs; 79 atomic_t refs;
81 int ins_or_del; 80 int ins_or_del;
@@ -120,6 +119,7 @@ void btrfs_kill_delayed_inode_items(struct inode *inode);
120 119
121int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, 120int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
122 struct btrfs_root *root, struct inode *inode); 121 struct btrfs_root *root, struct inode *inode);
122int btrfs_fill_inode(struct inode *inode, u32 *rdev);
123 123
124/* Used for drop dead root */ 124/* Used for drop dead root */
125void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); 125void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
@@ -138,4 +138,8 @@ int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
138/* for init */ 138/* for init */
139int __init btrfs_delayed_inode_init(void); 139int __init btrfs_delayed_inode_init(void);
140void btrfs_delayed_inode_exit(void); 140void btrfs_delayed_inode_exit(void);
141
142/* for debugging */
143void btrfs_assert_delayed_root_empty(struct btrfs_root *root);
144
141#endif 145#endif
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 9f68c6898653..1ac8db5dc0a3 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1044,7 +1044,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1044 root->last_trans = 0; 1044 root->last_trans = 0;
1045 root->highest_objectid = 0; 1045 root->highest_objectid = 0;
1046 root->name = NULL; 1046 root->name = NULL;
1047 root->in_sysfs = 0;
1048 root->inode_tree = RB_ROOT; 1047 root->inode_tree = RB_ROOT;
1049 INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC); 1048 INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1050 root->block_rsv = NULL; 1049 root->block_rsv = NULL;
@@ -1300,19 +1299,21 @@ again:
1300 return root; 1299 return root;
1301 1300
1302 root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); 1301 root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1303 if (!root->free_ino_ctl)
1304 goto fail;
1305 root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), 1302 root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1306 GFP_NOFS); 1303 GFP_NOFS);
1307 if (!root->free_ino_pinned) 1304 if (!root->free_ino_pinned || !root->free_ino_ctl) {
1305 ret = -ENOMEM;
1308 goto fail; 1306 goto fail;
1307 }
1309 1308
1310 btrfs_init_free_ino_ctl(root); 1309 btrfs_init_free_ino_ctl(root);
1311 mutex_init(&root->fs_commit_mutex); 1310 mutex_init(&root->fs_commit_mutex);
1312 spin_lock_init(&root->cache_lock); 1311 spin_lock_init(&root->cache_lock);
1313 init_waitqueue_head(&root->cache_wait); 1312 init_waitqueue_head(&root->cache_wait);
1314 1313
1315 set_anon_super(&root->anon_super, NULL); 1314 ret = set_anon_super(&root->anon_super, NULL);
1315 if (ret)
1316 goto fail;
1316 1317
1317 if (btrfs_root_refs(&root->root_item) == 0) { 1318 if (btrfs_root_refs(&root->root_item) == 0) {
1318 ret = -ENOENT; 1319 ret = -ENOENT;
@@ -1618,6 +1619,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1618 spin_lock_init(&fs_info->fs_roots_radix_lock); 1619 spin_lock_init(&fs_info->fs_roots_radix_lock);
1619 spin_lock_init(&fs_info->delayed_iput_lock); 1620 spin_lock_init(&fs_info->delayed_iput_lock);
1620 spin_lock_init(&fs_info->defrag_inodes_lock); 1621 spin_lock_init(&fs_info->defrag_inodes_lock);
1622 mutex_init(&fs_info->reloc_mutex);
1621 1623
1622 init_completion(&fs_info->kobj_unregister); 1624 init_completion(&fs_info->kobj_unregister);
1623 fs_info->tree_root = tree_root; 1625 fs_info->tree_root = tree_root;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b42efc2ded51..71cd456fdb60 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3314,10 +3314,6 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
3314 if (reserved == 0) 3314 if (reserved == 0)
3315 return 0; 3315 return 0;
3316 3316
3317 /* nothing to shrink - nothing to reclaim */
3318 if (root->fs_info->delalloc_bytes == 0)
3319 return 0;
3320
3321 max_reclaim = min(reserved, to_reclaim); 3317 max_reclaim = min(reserved, to_reclaim);
3322 3318
3323 while (loops < 1024) { 3319 while (loops < 1024) {
@@ -4846,7 +4842,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
4846 u64 num_bytes, u64 empty_size, 4842 u64 num_bytes, u64 empty_size,
4847 u64 search_start, u64 search_end, 4843 u64 search_start, u64 search_end,
4848 u64 hint_byte, struct btrfs_key *ins, 4844 u64 hint_byte, struct btrfs_key *ins,
4849 int data) 4845 u64 data)
4850{ 4846{
4851 int ret = 0; 4847 int ret = 0;
4852 struct btrfs_root *root = orig_root->fs_info->extent_root; 4848 struct btrfs_root *root = orig_root->fs_info->extent_root;
@@ -4873,7 +4869,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
4873 4869
4874 space_info = __find_space_info(root->fs_info, data); 4870 space_info = __find_space_info(root->fs_info, data);
4875 if (!space_info) { 4871 if (!space_info) {
4876 printk(KERN_ERR "No space info for %d\n", data); 4872 printk(KERN_ERR "No space info for %llu\n", data);
4877 return -ENOSPC; 4873 return -ENOSPC;
4878 } 4874 }
4879 4875
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 9f985a429877..bf0d61567f3d 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1893,9 +1893,12 @@ void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
1893 1893
1894 while ((node = rb_last(&ctl->free_space_offset)) != NULL) { 1894 while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
1895 info = rb_entry(node, struct btrfs_free_space, offset_index); 1895 info = rb_entry(node, struct btrfs_free_space, offset_index);
1896 unlink_free_space(ctl, info); 1896 if (!info->bitmap) {
1897 kfree(info->bitmap); 1897 unlink_free_space(ctl, info);
1898 kmem_cache_free(btrfs_free_space_cachep, info); 1898 kmem_cache_free(btrfs_free_space_cachep, info);
1899 } else {
1900 free_bitmap(ctl, info);
1901 }
1899 if (need_resched()) { 1902 if (need_resched()) {
1900 spin_unlock(&ctl->tree_lock); 1903 spin_unlock(&ctl->tree_lock);
1901 cond_resched(); 1904 cond_resched();
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 751ddf8fc58a..d340f63d8f07 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2509,6 +2509,11 @@ static void btrfs_read_locked_inode(struct inode *inode)
2509 int maybe_acls; 2509 int maybe_acls;
2510 u32 rdev; 2510 u32 rdev;
2511 int ret; 2511 int ret;
2512 bool filled = false;
2513
2514 ret = btrfs_fill_inode(inode, &rdev);
2515 if (!ret)
2516 filled = true;
2512 2517
2513 path = btrfs_alloc_path(); 2518 path = btrfs_alloc_path();
2514 BUG_ON(!path); 2519 BUG_ON(!path);
@@ -2520,6 +2525,10 @@ static void btrfs_read_locked_inode(struct inode *inode)
2520 goto make_bad; 2525 goto make_bad;
2521 2526
2522 leaf = path->nodes[0]; 2527 leaf = path->nodes[0];
2528
2529 if (filled)
2530 goto cache_acl;
2531
2523 inode_item = btrfs_item_ptr(leaf, path->slots[0], 2532 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2524 struct btrfs_inode_item); 2533 struct btrfs_inode_item);
2525 if (!leaf->map_token) 2534 if (!leaf->map_token)
@@ -2556,7 +2565,7 @@ static void btrfs_read_locked_inode(struct inode *inode)
2556 2565
2557 BTRFS_I(inode)->index_cnt = (u64)-1; 2566 BTRFS_I(inode)->index_cnt = (u64)-1;
2558 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); 2567 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2559 2568cache_acl:
2560 /* 2569 /*
2561 * try to precache a NULL acl entry for files that don't have 2570 * try to precache a NULL acl entry for files that don't have
2562 * any xattrs or acls 2571 * any xattrs or acls
@@ -2572,7 +2581,6 @@ static void btrfs_read_locked_inode(struct inode *inode)
2572 } 2581 }
2573 2582
2574 btrfs_free_path(path); 2583 btrfs_free_path(path);
2575 inode_item = NULL;
2576 2584
2577 switch (inode->i_mode & S_IFMT) { 2585 switch (inode->i_mode & S_IFMT) {
2578 case S_IFREG: 2586 case S_IFREG:
@@ -3076,6 +3084,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
3076 ret = btrfs_update_inode(trans, root, dir); 3084 ret = btrfs_update_inode(trans, root, dir);
3077 BUG_ON(ret); 3085 BUG_ON(ret);
3078 3086
3087 btrfs_free_path(path);
3079 return 0; 3088 return 0;
3080} 3089}
3081 3090
@@ -4519,6 +4528,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4519 inode_tree_add(inode); 4528 inode_tree_add(inode);
4520 4529
4521 trace_btrfs_inode_new(inode); 4530 trace_btrfs_inode_new(inode);
4531 btrfs_set_inode_last_trans(trans, inode);
4522 4532
4523 return inode; 4533 return inode;
4524fail: 4534fail:
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index b793d112d1f6..a3c4751e07db 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -482,8 +482,10 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
482 ret = btrfs_snap_reserve_metadata(trans, pending_snapshot); 482 ret = btrfs_snap_reserve_metadata(trans, pending_snapshot);
483 BUG_ON(ret); 483 BUG_ON(ret);
484 484
485 spin_lock(&root->fs_info->trans_lock);
485 list_add(&pending_snapshot->list, 486 list_add(&pending_snapshot->list,
486 &trans->transaction->pending_snapshots); 487 &trans->transaction->pending_snapshots);
488 spin_unlock(&root->fs_info->trans_lock);
487 if (async_transid) { 489 if (async_transid) {
488 *async_transid = trans->transid; 490 *async_transid = trans->transid;
489 ret = btrfs_commit_transaction_async(trans, 491 ret = btrfs_commit_transaction_async(trans,
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index b1ef27cc673b..5e0a3dc79a45 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1368,7 +1368,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
1368 int ret; 1368 int ret;
1369 1369
1370 if (!root->reloc_root) 1370 if (!root->reloc_root)
1371 return 0; 1371 goto out;
1372 1372
1373 reloc_root = root->reloc_root; 1373 reloc_root = root->reloc_root;
1374 root_item = &reloc_root->root_item; 1374 root_item = &reloc_root->root_item;
@@ -1390,6 +1390,8 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
1390 ret = btrfs_update_root(trans, root->fs_info->tree_root, 1390 ret = btrfs_update_root(trans, root->fs_info->tree_root,
1391 &reloc_root->root_key, root_item); 1391 &reloc_root->root_key, root_item);
1392 BUG_ON(ret); 1392 BUG_ON(ret);
1393
1394out:
1393 return 0; 1395 return 0;
1394} 1396}
1395 1397
@@ -2142,10 +2144,11 @@ int prepare_to_merge(struct reloc_control *rc, int err)
2142 u64 num_bytes = 0; 2144 u64 num_bytes = 0;
2143 int ret; 2145 int ret;
2144 2146
2145 spin_lock(&root->fs_info->trans_lock); 2147 mutex_lock(&root->fs_info->reloc_mutex);
2146 rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; 2148 rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2147 rc->merging_rsv_size += rc->nodes_relocated * 2; 2149 rc->merging_rsv_size += rc->nodes_relocated * 2;
2148 spin_unlock(&root->fs_info->trans_lock); 2150 mutex_unlock(&root->fs_info->reloc_mutex);
2151
2149again: 2152again:
2150 if (!err) { 2153 if (!err) {
2151 num_bytes = rc->merging_rsv_size; 2154 num_bytes = rc->merging_rsv_size;
@@ -2214,9 +2217,16 @@ int merge_reloc_roots(struct reloc_control *rc)
2214 int ret; 2217 int ret;
2215again: 2218again:
2216 root = rc->extent_root; 2219 root = rc->extent_root;
2217 spin_lock(&root->fs_info->trans_lock); 2220
2221 /*
2222 * this serializes us with btrfs_record_root_in_transaction,
2223 * we have to make sure nobody is in the middle of
2224 * adding their roots to the list while we are
2225 * doing this splice
2226 */
2227 mutex_lock(&root->fs_info->reloc_mutex);
2218 list_splice_init(&rc->reloc_roots, &reloc_roots); 2228 list_splice_init(&rc->reloc_roots, &reloc_roots);
2219 spin_unlock(&root->fs_info->trans_lock); 2229 mutex_unlock(&root->fs_info->reloc_mutex);
2220 2230
2221 while (!list_empty(&reloc_roots)) { 2231 while (!list_empty(&reloc_roots)) {
2222 found = 1; 2232 found = 1;
@@ -3590,17 +3600,19 @@ next:
3590static void set_reloc_control(struct reloc_control *rc) 3600static void set_reloc_control(struct reloc_control *rc)
3591{ 3601{
3592 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3602 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3593 spin_lock(&fs_info->trans_lock); 3603
3604 mutex_lock(&fs_info->reloc_mutex);
3594 fs_info->reloc_ctl = rc; 3605 fs_info->reloc_ctl = rc;
3595 spin_unlock(&fs_info->trans_lock); 3606 mutex_unlock(&fs_info->reloc_mutex);
3596} 3607}
3597 3608
3598static void unset_reloc_control(struct reloc_control *rc) 3609static void unset_reloc_control(struct reloc_control *rc)
3599{ 3610{
3600 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3611 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3601 spin_lock(&fs_info->trans_lock); 3612
3613 mutex_lock(&fs_info->reloc_mutex);
3602 fs_info->reloc_ctl = NULL; 3614 fs_info->reloc_ctl = NULL;
3603 spin_unlock(&fs_info->trans_lock); 3615 mutex_unlock(&fs_info->reloc_mutex);
3604} 3616}
3605 3617
3606static int check_extent_flags(u64 flags) 3618static int check_extent_flags(u64 flags)
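
These hunks swap fs_info->trans_lock for the new reloc_mutex (declared in the ctree.h hunk above and initialized in disk-io.c) wherever relocation state is touched: recording a root in a transaction is a multi-step operation, and the splice in merge_reloc_roots() must never run while another task is midway through adding to rc->reloc_roots. A pthread sketch of serializing a multi-step producer against a consumer that splices the whole list away:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t reloc_mutex = PTHREAD_MUTEX_INITIALIZER;
static int list[16], count;

static void record_root(int v)
{
	pthread_mutex_lock(&reloc_mutex);
	list[count] = v;     /* step 1 */
	count++;             /* step 2: both steps look atomic to splice */
	pthread_mutex_unlock(&reloc_mutex);
}

static int splice_all(int *out)
{
	int n;

	pthread_mutex_lock(&reloc_mutex);
	for (n = 0; n < count; n++)
		out[n] = list[n];
	count = 0;
	pthread_mutex_unlock(&reloc_mutex);
	return n;
}

int main(void)
{
	int out[16];

	record_root(42);
	printf("spliced %d entries\n", splice_all(out));
	return 0;
}
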
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index c3c223ae6691..daac9ae6d731 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -28,152 +28,6 @@
28#include "disk-io.h" 28#include "disk-io.h"
29#include "transaction.h" 29#include "transaction.h"
30 30
31static ssize_t root_blocks_used_show(struct btrfs_root *root, char *buf)
32{
33 return snprintf(buf, PAGE_SIZE, "%llu\n",
34 (unsigned long long)btrfs_root_used(&root->root_item));
35}
36
37static ssize_t root_block_limit_show(struct btrfs_root *root, char *buf)
38{
39 return snprintf(buf, PAGE_SIZE, "%llu\n",
40 (unsigned long long)btrfs_root_limit(&root->root_item));
41}
42
43static ssize_t super_blocks_used_show(struct btrfs_fs_info *fs, char *buf)
44{
45
46 return snprintf(buf, PAGE_SIZE, "%llu\n",
47 (unsigned long long)btrfs_super_bytes_used(&fs->super_copy));
48}
49
50static ssize_t super_total_blocks_show(struct btrfs_fs_info *fs, char *buf)
51{
52 return snprintf(buf, PAGE_SIZE, "%llu\n",
53 (unsigned long long)btrfs_super_total_bytes(&fs->super_copy));
54}
55
56static ssize_t super_blocksize_show(struct btrfs_fs_info *fs, char *buf)
57{
58 return snprintf(buf, PAGE_SIZE, "%llu\n",
59 (unsigned long long)btrfs_super_sectorsize(&fs->super_copy));
60}
61
62/* this is for root attrs (subvols/snapshots) */
63struct btrfs_root_attr {
64 struct attribute attr;
65 ssize_t (*show)(struct btrfs_root *, char *);
66 ssize_t (*store)(struct btrfs_root *, const char *, size_t);
67};
68
69#define ROOT_ATTR(name, mode, show, store) \
70static struct btrfs_root_attr btrfs_root_attr_##name = __ATTR(name, mode, \
71 show, store)
72
73ROOT_ATTR(blocks_used, 0444, root_blocks_used_show, NULL);
74ROOT_ATTR(block_limit, 0644, root_block_limit_show, NULL);
75
76static struct attribute *btrfs_root_attrs[] = {
77 &btrfs_root_attr_blocks_used.attr,
78 &btrfs_root_attr_block_limit.attr,
79 NULL,
80};
81
82/* this is for super attrs (actual full fs) */
83struct btrfs_super_attr {
84 struct attribute attr;
85 ssize_t (*show)(struct btrfs_fs_info *, char *);
86 ssize_t (*store)(struct btrfs_fs_info *, const char *, size_t);
87};
88
89#define SUPER_ATTR(name, mode, show, store) \
90static struct btrfs_super_attr btrfs_super_attr_##name = __ATTR(name, mode, \
91 show, store)
92
93SUPER_ATTR(blocks_used, 0444, super_blocks_used_show, NULL);
94SUPER_ATTR(total_blocks, 0444, super_total_blocks_show, NULL);
95SUPER_ATTR(blocksize, 0444, super_blocksize_show, NULL);
96
97static struct attribute *btrfs_super_attrs[] = {
98 &btrfs_super_attr_blocks_used.attr,
99 &btrfs_super_attr_total_blocks.attr,
100 &btrfs_super_attr_blocksize.attr,
101 NULL,
102};
103
104static ssize_t btrfs_super_attr_show(struct kobject *kobj,
105 struct attribute *attr, char *buf)
106{
107 struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
108 super_kobj);
109 struct btrfs_super_attr *a = container_of(attr,
110 struct btrfs_super_attr,
111 attr);
112
113 return a->show ? a->show(fs, buf) : 0;
114}
115
116static ssize_t btrfs_super_attr_store(struct kobject *kobj,
117 struct attribute *attr,
118 const char *buf, size_t len)
119{
120 struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
121 super_kobj);
122 struct btrfs_super_attr *a = container_of(attr,
123 struct btrfs_super_attr,
124 attr);
125
126 return a->store ? a->store(fs, buf, len) : 0;
127}
128
129static ssize_t btrfs_root_attr_show(struct kobject *kobj,
130 struct attribute *attr, char *buf)
131{
132 struct btrfs_root *root = container_of(kobj, struct btrfs_root,
133 root_kobj);
134 struct btrfs_root_attr *a = container_of(attr,
135 struct btrfs_root_attr,
136 attr);
137
138 return a->show ? a->show(root, buf) : 0;
139}
140
141static ssize_t btrfs_root_attr_store(struct kobject *kobj,
142 struct attribute *attr,
143 const char *buf, size_t len)
144{
145 struct btrfs_root *root = container_of(kobj, struct btrfs_root,
146 root_kobj);
147 struct btrfs_root_attr *a = container_of(attr,
148 struct btrfs_root_attr,
149 attr);
150 return a->store ? a->store(root, buf, len) : 0;
151}
152
153static void btrfs_super_release(struct kobject *kobj)
154{
155 struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
156 super_kobj);
157 complete(&fs->kobj_unregister);
158}
159
160static void btrfs_root_release(struct kobject *kobj)
161{
162 struct btrfs_root *root = container_of(kobj, struct btrfs_root,
163 root_kobj);
164 complete(&root->kobj_unregister);
165}
166
167static const struct sysfs_ops btrfs_super_attr_ops = {
168 .show = btrfs_super_attr_show,
169 .store = btrfs_super_attr_store,
170};
171
172static const struct sysfs_ops btrfs_root_attr_ops = {
173 .show = btrfs_root_attr_show,
174 .store = btrfs_root_attr_store,
175};
176
177/* /sys/fs/btrfs/ entry */ 31/* /sys/fs/btrfs/ entry */
178static struct kset *btrfs_kset; 32static struct kset *btrfs_kset;
179 33
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 2b3590b9fe98..51dcec86757f 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -126,28 +126,85 @@ static noinline int join_transaction(struct btrfs_root *root, int nofail)
126 * to make sure the old root from before we joined the transaction is deleted 126 * to make sure the old root from before we joined the transaction is deleted
127 * when the transaction commits 127 * when the transaction commits
128 */ 128 */
129int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, 129static int record_root_in_trans(struct btrfs_trans_handle *trans,
130 struct btrfs_root *root) 130 struct btrfs_root *root)
131{ 131{
132 if (root->ref_cows && root->last_trans < trans->transid) { 132 if (root->ref_cows && root->last_trans < trans->transid) {
133 WARN_ON(root == root->fs_info->extent_root); 133 WARN_ON(root == root->fs_info->extent_root);
134 WARN_ON(root->commit_root != root->node); 134 WARN_ON(root->commit_root != root->node);
135 135
136 /*
137 * see below for in_trans_setup usage rules
138 * we have the reloc mutex held now, so there
139 * is only one writer in this function
140 */
141 root->in_trans_setup = 1;
142
143 /* make sure readers find in_trans_setup before
144 * they find our root->last_trans update
145 */
146 smp_wmb();
147
136 spin_lock(&root->fs_info->fs_roots_radix_lock); 148 spin_lock(&root->fs_info->fs_roots_radix_lock);
137 if (root->last_trans == trans->transid) { 149 if (root->last_trans == trans->transid) {
138 spin_unlock(&root->fs_info->fs_roots_radix_lock); 150 spin_unlock(&root->fs_info->fs_roots_radix_lock);
139 return 0; 151 return 0;
140 } 152 }
141 root->last_trans = trans->transid;
142 radix_tree_tag_set(&root->fs_info->fs_roots_radix, 153 radix_tree_tag_set(&root->fs_info->fs_roots_radix,
143 (unsigned long)root->root_key.objectid, 154 (unsigned long)root->root_key.objectid,
144 BTRFS_ROOT_TRANS_TAG); 155 BTRFS_ROOT_TRANS_TAG);
145 spin_unlock(&root->fs_info->fs_roots_radix_lock); 156 spin_unlock(&root->fs_info->fs_roots_radix_lock);
157 root->last_trans = trans->transid;
158
159 /* this is pretty tricky. We don't want to
160 * take the relocation lock in btrfs_record_root_in_trans
161 * unless we're really doing the first setup for this root in
162 * this transaction.
163 *
164 * Normally we'd use root->last_trans as a flag to decide
165 * if we want to take the expensive mutex.
166 *
167 * But, we have to set root->last_trans before we
168 * init the relocation root, otherwise, we trip over warnings
169 * in ctree.c. The solution used here is to flag ourselves
170 * with root->in_trans_setup. When this is 1, we're still
171 * fixing up the reloc trees and everyone must wait.
172 *
173 * When this is zero, they can trust root->last_trans and fly
174 * through btrfs_record_root_in_trans without having to take the
175 * lock. smp_wmb() makes sure that all the writes above are
176 * done before we pop in the zero below
177 */
146 btrfs_init_reloc_root(trans, root); 178 btrfs_init_reloc_root(trans, root);
179 smp_wmb();
180 root->in_trans_setup = 0;
147 } 181 }
148 return 0; 182 return 0;
149} 183}
150 184
185
186int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
187 struct btrfs_root *root)
188{
189 if (!root->ref_cows)
190 return 0;
191
192 /*
193 * see record_root_in_trans for comments about in_trans_setup usage
194 * and barriers
195 */
196 smp_rmb();
197 if (root->last_trans == trans->transid &&
198 !root->in_trans_setup)
199 return 0;
200
201 mutex_lock(&root->fs_info->reloc_mutex);
202 record_root_in_trans(trans, root);
203 mutex_unlock(&root->fs_info->reloc_mutex);
204
205 return 0;
206}
207
151/* wait for commit against the current transaction to become unblocked 208/* wait for commit against the current transaction to become unblocked
152 * when this is done, it is safe to start a new transaction, but the current 209 * when this is done, it is safe to start a new transaction, but the current
153 * transaction might not be fully on disk. 210 * transaction might not be fully on disk.
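The in_trans_setup comments above describe a publish/consume pairing: the single writer (serialized by reloc_mutex) raises in_trans_setup, issues smp_wmb(), updates last_trans and sets up the reloc root, issues a second smp_wmb(), then clears the flag; the lockless fast path in btrfs_record_root_in_trans issues smp_rmb() before trusting last_trans. A loose user-space model of that ordering, using C11 atomics in place of the kernel barrier primitives (names and fields are illustrative; this is a sketch of the pattern, not a line-for-line translation):

#include <stdatomic.h>
#include <stdbool.h>

struct root_model {
        atomic_bool in_trans_setup;
        atomic_ulong last_trans;
};

/* writer side: runs under the reloc mutex, so there is only one writer */
static void record_root_model(struct root_model *r, unsigned long transid)
{
        atomic_store_explicit(&r->in_trans_setup, true,
                              memory_order_release);
        atomic_store_explicit(&r->last_trans, transid,
                              memory_order_release);  /* like smp_wmb() + store */
        /* ... reloc-root setup would happen here ... */
        atomic_store_explicit(&r->in_trans_setup, false,
                              memory_order_release);  /* like smp_wmb() + clear */
}

/* lockless fast path: may skip the mutex only when both checks pass */
static bool fast_path_ok(struct root_model *r, unsigned long transid)
{
        /* acquire loads model the reader's smp_rmb() pairing */
        unsigned long seen = atomic_load_explicit(&r->last_trans,
                                                  memory_order_acquire);
        bool setup = atomic_load_explicit(&r->in_trans_setup,
                                          memory_order_acquire);
        return seen == transid && !setup;
}

Any thread that observes last_trans == transid is guaranteed to also observe in_trans_setup raised until the setup completes, so it falls back to taking reloc_mutex instead of racing the reloc-root fixup.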
@@ -882,7 +939,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
882 parent = dget_parent(dentry); 939 parent = dget_parent(dentry);
883 parent_inode = parent->d_inode; 940 parent_inode = parent->d_inode;
884 parent_root = BTRFS_I(parent_inode)->root; 941 parent_root = BTRFS_I(parent_inode)->root;
885 btrfs_record_root_in_trans(trans, parent_root); 942 record_root_in_trans(trans, parent_root);
886 943
887 /* 944 /*
888 * insert the directory item 945 * insert the directory item
@@ -900,7 +957,16 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
900 ret = btrfs_update_inode(trans, parent_root, parent_inode); 957 ret = btrfs_update_inode(trans, parent_root, parent_inode);
901 BUG_ON(ret); 958 BUG_ON(ret);
902 959
903 btrfs_record_root_in_trans(trans, root); 960 /*
961 * pull in the delayed directory update
962 * and the delayed inode item
963 * otherwise we corrupt the FS during
964 * snapshot
965 */
966 ret = btrfs_run_delayed_items(trans, root);
967 BUG_ON(ret);
968
969 record_root_in_trans(trans, root);
904 btrfs_set_root_last_snapshot(&root->root_item, trans->transid); 970 btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
905 memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); 971 memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
906 btrfs_check_and_init_root_item(new_root_item); 972 btrfs_check_and_init_root_item(new_root_item);
@@ -961,14 +1027,6 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
961 int ret; 1027 int ret;
962 1028
963 list_for_each_entry(pending, head, list) { 1029 list_for_each_entry(pending, head, list) {
964 /*
965 * We must deal with the delayed items before creating
 966 * snapshots, or we will create a snapshot with inconsistent
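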
967 * information.
968 */
969 ret = btrfs_run_delayed_items(trans, fs_info->fs_root);
970 BUG_ON(ret);
971
972 ret = create_pending_snapshot(trans, fs_info, pending); 1030 ret = create_pending_snapshot(trans, fs_info, pending);
973 BUG_ON(ret); 1031 BUG_ON(ret);
974 } 1032 }
@@ -1241,21 +1299,42 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1241 schedule_timeout(1); 1299 schedule_timeout(1);
1242 1300
1243 finish_wait(&cur_trans->writer_wait, &wait); 1301 finish_wait(&cur_trans->writer_wait, &wait);
1244 spin_lock(&root->fs_info->trans_lock);
1245 root->fs_info->trans_no_join = 1;
1246 spin_unlock(&root->fs_info->trans_lock);
1247 } while (atomic_read(&cur_trans->num_writers) > 1 || 1302 } while (atomic_read(&cur_trans->num_writers) > 1 ||
1248 (should_grow && cur_trans->num_joined != joined)); 1303 (should_grow && cur_trans->num_joined != joined));
1249 1304
1250 ret = create_pending_snapshots(trans, root->fs_info); 1305 /*
1251 BUG_ON(ret); 1306 * Ok now we need to make sure to block out any other joins while we
1307 * commit the transaction. We could have started a join before setting
1308 * no_join so make sure to wait for num_writers to == 1 again.
1309 */
1310 spin_lock(&root->fs_info->trans_lock);
1311 root->fs_info->trans_no_join = 1;
1312 spin_unlock(&root->fs_info->trans_lock);
1313 wait_event(cur_trans->writer_wait,
1314 atomic_read(&cur_trans->num_writers) == 1);
1315
1316 /*
1317 * the reloc mutex makes sure that we stop
1318 * the balancing code from coming in and moving
1319 * extents around in the middle of the commit
1320 */
1321 mutex_lock(&root->fs_info->reloc_mutex);
1252 1322
1253 ret = btrfs_run_delayed_items(trans, root); 1323 ret = btrfs_run_delayed_items(trans, root);
1254 BUG_ON(ret); 1324 BUG_ON(ret);
1255 1325
1326 ret = create_pending_snapshots(trans, root->fs_info);
1327 BUG_ON(ret);
1328
1256 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); 1329 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1257 BUG_ON(ret); 1330 BUG_ON(ret);
1258 1331
1332 /*
1333 * make sure none of the code above managed to slip in a
1334 * delayed item
1335 */
1336 btrfs_assert_delayed_root_empty(root);
1337
1259 WARN_ON(cur_trans != trans->transaction); 1338 WARN_ON(cur_trans != trans->transaction);
1260 1339
1261 btrfs_scrub_pause(root); 1340 btrfs_scrub_pause(root);
@@ -1312,6 +1391,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1312 root->fs_info->running_transaction = NULL; 1391 root->fs_info->running_transaction = NULL;
1313 root->fs_info->trans_no_join = 0; 1392 root->fs_info->trans_no_join = 0;
1314 spin_unlock(&root->fs_info->trans_lock); 1393 spin_unlock(&root->fs_info->trans_lock);
1394 mutex_unlock(&root->fs_info->reloc_mutex);
1315 1395
1316 wake_up(&root->fs_info->transaction_wait); 1396 wake_up(&root->fs_info->transaction_wait);
1317 1397
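Two things changed in btrfs_commit_transaction above: trans_no_join is now set once, after the writer-counting loop rather than inside it, and the commit then waits for num_writers to drain to exactly 1 before taking reloc_mutex, so a join started just before no_join was set cannot slip past the check. A compact pthread sketch of that close-the-door-then-drain pattern (all names illustrative):

#include <pthread.h>

struct trans_model {
        pthread_mutex_t lock;
        pthread_cond_t writer_wait;     /* models cur_trans->writer_wait */
        int num_writers;                /* models atomic num_writers */
        int no_join;                    /* models fs_info->trans_no_join */
};

/* commit side: forbid new joins, then wait until we are the only writer */
static void block_joins_and_drain(struct trans_model *t)
{
        pthread_mutex_lock(&t->lock);
        t->no_join = 1;
        while (t->num_writers > 1)
                pthread_cond_wait(&t->writer_wait, &t->lock);
        pthread_mutex_unlock(&t->lock);
}

/* join side: refused once no_join is set */
static int try_join(struct trans_model *t)
{
        int ok;

        pthread_mutex_lock(&t->lock);
        ok = !t->no_join;
        if (ok)
                t->num_writers++;
        pthread_mutex_unlock(&t->lock);
        return ok;
}

/* leaving writers wake the committer so the drain loop can re-check */
static void end_join(struct trans_model *t)
{
        pthread_mutex_lock(&t->lock);
        t->num_writers--;
        pthread_cond_broadcast(&t->writer_wait);
        pthread_mutex_unlock(&t->lock);
}

Only once the count reaches 1 does the commit take reloc_mutex, run the delayed items and pending snapshots, and assert that no delayed item slipped in behind them.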
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 592396c6dc47..4ce8a9f41d1e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3177,7 +3177,7 @@ again:
3177 tmp_key.offset = (u64)-1; 3177 tmp_key.offset = (u64)-1;
3178 3178
3179 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key); 3179 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
3180 BUG_ON(!wc.replay_dest); 3180 BUG_ON(IS_ERR_OR_NULL(wc.replay_dest));
3181 3181
3182 wc.replay_dest->log_root = log; 3182 wc.replay_dest->log_root = log;
3183 btrfs_record_root_in_trans(trans, wc.replay_dest); 3183 btrfs_record_root_in_trans(trans, wc.replay_dest);
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 53ed1ad2c112..f66cc1625150 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -156,6 +156,6 @@ config CIFS_ACL
156 156
157config CIFS_NFSD_EXPORT 157config CIFS_NFSD_EXPORT
158 bool "Allow nfsd to export CIFS file system (EXPERIMENTAL)" 158 bool "Allow nfsd to export CIFS file system (EXPERIMENTAL)"
159 depends on CIFS && EXPERIMENTAL 159 depends on CIFS && EXPERIMENTAL && BROKEN
160 help 160 help
161 Allows NFS server to export a CIFS mounted share (nfsd over cifs) 161 Allows NFS server to export a CIFS mounted share (nfsd over cifs)
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index ffb1459dc6ec..7260e11e21f8 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -42,6 +42,7 @@
42#define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */ 42#define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */
43#define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */ 43#define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */
44#define CIFS_MOUNT_RWPIDFORWARD 0x80000 /* use pid forwarding for rw */ 44#define CIFS_MOUNT_RWPIDFORWARD 0x80000 /* use pid forwarding for rw */
45#define CIFS_MOUNT_POSIXACL 0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */
45 46
46struct cifs_sb_info { 47struct cifs_sb_info {
47 struct rb_root tlink_tree; 48 struct rb_root tlink_tree;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index e9def996e383..35f9154615fa 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -104,8 +104,7 @@ cifs_sb_deactive(struct super_block *sb)
104} 104}
105 105
106static int 106static int
107cifs_read_super(struct super_block *sb, struct smb_vol *volume_info, 107cifs_read_super(struct super_block *sb)
108 const char *devname, int silent)
109{ 108{
110 struct inode *inode; 109 struct inode *inode;
111 struct cifs_sb_info *cifs_sb; 110 struct cifs_sb_info *cifs_sb;
@@ -113,22 +112,16 @@ cifs_read_super(struct super_block *sb, struct smb_vol *volume_info,
113 112
114 cifs_sb = CIFS_SB(sb); 113 cifs_sb = CIFS_SB(sb);
115 114
116 spin_lock_init(&cifs_sb->tlink_tree_lock); 115 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
117 cifs_sb->tlink_tree = RB_ROOT; 116 sb->s_flags |= MS_POSIXACL;
118 117
119 rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY); 118 if (cifs_sb_master_tcon(cifs_sb)->ses->capabilities & CAP_LARGE_FILES)
120 if (rc) 119 sb->s_maxbytes = MAX_LFS_FILESIZE;
121 return rc; 120 else
122 121 sb->s_maxbytes = MAX_NON_LFS;
123 cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
124 122
125 rc = cifs_mount(sb, cifs_sb, volume_info, devname); 123 /* BB FIXME fix time_gran to be larger for LANMAN sessions */
126 124 sb->s_time_gran = 100;
127 if (rc) {
128 if (!silent)
129 cERROR(1, "cifs_mount failed w/return code = %d", rc);
130 goto out_mount_failed;
131 }
132 125
133 sb->s_magic = CIFS_MAGIC_NUMBER; 126 sb->s_magic = CIFS_MAGIC_NUMBER;
134 sb->s_op = &cifs_super_ops; 127 sb->s_op = &cifs_super_ops;
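Since cifs_mount no longer sees the superblock, capabilities negotiated during mount have to be parked in cifs_sb->mnt_cifs_flags and replayed onto the sb later; the CIFS_MOUNT_POSIXACL bit added in cifs_fs_sb.h is exactly such a mirror of MS_POSIXACL. A small sketch of the deferred-flag idiom (constants are stand-ins, not the real kernel values):

#include <stdbool.h>

#define MODEL_MOUNT_POSIXACL    0x100000        /* mirror bit kept in cifs_sb */
#define MODEL_MS_POSIXACL       (1UL << 16)     /* VFS-visible flag on the sb */

struct cifs_sb_model { unsigned int mnt_cifs_flags; };
struct sb_model { unsigned long s_flags; };

/* negotiation time: no superblock in sight, so record the capability */
static void negotiate_posix_acl(struct cifs_sb_model *cifs_sb, bool server_ok)
{
        if (server_ok)
                cifs_sb->mnt_cifs_flags |= MODEL_MOUNT_POSIXACL;
}

/* read_super time: replay the mirror bit onto the real superblock */
static void apply_mirrored_flags(struct sb_model *sb,
                                 const struct cifs_sb_model *cifs_sb)
{
        if (cifs_sb->mnt_cifs_flags & MODEL_MOUNT_POSIXACL)
                sb->s_flags |= MODEL_MS_POSIXACL;
}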
@@ -170,37 +163,14 @@ out_no_root:
170 if (inode) 163 if (inode)
171 iput(inode); 164 iput(inode);
172 165
173 cifs_umount(sb, cifs_sb);
174
175out_mount_failed:
176 bdi_destroy(&cifs_sb->bdi);
177 return rc; 166 return rc;
178} 167}
179 168
180static void 169static void cifs_kill_sb(struct super_block *sb)
181cifs_put_super(struct super_block *sb)
182{ 170{
183 int rc = 0; 171 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
184 struct cifs_sb_info *cifs_sb; 172 kill_anon_super(sb);
185 173 cifs_umount(cifs_sb);
186 cFYI(1, "In cifs_put_super");
187 cifs_sb = CIFS_SB(sb);
188 if (cifs_sb == NULL) {
189 cFYI(1, "Empty cifs superblock info passed to unmount");
190 return;
191 }
192
193 rc = cifs_umount(sb, cifs_sb);
194 if (rc)
195 cERROR(1, "cifs_umount failed with return code %d", rc);
196 if (cifs_sb->mountdata) {
197 kfree(cifs_sb->mountdata);
198 cifs_sb->mountdata = NULL;
199 }
200
201 unload_nls(cifs_sb->local_nls);
202 bdi_destroy(&cifs_sb->bdi);
203 kfree(cifs_sb);
204} 174}
205 175
206static int 176static int
@@ -257,9 +227,6 @@ static int cifs_permission(struct inode *inode, int mask, unsigned int flags)
257{ 227{
258 struct cifs_sb_info *cifs_sb; 228 struct cifs_sb_info *cifs_sb;
259 229
260 if (flags & IPERM_FLAG_RCU)
261 return -ECHILD;
262
263 cifs_sb = CIFS_SB(inode->i_sb); 230 cifs_sb = CIFS_SB(inode->i_sb);
264 231
265 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) { 232 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
@@ -551,7 +518,6 @@ static int cifs_drop_inode(struct inode *inode)
551} 518}
552 519
553static const struct super_operations cifs_super_ops = { 520static const struct super_operations cifs_super_ops = {
554 .put_super = cifs_put_super,
555 .statfs = cifs_statfs, 521 .statfs = cifs_statfs,
556 .alloc_inode = cifs_alloc_inode, 522 .alloc_inode = cifs_alloc_inode,
557 .destroy_inode = cifs_destroy_inode, 523 .destroy_inode = cifs_destroy_inode,
@@ -588,7 +554,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
588 full_path = cifs_build_path_to_root(vol, cifs_sb, 554 full_path = cifs_build_path_to_root(vol, cifs_sb,
589 cifs_sb_master_tcon(cifs_sb)); 555 cifs_sb_master_tcon(cifs_sb));
590 if (full_path == NULL) 556 if (full_path == NULL)
591 return NULL; 557 return ERR_PTR(-ENOMEM);
592 558
593 cFYI(1, "Get root dentry for %s", full_path); 559 cFYI(1, "Get root dentry for %s", full_path);
594 560
@@ -617,7 +583,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
617 dchild = d_alloc(dparent, &name); 583 dchild = d_alloc(dparent, &name);
618 if (dchild == NULL) { 584 if (dchild == NULL) {
619 dput(dparent); 585 dput(dparent);
620 dparent = NULL; 586 dparent = ERR_PTR(-ENOMEM);
621 goto out; 587 goto out;
622 } 588 }
623 } 589 }
@@ -635,7 +601,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
635 if (rc) { 601 if (rc) {
636 dput(dchild); 602 dput(dchild);
637 dput(dparent); 603 dput(dparent);
638 dparent = NULL; 604 dparent = ERR_PTR(rc);
639 goto out; 605 goto out;
640 } 606 }
641 alias = d_materialise_unique(dchild, inode); 607 alias = d_materialise_unique(dchild, inode);
@@ -643,7 +609,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
643 dput(dchild); 609 dput(dchild);
644 if (IS_ERR(alias)) { 610 if (IS_ERR(alias)) {
645 dput(dparent); 611 dput(dparent);
646 dparent = NULL; 612 dparent = ERR_PTR(-EINVAL); /* XXX */
647 goto out; 613 goto out;
648 } 614 }
649 dchild = alias; 615 dchild = alias;
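cifs_get_root now reports failure as ERR_PTR(-errno) instead of NULL, matching the tree-log fix earlier in this series where a bare BUG_ON(!ptr) would sail straight past an error encoded in the pointer. For reference, a user-space rendering of the ERR_PTR/IS_ERR idiom these hunks lean on (the kernel versions live in include/linux/err.h; this sketch only shows the shape of the trick):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* negative errnos are smuggled through the top page of the address space */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
static inline int IS_ERR_OR_NULL(const void *ptr)
{
        return !ptr || IS_ERR(ptr);
}

int main(void)
{
        void *root = ERR_PTR(-ENOMEM);

        /* a plain NULL check would treat this as a valid pointer */
        if (IS_ERR_OR_NULL(root))
                fprintf(stderr, "lookup failed: %ld\n",
                        IS_ERR(root) ? PTR_ERR(root) : -EINVAL);
        return 0;
}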
@@ -663,6 +629,13 @@ out:
663 return dparent; 629 return dparent;
664} 630}
665 631
632static int cifs_set_super(struct super_block *sb, void *data)
633{
634 struct cifs_mnt_data *mnt_data = data;
635 sb->s_fs_info = mnt_data->cifs_sb;
636 return set_anon_super(sb, NULL);
637}
638
666static struct dentry * 639static struct dentry *
667cifs_do_mount(struct file_system_type *fs_type, 640cifs_do_mount(struct file_system_type *fs_type,
668 int flags, const char *dev_name, void *data) 641 int flags, const char *dev_name, void *data)
@@ -683,75 +656,73 @@ cifs_do_mount(struct file_system_type *fs_type,
683 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL); 656 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
684 if (cifs_sb == NULL) { 657 if (cifs_sb == NULL) {
685 root = ERR_PTR(-ENOMEM); 658 root = ERR_PTR(-ENOMEM);
686 goto out; 659 goto out_nls;
660 }
661
662 cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
663 if (cifs_sb->mountdata == NULL) {
664 root = ERR_PTR(-ENOMEM);
665 goto out_cifs_sb;
687 } 666 }
688 667
689 cifs_setup_cifs_sb(volume_info, cifs_sb); 668 cifs_setup_cifs_sb(volume_info, cifs_sb);
690 669
670 rc = cifs_mount(cifs_sb, volume_info);
671 if (rc) {
672 if (!(flags & MS_SILENT))
673 cERROR(1, "cifs_mount failed w/return code = %d", rc);
674 root = ERR_PTR(rc);
675 goto out_mountdata;
676 }
677
691 mnt_data.vol = volume_info; 678 mnt_data.vol = volume_info;
692 mnt_data.cifs_sb = cifs_sb; 679 mnt_data.cifs_sb = cifs_sb;
693 mnt_data.flags = flags; 680 mnt_data.flags = flags;
694 681
695 sb = sget(fs_type, cifs_match_super, set_anon_super, &mnt_data); 682 sb = sget(fs_type, cifs_match_super, cifs_set_super, &mnt_data);
696 if (IS_ERR(sb)) { 683 if (IS_ERR(sb)) {
697 root = ERR_CAST(sb); 684 root = ERR_CAST(sb);
698 goto out_cifs_sb; 685 cifs_umount(cifs_sb);
686 goto out;
699 } 687 }
700 688
701 if (sb->s_fs_info) { 689 if (sb->s_root) {
702 cFYI(1, "Use existing superblock"); 690 cFYI(1, "Use existing superblock");
703 goto out_shared; 691 cifs_umount(cifs_sb);
704 } 692 } else {
693 sb->s_flags = flags;
694 /* BB should we make this contingent on mount parm? */
695 sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
696
697 rc = cifs_read_super(sb);
698 if (rc) {
699 root = ERR_PTR(rc);
700 goto out_super;
701 }
705 702
706 /* 703 sb->s_flags |= MS_ACTIVE;
707 * Copy mount params for use in submounts. Better to do
708 * the copy here and deal with the error before cleanup gets
709 * complicated post-mount.
710 */
711 cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
712 if (cifs_sb->mountdata == NULL) {
713 root = ERR_PTR(-ENOMEM);
714 goto out_super;
715 } 704 }
716 705
717 sb->s_flags = flags;
718 /* BB should we make this contingent on mount parm? */
719 sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
720 sb->s_fs_info = cifs_sb;
721
722 rc = cifs_read_super(sb, volume_info, dev_name,
723 flags & MS_SILENT ? 1 : 0);
724 if (rc) {
725 root = ERR_PTR(rc);
726 goto out_super;
727 }
728
729 sb->s_flags |= MS_ACTIVE;
730
731 root = cifs_get_root(volume_info, sb); 706 root = cifs_get_root(volume_info, sb);
732 if (root == NULL) 707 if (IS_ERR(root))
733 goto out_super; 708 goto out_super;
734 709
735 cFYI(1, "dentry root is: %p", root); 710 cFYI(1, "dentry root is: %p", root);
736 goto out; 711 goto out;
737 712
738out_shared:
739 root = cifs_get_root(volume_info, sb);
740 if (root)
741 cFYI(1, "dentry root is: %p", root);
742 goto out;
743
744out_super: 713out_super:
745 kfree(cifs_sb->mountdata);
746 deactivate_locked_super(sb); 714 deactivate_locked_super(sb);
747
748out_cifs_sb:
749 unload_nls(cifs_sb->local_nls);
750 kfree(cifs_sb);
751
752out: 715out:
753 cifs_cleanup_volume_info(&volume_info); 716 cifs_cleanup_volume_info(&volume_info);
754 return root; 717 return root;
718
719out_mountdata:
720 kfree(cifs_sb->mountdata);
721out_cifs_sb:
722 kfree(cifs_sb);
723out_nls:
724 unload_nls(volume_info->local_nls);
725 goto out;
755} 726}
756 727
757static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, 728static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
@@ -840,7 +811,7 @@ struct file_system_type cifs_fs_type = {
840 .owner = THIS_MODULE, 811 .owner = THIS_MODULE,
841 .name = "cifs", 812 .name = "cifs",
842 .mount = cifs_do_mount, 813 .mount = cifs_do_mount,
843 .kill_sb = kill_anon_super, 814 .kill_sb = cifs_kill_sb,
844 /* .fs_flags */ 815 /* .fs_flags */
845}; 816};
846const struct inode_operations cifs_dir_inode_ops = { 817const struct inode_operations cifs_dir_inode_ops = {
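The cifs_do_mount rework above makes setup and teardown symmetric: cifs_mount now owns everything hanging off cifs_sb (the bdi, the copied mount data, the nls table), cifs_umount releases all of it, and cifs_kill_sb is the single place a live superblock is torn down, whether sget() produced a fresh sb or found an existing one. A schematic of the resulting error-unwinding ladder, in the same goto style the function uses (the labels and allocations are illustrative):

#include <stdlib.h>
#include <string.h>

struct mount_ctx {
        void *cifs_sb;
        char *mountdata;
};

static int do_mount_model(struct mount_ctx *c, const char *data)
{
        int rc;

        c->cifs_sb = calloc(1, 64);
        if (!c->cifs_sb) {
                rc = -1;                /* -ENOMEM in the real code */
                goto out_nls;
        }

        c->mountdata = strdup(data);    /* copied before the mount runs */
        if (!c->mountdata) {
                rc = -1;
                goto out_cifs_sb;
        }

        /* ... cifs_mount(), sget(), cifs_read_super() would follow ... */
        return 0;

out_cifs_sb:
        free(c->cifs_sb);
out_nls:
        /* unload_nls() in the real code */
        return rc;
}

Each label unwinds exactly the state set up before the failing step, which is why the old out_shared and out_mount_failed paths could be deleted outright.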
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 953f84413c77..257f312ede42 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -157,9 +157,8 @@ extern int cifs_match_super(struct super_block *, void *);
157extern void cifs_cleanup_volume_info(struct smb_vol **pvolume_info); 157extern void cifs_cleanup_volume_info(struct smb_vol **pvolume_info);
158extern int cifs_setup_volume_info(struct smb_vol **pvolume_info, 158extern int cifs_setup_volume_info(struct smb_vol **pvolume_info,
159 char *mount_data, const char *devname); 159 char *mount_data, const char *devname);
160extern int cifs_mount(struct super_block *, struct cifs_sb_info *, 160extern int cifs_mount(struct cifs_sb_info *, struct smb_vol *);
161 struct smb_vol *, const char *); 161extern void cifs_umount(struct cifs_sb_info *);
162extern int cifs_umount(struct super_block *, struct cifs_sb_info *);
163extern void cifs_dfs_release_automount_timer(void); 162extern void cifs_dfs_release_automount_timer(void);
164void cifs_proc_init(void); 163void cifs_proc_init(void);
165void cifs_proc_clean(void); 164void cifs_proc_clean(void);
@@ -218,7 +217,8 @@ extern int get_dfs_path(int xid, struct cifs_ses *pSesInfo,
218 struct dfs_info3_param **preferrals, 217 struct dfs_info3_param **preferrals,
219 int remap); 218 int remap);
220extern void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon, 219extern void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
221 struct super_block *sb, struct smb_vol *vol); 220 struct cifs_sb_info *cifs_sb,
221 struct smb_vol *vol);
222extern int CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon, 222extern int CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon,
223 struct kstatfs *FSData); 223 struct kstatfs *FSData);
224extern int SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon, 224extern int SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon,
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 12cf72dd0c42..7f540df52527 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2546,7 +2546,7 @@ ip_connect(struct TCP_Server_Info *server)
2546} 2546}
2547 2547
2548void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon, 2548void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
2549 struct super_block *sb, struct smb_vol *vol_info) 2549 struct cifs_sb_info *cifs_sb, struct smb_vol *vol_info)
2550{ 2550{
2551 /* if we are reconnecting then should we check to see if 2551 /* if we are reconnecting then should we check to see if
2552 * any requested capabilities changed locally e.g. via 2552 * any requested capabilities changed locally e.g. via
@@ -2600,22 +2600,23 @@ void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
2600 cap &= ~CIFS_UNIX_POSIX_ACL_CAP; 2600 cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
2601 else if (CIFS_UNIX_POSIX_ACL_CAP & cap) { 2601 else if (CIFS_UNIX_POSIX_ACL_CAP & cap) {
2602 cFYI(1, "negotiated posix acl support"); 2602 cFYI(1, "negotiated posix acl support");
2603 if (sb) 2603 if (cifs_sb)
2604 sb->s_flags |= MS_POSIXACL; 2604 cifs_sb->mnt_cifs_flags |=
2605 CIFS_MOUNT_POSIXACL;
2605 } 2606 }
2606 2607
2607 if (vol_info && vol_info->posix_paths == 0) 2608 if (vol_info && vol_info->posix_paths == 0)
2608 cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; 2609 cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
2609 else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) { 2610 else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
2610 cFYI(1, "negotiate posix pathnames"); 2611 cFYI(1, "negotiate posix pathnames");
2611 if (sb) 2612 if (cifs_sb)
2612 CIFS_SB(sb)->mnt_cifs_flags |= 2613 cifs_sb->mnt_cifs_flags |=
2613 CIFS_MOUNT_POSIX_PATHS; 2614 CIFS_MOUNT_POSIX_PATHS;
2614 } 2615 }
2615 2616
2616 if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) { 2617 if (cifs_sb && (cifs_sb->rsize > 127 * 1024)) {
2617 if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) { 2618 if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) {
2618 CIFS_SB(sb)->rsize = 127 * 1024; 2619 cifs_sb->rsize = 127 * 1024;
2619 cFYI(DBG2, "larger reads not supported by srv"); 2620 cFYI(DBG2, "larger reads not supported by srv");
2620 } 2621 }
2621 } 2622 }
@@ -2662,6 +2663,9 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
2662{ 2663{
2663 INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks); 2664 INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
2664 2665
2666 spin_lock_init(&cifs_sb->tlink_tree_lock);
2667 cifs_sb->tlink_tree = RB_ROOT;
2668
2665 if (pvolume_info->rsize > CIFSMaxBufSize) { 2669 if (pvolume_info->rsize > CIFSMaxBufSize) {
2666 cERROR(1, "rsize %d too large, using MaxBufSize", 2670 cERROR(1, "rsize %d too large, using MaxBufSize",
2667 pvolume_info->rsize); 2671 pvolume_info->rsize);
@@ -2750,21 +2754,21 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
2750 2754
2751/* 2755/*
2752 * When the server supports very large writes via POSIX extensions, we can 2756 * When the server supports very large writes via POSIX extensions, we can
2753 * allow up to 2^24 - PAGE_CACHE_SIZE. 2757 * allow up to 2^24-1, minus the size of a WRITE_AND_X header, not including
2758 * the RFC1001 length.
2754 * 2759 *
2755 * Note that this might make for "interesting" allocation problems during 2760 * Note that this might make for "interesting" allocation problems during
2756 * writeback however (as we have to allocate an array of pointers for the 2761 * writeback however as we have to allocate an array of pointers for the
2757 * pages). A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096. 2762 * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
2758 */ 2763 */
2759#define CIFS_MAX_WSIZE ((1<<24) - PAGE_CACHE_SIZE) 2764#define CIFS_MAX_WSIZE ((1<<24) - 1 - sizeof(WRITE_REQ) + 4)
2760 2765
2761/* 2766/*
2762 * When the server doesn't allow large posix writes, default to a wsize of 2767 * When the server doesn't allow large posix writes, only allow a wsize of
2763 * 128k - PAGE_CACHE_SIZE -- one page less than the largest frame size 2768 * 128k minus the size of the WRITE_AND_X header. That allows for a write up
2764 * described in RFC1001. This allows space for the header without going over 2769 * to the maximum size described by RFC1002.
2765 * that by default.
2766 */ 2770 */
2767#define CIFS_MAX_RFC1001_WSIZE (128 * 1024 - PAGE_CACHE_SIZE) 2771#define CIFS_MAX_RFC1002_WSIZE (128 * 1024 - sizeof(WRITE_REQ) + 4)
2768 2772
2769/* 2773/*
2770 * The default wsize is 1M. find_get_pages seems to return a maximum of 256 2774 * The default wsize is 1M. find_get_pages seems to return a maximum of 256
@@ -2783,11 +2787,18 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
2783 2787
2784 /* can server support 24-bit write sizes? (via UNIX extensions) */ 2788 /* can server support 24-bit write sizes? (via UNIX extensions) */
2785 if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP)) 2789 if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
2786 wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1001_WSIZE); 2790 wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1002_WSIZE);
2787 2791
2788 /* no CAP_LARGE_WRITE_X? Limit it to 16 bits */ 2792 /*
2789 if (!(server->capabilities & CAP_LARGE_WRITE_X)) 2793 * no CAP_LARGE_WRITE_X or is signing enabled without CAP_UNIX set?
2790 wsize = min_t(unsigned int, wsize, USHRT_MAX); 2794 * Limit it to max buffer offered by the server, minus the size of the
2795 * WRITEX header, not including the 4 byte RFC1001 length.
2796 */
2797 if (!(server->capabilities & CAP_LARGE_WRITE_X) ||
2798 (!(server->capabilities & CAP_UNIX) &&
2799 (server->sec_mode & (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED))))
2800 wsize = min_t(unsigned int, wsize,
2801 server->maxBuf - sizeof(WRITE_REQ) + 4);
2791 2802
2792 /* hard limit of CIFS_MAX_WSIZE */ 2803 /* hard limit of CIFS_MAX_WSIZE */
2793 wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE); 2804 wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
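All three wsize limits above share one shape: a protocol ceiling, minus the SMB WRITE_AND_X request header, plus the 4-byte RFC1001 length that does not count against the ceiling. A quick computation makes the constants concrete; the header size below is an assumed stand-in for sizeof(WRITE_REQ), which depends on the cifspdu.h layout:

#include <stdio.h>

int main(void)
{
        unsigned int write_req_sz = 63; /* assumed sizeof(WRITE_REQ) */

        /* POSIX large writes: a 24-bit length field caps the payload */
        unsigned int max_wsize = (1u << 24) - 1 - write_req_sz + 4;

        /* no large POSIX writes: stay inside the 128k RFC1002 frame */
        unsigned int rfc1002_wsize = 128 * 1024 - write_req_sz + 4;

        printf("CIFS_MAX_WSIZE         ~ %u\n", max_wsize);
        printf("CIFS_MAX_RFC1002_WSIZE ~ %u\n", rfc1002_wsize);
        return 0;
}

The new signing clause applies the same subtraction to server->maxBuf: whenever frames must fit the server's announced buffer, the usable payload is that buffer minus the fixed header, again not counting the RFC1001 length.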
@@ -2937,7 +2948,11 @@ int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data,
2937 2948
2938 if (volume_info->nullauth) { 2949 if (volume_info->nullauth) {
2939 cFYI(1, "null user"); 2950 cFYI(1, "null user");
2940 volume_info->username = ""; 2951 volume_info->username = kzalloc(1, GFP_KERNEL);
2952 if (volume_info->username == NULL) {
2953 rc = -ENOMEM;
2954 goto out;
2955 }
2941 } else if (volume_info->username) { 2956 } else if (volume_info->username) {
2942 /* BB fixme parse for domain name here */ 2957 /* BB fixme parse for domain name here */
2943 cFYI(1, "Username: %s", volume_info->username); 2958 cFYI(1, "Username: %s", volume_info->username);
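The nullauth hunk replaces a string-literal assignment with a one-byte kzalloc because volume_info->username is later passed to kfree() unconditionally; freeing a literal would corrupt the allocator. The rule it encodes, as a tiny sketch:

#include <stdlib.h>

struct vol_model { char *username; };

static int set_null_user(struct vol_model *v)
{
        /* allocate even the empty string so cleanup can free blindly */
        v->username = calloc(1, 1);
        return v->username ? 0 : -1;    /* -ENOMEM in kernel terms */
}

static void cleanup(struct vol_model *v)
{
        free(v->username);              /* now safe on every code path */
        v->username = NULL;
}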
@@ -2971,8 +2986,7 @@ out:
2971} 2986}
2972 2987
2973int 2988int
2974cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, 2989cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
2975 struct smb_vol *volume_info, const char *devname)
2976{ 2990{
2977 int rc = 0; 2991 int rc = 0;
2978 int xid; 2992 int xid;
@@ -2983,6 +2997,13 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
2983 struct tcon_link *tlink; 2997 struct tcon_link *tlink;
2984#ifdef CONFIG_CIFS_DFS_UPCALL 2998#ifdef CONFIG_CIFS_DFS_UPCALL
2985 int referral_walks_count = 0; 2999 int referral_walks_count = 0;
3000
3001 rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
3002 if (rc)
3003 return rc;
3004
3005 cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
3006
2986try_mount_again: 3007try_mount_again:
2987 /* cleanup activities if we're chasing a referral */ 3008 /* cleanup activities if we're chasing a referral */
2988 if (referral_walks_count) { 3009 if (referral_walks_count) {
@@ -3007,6 +3028,7 @@ try_mount_again:
3007 srvTcp = cifs_get_tcp_session(volume_info); 3028 srvTcp = cifs_get_tcp_session(volume_info);
3008 if (IS_ERR(srvTcp)) { 3029 if (IS_ERR(srvTcp)) {
3009 rc = PTR_ERR(srvTcp); 3030 rc = PTR_ERR(srvTcp);
3031 bdi_destroy(&cifs_sb->bdi);
3010 goto out; 3032 goto out;
3011 } 3033 }
3012 3034
@@ -3018,14 +3040,6 @@ try_mount_again:
3018 goto mount_fail_check; 3040 goto mount_fail_check;
3019 } 3041 }
3020 3042
3021 if (pSesInfo->capabilities & CAP_LARGE_FILES)
3022 sb->s_maxbytes = MAX_LFS_FILESIZE;
3023 else
3024 sb->s_maxbytes = MAX_NON_LFS;
3025
3026 /* BB FIXME fix time_gran to be larger for LANMAN sessions */
3027 sb->s_time_gran = 100;
3028
3029 /* search for existing tcon to this server share */ 3043 /* search for existing tcon to this server share */
3030 tcon = cifs_get_tcon(pSesInfo, volume_info); 3044 tcon = cifs_get_tcon(pSesInfo, volume_info);
3031 if (IS_ERR(tcon)) { 3045 if (IS_ERR(tcon)) {
@@ -3038,7 +3052,7 @@ try_mount_again:
3038 if (tcon->ses->capabilities & CAP_UNIX) { 3052 if (tcon->ses->capabilities & CAP_UNIX) {
3039 /* reset of caps checks mount to see if unix extensions 3053 /* reset of caps checks mount to see if unix extensions
3040 disabled for just this mount */ 3054 disabled for just this mount */
3041 reset_cifs_unix_caps(xid, tcon, sb, volume_info); 3055 reset_cifs_unix_caps(xid, tcon, cifs_sb, volume_info);
3042 if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) && 3056 if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
3043 (le64_to_cpu(tcon->fsUnixInfo.Capability) & 3057 (le64_to_cpu(tcon->fsUnixInfo.Capability) &
3044 CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) { 3058 CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
@@ -3161,6 +3175,7 @@ mount_fail_check:
3161 cifs_put_smb_ses(pSesInfo); 3175 cifs_put_smb_ses(pSesInfo);
3162 else 3176 else
3163 cifs_put_tcp_session(srvTcp); 3177 cifs_put_tcp_session(srvTcp);
3178 bdi_destroy(&cifs_sb->bdi);
3164 goto out; 3179 goto out;
3165 } 3180 }
3166 3181
@@ -3335,8 +3350,8 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses,
3335 return rc; 3350 return rc;
3336} 3351}
3337 3352
3338int 3353void
3339cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb) 3354cifs_umount(struct cifs_sb_info *cifs_sb)
3340{ 3355{
3341 struct rb_root *root = &cifs_sb->tlink_tree; 3356 struct rb_root *root = &cifs_sb->tlink_tree;
3342 struct rb_node *node; 3357 struct rb_node *node;
@@ -3357,7 +3372,10 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
3357 } 3372 }
3358 spin_unlock(&cifs_sb->tlink_tree_lock); 3373 spin_unlock(&cifs_sb->tlink_tree_lock);
3359 3374
3360 return 0; 3375 bdi_destroy(&cifs_sb->bdi);
3376 kfree(cifs_sb->mountdata);
3377 unload_nls(cifs_sb->local_nls);
3378 kfree(cifs_sb);
3361} 3379}
3362 3380
3363int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses) 3381int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses)
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 1525d5e662b6..1c5b770c3141 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -90,12 +90,10 @@ smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
90 sg_init_one(&sgout, out, 8); 90 sg_init_one(&sgout, out, 8);
91 91
92 rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, 8); 92 rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, 8);
93 if (rc) { 93 if (rc)
94 cERROR(1, "could not encrypt crypt key rc: %d\n", rc); 94 cERROR(1, "could not encrypt crypt key rc: %d\n", rc);
95 crypto_free_blkcipher(tfm_des);
96 goto smbhash_err;
97 }
98 95
96 crypto_free_blkcipher(tfm_des);
99smbhash_err: 97smbhash_err:
100 return rc; 98 return rc;
101} 99}
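The smbhash change above fixes a leak: the DES blkcipher transform used to be freed only on the encryption error path, so every successful hash leaked a tfm. Hoisting crypto_free_blkcipher() out of the error branch makes the release unconditional. The general shape, with a hypothetical resource standing in for the tfm:

#include <stdio.h>
#include <stdlib.h>

static int hash_model(void)
{
        int rc = 0;
        void *tfm = malloc(32);         /* stands in for the crypto tfm */

        if (!tfm)
                return -1;

        /* rc = encrypt(...); log failures, but do not early-return */
        if (rc)
                fprintf(stderr, "encrypt failed: %d\n", rc);

        free(tfm);                      /* released on success and failure alike */
        return rc;
}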
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c
index 6cbb3afb36dc..cb140ef293e4 100644
--- a/fs/coda/pioctl.c
+++ b/fs/coda/pioctl.c
@@ -43,8 +43,6 @@ const struct file_operations coda_ioctl_operations = {
43/* the coda pioctl inode ops */ 43/* the coda pioctl inode ops */
44static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags) 44static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags)
45{ 45{
46 if (flags & IPERM_FLAG_RCU)
47 return -ECHILD;
48 return (mask & MAY_EXEC) ? -EACCES : 0; 46 return (mask & MAY_EXEC) ? -EACCES : 0;
49} 47}
50 48
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 2e29abb30f76..095c36f3b612 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -125,7 +125,7 @@ struct ext4_ext_path {
125 * positive retcode - signal for ext4_ext_walk_space(), see below 125 * positive retcode - signal for ext4_ext_walk_space(), see below
126 * callback must return valid extent (passed or newly created) 126 * callback must return valid extent (passed or newly created)
127 */ 127 */
128typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *, 128typedef int (*ext_prepare_callback)(struct inode *, ext4_lblk_t,
129 struct ext4_ext_cache *, 129 struct ext4_ext_cache *,
130 struct ext4_extent *, void *); 130 struct ext4_extent *, void *);
131 131
@@ -133,8 +133,11 @@ typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *,
133#define EXT_BREAK 1 133#define EXT_BREAK 1
134#define EXT_REPEAT 2 134#define EXT_REPEAT 2
135 135
136/* Maximum logical block in a file; ext4_extent's ee_block is __le32 */ 136/*
137#define EXT_MAX_BLOCK 0xffffffff 137 * Maximum number of logical blocks in a file; ext4_extent's ee_block is
138 * __le32.
139 */
140#define EXT_MAX_BLOCKS 0xffffffff
138 141
139/* 142/*
140 * EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an 143 * EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an
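The rename is semantic, not cosmetic: EXT_MAX_BLOCKS = 0xffffffff is a count of logical blocks, so the largest valid block index is EXT_MAX_BLOCKS - 1. That is why, in the extents.c hunks below, the truncate sanity checks become end == EXT_MAX_BLOCKS - 1, ext4_ext_truncate removes up to EXT_MAX_BLOCKS - 1, and the fiemap callback can mark the final extent simply by seeing next == EXT_MAX_BLOCKS. A few lines to make the off-by-one explicit:

#include <stdio.h>
#include <stdint.h>

#define EXT_MAX_BLOCKS 0xffffffffu      /* number of logical blocks, not an index */

int main(void)
{
        uint32_t last_valid = EXT_MAX_BLOCKS - 1;   /* highest addressable block */
        uint32_t next = EXT_MAX_BLOCKS;             /* sentinel: nothing follows */

        printf("last valid logical block: %u\n", (unsigned)last_valid);
        printf("extent is the last one:  %d\n", next == EXT_MAX_BLOCKS);
        return 0;
}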
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 5199bac7fc62..f815cc81e7a2 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1408,7 +1408,7 @@ got_index:
1408 1408
1409/* 1409/*
1410 * ext4_ext_next_allocated_block: 1410 * ext4_ext_next_allocated_block:
1411 * returns allocated block in subsequent extent or EXT_MAX_BLOCK. 1411 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
1412 * NOTE: it considers block number from index entry as 1412 * NOTE: it considers block number from index entry as
1413 * allocated block. Thus, index entries have to be consistent 1413 * allocated block. Thus, index entries have to be consistent
1414 * with leaves. 1414 * with leaves.
@@ -1422,7 +1422,7 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1422 depth = path->p_depth; 1422 depth = path->p_depth;
1423 1423
1424 if (depth == 0 && path->p_ext == NULL) 1424 if (depth == 0 && path->p_ext == NULL)
1425 return EXT_MAX_BLOCK; 1425 return EXT_MAX_BLOCKS;
1426 1426
1427 while (depth >= 0) { 1427 while (depth >= 0) {
1428 if (depth == path->p_depth) { 1428 if (depth == path->p_depth) {
@@ -1439,12 +1439,12 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1439 depth--; 1439 depth--;
1440 } 1440 }
1441 1441
1442 return EXT_MAX_BLOCK; 1442 return EXT_MAX_BLOCKS;
1443} 1443}
1444 1444
1445/* 1445/*
1446 * ext4_ext_next_leaf_block: 1446 * ext4_ext_next_leaf_block:
1447 * returns first allocated block from next leaf or EXT_MAX_BLOCK 1447 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
1448 */ 1448 */
1449static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode, 1449static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
1450 struct ext4_ext_path *path) 1450 struct ext4_ext_path *path)
@@ -1456,7 +1456,7 @@ static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
1456 1456
1457 /* zero-tree has no leaf blocks at all */ 1457 /* zero-tree has no leaf blocks at all */
1458 if (depth == 0) 1458 if (depth == 0)
1459 return EXT_MAX_BLOCK; 1459 return EXT_MAX_BLOCKS;
1460 1460
1461 /* go to index block */ 1461 /* go to index block */
1462 depth--; 1462 depth--;
@@ -1469,7 +1469,7 @@ static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
1469 depth--; 1469 depth--;
1470 } 1470 }
1471 1471
1472 return EXT_MAX_BLOCK; 1472 return EXT_MAX_BLOCKS;
1473} 1473}
1474 1474
1475/* 1475/*
@@ -1677,13 +1677,13 @@ static unsigned int ext4_ext_check_overlap(struct inode *inode,
1677 */ 1677 */
1678 if (b2 < b1) { 1678 if (b2 < b1) {
1679 b2 = ext4_ext_next_allocated_block(path); 1679 b2 = ext4_ext_next_allocated_block(path);
1680 if (b2 == EXT_MAX_BLOCK) 1680 if (b2 == EXT_MAX_BLOCKS)
1681 goto out; 1681 goto out;
1682 } 1682 }
1683 1683
1684 /* check for wrap through zero on extent logical start block*/ 1684 /* check for wrap through zero on extent logical start block*/
1685 if (b1 + len1 < b1) { 1685 if (b1 + len1 < b1) {
1686 len1 = EXT_MAX_BLOCK - b1; 1686 len1 = EXT_MAX_BLOCKS - b1;
1687 newext->ee_len = cpu_to_le16(len1); 1687 newext->ee_len = cpu_to_le16(len1);
1688 ret = 1; 1688 ret = 1;
1689 } 1689 }
@@ -1767,7 +1767,7 @@ repeat:
1767 fex = EXT_LAST_EXTENT(eh); 1767 fex = EXT_LAST_EXTENT(eh);
1768 next = ext4_ext_next_leaf_block(inode, path); 1768 next = ext4_ext_next_leaf_block(inode, path);
1769 if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block) 1769 if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
1770 && next != EXT_MAX_BLOCK) { 1770 && next != EXT_MAX_BLOCKS) {
1771 ext_debug("next leaf block - %d\n", next); 1771 ext_debug("next leaf block - %d\n", next);
1772 BUG_ON(npath != NULL); 1772 BUG_ON(npath != NULL);
1773 npath = ext4_ext_find_extent(inode, next, NULL); 1773 npath = ext4_ext_find_extent(inode, next, NULL);
@@ -1887,7 +1887,7 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
1887 BUG_ON(func == NULL); 1887 BUG_ON(func == NULL);
1888 BUG_ON(inode == NULL); 1888 BUG_ON(inode == NULL);
1889 1889
1890 while (block < last && block != EXT_MAX_BLOCK) { 1890 while (block < last && block != EXT_MAX_BLOCKS) {
1891 num = last - block; 1891 num = last - block;
1892 /* find extent for this block */ 1892 /* find extent for this block */
1893 down_read(&EXT4_I(inode)->i_data_sem); 1893 down_read(&EXT4_I(inode)->i_data_sem);
@@ -1958,7 +1958,7 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
1958 err = -EIO; 1958 err = -EIO;
1959 break; 1959 break;
1960 } 1960 }
1961 err = func(inode, path, &cbex, ex, cbdata); 1961 err = func(inode, next, &cbex, ex, cbdata);
1962 ext4_ext_drop_refs(path); 1962 ext4_ext_drop_refs(path);
1963 1963
1964 if (err < 0) 1964 if (err < 0)
@@ -2020,7 +2020,7 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
2020 if (ex == NULL) { 2020 if (ex == NULL) {
2021 /* there is no extent yet, so gap is [0;-] */ 2021 /* there is no extent yet, so gap is [0;-] */
2022 lblock = 0; 2022 lblock = 0;
2023 len = EXT_MAX_BLOCK; 2023 len = EXT_MAX_BLOCKS;
2024 ext_debug("cache gap(whole file):"); 2024 ext_debug("cache gap(whole file):");
2025 } else if (block < le32_to_cpu(ex->ee_block)) { 2025 } else if (block < le32_to_cpu(ex->ee_block)) {
2026 lblock = block; 2026 lblock = block;
@@ -2350,7 +2350,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2350 * never happen because at least one of the end points 2350 * never happen because at least one of the end points
2351 * needs to be on the edge of the extent. 2351 * needs to be on the edge of the extent.
2352 */ 2352 */
2353 if (end == EXT_MAX_BLOCK) { 2353 if (end == EXT_MAX_BLOCKS - 1) {
2354 ext_debug(" bad truncate %u:%u\n", 2354 ext_debug(" bad truncate %u:%u\n",
2355 start, end); 2355 start, end);
2356 block = 0; 2356 block = 0;
@@ -2398,7 +2398,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2398 * If this is a truncate, this condition 2398 * If this is a truncate, this condition
2399 * should never happen 2399 * should never happen
2400 */ 2400 */
2401 if (end == EXT_MAX_BLOCK) { 2401 if (end == EXT_MAX_BLOCKS - 1) {
2402 ext_debug(" bad truncate %u:%u\n", 2402 ext_debug(" bad truncate %u:%u\n",
2403 start, end); 2403 start, end);
2404 err = -EIO; 2404 err = -EIO;
@@ -2478,7 +2478,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2478 * we need to remove it from the leaf 2478 * we need to remove it from the leaf
2479 */ 2479 */
2480 if (num == 0) { 2480 if (num == 0) {
2481 if (end != EXT_MAX_BLOCK) { 2481 if (end != EXT_MAX_BLOCKS - 1) {
2482 /* 2482 /*
2483 * For hole punching, we need to scoot all the 2483 * For hole punching, we need to scoot all the
2484 * extents up when an extent is removed so that 2484 * extents up when an extent is removed so that
@@ -3699,7 +3699,7 @@ void ext4_ext_truncate(struct inode *inode)
3699 3699
3700 last_block = (inode->i_size + sb->s_blocksize - 1) 3700 last_block = (inode->i_size + sb->s_blocksize - 1)
3701 >> EXT4_BLOCK_SIZE_BITS(sb); 3701 >> EXT4_BLOCK_SIZE_BITS(sb);
3702 err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCK); 3702 err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
3703 3703
3704 /* In a multi-transaction truncate, we only make the final 3704 /* In a multi-transaction truncate, we only make the final
3705 * transaction synchronous. 3705 * transaction synchronous.
@@ -3914,14 +3914,13 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
3914/* 3914/*
3915 * Callback function called for each extent to gather FIEMAP information. 3915 * Callback function called for each extent to gather FIEMAP information.
3916 */ 3916 */
3917static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path, 3917static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next,
3918 struct ext4_ext_cache *newex, struct ext4_extent *ex, 3918 struct ext4_ext_cache *newex, struct ext4_extent *ex,
3919 void *data) 3919 void *data)
3920{ 3920{
3921 __u64 logical; 3921 __u64 logical;
3922 __u64 physical; 3922 __u64 physical;
3923 __u64 length; 3923 __u64 length;
3924 loff_t size;
3925 __u32 flags = 0; 3924 __u32 flags = 0;
3926 int ret = 0; 3925 int ret = 0;
3927 struct fiemap_extent_info *fieinfo = data; 3926 struct fiemap_extent_info *fieinfo = data;
@@ -4103,8 +4102,7 @@ found_delayed_extent:
4103 if (ex && ext4_ext_is_uninitialized(ex)) 4102 if (ex && ext4_ext_is_uninitialized(ex))
4104 flags |= FIEMAP_EXTENT_UNWRITTEN; 4103 flags |= FIEMAP_EXTENT_UNWRITTEN;
4105 4104
4106 size = i_size_read(inode); 4105 if (next == EXT_MAX_BLOCKS)
4107 if (logical + length >= size)
4108 flags |= FIEMAP_EXTENT_LAST; 4106 flags |= FIEMAP_EXTENT_LAST;
4109 4107
4110 ret = fiemap_fill_next_extent(fieinfo, logical, physical, 4108 ret = fiemap_fill_next_extent(fieinfo, logical, physical,
@@ -4347,8 +4345,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4347 4345
4348 start_blk = start >> inode->i_sb->s_blocksize_bits; 4346 start_blk = start >> inode->i_sb->s_blocksize_bits;
4349 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; 4347 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
4350 if (last_blk >= EXT_MAX_BLOCK) 4348 if (last_blk >= EXT_MAX_BLOCKS)
4351 last_blk = EXT_MAX_BLOCK-1; 4349 last_blk = EXT_MAX_BLOCKS-1;
4352 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; 4350 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
4353 4351
4354 /* 4352 /*
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index a5763e3505ba..e3126c051006 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2634,7 +2634,7 @@ static int ext4_writepage(struct page *page,
2634 struct buffer_head *page_bufs = NULL; 2634 struct buffer_head *page_bufs = NULL;
2635 struct inode *inode = page->mapping->host; 2635 struct inode *inode = page->mapping->host;
2636 2636
2637 trace_ext4_writepage(inode, page); 2637 trace_ext4_writepage(page);
2638 size = i_size_read(inode); 2638 size = i_size_read(inode);
2639 if (page->index == size >> PAGE_CACHE_SHIFT) 2639 if (page->index == size >> PAGE_CACHE_SHIFT)
2640 len = size & ~PAGE_CACHE_MASK; 2640 len = size & ~PAGE_CACHE_MASK;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 859f2ae8864e..6ed859d56850 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3578,8 +3578,8 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3578 free += next - bit; 3578 free += next - bit;
3579 3579
3580 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); 3580 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
3581 trace_ext4_mb_release_inode_pa(sb, pa->pa_inode, pa, 3581 trace_ext4_mb_release_inode_pa(pa, grp_blk_start + bit,
3582 grp_blk_start + bit, next - bit); 3582 next - bit);
3583 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 3583 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3584 bit = next + 1; 3584 bit = next + 1;
3585 } 3585 }
@@ -3608,7 +3608,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3608 ext4_group_t group; 3608 ext4_group_t group;
3609 ext4_grpblk_t bit; 3609 ext4_grpblk_t bit;
3610 3610
3611 trace_ext4_mb_release_group_pa(sb, pa); 3611 trace_ext4_mb_release_group_pa(pa);
3612 BUG_ON(pa->pa_deleted == 0); 3612 BUG_ON(pa->pa_deleted == 0);
3613 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 3613 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3614 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 3614 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
@@ -4448,7 +4448,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4448 * @inode: inode 4448 * @inode: inode
4449 * @block: start physical block to free 4449 * @block: start physical block to free
4450 * @count: number of blocks to count 4450 * @count: number of blocks to count
4451 * @metadata: Are these metadata blocks 4451 * @flags: flags used by ext4_free_blocks
4452 */ 4452 */
4453void ext4_free_blocks(handle_t *handle, struct inode *inode, 4453void ext4_free_blocks(handle_t *handle, struct inode *inode,
4454 struct buffer_head *bh, ext4_fsblk_t block, 4454 struct buffer_head *bh, ext4_fsblk_t block,
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 2b8304bf3c50..f57455a1b1b2 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -1002,12 +1002,12 @@ mext_check_arguments(struct inode *orig_inode,
1002 return -EINVAL; 1002 return -EINVAL;
1003 } 1003 }
1004 1004
1005 if ((orig_start > EXT_MAX_BLOCK) || 1005 if ((orig_start >= EXT_MAX_BLOCKS) ||
1006 (donor_start > EXT_MAX_BLOCK) || 1006 (donor_start >= EXT_MAX_BLOCKS) ||
1007 (*len > EXT_MAX_BLOCK) || 1007 (*len > EXT_MAX_BLOCKS) ||
1008 (orig_start + *len > EXT_MAX_BLOCK)) { 1008 (orig_start + *len >= EXT_MAX_BLOCKS)) {
1009 ext4_debug("ext4 move extent: Can't handle over [%u] blocks " 1009 ext4_debug("ext4 move extent: Can't handle over [%u] blocks "
1010 "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCK, 1010 "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS,
1011 orig_inode->i_ino, donor_inode->i_ino); 1011 orig_inode->i_ino, donor_inode->i_ino);
1012 return -EINVAL; 1012 return -EINVAL;
1013 } 1013 }
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index cc5c157aa11d..9ea71aa864b3 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2243,6 +2243,12 @@ static void ext4_orphan_cleanup(struct super_block *sb,
2243 * in the vfs. ext4 inode has 48 bits of i_block in fsblock units, 2243 * in the vfs. ext4 inode has 48 bits of i_block in fsblock units,
2244 * so that won't be a limiting factor. 2244 * so that won't be a limiting factor.
2245 * 2245 *
2246 * However there is other limiting factor. We do store extents in the form
2247 * of starting block and length, hence the resulting length of the extent
2248 * covering maximum file size must fit into on-disk format containers as
2249 * well. Given that length is always by 1 unit bigger than max unit (because
2250 * we count 0 as well) we have to lower the s_maxbytes by one fs block.
2251 *
2246 * Note, this does *not* consider any metadata overhead for vfs i_blocks. 2252 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
2247 */ 2253 */
2248static loff_t ext4_max_size(int blkbits, int has_huge_files) 2254static loff_t ext4_max_size(int blkbits, int has_huge_files)
@@ -2264,10 +2270,13 @@ static loff_t ext4_max_size(int blkbits, int has_huge_files)
2264 upper_limit <<= blkbits; 2270 upper_limit <<= blkbits;
2265 } 2271 }
2266 2272
2267 /* 32-bit extent-start container, ee_block */ 2273 /*
2268 res = 1LL << 32; 2274 * 32-bit extent-start container, ee_block. We lower the maxbytes
2275 * by one fs block, so ee_len can cover the extent of maximum file
2276 * size
2277 */
2278 res = (1LL << 32) - 1;
2269 res <<= blkbits; 2279 res <<= blkbits;
2270 res -= 1;
2271 2280
2272 /* Sanity check against vm- & vfs- imposed limits */ 2281 /* Sanity check against vm- & vfs- imposed limits */
2273 if (res > upper_limit) 2282 if (res > upper_limit)
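The ext4_max_size change is easiest to see numerically: ee_block is 32 bits and an extent covering blocks 0 through N has length N + 1, so a file may span at most 2^32 - 1 blocks if the covering extent's length is to stay representable. With 4 KiB blocks, chosen here purely for illustration:

#include <stdio.h>

int main(void)
{
        int blkbits = 12;                       /* 4 KiB blocks, for example */
        long long res = (1LL << 32) - 1;        /* 2^32 - 1 addressable blocks */

        res <<= blkbits;
        printf("extent-format max file size: %lld bytes\n", res);
        /* slightly below the old ((1LL << 32) << blkbits) - 1 limit */
        return 0;
}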
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 6a79fd0a1a32..2c62c5aae82f 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -97,10 +97,14 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
97 97
98 if (jh->b_jlist == BJ_None && !buffer_locked(bh) && 98 if (jh->b_jlist == BJ_None && !buffer_locked(bh) &&
99 !buffer_dirty(bh) && !buffer_write_io_error(bh)) { 99 !buffer_dirty(bh) && !buffer_write_io_error(bh)) {
100 /*
101 * Get our reference so that bh cannot be freed before
102 * we unlock it
103 */
104 get_bh(bh);
100 JBUFFER_TRACE(jh, "remove from checkpoint list"); 105 JBUFFER_TRACE(jh, "remove from checkpoint list");
101 ret = __jbd2_journal_remove_checkpoint(jh) + 1; 106 ret = __jbd2_journal_remove_checkpoint(jh) + 1;
102 jbd_unlock_bh_state(bh); 107 jbd_unlock_bh_state(bh);
103 jbd2_journal_remove_journal_head(bh);
104 BUFFER_TRACE(bh, "release"); 108 BUFFER_TRACE(bh, "release");
105 __brelse(bh); 109 __brelse(bh);
106 } else { 110 } else {
@@ -223,8 +227,8 @@ restart:
223 spin_lock(&journal->j_list_lock); 227 spin_lock(&journal->j_list_lock);
224 goto restart; 228 goto restart;
225 } 229 }
230 get_bh(bh);
226 if (buffer_locked(bh)) { 231 if (buffer_locked(bh)) {
227 atomic_inc(&bh->b_count);
228 spin_unlock(&journal->j_list_lock); 232 spin_unlock(&journal->j_list_lock);
229 jbd_unlock_bh_state(bh); 233 jbd_unlock_bh_state(bh);
230 wait_on_buffer(bh); 234 wait_on_buffer(bh);
@@ -243,7 +247,6 @@ restart:
243 */ 247 */
244 released = __jbd2_journal_remove_checkpoint(jh); 248 released = __jbd2_journal_remove_checkpoint(jh);
245 jbd_unlock_bh_state(bh); 249 jbd_unlock_bh_state(bh);
246 jbd2_journal_remove_journal_head(bh);
247 __brelse(bh); 250 __brelse(bh);
248 } 251 }
249 252
@@ -284,7 +287,7 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
284 int ret = 0; 287 int ret = 0;
285 288
286 if (buffer_locked(bh)) { 289 if (buffer_locked(bh)) {
287 atomic_inc(&bh->b_count); 290 get_bh(bh);
288 spin_unlock(&journal->j_list_lock); 291 spin_unlock(&journal->j_list_lock);
289 jbd_unlock_bh_state(bh); 292 jbd_unlock_bh_state(bh);
290 wait_on_buffer(bh); 293 wait_on_buffer(bh);
@@ -316,12 +319,12 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
316 ret = 1; 319 ret = 1;
317 if (unlikely(buffer_write_io_error(bh))) 320 if (unlikely(buffer_write_io_error(bh)))
318 ret = -EIO; 321 ret = -EIO;
322 get_bh(bh);
319 J_ASSERT_JH(jh, !buffer_jbddirty(bh)); 323 J_ASSERT_JH(jh, !buffer_jbddirty(bh));
320 BUFFER_TRACE(bh, "remove from checkpoint"); 324 BUFFER_TRACE(bh, "remove from checkpoint");
321 __jbd2_journal_remove_checkpoint(jh); 325 __jbd2_journal_remove_checkpoint(jh);
322 spin_unlock(&journal->j_list_lock); 326 spin_unlock(&journal->j_list_lock);
323 jbd_unlock_bh_state(bh); 327 jbd_unlock_bh_state(bh);
324 jbd2_journal_remove_journal_head(bh);
325 __brelse(bh); 328 __brelse(bh);
326 } else { 329 } else {
327 /* 330 /*
@@ -554,7 +557,8 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
554/* 557/*
555 * journal_clean_one_cp_list 558 * journal_clean_one_cp_list
556 * 559 *
557 * Find all the written-back checkpoint buffers in the given list and release them. 560 * Find all the written-back checkpoint buffers in the given list and
561 * release them.
558 * 562 *
559 * Called with the journal locked. 563 * Called with the journal locked.
560 * Called with j_list_lock held. 564 * Called with j_list_lock held.
@@ -663,8 +667,8 @@ out:
663 * checkpoint lists. 667 * checkpoint lists.
664 * 668 *
665 * The function returns 1 if it frees the transaction, 0 otherwise. 669 * The function returns 1 if it frees the transaction, 0 otherwise.
670 * The function can free jh and bh.
666 * 671 *
667 * This function is called with the journal locked.
668 * This function is called with j_list_lock held. 672 * This function is called with j_list_lock held.
669 * This function is called with jbd_lock_bh_state(jh2bh(jh)) 673 * This function is called with jbd_lock_bh_state(jh2bh(jh))
670 */ 674 */
@@ -684,13 +688,14 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
684 } 688 }
685 journal = transaction->t_journal; 689 journal = transaction->t_journal;
686 690
691 JBUFFER_TRACE(jh, "removing from transaction");
687 __buffer_unlink(jh); 692 __buffer_unlink(jh);
688 jh->b_cp_transaction = NULL; 693 jh->b_cp_transaction = NULL;
694 jbd2_journal_put_journal_head(jh);
689 695
690 if (transaction->t_checkpoint_list != NULL || 696 if (transaction->t_checkpoint_list != NULL ||
691 transaction->t_checkpoint_io_list != NULL) 697 transaction->t_checkpoint_io_list != NULL)
692 goto out; 698 goto out;
693 JBUFFER_TRACE(jh, "transaction has no more buffers");
694 699
695 /* 700 /*
696 * There is one special case to worry about: if we have just pulled the 701 * There is one special case to worry about: if we have just pulled the
@@ -701,10 +706,8 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
701 * The locking here around t_state is a bit sleazy. 706 * The locking here around t_state is a bit sleazy.
702 * See the comment at the end of jbd2_journal_commit_transaction(). 707 * See the comment at the end of jbd2_journal_commit_transaction().
703 */ 708 */
704 if (transaction->t_state != T_FINISHED) { 709 if (transaction->t_state != T_FINISHED)
705 JBUFFER_TRACE(jh, "belongs to running/committing transaction");
706 goto out; 710 goto out;
707 }
708 711
709 /* OK, that was the last buffer for the transaction: we can now 712 /* OK, that was the last buffer for the transaction: we can now
710 safely remove this transaction from the log */ 713 safely remove this transaction from the log */
@@ -723,7 +726,6 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
723 wake_up(&journal->j_wait_logspace); 726 wake_up(&journal->j_wait_logspace);
724 ret = 1; 727 ret = 1;
725out: 728out:
726 JBUFFER_TRACE(jh, "exit");
727 return ret; 729 return ret;
728} 730}
729 731
@@ -742,6 +744,8 @@ void __jbd2_journal_insert_checkpoint(struct journal_head *jh,
742 J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh))); 744 J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh)));
743 J_ASSERT_JH(jh, jh->b_cp_transaction == NULL); 745 J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
744 746
747 /* Get reference for checkpointing transaction */
748 jbd2_journal_grab_journal_head(jh2bh(jh));
745 jh->b_cp_transaction = transaction; 749 jh->b_cp_transaction = transaction;
746 750
747 if (!transaction->t_checkpoint_list) { 751 if (!transaction->t_checkpoint_list) {
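The checkpoint.c hunks above repeatedly add get_bh() before __jbd2_journal_remove_checkpoint() because that call may now drop the checkpoint transaction's journal_head reference, which can be the last reference pinning the buffer. A minimal user-space C model of the pin pattern; the types and the plain int refcount are simplified stand-ins, not the kernel implementation:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy buffer with a plain int refcount; the kernel's b_count is atomic. */
struct buffer_head { int b_count; };

static void get_bh(struct buffer_head *bh) { bh->b_count++; }

static void __brelse(struct buffer_head *bh)
{
	assert(bh->b_count > 0);
	if (--bh->b_count == 0) {
		printf("buffer freed\n");
		free(bh);
	}
}

/* Stands in for __jbd2_journal_remove_checkpoint(): it may drop the
 * checkpoint transaction's reference, which can be the last one. */
static void remove_checkpoint(struct buffer_head *bh)
{
	__brelse(bh);
}

int main(void)
{
	struct buffer_head *bh = calloc(1, sizeof(*bh));

	assert(bh);
	bh->b_count = 1;	/* reference held by the checkpoint list */
	get_bh(bh);		/* pin before the call that may free it */
	remove_checkpoint(bh);
	printf("bh still valid, b_count=%d\n", bh->b_count);
	__brelse(bh);		/* drop our pin; buffer freed here */
	return 0;
}

Without the get_bh() pin, touching bh after remove_checkpoint() would be a use-after-free whenever the checkpoint list held the last reference.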
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 7f21cf3aaf92..eef6979821a4 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -848,10 +848,16 @@ restart_loop:
848 while (commit_transaction->t_forget) { 848 while (commit_transaction->t_forget) {
849 transaction_t *cp_transaction; 849 transaction_t *cp_transaction;
850 struct buffer_head *bh; 850 struct buffer_head *bh;
851 int try_to_free = 0;
851 852
852 jh = commit_transaction->t_forget; 853 jh = commit_transaction->t_forget;
853 spin_unlock(&journal->j_list_lock); 854 spin_unlock(&journal->j_list_lock);
854 bh = jh2bh(jh); 855 bh = jh2bh(jh);
856 /*
857 * Get a reference so that bh cannot be freed before we are
858 * done with it.
859 */
860 get_bh(bh);
855 jbd_lock_bh_state(bh); 861 jbd_lock_bh_state(bh);
856 J_ASSERT_JH(jh, jh->b_transaction == commit_transaction); 862 J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);
857 863
@@ -914,28 +920,27 @@ restart_loop:
914 __jbd2_journal_insert_checkpoint(jh, commit_transaction); 920 __jbd2_journal_insert_checkpoint(jh, commit_transaction);
915 if (is_journal_aborted(journal)) 921 if (is_journal_aborted(journal))
916 clear_buffer_jbddirty(bh); 922 clear_buffer_jbddirty(bh);
917 JBUFFER_TRACE(jh, "refile for checkpoint writeback");
918 __jbd2_journal_refile_buffer(jh);
919 jbd_unlock_bh_state(bh);
920 } else { 923 } else {
921 J_ASSERT_BH(bh, !buffer_dirty(bh)); 924 J_ASSERT_BH(bh, !buffer_dirty(bh));
922 /* The buffer on BJ_Forget list and not jbddirty means 925 /*
926 * The buffer on BJ_Forget list and not jbddirty means
923 * it has been freed by this transaction and hence it 927 * it has been freed by this transaction and hence it
924 * could not have been reallocated until this 928 * could not have been reallocated until this
925 * transaction has committed. *BUT* it could be 929 * transaction has committed. *BUT* it could be
926 * reallocated once we have written all the data to 930 * reallocated once we have written all the data to
927 * disk and before we process the buffer on BJ_Forget 931 * disk and before we process the buffer on BJ_Forget
928 * list. */ 932 * list.
929 JBUFFER_TRACE(jh, "refile or unfile freed buffer"); 933 */
930 __jbd2_journal_refile_buffer(jh); 934 if (!jh->b_next_transaction)
931 if (!jh->b_transaction) { 935 try_to_free = 1;
932 jbd_unlock_bh_state(bh);
933 /* needs a brelse */
934 jbd2_journal_remove_journal_head(bh);
935 release_buffer_page(bh);
936 } else
937 jbd_unlock_bh_state(bh);
938 } 936 }
937 JBUFFER_TRACE(jh, "refile or unfile buffer");
938 __jbd2_journal_refile_buffer(jh);
939 jbd_unlock_bh_state(bh);
940 if (try_to_free)
941 release_buffer_page(bh); /* Drops bh reference */
942 else
943 __brelse(bh);
939 cond_resched_lock(&journal->j_list_lock); 944 cond_resched_lock(&journal->j_list_lock);
940 } 945 }
941 spin_unlock(&journal->j_list_lock); 946 spin_unlock(&journal->j_list_lock);
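The rewritten t_forget loop records its decision in try_to_free while the bh state lock is held, then acts on it after unlocking: release_buffer_page() consumes the reference taken by get_bh(), otherwise __brelse() drops it. A sketch of that shape, with hypothetical stand-ins for the locking and the release helpers:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins: decide while "locked", act after "unlock". */
struct buffer { int refs; bool on_next_transaction; };

static void release_buffer_page(struct buffer *b) { b->refs--; /* drops ref */ }
static void brelse_(struct buffer *b)             { b->refs--; }

static void forget_one(struct buffer *b)
{
	bool try_to_free = false;

	b->refs++;			/* get_bh(): pin across the unlock */
	/* ---- bh state lock held ---- */
	if (!b->on_next_transaction)
		try_to_free = true;	/* decision recorded under the lock */
	/* ---- bh state lock dropped ---- */
	if (try_to_free)
		release_buffer_page(b);	/* drops the reference we took */
	else
		brelse_(b);
}

int main(void)
{
	struct buffer b = { .refs = 1, .on_next_transaction = false };

	forget_one(&b);
	printf("refs=%d\n", b.refs);	/* back to 1: the pin was balanced */
	return 0;
}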
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 9a7826990304..0dfa5b598e68 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2078,10 +2078,9 @@ static void journal_free_journal_head(struct journal_head *jh)
2078 * When a buffer has its BH_JBD bit set it is immune from being released by 2078 * When a buffer has its BH_JBD bit set it is immune from being released by
2079 * core kernel code, mainly via ->b_count. 2079 * core kernel code, mainly via ->b_count.
2080 * 2080 *
2081 * A journal_head may be detached from its buffer_head when the journal_head's 2081 * A journal_head is detached from its buffer_head when the journal_head's
2082 * b_transaction, b_cp_transaction and b_next_transaction pointers are NULL. 2082 * b_jcount reaches zero. Running transaction (b_transaction) and checkpoint
2083 * Various places in JBD call jbd2_journal_remove_journal_head() to indicate that the 2083 * transaction (b_cp_transaction) hold their references to b_jcount.
2084 * journal_head can be dropped if needed.
2085 * 2084 *
2086 * Various places in the kernel want to attach a journal_head to a buffer_head 2085 * Various places in the kernel want to attach a journal_head to a buffer_head
2087 * _before_ attaching the journal_head to a transaction. To protect the 2086 * _before_ attaching the journal_head to a transaction. To protect the
@@ -2094,17 +2093,16 @@ static void journal_free_journal_head(struct journal_head *jh)
2094 * (Attach a journal_head if needed. Increments b_jcount) 2093 * (Attach a journal_head if needed. Increments b_jcount)
2095 * struct journal_head *jh = jbd2_journal_add_journal_head(bh); 2094 * struct journal_head *jh = jbd2_journal_add_journal_head(bh);
2096 * ... 2095 * ...
2096 * (Get another reference for transaction)
2097 * jbd2_journal_grab_journal_head(bh);
2097 * jh->b_transaction = xxx; 2098 * jh->b_transaction = xxx;
2099 * (Put original reference)
2098 * jbd2_journal_put_journal_head(jh); 2100 * jbd2_journal_put_journal_head(jh);
2099 *
2100 * Now, the journal_head's b_jcount is zero, but it is safe from being released
2101 * because it has a non-zero b_transaction.
2102 */ 2101 */
2103 2102
2104/* 2103/*
2105 * Give a buffer_head a journal_head. 2104 * Give a buffer_head a journal_head.
2106 * 2105 *
2107 * Doesn't need the journal lock.
2108 * May sleep. 2106 * May sleep.
2109 */ 2107 */
2110struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh) 2108struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh)
@@ -2168,61 +2166,29 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
2168 struct journal_head *jh = bh2jh(bh); 2166 struct journal_head *jh = bh2jh(bh);
2169 2167
2170 J_ASSERT_JH(jh, jh->b_jcount >= 0); 2168 J_ASSERT_JH(jh, jh->b_jcount >= 0);
2171 2169 J_ASSERT_JH(jh, jh->b_transaction == NULL);
2172 get_bh(bh); 2170 J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
2173 if (jh->b_jcount == 0) { 2171 J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
2174 if (jh->b_transaction == NULL && 2172 J_ASSERT_JH(jh, jh->b_jlist == BJ_None);
2175 jh->b_next_transaction == NULL && 2173 J_ASSERT_BH(bh, buffer_jbd(bh));
2176 jh->b_cp_transaction == NULL) { 2174 J_ASSERT_BH(bh, jh2bh(jh) == bh);
2177 J_ASSERT_JH(jh, jh->b_jlist == BJ_None); 2175 BUFFER_TRACE(bh, "remove journal_head");
2178 J_ASSERT_BH(bh, buffer_jbd(bh)); 2176 if (jh->b_frozen_data) {
2179 J_ASSERT_BH(bh, jh2bh(jh) == bh); 2177 printk(KERN_WARNING "%s: freeing b_frozen_data\n", __func__);
2180 BUFFER_TRACE(bh, "remove journal_head"); 2178 jbd2_free(jh->b_frozen_data, bh->b_size);
2181 if (jh->b_frozen_data) {
2182 printk(KERN_WARNING "%s: freeing "
2183 "b_frozen_data\n",
2184 __func__);
2185 jbd2_free(jh->b_frozen_data, bh->b_size);
2186 }
2187 if (jh->b_committed_data) {
2188 printk(KERN_WARNING "%s: freeing "
2189 "b_committed_data\n",
2190 __func__);
2191 jbd2_free(jh->b_committed_data, bh->b_size);
2192 }
2193 bh->b_private = NULL;
2194 jh->b_bh = NULL; /* debug, really */
2195 clear_buffer_jbd(bh);
2196 __brelse(bh);
2197 journal_free_journal_head(jh);
2198 } else {
2199 BUFFER_TRACE(bh, "journal_head was locked");
2200 }
2201 } 2179 }
2180 if (jh->b_committed_data) {
2181 printk(KERN_WARNING "%s: freeing b_committed_data\n", __func__);
2182 jbd2_free(jh->b_committed_data, bh->b_size);
2183 }
2184 bh->b_private = NULL;
2185 jh->b_bh = NULL; /* debug, really */
2186 clear_buffer_jbd(bh);
2187 journal_free_journal_head(jh);
2202} 2188}
2203 2189
2204/* 2190/*
2205 * jbd2_journal_remove_journal_head(): if the buffer isn't attached to a transaction 2191 * Drop a reference on the passed journal_head. If it fell to zero then
2206 * and has a zero b_jcount then remove and release its journal_head. If we did
2207 * see that the buffer is not used by any transaction we also "logically"
2208 * decrement ->b_count.
2209 *
2210 * We in fact take an additional increment on ->b_count as a convenience,
2211 * because the caller usually wants to do additional things with the bh
2212 * after calling here.
2213 * The caller of jbd2_journal_remove_journal_head() *must* run __brelse(bh) at some
2214 * time. Once the caller has run __brelse(), the buffer is eligible for
2215 * reaping by try_to_free_buffers().
2216 */
2217void jbd2_journal_remove_journal_head(struct buffer_head *bh)
2218{
2219 jbd_lock_bh_journal_head(bh);
2220 __journal_remove_journal_head(bh);
2221 jbd_unlock_bh_journal_head(bh);
2222}
2223
2224/*
2225 * Drop a reference on the passed journal_head. If it fell to zero then try to
2226 * release the journal_head from the buffer_head. 2192 * release the journal_head from the buffer_head.
2227 */ 2193 */
2228void jbd2_journal_put_journal_head(struct journal_head *jh) 2194void jbd2_journal_put_journal_head(struct journal_head *jh)
@@ -2232,11 +2198,12 @@ void jbd2_journal_put_journal_head(struct journal_head *jh)
2232 jbd_lock_bh_journal_head(bh); 2198 jbd_lock_bh_journal_head(bh);
2233 J_ASSERT_JH(jh, jh->b_jcount > 0); 2199 J_ASSERT_JH(jh, jh->b_jcount > 0);
2234 --jh->b_jcount; 2200 --jh->b_jcount;
2235 if (!jh->b_jcount && !jh->b_transaction) { 2201 if (!jh->b_jcount) {
2236 __journal_remove_journal_head(bh); 2202 __journal_remove_journal_head(bh);
2203 jbd_unlock_bh_journal_head(bh);
2237 __brelse(bh); 2204 __brelse(bh);
2238 } 2205 } else
2239 jbd_unlock_bh_journal_head(bh); 2206 jbd_unlock_bh_journal_head(bh);
2240} 2207}
2241 2208
2242/* 2209/*
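Under the reworked scheme every holder of a journal_head, including the running and checkpoint transactions, owns a b_jcount reference, and __journal_remove_journal_head() runs exactly when the count hits zero; jbd2_journal_remove_journal_head() disappears entirely. A simplified single-threaded model of that lifecycle, with a plain int standing in for the locked counter:

#include <stdio.h>
#include <stdlib.h>

struct journal_head { int b_jcount; };

/* Models jbd2_journal_add_journal_head(): the caller gets the first ref. */
static struct journal_head *add_journal_head(void)
{
	struct journal_head *jh = calloc(1, sizeof(*jh));

	jh->b_jcount = 1;
	return jh;
}

static void grab_journal_head(struct journal_head *jh) { jh->b_jcount++; }

/* Models jbd2_journal_put_journal_head(): detaches on the last put,
 * with no extra "not on a transaction" test any more. */
static void put_journal_head(struct journal_head *jh)
{
	if (--jh->b_jcount == 0) {
		printf("journal_head detached and freed\n");
		free(jh);
	}
}

int main(void)
{
	struct journal_head *jh = add_journal_head();

	grab_journal_head(jh);	/* ref taken when filed on a transaction */
	put_journal_head(jh);	/* caller drops its original reference */
	put_journal_head(jh);	/* unfile drops the transaction's ref: freed */
	return 0;
}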
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 3eec82d32fd4..2d7109414cdd 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -30,6 +30,7 @@
30#include <linux/module.h> 30#include <linux/module.h>
31 31
32static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh); 32static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
33static void __jbd2_journal_unfile_buffer(struct journal_head *jh);
33 34
34/* 35/*
35 * jbd2_get_transaction: obtain a new transaction_t object. 36 * jbd2_get_transaction: obtain a new transaction_t object.
@@ -764,7 +765,6 @@ repeat:
764 if (!jh->b_transaction) { 765 if (!jh->b_transaction) {
765 JBUFFER_TRACE(jh, "no transaction"); 766 JBUFFER_TRACE(jh, "no transaction");
766 J_ASSERT_JH(jh, !jh->b_next_transaction); 767 J_ASSERT_JH(jh, !jh->b_next_transaction);
767 jh->b_transaction = transaction;
768 JBUFFER_TRACE(jh, "file as BJ_Reserved"); 768 JBUFFER_TRACE(jh, "file as BJ_Reserved");
769 spin_lock(&journal->j_list_lock); 769 spin_lock(&journal->j_list_lock);
770 __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved); 770 __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
@@ -814,7 +814,6 @@ out:
814 * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update. 814 * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
815 * @handle: transaction to add buffer modifications to 815 * @handle: transaction to add buffer modifications to
816 * @bh: bh to be used for metadata writes 816 * @bh: bh to be used for metadata writes
817 * @credits: variable that will receive credits for the buffer
818 * 817 *
819 * Returns an error code or 0 on success. 818 * Returns an error code or 0 on success.
820 * 819 *
@@ -896,8 +895,6 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
896 * committed and so it's safe to clear the dirty bit. 895 * committed and so it's safe to clear the dirty bit.
897 */ 896 */
898 clear_buffer_dirty(jh2bh(jh)); 897 clear_buffer_dirty(jh2bh(jh));
899 jh->b_transaction = transaction;
900
901 /* first access by this transaction */ 898 /* first access by this transaction */
902 jh->b_modified = 0; 899 jh->b_modified = 0;
903 900
@@ -932,7 +929,6 @@ out:
932 * non-rewindable consequences 929 * non-rewindable consequences
933 * @handle: transaction 930 * @handle: transaction
934 * @bh: buffer to undo 931 * @bh: buffer to undo
935 * @credits: store the number of taken credits here (if not NULL)
936 * 932 *
937 * Sometimes there is a need to distinguish between metadata which has 933 * Sometimes there is a need to distinguish between metadata which has
938 * been committed to disk and that which has not. The ext3fs code uses 934 * been committed to disk and that which has not. The ext3fs code uses
@@ -1232,8 +1228,6 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
1232 __jbd2_journal_file_buffer(jh, transaction, BJ_Forget); 1228 __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
1233 } else { 1229 } else {
1234 __jbd2_journal_unfile_buffer(jh); 1230 __jbd2_journal_unfile_buffer(jh);
1235 jbd2_journal_remove_journal_head(bh);
1236 __brelse(bh);
1237 if (!buffer_jbd(bh)) { 1231 if (!buffer_jbd(bh)) {
1238 spin_unlock(&journal->j_list_lock); 1232 spin_unlock(&journal->j_list_lock);
1239 jbd_unlock_bh_state(bh); 1233 jbd_unlock_bh_state(bh);
@@ -1556,19 +1550,32 @@ void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
1556 mark_buffer_dirty(bh); /* Expose it to the VM */ 1550 mark_buffer_dirty(bh); /* Expose it to the VM */
1557} 1551}
1558 1552
1559void __jbd2_journal_unfile_buffer(struct journal_head *jh) 1553/*
1554 * Remove buffer from all transactions.
1555 *
1556 * Called with bh_state lock and j_list_lock held

1557 *
1558 * jh and bh may be already freed when this function returns.
1559 */
1560static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
1560{ 1561{
1561 __jbd2_journal_temp_unlink_buffer(jh); 1562 __jbd2_journal_temp_unlink_buffer(jh);
1562 jh->b_transaction = NULL; 1563 jh->b_transaction = NULL;
1564 jbd2_journal_put_journal_head(jh);
1563} 1565}
1564 1566
1565void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh) 1567void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
1566{ 1568{
1567 jbd_lock_bh_state(jh2bh(jh)); 1569 struct buffer_head *bh = jh2bh(jh);
1570
1571 /* Get reference so that buffer cannot be freed before we unlock it */
1572 get_bh(bh);
1573 jbd_lock_bh_state(bh);
1568 spin_lock(&journal->j_list_lock); 1574 spin_lock(&journal->j_list_lock);
1569 __jbd2_journal_unfile_buffer(jh); 1575 __jbd2_journal_unfile_buffer(jh);
1570 spin_unlock(&journal->j_list_lock); 1576 spin_unlock(&journal->j_list_lock);
1571 jbd_unlock_bh_state(jh2bh(jh)); 1577 jbd_unlock_bh_state(bh);
1578 __brelse(bh);
1572} 1579}
1573 1580
1574/* 1581/*
@@ -1595,8 +1602,6 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
1595 if (jh->b_jlist == BJ_None) { 1602 if (jh->b_jlist == BJ_None) {
1596 JBUFFER_TRACE(jh, "remove from checkpoint list"); 1603 JBUFFER_TRACE(jh, "remove from checkpoint list");
1597 __jbd2_journal_remove_checkpoint(jh); 1604 __jbd2_journal_remove_checkpoint(jh);
1598 jbd2_journal_remove_journal_head(bh);
1599 __brelse(bh);
1600 } 1605 }
1601 } 1606 }
1602 spin_unlock(&journal->j_list_lock); 1607 spin_unlock(&journal->j_list_lock);
@@ -1659,7 +1664,6 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal,
1659 /* 1664 /*
1660 * We take our own ref against the journal_head here to avoid 1665 * We take our own ref against the journal_head here to avoid
1661 * having to add tons of locking around each instance of 1666 * having to add tons of locking around each instance of
1662 * jbd2_journal_remove_journal_head() and
1663 * jbd2_journal_put_journal_head(). 1667 * jbd2_journal_put_journal_head().
1664 */ 1668 */
1665 jh = jbd2_journal_grab_journal_head(bh); 1669 jh = jbd2_journal_grab_journal_head(bh);
@@ -1697,10 +1701,9 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
1697 int may_free = 1; 1701 int may_free = 1;
1698 struct buffer_head *bh = jh2bh(jh); 1702 struct buffer_head *bh = jh2bh(jh);
1699 1703
1700 __jbd2_journal_unfile_buffer(jh);
1701
1702 if (jh->b_cp_transaction) { 1704 if (jh->b_cp_transaction) {
1703 JBUFFER_TRACE(jh, "on running+cp transaction"); 1705 JBUFFER_TRACE(jh, "on running+cp transaction");
1706 __jbd2_journal_temp_unlink_buffer(jh);
1704 /* 1707 /*
1705 * We don't want to write the buffer anymore, clear the 1708 * We don't want to write the buffer anymore, clear the
1706 * bit so that we don't confuse checks in 1709 * bit so that we don't confuse checks in
@@ -1711,8 +1714,7 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
1711 may_free = 0; 1714 may_free = 0;
1712 } else { 1715 } else {
1713 JBUFFER_TRACE(jh, "on running transaction"); 1716 JBUFFER_TRACE(jh, "on running transaction");
1714 jbd2_journal_remove_journal_head(bh); 1717 __jbd2_journal_unfile_buffer(jh);
1715 __brelse(bh);
1716 } 1718 }
1717 return may_free; 1719 return may_free;
1718} 1720}
@@ -1990,6 +1992,8 @@ void __jbd2_journal_file_buffer(struct journal_head *jh,
1990 1992
1991 if (jh->b_transaction) 1993 if (jh->b_transaction)
1992 __jbd2_journal_temp_unlink_buffer(jh); 1994 __jbd2_journal_temp_unlink_buffer(jh);
1995 else
1996 jbd2_journal_grab_journal_head(bh);
1993 jh->b_transaction = transaction; 1997 jh->b_transaction = transaction;
1994 1998
1995 switch (jlist) { 1999 switch (jlist) {
@@ -2041,9 +2045,10 @@ void jbd2_journal_file_buffer(struct journal_head *jh,
2041 * already started to be used by a subsequent transaction, refile the 2045 * already started to be used by a subsequent transaction, refile the
2042 * buffer on that transaction's metadata list. 2046 * buffer on that transaction's metadata list.
2043 * 2047 *
2044 * Called under journal->j_list_lock 2048 * Called under j_list_lock
2045 *
2046 * Called under jbd_lock_bh_state(jh2bh(jh)) 2049 * Called under jbd_lock_bh_state(jh2bh(jh))
2050 *
2051 * jh and bh may be already freed when this function returns
2047 */ 2052 */
2048void __jbd2_journal_refile_buffer(struct journal_head *jh) 2053void __jbd2_journal_refile_buffer(struct journal_head *jh)
2049{ 2054{
@@ -2067,6 +2072,11 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
2067 2072
2068 was_dirty = test_clear_buffer_jbddirty(bh); 2073 was_dirty = test_clear_buffer_jbddirty(bh);
2069 __jbd2_journal_temp_unlink_buffer(jh); 2074 __jbd2_journal_temp_unlink_buffer(jh);
2075 /*
2076 * We set b_transaction here because b_next_transaction will inherit
2077 * our jh reference and thus __jbd2_journal_file_buffer() must not
2078 * take a new one.
2079 */
2070 jh->b_transaction = jh->b_next_transaction; 2080 jh->b_transaction = jh->b_next_transaction;
2071 jh->b_next_transaction = NULL; 2081 jh->b_next_transaction = NULL;
2072 if (buffer_freed(bh)) 2082 if (buffer_freed(bh))
@@ -2083,30 +2093,21 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
2083} 2093}
2084 2094
2085/* 2095/*
2086 * For the unlocked version of this call, also make sure that any 2096 * __jbd2_journal_refile_buffer() with necessary locking added. We take our
2087 * hanging journal_head is cleaned up if necessary. 2097 * bh reference so that we can safely unlock bh.
2088 * 2098 *
2089 * __jbd2_journal_refile_buffer is usually called as part of a single locked 2099 * The jh and bh may be freed by this call.
2090 * operation on a buffer_head, in which the caller is probably going to
2091 * be hooking the journal_head onto other lists. In that case it is up
2092 * to the caller to remove the journal_head if necessary. For the
2093 * unlocked jbd2_journal_refile_buffer call, the caller isn't going to be
2094 * doing anything else to the buffer so we need to do the cleanup
2095 * ourselves to avoid a jh leak.
2096 *
2097 * *** The journal_head may be freed by this call! ***
2098 */ 2100 */
2099void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh) 2101void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
2100{ 2102{
2101 struct buffer_head *bh = jh2bh(jh); 2103 struct buffer_head *bh = jh2bh(jh);
2102 2104
2105 /* Get reference so that buffer cannot be freed before we unlock it */
2106 get_bh(bh);
2103 jbd_lock_bh_state(bh); 2107 jbd_lock_bh_state(bh);
2104 spin_lock(&journal->j_list_lock); 2108 spin_lock(&journal->j_list_lock);
2105
2106 __jbd2_journal_refile_buffer(jh); 2109 __jbd2_journal_refile_buffer(jh);
2107 jbd_unlock_bh_state(bh); 2110 jbd_unlock_bh_state(bh);
2108 jbd2_journal_remove_journal_head(bh);
2109
2110 spin_unlock(&journal->j_list_lock); 2111 spin_unlock(&journal->j_list_lock);
2111 __brelse(bh); 2112 __brelse(bh);
2112} 2113}
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index c5ce6c1d1ff4..2f3f531f3606 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -66,9 +66,9 @@ static int jfs_open(struct inode *inode, struct file *file)
66 struct jfs_inode_info *ji = JFS_IP(inode); 66 struct jfs_inode_info *ji = JFS_IP(inode);
67 spin_lock_irq(&ji->ag_lock); 67 spin_lock_irq(&ji->ag_lock);
68 if (ji->active_ag == -1) { 68 if (ji->active_ag == -1) {
69 ji->active_ag = ji->agno; 69 struct jfs_sb_info *jfs_sb = JFS_SBI(inode->i_sb);
70 atomic_inc( 70 ji->active_ag = BLKTOAG(addressPXD(&ji->ixpxd), jfs_sb);
71 &JFS_SBI(inode->i_sb)->bmap->db_active[ji->agno]); 71 atomic_inc( &jfs_sb->bmap->db_active[ji->active_ag]);
72 } 72 }
73 spin_unlock_irq(&ji->ag_lock); 73 spin_unlock_irq(&ji->ag_lock);
74 } 74 }
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index ed53a4740168..b78b2f978f04 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -397,7 +397,7 @@ int diRead(struct inode *ip)
397 release_metapage(mp); 397 release_metapage(mp);
398 398
399 /* set the ag for the inode */ 399 /* set the ag for the inode */
400 JFS_IP(ip)->agno = BLKTOAG(agstart, sbi); 400 JFS_IP(ip)->agstart = agstart;
401 JFS_IP(ip)->active_ag = -1; 401 JFS_IP(ip)->active_ag = -1;
402 402
403 return (rc); 403 return (rc);
@@ -901,7 +901,7 @@ int diFree(struct inode *ip)
901 901
902 /* get the allocation group for this ino. 902 /* get the allocation group for this ino.
903 */ 903 */
904 agno = JFS_IP(ip)->agno; 904 agno = BLKTOAG(JFS_IP(ip)->agstart, JFS_SBI(ip->i_sb));
905 905
906 /* Lock the AG specific inode map information 906 /* Lock the AG specific inode map information
907 */ 907 */
@@ -1315,12 +1315,11 @@ int diFree(struct inode *ip)
1315static inline void 1315static inline void
1316diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp) 1316diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp)
1317{ 1317{
1318 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
1319 struct jfs_inode_info *jfs_ip = JFS_IP(ip); 1318 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
1320 1319
1321 ip->i_ino = (iagno << L2INOSPERIAG) + ino; 1320 ip->i_ino = (iagno << L2INOSPERIAG) + ino;
1322 jfs_ip->ixpxd = iagp->inoext[extno]; 1321 jfs_ip->ixpxd = iagp->inoext[extno];
1323 jfs_ip->agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi); 1322 jfs_ip->agstart = le64_to_cpu(iagp->agstart);
1324 jfs_ip->active_ag = -1; 1323 jfs_ip->active_ag = -1;
1325} 1324}
1326 1325
@@ -1379,7 +1378,7 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
1379 */ 1378 */
1380 1379
1381 /* get the ag number of this iag */ 1380 /* get the ag number of this iag */
1382 agno = JFS_IP(pip)->agno; 1381 agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb));
1383 1382
1384 if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) { 1383 if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {
1385 /* 1384 /*
@@ -2921,10 +2920,9 @@ int diExtendFS(struct inode *ipimap, struct inode *ipbmap)
2921 continue; 2920 continue;
2922 } 2921 }
2923 2922
2924 /* agstart that computes to the same ag is treated as same; */
2925 agstart = le64_to_cpu(iagp->agstart); 2923 agstart = le64_to_cpu(iagp->agstart);
2926 /* iagp->agstart = agstart & ~(mp->db_agsize - 1); */
2927 n = agstart >> mp->db_agl2size; 2924 n = agstart >> mp->db_agl2size;
2925 iagp->agstart = cpu_to_le64((s64)n << mp->db_agl2size);
2928 2926
2929 /* compute backed inodes */ 2927 /* compute backed inodes */
2930 numinos = (EXTSPERIAG - le32_to_cpu(iagp->nfreeexts)) 2928 numinos = (EXTSPERIAG - le32_to_cpu(iagp->nfreeexts))
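Caching agstart (a block address) instead of agno (a group index) matters because jfs_extendfs() can change the allocation-group size, after which a stored index silently points at the wrong group; recomputing via BLKTOAG stays correct. A sketch assuming BLKTOAG reduces to a shift by the log2 AG size, matching the diExtendFS hunk above (agstart >> db_agl2size):

#include <stdint.h>
#include <stdio.h>

/* Assumed equivalent of BLKTOAG(): block address -> AG index. */
static int64_t blktoag(int64_t agstart, int agl2size)
{
	return agstart >> agl2size;
}

int main(void)
{
	int64_t agstart = (int64_t)1 << 20;	/* start block of the group */

	/* Same stable agstart, two different AG sizes (e.g. after resize). */
	printf("agno with 2^18-block AGs: %lld\n", (long long)blktoag(agstart, 18));
	printf("agno with 2^19-block AGs: %lld\n", (long long)blktoag(agstart, 19));
	return 0;
}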
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h
index 1439f119ec83..584a4a1a6e81 100644
--- a/fs/jfs/jfs_incore.h
+++ b/fs/jfs/jfs_incore.h
@@ -50,8 +50,9 @@ struct jfs_inode_info {
50 short btindex; /* btpage entry index*/ 50 short btindex; /* btpage entry index*/
51 struct inode *ipimap; /* inode map */ 51 struct inode *ipimap; /* inode map */
52 unsigned long cflag; /* commit flags */ 52 unsigned long cflag; /* commit flags */
53 u64 agstart; /* agstart of the containing IAG */
53 u16 bxflag; /* xflag of pseudo buffer? */ 54 u16 bxflag; /* xflag of pseudo buffer? */
54 unchar agno; /* ag number */ 55 unchar pad;
55 signed char active_ag; /* ag currently allocating from */ 56 signed char active_ag; /* ag currently allocating from */
56 lid_t blid; /* lid of pseudo buffer? */ 57 lid_t blid; /* lid of pseudo buffer? */
57 lid_t atlhead; /* anonymous tlock list head */ 58 lid_t atlhead; /* anonymous tlock list head */
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
index 8ea5efb5a34e..8d0c1c7c0820 100644
--- a/fs/jfs/resize.c
+++ b/fs/jfs/resize.c
@@ -80,7 +80,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
80 int log_formatted = 0; 80 int log_formatted = 0;
81 struct inode *iplist[1]; 81 struct inode *iplist[1];
82 struct jfs_superblock *j_sb, *j_sb2; 82 struct jfs_superblock *j_sb, *j_sb2;
83 uint old_agsize; 83 s64 old_agsize;
84 int agsizechanged = 0; 84 int agsizechanged = 0;
85 struct buffer_head *bh, *bh2; 85 struct buffer_head *bh, *bh2;
86 86
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index adb45ec9038c..e374050a911c 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -708,7 +708,13 @@ static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
708 708
709 if (task->tk_status < 0) { 709 if (task->tk_status < 0) {
710 dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status); 710 dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
711 goto retry_rebind; 711 switch (task->tk_status) {
712 case -EACCES:
713 case -EIO:
714 goto die;
715 default:
716 goto retry_rebind;
717 }
712 } 718 }
713 if (status == NLM_LCK_DENIED_GRACE_PERIOD) { 719 if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
714 rpc_delay(task, NLMCLNT_GRACE_WAIT); 720 rpc_delay(task, NLMCLNT_GRACE_WAIT);
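The point of the new switch is that -EACCES and -EIO from an unlock RPC are permanent, so retrying through the rebind path forever can never succeed; everything else is still treated as transient. A hypothetical classifier showing the same split (the real flow jumps to the die/retry_rebind labels in fs/lockd/clntproc.c):

#include <errno.h>
#include <stdio.h>

static const char *classify_unlock_status(int tk_status)
{
	switch (tk_status) {
	case -EACCES:
	case -EIO:
		return "die";		/* permanent failure: stop retrying */
	default:
		return "retry_rebind";	/* transient: rebind and retry */
	}
}

int main(void)
{
	printf("-EIO    -> %s\n", classify_unlock_status(-EIO));
	printf("-EACCES -> %s\n", classify_unlock_status(-EACCES));
	printf("-ENOLCK -> %s\n", classify_unlock_status(-ENOLCK));
	return 0;
}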
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 9ed89d1663f8..1afae26cf236 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -555,13 +555,6 @@ static int logfs_symlink(struct inode *dir, struct dentry *dentry,
555 return __logfs_create(dir, dentry, inode, target, destlen); 555 return __logfs_create(dir, dentry, inode, target, destlen);
556} 556}
557 557
558static int logfs_permission(struct inode *inode, int mask, unsigned int flags)
559{
560 if (flags & IPERM_FLAG_RCU)
561 return -ECHILD;
562 return generic_permission(inode, mask, flags, NULL);
563}
564
565static int logfs_link(struct dentry *old_dentry, struct inode *dir, 558static int logfs_link(struct dentry *old_dentry, struct inode *dir,
566 struct dentry *dentry) 559 struct dentry *dentry)
567{ 560{
@@ -820,7 +813,6 @@ const struct inode_operations logfs_dir_iops = {
820 .mknod = logfs_mknod, 813 .mknod = logfs_mknod,
821 .rename = logfs_rename, 814 .rename = logfs_rename,
822 .rmdir = logfs_rmdir, 815 .rmdir = logfs_rmdir,
823 .permission = logfs_permission,
824 .symlink = logfs_symlink, 816 .symlink = logfs_symlink,
825 .unlink = logfs_unlink, 817 .unlink = logfs_unlink,
826}; 818};
diff --git a/fs/namei.c b/fs/namei.c
index 9e425e7e6c8f..0223c41fb114 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -238,7 +238,8 @@ int generic_permission(struct inode *inode, int mask, unsigned int flags,
238 238
239 /* 239 /*
240 * Read/write DACs are always overridable. 240 * Read/write DACs are always overridable.
241 * Executable DACs are overridable if at least one exec bit is set. 241 * Executable DACs are overridable for all directories and
242 * for non-directories that have at least one exec bit set.
242 */ 243 */
243 if (!(mask & MAY_EXEC) || execute_ok(inode)) 244 if (!(mask & MAY_EXEC) || execute_ok(inode))
244 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) 245 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
@@ -1011,9 +1012,6 @@ failed:
1011 * Follow down to the covering mount currently visible to userspace. At each 1012 * Follow down to the covering mount currently visible to userspace. At each
1012 * point, the filesystem owning that dentry may be queried as to whether the 1013 * point, the filesystem owning that dentry may be queried as to whether the
1013 * caller is permitted to proceed or not. 1014 * caller is permitted to proceed or not.
1014 *
1015 * Care must be taken as namespace_sem may be held (indicated by mounting_here
1016 * being true).
1017 */ 1015 */
1018int follow_down(struct path *path) 1016int follow_down(struct path *path)
1019{ 1017{
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 144f2a3c7185..6f4850deb272 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -256,7 +256,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
256 256
257 nfs_attr_check_mountpoint(sb, fattr); 257 nfs_attr_check_mountpoint(sb, fattr);
258 258
259 if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0 && (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) 259 if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) &&
260 !nfs_attr_use_mounted_on_fileid(fattr))
260 goto out_no_inode; 261 goto out_no_inode;
261 if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0) 262 if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0)
262 goto out_no_inode; 263 goto out_no_inode;
@@ -1294,7 +1295,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1294 if (new_isize != cur_isize) { 1295 if (new_isize != cur_isize) {
1295 /* Do we perhaps have any outstanding writes, or has 1296 /* Do we perhaps have any outstanding writes, or has
1296 * the file grown beyond our last write? */ 1297 * the file grown beyond our last write? */
1297 if (nfsi->npages == 0 || new_isize > cur_isize) { 1298 if ((nfsi->npages == 0 && !test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) ||
1299 new_isize > cur_isize) {
1298 i_size_write(inode, new_isize); 1300 i_size_write(inode, new_isize);
1299 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; 1301 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
1300 } 1302 }
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index b9056cbe68d6..2a55347a2daa 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -45,6 +45,17 @@ static inline void nfs_attr_check_mountpoint(struct super_block *parent, struct
45 fattr->valid |= NFS_ATTR_FATTR_MOUNTPOINT; 45 fattr->valid |= NFS_ATTR_FATTR_MOUNTPOINT;
46} 46}
47 47
48static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr)
49{
50 if (((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) == 0) ||
51 (((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) &&
52 ((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0)))
53 return 0;
54
55 fattr->fileid = fattr->mounted_on_fileid;
56 return 1;
57}
58
48struct nfs_clone_mount { 59struct nfs_clone_mount {
49 const struct super_block *sb; 60 const struct super_block *sb;
50 const struct dentry *dentry; 61 const struct dentry *dentry;
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 426908809c97..0bafcc91c27f 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -30,6 +30,7 @@
30 */ 30 */
31 31
32#include <linux/nfs_fs.h> 32#include <linux/nfs_fs.h>
33#include <linux/nfs_page.h>
33 34
34#include "internal.h" 35#include "internal.h"
35#include "nfs4filelayout.h" 36#include "nfs4filelayout.h"
@@ -552,13 +553,18 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
552 __func__, nfl_util, fl->num_fh, fl->first_stripe_index, 553 __func__, nfl_util, fl->num_fh, fl->first_stripe_index,
553 fl->pattern_offset); 554 fl->pattern_offset);
554 555
555 if (!fl->num_fh) 556 /* Note that a zero value for num_fh is legal for STRIPE_SPARSE.
 557 * Further checking is done in filelayout_check_layout */
558 if (fl->num_fh < 0 || fl->num_fh >
559 max(NFS4_PNFS_MAX_STRIPE_CNT, NFS4_PNFS_MAX_MULTI_CNT))
556 goto out_err; 560 goto out_err;
557 561
558 fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *), 562 if (fl->num_fh > 0) {
559 gfp_flags); 563 fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
560 if (!fl->fh_array) 564 gfp_flags);
561 goto out_err; 565 if (!fl->fh_array)
566 goto out_err;
567 }
562 568
563 for (i = 0; i < fl->num_fh; i++) { 569 for (i = 0; i < fl->num_fh; i++) {
564 /* Do we want to use a mempool here? */ 570 /* Do we want to use a mempool here? */
@@ -661,8 +667,9 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
661 u64 p_stripe, r_stripe; 667 u64 p_stripe, r_stripe;
662 u32 stripe_unit; 668 u32 stripe_unit;
663 669
664 if (!pnfs_generic_pg_test(pgio, prev, req)) 670 if (!pnfs_generic_pg_test(pgio, prev, req) ||
665 return 0; 671 !nfs_generic_pg_test(pgio, prev, req))
672 return false;
666 673
667 if (!pgio->pg_lseg) 674 if (!pgio->pg_lseg)
668 return 1; 675 return 1;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index d2c4b59c896d..5879b23e0c99 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2265,12 +2265,14 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
2265 return nfs4_map_errors(status); 2265 return nfs4_map_errors(status);
2266} 2266}
2267 2267
2268static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
2268/* 2269/*
2269 * Get locations and (maybe) other attributes of a referral. 2270 * Get locations and (maybe) other attributes of a referral.
2270 * Note that we'll actually follow the referral later when 2271 * Note that we'll actually follow the referral later when
2271 * we detect fsid mismatch in inode revalidation 2272 * we detect fsid mismatch in inode revalidation
2272 */ 2273 */
2273static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct nfs_fattr *fattr, struct nfs_fh *fhandle) 2274static int nfs4_get_referral(struct inode *dir, const struct qstr *name,
2275 struct nfs_fattr *fattr, struct nfs_fh *fhandle)
2274{ 2276{
2275 int status = -ENOMEM; 2277 int status = -ENOMEM;
2276 struct page *page = NULL; 2278 struct page *page = NULL;
@@ -2288,15 +2290,16 @@ static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct
2288 goto out; 2290 goto out;
2289 /* Make sure server returned a different fsid for the referral */ 2291 /* Make sure server returned a different fsid for the referral */
2290 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) { 2292 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
2291 dprintk("%s: server did not return a different fsid for a referral at %s\n", __func__, name->name); 2293 dprintk("%s: server did not return a different fsid for"
2294 " a referral at %s\n", __func__, name->name);
2292 status = -EIO; 2295 status = -EIO;
2293 goto out; 2296 goto out;
2294 } 2297 }
2298 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
2299 nfs_fixup_referral_attributes(&locations->fattr);
2295 2300
2301 /* replace the lookup nfs_fattr with the locations nfs_fattr */
2296 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr)); 2302 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
2297 fattr->valid |= NFS_ATTR_FATTR_V4_REFERRAL;
2298 if (!fattr->mode)
2299 fattr->mode = S_IFDIR;
2300 memset(fhandle, 0, sizeof(struct nfs_fh)); 2303 memset(fhandle, 0, sizeof(struct nfs_fh));
2301out: 2304out:
2302 if (page) 2305 if (page)
@@ -4667,11 +4670,15 @@ static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
4667 return len; 4670 return len;
4668} 4671}
4669 4672
4673/*
4674 * nfs_fhget will use either the mounted_on_fileid or the fileid
4675 */
4670static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 4676static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
4671{ 4677{
4672 if (!((fattr->valid & NFS_ATTR_FATTR_FILEID) && 4678 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
4673 (fattr->valid & NFS_ATTR_FATTR_FSID) && 4679 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
4674 (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL))) 4680 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
4681 (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)))
4675 return; 4682 return;
4676 4683
4677 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 4684 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
@@ -4686,7 +4693,6 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
4686 struct nfs_server *server = NFS_SERVER(dir); 4693 struct nfs_server *server = NFS_SERVER(dir);
4687 u32 bitmask[2] = { 4694 u32 bitmask[2] = {
4688 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 4695 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
4689 [1] = FATTR4_WORD1_MOUNTED_ON_FILEID,
4690 }; 4696 };
4691 struct nfs4_fs_locations_arg args = { 4697 struct nfs4_fs_locations_arg args = {
4692 .dir_fh = NFS_FH(dir), 4698 .dir_fh = NFS_FH(dir),
@@ -4705,11 +4711,18 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
4705 int status; 4711 int status;
4706 4712
4707 dprintk("%s: start\n", __func__); 4713 dprintk("%s: start\n", __func__);
4714
4715 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
4716 * is not supported */
4717 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
4718 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
4719 else
4720 bitmask[0] |= FATTR4_WORD0_FILEID;
4721
4708 nfs_fattr_init(&fs_locations->fattr); 4722 nfs_fattr_init(&fs_locations->fattr);
4709 fs_locations->server = server; 4723 fs_locations->server = server;
4710 fs_locations->nlocations = 0; 4724 fs_locations->nlocations = 0;
4711 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4725 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4712 nfs_fixup_referral_attributes(&fs_locations->fattr);
4713 dprintk("%s: returned status = %d\n", __func__, status); 4726 dprintk("%s: returned status = %d\n", __func__, status);
4714 return status; 4727 return status;
4715} 4728}
@@ -5098,7 +5111,6 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
5098 if (mxresp_sz == 0) 5111 if (mxresp_sz == 0)
5099 mxresp_sz = NFS_MAX_FILE_IO_SIZE; 5112 mxresp_sz = NFS_MAX_FILE_IO_SIZE;
5100 /* Fore channel attributes */ 5113 /* Fore channel attributes */
5101 args->fc_attrs.headerpadsz = 0;
5102 args->fc_attrs.max_rqst_sz = mxrqst_sz; 5114 args->fc_attrs.max_rqst_sz = mxrqst_sz;
5103 args->fc_attrs.max_resp_sz = mxresp_sz; 5115 args->fc_attrs.max_resp_sz = mxresp_sz;
5104 args->fc_attrs.max_ops = NFS4_MAX_OPS; 5116 args->fc_attrs.max_ops = NFS4_MAX_OPS;
@@ -5111,7 +5123,6 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
5111 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 5123 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
5112 5124
5113 /* Back channel attributes */ 5125 /* Back channel attributes */
5114 args->bc_attrs.headerpadsz = 0;
5115 args->bc_attrs.max_rqst_sz = PAGE_SIZE; 5126 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
5116 args->bc_attrs.max_resp_sz = PAGE_SIZE; 5127 args->bc_attrs.max_resp_sz = PAGE_SIZE;
5117 args->bc_attrs.max_resp_sz_cached = 0; 5128 args->bc_attrs.max_resp_sz_cached = 0;
@@ -5131,8 +5142,6 @@ static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args
5131 struct nfs4_channel_attrs *sent = &args->fc_attrs; 5142 struct nfs4_channel_attrs *sent = &args->fc_attrs;
5132 struct nfs4_channel_attrs *rcvd = &session->fc_attrs; 5143 struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
5133 5144
5134 if (rcvd->headerpadsz > sent->headerpadsz)
5135 return -EINVAL;
5136 if (rcvd->max_resp_sz > sent->max_resp_sz) 5145 if (rcvd->max_resp_sz > sent->max_resp_sz)
5137 return -EINVAL; 5146 return -EINVAL;
5138 /* 5147 /*
@@ -5697,6 +5706,7 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
5697{ 5706{
5698 struct nfs4_layoutreturn *lrp = calldata; 5707 struct nfs4_layoutreturn *lrp = calldata;
5699 struct nfs_server *server; 5708 struct nfs_server *server;
5709 struct pnfs_layout_hdr *lo = NFS_I(lrp->args.inode)->layout;
5700 5710
5701 dprintk("--> %s\n", __func__); 5711 dprintk("--> %s\n", __func__);
5702 5712
@@ -5708,16 +5718,15 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
5708 nfs_restart_rpc(task, lrp->clp); 5718 nfs_restart_rpc(task, lrp->clp);
5709 return; 5719 return;
5710 } 5720 }
5721 spin_lock(&lo->plh_inode->i_lock);
5711 if (task->tk_status == 0) { 5722 if (task->tk_status == 0) {
5712 struct pnfs_layout_hdr *lo = NFS_I(lrp->args.inode)->layout;
5713
5714 if (lrp->res.lrs_present) { 5723 if (lrp->res.lrs_present) {
5715 spin_lock(&lo->plh_inode->i_lock);
5716 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); 5724 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
5717 spin_unlock(&lo->plh_inode->i_lock);
5718 } else 5725 } else
5719 BUG_ON(!list_empty(&lo->plh_segs)); 5726 BUG_ON(!list_empty(&lo->plh_segs));
5720 } 5727 }
5728 lo->plh_block_lgets--;
5729 spin_unlock(&lo->plh_inode->i_lock);
5721 dprintk("<-- %s\n", __func__); 5730 dprintk("<-- %s\n", __func__);
5722} 5731}
5723 5732
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index d869a5e5464b..6870bc61ceec 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -255,7 +255,7 @@ static int nfs4_stat_to_errno(int);
255#define decode_fs_locations_maxsz \ 255#define decode_fs_locations_maxsz \
256 (0) 256 (0)
257#define encode_secinfo_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz) 257#define encode_secinfo_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz)
258#define decode_secinfo_maxsz (op_decode_hdr_maxsz + 4 + (NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN))) 258#define decode_secinfo_maxsz (op_decode_hdr_maxsz + 1 + ((NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN)) / 4))
259 259
260#if defined(CONFIG_NFS_V4_1) 260#if defined(CONFIG_NFS_V4_1)
261#define NFS4_MAX_MACHINE_NAME_LEN (64) 261#define NFS4_MAX_MACHINE_NAME_LEN (64)
@@ -1725,7 +1725,7 @@ static void encode_create_session(struct xdr_stream *xdr,
1725 *p++ = cpu_to_be32(args->flags); /*flags */ 1725 *p++ = cpu_to_be32(args->flags); /*flags */
1726 1726
1727 /* Fore Channel */ 1727 /* Fore Channel */
1728 *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */ 1728 *p++ = cpu_to_be32(0); /* header padding size */
1729 *p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */ 1729 *p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */
1730 *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */ 1730 *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */
1731 *p++ = cpu_to_be32(max_resp_sz_cached); /* Max resp sz cached */ 1731 *p++ = cpu_to_be32(max_resp_sz_cached); /* Max resp sz cached */
@@ -1734,7 +1734,7 @@ static void encode_create_session(struct xdr_stream *xdr,
1734 *p++ = cpu_to_be32(0); /* rdmachannel_attrs */ 1734 *p++ = cpu_to_be32(0); /* rdmachannel_attrs */
1735 1735
1736 /* Back Channel */ 1736 /* Back Channel */
1737 *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */ 1737 *p++ = cpu_to_be32(0); /* header padding size */
1738 *p++ = cpu_to_be32(args->bc_attrs.max_rqst_sz); /* max req size */ 1738 *p++ = cpu_to_be32(args->bc_attrs.max_rqst_sz); /* max req size */
1739 *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz); /* max resp size */ 1739 *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz); /* max resp size */
1740 *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz_cached); /* Max resp sz cached */ 1740 *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz_cached); /* Max resp sz cached */
@@ -3098,7 +3098,7 @@ out_overflow:
3098 return -EIO; 3098 return -EIO;
3099} 3099}
3100 3100
3101static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap) 3101static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap, int32_t *res)
3102{ 3102{
3103 __be32 *p; 3103 __be32 *p;
3104 3104
@@ -3109,7 +3109,7 @@ static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap)
3109 if (unlikely(!p)) 3109 if (unlikely(!p))
3110 goto out_overflow; 3110 goto out_overflow;
3111 bitmap[0] &= ~FATTR4_WORD0_RDATTR_ERROR; 3111 bitmap[0] &= ~FATTR4_WORD0_RDATTR_ERROR;
3112 return -be32_to_cpup(p); 3112 *res = -be32_to_cpup(p);
3113 } 3113 }
3114 return 0; 3114 return 0;
3115out_overflow: 3115out_overflow:
@@ -4070,6 +4070,7 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
4070 int status; 4070 int status;
4071 umode_t fmode = 0; 4071 umode_t fmode = 0;
4072 uint32_t type; 4072 uint32_t type;
4073 int32_t err;
4073 4074
4074 status = decode_attr_type(xdr, bitmap, &type); 4075 status = decode_attr_type(xdr, bitmap, &type);
4075 if (status < 0) 4076 if (status < 0)
@@ -4095,13 +4096,12 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
4095 goto xdr_error; 4096 goto xdr_error;
4096 fattr->valid |= status; 4097 fattr->valid |= status;
4097 4098
4098 status = decode_attr_error(xdr, bitmap); 4099 err = 0;
4099 if (status == -NFS4ERR_WRONGSEC) { 4100 status = decode_attr_error(xdr, bitmap, &err);
4100 nfs_fixup_secinfo_attributes(fattr, fh);
4101 status = 0;
4102 }
4103 if (status < 0) 4101 if (status < 0)
4104 goto xdr_error; 4102 goto xdr_error;
4103 if (err == -NFS4ERR_WRONGSEC)
4104 nfs_fixup_secinfo_attributes(fattr, fh);
4105 4105
4106 status = decode_attr_filehandle(xdr, bitmap, fh); 4106 status = decode_attr_filehandle(xdr, bitmap, fh);
4107 if (status < 0) 4107 if (status < 0)
@@ -4997,12 +4997,14 @@ static int decode_chan_attrs(struct xdr_stream *xdr,
4997 struct nfs4_channel_attrs *attrs) 4997 struct nfs4_channel_attrs *attrs)
4998{ 4998{
4999 __be32 *p; 4999 __be32 *p;
5000 u32 nr_attrs; 5000 u32 nr_attrs, val;
5001 5001
5002 p = xdr_inline_decode(xdr, 28); 5002 p = xdr_inline_decode(xdr, 28);
5003 if (unlikely(!p)) 5003 if (unlikely(!p))
5004 goto out_overflow; 5004 goto out_overflow;
5005 attrs->headerpadsz = be32_to_cpup(p++); 5005 val = be32_to_cpup(p++); /* headerpadsz */
5006 if (val)
5007 return -EINVAL; /* no support for header padding yet */
5006 attrs->max_rqst_sz = be32_to_cpup(p++); 5008 attrs->max_rqst_sz = be32_to_cpup(p++);
5007 attrs->max_resp_sz = be32_to_cpup(p++); 5009 attrs->max_resp_sz = be32_to_cpup(p++);
5008 attrs->max_resp_sz_cached = be32_to_cpup(p++); 5010 attrs->max_resp_sz_cached = be32_to_cpup(p++);
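Since struct nfs4_channel_attrs loses its headerpadsz field, the decoder now reads the value into a local and fails the CREATE_SESSION reply outright if the server asks for padding. A user-space sketch of that word handling under stand-in types, with big-endian words as on the wire:

#include <arpa/inet.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct chan_attrs { uint32_t max_rqst_sz, max_resp_sz; };

static int decode_chan_attrs(const uint32_t *p, struct chan_attrs *attrs)
{
	uint32_t headerpadsz = ntohl(*p++);

	if (headerpadsz)
		return -EINVAL;		/* no support for header padding yet */
	attrs->max_rqst_sz = ntohl(*p++);
	attrs->max_resp_sz = ntohl(*p++);
	return 0;
}

int main(void)
{
	uint32_t wire[3] = { htonl(0), htonl(4096), htonl(4096) };
	struct chan_attrs attrs;

	printf("padding 0 -> %d\n", decode_chan_attrs(wire, &attrs));
	wire[0] = htonl(8);		/* server asks for padding: rejected */
	printf("padding 8 -> %d\n", decode_chan_attrs(wire, &attrs));
	return 0;
}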
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 9cf208df1f25..8ff2ea3f10ef 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -108,7 +108,6 @@ _dev_list_add(const struct nfs_server *nfss,
108 de = n; 108 de = n;
109 } 109 }
110 110
111 atomic_inc(&de->id_node.ref);
112 return de; 111 return de;
113} 112}
114 113
@@ -1001,6 +1000,9 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
1001 if (!pnfs_generic_pg_test(pgio, prev, req)) 1000 if (!pnfs_generic_pg_test(pgio, prev, req))
1002 return false; 1001 return false;
1003 1002
1003 if (pgio->pg_lseg == NULL)
1004 return true;
1005
1004 return pgio->pg_count + req->wb_bytes <= 1006 return pgio->pg_count + req->wb_bytes <=
1005 OBJIO_LSEG(pgio->pg_lseg)->max_io_size; 1007 OBJIO_LSEG(pgio->pg_lseg)->max_io_size;
1006} 1008}
diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
index dc3956c0de80..1d06f8e2adea 100644
--- a/fs/nfs/objlayout/objlayout.c
+++ b/fs/nfs/objlayout/objlayout.c
@@ -291,7 +291,7 @@ objlayout_read_done(struct objlayout_io_state *state, ssize_t status, bool sync)
291 struct nfs_read_data *rdata; 291 struct nfs_read_data *rdata;
292 292
293 state->status = status; 293 state->status = status;
294 dprintk("%s: Begin status=%ld eof=%d\n", __func__, status, eof); 294 dprintk("%s: Begin status=%zd eof=%d\n", __func__, status, eof);
295 rdata = state->rpcdata; 295 rdata = state->rpcdata;
296 rdata->task.tk_status = status; 296 rdata->task.tk_status = status;
297 if (status >= 0) { 297 if (status >= 0) {
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 7913961aff22..009855716286 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -204,7 +204,7 @@ nfs_wait_on_request(struct nfs_page *req)
204 TASK_UNINTERRUPTIBLE); 204 TASK_UNINTERRUPTIBLE);
205} 205}
206 206
207static bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req) 207bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
208{ 208{
209 /* 209 /*
210 * FIXME: ideally we should be able to coalesce all requests 210 * FIXME: ideally we should be able to coalesce all requests
@@ -218,6 +218,7 @@ static bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_p
218 218
219 return desc->pg_count + req->wb_bytes <= desc->pg_bsize; 219 return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
220} 220}
221EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
221 222
222/** 223/**
223 * nfs_pageio_init - initialise a page io descriptor 224 * nfs_pageio_init - initialise a page io descriptor
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 8c1309d852a6..29c0ca7fc347 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -634,14 +634,16 @@ _pnfs_return_layout(struct inode *ino)
634 634
635 spin_lock(&ino->i_lock); 635 spin_lock(&ino->i_lock);
636 lo = nfsi->layout; 636 lo = nfsi->layout;
637 if (!lo || !mark_matching_lsegs_invalid(lo, &tmp_list, NULL)) { 637 if (!lo) {
638 spin_unlock(&ino->i_lock); 638 spin_unlock(&ino->i_lock);
639 dprintk("%s: no layout segments to return\n", __func__); 639 dprintk("%s: no layout to return\n", __func__);
640 goto out; 640 return status;
641 } 641 }
642 stateid = nfsi->layout->plh_stateid; 642 stateid = nfsi->layout->plh_stateid;
643 /* Reference matched in nfs4_layoutreturn_release */ 643 /* Reference matched in nfs4_layoutreturn_release */
644 get_layout_hdr(lo); 644 get_layout_hdr(lo);
645 mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
646 lo->plh_block_lgets++;
645 spin_unlock(&ino->i_lock); 647 spin_unlock(&ino->i_lock);
646 pnfs_free_lseg_list(&tmp_list); 648 pnfs_free_lseg_list(&tmp_list);
647 649
@@ -650,6 +652,9 @@ _pnfs_return_layout(struct inode *ino)
650 lrp = kzalloc(sizeof(*lrp), GFP_KERNEL); 652 lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
651 if (unlikely(lrp == NULL)) { 653 if (unlikely(lrp == NULL)) {
652 status = -ENOMEM; 654 status = -ENOMEM;
655 set_bit(NFS_LAYOUT_RW_FAILED, &lo->plh_flags);
656 set_bit(NFS_LAYOUT_RO_FAILED, &lo->plh_flags);
657 put_layout_hdr(lo);
653 goto out; 658 goto out;
654 } 659 }
655 660
@@ -887,7 +892,7 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo,
887 ret = get_lseg(lseg); 892 ret = get_lseg(lseg);
888 break; 893 break;
889 } 894 }
890 if (cmp_layout(range, &lseg->pls_range) > 0) 895 if (lseg->pls_range.offset > range->offset)
891 break; 896 break;
892 } 897 }
893 898
@@ -1059,23 +1064,36 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
1059 gfp_flags = GFP_NOFS; 1064 gfp_flags = GFP_NOFS;
1060 } 1065 }
1061 1066
1062 if (pgio->pg_count == prev->wb_bytes) { 1067 if (pgio->pg_lseg == NULL) {
1068 if (pgio->pg_count != prev->wb_bytes)
1069 return true;
1063 /* This is the first coalesce call for a series of nfs_pages */ 1070 /* This is the first coalesce call for a series of nfs_pages */
1064 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 1071 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1065 prev->wb_context, 1072 prev->wb_context,
1066 req_offset(req), 1073 req_offset(prev),
1067 pgio->pg_count, 1074 pgio->pg_count,
1068 access_type, 1075 access_type,
1069 gfp_flags); 1076 gfp_flags);
1070 return true; 1077 if (pgio->pg_lseg == NULL)
1078 return true;
1071 } 1079 }
1072 1080
1073 if (pgio->pg_lseg && 1081 /*
1074 req_offset(req) > end_offset(pgio->pg_lseg->pls_range.offset, 1082 * Test if a nfs_page is fully contained in the pnfs_layout_range.
1075 pgio->pg_lseg->pls_range.length)) 1083 * Note that this test makes several assumptions:
1076 return false; 1084 * - that the previous nfs_page in the struct nfs_pageio_descriptor
1077 1085 * is known to lie within the range.
1078 return true; 1086 * - that the nfs_page being tested is known to be contiguous with the
1087 * previous nfs_page.
1088 * - Layout ranges are page aligned, so we only have to test the
1089 * start offset of the request.
1090 *
1091 * Please also note that 'end_offset' is actually the offset of the
1092 * first byte that lies outside the pnfs_layout_range. FIXME?
1093 *
1094 */
1095 return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
1096 pgio->pg_lseg->pls_range.length);
1079} 1097}
1080EXPORT_SYMBOL_GPL(pnfs_generic_pg_test); 1098EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
1081 1099
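The rewritten return condition reduces to a single comparison: a request may join the current coalesce run only if its start offset lies inside the layout segment, where end_offset() names the first byte outside the range, per the FIXME comment above. A small model under those assumptions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* First byte outside [start, start+len), clamped on overflow. */
static uint64_t end_offset(uint64_t start, uint64_t len)
{
	uint64_t end = start + len;

	return end >= start ? end : UINT64_MAX;
}

static bool req_fits_lseg(uint64_t req_off, uint64_t lseg_off, uint64_t lseg_len)
{
	return req_off < end_offset(lseg_off, lseg_len);
}

int main(void)
{
	printf("%d\n", req_fits_lseg(4096, 0, 8192));	/* 1: inside the range */
	printf("%d\n", req_fits_lseg(8192, 0, 8192));	/* 0: first byte outside */
	return 0;
}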
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 48d0a8e4d062..96bf4e6f45be 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -186,6 +186,7 @@ int pnfs_ld_read_done(struct nfs_read_data *);
186/* pnfs_dev.c */ 186/* pnfs_dev.c */
187struct nfs4_deviceid_node { 187struct nfs4_deviceid_node {
188 struct hlist_node node; 188 struct hlist_node node;
189 struct hlist_node tmpnode;
189 const struct pnfs_layoutdriver_type *ld; 190 const struct pnfs_layoutdriver_type *ld;
190 const struct nfs_client *nfs_client; 191 const struct nfs_client *nfs_client;
191 struct nfs4_deviceid deviceid; 192 struct nfs4_deviceid deviceid;
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
index c65e133ce9c0..f0f8e1e22f6c 100644
--- a/fs/nfs/pnfs_dev.c
+++ b/fs/nfs/pnfs_dev.c
@@ -174,6 +174,7 @@ nfs4_init_deviceid_node(struct nfs4_deviceid_node *d,
174 const struct nfs4_deviceid *id) 174 const struct nfs4_deviceid *id)
175{ 175{
176 INIT_HLIST_NODE(&d->node); 176 INIT_HLIST_NODE(&d->node);
177 INIT_HLIST_NODE(&d->tmpnode);
177 d->ld = ld; 178 d->ld = ld;
178 d->nfs_client = nfs_client; 179 d->nfs_client = nfs_client;
179 d->deviceid = *id; 180 d->deviceid = *id;
@@ -208,6 +209,7 @@ nfs4_insert_deviceid_node(struct nfs4_deviceid_node *new)
208 209
209 hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]); 210 hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
210 spin_unlock(&nfs4_deviceid_lock); 211 spin_unlock(&nfs4_deviceid_lock);
212 atomic_inc(&new->ref);
211 213
212 return new; 214 return new;
213} 215}
@@ -238,24 +240,29 @@ static void
238_deviceid_purge_client(const struct nfs_client *clp, long hash) 240_deviceid_purge_client(const struct nfs_client *clp, long hash)
239{ 241{
240 struct nfs4_deviceid_node *d; 242 struct nfs4_deviceid_node *d;
241 struct hlist_node *n, *next; 243 struct hlist_node *n;
242 HLIST_HEAD(tmp); 244 HLIST_HEAD(tmp);
243 245
246 spin_lock(&nfs4_deviceid_lock);
244 rcu_read_lock(); 247 rcu_read_lock();
245 hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node) 248 hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
246 if (d->nfs_client == clp && atomic_read(&d->ref)) { 249 if (d->nfs_client == clp && atomic_read(&d->ref)) {
247 hlist_del_init_rcu(&d->node); 250 hlist_del_init_rcu(&d->node);
248 hlist_add_head(&d->node, &tmp); 251 hlist_add_head(&d->tmpnode, &tmp);
249 } 252 }
250 rcu_read_unlock(); 253 rcu_read_unlock();
254 spin_unlock(&nfs4_deviceid_lock);
251 255
252 if (hlist_empty(&tmp)) 256 if (hlist_empty(&tmp))
253 return; 257 return;
254 258
255 synchronize_rcu(); 259 synchronize_rcu();
256 hlist_for_each_entry_safe(d, n, next, &tmp, node) 260 while (!hlist_empty(&tmp)) {
261 d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
262 hlist_del(&d->tmpnode);
257 if (atomic_dec_and_test(&d->ref)) 263 if (atomic_dec_and_test(&d->ref))
258 d->ld->free_deviceid_node(d); 264 d->ld->free_deviceid_node(d);
265 }
259} 266}
260 267
261void 268void
@@ -263,8 +270,8 @@ nfs4_deviceid_purge_client(const struct nfs_client *clp)
263{ 270{
264 long h; 271 long h;
265 272
266 spin_lock(&nfs4_deviceid_lock); 273 if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
274 return;
267 for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++) 275 for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
268 _deviceid_purge_client(clp, h); 276 _deviceid_purge_client(clp, h);
269 spin_unlock(&nfs4_deviceid_lock);
270} 277}
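An hlist_node can only link an object into one list at a time, which is why the purge path above grows a second hook (tmpnode): entries are unhooked from the RCU-visible cache via node and queued for freeing via tmpnode, so readers still traversing the cache list are never steered into the temporary one. A simplified, lock- and RCU-free userspace sketch of the two-hook idea (types invented for illustration):

    #include <stdio.h>
    #include <stddef.h>

    /* Two independent link fields, like 'node' and 'tmpnode' above. */
    struct dev_node {
        struct dev_node *cache_next;   /* lives on the global cache   */
        struct dev_node *tmp_next;     /* lives on a purge-local list */
        int id;
    };

    int main(void)
    {
        struct dev_node a = { .id = 1 }, b = { .id = 2 };
        struct dev_node *cache = &a, *tmp = NULL;

        a.cache_next = &b;

        /* Purge: unlink from the cache, then queue on tmp via the
         * second hook, so a concurrent reader following cache_next
         * is never redirected into the temporary list. */
        cache = a.cache_next;
        a.cache_next = NULL;
        a.tmp_next = tmp;
        tmp = &a;

        for (struct dev_node *d = tmp; d; d = d->tmp_next)
            printf("purging %d\n", d->id);
        printf("cache head is now %d\n", cache->id);
        return 0;
    }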
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 18b3e8975fe0..fbb2a5ef5817 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -82,6 +82,7 @@ config NFSD_V4
82 select NFSD_V3 82 select NFSD_V3
83 select FS_POSIX_ACL 83 select FS_POSIX_ACL
84 select SUNRPC_GSS 84 select SUNRPC_GSS
85 select CRYPTO
85 help 86 help
86 This option enables support in your system's NFS server for 87 This option enables support in your system's NFS server for
87 version 4 of the NFS protocol (RFC 3530). 88 version 4 of the NFS protocol (RFC 3530).
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 1f5eae40f34e..2b1449dd2f49 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -13,6 +13,7 @@
13#include <linux/lockd/lockd.h> 13#include <linux/lockd/lockd.h>
14#include <linux/sunrpc/clnt.h> 14#include <linux/sunrpc/clnt.h>
15#include <linux/sunrpc/gss_api.h> 15#include <linux/sunrpc/gss_api.h>
16#include <linux/sunrpc/gss_krb5_enctypes.h>
16 17
17#include "idmap.h" 18#include "idmap.h"
18#include "nfsd.h" 19#include "nfsd.h"
@@ -189,18 +190,10 @@ static struct file_operations export_features_operations = {
189 .release = single_release, 190 .release = single_release,
190}; 191};
191 192
192#ifdef CONFIG_SUNRPC_GSS 193#if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE)
193static int supported_enctypes_show(struct seq_file *m, void *v) 194static int supported_enctypes_show(struct seq_file *m, void *v)
194{ 195{
195 struct gss_api_mech *k5mech; 196 seq_printf(m, KRB5_SUPPORTED_ENCTYPES);
196
197 k5mech = gss_mech_get_by_name("krb5");
198 if (k5mech == NULL)
199 goto out;
200 if (k5mech->gm_upcall_enctypes != NULL)
201 seq_printf(m, k5mech->gm_upcall_enctypes);
202 gss_mech_put(k5mech);
203out:
204 return 0; 197 return 0;
205} 198}
206 199
@@ -215,7 +208,7 @@ static struct file_operations supported_enctypes_ops = {
215 .llseek = seq_lseek, 208 .llseek = seq_lseek,
216 .release = single_release, 209 .release = single_release,
217}; 210};
218#endif /* CONFIG_SUNRPC_GSS */ 211#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */
219 212
220extern int nfsd_pool_stats_open(struct inode *inode, struct file *file); 213extern int nfsd_pool_stats_open(struct inode *inode, struct file *file);
221extern int nfsd_pool_stats_release(struct inode *inode, struct file *file); 214extern int nfsd_pool_stats_release(struct inode *inode, struct file *file);
@@ -1427,9 +1420,9 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
1427 [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR}, 1420 [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR},
1428 [NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO}, 1421 [NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO},
1429 [NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO}, 1422 [NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO},
1430#ifdef CONFIG_SUNRPC_GSS 1423#if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE)
1431 [NFSD_SupportedEnctypes] = {"supported_krb5_enctypes", &supported_enctypes_ops, S_IRUGO}, 1424 [NFSD_SupportedEnctypes] = {"supported_krb5_enctypes", &supported_enctypes_ops, S_IRUGO},
1432#endif /* CONFIG_SUNRPC_GSS */ 1425#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */
1433#ifdef CONFIG_NFSD_V4 1426#ifdef CONFIG_NFSD_V4
1434 [NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR}, 1427 [NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
1435 [NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR}, 1428 [NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR},
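The #if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE) change follows the usual Kconfig convention: building a tristate option as =m defines CONFIG_FOO_MODULE rather than CONFIG_FOO, so code meant to exist in both built-in and modular configurations has to test both macros. A toy illustration with an invented option name:

    #include <stdio.h>

    /* Pretend Kconfig set the feature to =m: */
    #define CONFIG_FROB_MODULE 1

    #if defined(CONFIG_FROB) || defined(CONFIG_FROB_MODULE)
    #define HAVE_FROB 1
    #else
    #define HAVE_FROB 0
    #endif

    int main(void)
    {
        printf("frob support compiled in: %d\n", HAVE_FROB);
        return 0;
    }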
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index d5718273bb32..fd0acca5370a 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -696,7 +696,15 @@ nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *suppor
696} 696}
697#endif /* CONFIG_NFSD_V3 */ 697#endif /* CONFIG_NFSD_V3 */
698 698
699static int nfsd_open_break_lease(struct inode *inode, int access)
700{
701 unsigned int mode;
699 702
703 if (access & NFSD_MAY_NOT_BREAK_LEASE)
704 return 0;
705 mode = (access & NFSD_MAY_WRITE) ? O_WRONLY : O_RDONLY;
706 return break_lease(inode, mode | O_NONBLOCK);
707}
700 708
701/* 709/*
702 * Open an existing file or directory. 710 * Open an existing file or directory.
@@ -744,12 +752,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
744 if (!inode->i_fop) 752 if (!inode->i_fop)
745 goto out; 753 goto out;
746 754
747 /* 755 host_err = nfsd_open_break_lease(inode, access);
748 * Check to see if there are any leases on this file.
749 * This may block while leases are broken.
750 */
751 if (!(access & NFSD_MAY_NOT_BREAK_LEASE))
752 host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
753 if (host_err) /* NOMEM or WOULDBLOCK */ 756 if (host_err) /* NOMEM or WOULDBLOCK */
754 goto out_nfserr; 757 goto out_nfserr;
755 758
@@ -1660,8 +1663,10 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
1660 if (!dold->d_inode) 1663 if (!dold->d_inode)
1661 goto out_drop_write; 1664 goto out_drop_write;
1662 host_err = nfsd_break_lease(dold->d_inode); 1665 host_err = nfsd_break_lease(dold->d_inode);
1663 if (host_err) 1666 if (host_err) {
1667 err = nfserrno(host_err);
1664 goto out_drop_write; 1668 goto out_drop_write;
1669 }
1665 host_err = vfs_link(dold, dirp, dnew); 1670 host_err = vfs_link(dold, dirp, dnew);
1666 if (!host_err) { 1671 if (!host_err) {
1667 err = nfserrno(commit_metadata(ffhp)); 1672 err = nfserrno(commit_metadata(ffhp));
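Factoring the lease handling into nfsd_open_break_lease() isolates the access-to-open-mode mapping: write access breaks leases as O_WRONLY, everything else as O_RDONLY, and O_NONBLOCK makes a contended break fail rather than sleep. A hedged userspace rendering of just that mapping, using placeholder flag values rather than nfsd's real ones:

    #include <stdio.h>
    #include <fcntl.h>

    #define MAY_WRITE           0x1   /* placeholder for NFSD_MAY_WRITE */
    #define MAY_NOT_BREAK_LEASE 0x2   /* placeholder flag               */

    static int lease_break_mode(int access, int *skip)
    {
        *skip = !!(access & MAY_NOT_BREAK_LEASE);
        return ((access & MAY_WRITE) ? O_WRONLY : O_RDONLY) | O_NONBLOCK;
    }

    int main(void)
    {
        int skip;
        int mode = lease_break_mode(MAY_WRITE, &skip);

        printf("skip=%d write-mode has O_WRONLY: %d\n",
               skip, !!(mode & O_WRONLY));
        return 0;
    }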
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index b954878ad6ce..b9b45fc2903e 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -801,12 +801,7 @@ out_err:
801 801
802int nilfs_permission(struct inode *inode, int mask, unsigned int flags) 802int nilfs_permission(struct inode *inode, int mask, unsigned int flags)
803{ 803{
804 struct nilfs_root *root; 804 struct nilfs_root *root = NILFS_I(inode)->i_root;
805
806 if (flags & IPERM_FLAG_RCU)
807 return -ECHILD;
808
809 root = NILFS_I(inode)->i_root;
810 if ((mask & MAY_WRITE) && root && 805 if ((mask & MAY_WRITE) && root &&
811 root->cno != NILFS_CPTREE_CURRENT_CNO) 806 root->cno != NILFS_CPTREE_CURRENT_CNO)
812 return -EROFS; /* snapshot is not writable */ 807 return -EROFS; /* snapshot is not writable */
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index d738a7e493dd..2c6d95257a4d 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -4,7 +4,6 @@
4 * Released under GPL v2. 4 * Released under GPL v2.
5 */ 5 */
6 6
7#include <linux/version.h>
8#include <linux/module.h> 7#include <linux/module.h>
9#include <linux/fs.h> 8#include <linux/fs.h>
10#include <linux/buffer_head.h> 9#include <linux/buffer_head.h>
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 14def991d9dd..8a84210ca080 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2169,11 +2169,7 @@ static const struct file_operations proc_fd_operations = {
2169 */ 2169 */
2170static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags) 2170static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
2171{ 2171{
2172 int rv; 2172 int rv = generic_permission(inode, mask, flags, NULL);
2173
2174 if (flags & IPERM_FLAG_RCU)
2175 return -ECHILD;
2176 rv = generic_permission(inode, mask, flags, NULL);
2177 if (rv == 0) 2173 if (rv == 0)
2178 return 0; 2174 return 0;
2179 if (task_pid(current) == proc_pid(inode)) 2175 if (task_pid(current) == proc_pid(inode))
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index f50133c11c24..d167de365a8d 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -304,9 +304,6 @@ static int proc_sys_permission(struct inode *inode, int mask,unsigned int flags)
304 struct ctl_table *table; 304 struct ctl_table *table;
305 int error; 305 int error;
306 306
307 if (flags & IPERM_FLAG_RCU)
308 return -ECHILD;
309
310 /* Executable files are not allowed under /proc/sys/ */ 307 /* Executable files are not allowed under /proc/sys/ */
311 if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) 308 if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))
312 return -EACCES; 309 return -EACCES;
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index e8a62f41b458..d78089690965 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -954,8 +954,6 @@ static int xattr_mount_check(struct super_block *s)
954 954
955int reiserfs_permission(struct inode *inode, int mask, unsigned int flags) 955int reiserfs_permission(struct inode *inode, int mask, unsigned int flags)
956{ 956{
957 if (flags & IPERM_FLAG_RCU)
958 return -ECHILD;
959 /* 957 /*
960 * We don't do permission checks on the internal objects. 958 * We don't do permission checks on the internal objects.
961 * Permissions are determined by the "owning" object. 959 * Permissions are determined by the "owning" object.
diff --git a/fs/timerfd.c b/fs/timerfd.c
index f67acbdda5e8..dffeb3795af1 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -61,7 +61,9 @@ static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
61 61
62/* 62/*
63 * Called when the clock was set to cancel the timers in the cancel 63 * Called when the clock was set to cancel the timers in the cancel
64 * list. 64 * list. This will wake up processes waiting on these timers. The
65 * wake-up requires ctx->ticks to be non zero, therefore we increment
66 * it before calling wake_up_locked().
65 */ 67 */
66void timerfd_clock_was_set(void) 68void timerfd_clock_was_set(void)
67{ 69{
@@ -76,6 +78,7 @@ void timerfd_clock_was_set(void)
76 spin_lock_irqsave(&ctx->wqh.lock, flags); 78 spin_lock_irqsave(&ctx->wqh.lock, flags);
77 if (ctx->moffs.tv64 != moffs.tv64) { 79 if (ctx->moffs.tv64 != moffs.tv64) {
78 ctx->moffs.tv64 = KTIME_MAX; 80 ctx->moffs.tv64 = KTIME_MAX;
81 ctx->ticks++;
79 wake_up_locked(&ctx->wqh); 82 wake_up_locked(&ctx->wqh);
80 } 83 }
81 spin_unlock_irqrestore(&ctx->wqh.lock, flags); 84 spin_unlock_irqrestore(&ctx->wqh.lock, flags);
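The ctx->ticks++ inserted before wake_up_locked() follows directly from the comment above it: a timerfd reader sleeps until the tick count is non-zero, so a wake-up with ticks still zero would only put it straight back to sleep. The ordering is the standard "make the condition true, then wake the waiter" pattern; a minimal pthread analogue (compile with -lpthread; names illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static unsigned long ticks;

    static void *reader(void *arg)
    {
        pthread_mutex_lock(&lock);
        while (ticks == 0)                 /* the wake-up condition */
            pthread_cond_wait(&cond, &lock);
        printf("woke with ticks=%lu\n", ticks);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, reader, NULL);
        pthread_mutex_lock(&lock);
        ticks++;                           /* set the condition first... */
        pthread_cond_signal(&cond);        /* ...then wake, as above     */
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        return 0;
    }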
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 8c892c2d5300..529be0582029 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2146,6 +2146,7 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
2146 if (IS_ERR(sb)) { 2146 if (IS_ERR(sb)) {
2147 err = PTR_ERR(sb); 2147 err = PTR_ERR(sb);
2148 kfree(c); 2148 kfree(c);
2149 goto out_close;
2149 } 2150 }
2150 2151
2151 if (sb->s_root) { 2152 if (sb->s_root) {
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index c86375378810..01d2072fb6d4 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -490,6 +490,13 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
490 args.whichfork = XFS_ATTR_FORK; 490 args.whichfork = XFS_ATTR_FORK;
491 491
492 /* 492 /*
493 * we have no control over the attribute names that userspace passes us
494 * to remove, so we have to allow the name lookup prior to attribute
495 * removal to fail.
496 */
497 args.op_flags = XFS_DA_OP_OKNOENT;
498
499 /*
493 * Attach the dquots to the inode. 500 * Attach the dquots to the inode.
494 */ 501 */
495 error = xfs_qm_dqattach(dp, 0); 502 error = xfs_qm_dqattach(dp, 0);
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index cb9b6d1469f7..3631783b2b53 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -253,16 +253,21 @@ xfs_iget_cache_hit(
253 rcu_read_lock(); 253 rcu_read_lock();
254 spin_lock(&ip->i_flags_lock); 254 spin_lock(&ip->i_flags_lock);
255 255
256 ip->i_flags &= ~XFS_INEW; 256 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
257 ip->i_flags |= XFS_IRECLAIMABLE; 257 ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
258 __xfs_inode_set_reclaim_tag(pag, ip);
259 trace_xfs_iget_reclaim_fail(ip); 258 trace_xfs_iget_reclaim_fail(ip);
260 goto out_error; 259 goto out_error;
261 } 260 }
262 261
263 spin_lock(&pag->pag_ici_lock); 262 spin_lock(&pag->pag_ici_lock);
264 spin_lock(&ip->i_flags_lock); 263 spin_lock(&ip->i_flags_lock);
265 ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM); 264
265 /*
266 * Clear the per-lifetime state in the inode as we are now
267 * effectively a new inode and need to return to the initial
268 * state before reuse occurs.
269 */
270 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
266 ip->i_flags |= XFS_INEW; 271 ip->i_flags |= XFS_INEW;
267 __xfs_inode_clear_reclaim_tag(mp, pag, ip); 272 __xfs_inode_clear_reclaim_tag(mp, pag, ip);
268 inode->i_state = I_NEW; 273 inode->i_state = I_NEW;
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 3ae6d58e5473..964cfea77686 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -384,6 +384,16 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
384#define XFS_IDIRTY_RELEASE 0x0040 /* dirty release already seen */ 384#define XFS_IDIRTY_RELEASE 0x0040 /* dirty release already seen */
385 385
386/* 386/*
387 * Per-lifetime flags need to be reset when re-using a reclaimable inode during
388 * inode lookup. This prevents unintended behaviour on the new inode from
389 * occurring.
390 */
391#define XFS_IRECLAIM_RESET_FLAGS \
392 (XFS_IRECLAIMABLE | XFS_IRECLAIM | \
393 XFS_IDIRTY_RELEASE | XFS_ITRUNCATED | \
394 XFS_IFILESTREAM)
395
396/*
387 * Flags for inode locking. 397 * Flags for inode locking.
388 * Bit ranges: 1<<1 - 1<<16-1 -- iolock/ilock modes (bitfield) 398 * Bit ranges: 1<<1 - 1<<16-1 -- iolock/ilock modes (bitfield)
389 * 1<<16 - 1<<32-1 -- lockdep annotation (integers) 399 * 1<<16 - 1<<32-1 -- lockdep annotation (integers)
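Collecting the per-lifetime bits into XFS_IRECLAIM_RESET_FLAGS turns inode reuse into a single and-not of one mask, rather than a hand-maintained series of individual clears that can drift out of sync as flags are added. The idiom, sketched with made-up flag values:

    #include <stdio.h>

    #define F_RECLAIMABLE   0x01
    #define F_RECLAIM       0x02
    #define F_DIRTY_RELEASE 0x04
    #define F_TRUNCATED     0x08
    #define F_RESET_FLAGS   (F_RECLAIMABLE | F_RECLAIM | \
                             F_DIRTY_RELEASE | F_TRUNCATED)

    int main(void)
    {
        unsigned int flags = F_RECLAIM | F_TRUNCATED | 0x40 /* unrelated */;

        flags &= ~F_RESET_FLAGS;   /* one clear on reuse, as in the hunk */
        printf("remaining flags: %#x\n", flags);
        return 0;
    }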
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index b7a5fe7c52c8..619720705bc6 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -960,8 +960,11 @@ xfs_release(
960 * be exposed to that problem. 960 * be exposed to that problem.
961 */ 961 */
962 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED); 962 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
963 if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0) 963 if (truncated) {
964 xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE); 964 xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
965 if (VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
966 xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
967 }
965 } 968 }
966 969
967 if (ip->i_d.di_nlink == 0) 970 if (ip->i_d.di_nlink == 0)
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 2a7cea53ca0d..6395692b2e7a 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -167,7 +167,7 @@ enum rq_flag_bits {
167 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) 167 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
168#define REQ_COMMON_MASK \ 168#define REQ_COMMON_MASK \
169 (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \ 169 (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \
170 REQ_NOIDLE | REQ_FLUSH | REQ_FUA) 170 REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
171#define REQ_CLONE_MASK REQ_COMMON_MASK 171#define REQ_CLONE_MASK REQ_COMMON_MASK
172 172
173#define REQ_RAHEAD (1 << __REQ_RAHEAD) 173#define REQ_RAHEAD (1 << __REQ_RAHEAD)
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index b22fb0d3db0f..8c7c2de7631a 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -169,7 +169,8 @@ extern void blk_trace_shutdown(struct request_queue *);
169extern int do_blk_trace_setup(struct request_queue *q, char *name, 169extern int do_blk_trace_setup(struct request_queue *q, char *name,
170 dev_t dev, struct block_device *bdev, 170 dev_t dev, struct block_device *bdev,
171 struct blk_user_trace_setup *buts); 171 struct blk_user_trace_setup *buts);
172extern void __trace_note_message(struct blk_trace *, const char *fmt, ...); 172extern __attribute__((format(printf, 2, 3)))
173void __trace_note_message(struct blk_trace *, const char *fmt, ...);
173 174
174/** 175/**
175 * blk_add_trace_msg - Add a (simple) message to the blktrace stream 176 * blk_add_trace_msg - Add a (simple) message to the blktrace stream
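The format(printf, 2, 3) attribute added to __trace_note_message() tells the compiler that argument 2 is a printf-style format string consumed by the variadic arguments starting at position 3, so mismatched format strings become compile-time warnings instead of runtime corruption. A small stand-alone demonstration (GCC/Clang, compile with -Wformat):

    #include <stdarg.h>
    #include <stdio.h>

    __attribute__((format(printf, 2, 3)))
    static void note(int chan, const char *fmt, ...)
    {
        va_list ap;

        va_start(ap, fmt);
        printf("[%d] ", chan);
        vprintf(fmt, ap);
        va_end(ap);
    }

    int main(void)
    {
        note(1, "%s took %d ms\n", "probe", 42);
        /* note(1, "%s\n", 42);  <- would now warn: %s vs int */
        return 0;
    }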
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index d4646b48dc4a..18a1baf31f2d 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -188,6 +188,7 @@ struct clocksource {
188#ifdef CONFIG_CLOCKSOURCE_WATCHDOG 188#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
189 /* Watchdog related data, used by the framework */ 189 /* Watchdog related data, used by the framework */
190 struct list_head wd_list; 190 struct list_head wd_list;
191 cycle_t cs_last;
191 cycle_t wd_last; 192 cycle_t wd_last;
192#endif 193#endif
193} ____cacheline_aligned; 194} ____cacheline_aligned;
diff --git a/include/linux/device.h b/include/linux/device.h
index c66111affca9..553fd37b173b 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -654,13 +654,13 @@ static inline int device_is_registered(struct device *dev)
654 654
655static inline void device_enable_async_suspend(struct device *dev) 655static inline void device_enable_async_suspend(struct device *dev)
656{ 656{
657 if (!dev->power.in_suspend) 657 if (!dev->power.is_prepared)
658 dev->power.async_suspend = true; 658 dev->power.async_suspend = true;
659} 659}
660 660
661static inline void device_disable_async_suspend(struct device *dev) 661static inline void device_disable_async_suspend(struct device *dev)
662{ 662{
663 if (!dev->power.in_suspend) 663 if (!dev->power.is_prepared)
664 dev->power.async_suspend = false; 664 dev->power.async_suspend = false;
665} 665}
666 666
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
index 0b0d9c39ed67..7aad1f440867 100644
--- a/include/linux/device_cgroup.h
+++ b/include/linux/device_cgroup.h
@@ -2,8 +2,16 @@
2#include <linux/fs.h> 2#include <linux/fs.h>
3 3
4#ifdef CONFIG_CGROUP_DEVICE 4#ifdef CONFIG_CGROUP_DEVICE
5extern int devcgroup_inode_permission(struct inode *inode, int mask); 5extern int __devcgroup_inode_permission(struct inode *inode, int mask);
6extern int devcgroup_inode_mknod(int mode, dev_t dev); 6extern int devcgroup_inode_mknod(int mode, dev_t dev);
7static inline int devcgroup_inode_permission(struct inode *inode, int mask)
8{
9 if (likely(!inode->i_rdev))
10 return 0;
11 if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
12 return 0;
13 return __devcgroup_inode_permission(inode, mask);
14}
7#else 15#else
8static inline int devcgroup_inode_permission(struct inode *inode, int mask) 16static inline int devcgroup_inode_permission(struct inode *inode, int mask)
9{ return 0; } 17{ return 0; }
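The device_cgroup.h change splits the permission check into an inline fast path over the new out-of-line __devcgroup_inode_permission(): the overwhelmingly common cases (no device number, not a block or char node) are answered by two inlined tests before any function-call cost is paid. The shape of that pattern, detached from the kernel types:

    #include <stdio.h>

    static int expensive_check(int dev)
    {
        printf("slow path for dev %d\n", dev);
        return 0;
    }

    /* Inline fast path: bail out cheaply in the common case. */
    static inline int permission(int rdev, int is_device_node)
    {
        if (!rdev)               /* most inodes: no device number */
            return 0;
        if (!is_device_node)     /* not block/char: nothing to do */
            return 0;
        return expensive_check(rdev);
    }

    int main(void)
    {
        printf("%d %d\n", permission(0, 0), permission(8, 1));
        return 0;
    }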
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 1c777878f1ea..6e73e2e9ae33 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -744,7 +744,7 @@ struct inode {
744 744
745 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ 745 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
746 unsigned int i_flags; 746 unsigned int i_flags;
747 unsigned int i_state; 747 unsigned long i_state;
748#ifdef CONFIG_SECURITY 748#ifdef CONFIG_SECURITY
749 void *i_security; 749 void *i_security;
750#endif 750#endif
diff --git a/include/linux/input/sh_keysc.h b/include/linux/input/sh_keysc.h
index 649dc7f12925..5d253cd93691 100644
--- a/include/linux/input/sh_keysc.h
+++ b/include/linux/input/sh_keysc.h
@@ -1,7 +1,7 @@
1#ifndef __SH_KEYSC_H__ 1#ifndef __SH_KEYSC_H__
2#define __SH_KEYSC_H__ 2#define __SH_KEYSC_H__
3 3
4#define SH_KEYSC_MAXKEYS 49 4#define SH_KEYSC_MAXKEYS 64
5 5
6struct sh_keysc_info { 6struct sh_keysc_info {
7 enum { SH_KEYSC_MODE_1, SH_KEYSC_MODE_2, SH_KEYSC_MODE_3, 7 enum { SH_KEYSC_MODE_1, SH_KEYSC_MODE_2, SH_KEYSC_MODE_3,
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 6c12989839d9..f6efed0039ed 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -414,6 +414,7 @@ enum
414 TASKLET_SOFTIRQ, 414 TASKLET_SOFTIRQ,
415 SCHED_SOFTIRQ, 415 SCHED_SOFTIRQ,
416 HRTIMER_SOFTIRQ, 416 HRTIMER_SOFTIRQ,
417 RCU_SOFTIRQ, /* Preferably RCU should always be the last softirq */
417 418
418 NR_SOFTIRQS 419 NR_SOFTIRQS
419}; 420};
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 4ecb7b16b278..d087c2e7b2aa 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1024,7 +1024,6 @@ struct journal_s
1024 1024
1025/* Filing buffers */ 1025/* Filing buffers */
1026extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *); 1026extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
1027extern void __jbd2_journal_unfile_buffer(struct journal_head *);
1028extern void __jbd2_journal_refile_buffer(struct journal_head *); 1027extern void __jbd2_journal_refile_buffer(struct journal_head *);
1029extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *); 1028extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
1030extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int); 1029extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
@@ -1165,7 +1164,6 @@ extern void jbd2_journal_release_jbd_inode(journal_t *journal, struct jbd2_in
1165 */ 1164 */
1166struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh); 1165struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh);
1167struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh); 1166struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh);
1168void jbd2_journal_remove_journal_head(struct buffer_head *bh);
1169void jbd2_journal_put_journal_head(struct journal_head *jh); 1167void jbd2_journal_put_journal_head(struct journal_head *jh);
1170 1168
1171/* 1169/*
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index c928dac6cad0..9f7c3ebcbbad 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -647,6 +647,13 @@ typedef struct pglist_data {
647#endif 647#endif
648#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr)) 648#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr))
649 649
650#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
651
652#define node_end_pfn(nid) ({\
653 pg_data_t *__pgdat = NODE_DATA(nid);\
654 __pgdat->node_start_pfn + __pgdat->node_spanned_pages;\
655})
656
650#include <linux/memory_hotplug.h> 657#include <linux/memory_hotplug.h>
651 658
652extern struct mutex zonelists_mutex; 659extern struct mutex zonelists_mutex;
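The generic node_end_pfn() uses a GCC statement expression so that NODE_DATA(nid) is evaluated exactly once and both fields are read from the same pgdat. The same construct in self-contained userspace C (struct layout and data invented for the example):

    #include <stdio.h>

    struct pgdat { unsigned long start_pfn, spanned; };
    static struct pgdat nodes[2] = { { 0, 1024 }, { 1024, 512 } };

    #define NODE_DATA(nid) (&nodes[nid])
    #define node_start_pfn(nid) (NODE_DATA(nid)->start_pfn)
    #define node_end_pfn(nid) ({ \
        struct pgdat *__pgdat = NODE_DATA(nid); \
        __pgdat->start_pfn + __pgdat->spanned; \
    })

    int main(void)
    {
        for (int nid = 0; nid < 2; nid++)
            printf("node %d: pfns [%lu, %lu)\n", nid,
                   node_start_pfn(nid), node_end_pfn(nid));
        return 0;
    }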
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 3a34e80ae92f..25311b3bedf8 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -92,6 +92,9 @@ extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
92 struct nfs_page *); 92 struct nfs_page *);
93extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc); 93extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc);
94extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t); 94extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t);
95extern bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
96 struct nfs_page *prev,
97 struct nfs_page *req);
95extern int nfs_wait_on_request(struct nfs_page *); 98extern int nfs_wait_on_request(struct nfs_page *);
96extern void nfs_unlock_request(struct nfs_page *req); 99extern void nfs_unlock_request(struct nfs_page *req);
97extern int nfs_set_page_tag_locked(struct nfs_page *req); 100extern int nfs_set_page_tag_locked(struct nfs_page *req);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 5e8444a11adf..00848d86ffb2 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -158,7 +158,6 @@ struct nfs_seqid;
158 158
159/* nfs41 sessions channel attributes */ 159/* nfs41 sessions channel attributes */
160struct nfs4_channel_attrs { 160struct nfs4_channel_attrs {
161 u32 headerpadsz;
162 u32 max_rqst_sz; 161 u32 max_rqst_sz;
163 u32 max_resp_sz; 162 u32 max_resp_sz;
164 u32 max_resp_sz_cached; 163 u32 max_resp_sz_cached;
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 3160648ccdda..411e4f4be52b 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -425,7 +425,8 @@ struct dev_pm_info {
425 pm_message_t power_state; 425 pm_message_t power_state;
426 unsigned int can_wakeup:1; 426 unsigned int can_wakeup:1;
427 unsigned int async_suspend:1; 427 unsigned int async_suspend:1;
428 unsigned int in_suspend:1; /* Owned by the PM core */ 428 bool is_prepared:1; /* Owned by the PM core */
429 bool is_suspended:1; /* Ditto */
429 spinlock_t lock; 430 spinlock_t lock;
430#ifdef CONFIG_PM_SLEEP 431#ifdef CONFIG_PM_SLEEP
431 struct list_head entry; 432 struct list_head entry;
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 7ad824d510a2..8cc38d3bab0c 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -85,12 +85,15 @@ int smp_call_function_any(const struct cpumask *mask,
85 * Generic and arch helpers 85 * Generic and arch helpers
86 */ 86 */
87#ifdef CONFIG_USE_GENERIC_SMP_HELPERS 87#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
88void __init call_function_init(void);
88void generic_smp_call_function_single_interrupt(void); 89void generic_smp_call_function_single_interrupt(void);
89void generic_smp_call_function_interrupt(void); 90void generic_smp_call_function_interrupt(void);
90void ipi_call_lock(void); 91void ipi_call_lock(void);
91void ipi_call_unlock(void); 92void ipi_call_unlock(void);
92void ipi_call_lock_irq(void); 93void ipi_call_lock_irq(void);
93void ipi_call_unlock_irq(void); 94void ipi_call_unlock_irq(void);
95#else
96static inline void call_function_init(void) { }
94#endif 97#endif
95 98
96/* 99/*
@@ -134,7 +137,7 @@ static inline void smp_send_reschedule(int cpu) { }
134#define smp_prepare_boot_cpu() do {} while (0) 137#define smp_prepare_boot_cpu() do {} while (0)
135#define smp_call_function_many(mask, func, info, wait) \ 138#define smp_call_function_many(mask, func, info, wait) \
136 (up_smp_call_function(func, info)) 139 (up_smp_call_function(func, info))
137static inline void init_call_single_data(void) { } 140static inline void call_function_init(void) { }
138 141
139static inline int 142static inline int
140smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, 143smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
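call_function_init() is wired up in the usual config-stub style: a real declaration when CONFIG_USE_GENERIC_SMP_HELPERS is set and an empty static inline otherwise, so callers such as start_kernel() need no #ifdef of their own. A toy version of the idiom:

    #include <stdio.h>

    #define CONFIG_SMP_HELPERS 1   /* flip to 0 to take the stub */

    #if CONFIG_SMP_HELPERS
    static void call_function_init(void) { puts("init IPI queues"); }
    #else
    static inline void call_function_init(void) { }   /* no-op stub */
    #endif

    int main(void)
    {
        call_function_init();   /* caller never needs an #ifdef */
        return 0;
    }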
diff --git a/include/linux/sunrpc/gss_krb5_enctypes.h b/include/linux/sunrpc/gss_krb5_enctypes.h
new file mode 100644
index 000000000000..ec6234eee89c
--- /dev/null
+++ b/include/linux/sunrpc/gss_krb5_enctypes.h
@@ -0,0 +1,4 @@
1/*
2 * Dumb way to share this static piece of information with nfsd
3 */
4#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2"
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index f73c482ec9c6..fe2d8e6b923b 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -84,7 +84,8 @@ struct rpc_task {
84#endif 84#endif
85 unsigned char tk_priority : 2,/* Task priority */ 85 unsigned char tk_priority : 2,/* Task priority */
86 tk_garb_retry : 2, 86 tk_garb_retry : 2,
87 tk_cred_retry : 2; 87 tk_cred_retry : 2,
88 tk_rebind_retry : 2;
88}; 89};
89#define tk_xprt tk_client->cl_xprt 90#define tk_xprt tk_client->cl_xprt
90 91
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index c7c42e7acc31..5d4f8e586e32 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -307,6 +307,12 @@ static inline int nf_ct_is_untracked(const struct nf_conn *ct)
307 return test_bit(IPS_UNTRACKED_BIT, &ct->status); 307 return test_bit(IPS_UNTRACKED_BIT, &ct->status);
308} 308}
309 309
310/* Packet is received from loopback */
311static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
312{
313 return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
314}
315
310extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp); 316extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
311extern unsigned int nf_conntrack_htable_size; 317extern unsigned int nf_conntrack_htable_size;
312extern unsigned int nf_conntrack_max; 318extern unsigned int nf_conntrack_max;
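nf_is_loopback_packet() bundles three tests: the skb has a device, it was actually received on an interface (skb_iif non-zero), and that device is flagged IFF_LOOPBACK. A stand-alone rendering with stand-in skb and net_device types:

    #include <stdio.h>
    #include <stdbool.h>

    #define IFF_LOOPBACK 0x8   /* same value as the kernel's flag */

    struct net_device { unsigned int flags; };
    struct sk_buff { struct net_device *dev; int skb_iif; };

    static bool is_loopback_packet(const struct sk_buff *skb)
    {
        return skb->dev && skb->skb_iif &&
               (skb->dev->flags & IFF_LOOPBACK);
    }

    int main(void)
    {
        struct net_device lo = { .flags = IFF_LOOPBACK };
        struct sk_buff rx = { .dev = &lo, .skb_iif = 1 };
        struct sk_buff tx = { .dev = &lo, .skb_iif = 0 };

        printf("rx:%d tx:%d\n", is_loopback_packet(&rx),
                                is_loopback_packet(&tx));
        return 0;
    }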
diff --git a/include/sound/soc.h b/include/sound/soc.h
index f1de3e0c75bc..3a4bd3a3c68d 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -248,8 +248,7 @@ typedef int (*hw_write_t)(void *,const char* ,int);
248extern struct snd_ac97_bus_ops soc_ac97_ops; 248extern struct snd_ac97_bus_ops soc_ac97_ops;
249 249
250enum snd_soc_control_type { 250enum snd_soc_control_type {
251 SND_SOC_CUSTOM = 1, 251 SND_SOC_I2C = 1,
252 SND_SOC_I2C,
253 SND_SOC_SPI, 252 SND_SOC_SPI,
254}; 253};
255 254
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index e09592d2f916..5ce2b2f5f524 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -26,7 +26,7 @@ TRACE_EVENT(ext4_free_inode,
26 __field( umode_t, mode ) 26 __field( umode_t, mode )
27 __field( uid_t, uid ) 27 __field( uid_t, uid )
28 __field( gid_t, gid ) 28 __field( gid_t, gid )
29 __field( blkcnt_t, blocks ) 29 __field( __u64, blocks )
30 ), 30 ),
31 31
32 TP_fast_assign( 32 TP_fast_assign(
@@ -40,9 +40,8 @@ TRACE_EVENT(ext4_free_inode,
40 40
41 TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu", 41 TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu",
42 MAJOR(__entry->dev), MINOR(__entry->dev), 42 MAJOR(__entry->dev), MINOR(__entry->dev),
43 (unsigned long) __entry->ino, 43 (unsigned long) __entry->ino, __entry->mode,
44 __entry->mode, __entry->uid, __entry->gid, 44 __entry->uid, __entry->gid, __entry->blocks)
45 (unsigned long long) __entry->blocks)
46); 45);
47 46
48TRACE_EVENT(ext4_request_inode, 47TRACE_EVENT(ext4_request_inode,
@@ -178,7 +177,7 @@ TRACE_EVENT(ext4_begin_ordered_truncate,
178 TP_printk("dev %d,%d ino %lu new_size %lld", 177 TP_printk("dev %d,%d ino %lu new_size %lld",
179 MAJOR(__entry->dev), MINOR(__entry->dev), 178 MAJOR(__entry->dev), MINOR(__entry->dev),
180 (unsigned long) __entry->ino, 179 (unsigned long) __entry->ino,
181 (long long) __entry->new_size) 180 __entry->new_size)
182); 181);
183 182
184DECLARE_EVENT_CLASS(ext4__write_begin, 183DECLARE_EVENT_CLASS(ext4__write_begin,
@@ -204,7 +203,7 @@ DECLARE_EVENT_CLASS(ext4__write_begin,
204 __entry->flags = flags; 203 __entry->flags = flags;
205 ), 204 ),
206 205
207 TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u", 206 TP_printk("dev %d,%d ino %lu pos %lld len %u flags %u",
208 MAJOR(__entry->dev), MINOR(__entry->dev), 207 MAJOR(__entry->dev), MINOR(__entry->dev),
209 (unsigned long) __entry->ino, 208 (unsigned long) __entry->ino,
210 __entry->pos, __entry->len, __entry->flags) 209 __entry->pos, __entry->len, __entry->flags)
@@ -248,7 +247,7 @@ DECLARE_EVENT_CLASS(ext4__write_end,
248 __entry->copied = copied; 247 __entry->copied = copied;
249 ), 248 ),
250 249
251 TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u", 250 TP_printk("dev %d,%d ino %lu pos %lld len %u copied %u",
252 MAJOR(__entry->dev), MINOR(__entry->dev), 251 MAJOR(__entry->dev), MINOR(__entry->dev),
253 (unsigned long) __entry->ino, 252 (unsigned long) __entry->ino,
254 __entry->pos, __entry->len, __entry->copied) 253 __entry->pos, __entry->len, __entry->copied)
@@ -286,29 +285,6 @@ DEFINE_EVENT(ext4__write_end, ext4_da_write_end,
286 TP_ARGS(inode, pos, len, copied) 285 TP_ARGS(inode, pos, len, copied)
287); 286);
288 287
289TRACE_EVENT(ext4_writepage,
290 TP_PROTO(struct inode *inode, struct page *page),
291
292 TP_ARGS(inode, page),
293
294 TP_STRUCT__entry(
295 __field( dev_t, dev )
296 __field( ino_t, ino )
297 __field( pgoff_t, index )
298
299 ),
300
301 TP_fast_assign(
302 __entry->dev = inode->i_sb->s_dev;
303 __entry->ino = inode->i_ino;
304 __entry->index = page->index;
305 ),
306
307 TP_printk("dev %d,%d ino %lu page_index %lu",
308 MAJOR(__entry->dev), MINOR(__entry->dev),
309 (unsigned long) __entry->ino, __entry->index)
310);
311
312TRACE_EVENT(ext4_da_writepages, 288TRACE_EVENT(ext4_da_writepages,
313 TP_PROTO(struct inode *inode, struct writeback_control *wbc), 289 TP_PROTO(struct inode *inode, struct writeback_control *wbc),
314 290
@@ -341,7 +317,7 @@ TRACE_EVENT(ext4_da_writepages,
341 ), 317 ),
342 318
343 TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld " 319 TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld "
344 "range_start %llu range_end %llu sync_mode %d" 320 "range_start %lld range_end %lld sync_mode %d"
345 "for_kupdate %d range_cyclic %d writeback_index %lu", 321 "for_kupdate %d range_cyclic %d writeback_index %lu",
346 MAJOR(__entry->dev), MINOR(__entry->dev), 322 MAJOR(__entry->dev), MINOR(__entry->dev),
347 (unsigned long) __entry->ino, __entry->nr_to_write, 323 (unsigned long) __entry->ino, __entry->nr_to_write,
@@ -449,7 +425,14 @@ DECLARE_EVENT_CLASS(ext4__page_op,
449 TP_printk("dev %d,%d ino %lu page_index %lu", 425 TP_printk("dev %d,%d ino %lu page_index %lu",
450 MAJOR(__entry->dev), MINOR(__entry->dev), 426 MAJOR(__entry->dev), MINOR(__entry->dev),
451 (unsigned long) __entry->ino, 427 (unsigned long) __entry->ino,
452 __entry->index) 428 (unsigned long) __entry->index)
429);
430
431DEFINE_EVENT(ext4__page_op, ext4_writepage,
432
433 TP_PROTO(struct page *page),
434
435 TP_ARGS(page)
453); 436);
454 437
455DEFINE_EVENT(ext4__page_op, ext4_readpage, 438DEFINE_EVENT(ext4__page_op, ext4_readpage,
@@ -489,7 +472,7 @@ TRACE_EVENT(ext4_invalidatepage,
489 TP_printk("dev %d,%d ino %lu page_index %lu offset %lu", 472 TP_printk("dev %d,%d ino %lu page_index %lu offset %lu",
490 MAJOR(__entry->dev), MINOR(__entry->dev), 473 MAJOR(__entry->dev), MINOR(__entry->dev),
491 (unsigned long) __entry->ino, 474 (unsigned long) __entry->ino,
492 __entry->index, __entry->offset) 475 (unsigned long) __entry->index, __entry->offset)
493); 476);
494 477
495TRACE_EVENT(ext4_discard_blocks, 478TRACE_EVENT(ext4_discard_blocks,
@@ -562,12 +545,10 @@ DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_group_pa,
562); 545);
563 546
564TRACE_EVENT(ext4_mb_release_inode_pa, 547TRACE_EVENT(ext4_mb_release_inode_pa,
565 TP_PROTO(struct super_block *sb, 548 TP_PROTO(struct ext4_prealloc_space *pa,
566 struct inode *inode,
567 struct ext4_prealloc_space *pa,
568 unsigned long long block, unsigned int count), 549 unsigned long long block, unsigned int count),
569 550
570 TP_ARGS(sb, inode, pa, block, count), 551 TP_ARGS(pa, block, count),
571 552
572 TP_STRUCT__entry( 553 TP_STRUCT__entry(
573 __field( dev_t, dev ) 554 __field( dev_t, dev )
@@ -578,8 +559,8 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
578 ), 559 ),
579 560
580 TP_fast_assign( 561 TP_fast_assign(
581 __entry->dev = sb->s_dev; 562 __entry->dev = pa->pa_inode->i_sb->s_dev;
582 __entry->ino = inode->i_ino; 563 __entry->ino = pa->pa_inode->i_ino;
583 __entry->block = block; 564 __entry->block = block;
584 __entry->count = count; 565 __entry->count = count;
585 ), 566 ),
@@ -591,10 +572,9 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
591); 572);
592 573
593TRACE_EVENT(ext4_mb_release_group_pa, 574TRACE_EVENT(ext4_mb_release_group_pa,
594 TP_PROTO(struct super_block *sb, 575 TP_PROTO(struct ext4_prealloc_space *pa),
595 struct ext4_prealloc_space *pa),
596 576
597 TP_ARGS(sb, pa), 577 TP_ARGS(pa),
598 578
599 TP_STRUCT__entry( 579 TP_STRUCT__entry(
600 __field( dev_t, dev ) 580 __field( dev_t, dev )
@@ -604,7 +584,7 @@ TRACE_EVENT(ext4_mb_release_group_pa,
604 ), 584 ),
605 585
606 TP_fast_assign( 586 TP_fast_assign(
607 __entry->dev = sb->s_dev; 587 __entry->dev = pa->pa_inode->i_sb->s_dev;
608 __entry->pa_pstart = pa->pa_pstart; 588 __entry->pa_pstart = pa->pa_pstart;
609 __entry->pa_len = pa->pa_len; 589 __entry->pa_len = pa->pa_len;
610 ), 590 ),
@@ -666,10 +646,10 @@ TRACE_EVENT(ext4_request_blocks,
666 __field( ino_t, ino ) 646 __field( ino_t, ino )
667 __field( unsigned int, flags ) 647 __field( unsigned int, flags )
668 __field( unsigned int, len ) 648 __field( unsigned int, len )
669 __field( __u64, logical ) 649 __field( __u32, logical )
650 __field( __u32, lleft )
651 __field( __u32, lright )
670 __field( __u64, goal ) 652 __field( __u64, goal )
671 __field( __u64, lleft )
672 __field( __u64, lright )
673 __field( __u64, pleft ) 653 __field( __u64, pleft )
674 __field( __u64, pright ) 654 __field( __u64, pright )
675 ), 655 ),
@@ -687,17 +667,13 @@ TRACE_EVENT(ext4_request_blocks,
687 __entry->pright = ar->pright; 667 __entry->pright = ar->pright;
688 ), 668 ),
689 669
690 TP_printk("dev %d,%d ino %lu flags %u len %u lblk %llu goal %llu " 670 TP_printk("dev %d,%d ino %lu flags %u len %u lblk %u goal %llu "
691 "lleft %llu lright %llu pleft %llu pright %llu ", 671 "lleft %u lright %u pleft %llu pright %llu ",
692 MAJOR(__entry->dev), MINOR(__entry->dev), 672 MAJOR(__entry->dev), MINOR(__entry->dev),
693 (unsigned long) __entry->ino, 673 (unsigned long) __entry->ino, __entry->flags,
694 __entry->flags, __entry->len, 674 __entry->len, __entry->logical, __entry->goal,
695 (unsigned long long) __entry->logical, 675 __entry->lleft, __entry->lright, __entry->pleft,
696 (unsigned long long) __entry->goal, 676 __entry->pright)
697 (unsigned long long) __entry->lleft,
698 (unsigned long long) __entry->lright,
699 (unsigned long long) __entry->pleft,
700 (unsigned long long) __entry->pright)
701); 677);
702 678
703TRACE_EVENT(ext4_allocate_blocks, 679TRACE_EVENT(ext4_allocate_blocks,
@@ -711,10 +687,10 @@ TRACE_EVENT(ext4_allocate_blocks,
711 __field( __u64, block ) 687 __field( __u64, block )
712 __field( unsigned int, flags ) 688 __field( unsigned int, flags )
713 __field( unsigned int, len ) 689 __field( unsigned int, len )
714 __field( __u64, logical ) 690 __field( __u32, logical )
691 __field( __u32, lleft )
692 __field( __u32, lright )
715 __field( __u64, goal ) 693 __field( __u64, goal )
716 __field( __u64, lleft )
717 __field( __u64, lright )
718 __field( __u64, pleft ) 694 __field( __u64, pleft )
719 __field( __u64, pright ) 695 __field( __u64, pright )
720 ), 696 ),
@@ -733,17 +709,13 @@ TRACE_EVENT(ext4_allocate_blocks,
733 __entry->pright = ar->pright; 709 __entry->pright = ar->pright;
734 ), 710 ),
735 711
736 TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %llu " 712 TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %u "
737 "goal %llu lleft %llu lright %llu pleft %llu pright %llu", 713 "goal %llu lleft %u lright %u pleft %llu pright %llu",
738 MAJOR(__entry->dev), MINOR(__entry->dev), 714 MAJOR(__entry->dev), MINOR(__entry->dev),
739 (unsigned long) __entry->ino, 715 (unsigned long) __entry->ino, __entry->flags,
740 __entry->flags, __entry->len, __entry->block, 716 __entry->len, __entry->block, __entry->logical,
741 (unsigned long long) __entry->logical, 717 __entry->goal, __entry->lleft, __entry->lright,
742 (unsigned long long) __entry->goal, 718 __entry->pleft, __entry->pright)
743 (unsigned long long) __entry->lleft,
744 (unsigned long long) __entry->lright,
745 (unsigned long long) __entry->pleft,
746 (unsigned long long) __entry->pright)
747); 719);
748 720
749TRACE_EVENT(ext4_free_blocks, 721TRACE_EVENT(ext4_free_blocks,
@@ -755,10 +727,10 @@ TRACE_EVENT(ext4_free_blocks,
755 TP_STRUCT__entry( 727 TP_STRUCT__entry(
756 __field( dev_t, dev ) 728 __field( dev_t, dev )
757 __field( ino_t, ino ) 729 __field( ino_t, ino )
758 __field( umode_t, mode ) 730 __field( umode_t, mode )
759 __field( __u64, block ) 731 __field( __u64, block )
760 __field( unsigned long, count ) 732 __field( unsigned long, count )
761 __field( int, flags ) 733 __field( int, flags )
762 ), 734 ),
763 735
764 TP_fast_assign( 736 TP_fast_assign(
@@ -798,7 +770,7 @@ TRACE_EVENT(ext4_sync_file_enter,
798 __entry->parent = dentry->d_parent->d_inode->i_ino; 770 __entry->parent = dentry->d_parent->d_inode->i_ino;
799 ), 771 ),
800 772
801 TP_printk("dev %d,%d ino %ld parent %ld datasync %d ", 773 TP_printk("dev %d,%d ino %lu parent %lu datasync %d ",
802 MAJOR(__entry->dev), MINOR(__entry->dev), 774 MAJOR(__entry->dev), MINOR(__entry->dev),
803 (unsigned long) __entry->ino, 775 (unsigned long) __entry->ino,
804 (unsigned long) __entry->parent, __entry->datasync) 776 (unsigned long) __entry->parent, __entry->datasync)
@@ -821,7 +793,7 @@ TRACE_EVENT(ext4_sync_file_exit,
821 __entry->dev = inode->i_sb->s_dev; 793 __entry->dev = inode->i_sb->s_dev;
822 ), 794 ),
823 795
824 TP_printk("dev %d,%d ino %ld ret %d", 796 TP_printk("dev %d,%d ino %lu ret %d",
825 MAJOR(__entry->dev), MINOR(__entry->dev), 797 MAJOR(__entry->dev), MINOR(__entry->dev),
826 (unsigned long) __entry->ino, 798 (unsigned long) __entry->ino,
827 __entry->ret) 799 __entry->ret)
@@ -1005,7 +977,7 @@ DECLARE_EVENT_CLASS(ext4__mballoc,
1005 __entry->result_len = len; 977 __entry->result_len = len;
1006 ), 978 ),
1007 979
1008 TP_printk("dev %d,%d inode %lu extent %u/%d/%u ", 980 TP_printk("dev %d,%d inode %lu extent %u/%d/%d ",
1009 MAJOR(__entry->dev), MINOR(__entry->dev), 981 MAJOR(__entry->dev), MINOR(__entry->dev),
1010 (unsigned long) __entry->ino, 982 (unsigned long) __entry->ino,
1011 __entry->result_group, __entry->result_start, 983 __entry->result_group, __entry->result_start,
@@ -1093,7 +1065,7 @@ TRACE_EVENT(ext4_da_update_reserve_space,
1093 "allocated_meta_blocks %d", 1065 "allocated_meta_blocks %d",
1094 MAJOR(__entry->dev), MINOR(__entry->dev), 1066 MAJOR(__entry->dev), MINOR(__entry->dev),
1095 (unsigned long) __entry->ino, 1067 (unsigned long) __entry->ino,
1096 __entry->mode, (unsigned long long) __entry->i_blocks, 1068 __entry->mode, __entry->i_blocks,
1097 __entry->used_blocks, __entry->reserved_data_blocks, 1069 __entry->used_blocks, __entry->reserved_data_blocks,
1098 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks) 1070 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
1099); 1071);
@@ -1127,7 +1099,7 @@ TRACE_EVENT(ext4_da_reserve_space,
1127 "reserved_data_blocks %d reserved_meta_blocks %d", 1099 "reserved_data_blocks %d reserved_meta_blocks %d",
1128 MAJOR(__entry->dev), MINOR(__entry->dev), 1100 MAJOR(__entry->dev), MINOR(__entry->dev),
1129 (unsigned long) __entry->ino, 1101 (unsigned long) __entry->ino,
1130 __entry->mode, (unsigned long long) __entry->i_blocks, 1102 __entry->mode, __entry->i_blocks,
1131 __entry->md_needed, __entry->reserved_data_blocks, 1103 __entry->md_needed, __entry->reserved_data_blocks,
1132 __entry->reserved_meta_blocks) 1104 __entry->reserved_meta_blocks)
1133); 1105);
@@ -1164,7 +1136,7 @@ TRACE_EVENT(ext4_da_release_space,
1164 "allocated_meta_blocks %d", 1136 "allocated_meta_blocks %d",
1165 MAJOR(__entry->dev), MINOR(__entry->dev), 1137 MAJOR(__entry->dev), MINOR(__entry->dev),
1166 (unsigned long) __entry->ino, 1138 (unsigned long) __entry->ino,
1167 __entry->mode, (unsigned long long) __entry->i_blocks, 1139 __entry->mode, __entry->i_blocks,
1168 __entry->freed_blocks, __entry->reserved_data_blocks, 1140 __entry->freed_blocks, __entry->reserved_data_blocks,
1169 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks) 1141 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
1170); 1142);
@@ -1239,14 +1211,15 @@ TRACE_EVENT(ext4_direct_IO_enter,
1239 __entry->rw = rw; 1211 __entry->rw = rw;
1240 ), 1212 ),
1241 1213
1242 TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d", 1214 TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d",
1243 MAJOR(__entry->dev), MINOR(__entry->dev), 1215 MAJOR(__entry->dev), MINOR(__entry->dev),
1244 (unsigned long) __entry->ino, 1216 (unsigned long) __entry->ino,
1245 (unsigned long long) __entry->pos, __entry->len, __entry->rw) 1217 __entry->pos, __entry->len, __entry->rw)
1246); 1218);
1247 1219
1248TRACE_EVENT(ext4_direct_IO_exit, 1220TRACE_EVENT(ext4_direct_IO_exit,
1249 TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw, int ret), 1221 TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
1222 int rw, int ret),
1250 1223
1251 TP_ARGS(inode, offset, len, rw, ret), 1224 TP_ARGS(inode, offset, len, rw, ret),
1252 1225
@@ -1268,10 +1241,10 @@ TRACE_EVENT(ext4_direct_IO_exit,
1268 __entry->ret = ret; 1241 __entry->ret = ret;
1269 ), 1242 ),
1270 1243
1271 TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d ret %d", 1244 TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d ret %d",
1272 MAJOR(__entry->dev), MINOR(__entry->dev), 1245 MAJOR(__entry->dev), MINOR(__entry->dev),
1273 (unsigned long) __entry->ino, 1246 (unsigned long) __entry->ino,
1274 (unsigned long long) __entry->pos, __entry->len, 1247 __entry->pos, __entry->len,
1275 __entry->rw, __entry->ret) 1248 __entry->rw, __entry->ret)
1276); 1249);
1277 1250
@@ -1296,15 +1269,15 @@ TRACE_EVENT(ext4_fallocate_enter,
1296 __entry->mode = mode; 1269 __entry->mode = mode;
1297 ), 1270 ),
1298 1271
1299 TP_printk("dev %d,%d ino %ld pos %llu len %llu mode %d", 1272 TP_printk("dev %d,%d ino %lu pos %lld len %lld mode %d",
1300 MAJOR(__entry->dev), MINOR(__entry->dev), 1273 MAJOR(__entry->dev), MINOR(__entry->dev),
1301 (unsigned long) __entry->ino, 1274 (unsigned long) __entry->ino, __entry->pos,
1302 (unsigned long long) __entry->pos, 1275 __entry->len, __entry->mode)
1303 (unsigned long long) __entry->len, __entry->mode)
1304); 1276);
1305 1277
1306TRACE_EVENT(ext4_fallocate_exit, 1278TRACE_EVENT(ext4_fallocate_exit,
1307 TP_PROTO(struct inode *inode, loff_t offset, unsigned int max_blocks, int ret), 1279 TP_PROTO(struct inode *inode, loff_t offset,
1280 unsigned int max_blocks, int ret),
1308 1281
1309 TP_ARGS(inode, offset, max_blocks, ret), 1282 TP_ARGS(inode, offset, max_blocks, ret),
1310 1283
@@ -1312,7 +1285,7 @@ TRACE_EVENT(ext4_fallocate_exit,
1312 __field( ino_t, ino ) 1285 __field( ino_t, ino )
1313 __field( dev_t, dev ) 1286 __field( dev_t, dev )
1314 __field( loff_t, pos ) 1287 __field( loff_t, pos )
1315 __field( unsigned, blocks ) 1288 __field( unsigned int, blocks )
1316 __field( int, ret ) 1289 __field( int, ret )
1317 ), 1290 ),
1318 1291
@@ -1324,10 +1297,10 @@ TRACE_EVENT(ext4_fallocate_exit,
1324 __entry->ret = ret; 1297 __entry->ret = ret;
1325 ), 1298 ),
1326 1299
1327 TP_printk("dev %d,%d ino %ld pos %llu blocks %d ret %d", 1300 TP_printk("dev %d,%d ino %lu pos %lld blocks %u ret %d",
1328 MAJOR(__entry->dev), MINOR(__entry->dev), 1301 MAJOR(__entry->dev), MINOR(__entry->dev),
1329 (unsigned long) __entry->ino, 1302 (unsigned long) __entry->ino,
1330 (unsigned long long) __entry->pos, __entry->blocks, 1303 __entry->pos, __entry->blocks,
1331 __entry->ret) 1304 __entry->ret)
1332); 1305);
1333 1306
@@ -1350,7 +1323,7 @@ TRACE_EVENT(ext4_unlink_enter,
1350 __entry->dev = dentry->d_inode->i_sb->s_dev; 1323 __entry->dev = dentry->d_inode->i_sb->s_dev;
1351 ), 1324 ),
1352 1325
1353 TP_printk("dev %d,%d ino %ld size %lld parent %ld", 1326 TP_printk("dev %d,%d ino %lu size %lld parent %lu",
1354 MAJOR(__entry->dev), MINOR(__entry->dev), 1327 MAJOR(__entry->dev), MINOR(__entry->dev),
1355 (unsigned long) __entry->ino, __entry->size, 1328 (unsigned long) __entry->ino, __entry->size,
1356 (unsigned long) __entry->parent) 1329 (unsigned long) __entry->parent)
@@ -1373,7 +1346,7 @@ TRACE_EVENT(ext4_unlink_exit,
1373 __entry->ret = ret; 1346 __entry->ret = ret;
1374 ), 1347 ),
1375 1348
1376 TP_printk("dev %d,%d ino %ld ret %d", 1349 TP_printk("dev %d,%d ino %lu ret %d",
1377 MAJOR(__entry->dev), MINOR(__entry->dev), 1350 MAJOR(__entry->dev), MINOR(__entry->dev),
1378 (unsigned long) __entry->ino, 1351 (unsigned long) __entry->ino,
1379 __entry->ret) 1352 __entry->ret)
@@ -1387,7 +1360,7 @@ DECLARE_EVENT_CLASS(ext4__truncate,
1387 TP_STRUCT__entry( 1360 TP_STRUCT__entry(
1388 __field( ino_t, ino ) 1361 __field( ino_t, ino )
1389 __field( dev_t, dev ) 1362 __field( dev_t, dev )
1390 __field( blkcnt_t, blocks ) 1363 __field( __u64, blocks )
1391 ), 1364 ),
1392 1365
1393 TP_fast_assign( 1366 TP_fast_assign(
@@ -1396,9 +1369,9 @@ DECLARE_EVENT_CLASS(ext4__truncate,
1396 __entry->blocks = inode->i_blocks; 1369 __entry->blocks = inode->i_blocks;
1397 ), 1370 ),
1398 1371
1399 TP_printk("dev %d,%d ino %lu blocks %lu", 1372 TP_printk("dev %d,%d ino %lu blocks %llu",
1400 MAJOR(__entry->dev), MINOR(__entry->dev), 1373 MAJOR(__entry->dev), MINOR(__entry->dev),
1401 (unsigned long) __entry->ino, (unsigned long) __entry->blocks) 1374 (unsigned long) __entry->ino, __entry->blocks)
1402); 1375);
1403 1376
1404DEFINE_EVENT(ext4__truncate, ext4_truncate_enter, 1377DEFINE_EVENT(ext4__truncate, ext4_truncate_enter,
@@ -1417,7 +1390,7 @@ DEFINE_EVENT(ext4__truncate, ext4_truncate_exit,
1417 1390
1418DECLARE_EVENT_CLASS(ext4__map_blocks_enter, 1391DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
1419 TP_PROTO(struct inode *inode, ext4_lblk_t lblk, 1392 TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
1420 unsigned len, unsigned flags), 1393 unsigned int len, unsigned int flags),
1421 1394
1422 TP_ARGS(inode, lblk, len, flags), 1395 TP_ARGS(inode, lblk, len, flags),
1423 1396
@@ -1425,8 +1398,8 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
1425 __field( ino_t, ino ) 1398 __field( ino_t, ino )
1426 __field( dev_t, dev ) 1399 __field( dev_t, dev )
1427 __field( ext4_lblk_t, lblk ) 1400 __field( ext4_lblk_t, lblk )
1428 __field( unsigned, len ) 1401 __field( unsigned int, len )
1429 __field( unsigned, flags ) 1402 __field( unsigned int, flags )
1430 ), 1403 ),
1431 1404
1432 TP_fast_assign( 1405 TP_fast_assign(
@@ -1440,7 +1413,7 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
1440 TP_printk("dev %d,%d ino %lu lblk %u len %u flags %u", 1413 TP_printk("dev %d,%d ino %lu lblk %u len %u flags %u",
1441 MAJOR(__entry->dev), MINOR(__entry->dev), 1414 MAJOR(__entry->dev), MINOR(__entry->dev),
1442 (unsigned long) __entry->ino, 1415 (unsigned long) __entry->ino,
1443 (unsigned) __entry->lblk, __entry->len, __entry->flags) 1416 __entry->lblk, __entry->len, __entry->flags)
1444); 1417);
1445 1418
1446DEFINE_EVENT(ext4__map_blocks_enter, ext4_ext_map_blocks_enter, 1419DEFINE_EVENT(ext4__map_blocks_enter, ext4_ext_map_blocks_enter,
@@ -1459,7 +1432,7 @@ DEFINE_EVENT(ext4__map_blocks_enter, ext4_ind_map_blocks_enter,
1459 1432
1460DECLARE_EVENT_CLASS(ext4__map_blocks_exit, 1433DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
1461 TP_PROTO(struct inode *inode, ext4_lblk_t lblk, 1434 TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
1462 ext4_fsblk_t pblk, unsigned len, int ret), 1435 ext4_fsblk_t pblk, unsigned int len, int ret),
1463 1436
1464 TP_ARGS(inode, lblk, pblk, len, ret), 1437 TP_ARGS(inode, lblk, pblk, len, ret),
1465 1438
@@ -1468,7 +1441,7 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
1468 __field( dev_t, dev ) 1441 __field( dev_t, dev )
1469 __field( ext4_lblk_t, lblk ) 1442 __field( ext4_lblk_t, lblk )
1470 __field( ext4_fsblk_t, pblk ) 1443 __field( ext4_fsblk_t, pblk )
1471 __field( unsigned, len ) 1444 __field( unsigned int, len )
1472 __field( int, ret ) 1445 __field( int, ret )
1473 ), 1446 ),
1474 1447
@@ -1484,7 +1457,7 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
1484 TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u ret %d", 1457 TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u ret %d",
1485 MAJOR(__entry->dev), MINOR(__entry->dev), 1458 MAJOR(__entry->dev), MINOR(__entry->dev),
1486 (unsigned long) __entry->ino, 1459 (unsigned long) __entry->ino,
1487 (unsigned) __entry->lblk, (unsigned long long) __entry->pblk, 1460 __entry->lblk, __entry->pblk,
1488 __entry->len, __entry->ret) 1461 __entry->len, __entry->ret)
1489); 1462);
1490 1463
@@ -1524,7 +1497,7 @@ TRACE_EVENT(ext4_ext_load_extent,
1524 TP_printk("dev %d,%d ino %lu lblk %u pblk %llu", 1497 TP_printk("dev %d,%d ino %lu lblk %u pblk %llu",
1525 MAJOR(__entry->dev), MINOR(__entry->dev), 1498 MAJOR(__entry->dev), MINOR(__entry->dev),
1526 (unsigned long) __entry->ino, 1499 (unsigned long) __entry->ino,
1527 (unsigned) __entry->lblk, (unsigned long long) __entry->pblk) 1500 __entry->lblk, __entry->pblk)
1528); 1501);
1529 1502
1530TRACE_EVENT(ext4_load_inode, 1503TRACE_EVENT(ext4_load_inode,
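A recurring theme in the ext4 tracepoint cleanup above is matching each TP_printk specifier to the declared field type (blkcnt_t becomes __u64 printed with %llu, logical block numbers shrink to __u32 printed with %u) instead of sprinkling casts at every print site. The same discipline in plain C, using the portable inttypes macros:

    #include <stdio.h>
    #include <inttypes.h>

    int main(void)
    {
        uint64_t blocks = 1234567890123ULL;
        int64_t pos = -1;

        /* Pick the specifier from the declared type, as the ext4
         * cleanup does, rather than casting at every call. */
        printf("blocks %" PRIu64 " pos %" PRId64 "\n", blocks, pos);
        return 0;
    }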
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index ae045ca7d356..1c09820df585 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -20,7 +20,8 @@ struct softirq_action;
20 softirq_name(BLOCK_IOPOLL), \ 20 softirq_name(BLOCK_IOPOLL), \
21 softirq_name(TASKLET), \ 21 softirq_name(TASKLET), \
22 softirq_name(SCHED), \ 22 softirq_name(SCHED), \
23 softirq_name(HRTIMER)) 23 softirq_name(HRTIMER), \
24 softirq_name(RCU))
24 25
25/** 26/**
26 * irq_handler_entry - called immediately before the irq action handler 27 * irq_handler_entry - called immediately before the irq action handler
diff --git a/init/calibrate.c b/init/calibrate.c
index 2568d22a304e..aae2f40fea4c 100644
--- a/init/calibrate.c
+++ b/init/calibrate.c
@@ -245,30 +245,32 @@ recalibrate:
245 245
246void __cpuinit calibrate_delay(void) 246void __cpuinit calibrate_delay(void)
247{ 247{
248 unsigned long lpj;
248 static bool printed; 249 static bool printed;
249 250
250 if (preset_lpj) { 251 if (preset_lpj) {
251 loops_per_jiffy = preset_lpj; 252 lpj = preset_lpj;
252 if (!printed) 253 if (!printed)
253 pr_info("Calibrating delay loop (skipped) " 254 pr_info("Calibrating delay loop (skipped) "
254 "preset value.. "); 255 "preset value.. ");
255 } else if ((!printed) && lpj_fine) { 256 } else if ((!printed) && lpj_fine) {
256 loops_per_jiffy = lpj_fine; 257 lpj = lpj_fine;
257 pr_info("Calibrating delay loop (skipped), " 258 pr_info("Calibrating delay loop (skipped), "
258 "value calculated using timer frequency.. "); 259 "value calculated using timer frequency.. ");
259 } else if ((loops_per_jiffy = calibrate_delay_direct()) != 0) { 260 } else if ((lpj = calibrate_delay_direct()) != 0) {
260 if (!printed) 261 if (!printed)
261 pr_info("Calibrating delay using timer " 262 pr_info("Calibrating delay using timer "
262 "specific routine.. "); 263 "specific routine.. ");
263 } else { 264 } else {
264 if (!printed) 265 if (!printed)
265 pr_info("Calibrating delay loop... "); 266 pr_info("Calibrating delay loop... ");
266 loops_per_jiffy = calibrate_delay_converge(); 267 lpj = calibrate_delay_converge();
267 } 268 }
268 if (!printed) 269 if (!printed)
269 pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n", 270 pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
270 loops_per_jiffy/(500000/HZ), 271 lpj/(500000/HZ),
271 (loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy); 272 (lpj/(5000/HZ)) % 100, lpj);
272 273
274 loops_per_jiffy = lpj;
273 printed = true; 275 printed = true;
274} 276}
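Calibrating into a local lpj and storing loops_per_jiffy once at the end leaves every path writing the global exactly once; one plausible motivation is that nothing can observe a partially calibrated value while the loop is still converging. The "compute privately, publish once" shape in miniature:

    #include <stdio.h>

    unsigned long loops_per_jiffy;        /* globally visible result */

    static unsigned long calibrate(void)
    {
        unsigned long lpj = 1000;

        for (int round = 0; round < 4; round++)
            lpj += lpj / 2;               /* refine in a local only */
        return lpj;
    }

    int main(void)
    {
        unsigned long lpj = calibrate();

        loops_per_jiffy = lpj;            /* single publication */
        printf("lpj=%lu\n", loops_per_jiffy);
        return 0;
    }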
diff --git a/init/main.c b/init/main.c
index cafba67c13bf..d7211faed2ad 100644
--- a/init/main.c
+++ b/init/main.c
@@ -542,6 +542,7 @@ asmlinkage void __init start_kernel(void)
542 timekeeping_init(); 542 timekeeping_init();
543 time_init(); 543 time_init();
544 profile_init(); 544 profile_init();
545 call_function_init();
545 if (!irqs_disabled()) 546 if (!irqs_disabled())
546 printk(KERN_CRIT "start_kernel(): bug: interrupts were " 547 printk(KERN_CRIT "start_kernel(): bug: interrupts were "
547 "enabled early\n"); 548 "enabled early\n");
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 7d02d33be699..42ddbc6f0de6 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -113,8 +113,10 @@ static int snapshot_open(struct inode *inode, struct file *filp)
113 if (error) 113 if (error)
114 pm_notifier_call_chain(PM_POST_RESTORE); 114 pm_notifier_call_chain(PM_POST_RESTORE);
115 } 115 }
116 if (error) 116 if (error) {
117 free_basic_memory_bitmaps();
117 atomic_inc(&snapshot_device_available); 118 atomic_inc(&snapshot_device_available);
119 }
118 data->frozen = 0; 120 data->frozen = 0;
119 data->ready = 0; 121 data->ready = 0;
120 data->platform_support = 0; 122 data->platform_support = 0;
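
The snapshot_open() hunk pairs the error path with both earlier acquisitions: when the notifier chain fails, the memory bitmaps allocated just before must be freed in addition to releasing the device slot. A hedged sketch of the pattern; the helper names below are stand-ins, not the kernel's:

#include <stdio.h>

/* Stand-ins for the resources snapshot_open() holds on its error path. */
static int take_device_slot(void)  { return 0; }
static void put_device_slot(void)  { printf("device slot released\n"); }
static int alloc_bitmaps(void)     { return 0; }
static void free_bitmaps(void)     { printf("bitmaps freed\n"); }
static int run_notifiers(void)     { return -1; /* simulate failure */ }

static int open_snapshot(void)
{
	int error;

	if (take_device_slot())
		return -1;
	if (alloc_bitmaps()) {
		put_device_slot();
		return -1;
	}
	error = run_notifiers();
	if (error) {
		free_bitmaps();		/* the release the fix adds */
		put_device_slot();
	}
	return error;
}

int main(void)
{
	printf("open_snapshot: %d\n", open_snapshot());
	return 0;
}
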
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 89419ff92e99..7e59ffb3d0ba 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -87,6 +87,8 @@ static struct rcu_state *rcu_state;
87int rcu_scheduler_active __read_mostly; 87int rcu_scheduler_active __read_mostly;
88EXPORT_SYMBOL_GPL(rcu_scheduler_active); 88EXPORT_SYMBOL_GPL(rcu_scheduler_active);
89 89
90#ifdef CONFIG_RCU_BOOST
91
90/* 92/*
91 * Control variables for per-CPU and per-rcu_node kthreads. These 93 * Control variables for per-CPU and per-rcu_node kthreads. These
92 * handle all flavors of RCU. 94 * handle all flavors of RCU.
@@ -98,8 +100,11 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
98DEFINE_PER_CPU(char, rcu_cpu_has_work); 100DEFINE_PER_CPU(char, rcu_cpu_has_work);
99static char rcu_kthreads_spawnable; 101static char rcu_kthreads_spawnable;
100 102
103#endif /* #ifdef CONFIG_RCU_BOOST */
104
101static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); 105static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
102static void invoke_rcu_cpu_kthread(void); 106static void invoke_rcu_core(void);
107static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
103 108
104#define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */ 109#define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */
105 110
@@ -1088,14 +1093,8 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
1088 int need_report = 0; 1093 int need_report = 0;
1089 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 1094 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1090 struct rcu_node *rnp; 1095 struct rcu_node *rnp;
1091 struct task_struct *t;
1092 1096
1093 /* Stop the CPU's kthread. */ 1097 rcu_stop_cpu_kthread(cpu);
1094 t = per_cpu(rcu_cpu_kthread_task, cpu);
1095 if (t != NULL) {
1096 per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
1097 kthread_stop(t);
1098 }
1099 1098
1100 /* Exclude any attempts to start a new grace period. */ 1099 /* Exclude any attempts to start a new grace period. */
1101 raw_spin_lock_irqsave(&rsp->onofflock, flags); 1100 raw_spin_lock_irqsave(&rsp->onofflock, flags);
@@ -1231,7 +1230,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
1231 1230
1232 /* Re-raise the RCU softirq if there are callbacks remaining. */ 1231 /* Re-raise the RCU softirq if there are callbacks remaining. */
1233 if (cpu_has_callbacks_ready_to_invoke(rdp)) 1232 if (cpu_has_callbacks_ready_to_invoke(rdp))
1234 invoke_rcu_cpu_kthread(); 1233 invoke_rcu_core();
1235} 1234}
1236 1235
1237/* 1236/*
@@ -1277,7 +1276,7 @@ void rcu_check_callbacks(int cpu, int user)
1277 } 1276 }
1278 rcu_preempt_check_callbacks(cpu); 1277 rcu_preempt_check_callbacks(cpu);
1279 if (rcu_pending(cpu)) 1278 if (rcu_pending(cpu))
1280 invoke_rcu_cpu_kthread(); 1279 invoke_rcu_core();
1281} 1280}
1282 1281
1283#ifdef CONFIG_SMP 1282#ifdef CONFIG_SMP
@@ -1442,13 +1441,14 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1442 } 1441 }
1443 1442
1444 /* If there are callbacks ready, invoke them. */ 1443 /* If there are callbacks ready, invoke them. */
1445 rcu_do_batch(rsp, rdp); 1444 if (cpu_has_callbacks_ready_to_invoke(rdp))
1445 invoke_rcu_callbacks(rsp, rdp);
1446} 1446}
1447 1447
1448/* 1448/*
1449 * Do softirq processing for the current CPU. 1449 * Do softirq processing for the current CPU.
1450 */ 1450 */
1451static void rcu_process_callbacks(void) 1451static void rcu_process_callbacks(struct softirq_action *unused)
1452{ 1452{
1453 __rcu_process_callbacks(&rcu_sched_state, 1453 __rcu_process_callbacks(&rcu_sched_state,
1454 &__get_cpu_var(rcu_sched_data)); 1454 &__get_cpu_var(rcu_sched_data));
@@ -1465,342 +1465,20 @@ static void rcu_process_callbacks(void)
1465 * the current CPU with interrupts disabled, the rcu_cpu_kthread_task 1465 * the current CPU with interrupts disabled, the rcu_cpu_kthread_task
1466 * cannot disappear out from under us. 1466 * cannot disappear out from under us.
1467 */ 1467 */
1468static void invoke_rcu_cpu_kthread(void) 1468static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1469{
1470 unsigned long flags;
1471
1472 local_irq_save(flags);
1473 __this_cpu_write(rcu_cpu_has_work, 1);
1474 if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
1475 local_irq_restore(flags);
1476 return;
1477 }
1478 wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
1479 local_irq_restore(flags);
1480}
1481
1482/*
1483 * Wake up the specified per-rcu_node-structure kthread.
1484 * Because the per-rcu_node kthreads are immortal, we don't need
1485 * to do anything to keep them alive.
1486 */
1487static void invoke_rcu_node_kthread(struct rcu_node *rnp)
1488{
1489 struct task_struct *t;
1490
1491 t = rnp->node_kthread_task;
1492 if (t != NULL)
1493 wake_up_process(t);
1494}
1495
1496/*
1497 * Set the specified CPU's kthread to run RT or not, as specified by
1498 * the to_rt argument. The CPU-hotplug locks are held, so the task
1499 * is not going away.
1500 */
1501static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1502{
1503 int policy;
1504 struct sched_param sp;
1505 struct task_struct *t;
1506
1507 t = per_cpu(rcu_cpu_kthread_task, cpu);
1508 if (t == NULL)
1509 return;
1510 if (to_rt) {
1511 policy = SCHED_FIFO;
1512 sp.sched_priority = RCU_KTHREAD_PRIO;
1513 } else {
1514 policy = SCHED_NORMAL;
1515 sp.sched_priority = 0;
1516 }
1517 sched_setscheduler_nocheck(t, policy, &sp);
1518}
1519
1520/*
1521 * Timer handler to initiate the waking up of per-CPU kthreads that
1522 * have yielded the CPU due to excess numbers of RCU callbacks.
1523 * We wake up the per-rcu_node kthread, which in turn will wake up
1524 * the booster kthread.
1525 */
1526static void rcu_cpu_kthread_timer(unsigned long arg)
1527{
1528 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
1529 struct rcu_node *rnp = rdp->mynode;
1530
1531 atomic_or(rdp->grpmask, &rnp->wakemask);
1532 invoke_rcu_node_kthread(rnp);
1533}
1534
1535/*
1536 * Drop to non-real-time priority and yield, but only after posting a
1537 * timer that will cause us to regain our real-time priority if we
1538 * remain preempted. Either way, we restore our real-time priority
1539 * before returning.
1540 */
1541static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
1542{
1543 struct sched_param sp;
1544 struct timer_list yield_timer;
1545
1546 setup_timer_on_stack(&yield_timer, f, arg);
1547 mod_timer(&yield_timer, jiffies + 2);
1548 sp.sched_priority = 0;
1549 sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
1550 set_user_nice(current, 19);
1551 schedule();
1552 sp.sched_priority = RCU_KTHREAD_PRIO;
1553 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1554 del_timer(&yield_timer);
1555}
1556
1557/*
1558 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
1559 * This can happen while the corresponding CPU is either coming online
1560 * or going offline. We cannot wait until the CPU is fully online
1561 * before starting the kthread, because the various notifier functions
1562 * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
1563 * the corresponding CPU is online.
1564 *
1565 * Return 1 if the kthread needs to stop, 0 otherwise.
1566 *
1567 * Caller must disable bh. This function can momentarily enable it.
1568 */
1569static int rcu_cpu_kthread_should_stop(int cpu)
1570{
1571 while (cpu_is_offline(cpu) ||
1572 !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
1573 smp_processor_id() != cpu) {
1574 if (kthread_should_stop())
1575 return 1;
1576 per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1577 per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
1578 local_bh_enable();
1579 schedule_timeout_uninterruptible(1);
1580 if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
1581 set_cpus_allowed_ptr(current, cpumask_of(cpu));
1582 local_bh_disable();
1583 }
1584 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1585 return 0;
1586}
1587
1588/*
1589 * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
1590 * earlier RCU softirq.
1591 */
1592static int rcu_cpu_kthread(void *arg)
1593{
1594 int cpu = (int)(long)arg;
1595 unsigned long flags;
1596 int spincnt = 0;
1597 unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
1598 char work;
1599 char *workp = &per_cpu(rcu_cpu_has_work, cpu);
1600
1601 for (;;) {
1602 *statusp = RCU_KTHREAD_WAITING;
1603 rcu_wait(*workp != 0 || kthread_should_stop());
1604 local_bh_disable();
1605 if (rcu_cpu_kthread_should_stop(cpu)) {
1606 local_bh_enable();
1607 break;
1608 }
1609 *statusp = RCU_KTHREAD_RUNNING;
1610 per_cpu(rcu_cpu_kthread_loops, cpu)++;
1611 local_irq_save(flags);
1612 work = *workp;
1613 *workp = 0;
1614 local_irq_restore(flags);
1615 if (work)
1616 rcu_process_callbacks();
1617 local_bh_enable();
1618 if (*workp != 0)
1619 spincnt++;
1620 else
1621 spincnt = 0;
1622 if (spincnt > 10) {
1623 *statusp = RCU_KTHREAD_YIELDING;
1624 rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
1625 spincnt = 0;
1626 }
1627 }
1628 *statusp = RCU_KTHREAD_STOPPED;
1629 return 0;
1630}
1631
1632/*
1633 * Spawn a per-CPU kthread, setting up affinity and priority.
1634 * Because the CPU hotplug lock is held, no other CPU will be attempting
1635 * to manipulate rcu_cpu_kthread_task. There might be another CPU
1636 * attempting to access it during boot, but the locking in kthread_bind()
1637 * will enforce sufficient ordering.
1638 */
1639static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
1640{ 1469{
1641 struct sched_param sp; 1470 if (likely(!rsp->boost)) {
1642 struct task_struct *t; 1471 rcu_do_batch(rsp, rdp);
1643
1644 if (!rcu_kthreads_spawnable ||
1645 per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
1646 return 0;
1647 t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
1648 if (IS_ERR(t))
1649 return PTR_ERR(t);
1650 kthread_bind(t, cpu);
1651 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1652 WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
1653 per_cpu(rcu_cpu_kthread_task, cpu) = t;
1654 sp.sched_priority = RCU_KTHREAD_PRIO;
1655 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1656 return 0;
1657}
1658
1659/*
1660 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
1661 * kthreads when needed. We ignore requests to wake up kthreads
1662 * for offline CPUs, which is OK because force_quiescent_state()
1663 * takes care of this case.
1664 */
1665static int rcu_node_kthread(void *arg)
1666{
1667 int cpu;
1668 unsigned long flags;
1669 unsigned long mask;
1670 struct rcu_node *rnp = (struct rcu_node *)arg;
1671 struct sched_param sp;
1672 struct task_struct *t;
1673
1674 for (;;) {
1675 rnp->node_kthread_status = RCU_KTHREAD_WAITING;
1676 rcu_wait(atomic_read(&rnp->wakemask) != 0);
1677 rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
1678 raw_spin_lock_irqsave(&rnp->lock, flags);
1679 mask = atomic_xchg(&rnp->wakemask, 0);
1680 rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
1681 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
1682 if ((mask & 0x1) == 0)
1683 continue;
1684 preempt_disable();
1685 t = per_cpu(rcu_cpu_kthread_task, cpu);
1686 if (!cpu_online(cpu) || t == NULL) {
1687 preempt_enable();
1688 continue;
1689 }
1690 per_cpu(rcu_cpu_has_work, cpu) = 1;
1691 sp.sched_priority = RCU_KTHREAD_PRIO;
1692 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1693 preempt_enable();
1694 }
1695 }
1696 /* NOTREACHED */
1697 rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
1698 return 0;
1699}
1700
1701/*
1702 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1703 * served by the rcu_node in question. The CPU hotplug lock is still
1704 * held, so the value of rnp->qsmaskinit will be stable.
1705 *
1706 * We don't include outgoingcpu in the affinity set, use -1 if there is
1707 * no outgoing CPU. If there are no CPUs left in the affinity set,
1708 * this function allows the kthread to execute on any CPU.
1709 */
1710static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1711{
1712 cpumask_var_t cm;
1713 int cpu;
1714 unsigned long mask = rnp->qsmaskinit;
1715
1716 if (rnp->node_kthread_task == NULL)
1717 return;
1718 if (!alloc_cpumask_var(&cm, GFP_KERNEL))
1719 return; 1472 return;
1720 cpumask_clear(cm);
1721 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1722 if ((mask & 0x1) && cpu != outgoingcpu)
1723 cpumask_set_cpu(cpu, cm);
1724 if (cpumask_weight(cm) == 0) {
1725 cpumask_setall(cm);
1726 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1727 cpumask_clear_cpu(cpu, cm);
1728 WARN_ON_ONCE(cpumask_weight(cm) == 0);
1729 } 1473 }
1730 set_cpus_allowed_ptr(rnp->node_kthread_task, cm); 1474 invoke_rcu_callbacks_kthread();
1731 rcu_boost_kthread_setaffinity(rnp, cm);
1732 free_cpumask_var(cm);
1733} 1475}
1734 1476
1735/* 1477static void invoke_rcu_core(void)
1736 * Spawn a per-rcu_node kthread, setting priority and affinity.
1737 * Called during boot before online/offline can happen, or, if
1738 * during runtime, with the main CPU-hotplug locks held. So only
1739 * one of these can be executing at a time.
1740 */
1741static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
1742 struct rcu_node *rnp)
1743{ 1478{
1744 unsigned long flags; 1479 raise_softirq(RCU_SOFTIRQ);
1745 int rnp_index = rnp - &rsp->node[0];
1746 struct sched_param sp;
1747 struct task_struct *t;
1748
1749 if (!rcu_kthreads_spawnable ||
1750 rnp->qsmaskinit == 0)
1751 return 0;
1752 if (rnp->node_kthread_task == NULL) {
1753 t = kthread_create(rcu_node_kthread, (void *)rnp,
1754 "rcun%d", rnp_index);
1755 if (IS_ERR(t))
1756 return PTR_ERR(t);
1757 raw_spin_lock_irqsave(&rnp->lock, flags);
1758 rnp->node_kthread_task = t;
1759 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1760 sp.sched_priority = 99;
1761 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1762 }
1763 return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
1764} 1480}
1765 1481
1766static void rcu_wake_one_boost_kthread(struct rcu_node *rnp);
1767
1768/*
1769 * Spawn all kthreads -- called as soon as the scheduler is running.
1770 */
1771static int __init rcu_spawn_kthreads(void)
1772{
1773 int cpu;
1774 struct rcu_node *rnp;
1775 struct task_struct *t;
1776
1777 rcu_kthreads_spawnable = 1;
1778 for_each_possible_cpu(cpu) {
1779 per_cpu(rcu_cpu_has_work, cpu) = 0;
1780 if (cpu_online(cpu)) {
1781 (void)rcu_spawn_one_cpu_kthread(cpu);
1782 t = per_cpu(rcu_cpu_kthread_task, cpu);
1783 if (t)
1784 wake_up_process(t);
1785 }
1786 }
1787 rnp = rcu_get_root(rcu_state);
1788 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1789 if (rnp->node_kthread_task)
1790 wake_up_process(rnp->node_kthread_task);
1791 if (NUM_RCU_NODES > 1) {
1792 rcu_for_each_leaf_node(rcu_state, rnp) {
1793 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1794 t = rnp->node_kthread_task;
1795 if (t)
1796 wake_up_process(t);
1797 rcu_wake_one_boost_kthread(rnp);
1798 }
1799 }
1800 return 0;
1801}
1802early_initcall(rcu_spawn_kthreads);
1803
1804static void 1482static void
1805__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), 1483__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1806 struct rcu_state *rsp) 1484 struct rcu_state *rsp)
@@ -2207,44 +1885,6 @@ static void __cpuinit rcu_prepare_cpu(int cpu)
2207 rcu_preempt_init_percpu_data(cpu); 1885 rcu_preempt_init_percpu_data(cpu);
2208} 1886}
2209 1887
2210static void __cpuinit rcu_prepare_kthreads(int cpu)
2211{
2212 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
2213 struct rcu_node *rnp = rdp->mynode;
2214
2215 /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
2216 if (rcu_kthreads_spawnable) {
2217 (void)rcu_spawn_one_cpu_kthread(cpu);
2218 if (rnp->node_kthread_task == NULL)
2219 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
2220 }
2221}
2222
2223/*
2224 * kthread_create() creates threads in TASK_UNINTERRUPTIBLE state,
2225 * but the RCU threads are woken on demand, and if demand is low this
2226 * could be a while triggering the hung task watchdog.
2227 *
2228 * In order to avoid this, poke all tasks once the CPU is fully
2229 * up and running.
2230 */
2231static void __cpuinit rcu_online_kthreads(int cpu)
2232{
2233 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
2234 struct rcu_node *rnp = rdp->mynode;
2235 struct task_struct *t;
2236
2237 t = per_cpu(rcu_cpu_kthread_task, cpu);
2238 if (t)
2239 wake_up_process(t);
2240
2241 t = rnp->node_kthread_task;
2242 if (t)
2243 wake_up_process(t);
2244
2245 rcu_wake_one_boost_kthread(rnp);
2246}
2247
2248/* 1888/*
2249 * Handle CPU online/offline notification events. 1889 * Handle CPU online/offline notification events.
2250 */ 1890 */
@@ -2262,7 +1902,6 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
2262 rcu_prepare_kthreads(cpu); 1902 rcu_prepare_kthreads(cpu);
2263 break; 1903 break;
2264 case CPU_ONLINE: 1904 case CPU_ONLINE:
2265 rcu_online_kthreads(cpu);
2266 case CPU_DOWN_FAILED: 1905 case CPU_DOWN_FAILED:
2267 rcu_node_kthread_setaffinity(rnp, -1); 1906 rcu_node_kthread_setaffinity(rnp, -1);
2268 rcu_cpu_kthread_setrt(cpu, 1); 1907 rcu_cpu_kthread_setrt(cpu, 1);
@@ -2410,6 +2049,7 @@ void __init rcu_init(void)
2410 rcu_init_one(&rcu_sched_state, &rcu_sched_data); 2049 rcu_init_one(&rcu_sched_state, &rcu_sched_data);
2411 rcu_init_one(&rcu_bh_state, &rcu_bh_data); 2050 rcu_init_one(&rcu_bh_state, &rcu_bh_data);
2412 __rcu_init_preempt(); 2051 __rcu_init_preempt();
2052 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
2413 2053
2414 /* 2054 /*
2415 * We don't need protection against CPU-hotplug here because 2055 * We don't need protection against CPU-hotplug here because
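
The net effect of the rcutree.c changes: callback invocation moves back into RCU_SOFTIRQ (registered via the open_softirq() call added to rcu_init()), invoke_rcu_core() reduces to raise_softirq(RCU_SOFTIRQ), and invoke_rcu_callbacks() defers to the per-CPU kthread only when the flavor is subject to priority boosting. A userspace model of that dispatch decision; names and structure are illustrative only:

#include <stdbool.h>
#include <stdio.h>

/* Model of the post-patch dispatch: callbacks normally run straight
 * from softirq context; only boosted flavors defer to a kthread. */
struct state { const char *name; bool boost; };

static void do_batch(struct state *sp)
{
	printf("%s: run callbacks in softirq\n", sp->name);
}

static void wake_callbacks_kthread(struct state *sp)
{
	printf("%s: wake rcuc kthread\n", sp->name);
}

static void invoke_callbacks(struct state *sp)
{
	if (!sp->boost) {		/* the common, likely() case */
		do_batch(sp);
		return;
	}
	wake_callbacks_kthread(sp);	/* boost enabled: defer to RT kthread */
}

int main(void)
{
	struct state sched = { "rcu_sched", false };
	struct state preempt = { "rcu_preempt", true };

	invoke_callbacks(&sched);
	invoke_callbacks(&preempt);
	return 0;
}
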
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 7b9a08b4aaea..01b2ccda26fb 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -369,6 +369,7 @@ struct rcu_state {
369 /* period because */ 369 /* period because */
370 /* force_quiescent_state() */ 370 /* force_quiescent_state() */
371 /* was running. */ 371 /* was running. */
372 u8 boost; /* Subject to priority boost. */
372 unsigned long gpnum; /* Current gp number. */ 373 unsigned long gpnum; /* Current gp number. */
373 unsigned long completed; /* # of last completed gp. */ 374 unsigned long completed; /* # of last completed gp. */
374 375
@@ -426,6 +427,7 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
426#ifdef CONFIG_HOTPLUG_CPU 427#ifdef CONFIG_HOTPLUG_CPU
427static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, 428static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
428 unsigned long flags); 429 unsigned long flags);
430static void rcu_stop_cpu_kthread(int cpu);
429#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 431#endif /* #ifdef CONFIG_HOTPLUG_CPU */
430static void rcu_print_detail_task_stall(struct rcu_state *rsp); 432static void rcu_print_detail_task_stall(struct rcu_state *rsp);
431static void rcu_print_task_stall(struct rcu_node *rnp); 433static void rcu_print_task_stall(struct rcu_node *rnp);
@@ -450,11 +452,19 @@ static void rcu_preempt_send_cbs_to_online(void);
450static void __init __rcu_init_preempt(void); 452static void __init __rcu_init_preempt(void);
451static void rcu_needs_cpu_flush(void); 453static void rcu_needs_cpu_flush(void);
452static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); 454static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
455static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
456static void invoke_rcu_callbacks_kthread(void);
457#ifdef CONFIG_RCU_BOOST
458static void rcu_preempt_do_callbacks(void);
453static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, 459static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
454 cpumask_var_t cm); 460 cpumask_var_t cm);
455static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
456static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, 461static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
457 struct rcu_node *rnp, 462 struct rcu_node *rnp,
458 int rnp_index); 463 int rnp_index);
464static void invoke_rcu_node_kthread(struct rcu_node *rnp);
465static void rcu_yield(void (*f)(unsigned long), unsigned long arg);
466#endif /* #ifdef CONFIG_RCU_BOOST */
467static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
468static void __cpuinit rcu_prepare_kthreads(int cpu);
459 469
460#endif /* #ifndef RCU_TREE_NONCORE */ 470#endif /* #ifndef RCU_TREE_NONCORE */
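
The header now groups the kthread-related declarations under CONFIG_RCU_BOOST, with empty stubs supplied when the option is off, so call sites such as __rcu_offline_cpu() stay free of #ifdefs. The same pattern in miniature; FEATURE_BOOST is a made-up macro for this sketch, toggled with -DFEATURE_BOOST at compile time:

#include <stdio.h>

/* Feature-gated hook pattern: the implementation provides either the
 * real body or an empty stub, so callers never carry their own #ifdefs. */
#ifdef FEATURE_BOOST
static void stop_cpu_kthread(int cpu)
{
	printf("stopping kthread for CPU %d\n", cpu);
}
#else
static void stop_cpu_kthread(int cpu)
{
	(void)cpu;	/* no kthreads to stop in this configuration */
}
#endif

int main(void)
{
	stop_cpu_kthread(3);	/* caller is #ifdef-free either way */
	return 0;
}
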
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c8bff3099a89..14dc7dd00902 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -602,6 +602,15 @@ static void rcu_preempt_process_callbacks(void)
602 &__get_cpu_var(rcu_preempt_data)); 602 &__get_cpu_var(rcu_preempt_data));
603} 603}
604 604
605#ifdef CONFIG_RCU_BOOST
606
607static void rcu_preempt_do_callbacks(void)
608{
609 rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
610}
611
612#endif /* #ifdef CONFIG_RCU_BOOST */
613
605/* 614/*
606 * Queue a preemptible-RCU callback for invocation after a grace period. 615 * Queue a preemptible-RCU callback for invocation after a grace period.
607 */ 616 */
@@ -1249,6 +1258,23 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1249} 1258}
1250 1259
1251/* 1260/*
1261 * Wake up the per-CPU kthread to invoke RCU callbacks.
1262 */
1263static void invoke_rcu_callbacks_kthread(void)
1264{
1265 unsigned long flags;
1266
1267 local_irq_save(flags);
1268 __this_cpu_write(rcu_cpu_has_work, 1);
1269 if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
1270 local_irq_restore(flags);
1271 return;
1272 }
1273 wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
1274 local_irq_restore(flags);
1275}
1276
1277/*
1252 * Set the affinity of the boost kthread. The CPU-hotplug locks are 1278 * Set the affinity of the boost kthread. The CPU-hotplug locks are
1253 * held, so no one should be messing with the existence of the boost 1279 * held, so no one should be messing with the existence of the boost
1254 * kthread. 1280 * kthread.
@@ -1288,6 +1314,7 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1288 1314
1289 if (&rcu_preempt_state != rsp) 1315 if (&rcu_preempt_state != rsp)
1290 return 0; 1316 return 0;
1317 rsp->boost = 1;
1291 if (rnp->boost_kthread_task != NULL) 1318 if (rnp->boost_kthread_task != NULL)
1292 return 0; 1319 return 0;
1293 t = kthread_create(rcu_boost_kthread, (void *)rnp, 1320 t = kthread_create(rcu_boost_kthread, (void *)rnp,
@@ -1299,13 +1326,372 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1299 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1326 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1300 sp.sched_priority = RCU_KTHREAD_PRIO; 1327 sp.sched_priority = RCU_KTHREAD_PRIO;
1301 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 1328 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1329 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1302 return 0; 1330 return 0;
1303} 1331}
1304 1332
1305static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp) 1333#ifdef CONFIG_HOTPLUG_CPU
1334
1335/*
 1336 * Stop the RCU's per-CPU kthread when its CPU goes offline.
1337 */
1338static void rcu_stop_cpu_kthread(int cpu)
1306{ 1339{
1307 if (rnp->boost_kthread_task) 1340 struct task_struct *t;
1308 wake_up_process(rnp->boost_kthread_task); 1341
1342 /* Stop the CPU's kthread. */
1343 t = per_cpu(rcu_cpu_kthread_task, cpu);
1344 if (t != NULL) {
1345 per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
1346 kthread_stop(t);
1347 }
1348}
1349
1350#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1351
1352static void rcu_kthread_do_work(void)
1353{
1354 rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
1355 rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
1356 rcu_preempt_do_callbacks();
1357}
1358
1359/*
1360 * Wake up the specified per-rcu_node-structure kthread.
1361 * Because the per-rcu_node kthreads are immortal, we don't need
1362 * to do anything to keep them alive.
1363 */
1364static void invoke_rcu_node_kthread(struct rcu_node *rnp)
1365{
1366 struct task_struct *t;
1367
1368 t = rnp->node_kthread_task;
1369 if (t != NULL)
1370 wake_up_process(t);
1371}
1372
1373/*
1374 * Set the specified CPU's kthread to run RT or not, as specified by
1375 * the to_rt argument. The CPU-hotplug locks are held, so the task
1376 * is not going away.
1377 */
1378static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1379{
1380 int policy;
1381 struct sched_param sp;
1382 struct task_struct *t;
1383
1384 t = per_cpu(rcu_cpu_kthread_task, cpu);
1385 if (t == NULL)
1386 return;
1387 if (to_rt) {
1388 policy = SCHED_FIFO;
1389 sp.sched_priority = RCU_KTHREAD_PRIO;
1390 } else {
1391 policy = SCHED_NORMAL;
1392 sp.sched_priority = 0;
1393 }
1394 sched_setscheduler_nocheck(t, policy, &sp);
1395}
1396
1397/*
1398 * Timer handler to initiate the waking up of per-CPU kthreads that
1399 * have yielded the CPU due to excess numbers of RCU callbacks.
1400 * We wake up the per-rcu_node kthread, which in turn will wake up
1401 * the booster kthread.
1402 */
1403static void rcu_cpu_kthread_timer(unsigned long arg)
1404{
1405 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
1406 struct rcu_node *rnp = rdp->mynode;
1407
1408 atomic_or(rdp->grpmask, &rnp->wakemask);
1409 invoke_rcu_node_kthread(rnp);
1410}
1411
1412/*
1413 * Drop to non-real-time priority and yield, but only after posting a
1414 * timer that will cause us to regain our real-time priority if we
1415 * remain preempted. Either way, we restore our real-time priority
1416 * before returning.
1417 */
1418static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
1419{
1420 struct sched_param sp;
1421 struct timer_list yield_timer;
1422
1423 setup_timer_on_stack(&yield_timer, f, arg);
1424 mod_timer(&yield_timer, jiffies + 2);
1425 sp.sched_priority = 0;
1426 sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
1427 set_user_nice(current, 19);
1428 schedule();
1429 sp.sched_priority = RCU_KTHREAD_PRIO;
1430 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1431 del_timer(&yield_timer);
1432}
1433
1434/*
1435 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
1436 * This can happen while the corresponding CPU is either coming online
1437 * or going offline. We cannot wait until the CPU is fully online
1438 * before starting the kthread, because the various notifier functions
1439 * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
1440 * the corresponding CPU is online.
1441 *
1442 * Return 1 if the kthread needs to stop, 0 otherwise.
1443 *
1444 * Caller must disable bh. This function can momentarily enable it.
1445 */
1446static int rcu_cpu_kthread_should_stop(int cpu)
1447{
1448 while (cpu_is_offline(cpu) ||
1449 !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
1450 smp_processor_id() != cpu) {
1451 if (kthread_should_stop())
1452 return 1;
1453 per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1454 per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
1455 local_bh_enable();
1456 schedule_timeout_uninterruptible(1);
1457 if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
1458 set_cpus_allowed_ptr(current, cpumask_of(cpu));
1459 local_bh_disable();
1460 }
1461 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1462 return 0;
1463}
1464
1465/*
1466 * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
1467 * earlier RCU softirq.
1468 */
1469static int rcu_cpu_kthread(void *arg)
1470{
1471 int cpu = (int)(long)arg;
1472 unsigned long flags;
1473 int spincnt = 0;
1474 unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
1475 char work;
1476 char *workp = &per_cpu(rcu_cpu_has_work, cpu);
1477
1478 for (;;) {
1479 *statusp = RCU_KTHREAD_WAITING;
1480 rcu_wait(*workp != 0 || kthread_should_stop());
1481 local_bh_disable();
1482 if (rcu_cpu_kthread_should_stop(cpu)) {
1483 local_bh_enable();
1484 break;
1485 }
1486 *statusp = RCU_KTHREAD_RUNNING;
1487 per_cpu(rcu_cpu_kthread_loops, cpu)++;
1488 local_irq_save(flags);
1489 work = *workp;
1490 *workp = 0;
1491 local_irq_restore(flags);
1492 if (work)
1493 rcu_kthread_do_work();
1494 local_bh_enable();
1495 if (*workp != 0)
1496 spincnt++;
1497 else
1498 spincnt = 0;
1499 if (spincnt > 10) {
1500 *statusp = RCU_KTHREAD_YIELDING;
1501 rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
1502 spincnt = 0;
1503 }
1504 }
1505 *statusp = RCU_KTHREAD_STOPPED;
1506 return 0;
1507}
1508
1509/*
1510 * Spawn a per-CPU kthread, setting up affinity and priority.
1511 * Because the CPU hotplug lock is held, no other CPU will be attempting
1512 * to manipulate rcu_cpu_kthread_task. There might be another CPU
1513 * attempting to access it during boot, but the locking in kthread_bind()
1514 * will enforce sufficient ordering.
1515 *
1516 * Please note that we cannot simply refuse to wake up the per-CPU
1517 * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
1518 * which can result in softlockup complaints if the task ends up being
1519 * idle for more than a couple of minutes.
1520 *
1521 * However, please note also that we cannot bind the per-CPU kthread to its
1522 * CPU until that CPU is fully online. We also cannot wait until the
1523 * CPU is fully online before we create its per-CPU kthread, as this would
1524 * deadlock the system when CPU notifiers tried waiting for grace
1525 * periods. So we bind the per-CPU kthread to its CPU only if the CPU
1526 * is online. If its CPU is not yet fully online, then the code in
1527 * rcu_cpu_kthread() will wait until it is fully online, and then do
1528 * the binding.
1529 */
1530static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
1531{
1532 struct sched_param sp;
1533 struct task_struct *t;
1534
1535 if (!rcu_kthreads_spawnable ||
1536 per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
1537 return 0;
1538 t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
1539 if (IS_ERR(t))
1540 return PTR_ERR(t);
1541 if (cpu_online(cpu))
1542 kthread_bind(t, cpu);
1543 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1544 WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
1545 sp.sched_priority = RCU_KTHREAD_PRIO;
1546 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1547 per_cpu(rcu_cpu_kthread_task, cpu) = t;
1548 wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
1549 return 0;
1550}
1551
1552/*
1553 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
1554 * kthreads when needed. We ignore requests to wake up kthreads
1555 * for offline CPUs, which is OK because force_quiescent_state()
1556 * takes care of this case.
1557 */
1558static int rcu_node_kthread(void *arg)
1559{
1560 int cpu;
1561 unsigned long flags;
1562 unsigned long mask;
1563 struct rcu_node *rnp = (struct rcu_node *)arg;
1564 struct sched_param sp;
1565 struct task_struct *t;
1566
1567 for (;;) {
1568 rnp->node_kthread_status = RCU_KTHREAD_WAITING;
1569 rcu_wait(atomic_read(&rnp->wakemask) != 0);
1570 rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
1571 raw_spin_lock_irqsave(&rnp->lock, flags);
1572 mask = atomic_xchg(&rnp->wakemask, 0);
1573 rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
1574 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
1575 if ((mask & 0x1) == 0)
1576 continue;
1577 preempt_disable();
1578 t = per_cpu(rcu_cpu_kthread_task, cpu);
1579 if (!cpu_online(cpu) || t == NULL) {
1580 preempt_enable();
1581 continue;
1582 }
1583 per_cpu(rcu_cpu_has_work, cpu) = 1;
1584 sp.sched_priority = RCU_KTHREAD_PRIO;
1585 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1586 preempt_enable();
1587 }
1588 }
1589 /* NOTREACHED */
1590 rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
1591 return 0;
1592}
1593
1594/*
1595 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1596 * served by the rcu_node in question. The CPU hotplug lock is still
1597 * held, so the value of rnp->qsmaskinit will be stable.
1598 *
1599 * We don't include outgoingcpu in the affinity set, use -1 if there is
1600 * no outgoing CPU. If there are no CPUs left in the affinity set,
1601 * this function allows the kthread to execute on any CPU.
1602 */
1603static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1604{
1605 cpumask_var_t cm;
1606 int cpu;
1607 unsigned long mask = rnp->qsmaskinit;
1608
1609 if (rnp->node_kthread_task == NULL)
1610 return;
1611 if (!alloc_cpumask_var(&cm, GFP_KERNEL))
1612 return;
1613 cpumask_clear(cm);
1614 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1615 if ((mask & 0x1) && cpu != outgoingcpu)
1616 cpumask_set_cpu(cpu, cm);
1617 if (cpumask_weight(cm) == 0) {
1618 cpumask_setall(cm);
1619 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1620 cpumask_clear_cpu(cpu, cm);
1621 WARN_ON_ONCE(cpumask_weight(cm) == 0);
1622 }
1623 set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
1624 rcu_boost_kthread_setaffinity(rnp, cm);
1625 free_cpumask_var(cm);
1626}
1627
1628/*
1629 * Spawn a per-rcu_node kthread, setting priority and affinity.
1630 * Called during boot before online/offline can happen, or, if
1631 * during runtime, with the main CPU-hotplug locks held. So only
1632 * one of these can be executing at a time.
1633 */
1634static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
1635 struct rcu_node *rnp)
1636{
1637 unsigned long flags;
1638 int rnp_index = rnp - &rsp->node[0];
1639 struct sched_param sp;
1640 struct task_struct *t;
1641
1642 if (!rcu_kthreads_spawnable ||
1643 rnp->qsmaskinit == 0)
1644 return 0;
1645 if (rnp->node_kthread_task == NULL) {
1646 t = kthread_create(rcu_node_kthread, (void *)rnp,
1647 "rcun%d", rnp_index);
1648 if (IS_ERR(t))
1649 return PTR_ERR(t);
1650 raw_spin_lock_irqsave(&rnp->lock, flags);
1651 rnp->node_kthread_task = t;
1652 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1653 sp.sched_priority = 99;
1654 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1655 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1656 }
1657 return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
1658}
1659
1660/*
1661 * Spawn all kthreads -- called as soon as the scheduler is running.
1662 */
1663static int __init rcu_spawn_kthreads(void)
1664{
1665 int cpu;
1666 struct rcu_node *rnp;
1667
1668 rcu_kthreads_spawnable = 1;
1669 for_each_possible_cpu(cpu) {
1670 per_cpu(rcu_cpu_has_work, cpu) = 0;
1671 if (cpu_online(cpu))
1672 (void)rcu_spawn_one_cpu_kthread(cpu);
1673 }
1674 rnp = rcu_get_root(rcu_state);
1675 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1676 if (NUM_RCU_NODES > 1) {
1677 rcu_for_each_leaf_node(rcu_state, rnp)
1678 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1679 }
1680 return 0;
1681}
1682early_initcall(rcu_spawn_kthreads);
1683
1684static void __cpuinit rcu_prepare_kthreads(int cpu)
1685{
1686 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
1687 struct rcu_node *rnp = rdp->mynode;
1688
1689 /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
1690 if (rcu_kthreads_spawnable) {
1691 (void)rcu_spawn_one_cpu_kthread(cpu);
1692 if (rnp->node_kthread_task == NULL)
1693 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1694 }
1309} 1695}
1310 1696
1311#else /* #ifdef CONFIG_RCU_BOOST */ 1697#else /* #ifdef CONFIG_RCU_BOOST */
@@ -1315,23 +1701,32 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1315 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1701 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1316} 1702}
1317 1703
1318static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, 1704static void invoke_rcu_callbacks_kthread(void)
1319 cpumask_var_t cm)
1320{ 1705{
1706 WARN_ON_ONCE(1);
1321} 1707}
1322 1708
1323static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) 1709static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1324{ 1710{
1325} 1711}
1326 1712
1327static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, 1713#ifdef CONFIG_HOTPLUG_CPU
1328 struct rcu_node *rnp, 1714
1329 int rnp_index) 1715static void rcu_stop_cpu_kthread(int cpu)
1716{
1717}
1718
1719#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1720
1721static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1722{
1723}
1724
1725static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1330{ 1726{
1331 return 0;
1332} 1727}
1333 1728
1334static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp) 1729static void __cpuinit rcu_prepare_kthreads(int cpu)
1335{ 1730{
1336} 1731}
1337 1732
@@ -1509,7 +1904,7 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
1509 * 1904 *
1510 * Because it is not legal to invoke rcu_process_callbacks() with irqs 1905 * Because it is not legal to invoke rcu_process_callbacks() with irqs
1511 * disabled, we do one pass of force_quiescent_state(), then do a 1906 * disabled, we do one pass of force_quiescent_state(), then do a
1512 * invoke_rcu_cpu_kthread() to cause rcu_process_callbacks() to be invoked 1907 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
1513 * later. The per-cpu rcu_dyntick_drain variable controls the sequencing. 1908 * later. The per-cpu rcu_dyntick_drain variable controls the sequencing.
1514 */ 1909 */
1515int rcu_needs_cpu(int cpu) 1910int rcu_needs_cpu(int cpu)
@@ -1560,7 +1955,7 @@ int rcu_needs_cpu(int cpu)
1560 1955
1561 /* If RCU callbacks are still pending, RCU still needs this CPU. */ 1956 /* If RCU callbacks are still pending, RCU still needs this CPU. */
1562 if (c) 1957 if (c)
1563 invoke_rcu_cpu_kthread(); 1958 invoke_rcu_core();
1564 return c; 1959 return c;
1565} 1960}
1566 1961
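
invoke_rcu_callbacks_kthread(), now boost-only, records the pending work in a per-CPU flag with interrupts off and wakes the kthread only if it has already been spawned; early in boot the task pointer is still NULL and the flag simply waits for the kthread to start. A single-threaded model of that check, with stand-in types:

#include <stddef.h>
#include <stdio.h>

struct task { const char *name; };

static int has_work;
static struct task *kthread_task;	/* NULL until spawned */

static void wake(struct task *t) { printf("wake %s\n", t->name); }

/* Model of invoke_rcu_callbacks_kthread(): flag first, wake second. */
static void invoke_callbacks_kthread(void)
{
	has_work = 1;			/* done with irqs off in the kernel */
	if (kthread_task == NULL)
		return;			/* not spawned yet: flag stays set */
	wake(kthread_task);
}

int main(void)
{
	struct task rcuc = { "rcuc0" };

	invoke_callbacks_kthread();	/* early boot: no wakeup */
	kthread_task = &rcuc;
	invoke_callbacks_kthread();	/* normal case: wake rcuc0 */
	printf("has_work=%d\n", has_work);
	return 0;
}
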
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 9678cc3650f5..4e144876dc68 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -46,6 +46,8 @@
46#define RCU_TREE_NONCORE 46#define RCU_TREE_NONCORE
47#include "rcutree.h" 47#include "rcutree.h"
48 48
49#ifdef CONFIG_RCU_BOOST
50
49DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); 51DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
50DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu); 52DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu);
51DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); 53DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
@@ -58,6 +60,8 @@ static char convert_kthread_status(unsigned int kthread_status)
58 return "SRWOY"[kthread_status]; 60 return "SRWOY"[kthread_status];
59} 61}
60 62
63#endif /* #ifdef CONFIG_RCU_BOOST */
64
61static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) 65static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
62{ 66{
63 if (!rdp->beenonline) 67 if (!rdp->beenonline)
@@ -76,7 +80,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
76 rdp->dynticks_fqs); 80 rdp->dynticks_fqs);
77#endif /* #ifdef CONFIG_NO_HZ */ 81#endif /* #ifdef CONFIG_NO_HZ */
78 seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi); 82 seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
79 seq_printf(m, " ql=%ld qs=%c%c%c%c kt=%d/%c/%d ktl=%x b=%ld", 83 seq_printf(m, " ql=%ld qs=%c%c%c%c",
80 rdp->qlen, 84 rdp->qlen,
81 ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != 85 ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
82 rdp->nxttail[RCU_NEXT_TAIL]], 86 rdp->nxttail[RCU_NEXT_TAIL]],
@@ -84,13 +88,16 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
84 rdp->nxttail[RCU_NEXT_READY_TAIL]], 88 rdp->nxttail[RCU_NEXT_READY_TAIL]],
85 ".W"[rdp->nxttail[RCU_DONE_TAIL] != 89 ".W"[rdp->nxttail[RCU_DONE_TAIL] !=
86 rdp->nxttail[RCU_WAIT_TAIL]], 90 rdp->nxttail[RCU_WAIT_TAIL]],
87 ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]], 91 ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
92#ifdef CONFIG_RCU_BOOST
93 seq_printf(m, " kt=%d/%c/%d ktl=%x",
88 per_cpu(rcu_cpu_has_work, rdp->cpu), 94 per_cpu(rcu_cpu_has_work, rdp->cpu),
89 convert_kthread_status(per_cpu(rcu_cpu_kthread_status, 95 convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
90 rdp->cpu)), 96 rdp->cpu)),
91 per_cpu(rcu_cpu_kthread_cpu, rdp->cpu), 97 per_cpu(rcu_cpu_kthread_cpu, rdp->cpu),
92 per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff, 98 per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff);
93 rdp->blimit); 99#endif /* #ifdef CONFIG_RCU_BOOST */
100 seq_printf(m, " b=%ld", rdp->blimit);
94 seq_printf(m, " ci=%lu co=%lu ca=%lu\n", 101 seq_printf(m, " ci=%lu co=%lu ca=%lu\n",
95 rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); 102 rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
96} 103}
@@ -147,18 +154,21 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
147 rdp->dynticks_fqs); 154 rdp->dynticks_fqs);
148#endif /* #ifdef CONFIG_NO_HZ */ 155#endif /* #ifdef CONFIG_NO_HZ */
149 seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi); 156 seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
150 seq_printf(m, ",%ld,\"%c%c%c%c\",%d,\"%c\",%ld", rdp->qlen, 157 seq_printf(m, ",%ld,\"%c%c%c%c\"", rdp->qlen,
151 ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != 158 ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
152 rdp->nxttail[RCU_NEXT_TAIL]], 159 rdp->nxttail[RCU_NEXT_TAIL]],
153 ".R"[rdp->nxttail[RCU_WAIT_TAIL] != 160 ".R"[rdp->nxttail[RCU_WAIT_TAIL] !=
154 rdp->nxttail[RCU_NEXT_READY_TAIL]], 161 rdp->nxttail[RCU_NEXT_READY_TAIL]],
155 ".W"[rdp->nxttail[RCU_DONE_TAIL] != 162 ".W"[rdp->nxttail[RCU_DONE_TAIL] !=
156 rdp->nxttail[RCU_WAIT_TAIL]], 163 rdp->nxttail[RCU_WAIT_TAIL]],
157 ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]], 164 ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
165#ifdef CONFIG_RCU_BOOST
166 seq_printf(m, ",%d,\"%c\"",
158 per_cpu(rcu_cpu_has_work, rdp->cpu), 167 per_cpu(rcu_cpu_has_work, rdp->cpu),
159 convert_kthread_status(per_cpu(rcu_cpu_kthread_status, 168 convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
160 rdp->cpu)), 169 rdp->cpu)));
161 rdp->blimit); 170#endif /* #ifdef CONFIG_RCU_BOOST */
171 seq_printf(m, ",%ld", rdp->blimit);
162 seq_printf(m, ",%lu,%lu,%lu\n", 172 seq_printf(m, ",%lu,%lu,%lu\n",
163 rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); 173 rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
164} 174}
@@ -169,7 +179,11 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
169#ifdef CONFIG_NO_HZ 179#ifdef CONFIG_NO_HZ
170 seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\","); 180 seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
171#endif /* #ifdef CONFIG_NO_HZ */ 181#endif /* #ifdef CONFIG_NO_HZ */
172 seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n"); 182 seq_puts(m, "\"of\",\"ri\",\"ql\",\"qs\"");
183#ifdef CONFIG_RCU_BOOST
184 seq_puts(m, "\"kt\",\"ktl\"");
185#endif /* #ifdef CONFIG_RCU_BOOST */
186 seq_puts(m, ",\"b\",\"ci\",\"co\",\"ca\"\n");
173#ifdef CONFIG_TREE_PREEMPT_RCU 187#ifdef CONFIG_TREE_PREEMPT_RCU
174 seq_puts(m, "\"rcu_preempt:\"\n"); 188 seq_puts(m, "\"rcu_preempt:\"\n");
175 PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m); 189 PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m);
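
The tracing hunks split each seq_printf() so the kthread columns appear only under CONFIG_RCU_BOOST, and the CSV header gains the matching "kt"/"ktl" columns under the same guard; header and rows must gate identical columns or the CSV becomes unparseable. A sketch of that invariant, where FEATURE_BOOST is a placeholder toggled with -DFEATURE_BOOST:

#include <stdio.h>

/* Header and data rows gate the same columns on the same symbol. */
static void print_header(void)
{
	printf("\"cpu\",\"ql\"");
#ifdef FEATURE_BOOST
	printf(",\"kt\"");
#endif
	printf(",\"b\"\n");
}

static void print_row(int cpu, long ql, int kt, long blimit)
{
	printf("%d,%ld", cpu, ql);
#ifdef FEATURE_BOOST
	printf(",%d", kt);
#else
	(void)kt;
#endif
	printf(",%ld\n", blimit);
}

int main(void)
{
	print_header();
	print_row(0, 42, 1, 10);	/* same shape as the header */
	return 0;
}
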
diff --git a/kernel/smp.c b/kernel/smp.c
index 73a195193558..fb67dfa8394e 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -74,7 +74,7 @@ static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
74 .notifier_call = hotplug_cfd, 74 .notifier_call = hotplug_cfd,
75}; 75};
76 76
77static int __cpuinit init_call_single_data(void) 77void __init call_function_init(void)
78{ 78{
79 void *cpu = (void *)(long)smp_processor_id(); 79 void *cpu = (void *)(long)smp_processor_id();
80 int i; 80 int i;
@@ -88,10 +88,7 @@ static int __cpuinit init_call_single_data(void)
88 88
89 hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu); 89 hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
90 register_cpu_notifier(&hotplug_cfd_notifier); 90 register_cpu_notifier(&hotplug_cfd_notifier);
91
92 return 0;
93} 91}
94early_initcall(init_call_single_data);
95 92
96/* 93/*
97 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources 94 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
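
Together with the start_kernel() hunk earlier, this turns the call-single-data setup from an early_initcall into an explicit call_function_init() placed before the point where interrupts may be taken; initcalls run too late for infrastructure that an early IPI could touch. A toy ordering sketch, with placeholder function bodies:

#include <stdio.h>

static void call_function_init(void) { printf("smp call-function data ready\n"); }
static void enable_interrupts(void)  { printf("interrupts on\n"); }
static void run_initcalls(void)      { printf("initcalls (too late for early IPIs)\n"); }

int main(void)
{
	/* Explicit call pins the ordering; an initcall would float last. */
	call_function_init();
	enable_interrupts();
	run_initcalls();
	return 0;
}
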
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 13960170cad4..40cf63ddd4b3 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -58,7 +58,7 @@ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
58 58
59char *softirq_to_name[NR_SOFTIRQS] = { 59char *softirq_to_name[NR_SOFTIRQS] = {
60 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", 60 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
61 "TASKLET", "SCHED", "HRTIMER" 61 "TASKLET", "SCHED", "HRTIMER", "RCU"
62}; 62};
63 63
64/* 64/*
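
softirq_to_name[] is indexed by the softirq number, so adding the RCU softirq means extending this table and the softirq_name() tracepoint list (see the irq.h hunk earlier) in lockstep. A compile-and-run sketch of the pairing, using the same names; the enum here is a simplified stand-in for the kernel's:

#include <stdio.h>

enum { HI, TIMER, NET_TX, NET_RX, BLOCK, BLOCK_IOPOLL,
       TASKLET, SCHED, HRTIMER, RCU, NR_SOFTIRQS };

/* One entry per enum value; a missing entry prints as (null). */
static const char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

int main(void)
{
	for (int i = 0; i < NR_SOFTIRQS; i++)
		printf("%d: %s\n", i, softirq_to_name[i]);
	return 0;
}
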
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 2d966244ea60..59f369f98a04 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -42,15 +42,75 @@ static struct alarm_base {
42 clockid_t base_clockid; 42 clockid_t base_clockid;
43} alarm_bases[ALARM_NUMTYPE]; 43} alarm_bases[ALARM_NUMTYPE];
44 44
45/* freezer delta & lock used to handle clock_nanosleep triggered wakeups */
46static ktime_t freezer_delta;
47static DEFINE_SPINLOCK(freezer_delta_lock);
48
45#ifdef CONFIG_RTC_CLASS 49#ifdef CONFIG_RTC_CLASS
46/* rtc timer and device for setting alarm wakeups at suspend */ 50/* rtc timer and device for setting alarm wakeups at suspend */
47static struct rtc_timer rtctimer; 51static struct rtc_timer rtctimer;
48static struct rtc_device *rtcdev; 52static struct rtc_device *rtcdev;
49#endif 53static DEFINE_SPINLOCK(rtcdev_lock);
50 54
51/* freezer delta & lock used to handle clock_nanosleep triggered wakeups */ 55/**
52static ktime_t freezer_delta; 56 * has_wakealarm - check rtc device has wakealarm ability
53static DEFINE_SPINLOCK(freezer_delta_lock); 57 * @dev: current device
58 * @name_ptr: name to be returned
59 *
60 * This helper function checks to see if the rtc device can wake
61 * from suspend.
62 */
63static int has_wakealarm(struct device *dev, void *name_ptr)
64{
65 struct rtc_device *candidate = to_rtc_device(dev);
66
67 if (!candidate->ops->set_alarm)
68 return 0;
69 if (!device_may_wakeup(candidate->dev.parent))
70 return 0;
71
72 *(const char **)name_ptr = dev_name(dev);
73 return 1;
74}
75
76/**
 77 * alarmtimer_get_rtcdev - Return selected rtc device
78 *
79 * This function returns the rtc device to use for wakealarms.
80 * If one has not already been chosen, it checks to see if a
81 * functional rtc device is available.
82 */
83static struct rtc_device *alarmtimer_get_rtcdev(void)
84{
85 struct device *dev;
86 char *str;
87 unsigned long flags;
88 struct rtc_device *ret;
89
90 spin_lock_irqsave(&rtcdev_lock, flags);
91 if (!rtcdev) {
92 /* Find an rtc device and init the rtc_timer */
93 dev = class_find_device(rtc_class, NULL, &str, has_wakealarm);
94 /* If we have a device then str is valid. See has_wakealarm() */
95 if (dev) {
96 rtcdev = rtc_class_open(str);
97 /*
98 * Drop the reference we got in class_find_device,
99 * rtc_open takes its own.
100 */
101 put_device(dev);
102 rtc_timer_init(&rtctimer, NULL, NULL);
103 }
104 }
105 ret = rtcdev;
106 spin_unlock_irqrestore(&rtcdev_lock, flags);
107
108 return ret;
109}
110#else
111#define alarmtimer_get_rtcdev() (0)
112#define rtcdev (0)
113#endif
54 114
55 115
56/** 116/**
@@ -166,6 +226,7 @@ static int alarmtimer_suspend(struct device *dev)
166 struct rtc_time tm; 226 struct rtc_time tm;
167 ktime_t min, now; 227 ktime_t min, now;
168 unsigned long flags; 228 unsigned long flags;
229 struct rtc_device *rtc;
169 int i; 230 int i;
170 231
171 spin_lock_irqsave(&freezer_delta_lock, flags); 232 spin_lock_irqsave(&freezer_delta_lock, flags);
@@ -173,8 +234,9 @@ static int alarmtimer_suspend(struct device *dev)
173 freezer_delta = ktime_set(0, 0); 234 freezer_delta = ktime_set(0, 0);
174 spin_unlock_irqrestore(&freezer_delta_lock, flags); 235 spin_unlock_irqrestore(&freezer_delta_lock, flags);
175 236
237 rtc = rtcdev;
176 /* If we have no rtcdev, just return */ 238 /* If we have no rtcdev, just return */
177 if (!rtcdev) 239 if (!rtc)
178 return 0; 240 return 0;
179 241
180 /* Find the soonest timer to expire*/ 242 /* Find the soonest timer to expire*/
@@ -199,12 +261,12 @@ static int alarmtimer_suspend(struct device *dev)
199 WARN_ON(min.tv64 < NSEC_PER_SEC); 261 WARN_ON(min.tv64 < NSEC_PER_SEC);
200 262
201 /* Setup an rtc timer to fire that far in the future */ 263 /* Setup an rtc timer to fire that far in the future */
202 rtc_timer_cancel(rtcdev, &rtctimer); 264 rtc_timer_cancel(rtc, &rtctimer);
203 rtc_read_time(rtcdev, &tm); 265 rtc_read_time(rtc, &tm);
204 now = rtc_tm_to_ktime(tm); 266 now = rtc_tm_to_ktime(tm);
205 now = ktime_add(now, min); 267 now = ktime_add(now, min);
206 268
207 rtc_timer_start(rtcdev, &rtctimer, now, ktime_set(0, 0)); 269 rtc_timer_start(rtc, &rtctimer, now, ktime_set(0, 0));
208 270
209 return 0; 271 return 0;
210} 272}
@@ -322,6 +384,9 @@ static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp)
322{ 384{
323 clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid; 385 clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid;
324 386
387 if (!alarmtimer_get_rtcdev())
388 return -ENOTSUPP;
389
325 return hrtimer_get_res(baseid, tp); 390 return hrtimer_get_res(baseid, tp);
326} 391}
327 392
@@ -336,6 +401,9 @@ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp)
336{ 401{
337 struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)]; 402 struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)];
338 403
404 if (!alarmtimer_get_rtcdev())
405 return -ENOTSUPP;
406
339 *tp = ktime_to_timespec(base->gettime()); 407 *tp = ktime_to_timespec(base->gettime());
340 return 0; 408 return 0;
341} 409}
@@ -351,6 +419,9 @@ static int alarm_timer_create(struct k_itimer *new_timer)
351 enum alarmtimer_type type; 419 enum alarmtimer_type type;
352 struct alarm_base *base; 420 struct alarm_base *base;
353 421
422 if (!alarmtimer_get_rtcdev())
423 return -ENOTSUPP;
424
354 if (!capable(CAP_WAKE_ALARM)) 425 if (!capable(CAP_WAKE_ALARM))
355 return -EPERM; 426 return -EPERM;
356 427
@@ -385,6 +456,9 @@ static void alarm_timer_get(struct k_itimer *timr,
385 */ 456 */
386static int alarm_timer_del(struct k_itimer *timr) 457static int alarm_timer_del(struct k_itimer *timr)
387{ 458{
459 if (!rtcdev)
460 return -ENOTSUPP;
461
388 alarm_cancel(&timr->it.alarmtimer); 462 alarm_cancel(&timr->it.alarmtimer);
389 return 0; 463 return 0;
390} 464}
@@ -402,6 +476,9 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
402 struct itimerspec *new_setting, 476 struct itimerspec *new_setting,
403 struct itimerspec *old_setting) 477 struct itimerspec *old_setting)
404{ 478{
479 if (!rtcdev)
480 return -ENOTSUPP;
481
405 /* Save old values */ 482 /* Save old values */
406 old_setting->it_interval = 483 old_setting->it_interval =
407 ktime_to_timespec(timr->it.alarmtimer.period); 484 ktime_to_timespec(timr->it.alarmtimer.period);
@@ -541,6 +618,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
541 int ret = 0; 618 int ret = 0;
542 struct restart_block *restart; 619 struct restart_block *restart;
543 620
621 if (!alarmtimer_get_rtcdev())
622 return -ENOTSUPP;
623
544 if (!capable(CAP_WAKE_ALARM)) 624 if (!capable(CAP_WAKE_ALARM))
545 return -EPERM; 625 return -EPERM;
546 626
@@ -638,65 +718,3 @@ static int __init alarmtimer_init(void)
638} 718}
639device_initcall(alarmtimer_init); 719device_initcall(alarmtimer_init);
640 720
641#ifdef CONFIG_RTC_CLASS
642/**
643 * has_wakealarm - check rtc device has wakealarm ability
644 * @dev: current device
645 * @name_ptr: name to be returned
646 *
647 * This helper function checks to see if the rtc device can wake
648 * from suspend.
649 */
650static int __init has_wakealarm(struct device *dev, void *name_ptr)
651{
652 struct rtc_device *candidate = to_rtc_device(dev);
653
654 if (!candidate->ops->set_alarm)
655 return 0;
656 if (!device_may_wakeup(candidate->dev.parent))
657 return 0;
658
659 *(const char **)name_ptr = dev_name(dev);
660 return 1;
661}
662
663/**
664 * alarmtimer_init_late - Late initializing of alarmtimer code
665 *
666 * This function locates a rtc device to use for wakealarms.
667 * Run as late_initcall to make sure rtc devices have been
668 * registered.
669 */
670static int __init alarmtimer_init_late(void)
671{
672 struct device *dev;
673 char *str;
674
675 /* Find an rtc device and init the rtc_timer */
676 dev = class_find_device(rtc_class, NULL, &str, has_wakealarm);
677 /* If we have a device then str is valid. See has_wakealarm() */
678 if (dev) {
679 rtcdev = rtc_class_open(str);
680 /*
681 * Drop the reference we got in class_find_device,
682 * rtc_open takes its own.
683 */
684 put_device(dev);
685 }
686 if (!rtcdev) {
687 printk(KERN_WARNING "No RTC device found, ALARM timers will"
688 " not wake from suspend");
689 }
690 rtc_timer_init(&rtctimer, NULL, NULL);
691
692 return 0;
693}
694#else
695static int __init alarmtimer_init_late(void)
696{
697 printk(KERN_WARNING "Kernel not built with RTC support, ALARM timers"
698 " will not wake from suspend");
699 return 0;
700}
701#endif
702late_initcall(alarmtimer_init_late);
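
The alarmtimer rework replaces the late_initcall lookup with alarmtimer_get_rtcdev(): the first caller searches for a wakeup-capable RTC under rtcdev_lock and caches it, later callers receive the cached pointer, and every timer entry point can now fail cleanly with -ENOTSUPP when no device exists. The lazy-lookup shape in userspace terms; find_device() is hypothetical, and the sketch links with -lpthread:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *cached_dev;	/* plays the role of rtcdev */

static const char *find_device(void) { return "rtc0"; }

/* Lazy, lock-protected lookup: the search runs at most once. */
static const char *get_dev(void)
{
	const char *ret;

	pthread_mutex_lock(&dev_lock);
	if (!cached_dev)
		cached_dev = find_device();
	ret = cached_dev;
	pthread_mutex_unlock(&dev_lock);
	return ret;
}

int main(void)
{
	printf("first:  %s\n", get_dev());
	printf("second: %s\n", get_dev());	/* served from cache */
	return 0;
}
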
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 1c95fd677328..e0980f0d9a0a 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -185,7 +185,6 @@ static struct clocksource *watchdog;
185static struct timer_list watchdog_timer; 185static struct timer_list watchdog_timer;
186static DECLARE_WORK(watchdog_work, clocksource_watchdog_work); 186static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
187static DEFINE_SPINLOCK(watchdog_lock); 187static DEFINE_SPINLOCK(watchdog_lock);
188static cycle_t watchdog_last;
189static int watchdog_running; 188static int watchdog_running;
190 189
191static int clocksource_watchdog_kthread(void *data); 190static int clocksource_watchdog_kthread(void *data);
@@ -254,11 +253,6 @@ static void clocksource_watchdog(unsigned long data)
254 if (!watchdog_running) 253 if (!watchdog_running)
255 goto out; 254 goto out;
256 255
257 wdnow = watchdog->read(watchdog);
258 wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
259 watchdog->mult, watchdog->shift);
260 watchdog_last = wdnow;
261
262 list_for_each_entry(cs, &watchdog_list, wd_list) { 256 list_for_each_entry(cs, &watchdog_list, wd_list) {
263 257
264 /* Clocksource already marked unstable? */ 258 /* Clocksource already marked unstable? */
@@ -268,19 +262,28 @@ static void clocksource_watchdog(unsigned long data)
 			continue;
 		}
 
+		local_irq_disable();
 		csnow = cs->read(cs);
+		wdnow = watchdog->read(watchdog);
+		local_irq_enable();
 
 		/* Clocksource initialized ? */
 		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
 			cs->flags |= CLOCK_SOURCE_WATCHDOG;
-			cs->wd_last = csnow;
+			cs->wd_last = wdnow;
+			cs->cs_last = csnow;
 			continue;
 		}
 
-		/* Check the deviation from the watchdog clocksource. */
-		cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) &
+		wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask,
+					     watchdog->mult, watchdog->shift);
+
+		cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) &
 					     cs->mask, cs->mult, cs->shift);
-		cs->wd_last = csnow;
+		cs->cs_last = csnow;
+		cs->wd_last = wdnow;
+
+		/* Check the deviation from the watchdog clocksource. */
 		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
 			clocksource_unstable(cs, cs_nsec - wd_nsec);
 			continue;
@@ -318,7 +321,6 @@ static inline void clocksource_start_watchdog(void)
 		return;
 	init_timer(&watchdog_timer);
 	watchdog_timer.function = clocksource_watchdog;
-	watchdog_last = watchdog->read(watchdog);
 	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
 	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
 	watchdog_running = 1;
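The watchdog rework above keeps a per-clocksource pair of last samples (cs_last, wd_last) and always forms deltas as (now - last) & mask before converting with the (mult, shift) pair. A self-contained userspace sketch of that wraparound-safe arithmetic, with invented constants:

#include <stdint.h>
#include <stdio.h>

/* ns = cyc * mult / 2^shift, as in clocksource_cyc2ns() */
static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
        return (cycles * mult) >> shift;
}

int main(void)
{
        uint64_t mask = 0xffffffffULL;          /* 32-bit counter */
        uint32_t mult = 4194304, shift = 22;    /* 2^22/2^22 = 1 ns per cycle */
        uint64_t last = 0xfffffff0ULL;          /* just before wrap */
        uint64_t now  = 0x00000010ULL;          /* just after wrap */

        /* Masking makes the delta 0x20 cycles, not a huge bogus value. */
        uint64_t delta = (now - last) & mask;

        printf("delta=%llu cycles, %llu ns\n",
               (unsigned long long)delta,
               (unsigned long long)cyc2ns(delta, mult, shift));
        return 0;
}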
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index dff763b7baf1..1f06468a10d7 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -240,13 +240,10 @@ static const char **find_next(void *v, loff_t *pos)
 	const char **fmt = v;
 	int start_index;
 
-	if (!fmt)
-		fmt = __start___trace_bprintk_fmt + *pos;
-
 	start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt;
 
 	if (*pos < start_index)
-		return fmt;
+		return __start___trace_bprintk_fmt + *pos;
 
 	return find_next_mod_format(start_index, v, fmt, pos);
 }
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 02159c755136..c46887b5a11e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -498,7 +498,9 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 	 * The node we allocated has no zone fallback lists. For avoiding
 	 * to access not-initialized zonelist, build here.
 	 */
+	mutex_lock(&zonelists_mutex);
 	build_all_zonelists(NULL);
+	mutex_unlock(&zonelists_mutex);
 
 	return pgdat;
 }
@@ -521,7 +523,7 @@ int mem_online_node(int nid)
 
 	lock_memory_hotplug();
 	pgdat = hotadd_new_pgdat(nid, 0);
-	if (pgdat) {
+	if (!pgdat) {
 		ret = -ENOMEM;
 		goto out;
 	}
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index c7a581a96894..917ecb93ea28 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -205,7 +205,7 @@ int register_vlan_dev(struct net_device *dev)
 	grp->nr_vlans++;
 
 	if (ngrp) {
-		if (ops->ndo_vlan_rx_register)
+		if (ops->ndo_vlan_rx_register && (real_dev->features & NETIF_F_HW_VLAN_RX))
 			ops->ndo_vlan_rx_register(real_dev, ngrp);
 		rcu_assign_pointer(real_dev->vlgrp, ngrp);
 	}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index f13ddbf858ba..77930aa522e3 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -477,14 +477,16 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
 	 * command otherwise */
 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
 
-	/* Events for 1.2 and newer controllers */
-	if (hdev->lmp_ver > 1) {
-		events[4] |= 0x01; /* Flow Specification Complete */
-		events[4] |= 0x02; /* Inquiry Result with RSSI */
-		events[4] |= 0x04; /* Read Remote Extended Features Complete */
-		events[5] |= 0x08; /* Synchronous Connection Complete */
-		events[5] |= 0x10; /* Synchronous Connection Changed */
-	}
+	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
+	 * any event mask for pre 1.2 devices */
+	if (hdev->lmp_ver <= 1)
+		return;
+
+	events[4] |= 0x01; /* Flow Specification Complete */
+	events[4] |= 0x02; /* Inquiry Result with RSSI */
+	events[4] |= 0x04; /* Read Remote Extended Features Complete */
+	events[5] |= 0x08; /* Synchronous Connection Complete */
+	events[5] |= 0x10; /* Synchronous Connection Changed */
 
 	if (hdev->features[3] & LMP_RSSI_INQ)
 		events[4] |= 0x04; /* Inquiry Result with RSSI */
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 18dc9888d8c2..8248303f44e8 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -413,6 +413,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
 		break;
 	}
 
+	memset(&cinfo, 0, sizeof(cinfo));
 	cinfo.hci_handle = chan->conn->hcon->handle;
 	memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
 
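The memset() added above (and the matching one in the rfcomm hunk below) closes an information leak: copying a stack struct to userspace also copies its padding bytes, which hold stale stack contents unless the whole struct is zeroed first. A userspace sketch with an invented struct layout; memcpy() stands in for copy_to_user():

#include <stdio.h>
#include <string.h>

struct conninfo {
        unsigned short handle;  /* 2 bytes, then 2 bytes of padding */
        unsigned int class;     /* aligned to 4 */
};

static void fill(struct conninfo *ci, int scrub)
{
        if (scrub)
                memset(ci, 0, sizeof(*ci));     /* clears the padding too */
        ci->handle = 0x42;
        ci->class = 0x200404;
}

int main(void)
{
        unsigned char buf[sizeof(struct conninfo)];
        struct conninfo ci;

        memset(&ci, 0xAA, sizeof(ci));  /* simulate stale stack data */
        fill(&ci, 0);
        memcpy(buf, &ci, sizeof(ci));   /* stands in for copy_to_user() */
        printf("padding bytes without memset: %02x %02x\n", buf[2], buf[3]);

        memset(&ci, 0xAA, sizeof(ci));
        fill(&ci, 1);
        memcpy(buf, &ci, sizeof(ci));
        printf("padding bytes with memset:    %02x %02x\n", buf[2], buf[3]);
        return 0;
}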
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 386cfaffd4b7..1b10727ce523 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -788,6 +788,7 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
 
 	l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
 
+	memset(&cinfo, 0, sizeof(cinfo));
 	cinfo.hci_handle = conn->hcon->handle;
 	memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
 
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 42fdffd1d76c..cb4fb7837e5c 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -369,6 +369,15 @@ static void __sco_sock_close(struct sock *sk)
 
 	case BT_CONNECTED:
 	case BT_CONFIG:
+		if (sco_pi(sk)->conn) {
+			sk->sk_state = BT_DISCONN;
+			sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
+			hci_conn_put(sco_pi(sk)->conn->hcon);
+			sco_pi(sk)->conn->hcon = NULL;
+		} else
+			sco_chan_del(sk, ECONNRESET);
+		break;
+
 	case BT_CONNECT:
 	case BT_DISCONN:
 		sco_chan_del(sk, ECONNRESET);
@@ -819,7 +828,9 @@ static void sco_chan_del(struct sock *sk, int err)
 		conn->sk = NULL;
 		sco_pi(sk)->conn = NULL;
 		sco_conn_unlock(conn);
-		hci_conn_put(conn->hcon);
+
+		if (conn->hcon)
+			hci_conn_put(conn->hcon);
 	}
 
 	sk->sk_state = BT_CLOSED;
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index a6b2f86378c7..c188c803c09c 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -243,6 +243,7 @@ int br_netpoll_enable(struct net_bridge_port *p)
 		goto out;
 
 	np->dev = p->dev;
+	strlcpy(np->dev_name, p->dev->name, IFNAMSIZ);
 
 	err = __netpoll_setup(np);
 	if (err) {
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 2f14eafdeeab..29b9812c8da0 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1424,7 +1424,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
 	switch (ih->type) {
 	case IGMP_HOST_MEMBERSHIP_REPORT:
 	case IGMPV2_HOST_MEMBERSHIP_REPORT:
-		BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
+		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
 		err = br_ip4_multicast_add_group(br, port, ih->group);
 		break;
 	case IGMPV3_HOST_MEMBERSHIP_REPORT:
@@ -1543,7 +1543,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 			goto out;
 		}
 		mld = (struct mld_msg *)skb_transport_header(skb2);
-		BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
+		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
 		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca);
 		break;
 	}
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 3a66b8c10e09..c23979e79dfa 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -255,7 +255,7 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 
 	if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) {
 
-		if ((ctrl == _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND ||
+		if ((ctrl == _CAIF_CTRLCMD_PHYIF_DOWN_IND ||
 		     ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) &&
 		    layer->id != 0) {
 
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index ed0eab39f531..02548b292b53 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -44,7 +44,7 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
 	pr_debug("%s\n", __func__);
 
 	if (!buf)
-		goto out;
+		return -EMSGSIZE;
 
 	hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
 		IEEE802154_LIST_PHY);
@@ -65,6 +65,7 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
 			pages * sizeof(uint32_t), buf);
 
 	mutex_unlock(&phy->pib_lock);
+	kfree(buf);
 	return genlmsg_end(msg, hdr);
 
 nla_put_failure:
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 9c1926027a26..eae1f676f870 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -676,6 +676,7 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
 
 	lock_sock(sk2);
 
+	sock_rps_record_flow(sk2);
 	WARN_ON(!((1 << sk2->sk_state) &
 		  (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
 
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 6ffe94ca5bc9..3267d3898437 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -437,7 +437,7 @@ static int valid_cc(const void *bc, int len, int cc)
 		return 0;
 	if (cc == len)
 		return 1;
-	if (op->yes < 4)
+	if (op->yes < 4 || op->yes & 3)
 		return 0;
 	len -= op->yes;
 	bc += op->yes;
@@ -447,11 +447,11 @@ static int valid_cc(const void *bc, int len, int cc)
 
 static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
 {
-	const unsigned char *bc = bytecode;
+	const void *bc = bytecode;
 	int len = bytecode_len;
 
 	while (len > 0) {
-		struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)bc;
+		const struct inet_diag_bc_op *op = bc;
 
 //printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
 		switch (op->code) {
@@ -462,22 +462,20 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
 		case INET_DIAG_BC_S_LE:
 		case INET_DIAG_BC_D_GE:
 		case INET_DIAG_BC_D_LE:
-			if (op->yes < 4 || op->yes > len + 4)
-				return -EINVAL;
 		case INET_DIAG_BC_JMP:
-			if (op->no < 4 || op->no > len + 4)
+			if (op->no < 4 || op->no > len + 4 || op->no & 3)
 				return -EINVAL;
 			if (op->no < len &&
 			    !valid_cc(bytecode, bytecode_len, len - op->no))
 				return -EINVAL;
 			break;
 		case INET_DIAG_BC_NOP:
-			if (op->yes < 4 || op->yes > len + 4)
-				return -EINVAL;
 			break;
 		default:
 			return -EINVAL;
 		}
+		if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
+			return -EINVAL;
 		bc += op->yes;
 		len -= op->yes;
 	}
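The audit above now enforces three invariants on every bytecode jump offset: at least one op header long, not past the end of the program, and 4-byte aligned (the new `& 3` tests), so the next header is always read at an aligned offset. A simplified userspace sketch of that validator, with stand-in structures and codes:

#include <stdio.h>

struct bc_op {
        unsigned char code;
        unsigned char pad;
        unsigned short yes;     /* offset to the next op */
};

static int audit(const unsigned char *bc, int len)
{
        while (len > 0) {
                const struct bc_op *op = (const struct bc_op *)bc;

                /* too short, past the end, or misaligned: reject */
                if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
                        return -1;
                bc  += op->yes;
                len -= op->yes;
        }
        return 0;
}

int main(void)
{
        struct bc_op good[2] = { { 0, 0, 4 }, { 0, 0, 4 } };
        struct bc_op bad     = { 0, 0, 6 };     /* misaligned offset */

        printf("good: %d\n", audit((unsigned char *)good, sizeof(good)));
        printf("bad:  %d\n", audit((unsigned char *)&bad, sizeof(bad)));
        return 0;
}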
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index f7f9bd7ba12d..5c9b9d963918 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -203,7 +203,8 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 	else
 		pmsg->outdev_name[0] = '\0';
 
-	if (entry->indev && entry->skb->dev) {
+	if (entry->indev && entry->skb->dev &&
+	    entry->skb->mac_header != entry->skb->network_header) {
 		pmsg->hw_type = entry->skb->dev->type;
 		pmsg->hw_addrlen = dev_parse_header(entry->skb,
 						    pmsg->hw_addr);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 764743843503..24e556e83a3b 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -566,7 +566,7 @@ check_entry(const struct ipt_entry *e, const char *name)
 	const struct xt_entry_target *t;
 
 	if (!ip_checkentry(&e->ip)) {
-		duprintf("ip check failed %p %s.\n", e, par->match->name);
+		duprintf("ip check failed %p %s.\n", e, name);
 		return -EINVAL;
 	}
 
diff --git a/net/ipv4/netfilter/ipt_ecn.c b/net/ipv4/netfilter/ipt_ecn.c
index af6e9c778345..2b57e52c746c 100644
--- a/net/ipv4/netfilter/ipt_ecn.c
+++ b/net/ipv4/netfilter/ipt_ecn.c
@@ -25,7 +25,8 @@ MODULE_LICENSE("GPL");
 static inline bool match_ip(const struct sk_buff *skb,
 			    const struct ipt_ecn_info *einfo)
 {
-	return (ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect;
+	return ((ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect) ^
+	       !!(einfo->invert & IPT_ECN_OP_MATCH_IP);
 }
 
 static inline bool match_tcp(const struct sk_buff *skb,
@@ -76,8 +77,6 @@ static bool ecn_mt(const struct sk_buff *skb, struct xt_action_param *par)
 		return false;
 
 	if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) {
-		if (ip_hdr(skb)->protocol != IPPROTO_TCP)
-			return false;
 		if (!match_tcp(skb, info, &par->hotdrop))
 			return false;
 	}
@@ -97,7 +96,7 @@ static int ecn_mt_check(const struct xt_mtchk_param *par)
 		return -EINVAL;
 
 	if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR) &&
-	    ip->proto != IPPROTO_TCP) {
+	    (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) {
 		pr_info("cannot match TCP bits in rule for non-tcp packets\n");
 		return -EINVAL;
 	}
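match_ip() above uses the common netfilter invert idiom: XOR the boolean match result with the normalized (!!) invert bit, so the outcome flips only when inversion was requested. A small userspace sketch with invented flag values:

#include <stdbool.h>
#include <stdio.h>

#define OP_MATCH_IP 0x01

static bool match_tos(unsigned char tos, unsigned char want,
                      unsigned char invert)
{
        /* !! normalizes the masked flag to 0 or 1 before the XOR */
        return (tos == want) ^ !!(invert & OP_MATCH_IP);
}

int main(void)
{
        printf("match, no invert: %d\n", match_tos(2, 2, 0));           /* 1 */
        printf("match, invert:    %d\n", match_tos(2, 2, OP_MATCH_IP)); /* 0 */
        printf("no match, invert: %d\n", match_tos(1, 2, OP_MATCH_IP)); /* 1 */
        return 0;
}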
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index db10075dd88e..de9da21113a1 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -121,7 +121,9 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
 		return ret;
 	}
 
-	if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
+	/* adjust seqs for loopback traffic only in outgoing direction */
+	if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
+	    !nf_is_loopback_packet(skb)) {
 		typeof(nf_nat_seq_adjust_hook) seq_adjust;
 
 		seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 9aaa67165f42..39b403f854c6 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -41,7 +41,6 @@
 #include <linux/proc_fs.h>
 #include <net/sock.h>
 #include <net/ping.h>
-#include <net/icmp.h>
 #include <net/udp.h>
 #include <net/route.h>
 #include <net/inet_common.h>
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 045f0ec6a4a0..aa13ef105110 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1902,9 +1902,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
 	hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
 	rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
-	err = 0;
-	if (IS_ERR(rth))
-		err = PTR_ERR(rth);
+	return IS_ERR(rth) ? PTR_ERR(rth) : 0;
 
 e_nobufs:
 	return -ENOBUFS;
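The route.c cleanup above collapses the IS_ERR/PTR_ERR dance into one line. The convention it relies on encodes negative errno values into the top 4095 pointer values, so one return value carries either a valid pointer or an error. A userspace sketch in the spirit of include/linux/err.h; lookup() is invented for illustration:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error)
{
        return (void *)error;
}

static long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *lookup(int fail)
{
        static int slot;
        return fail ? ERR_PTR(-ENOBUFS) : &slot;
}

int main(void)
{
        void *p = lookup(1);

        /* the pattern the hunk collapses into a single return */
        printf("err = %ld\n", IS_ERR(p) ? PTR_ERR(p) : 0L);
        p = lookup(0);
        printf("err = %ld\n", IS_ERR(p) ? PTR_ERR(p) : 0L);
        return 0;
}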
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a7d6671e33b8..708dc203b034 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1589,6 +1589,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 		goto discard;
 
 	if (nsk != sk) {
+		sock_rps_save_rxhash(nsk, skb->rxhash);
 		if (tcp_child_process(sk, nsk, skb)) {
 			rsk = nsk;
 			goto reset;
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 065fe405fb58..249394863284 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -204,7 +204,8 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 	else
 		pmsg->outdev_name[0] = '\0';
 
-	if (entry->indev && entry->skb->dev) {
+	if (entry->indev && entry->skb->dev &&
+	    entry->skb->mac_header != entry->skb->network_header) {
 		pmsg->hw_type = entry->skb->dev->type;
 		pmsg->hw_addrlen = dev_parse_header(entry->skb, pmsg->hw_addr);
 	}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d1fd28711ba5..87551ca568cd 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1644,6 +1644,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	 * the new socket..
 	 */
 	if(nsk != sk) {
+		sock_rps_save_rxhash(nsk, skb->rxhash);
 		if (tcp_child_process(sk, nsk, skb))
 			goto reset;
 		if (opt_skb)
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index bf28ac2fc99b..782db275ac53 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -776,8 +776,16 @@ static void ip_vs_conn_expire(unsigned long data)
 	if (cp->control)
 		ip_vs_control_del(cp);
 
-	if (cp->flags & IP_VS_CONN_F_NFCT)
+	if (cp->flags & IP_VS_CONN_F_NFCT) {
 		ip_vs_conn_drop_conntrack(cp);
+		/* Do not access conntracks during subsys cleanup
+		 * because nf_conntrack_find_get can not be used after
+		 * conntrack cleanup for the net.
+		 */
+		smp_rmb();
+		if (ipvs->enable)
+			ip_vs_conn_drop_conntrack(cp);
+	}
 
 	ip_vs_pe_put(cp->pe);
 	kfree(cp->pe_data);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 55af2242bccd..24c28d238dcb 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1945,6 +1945,7 @@ static void __net_exit __ip_vs_dev_cleanup(struct net *net)
 {
 	EnterFunction(2);
 	net_ipvs(net)->enable = 0;	/* Disable packet reception */
+	smp_wmb();
 	__ip_vs_sync_cleanup(net);
 	LeaveFunction(2);
 }
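The smp_wmb() added here pairs with the smp_rmb() in the ip_vs_conn_expire() hunk earlier: the writer publishes enable = 0 before the teardown proceeds, and readers order their enable check against their other accesses. The IPVS pair inverts the usual polarity (the flag is cleared before teardown rather than set after setup), but the underlying guarantee is the classic release/acquire pairing sketched below in portable C11; the data and names are invented:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;                     /* plain data guarded by the flag */
static atomic_int ready;

static void *producer(void *arg)
{
        (void)arg;
        payload = 42;                   /* write the data first */
        atomic_thread_fence(memory_order_release);      /* like smp_wmb() */
        atomic_store_explicit(&ready, 1, memory_order_relaxed);
        return NULL;
}

static void *consumer(void *arg)
{
        (void)arg;
        while (!atomic_load_explicit(&ready, memory_order_relaxed))
                ;                       /* spin until the flag is visible */
        atomic_thread_fence(memory_order_acquire);      /* like smp_rmb() */
        printf("payload = %d\n", payload);      /* guaranteed to print 42 */
        return NULL;
}

int main(void)
{
        pthread_t p, c;

        pthread_create(&c, NULL, consumer, NULL);
        pthread_create(&p, NULL, producer, NULL);
        pthread_join(p, NULL);
        pthread_join(c, NULL);
        return 0;
}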
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index e0ee010935e7..2e7ccbb43ddb 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -456,7 +456,8 @@ __build_packet_message(struct nfulnl_instance *inst,
 	if (skb->mark)
 		NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark));
 
-	if (indev && skb->dev) {
+	if (indev && skb->dev &&
+	    skb->mac_header != skb->network_header) {
 		struct nfulnl_msg_packet_hw phw;
 		int len = dev_parse_header(skb, phw.hw_addr);
 		if (len > 0) {
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index b83123f12b42..fdd2fafe0a14 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -335,7 +335,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
 	if (entskb->mark)
 		NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));
 
-	if (indev && entskb->dev) {
+	if (indev && entskb->dev &&
+	    entskb->mac_header != entskb->network_header) {
 		struct nfqnl_msg_packet_hw phw;
 		int len = dev_parse_header(entskb, phw.hw_addr);
 		if (len) {
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 339ba64cce1e..5daf6cc4faea 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -577,13 +577,13 @@ retry:
 	}
 	inode = &gss_msg->inode->vfs_inode;
 	for (;;) {
-		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
 		spin_lock(&inode->i_lock);
 		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
 			break;
 		}
 		spin_unlock(&inode->i_lock);
-		if (signalled()) {
+		if (fatal_signal_pending(current)) {
 			err = -ERESTARTSYS;
 			goto out_intr;
 		}
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 0a9a2ec2e469..c3b75333b821 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -43,6 +43,7 @@
 #include <linux/sunrpc/gss_krb5.h>
 #include <linux/sunrpc/xdr.h>
 #include <linux/crypto.h>
+#include <linux/sunrpc/gss_krb5_enctypes.h>
 
 #ifdef RPC_DEBUG
 # define RPCDBG_FACILITY	RPCDBG_AUTH
@@ -750,7 +751,7 @@ static struct gss_api_mech gss_kerberos_mech = {
 	.gm_ops = &gss_kerberos_ops,
 	.gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs),
 	.gm_pfs = gss_kerberos_pfs,
-	.gm_upcall_enctypes = "18,17,16,23,3,1,2",
+	.gm_upcall_enctypes = KRB5_SUPPORTED_ENCTYPES,
 };
 
 static int __init init_kerberos_module(void)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index b84d7395535e..8c9141583d6f 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1061,7 +1061,7 @@ call_allocate(struct rpc_task *task)
 
 	dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
 
-	if (RPC_IS_ASYNC(task) || !signalled()) {
+	if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
 		task->tk_action = call_allocate;
 		rpc_delay(task, HZ>>4);
 		return;
@@ -1175,6 +1175,9 @@ call_bind_status(struct rpc_task *task)
 			status = -EOPNOTSUPP;
 			break;
 		}
+		if (task->tk_rebind_retry == 0)
+			break;
+		task->tk_rebind_retry--;
 		rpc_delay(task, 3*HZ);
 		goto retry_timeout;
 	case -ETIMEDOUT:
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 6b43ee7221d5..a27406b1654f 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -792,6 +792,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
 	/* Initialize retry counters */
 	task->tk_garb_retry = 2;
 	task->tk_cred_retry = 2;
+	task->tk_rebind_retry = 2;
 
 	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
 	task->tk_owner = current->tgid;
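Taken together, the two sunrpc hunks above implement a bounded-retry budget: rpc_init_task() seeds tk_rebind_retry = 2 and call_bind_status() decrements it before each delayed retry, so a persistently unregistered program can only stall a task a fixed number of times. A userspace sketch of the pattern, with invented names and error codes:

#include <stdio.h>

struct task {
        int rebind_retry;
};

static int bind_status(struct task *t, int err)
{
        switch (err) {
        case -13:               /* EACCES-like: program not registered */
                if (t->rebind_retry == 0)
                        return err;     /* give up instead of looping */
                t->rebind_retry--;
                printf("delaying, %d retries left\n", t->rebind_retry);
                return 1;               /* caller retries after a delay */
        default:
                return err;
        }
}

int main(void)
{
        struct task t = { .rebind_retry = 2 }; /* as in rpc_init_task() */
        int rc;

        while ((rc = bind_status(&t, -13)) == 1)
                ;               /* retry until the budget runs out */
        printf("final status %d\n", rc);
        return 0;
}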
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index cd1f779fa51d..1be68269e1c2 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -474,17 +474,11 @@ struct cgroup_subsys devices_subsys = {
 	.subsys_id = devices_subsys_id,
 };
 
-int devcgroup_inode_permission(struct inode *inode, int mask)
+int __devcgroup_inode_permission(struct inode *inode, int mask)
 {
 	struct dev_cgroup *dev_cgroup;
 	struct dev_whitelist_item *wh;
 
-	dev_t device = inode->i_rdev;
-	if (!device)
-		return 0;
-	if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
-		return 0;
-
 	rcu_read_lock();
 
 	dev_cgroup = task_devcgroup(current);
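The rename above turns the exported entry point into __devcgroup_inode_permission() and drops the cheap early exits, presumably so a lightweight wrapper can filter out non-device inodes before the RCU-protected whitelist walk is entered; the wrapper itself is not shown in this hunk. A hypothetical userspace rendering of that fast-path/slow-path split:

#include <stdio.h>
#include <sys/stat.h>

static int __perm_check_slow(mode_t mode)
{
        (void)mode;
        /* stands in for the rcu_read_lock() + whitelist walk */
        printf("slow path consulted\n");
        return 0;
}

static inline int perm_check(mode_t mode, dev_t rdev)
{
        if (!rdev)
                return 0;       /* not a device node at all */
        if (!S_ISBLK(mode) && !S_ISCHR(mode))
                return 0;       /* regular file, cheap early exit */
        return __perm_check_slow(mode);
}

int main(void)
{
        perm_check(S_IFREG | 0644, 0);          /* fast path, no output */
        perm_check(S_IFCHR | 0600, 0x0501);     /* takes the slow path */
        return 0;
}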
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 8e319a416eec..82465328c39b 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -469,7 +469,7 @@ static struct key *construct_key_and_link(struct key_type *type,
 	} else if (ret == -EINPROGRESS) {
 		ret = 0;
 	} else {
-		key = ERR_PTR(ret);
+		goto couldnt_alloc_key;
 	}
 
 	key_put(dest_keyring);
@@ -479,6 +479,7 @@ static struct key *construct_key_and_link(struct key_type *type,
 construction_failed:
 	key_negate_and_link(key, key_negative_timeout, NULL, NULL);
 	key_put(key);
+couldnt_alloc_key:
 	key_put(dest_keyring);
 	kleave(" = %d", ret);
 	return ERR_PTR(ret);
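The request_key fix above adds a second cleanup label so the error path releases exactly what was acquired: failures before the key exists jump past key_put(key) but still drop the destination keyring. A userspace sketch of this goto-unwind idiom, with invented resources (in the sketch the success path also falls through and releases everything):

#include <stdio.h>
#include <stdlib.h>

static int construct(int fail_early)
{
        char *keyring = malloc(16);     /* acquired first */
        char *key = NULL;
        int ret = 0;

        if (!keyring)
                return -1;

        if (fail_early) {
                ret = -1;
                goto couldnt_alloc_key; /* key was never created */
        }

        key = malloc(32);
        if (!key) {
                ret = -1;
                goto construction_failed;
        }

        printf("constructed ok\n");

construction_failed:
        free(key);                      /* safe: free(NULL) is a no-op */
couldnt_alloc_key:
        free(keyring);                  /* always owned at this point */
        return ret;
}

int main(void)
{
        construct(0);
        construct(1);
        return 0;
}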
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
index 2ca6f4f85b41..e3569bdd3b64 100644
--- a/sound/pci/asihpi/asihpi.c
+++ b/sound/pci/asihpi/asihpi.c
@@ -27,7 +27,6 @@
 #include "hpioctl.h"
 
 #include <linux/pci.h>
-#include <linux/version.h>
 #include <linux/init.h>
 #include <linux/jiffies.h>
 #include <linux/slab.h>
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 61a774b3d3cb..d21191dcfe88 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4883,7 +4883,6 @@ static const struct snd_pci_quirk alc880_cfg_tbl[] = {
 	SND_PCI_QUIRK(0x1025, 0xe309, "ULI", ALC880_3ST_DIG),
 	SND_PCI_QUIRK(0x1025, 0xe310, "ULI", ALC880_3ST),
 	SND_PCI_QUIRK(0x1039, 0x1234, NULL, ALC880_6ST_DIG),
-	SND_PCI_QUIRK(0x103c, 0x2a09, "HP", ALC880_5ST),
 	SND_PCI_QUIRK(0x1043, 0x10b3, "ASUS W1V", ALC880_ASUS_W1V),
 	SND_PCI_QUIRK(0x1043, 0x10c2, "ASUS W6A", ALC880_ASUS_DIG),
 	SND_PCI_QUIRK(0x1043, 0x10c3, "ASUS Wxx", ALC880_ASUS_DIG),
@@ -12600,6 +12599,7 @@ static const struct hda_verb alc262_toshiba_rx1_unsol_verbs[] = {
  */
 enum {
 	PINFIX_FSC_H270,
+	PINFIX_HP_Z200,
 };
 
 static const struct alc_fixup alc262_fixups[] = {
@@ -12612,9 +12612,17 @@ static const struct alc_fixup alc262_fixups[] = {
 			{ }
 		}
 	},
+	[PINFIX_HP_Z200] = {
+		.type = ALC_FIXUP_PINS,
+		.v.pins = (const struct alc_pincfg[]) {
+			{ 0x16, 0x99130120 }, /* internal speaker */
+			{ }
+		}
+	},
 };
 
 static const struct snd_pci_quirk alc262_fixup_tbl[] = {
+	SND_PCI_QUIRK(0x103c, 0x170b, "HP Z200", PINFIX_HP_Z200),
 	SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", PINFIX_FSC_H270),
 	{}
 };
@@ -12731,6 +12739,8 @@ static const struct snd_pci_quirk alc262_cfg_tbl[] = {
 		      ALC262_HP_BPC),
 	SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1500, "HP z series",
 		      ALC262_HP_BPC),
+	SND_PCI_QUIRK(0x103c, 0x170b, "HP Z200",
+		      ALC262_AUTO),
 	SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1700, "HP xw series",
 		      ALC262_HP_BPC),
 	SND_PCI_QUIRK(0x103c, 0x2800, "HP D7000", ALC262_HP_BPC_D7000_WL),
@@ -13872,7 +13882,6 @@ static const struct snd_pci_quirk alc268_cfg_tbl[] = {
 	SND_PCI_QUIRK(0x1043, 0x1205, "ASUS W7J", ALC268_3ST),
 	SND_PCI_QUIRK(0x1170, 0x0040, "ZEPTO", ALC268_ZEPTO),
 	SND_PCI_QUIRK(0x14c0, 0x0025, "COMPAL IFL90/JFL-92", ALC268_TOSHIBA),
-	SND_PCI_QUIRK(0x152d, 0x0763, "Diverse (CPR2000)", ALC268_ACER),
 	SND_PCI_QUIRK(0x152d, 0x0771, "Quanta IL1", ALC267_QUANTA_IL1),
 	{}
 };
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index c952582fb218..f43bb0eaed8b 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -745,12 +745,23 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol,
 	struct via_spec *spec = codec->spec;
 	hda_nid_t nid = kcontrol->private_value;
 	unsigned int pinsel = ucontrol->value.enumerated.item[0];
+	unsigned int parm0, parm1;
 	/* Get Independent Mode index of headphone pin widget */
 	spec->hp_independent_mode = spec->hp_independent_mode_index == pinsel
 		? 1 : 0;
-	if (spec->codec_type == VT1718S)
+	if (spec->codec_type == VT1718S) {
 		snd_hda_codec_write(codec, nid, 0,
 			AC_VERB_SET_CONNECT_SEL, pinsel ? 2 : 0);
+		/* Set correct mute switch for MW3 */
+		parm0 = spec->hp_independent_mode ?
+			AMP_IN_UNMUTE(0) : AMP_IN_MUTE(0);
+		parm1 = spec->hp_independent_mode ?
+			AMP_IN_MUTE(1) : AMP_IN_UNMUTE(1);
+		snd_hda_codec_write(codec, 0x1b, 0,
+			AC_VERB_SET_AMP_GAIN_MUTE, parm0);
+		snd_hda_codec_write(codec, 0x1b, 0,
+			AC_VERB_SET_AMP_GAIN_MUTE, parm1);
+	}
 	else
 		snd_hda_codec_write(codec, nid, 0,
 			AC_VERB_SET_CONNECT_SEL, pinsel);
@@ -4283,9 +4294,6 @@ static const struct hda_verb vt1718S_volume_init_verbs[] = {
 	{0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
 	{0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
 	{0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(5)},
-
-	/* Setup default input of Front HP to MW9 */
-	{0x28, AC_VERB_SET_CONNECT_SEL, 0x1},
 	/* PW9 PW10 Output enable */
 	{0x2d, AC_VERB_SET_PIN_WIDGET_CONTROL, AC_PINCTL_OUT_EN},
 	{0x2e, AC_VERB_SET_PIN_WIDGET_CONTROL, AC_PINCTL_OUT_EN},
@@ -4294,10 +4302,10 @@ static const struct hda_verb vt1718S_volume_init_verbs[] = {
 	/* Enable Boost Volume backdoor */
 	{0x1, 0xf88, 0x8},
 	/* MW0/1/2/3/4: un-mute index 0 (AOWx), mute index 1 (MW9) */
-	{0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
+	{0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
 	{0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
 	{0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-	{0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
+	{0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
 	{0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
 	{0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
 	{0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
@@ -4307,8 +4315,6 @@ static const struct hda_verb vt1718S_volume_init_verbs[] = {
 	/* set MUX1 = 2 (AOW4), MUX2 = 1 (AOW3) */
 	{0x34, AC_VERB_SET_CONNECT_SEL, 0x2},
 	{0x35, AC_VERB_SET_CONNECT_SEL, 0x1},
-	/* Unmute MW4's index 0 */
-	{0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
 	{ }
 };
 
@@ -4456,6 +4462,19 @@ static int vt1718S_auto_create_multi_out_ctls(struct via_spec *spec,
 			if (err < 0)
 				return err;
 		} else if (i == AUTO_SEQ_FRONT) {
+			/* add control to mixer index 0 */
+			err = via_add_control(spec, VIA_CTL_WIDGET_VOL,
+					      "Master Front Playback Volume",
+					      HDA_COMPOSE_AMP_VAL(0x21, 3, 5,
+								  HDA_INPUT));
+			if (err < 0)
+				return err;
+			err = via_add_control(spec, VIA_CTL_WIDGET_MUTE,
+					      "Master Front Playback Switch",
+					      HDA_COMPOSE_AMP_VAL(0x21, 3, 5,
+								  HDA_INPUT));
+			if (err < 0)
+				return err;
 			/* Front */
 			sprintf(name, "%s Playback Volume", chname[i]);
 			err = via_add_control(
diff --git a/sound/soc/codecs/wm8991.c b/sound/soc/codecs/wm8991.c
index 3c2ee1bb73cd..6af23d06870f 100644
--- a/sound/soc/codecs/wm8991.c
+++ b/sound/soc/codecs/wm8991.c
@@ -13,7 +13,6 @@
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/delay.h>
diff --git a/sound/soc/imx/Kconfig b/sound/soc/imx/Kconfig
index d8f130d39dd9..bb699bb55a50 100644
--- a/sound/soc/imx/Kconfig
+++ b/sound/soc/imx/Kconfig
@@ -11,9 +11,6 @@ menuconfig SND_IMX_SOC
 
 if SND_IMX_SOC
 
-config SND_MXC_SOC_SSI
-	tristate
-
 config SND_MXC_SOC_FIQ
 	tristate
 
@@ -24,7 +21,6 @@ config SND_MXC_SOC_WM1133_EV1
 	tristate "Audio on the the i.MX31ADS with WM1133-EV1 fitted"
 	depends on MACH_MX31ADS_WM1133_EV1 && EXPERIMENTAL
 	select SND_SOC_WM8350
-	select SND_MXC_SOC_SSI
 	select SND_MXC_SOC_FIQ
 	help
 	  Enable support for audio on the i.MX31ADS with the WM1133-EV1
@@ -34,7 +30,6 @@ config SND_SOC_MX27VIS_AIC32X4
 	tristate "SoC audio support for Visstrim M10 boards"
 	depends on MACH_IMX27_VISSTRIM_M10
 	select SND_SOC_TVL320AIC32X4
-	select SND_MXC_SOC_SSI
 	select SND_MXC_SOC_MX2
 	help
 	  Say Y if you want to add support for SoC audio on Visstrim SM10
@@ -44,7 +39,6 @@ config SND_SOC_PHYCORE_AC97
 	tristate "SoC Audio support for Phytec phyCORE (and phyCARD) boards"
 	depends on MACH_PCM043 || MACH_PCA100
 	select SND_SOC_WM9712
-	select SND_MXC_SOC_SSI
 	select SND_MXC_SOC_FIQ
 	help
 	  Say Y if you want to add support for SoC audio on Phytec phyCORE
@@ -57,7 +51,6 @@ config SND_SOC_EUKREA_TLV320
 		|| MACH_EUKREA_MBIMXSD35_BASEBOARD \
 		|| MACH_EUKREA_MBIMXSD51_BASEBOARD
 	select SND_SOC_TLV320AIC23
-	select SND_MXC_SOC_SSI
 	select SND_MXC_SOC_FIQ
 	help
 	  Enable I2S based access to the TLV320AIC23B codec attached
diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c
index aab7765f401a..4173b3d87f97 100644
--- a/sound/soc/imx/imx-pcm-dma-mx2.c
+++ b/sound/soc/imx/imx-pcm-dma-mx2.c
@@ -337,3 +337,5 @@ static void __exit snd_imx_pcm_exit(void)
 	platform_driver_unregister(&imx_pcm_driver);
 }
 module_exit(snd_imx_pcm_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx-pcm-audio");
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c
index 5b13feca7537..61fceb09cdb5 100644
--- a/sound/soc/imx/imx-ssi.c
+++ b/sound/soc/imx/imx-ssi.c
@@ -774,4 +774,4 @@ module_exit(imx_ssi_exit);
 MODULE_AUTHOR("Sascha Hauer, <s.hauer@pengutronix.de>");
 MODULE_DESCRIPTION("i.MX I2S/ac97 SoC Interface");
 MODULE_LICENSE("GPL");
-
+MODULE_ALIAS("platform:imx-ssi");
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index 2ce0b2d891d5..fab20a54e863 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -95,14 +95,14 @@ static int pxa2xx_soc_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
 	if (!card->dev->coherent_dma_mask)
 		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
 
-	if (dai->driver->playback.channels_min) {
+	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
 			SNDRV_PCM_STREAM_PLAYBACK);
 		if (ret)
 			goto out;
 	}
 
-	if (dai->driver->capture.channels_min) {
+	if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
 		ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
 			SNDRV_PCM_STREAM_CAPTURE);
 		if (ret)
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index c005ceb70c9d..039b9532b270 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -409,9 +409,6 @@ int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
 	codec->bulk_write_raw = snd_soc_hw_bulk_write_raw;
 
 	switch (control) {
-	case SND_SOC_CUSTOM:
-		break;
-
 	case SND_SOC_I2C:
 #if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
 		codec->hw_write = (hw_write_t)i2c_master_send;
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 032ba6398a5c..940257b5774e 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -633,7 +633,7 @@ prefix_SQ = $(subst ','\'',$(prefix))
 
 SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
 
-LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive $(EXTLIBS)
+LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive -Wl,--start-group $(EXTLIBS) -Wl,--end-group
 
 ALL_CFLAGS += $(BASIC_CFLAGS)
 ALL_CFLAGS += $(ARCH_CFLAGS)
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 1e88485c16a0..0a7ed5b5e281 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -2187,6 +2187,7 @@ static const struct flag flags[] = {
2187 { "TASKLET_SOFTIRQ", 6 }, 2187 { "TASKLET_SOFTIRQ", 6 },
2188 { "SCHED_SOFTIRQ", 7 }, 2188 { "SCHED_SOFTIRQ", 7 },
2189 { "HRTIMER_SOFTIRQ", 8 }, 2189 { "HRTIMER_SOFTIRQ", 8 },
2190 { "RCU_SOFTIRQ", 9 },
2190 2191
2191 { "HRTIMER_NORESTART", 0 }, 2192 { "HRTIMER_NORESTART", 0 },
2192 { "HRTIMER_RESTART", 1 }, 2193 { "HRTIMER_RESTART", 1 },