aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/module-signing.txt6
-rw-r--r--Documentation/networking/mpls-sysctl.txt9
-rw-r--r--Documentation/networking/scaling.txt2
-rw-r--r--Documentation/powerpc/transactional_memory.txt32
-rw-r--r--MAINTAINERS8
-rw-r--r--Makefile2
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/include/asm/barrier.h16
-rw-r--r--arch/arm64/kernel/perf_event.c9
-rw-r--r--arch/arm64/mm/dma-mapping.c9
-rw-r--r--arch/powerpc/include/uapi/asm/tm.h2
-rw-r--r--arch/powerpc/kernel/eeh.c11
-rw-r--r--arch/powerpc/kernel/entry_64.S19
-rw-r--r--arch/powerpc/kernel/idle_power7.S2
-rw-r--r--arch/powerpc/kvm/book3s_xics.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c2
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c10
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/crypto/crypt_s390.h122
-rw-r--r--arch/s390/crypto/prng.c850
-rw-r--r--arch/s390/include/asm/kexec.h3
-rw-r--r--arch/s390/include/asm/mmu.h4
-rw-r--r--arch/s390/include/asm/mmu_context.h3
-rw-r--r--arch/s390/include/asm/pgalloc.h1
-rw-r--r--arch/s390/include/asm/pgtable.h167
-rw-r--r--arch/s390/mm/hugetlbpage.c66
-rw-r--r--arch/s390/mm/pgtable.c142
-rw-r--r--arch/tile/kernel/setup.c2
-rw-r--r--arch/x86/include/asm/pvclock.h1
-rw-r--r--arch/x86/kernel/pvclock.c44
-rw-r--r--arch/x86/kvm/x86.c33
-rw-r--r--arch/x86/vdso/vclock_gettime.c34
-rw-r--r--drivers/acpi/sbs.c2
-rw-r--r--drivers/block/rbd.c5
-rw-r--r--drivers/cpuidle/cpuidle.c16
-rw-r--r--drivers/dma/Kconfig1
-rw-r--r--drivers/dma/dmaengine.c4
-rw-r--r--drivers/dma/sh/usb-dmac.c2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c8
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c6
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c25
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c53
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c30
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_mn.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c36
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c9
-rw-r--r--drivers/md/dm-ioctl.c17
-rw-r--r--drivers/md/dm.c19
-rw-r--r--drivers/net/bonding/bond_main.c12
-rw-r--r--drivers/net/bonding/bond_procfs.c1
-rw-r--r--drivers/net/bonding/bonding_priv.h25
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/usb/kvaser_usb.c2
-rw-r--r--drivers/net/ethernet/8390/etherh.c2
-rw-r--r--drivers/net/ethernet/altera/altera_msgdmahw.h5
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c43
-rw-r--r--drivers/net/ethernet/amd/Kconfig2
-rw-r--r--drivers/net/ethernet/arc/Kconfig5
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_hw.h2
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c116
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c4
-rw-r--r--drivers/net/ethernet/cadence/macb.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c5
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c5
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c6
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c4
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c38
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c4
-rw-r--r--drivers/net/ethernet/rocker/rocker.c5
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c8
-rw-r--r--drivers/net/hyperv/hyperv_net.h13
-rw-r--r--drivers/net/hyperv/netvsc.c8
-rw-r--r--drivers/net/hyperv/netvsc_drv.c47
-rw-r--r--drivers/net/hyperv/rndis_filter.c3
-rw-r--r--drivers/net/phy/mdio-gpio.c14
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c60
-rw-r--r--drivers/net/ppp/ppp_mppe.c36
-rw-r--r--drivers/net/vxlan.c6
-rw-r--r--drivers/s390/char/con3215.c2
-rw-r--r--drivers/scsi/3w-9xxx.c57
-rw-r--r--drivers/scsi/3w-9xxx.h5
-rw-r--r--drivers/scsi/3w-sas.c50
-rw-r--r--drivers/scsi/3w-sas.h4
-rw-r--r--drivers/scsi/3w-xxxx.c42
-rw-r--r--drivers/scsi/3w-xxxx.h5
-rw-r--r--drivers/scsi/aha1542.c23
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/scsi_scan.c6
-rw-r--r--drivers/sh/pm_runtime.c7
-rw-r--r--drivers/tty/serial/8250/8250_pci.c25
-rw-r--r--drivers/tty/serial/atmel_serial.c2
-rw-r--r--drivers/tty/serial/of_serial.c1
-rw-r--r--drivers/tty/serial/samsung.c5
-rw-r--r--drivers/tty/serial/serial_core.c2
-rw-r--r--drivers/tty/serial/uartlite.c11
-rw-r--r--drivers/tty/serial/xilinx_uartps.c12
-rw-r--r--drivers/tty/tty_ioctl.c3
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c94
-rw-r--r--drivers/usb/gadget/configfs.c1
-rw-r--r--drivers/usb/gadget/function/f_hid.c16
-rw-r--r--drivers/usb/gadget/function/u_serial.c5
-rw-r--r--drivers/usb/gadget/legacy/acm_ms.c10
-rw-r--r--drivers/usb/gadget/legacy/audio.c10
-rw-r--r--drivers/usb/gadget/legacy/cdc2.c10
-rw-r--r--drivers/usb/gadget/legacy/dbgp.c4
-rw-r--r--drivers/usb/gadget/legacy/ether.c12
-rw-r--r--drivers/usb/gadget/legacy/g_ffs.c2
-rw-r--r--drivers/usb/gadget/legacy/gmidi.c10
-rw-r--r--drivers/usb/gadget/legacy/hid.c12
-rw-r--r--drivers/usb/gadget/legacy/mass_storage.c6
-rw-r--r--drivers/usb/gadget/legacy/multi.c10
-rw-r--r--drivers/usb/gadget/legacy/ncm.c10
-rw-r--r--drivers/usb/gadget/legacy/nokia.c10
-rw-r--r--drivers/usb/gadget/legacy/printer.c8
-rw-r--r--drivers/usb/gadget/legacy/serial.c4
-rw-r--r--drivers/usb/gadget/legacy/tcm_usb_gadget.c2
-rw-r--r--drivers/usb/gadget/legacy/webcam.c8
-rw-r--r--drivers/usb/gadget/legacy/zero.c4
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c4
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c4
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c4
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.c4
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c4
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c4
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c4
-rw-r--r--drivers/usb/phy/phy-isp1301-omap.c2
-rw-r--r--fs/btrfs/delayed-inode.c2
-rw-r--r--fs/btrfs/extent-tree.c90
-rw-r--r--fs/btrfs/extent_io.c54
-rw-r--r--fs/btrfs/free-space-cache.c10
-rw-r--r--fs/btrfs/inode.c21
-rw-r--r--fs/btrfs/ioctl.c3
-rw-r--r--fs/btrfs/volumes.c15
-rw-r--r--fs/ext4/Kconfig9
-rw-r--r--fs/ext4/crypto_fname.c280
-rw-r--r--fs/ext4/crypto_key.c1
-rw-r--r--fs/ext4/crypto_policy.c14
-rw-r--r--fs/ext4/dir.c2
-rw-r--r--fs/ext4/ext4.h16
-rw-r--r--fs/ext4/ext4_crypto.h11
-rw-r--r--fs/ext4/extents.c15
-rw-r--r--fs/ext4/extents_status.c8
-rw-r--r--fs/ext4/inode.c2
-rw-r--r--fs/ext4/namei.c72
-rw-r--r--fs/ext4/resize.c7
-rw-r--r--fs/ext4/symlink.c2
-rw-r--r--include/acpi/actypes.h1
-rw-r--r--include/linux/kexec.h4
-rw-r--r--include/linux/netdevice.h16
-rw-r--r--include/linux/netfilter_bridge.h16
-rw-r--r--include/linux/rhashtable.h3
-rw-r--r--include/linux/rtnetlink.h2
-rw-r--r--include/linux/sched.h8
-rw-r--r--include/linux/skbuff.h1
-rw-r--r--include/linux/tty.h1
-rw-r--r--include/net/bonding.h7
-rw-r--r--include/net/inet_connection_sock.h20
-rw-r--r--include/net/request_sock.h18
-rw-r--r--include/scsi/scsi_devinfo.h1
-rw-r--r--include/sound/designware_i2s.h2
-rw-r--r--include/sound/emu10k1.h14
-rw-r--r--include/sound/soc-dapm.h2
-rw-r--r--include/sound/soc.h12
-rw-r--r--include/sound/spear_dma.h2
-rw-r--r--include/uapi/linux/virtio_ring.h2
-rw-r--r--kernel/Makefile6
-rw-r--r--kernel/bpf/core.c12
-rw-r--r--kernel/kexec.c2
-rw-r--r--kernel/sched/core.c15
-rw-r--r--kernel/sched/idle.c16
-rw-r--r--lib/rhashtable.c11
-rw-r--r--net/bridge/br_mdb.c2
-rw-r--r--net/bridge/br_netlink.c4
-rw-r--r--net/bridge/br_private.h2
-rw-r--r--net/core/dev.c12
-rw-r--r--net/core/rtnetlink.c12
-rw-r--r--net/core/skbuff.c30
-rw-r--r--net/dccp/ipv4.c3
-rw-r--r--net/dccp/ipv6.c3
-rw-r--r--net/dccp/minisocks.c3
-rw-r--r--net/dsa/dsa.c2
-rw-r--r--net/ipv4/inet_connection_sock.c34
-rw-r--r--net/ipv4/ping.c1
-rw-r--r--net/ipv4/route.c5
-rw-r--r--net/ipv4/tcp_ipv4.c3
-rw-r--r--net/ipv4/tcp_minisocks.c7
-rw-r--r--net/ipv4/tcp_output.c64
-rw-r--r--net/ipv6/ip6_gre.c9
-rw-r--r--net/ipv6/tcp_ipv6.c3
-rw-r--r--net/mpls/af_mpls.c125
-rw-r--r--net/mpls/internal.h6
-rw-r--r--net/netfilter/nf_tables_api.c3
-rw-r--r--net/netfilter/nft_reject.c2
-rw-r--r--net/netfilter/nft_reject_inet.c2
-rw-r--r--net/netlink/af_netlink.c6
-rw-r--r--net/sched/act_connmark.c2
-rw-r--r--net/tipc/bearer.c17
-rw-r--r--net/tipc/link.c16
-rw-r--r--net/tipc/server.c9
-rw-r--r--net/tipc/socket.c3
-rw-r--r--net/unix/garbage.c70
-rw-r--r--sound/pci/emu10k1/emu10k1.c6
-rw-r--r--sound/pci/emu10k1/emu10k1_callback.c4
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c21
-rw-r--r--sound/pci/emu10k1/emupcm.c2
-rw-r--r--sound/pci/emu10k1/memory.c11
-rw-r--r--sound/pci/hda/hda_codec.c24
-rw-r--r--sound/pci/hda/hda_generic.c3
-rw-r--r--sound/pci/hda/patch_realtek.c16
-rw-r--r--sound/pci/hda/thinkpad_helper.c1
-rw-r--r--sound/soc/codecs/rt5645.c13
-rw-r--r--sound/soc/codecs/rt5677.c5
-rw-r--r--sound/soc/codecs/tfa9879.c4
-rw-r--r--sound/soc/fsl/fsl_ssi.c2
-rw-r--r--sound/soc/intel/Makefile2
-rw-r--r--sound/soc/intel/baytrail/sst-baytrail-ipc.c1
-rw-r--r--sound/soc/intel/haswell/sst-haswell-ipc.c1
-rw-r--r--sound/soc/qcom/lpass-cpu.c2
-rw-r--r--sound/soc/samsung/s3c24xx-i2s.c4
-rw-r--r--sound/soc/sh/rcar/dma.c1
-rw-r--r--sound/synth/emux/emux_oss.c11
-rw-r--r--sound/synth/emux/emux_seq.c29
-rw-r--r--tools/testing/selftests/powerpc/pmu/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/tm/Makefile2
247 files changed, 2772 insertions, 1760 deletions
diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt
index 09c2382ad055..c72702ec1ded 100644
--- a/Documentation/module-signing.txt
+++ b/Documentation/module-signing.txt
@@ -119,9 +119,9 @@ Most notably, in the x509.genkey file, the req_distinguished_name section
119should be altered from the default: 119should be altered from the default:
120 120
121 [ req_distinguished_name ] 121 [ req_distinguished_name ]
122 O = Magrathea 122 #O = Unspecified company
123 CN = Glacier signing key 123 CN = Build time autogenerated kernel key
124 emailAddress = slartibartfast@magrathea.h2g2 124 #emailAddress = unspecified.user@unspecified.company
125 125
126The generated RSA key size can also be set with: 126The generated RSA key size can also be set with:
127 127
diff --git a/Documentation/networking/mpls-sysctl.txt b/Documentation/networking/mpls-sysctl.txt
index 639ddf0ece9b..9ed15f86c17c 100644
--- a/Documentation/networking/mpls-sysctl.txt
+++ b/Documentation/networking/mpls-sysctl.txt
@@ -18,3 +18,12 @@ platform_labels - INTEGER
18 18
19 Possible values: 0 - 1048575 19 Possible values: 0 - 1048575
20 Default: 0 20 Default: 0
21
22conf/<interface>/input - BOOL
23 Control whether packets can be input on this interface.
24
25 If disabled, packets will be discarded without further
26 processing.
27
28 0 - disabled (default)
29 not 0 - enabled
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
index cbfac0949635..59f4db2a0c85 100644
--- a/Documentation/networking/scaling.txt
+++ b/Documentation/networking/scaling.txt
@@ -282,7 +282,7 @@ following is true:
282 282
283- The current CPU's queue head counter >= the recorded tail counter 283- The current CPU's queue head counter >= the recorded tail counter
284 value in rps_dev_flow[i] 284 value in rps_dev_flow[i]
285- The current CPU is unset (equal to RPS_NO_CPU) 285- The current CPU is unset (>= nr_cpu_ids)
286- The current CPU is offline 286- The current CPU is offline
287 287
288After this check, the packet is sent to the (possibly updated) current 288After this check, the packet is sent to the (possibly updated) current
diff --git a/Documentation/powerpc/transactional_memory.txt b/Documentation/powerpc/transactional_memory.txt
index ba0a2a4a54ba..ded69794a5c0 100644
--- a/Documentation/powerpc/transactional_memory.txt
+++ b/Documentation/powerpc/transactional_memory.txt
@@ -74,23 +74,22 @@ Causes of transaction aborts
74Syscalls 74Syscalls
75======== 75========
76 76
77Syscalls made from within an active transaction will not be performed and the 77Performing syscalls from within transaction is not recommended, and can lead
78transaction will be doomed by the kernel with the failure code TM_CAUSE_SYSCALL 78to unpredictable results.
79| TM_CAUSE_PERSISTENT.
80 79
81Syscalls made from within a suspended transaction are performed as normal and 80Syscalls do not by design abort transactions, but beware: The kernel code will
82the transaction is not explicitly doomed by the kernel. However, what the 81not be running in transactional state. The effect of syscalls will always
83kernel does to perform the syscall may result in the transaction being doomed 82remain visible, but depending on the call they may abort your transaction as a
84by the hardware. The syscall is performed in suspended mode so any side 83side-effect, read soon-to-be-aborted transactional data that should not remain
85effects will be persistent, independent of transaction success or failure. No 84invisible, etc. If you constantly retry a transaction that constantly aborts
86guarantees are provided by the kernel about which syscalls will affect 85itself by calling a syscall, you'll have a livelock & make no progress.
87transaction success.
88 86
89Care must be taken when relying on syscalls to abort during active transactions 87Simple syscalls (e.g. sigprocmask()) "could" be OK. Even things like write()
90if the calls are made via a library. Libraries may cache values (which may 88from, say, printf() should be OK as long as the kernel does not access any
91give the appearance of success) or perform operations that cause transaction 89memory that was accessed transactionally.
92failure before entering the kernel (which may produce different failure codes). 90
93Examples are glibc's getpid() and lazy symbol resolution. 91Consider any syscalls that happen to work as debug-only -- not recommended for
92production use. Best to queue them up till after the transaction is over.
94 93
95 94
96Signals 95Signals
@@ -177,7 +176,8 @@ kernel aborted a transaction:
177 TM_CAUSE_RESCHED Thread was rescheduled. 176 TM_CAUSE_RESCHED Thread was rescheduled.
178 TM_CAUSE_TLBI Software TLB invalid. 177 TM_CAUSE_TLBI Software TLB invalid.
179 TM_CAUSE_FAC_UNAV FP/VEC/VSX unavailable trap. 178 TM_CAUSE_FAC_UNAV FP/VEC/VSX unavailable trap.
180 TM_CAUSE_SYSCALL Syscall from active transaction. 179 TM_CAUSE_SYSCALL Currently unused; future syscalls that must abort
180 transactions for consistency will use this.
181 TM_CAUSE_SIGNAL Signal delivered. 181 TM_CAUSE_SIGNAL Signal delivered.
182 TM_CAUSE_MISC Currently unused. 182 TM_CAUSE_MISC Currently unused.
183 TM_CAUSE_ALIGNMENT Alignment fault. 183 TM_CAUSE_ALIGNMENT Alignment fault.
diff --git a/MAINTAINERS b/MAINTAINERS
index 2e5bbc0d68b2..781e099495d3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3413,6 +3413,13 @@ F: drivers/gpu/drm/rcar-du/
3413F: drivers/gpu/drm/shmobile/ 3413F: drivers/gpu/drm/shmobile/
3414F: include/linux/platform_data/shmob_drm.h 3414F: include/linux/platform_data/shmob_drm.h
3415 3415
3416DRM DRIVERS FOR ROCKCHIP
3417M: Mark Yao <mark.yao@rock-chips.com>
3418L: dri-devel@lists.freedesktop.org
3419S: Maintained
3420F: drivers/gpu/drm/rockchip/
3421F: Documentation/devicetree/bindings/video/rockchip*
3422
3416DSBR100 USB FM RADIO DRIVER 3423DSBR100 USB FM RADIO DRIVER
3417M: Alexey Klimov <klimov.linux@gmail.com> 3424M: Alexey Klimov <klimov.linux@gmail.com>
3418L: linux-media@vger.kernel.org 3425L: linux-media@vger.kernel.org
@@ -10523,7 +10530,6 @@ F: include/linux/virtio_console.h
10523F: include/uapi/linux/virtio_console.h 10530F: include/uapi/linux/virtio_console.h
10524 10531
10525VIRTIO CORE, NET AND BLOCK DRIVERS 10532VIRTIO CORE, NET AND BLOCK DRIVERS
10526M: Rusty Russell <rusty@rustcorp.com.au>
10527M: "Michael S. Tsirkin" <mst@redhat.com> 10533M: "Michael S. Tsirkin" <mst@redhat.com>
10528L: virtualization@lists.linux-foundation.org 10534L: virtualization@lists.linux-foundation.org
10529S: Maintained 10535S: Maintained
diff --git a/Makefile b/Makefile
index 7ff1239f9cd2..2da553fd7fc3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 1 2PATCHLEVEL = 1
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc1 4EXTRAVERSION = -rc2
5NAME = Hurr durr I'ma sheep 5NAME = Hurr durr I'ma sheep
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 4269dba63cf1..7796af4b1d6f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -31,6 +31,7 @@ config ARM64
31 select GENERIC_EARLY_IOREMAP 31 select GENERIC_EARLY_IOREMAP
32 select GENERIC_IRQ_PROBE 32 select GENERIC_IRQ_PROBE
33 select GENERIC_IRQ_SHOW 33 select GENERIC_IRQ_SHOW
34 select GENERIC_IRQ_SHOW_LEVEL
34 select GENERIC_PCI_IOMAP 35 select GENERIC_PCI_IOMAP
35 select GENERIC_SCHED_CLOCK 36 select GENERIC_SCHED_CLOCK
36 select GENERIC_SMP_IDLE_THREAD 37 select GENERIC_SMP_IDLE_THREAD
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index a5abb0062d6e..71f19c4dc0de 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -65,6 +65,14 @@ do { \
65do { \ 65do { \
66 compiletime_assert_atomic_type(*p); \ 66 compiletime_assert_atomic_type(*p); \
67 switch (sizeof(*p)) { \ 67 switch (sizeof(*p)) { \
68 case 1: \
69 asm volatile ("stlrb %w1, %0" \
70 : "=Q" (*p) : "r" (v) : "memory"); \
71 break; \
72 case 2: \
73 asm volatile ("stlrh %w1, %0" \
74 : "=Q" (*p) : "r" (v) : "memory"); \
75 break; \
68 case 4: \ 76 case 4: \
69 asm volatile ("stlr %w1, %0" \ 77 asm volatile ("stlr %w1, %0" \
70 : "=Q" (*p) : "r" (v) : "memory"); \ 78 : "=Q" (*p) : "r" (v) : "memory"); \
@@ -81,6 +89,14 @@ do { \
81 typeof(*p) ___p1; \ 89 typeof(*p) ___p1; \
82 compiletime_assert_atomic_type(*p); \ 90 compiletime_assert_atomic_type(*p); \
83 switch (sizeof(*p)) { \ 91 switch (sizeof(*p)) { \
92 case 1: \
93 asm volatile ("ldarb %w0, %1" \
94 : "=r" (___p1) : "Q" (*p) : "memory"); \
95 break; \
96 case 2: \
97 asm volatile ("ldarh %w0, %1" \
98 : "=r" (___p1) : "Q" (*p) : "memory"); \
99 break; \
84 case 4: \ 100 case 4: \
85 asm volatile ("ldar %w0, %1" \ 101 asm volatile ("ldar %w0, %1" \
86 : "=r" (___p1) : "Q" (*p) : "memory"); \ 102 : "=r" (___p1) : "Q" (*p) : "memory"); \
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 195991dadc37..23f25acf43a9 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1310,7 +1310,7 @@ static const struct of_device_id armpmu_of_device_ids[] = {
1310 1310
1311static int armpmu_device_probe(struct platform_device *pdev) 1311static int armpmu_device_probe(struct platform_device *pdev)
1312{ 1312{
1313 int i, *irqs; 1313 int i, irq, *irqs;
1314 1314
1315 if (!cpu_pmu) 1315 if (!cpu_pmu)
1316 return -ENODEV; 1316 return -ENODEV;
@@ -1319,6 +1319,11 @@ static int armpmu_device_probe(struct platform_device *pdev)
1319 if (!irqs) 1319 if (!irqs)
1320 return -ENOMEM; 1320 return -ENOMEM;
1321 1321
1322 /* Don't bother with PPIs; they're already affine */
1323 irq = platform_get_irq(pdev, 0);
1324 if (irq >= 0 && irq_is_percpu(irq))
1325 return 0;
1326
1322 for (i = 0; i < pdev->num_resources; ++i) { 1327 for (i = 0; i < pdev->num_resources; ++i) {
1323 struct device_node *dn; 1328 struct device_node *dn;
1324 int cpu; 1329 int cpu;
@@ -1327,7 +1332,7 @@ static int armpmu_device_probe(struct platform_device *pdev)
1327 i); 1332 i);
1328 if (!dn) { 1333 if (!dn) {
1329 pr_warn("Failed to parse %s/interrupt-affinity[%d]\n", 1334 pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
1330 of_node_full_name(dn), i); 1335 of_node_full_name(pdev->dev.of_node), i);
1331 break; 1336 break;
1332 } 1337 }
1333 1338
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index ef7d112f5ce0..b0bd4e5fd5cf 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -67,8 +67,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
67 67
68 *ret_page = phys_to_page(phys); 68 *ret_page = phys_to_page(phys);
69 ptr = (void *)val; 69 ptr = (void *)val;
70 if (flags & __GFP_ZERO) 70 memset(ptr, 0, size);
71 memset(ptr, 0, size);
72 } 71 }
73 72
74 return ptr; 73 return ptr;
@@ -105,7 +104,6 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
105 struct page *page; 104 struct page *page;
106 void *addr; 105 void *addr;
107 106
108 size = PAGE_ALIGN(size);
109 page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, 107 page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
110 get_order(size)); 108 get_order(size));
111 if (!page) 109 if (!page)
@@ -113,8 +111,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
113 111
114 *dma_handle = phys_to_dma(dev, page_to_phys(page)); 112 *dma_handle = phys_to_dma(dev, page_to_phys(page));
115 addr = page_address(page); 113 addr = page_address(page);
116 if (flags & __GFP_ZERO) 114 memset(addr, 0, size);
117 memset(addr, 0, size);
118 return addr; 115 return addr;
119 } else { 116 } else {
120 return swiotlb_alloc_coherent(dev, size, dma_handle, flags); 117 return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
@@ -195,6 +192,8 @@ static void __dma_free(struct device *dev, size_t size,
195{ 192{
196 void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle)); 193 void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
197 194
195 size = PAGE_ALIGN(size);
196
198 if (!is_device_dma_coherent(dev)) { 197 if (!is_device_dma_coherent(dev)) {
199 if (__free_from_pool(vaddr, size)) 198 if (__free_from_pool(vaddr, size))
200 return; 199 return;
diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h
index 5047659815a5..5d836b7c1176 100644
--- a/arch/powerpc/include/uapi/asm/tm.h
+++ b/arch/powerpc/include/uapi/asm/tm.h
@@ -11,7 +11,7 @@
11#define TM_CAUSE_RESCHED 0xde 11#define TM_CAUSE_RESCHED 0xde
12#define TM_CAUSE_TLBI 0xdc 12#define TM_CAUSE_TLBI 0xdc
13#define TM_CAUSE_FAC_UNAV 0xda 13#define TM_CAUSE_FAC_UNAV 0xda
14#define TM_CAUSE_SYSCALL 0xd8 14#define TM_CAUSE_SYSCALL 0xd8 /* future use */
15#define TM_CAUSE_MISC 0xd6 /* future use */ 15#define TM_CAUSE_MISC 0xd6 /* future use */
16#define TM_CAUSE_SIGNAL 0xd4 16#define TM_CAUSE_SIGNAL 0xd4
17#define TM_CAUSE_ALIGNMENT 0xd2 17#define TM_CAUSE_ALIGNMENT 0xd2
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 44b480e3a5af..9ee61d15653d 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -749,21 +749,24 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
749 eeh_unfreeze_pe(pe, false); 749 eeh_unfreeze_pe(pe, false);
750 eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); 750 eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
751 eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev); 751 eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev);
752 eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
752 break; 753 break;
753 case pcie_hot_reset: 754 case pcie_hot_reset:
755 eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
754 eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); 756 eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
755 eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev); 757 eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
756 eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); 758 eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
757 eeh_ops->reset(pe, EEH_RESET_HOT); 759 eeh_ops->reset(pe, EEH_RESET_HOT);
758 break; 760 break;
759 case pcie_warm_reset: 761 case pcie_warm_reset:
762 eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
760 eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); 763 eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
761 eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev); 764 eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
762 eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); 765 eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
763 eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL); 766 eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
764 break; 767 break;
765 default: 768 default:
766 eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); 769 eeh_pe_state_clear(pe, EEH_PE_ISOLATED | EEH_PE_CFG_BLOCKED);
767 return -EINVAL; 770 return -EINVAL;
768 }; 771 };
769 772
@@ -1058,6 +1061,9 @@ void eeh_add_device_early(struct pci_dn *pdn)
1058 if (!edev || !eeh_enabled()) 1061 if (!edev || !eeh_enabled())
1059 return; 1062 return;
1060 1063
1064 if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE))
1065 return;
1066
1061 /* USB Bus children of PCI devices will not have BUID's */ 1067 /* USB Bus children of PCI devices will not have BUID's */
1062 phb = edev->phb; 1068 phb = edev->phb;
1063 if (NULL == phb || 1069 if (NULL == phb ||
@@ -1112,6 +1118,9 @@ void eeh_add_device_late(struct pci_dev *dev)
1112 return; 1118 return;
1113 } 1119 }
1114 1120
1121 if (eeh_has_flag(EEH_PROBE_MODE_DEV))
1122 eeh_ops->probe(pdn, NULL);
1123
1115 /* 1124 /*
1116 * The EEH cache might not be removed correctly because of 1125 * The EEH cache might not be removed correctly because of
1117 * unbalanced kref to the device during unplug time, which 1126 * unbalanced kref to the device during unplug time, which
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 8ca9434c40e6..afbc20019c2e 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -34,7 +34,6 @@
34#include <asm/ftrace.h> 34#include <asm/ftrace.h>
35#include <asm/hw_irq.h> 35#include <asm/hw_irq.h>
36#include <asm/context_tracking.h> 36#include <asm/context_tracking.h>
37#include <asm/tm.h>
38 37
39/* 38/*
40 * System calls. 39 * System calls.
@@ -146,24 +145,6 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
146 andi. r11,r10,_TIF_SYSCALL_DOTRACE 145 andi. r11,r10,_TIF_SYSCALL_DOTRACE
147 bne syscall_dotrace 146 bne syscall_dotrace
148.Lsyscall_dotrace_cont: 147.Lsyscall_dotrace_cont:
149#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
150BEGIN_FTR_SECTION
151 b 1f
152END_FTR_SECTION_IFCLR(CPU_FTR_TM)
153 extrdi. r11, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
154 beq+ 1f
155
156 /* Doom the transaction and don't perform the syscall: */
157 mfmsr r11
158 li r12, 1
159 rldimi r11, r12, MSR_TM_LG, 63-MSR_TM_LG
160 mtmsrd r11, 0
161 li r11, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
162 TABORT(R11)
163
164 b .Lsyscall_exit
1651:
166#endif
167 cmpldi 0,r0,NR_syscalls 148 cmpldi 0,r0,NR_syscalls
168 bge- syscall_enosys 149 bge- syscall_enosys
169 150
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index eeaa0d5f69d5..ccde8f084ce4 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -501,9 +501,11 @@ BEGIN_FTR_SECTION
501 CHECK_HMI_INTERRUPT 501 CHECK_HMI_INTERRUPT
502END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) 502END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
503 ld r1,PACAR1(r13) 503 ld r1,PACAR1(r13)
504 ld r6,_CCR(r1)
504 ld r4,_MSR(r1) 505 ld r4,_MSR(r1)
505 ld r5,_NIP(r1) 506 ld r5,_NIP(r1)
506 addi r1,r1,INT_FRAME_SIZE 507 addi r1,r1,INT_FRAME_SIZE
508 mtcr r6
507 mtspr SPRN_SRR1,r4 509 mtspr SPRN_SRR1,r4
508 mtspr SPRN_SRR0,r5 510 mtspr SPRN_SRR0,r5
509 rfid 511 rfid
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 8f3e6cc54d95..c6ca7db64673 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -12,6 +12,7 @@
12#include <linux/err.h> 12#include <linux/err.h>
13#include <linux/gfp.h> 13#include <linux/gfp.h>
14#include <linux/anon_inodes.h> 14#include <linux/anon_inodes.h>
15#include <linux/spinlock.h>
15 16
16#include <asm/uaccess.h> 17#include <asm/uaccess.h>
17#include <asm/kvm_book3s.h> 18#include <asm/kvm_book3s.h>
@@ -20,7 +21,6 @@
20#include <asm/xics.h> 21#include <asm/xics.h>
21#include <asm/debug.h> 22#include <asm/debug.h>
22#include <asm/time.h> 23#include <asm/time.h>
23#include <asm/spinlock.h>
24 24
25#include <linux/debugfs.h> 25#include <linux/debugfs.h>
26#include <linux/seq_file.h> 26#include <linux/seq_file.h>
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 920c252d1f49..f8bc950efcae 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2693,7 +2693,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
2693 hose->last_busno = 0xff; 2693 hose->last_busno = 0xff;
2694 } 2694 }
2695 hose->private_data = phb; 2695 hose->private_data = phb;
2696 hose->controller_ops = pnv_pci_controller_ops;
2697 phb->hub_id = hub_id; 2696 phb->hub_id = hub_id;
2698 phb->opal_id = phb_id; 2697 phb->opal_id = phb_id;
2699 phb->type = ioda_type; 2698 phb->type = ioda_type;
@@ -2812,6 +2811,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
2812 pnv_pci_controller_ops.enable_device_hook = pnv_pci_enable_device_hook; 2811 pnv_pci_controller_ops.enable_device_hook = pnv_pci_enable_device_hook;
2813 pnv_pci_controller_ops.window_alignment = pnv_pci_window_alignment; 2812 pnv_pci_controller_ops.window_alignment = pnv_pci_window_alignment;
2814 pnv_pci_controller_ops.reset_secondary_bus = pnv_pci_reset_secondary_bus; 2813 pnv_pci_controller_ops.reset_secondary_bus = pnv_pci_reset_secondary_bus;
2814 hose->controller_ops = pnv_pci_controller_ops;
2815 2815
2816#ifdef CONFIG_PCI_IOV 2816#ifdef CONFIG_PCI_IOV
2817 ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources; 2817 ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index b4b11096ea8b..019d34aaf054 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -412,6 +412,10 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
412 if (rc) 412 if (rc)
413 return -EINVAL; 413 return -EINVAL;
414 414
415 rc = dlpar_acquire_drc(drc_index);
416 if (rc)
417 return -EINVAL;
418
415 parent = of_find_node_by_path("/cpus"); 419 parent = of_find_node_by_path("/cpus");
416 if (!parent) 420 if (!parent)
417 return -ENODEV; 421 return -ENODEV;
@@ -422,12 +426,6 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
422 426
423 of_node_put(parent); 427 of_node_put(parent);
424 428
425 rc = dlpar_acquire_drc(drc_index);
426 if (rc) {
427 dlpar_free_cc_nodes(dn);
428 return -EINVAL;
429 }
430
431 rc = dlpar_attach_node(dn); 429 rc = dlpar_attach_node(dn);
432 if (rc) { 430 if (rc) {
433 dlpar_release_drc(drc_index); 431 dlpar_release_drc(drc_index);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 8e58c614c37d..b06dc3839268 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -115,7 +115,7 @@ config S390
115 select HAVE_ARCH_SECCOMP_FILTER 115 select HAVE_ARCH_SECCOMP_FILTER
116 select HAVE_ARCH_TRACEHOOK 116 select HAVE_ARCH_TRACEHOOK
117 select HAVE_ARCH_TRANSPARENT_HUGEPAGE 117 select HAVE_ARCH_TRANSPARENT_HUGEPAGE
118 select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z9_109_FEATURES 118 select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
119 select HAVE_CMPXCHG_DOUBLE 119 select HAVE_CMPXCHG_DOUBLE
120 select HAVE_CMPXCHG_LOCAL 120 select HAVE_CMPXCHG_LOCAL
121 select HAVE_DEBUG_KMEMLEAK 121 select HAVE_DEBUG_KMEMLEAK
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index ba3b2aefddf5..d9c4c313fbc6 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -3,9 +3,10 @@
3 * 3 *
4 * Support for s390 cryptographic instructions. 4 * Support for s390 cryptographic instructions.
5 * 5 *
6 * Copyright IBM Corp. 2003, 2007 6 * Copyright IBM Corp. 2003, 2015
7 * Author(s): Thomas Spatzier 7 * Author(s): Thomas Spatzier
8 * Jan Glauber (jan.glauber@de.ibm.com) 8 * Jan Glauber (jan.glauber@de.ibm.com)
9 * Harald Freudenberger (freude@de.ibm.com)
9 * 10 *
10 * This program is free software; you can redistribute it and/or modify it 11 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free 12 * under the terms of the GNU General Public License as published by the Free
@@ -28,15 +29,17 @@
28#define CRYPT_S390_MSA 0x1 29#define CRYPT_S390_MSA 0x1
29#define CRYPT_S390_MSA3 0x2 30#define CRYPT_S390_MSA3 0x2
30#define CRYPT_S390_MSA4 0x4 31#define CRYPT_S390_MSA4 0x4
32#define CRYPT_S390_MSA5 0x8
31 33
32/* s390 cryptographic operations */ 34/* s390 cryptographic operations */
33enum crypt_s390_operations { 35enum crypt_s390_operations {
34 CRYPT_S390_KM = 0x0100, 36 CRYPT_S390_KM = 0x0100,
35 CRYPT_S390_KMC = 0x0200, 37 CRYPT_S390_KMC = 0x0200,
36 CRYPT_S390_KIMD = 0x0300, 38 CRYPT_S390_KIMD = 0x0300,
37 CRYPT_S390_KLMD = 0x0400, 39 CRYPT_S390_KLMD = 0x0400,
38 CRYPT_S390_KMAC = 0x0500, 40 CRYPT_S390_KMAC = 0x0500,
39 CRYPT_S390_KMCTR = 0x0600 41 CRYPT_S390_KMCTR = 0x0600,
42 CRYPT_S390_PPNO = 0x0700
40}; 43};
41 44
42/* 45/*
@@ -138,6 +141,16 @@ enum crypt_s390_kmac_func {
138 KMAC_TDEA_192 = CRYPT_S390_KMAC | 3 141 KMAC_TDEA_192 = CRYPT_S390_KMAC | 3
139}; 142};
140 143
144/*
145 * function codes for PPNO (PERFORM PSEUDORANDOM NUMBER
146 * OPERATION) instruction
147 */
148enum crypt_s390_ppno_func {
149 PPNO_QUERY = CRYPT_S390_PPNO | 0,
150 PPNO_SHA512_DRNG_GEN = CRYPT_S390_PPNO | 3,
151 PPNO_SHA512_DRNG_SEED = CRYPT_S390_PPNO | 0x83
152};
153
141/** 154/**
142 * crypt_s390_km: 155 * crypt_s390_km:
143 * @func: the function code passed to KM; see crypt_s390_km_func 156 * @func: the function code passed to KM; see crypt_s390_km_func
@@ -162,11 +175,11 @@ static inline int crypt_s390_km(long func, void *param,
162 int ret; 175 int ret;
163 176
164 asm volatile( 177 asm volatile(
165 "0: .insn rre,0xb92e0000,%3,%1 \n" /* KM opcode */ 178 "0: .insn rre,0xb92e0000,%3,%1\n" /* KM opcode */
166 "1: brc 1,0b \n" /* handle partial completion */ 179 "1: brc 1,0b\n" /* handle partial completion */
167 " la %0,0\n" 180 " la %0,0\n"
168 "2:\n" 181 "2:\n"
169 EX_TABLE(0b,2b) EX_TABLE(1b,2b) 182 EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
170 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) 183 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
171 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); 184 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
172 if (ret < 0) 185 if (ret < 0)
@@ -198,11 +211,11 @@ static inline int crypt_s390_kmc(long func, void *param,
198 int ret; 211 int ret;
199 212
200 asm volatile( 213 asm volatile(
201 "0: .insn rre,0xb92f0000,%3,%1 \n" /* KMC opcode */ 214 "0: .insn rre,0xb92f0000,%3,%1\n" /* KMC opcode */
202 "1: brc 1,0b \n" /* handle partial completion */ 215 "1: brc 1,0b\n" /* handle partial completion */
203 " la %0,0\n" 216 " la %0,0\n"
204 "2:\n" 217 "2:\n"
205 EX_TABLE(0b,2b) EX_TABLE(1b,2b) 218 EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
206 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) 219 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
207 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); 220 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
208 if (ret < 0) 221 if (ret < 0)
@@ -233,11 +246,11 @@ static inline int crypt_s390_kimd(long func, void *param,
233 int ret; 246 int ret;
234 247
235 asm volatile( 248 asm volatile(
236 "0: .insn rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */ 249 "0: .insn rre,0xb93e0000,%1,%1\n" /* KIMD opcode */
237 "1: brc 1,0b \n" /* handle partial completion */ 250 "1: brc 1,0b\n" /* handle partial completion */
238 " la %0,0\n" 251 " la %0,0\n"
239 "2:\n" 252 "2:\n"
240 EX_TABLE(0b,2b) EX_TABLE(1b,2b) 253 EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
241 : "=d" (ret), "+a" (__src), "+d" (__src_len) 254 : "=d" (ret), "+a" (__src), "+d" (__src_len)
242 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); 255 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
243 if (ret < 0) 256 if (ret < 0)
@@ -267,11 +280,11 @@ static inline int crypt_s390_klmd(long func, void *param,
267 int ret; 280 int ret;
268 281
269 asm volatile( 282 asm volatile(
270 "0: .insn rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */ 283 "0: .insn rre,0xb93f0000,%1,%1\n" /* KLMD opcode */
271 "1: brc 1,0b \n" /* handle partial completion */ 284 "1: brc 1,0b\n" /* handle partial completion */
272 " la %0,0\n" 285 " la %0,0\n"
273 "2:\n" 286 "2:\n"
274 EX_TABLE(0b,2b) EX_TABLE(1b,2b) 287 EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
275 : "=d" (ret), "+a" (__src), "+d" (__src_len) 288 : "=d" (ret), "+a" (__src), "+d" (__src_len)
276 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); 289 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
277 if (ret < 0) 290 if (ret < 0)
@@ -302,11 +315,11 @@ static inline int crypt_s390_kmac(long func, void *param,
302 int ret; 315 int ret;
303 316
304 asm volatile( 317 asm volatile(
305 "0: .insn rre,0xb91e0000,%1,%1 \n" /* KLAC opcode */ 318 "0: .insn rre,0xb91e0000,%1,%1\n" /* KLAC opcode */
306 "1: brc 1,0b \n" /* handle partial completion */ 319 "1: brc 1,0b\n" /* handle partial completion */
307 " la %0,0\n" 320 " la %0,0\n"
308 "2:\n" 321 "2:\n"
309 EX_TABLE(0b,2b) EX_TABLE(1b,2b) 322 EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
310 : "=d" (ret), "+a" (__src), "+d" (__src_len) 323 : "=d" (ret), "+a" (__src), "+d" (__src_len)
311 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); 324 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
312 if (ret < 0) 325 if (ret < 0)
@@ -340,11 +353,11 @@ static inline int crypt_s390_kmctr(long func, void *param, u8 *dest,
340 int ret = -1; 353 int ret = -1;
341 354
342 asm volatile( 355 asm volatile(
343 "0: .insn rrf,0xb92d0000,%3,%1,%4,0 \n" /* KMCTR opcode */ 356 "0: .insn rrf,0xb92d0000,%3,%1,%4,0\n" /* KMCTR opcode */
344 "1: brc 1,0b \n" /* handle partial completion */ 357 "1: brc 1,0b\n" /* handle partial completion */
345 " la %0,0\n" 358 " la %0,0\n"
346 "2:\n" 359 "2:\n"
347 EX_TABLE(0b,2b) EX_TABLE(1b,2b) 360 EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
348 : "+d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest), 361 : "+d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest),
349 "+a" (__ctr) 362 "+a" (__ctr)
350 : "d" (__func), "a" (__param) : "cc", "memory"); 363 : "d" (__func), "a" (__param) : "cc", "memory");
@@ -354,6 +367,47 @@ static inline int crypt_s390_kmctr(long func, void *param, u8 *dest,
354} 367}
355 368
356/** 369/**
370 * crypt_s390_ppno:
371 * @func: the function code passed to PPNO; see crypt_s390_ppno_func
372 * @param: address of parameter block; see POP for details on each func
373 * @dest: address of destination memory area
374 * @dest_len: size of destination memory area in bytes
375 * @seed: address of seed data
376 * @seed_len: size of seed data in bytes
377 *
378 * Executes the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION)
379 * operation of the CPU.
380 *
381 * Returns -1 for failure, 0 for the query func, number of random
382 * bytes stored in dest buffer for generate function
383 */
384static inline int crypt_s390_ppno(long func, void *param,
385 u8 *dest, long dest_len,
386 const u8 *seed, long seed_len)
387{
388 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
389 register void *__param asm("1") = param; /* param block (240 bytes) */
390 register u8 *__dest asm("2") = dest; /* buf for recv random bytes */
391 register long __dest_len asm("3") = dest_len; /* requested random bytes */
392 register const u8 *__seed asm("4") = seed; /* buf with seed data */
393 register long __seed_len asm("5") = seed_len; /* bytes in seed buf */
394 int ret = -1;
395
396 asm volatile (
397 "0: .insn rre,0xb93c0000,%1,%5\n" /* PPNO opcode */
398 "1: brc 1,0b\n" /* handle partial completion */
399 " la %0,0\n"
400 "2:\n"
401 EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
402 : "+d" (ret), "+a"(__dest), "+d"(__dest_len)
403 : "d"(__func), "a"(__param), "a"(__seed), "d"(__seed_len)
404 : "cc", "memory");
405 if (ret < 0)
406 return ret;
407 return (func & CRYPT_S390_FUNC_MASK) ? dest_len - __dest_len : 0;
408}
409
410/**
357 * crypt_s390_func_available: 411 * crypt_s390_func_available:
358 * @func: the function code of the specific function; 0 if op in general 412 * @func: the function code of the specific function; 0 if op in general
359 * 413 *
@@ -373,6 +427,9 @@ static inline int crypt_s390_func_available(int func,
373 return 0; 427 return 0;
374 if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77)) 428 if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77))
375 return 0; 429 return 0;
430 if (facility_mask & CRYPT_S390_MSA5 && !test_facility(57))
431 return 0;
432
376 switch (func & CRYPT_S390_OP_MASK) { 433 switch (func & CRYPT_S390_OP_MASK) {
377 case CRYPT_S390_KM: 434 case CRYPT_S390_KM:
378 ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0); 435 ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
@@ -390,8 +447,12 @@ static inline int crypt_s390_func_available(int func,
390 ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0); 447 ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
391 break; 448 break;
392 case CRYPT_S390_KMCTR: 449 case CRYPT_S390_KMCTR:
393 ret = crypt_s390_kmctr(KMCTR_QUERY, &status, NULL, NULL, 0, 450 ret = crypt_s390_kmctr(KMCTR_QUERY, &status,
394 NULL); 451 NULL, NULL, 0, NULL);
452 break;
453 case CRYPT_S390_PPNO:
454 ret = crypt_s390_ppno(PPNO_QUERY, &status,
455 NULL, 0, NULL, 0);
395 break; 456 break;
396 default: 457 default:
397 return 0; 458 return 0;
@@ -419,15 +480,14 @@ static inline int crypt_s390_pcc(long func, void *param)
419 int ret = -1; 480 int ret = -1;
420 481
421 asm volatile( 482 asm volatile(
422 "0: .insn rre,0xb92c0000,0,0 \n" /* PCC opcode */ 483 "0: .insn rre,0xb92c0000,0,0\n" /* PCC opcode */
423 "1: brc 1,0b \n" /* handle partial completion */ 484 "1: brc 1,0b\n" /* handle partial completion */
424 " la %0,0\n" 485 " la %0,0\n"
425 "2:\n" 486 "2:\n"
426 EX_TABLE(0b,2b) EX_TABLE(1b,2b) 487 EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
427 : "+d" (ret) 488 : "+d" (ret)
428 : "d" (__func), "a" (__param) : "cc", "memory"); 489 : "d" (__func), "a" (__param) : "cc", "memory");
429 return ret; 490 return ret;
430} 491}
431 492
432
433#endif /* _CRYPTO_ARCH_S390_CRYPT_S390_H */ 493#endif /* _CRYPTO_ARCH_S390_CRYPT_S390_H */
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 94a35a4c1b48..1f374b39a4ec 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -1,106 +1,529 @@
1/* 1/*
2 * Copyright IBM Corp. 2006, 2007 2 * Copyright IBM Corp. 2006, 2015
3 * Author(s): Jan Glauber <jan.glauber@de.ibm.com> 3 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
4 * Harald Freudenberger <freude@de.ibm.com>
4 * Driver for the s390 pseudo random number generator 5 * Driver for the s390 pseudo random number generator
5 */ 6 */
7
8#define KMSG_COMPONENT "prng"
9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10
6#include <linux/fs.h> 11#include <linux/fs.h>
12#include <linux/fips.h>
7#include <linux/init.h> 13#include <linux/init.h>
8#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/device.h>
9#include <linux/miscdevice.h> 16#include <linux/miscdevice.h>
10#include <linux/module.h> 17#include <linux/module.h>
11#include <linux/moduleparam.h> 18#include <linux/moduleparam.h>
19#include <linux/mutex.h>
12#include <linux/random.h> 20#include <linux/random.h>
13#include <linux/slab.h> 21#include <linux/slab.h>
14#include <asm/debug.h> 22#include <asm/debug.h>
15#include <asm/uaccess.h> 23#include <asm/uaccess.h>
24#include <asm/timex.h>
16 25
17#include "crypt_s390.h" 26#include "crypt_s390.h"
18 27
19MODULE_LICENSE("GPL"); 28MODULE_LICENSE("GPL");
20MODULE_AUTHOR("Jan Glauber <jan.glauber@de.ibm.com>"); 29MODULE_AUTHOR("IBM Corporation");
21MODULE_DESCRIPTION("s390 PRNG interface"); 30MODULE_DESCRIPTION("s390 PRNG interface");
22 31
23static int prng_chunk_size = 256; 32
24module_param(prng_chunk_size, int, S_IRUSR | S_IRGRP | S_IROTH); 33#define PRNG_MODE_AUTO 0
34#define PRNG_MODE_TDES 1
35#define PRNG_MODE_SHA512 2
36
37static unsigned int prng_mode = PRNG_MODE_AUTO;
38module_param_named(mode, prng_mode, int, 0);
39MODULE_PARM_DESC(prng_mode, "PRNG mode: 0 - auto, 1 - TDES, 2 - SHA512");
40
41
42#define PRNG_CHUNKSIZE_TDES_MIN 8
43#define PRNG_CHUNKSIZE_TDES_MAX (64*1024)
44#define PRNG_CHUNKSIZE_SHA512_MIN 64
45#define PRNG_CHUNKSIZE_SHA512_MAX (64*1024)
46
47static unsigned int prng_chunk_size = 256;
48module_param_named(chunksize, prng_chunk_size, int, 0);
25MODULE_PARM_DESC(prng_chunk_size, "PRNG read chunk size in bytes"); 49MODULE_PARM_DESC(prng_chunk_size, "PRNG read chunk size in bytes");
26 50
27static int prng_entropy_limit = 4096; 51
28module_param(prng_entropy_limit, int, S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR); 52#define PRNG_RESEED_LIMIT_TDES 4096
29MODULE_PARM_DESC(prng_entropy_limit, 53#define PRNG_RESEED_LIMIT_TDES_LOWER 4096
30 "PRNG add entropy after that much bytes were produced"); 54#define PRNG_RESEED_LIMIT_SHA512 100000
55#define PRNG_RESEED_LIMIT_SHA512_LOWER 10000
56
57static unsigned int prng_reseed_limit;
58module_param_named(reseed_limit, prng_reseed_limit, int, 0);
59MODULE_PARM_DESC(prng_reseed_limit, "PRNG reseed limit");
60
31 61
32/* 62/*
33 * Any one who considers arithmetical methods of producing random digits is, 63 * Any one who considers arithmetical methods of producing random digits is,
34 * of course, in a state of sin. -- John von Neumann 64 * of course, in a state of sin. -- John von Neumann
35 */ 65 */
36 66
37struct s390_prng_data { 67static int prng_errorflag;
38 unsigned long count; /* how many bytes were produced */ 68
39 char *buf; 69#define PRNG_GEN_ENTROPY_FAILED 1
70#define PRNG_SELFTEST_FAILED 2
71#define PRNG_INSTANTIATE_FAILED 3
72#define PRNG_SEED_FAILED 4
73#define PRNG_RESEED_FAILED 5
74#define PRNG_GEN_FAILED 6
75
76struct prng_ws_s {
77 u8 parm_block[32];
78 u32 reseed_counter;
79 u64 byte_counter;
40}; 80};
41 81
42static struct s390_prng_data *p; 82struct ppno_ws_s {
83 u32 res;
84 u32 reseed_counter;
85 u64 stream_bytes;
86 u8 V[112];
87 u8 C[112];
88};
43 89
44/* copied from libica, use a non-zero initial parameter block */ 90struct prng_data_s {
45static unsigned char parm_block[32] = { 91 struct mutex mutex;
460x0F,0x2B,0x8E,0x63,0x8C,0x8E,0xD2,0x52,0x64,0xB7,0xA0,0x7B,0x75,0x28,0xB8,0xF4, 92 union {
470x75,0x5F,0xD2,0xA6,0x8D,0x97,0x11,0xFF,0x49,0xD8,0x23,0xF3,0x7E,0x21,0xEC,0xA0, 93 struct prng_ws_s prngws;
94 struct ppno_ws_s ppnows;
95 };
96 u8 *buf;
97 u32 rest;
98 u8 *prev;
48}; 99};
49 100
50static int prng_open(struct inode *inode, struct file *file) 101static struct prng_data_s *prng_data;
102
103/* initial parameter block for tdes mode, copied from libica */
104static const u8 initial_parm_block[32] __initconst = {
105 0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
106 0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
107 0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
108 0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0 };
109
110
111/*** helper functions ***/
112
113static int generate_entropy(u8 *ebuf, size_t nbytes)
51{ 114{
52 return nonseekable_open(inode, file); 115 int n, ret = 0;
116 u8 *pg, *h, hash[32];
117
118 pg = (u8 *) __get_free_page(GFP_KERNEL);
119 if (!pg) {
120 prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
121 return -ENOMEM;
122 }
123
124 while (nbytes) {
125 /* fill page with urandom bytes */
126 get_random_bytes(pg, PAGE_SIZE);
127 /* exor page with stckf values */
128 for (n = 0; n < sizeof(PAGE_SIZE/sizeof(u64)); n++) {
129 u64 *p = ((u64 *)pg) + n;
130 *p ^= get_tod_clock_fast();
131 }
132 n = (nbytes < sizeof(hash)) ? nbytes : sizeof(hash);
133 if (n < sizeof(hash))
134 h = hash;
135 else
136 h = ebuf;
137 /* generate sha256 from this page */
138 if (crypt_s390_kimd(KIMD_SHA_256, h,
139 pg, PAGE_SIZE) != PAGE_SIZE) {
140 prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
141 ret = -EIO;
142 goto out;
143 }
144 if (n < sizeof(hash))
145 memcpy(ebuf, hash, n);
146 ret += n;
147 ebuf += n;
148 nbytes -= n;
149 }
150
151out:
152 free_page((unsigned long)pg);
153 return ret;
53} 154}
54 155
55static void prng_add_entropy(void) 156
157/*** tdes functions ***/
158
159static void prng_tdes_add_entropy(void)
56{ 160{
57 __u64 entropy[4]; 161 __u64 entropy[4];
58 unsigned int i; 162 unsigned int i;
59 int ret; 163 int ret;
60 164
61 for (i = 0; i < 16; i++) { 165 for (i = 0; i < 16; i++) {
62 ret = crypt_s390_kmc(KMC_PRNG, parm_block, (char *)entropy, 166 ret = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block,
63 (char *)entropy, sizeof(entropy)); 167 (char *)entropy, (char *)entropy,
168 sizeof(entropy));
64 BUG_ON(ret < 0 || ret != sizeof(entropy)); 169 BUG_ON(ret < 0 || ret != sizeof(entropy));
65 memcpy(parm_block, entropy, sizeof(entropy)); 170 memcpy(prng_data->prngws.parm_block, entropy, sizeof(entropy));
66 } 171 }
67} 172}
68 173
69static void prng_seed(int nbytes) 174
175static void prng_tdes_seed(int nbytes)
70{ 176{
71 char buf[16]; 177 char buf[16];
72 int i = 0; 178 int i = 0;
73 179
74 BUG_ON(nbytes > 16); 180 BUG_ON(nbytes > sizeof(buf));
181
75 get_random_bytes(buf, nbytes); 182 get_random_bytes(buf, nbytes);
76 183
77 /* Add the entropy */ 184 /* Add the entropy */
78 while (nbytes >= 8) { 185 while (nbytes >= 8) {
79 *((__u64 *)parm_block) ^= *((__u64 *)(buf+i)); 186 *((__u64 *)prng_data->prngws.parm_block) ^= *((__u64 *)(buf+i));
80 prng_add_entropy(); 187 prng_tdes_add_entropy();
81 i += 8; 188 i += 8;
82 nbytes -= 8; 189 nbytes -= 8;
83 } 190 }
84 prng_add_entropy(); 191 prng_tdes_add_entropy();
192 prng_data->prngws.reseed_counter = 0;
193}
194
195
196static int __init prng_tdes_instantiate(void)
197{
198 int datalen;
199
200 pr_debug("prng runs in TDES mode with "
201 "chunksize=%d and reseed_limit=%u\n",
202 prng_chunk_size, prng_reseed_limit);
203
204 /* memory allocation, prng_data struct init, mutex init */
205 datalen = sizeof(struct prng_data_s) + prng_chunk_size;
206 prng_data = kzalloc(datalen, GFP_KERNEL);
207 if (!prng_data) {
208 prng_errorflag = PRNG_INSTANTIATE_FAILED;
209 return -ENOMEM;
210 }
211 mutex_init(&prng_data->mutex);
212 prng_data->buf = ((u8 *)prng_data) + sizeof(struct prng_data_s);
213 memcpy(prng_data->prngws.parm_block, initial_parm_block, 32);
214
215 /* initialize the PRNG, add 128 bits of entropy */
216 prng_tdes_seed(16);
217
218 return 0;
85} 219}
86 220
87static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes, 221
88 loff_t *ppos) 222static void prng_tdes_deinstantiate(void)
223{
224 pr_debug("The prng module stopped "
225 "after running in triple DES mode\n");
226 kzfree(prng_data);
227}
228
229
230/*** sha512 functions ***/
231
232static int __init prng_sha512_selftest(void)
89{ 233{
90 int chunk, n; 234 /* NIST DRBG testvector for Hash Drbg, Sha-512, Count #0 */
235 static const u8 seed[] __initconst = {
236 0x6b, 0x50, 0xa7, 0xd8, 0xf8, 0xa5, 0x5d, 0x7a,
237 0x3d, 0xf8, 0xbb, 0x40, 0xbc, 0xc3, 0xb7, 0x22,
238 0xd8, 0x70, 0x8d, 0xe6, 0x7f, 0xda, 0x01, 0x0b,
239 0x03, 0xc4, 0xc8, 0x4d, 0x72, 0x09, 0x6f, 0x8c,
240 0x3e, 0xc6, 0x49, 0xcc, 0x62, 0x56, 0xd9, 0xfa,
241 0x31, 0xdb, 0x7a, 0x29, 0x04, 0xaa, 0xf0, 0x25 };
242 static const u8 V0[] __initconst = {
243 0x00, 0xad, 0xe3, 0x6f, 0x9a, 0x01, 0xc7, 0x76,
244 0x61, 0x34, 0x35, 0xf5, 0x4e, 0x24, 0x74, 0x22,
245 0x21, 0x9a, 0x29, 0x89, 0xc7, 0x93, 0x2e, 0x60,
246 0x1e, 0xe8, 0x14, 0x24, 0x8d, 0xd5, 0x03, 0xf1,
247 0x65, 0x5d, 0x08, 0x22, 0x72, 0xd5, 0xad, 0x95,
248 0xe1, 0x23, 0x1e, 0x8a, 0xa7, 0x13, 0xd9, 0x2b,
249 0x5e, 0xbc, 0xbb, 0x80, 0xab, 0x8d, 0xe5, 0x79,
250 0xab, 0x5b, 0x47, 0x4e, 0xdd, 0xee, 0x6b, 0x03,
251 0x8f, 0x0f, 0x5c, 0x5e, 0xa9, 0x1a, 0x83, 0xdd,
252 0xd3, 0x88, 0xb2, 0x75, 0x4b, 0xce, 0x83, 0x36,
253 0x57, 0x4b, 0xf1, 0x5c, 0xca, 0x7e, 0x09, 0xc0,
254 0xd3, 0x89, 0xc6, 0xe0, 0xda, 0xc4, 0x81, 0x7e,
255 0x5b, 0xf9, 0xe1, 0x01, 0xc1, 0x92, 0x05, 0xea,
256 0xf5, 0x2f, 0xc6, 0xc6, 0xc7, 0x8f, 0xbc, 0xf4 };
257 static const u8 C0[] __initconst = {
258 0x00, 0xf4, 0xa3, 0xe5, 0xa0, 0x72, 0x63, 0x95,
259 0xc6, 0x4f, 0x48, 0xd0, 0x8b, 0x5b, 0x5f, 0x8e,
260 0x6b, 0x96, 0x1f, 0x16, 0xed, 0xbc, 0x66, 0x94,
261 0x45, 0x31, 0xd7, 0x47, 0x73, 0x22, 0xa5, 0x86,
262 0xce, 0xc0, 0x4c, 0xac, 0x63, 0xb8, 0x39, 0x50,
263 0xbf, 0xe6, 0x59, 0x6c, 0x38, 0x58, 0x99, 0x1f,
264 0x27, 0xa7, 0x9d, 0x71, 0x2a, 0xb3, 0x7b, 0xf9,
265 0xfb, 0x17, 0x86, 0xaa, 0x99, 0x81, 0xaa, 0x43,
266 0xe4, 0x37, 0xd3, 0x1e, 0x6e, 0xe5, 0xe6, 0xee,
267 0xc2, 0xed, 0x95, 0x4f, 0x53, 0x0e, 0x46, 0x8a,
268 0xcc, 0x45, 0xa5, 0xdb, 0x69, 0x0d, 0x81, 0xc9,
269 0x32, 0x92, 0xbc, 0x8f, 0x33, 0xe6, 0xf6, 0x09,
270 0x7c, 0x8e, 0x05, 0x19, 0x0d, 0xf1, 0xb6, 0xcc,
271 0xf3, 0x02, 0x21, 0x90, 0x25, 0xec, 0xed, 0x0e };
272 static const u8 random[] __initconst = {
273 0x95, 0xb7, 0xf1, 0x7e, 0x98, 0x02, 0xd3, 0x57,
274 0x73, 0x92, 0xc6, 0xa9, 0xc0, 0x80, 0x83, 0xb6,
275 0x7d, 0xd1, 0x29, 0x22, 0x65, 0xb5, 0xf4, 0x2d,
276 0x23, 0x7f, 0x1c, 0x55, 0xbb, 0x9b, 0x10, 0xbf,
277 0xcf, 0xd8, 0x2c, 0x77, 0xa3, 0x78, 0xb8, 0x26,
278 0x6a, 0x00, 0x99, 0x14, 0x3b, 0x3c, 0x2d, 0x64,
279 0x61, 0x1e, 0xee, 0xb6, 0x9a, 0xcd, 0xc0, 0x55,
280 0x95, 0x7c, 0x13, 0x9e, 0x8b, 0x19, 0x0c, 0x7a,
281 0x06, 0x95, 0x5f, 0x2c, 0x79, 0x7c, 0x27, 0x78,
282 0xde, 0x94, 0x03, 0x96, 0xa5, 0x01, 0xf4, 0x0e,
283 0x91, 0x39, 0x6a, 0xcf, 0x8d, 0x7e, 0x45, 0xeb,
284 0xdb, 0xb5, 0x3b, 0xbf, 0x8c, 0x97, 0x52, 0x30,
285 0xd2, 0xf0, 0xff, 0x91, 0x06, 0xc7, 0x61, 0x19,
286 0xae, 0x49, 0x8e, 0x7f, 0xbc, 0x03, 0xd9, 0x0f,
287 0x8e, 0x4c, 0x51, 0x62, 0x7a, 0xed, 0x5c, 0x8d,
288 0x42, 0x63, 0xd5, 0xd2, 0xb9, 0x78, 0x87, 0x3a,
289 0x0d, 0xe5, 0x96, 0xee, 0x6d, 0xc7, 0xf7, 0xc2,
290 0x9e, 0x37, 0xee, 0xe8, 0xb3, 0x4c, 0x90, 0xdd,
291 0x1c, 0xf6, 0xa9, 0xdd, 0xb2, 0x2b, 0x4c, 0xbd,
292 0x08, 0x6b, 0x14, 0xb3, 0x5d, 0xe9, 0x3d, 0xa2,
293 0xd5, 0xcb, 0x18, 0x06, 0x69, 0x8c, 0xbd, 0x7b,
294 0xbb, 0x67, 0xbf, 0xe3, 0xd3, 0x1f, 0xd2, 0xd1,
295 0xdb, 0xd2, 0xa1, 0xe0, 0x58, 0xa3, 0xeb, 0x99,
296 0xd7, 0xe5, 0x1f, 0x1a, 0x93, 0x8e, 0xed, 0x5e,
297 0x1c, 0x1d, 0xe2, 0x3a, 0x6b, 0x43, 0x45, 0xd3,
298 0x19, 0x14, 0x09, 0xf9, 0x2f, 0x39, 0xb3, 0x67,
299 0x0d, 0x8d, 0xbf, 0xb6, 0x35, 0xd8, 0xe6, 0xa3,
300 0x69, 0x32, 0xd8, 0x10, 0x33, 0xd1, 0x44, 0x8d,
301 0x63, 0xb4, 0x03, 0xdd, 0xf8, 0x8e, 0x12, 0x1b,
302 0x6e, 0x81, 0x9a, 0xc3, 0x81, 0x22, 0x6c, 0x13,
303 0x21, 0xe4, 0xb0, 0x86, 0x44, 0xf6, 0x72, 0x7c,
304 0x36, 0x8c, 0x5a, 0x9f, 0x7a, 0x4b, 0x3e, 0xe2 };
305
91 int ret = 0; 306 int ret = 0;
92 int tmp; 307 u8 buf[sizeof(random)];
308 struct ppno_ws_s ws;
309
310 memset(&ws, 0, sizeof(ws));
311
312 /* initial seed */
313 ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
314 &ws, NULL, 0,
315 seed, sizeof(seed));
316 if (ret < 0) {
317 pr_err("The prng self test seed operation for the "
318 "SHA-512 mode failed with rc=%d\n", ret);
319 prng_errorflag = PRNG_SELFTEST_FAILED;
320 return -EIO;
321 }
322
323 /* check working states V and C */
324 if (memcmp(ws.V, V0, sizeof(V0)) != 0
325 || memcmp(ws.C, C0, sizeof(C0)) != 0) {
326 pr_err("The prng self test state test "
327 "for the SHA-512 mode failed\n");
328 prng_errorflag = PRNG_SELFTEST_FAILED;
329 return -EIO;
330 }
331
332 /* generate random bytes */
333 ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
334 &ws, buf, sizeof(buf),
335 NULL, 0);
336 if (ret < 0) {
337 pr_err("The prng self test generate operation for "
338 "the SHA-512 mode failed with rc=%d\n", ret);
339 prng_errorflag = PRNG_SELFTEST_FAILED;
340 return -EIO;
341 }
342 ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
343 &ws, buf, sizeof(buf),
344 NULL, 0);
345 if (ret < 0) {
346 pr_err("The prng self test generate operation for "
347 "the SHA-512 mode failed with rc=%d\n", ret);
348 prng_errorflag = PRNG_SELFTEST_FAILED;
349 return -EIO;
350 }
351
352 /* check against expected data */
353 if (memcmp(buf, random, sizeof(random)) != 0) {
354 pr_err("The prng self test data test "
355 "for the SHA-512 mode failed\n");
356 prng_errorflag = PRNG_SELFTEST_FAILED;
357 return -EIO;
358 }
359
360 return 0;
361}
362
363
364static int __init prng_sha512_instantiate(void)
365{
366 int ret, datalen;
367 u8 seed[64];
368
369 pr_debug("prng runs in SHA-512 mode "
370 "with chunksize=%d and reseed_limit=%u\n",
371 prng_chunk_size, prng_reseed_limit);
372
373 /* memory allocation, prng_data struct init, mutex init */
374 datalen = sizeof(struct prng_data_s) + prng_chunk_size;
375 if (fips_enabled)
376 datalen += prng_chunk_size;
377 prng_data = kzalloc(datalen, GFP_KERNEL);
378 if (!prng_data) {
379 prng_errorflag = PRNG_INSTANTIATE_FAILED;
380 return -ENOMEM;
381 }
382 mutex_init(&prng_data->mutex);
383 prng_data->buf = ((u8 *)prng_data) + sizeof(struct prng_data_s);
384
385 /* selftest */
386 ret = prng_sha512_selftest();
387 if (ret)
388 goto outfree;
389
390 /* generate initial seed bytestring, first 48 bytes of entropy */
391 ret = generate_entropy(seed, 48);
392 if (ret != 48)
393 goto outfree;
394 /* followed by 16 bytes of unique nonce */
395 get_tod_clock_ext(seed + 48);
396
397 /* initial seed of the ppno drng */
398 ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
399 &prng_data->ppnows, NULL, 0,
400 seed, sizeof(seed));
401 if (ret < 0) {
402 prng_errorflag = PRNG_SEED_FAILED;
403 ret = -EIO;
404 goto outfree;
405 }
406
407 /* if fips mode is enabled, generate a first block of random
408 bytes for the FIPS 140-2 Conditional Self Test */
409 if (fips_enabled) {
410 prng_data->prev = prng_data->buf + prng_chunk_size;
411 ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
412 &prng_data->ppnows,
413 prng_data->prev,
414 prng_chunk_size,
415 NULL, 0);
416 if (ret < 0 || ret != prng_chunk_size) {
417 prng_errorflag = PRNG_GEN_FAILED;
418 ret = -EIO;
419 goto outfree;
420 }
421 }
422
423 return 0;
424
425outfree:
426 kfree(prng_data);
427 return ret;
428}
429
430
431static void prng_sha512_deinstantiate(void)
432{
433 pr_debug("The prng module stopped after running in SHA-512 mode\n");
434 kzfree(prng_data);
435}
436
437
438static int prng_sha512_reseed(void)
439{
440 int ret;
441 u8 seed[32];
442
443 /* generate 32 bytes of fresh entropy */
444 ret = generate_entropy(seed, sizeof(seed));
445 if (ret != sizeof(seed))
446 return ret;
447
448 /* do a reseed of the ppno drng with this bytestring */
449 ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
450 &prng_data->ppnows, NULL, 0,
451 seed, sizeof(seed));
452 if (ret) {
453 prng_errorflag = PRNG_RESEED_FAILED;
454 return -EIO;
455 }
456
457 return 0;
458}
459
460
461static int prng_sha512_generate(u8 *buf, size_t nbytes)
462{
463 int ret;
464
465 /* reseed needed ? */
466 if (prng_data->ppnows.reseed_counter > prng_reseed_limit) {
467 ret = prng_sha512_reseed();
468 if (ret)
469 return ret;
470 }
471
472 /* PPNO generate */
473 ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
474 &prng_data->ppnows, buf, nbytes,
475 NULL, 0);
476 if (ret < 0 || ret != nbytes) {
477 prng_errorflag = PRNG_GEN_FAILED;
478 return -EIO;
479 }
480
481 /* FIPS 140-2 Conditional Self Test */
482 if (fips_enabled) {
483 if (!memcmp(prng_data->prev, buf, nbytes)) {
484 prng_errorflag = PRNG_GEN_FAILED;
485 return -EILSEQ;
486 }
487 memcpy(prng_data->prev, buf, nbytes);
488 }
489
490 return ret;
491}
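
The fips_enabled branch in prng_sha512_generate() is the FIPS 140-2 continuous self test: each freshly generated block is compared with the previous one and the request fails if the two are identical. A minimal stand-alone sketch of that check, using an illustrative block size rather than the driver's prng_chunk_size:

#include <stdio.h>
#include <string.h>

#define CHUNK 16   /* illustrative block size, not the driver's prng_chunk_size */

/* returns 0 if the new block passes the continuous test, -1 otherwise */
static int continuous_test(unsigned char *prev, const unsigned char *cur, size_t n)
{
        if (memcmp(prev, cur, n) == 0)
                return -1;              /* two identical blocks in a row: fail */
        memcpy(prev, cur, n);           /* remember this block for the next check */
        return 0;
}

int main(void)
{
        unsigned char prev[CHUNK] = { 0 };
        unsigned char a[CHUNK] = { 1, 2, 3 };
        unsigned char b[CHUNK] = { 1, 2, 3 };

        printf("first block:  %d\n", continuous_test(prev, a, CHUNK)); /* 0  */
        printf("repeat block: %d\n", continuous_test(prev, b, CHUNK)); /* -1 */
        return 0;
}

The same pattern explains why prng_sha512_instantiate() generates one extra chunk up front when FIPS mode is on: the very first comparison needs a previous block to compare against.
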
492
493
494/*** file io functions ***/
495
496static int prng_open(struct inode *inode, struct file *file)
497{
498 return nonseekable_open(inode, file);
499}
500
501
502static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
503 size_t nbytes, loff_t *ppos)
504{
505 int chunk, n, tmp, ret = 0;
506
507 /* lock prng_data struct */
508 if (mutex_lock_interruptible(&prng_data->mutex))
509 return -ERESTARTSYS;
93 510
94 /* nbytes can be arbitrary length, we split it into chunks */
95 while (nbytes) { 511 while (nbytes) {
96 /* same as in extract_entropy_user in random.c */
97 if (need_resched()) { 512 if (need_resched()) {
98 if (signal_pending(current)) { 513 if (signal_pending(current)) {
99 if (ret == 0) 514 if (ret == 0)
100 ret = -ERESTARTSYS; 515 ret = -ERESTARTSYS;
101 break; 516 break;
102 } 517 }
 518 /* release the mutex before calling schedule() */
519 mutex_unlock(&prng_data->mutex);
103 schedule(); 520 schedule();
 521 /* re-acquire the mutex */
522 if (mutex_lock_interruptible(&prng_data->mutex)) {
523 if (ret == 0)
524 ret = -ERESTARTSYS;
525 return ret;
526 }
104 } 527 }
105 528
106 /* 529 /*
@@ -112,12 +535,11 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
112 /* PRNG only likes multiples of 8 bytes */ 535 /* PRNG only likes multiples of 8 bytes */
113 n = (chunk + 7) & -8; 536 n = (chunk + 7) & -8;
114 537
115 if (p->count > prng_entropy_limit) 538 if (prng_data->prngws.reseed_counter > prng_reseed_limit)
116 prng_seed(8); 539 prng_tdes_seed(8);
117 540
118 /* if the CPU supports PRNG stckf is present too */ 541 /* if the CPU supports PRNG stckf is present too */
119 asm volatile(".insn s,0xb27c0000,%0" 542 *((unsigned long long *)prng_data->buf) = get_tod_clock_fast();
120 : "=m" (*((unsigned long long *)p->buf)) : : "cc");
121 543
122 /* 544 /*
123 * Beside the STCKF the input for the TDES-EDE is the output 545 * Beside the STCKF the input for the TDES-EDE is the output
@@ -132,35 +554,259 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
132 * Note: you can still get strict X9.17 conformity by setting 554 * Note: you can still get strict X9.17 conformity by setting
133 * prng_chunk_size to 8 bytes. 555 * prng_chunk_size to 8 bytes.
134 */ 556 */
135 tmp = crypt_s390_kmc(KMC_PRNG, parm_block, p->buf, p->buf, n); 557 tmp = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block,
136 BUG_ON((tmp < 0) || (tmp != n)); 558 prng_data->buf, prng_data->buf, n);
559 if (tmp < 0 || tmp != n) {
560 ret = -EIO;
561 break;
562 }
137 563
138 p->count += n; 564 prng_data->prngws.byte_counter += n;
565 prng_data->prngws.reseed_counter += n;
139 566
140 if (copy_to_user(ubuf, p->buf, chunk)) 567 if (copy_to_user(ubuf, prng_data->buf, chunk))
141 return -EFAULT; 568 return -EFAULT;
142 569
143 nbytes -= chunk; 570 nbytes -= chunk;
144 ret += chunk; 571 ret += chunk;
145 ubuf += chunk; 572 ubuf += chunk;
146 } 573 }
574
575 /* unlock prng_data struct */
576 mutex_unlock(&prng_data->mutex);
577
147 return ret; 578 return ret;
148} 579}
149 580
150static const struct file_operations prng_fops = { 581
582static ssize_t prng_sha512_read(struct file *file, char __user *ubuf,
583 size_t nbytes, loff_t *ppos)
584{
585 int n, ret = 0;
586 u8 *p;
587
588 /* if errorflag is set do nothing and return 'broken pipe' */
589 if (prng_errorflag)
590 return -EPIPE;
591
592 /* lock prng_data struct */
593 if (mutex_lock_interruptible(&prng_data->mutex))
594 return -ERESTARTSYS;
595
596 while (nbytes) {
597 if (need_resched()) {
598 if (signal_pending(current)) {
599 if (ret == 0)
600 ret = -ERESTARTSYS;
601 break;
602 }
 603 /* release the mutex before calling schedule() */
604 mutex_unlock(&prng_data->mutex);
605 schedule();
 606 /* re-acquire the mutex */
607 if (mutex_lock_interruptible(&prng_data->mutex)) {
608 if (ret == 0)
609 ret = -ERESTARTSYS;
610 return ret;
611 }
612 }
613 if (prng_data->rest) {
 614 if (prng_data->rest) {
 615 /* push leftover random bytes from the previous read */
615 p = prng_data->buf + prng_chunk_size - prng_data->rest;
616 n = (nbytes < prng_data->rest) ?
617 nbytes : prng_data->rest;
618 prng_data->rest -= n;
619 } else {
620 /* generate one chunk of random bytes into read buf */
621 p = prng_data->buf;
622 n = prng_sha512_generate(p, prng_chunk_size);
623 if (n < 0) {
624 ret = n;
625 break;
626 }
627 if (nbytes < prng_chunk_size) {
628 n = nbytes;
629 prng_data->rest = prng_chunk_size - n;
630 } else {
631 n = prng_chunk_size;
632 prng_data->rest = 0;
633 }
634 }
635 if (copy_to_user(ubuf, p, n)) {
636 ret = -EFAULT;
637 break;
638 }
639 ubuf += n;
640 nbytes -= n;
641 ret += n;
642 }
643
644 /* unlock prng_data struct */
645 mutex_unlock(&prng_data->mutex);
646
647 return ret;
648}
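
Both read handlers may return fewer bytes than requested, and an interrupted wait surfaces to user space as EINTR, so a consumer of the character device loops until it has collected enough data. A sketch of such a reader, assuming the misc device shows up as /dev/prandom as suggested by the .name field below:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
        unsigned char buf[256];
        size_t have = 0;
        int fd = open("/dev/prandom", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/prandom");
                return 1;
        }
        /* the driver may return short reads; loop until the buffer is full */
        while (have < sizeof(buf)) {
                ssize_t n = read(fd, buf + have, sizeof(buf) - have);
                if (n < 0) {
                        if (errno == EINTR)
                                continue;       /* interrupted by a signal, retry */
                        perror("read");
                        close(fd);
                        return 1;
                }
                if (n == 0)
                        break;                  /* should not happen, but be safe */
                have += n;
        }
        close(fd);
        printf("read %zu pseudo-random bytes\n", have);
        return 0;
}
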
649
650
651/*** sysfs stuff ***/
652
653static const struct file_operations prng_sha512_fops = {
654 .owner = THIS_MODULE,
655 .open = &prng_open,
656 .release = NULL,
657 .read = &prng_sha512_read,
658 .llseek = noop_llseek,
659};
660static const struct file_operations prng_tdes_fops = {
151 .owner = THIS_MODULE, 661 .owner = THIS_MODULE,
152 .open = &prng_open, 662 .open = &prng_open,
153 .release = NULL, 663 .release = NULL,
154 .read = &prng_read, 664 .read = &prng_tdes_read,
155 .llseek = noop_llseek, 665 .llseek = noop_llseek,
156}; 666};
157 667
158static struct miscdevice prng_dev = { 668static struct miscdevice prng_sha512_dev = {
669 .name = "prandom",
670 .minor = MISC_DYNAMIC_MINOR,
671 .fops = &prng_sha512_fops,
672};
673static struct miscdevice prng_tdes_dev = {
159 .name = "prandom", 674 .name = "prandom",
160 .minor = MISC_DYNAMIC_MINOR, 675 .minor = MISC_DYNAMIC_MINOR,
161 .fops = &prng_fops, 676 .fops = &prng_tdes_fops,
162}; 677};
163 678
679
680/* chunksize attribute (ro) */
681static ssize_t prng_chunksize_show(struct device *dev,
682 struct device_attribute *attr,
683 char *buf)
684{
685 return snprintf(buf, PAGE_SIZE, "%u\n", prng_chunk_size);
686}
687static DEVICE_ATTR(chunksize, 0444, prng_chunksize_show, NULL);
688
689/* counter attribute (ro) */
690static ssize_t prng_counter_show(struct device *dev,
691 struct device_attribute *attr,
692 char *buf)
693{
694 u64 counter;
695
696 if (mutex_lock_interruptible(&prng_data->mutex))
697 return -ERESTARTSYS;
698 if (prng_mode == PRNG_MODE_SHA512)
699 counter = prng_data->ppnows.stream_bytes;
700 else
701 counter = prng_data->prngws.byte_counter;
702 mutex_unlock(&prng_data->mutex);
703
704 return snprintf(buf, PAGE_SIZE, "%llu\n", counter);
705}
706static DEVICE_ATTR(byte_counter, 0444, prng_counter_show, NULL);
707
708/* errorflag attribute (ro) */
709static ssize_t prng_errorflag_show(struct device *dev,
710 struct device_attribute *attr,
711 char *buf)
712{
713 return snprintf(buf, PAGE_SIZE, "%d\n", prng_errorflag);
714}
715static DEVICE_ATTR(errorflag, 0444, prng_errorflag_show, NULL);
716
717/* mode attribute (ro) */
718static ssize_t prng_mode_show(struct device *dev,
719 struct device_attribute *attr,
720 char *buf)
721{
722 if (prng_mode == PRNG_MODE_TDES)
723 return snprintf(buf, PAGE_SIZE, "TDES\n");
724 else
725 return snprintf(buf, PAGE_SIZE, "SHA512\n");
726}
727static DEVICE_ATTR(mode, 0444, prng_mode_show, NULL);
728
729/* reseed attribute (w) */
730static ssize_t prng_reseed_store(struct device *dev,
731 struct device_attribute *attr,
732 const char *buf, size_t count)
733{
734 if (mutex_lock_interruptible(&prng_data->mutex))
735 return -ERESTARTSYS;
736 prng_sha512_reseed();
737 mutex_unlock(&prng_data->mutex);
738
739 return count;
740}
741static DEVICE_ATTR(reseed, 0200, NULL, prng_reseed_store);
742
743/* reseed limit attribute (rw) */
744static ssize_t prng_reseed_limit_show(struct device *dev,
745 struct device_attribute *attr,
746 char *buf)
747{
748 return snprintf(buf, PAGE_SIZE, "%u\n", prng_reseed_limit);
749}
750static ssize_t prng_reseed_limit_store(struct device *dev,
751 struct device_attribute *attr,
752 const char *buf, size_t count)
753{
754 unsigned limit;
755
756 if (sscanf(buf, "%u\n", &limit) != 1)
757 return -EINVAL;
758
759 if (prng_mode == PRNG_MODE_SHA512) {
760 if (limit < PRNG_RESEED_LIMIT_SHA512_LOWER)
761 return -EINVAL;
762 } else {
763 if (limit < PRNG_RESEED_LIMIT_TDES_LOWER)
764 return -EINVAL;
765 }
766
767 prng_reseed_limit = limit;
768
769 return count;
770}
771static DEVICE_ATTR(reseed_limit, 0644,
772 prng_reseed_limit_show, prng_reseed_limit_store);
773
774/* strength attribute (ro) */
775static ssize_t prng_strength_show(struct device *dev,
776 struct device_attribute *attr,
777 char *buf)
778{
779 return snprintf(buf, PAGE_SIZE, "256\n");
780}
781static DEVICE_ATTR(strength, 0444, prng_strength_show, NULL);
782
783static struct attribute *prng_sha512_dev_attrs[] = {
784 &dev_attr_errorflag.attr,
785 &dev_attr_chunksize.attr,
786 &dev_attr_byte_counter.attr,
787 &dev_attr_mode.attr,
788 &dev_attr_reseed.attr,
789 &dev_attr_reseed_limit.attr,
790 &dev_attr_strength.attr,
791 NULL
792};
793static struct attribute *prng_tdes_dev_attrs[] = {
794 &dev_attr_chunksize.attr,
795 &dev_attr_byte_counter.attr,
796 &dev_attr_mode.attr,
797 NULL
798};
799
800static struct attribute_group prng_sha512_dev_attr_group = {
801 .attrs = prng_sha512_dev_attrs
802};
803static struct attribute_group prng_tdes_dev_attr_group = {
804 .attrs = prng_tdes_dev_attrs
805};
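
These attribute groups are attached to the misc device in prng_init(), so the files appear in the device's sysfs directory. A sketch that dumps the readable attributes, assuming the conventional /sys/class/misc/prandom/ location (the exact path depends on the running kernel's sysfs layout, and some files only exist in SHA-512 mode):

#include <stdio.h>

/* assumed sysfs location of the "prandom" misc device */
#define SYSFS_DIR "/sys/class/misc/prandom/"

static void show(const char *name)
{
        char path[256], line[64];
        FILE *f;

        snprintf(path, sizeof(path), SYSFS_DIR "%s", name);
        f = fopen(path, "r");
        if (!f || !fgets(line, sizeof(line), f))
                printf("%-13s <unreadable>\n", name);
        else
                printf("%-13s %s", name, line);
        if (f)
                fclose(f);
}

int main(void)
{
        show("mode");
        show("chunksize");
        show("byte_counter");
        show("reseed_limit");
        show("strength");
        return 0;
}
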
806
807
808/*** module init and exit ***/
809
164static int __init prng_init(void) 810static int __init prng_init(void)
165{ 811{
166 int ret; 812 int ret;
@@ -169,43 +815,105 @@ static int __init prng_init(void)
169 if (!crypt_s390_func_available(KMC_PRNG, CRYPT_S390_MSA)) 815 if (!crypt_s390_func_available(KMC_PRNG, CRYPT_S390_MSA))
170 return -EOPNOTSUPP; 816 return -EOPNOTSUPP;
171 817
172 if (prng_chunk_size < 8) 818 /* choose prng mode */
173 return -EINVAL; 819 if (prng_mode != PRNG_MODE_TDES) {
820 /* check for MSA5 support for PPNO operations */
821 if (!crypt_s390_func_available(PPNO_SHA512_DRNG_GEN,
822 CRYPT_S390_MSA5)) {
823 if (prng_mode == PRNG_MODE_SHA512) {
824 pr_err("The prng module cannot "
825 "start in SHA-512 mode\n");
826 return -EOPNOTSUPP;
827 }
828 prng_mode = PRNG_MODE_TDES;
829 } else
830 prng_mode = PRNG_MODE_SHA512;
831 }
174 832
175 p = kmalloc(sizeof(struct s390_prng_data), GFP_KERNEL); 833 if (prng_mode == PRNG_MODE_SHA512) {
176 if (!p)
177 return -ENOMEM;
178 p->count = 0;
179 834
180 p->buf = kmalloc(prng_chunk_size, GFP_KERNEL); 835 /* SHA512 mode */
181 if (!p->buf) {
182 ret = -ENOMEM;
183 goto out_free;
184 }
185 836
186 /* initialize the PRNG, add 128 bits of entropy */ 837 if (prng_chunk_size < PRNG_CHUNKSIZE_SHA512_MIN
187 prng_seed(16); 838 || prng_chunk_size > PRNG_CHUNKSIZE_SHA512_MAX)
839 return -EINVAL;
840 prng_chunk_size = (prng_chunk_size + 0x3f) & ~0x3f;
188 841
189 ret = misc_register(&prng_dev); 842 if (prng_reseed_limit == 0)
190 if (ret) 843 prng_reseed_limit = PRNG_RESEED_LIMIT_SHA512;
191 goto out_buf; 844 else if (prng_reseed_limit < PRNG_RESEED_LIMIT_SHA512_LOWER)
192 return 0; 845 return -EINVAL;
846
847 ret = prng_sha512_instantiate();
848 if (ret)
849 goto out;
850
851 ret = misc_register(&prng_sha512_dev);
852 if (ret) {
853 prng_sha512_deinstantiate();
854 goto out;
855 }
856 ret = sysfs_create_group(&prng_sha512_dev.this_device->kobj,
857 &prng_sha512_dev_attr_group);
858 if (ret) {
859 misc_deregister(&prng_sha512_dev);
860 prng_sha512_deinstantiate();
861 goto out;
862 }
193 863
194out_buf: 864 } else {
195 kfree(p->buf); 865
196out_free: 866 /* TDES mode */
197 kfree(p); 867
868 if (prng_chunk_size < PRNG_CHUNKSIZE_TDES_MIN
869 || prng_chunk_size > PRNG_CHUNKSIZE_TDES_MAX)
870 return -EINVAL;
871 prng_chunk_size = (prng_chunk_size + 0x07) & ~0x07;
872
873 if (prng_reseed_limit == 0)
874 prng_reseed_limit = PRNG_RESEED_LIMIT_TDES;
875 else if (prng_reseed_limit < PRNG_RESEED_LIMIT_TDES_LOWER)
876 return -EINVAL;
877
878 ret = prng_tdes_instantiate();
879 if (ret)
880 goto out;
881
882 ret = misc_register(&prng_tdes_dev);
883 if (ret) {
884 prng_tdes_deinstantiate();
885 goto out;
886 }
887 ret = sysfs_create_group(&prng_tdes_dev.this_device->kobj,
888 &prng_tdes_dev_attr_group);
889 if (ret) {
890 misc_deregister(&prng_tdes_dev);
891 prng_tdes_deinstantiate();
892 goto out;
893 }
894
895 }
896
897out:
198 return ret; 898 return ret;
199} 899}
200 900
901
201static void __exit prng_exit(void) 902static void __exit prng_exit(void)
202{ 903{
203 /* wipe me */ 904 if (prng_mode == PRNG_MODE_SHA512) {
204 kzfree(p->buf); 905 sysfs_remove_group(&prng_sha512_dev.this_device->kobj,
205 kfree(p); 906 &prng_sha512_dev_attr_group);
206 907 misc_deregister(&prng_sha512_dev);
207 misc_deregister(&prng_dev); 908 prng_sha512_deinstantiate();
909 } else {
910 sysfs_remove_group(&prng_tdes_dev.this_device->kobj,
911 &prng_tdes_dev_attr_group);
912 misc_deregister(&prng_tdes_dev);
913 prng_tdes_deinstantiate();
914 }
208} 915}
209 916
917
210module_init(prng_init); 918module_init(prng_init);
211module_exit(prng_exit); 919module_exit(prng_exit);
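
Both branches of prng_init() force the chunk size onto a power-of-two alignment with the usual (value + align - 1) & ~(align - 1) idiom: 64 bytes in SHA-512 mode, 8 bytes in TDES mode. A short stand-alone illustration of that arithmetic:

#include <stdio.h>

/* round v up to the next multiple of the power-of-two value align */
static unsigned int round_up_pow2(unsigned int v, unsigned int align)
{
        return (v + align - 1) & ~(align - 1);
}

int main(void)
{
        /* SHA-512 mode rounds to 64 bytes, TDES mode to 8 bytes */
        printf("%u -> %u (align 64)\n", 100u, round_up_pow2(100, 64));  /* 128 */
        printf("%u -> %u (align 64)\n", 256u, round_up_pow2(256, 64));  /* 256 */
        printf("%u -> %u (align 8)\n",  100u, round_up_pow2(100, 8));   /* 104 */
        return 0;
}
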
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index 694bcd6bd927..2f924bc30e35 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -26,6 +26,9 @@
26/* Not more than 2GB */ 26/* Not more than 2GB */
27#define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31) 27#define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31)
28 28
29/* Allocate control page with GFP_DMA */
30#define KEXEC_CONTROL_MEMORY_GFP GFP_DMA
31
29/* Maximum address we can use for the crash control pages */ 32/* Maximum address we can use for the crash control pages */
30#define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL) 33#define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL)
31 34
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index a5e656260a70..d29ad9545b41 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -14,7 +14,9 @@ typedef struct {
14 unsigned long asce_bits; 14 unsigned long asce_bits;
15 unsigned long asce_limit; 15 unsigned long asce_limit;
16 unsigned long vdso_base; 16 unsigned long vdso_base;
17 /* The mmu context has extended page tables. */ 17 /* The mmu context allocates 4K page tables. */
18 unsigned int alloc_pgste:1;
19 /* The mmu context uses extended page tables. */
18 unsigned int has_pgste:1; 20 unsigned int has_pgste:1;
19 /* The mmu context uses storage keys. */ 21 /* The mmu context uses storage keys. */
20 unsigned int use_skey:1; 22 unsigned int use_skey:1;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index d25d9ff10ba8..fb1b93ea3e3f 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -20,8 +20,11 @@ static inline int init_new_context(struct task_struct *tsk,
20 mm->context.flush_mm = 0; 20 mm->context.flush_mm = 0;
21 mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS; 21 mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
22 mm->context.asce_bits |= _ASCE_TYPE_REGION3; 22 mm->context.asce_bits |= _ASCE_TYPE_REGION3;
23#ifdef CONFIG_PGSTE
24 mm->context.alloc_pgste = page_table_allocate_pgste;
23 mm->context.has_pgste = 0; 25 mm->context.has_pgste = 0;
24 mm->context.use_skey = 0; 26 mm->context.use_skey = 0;
27#endif
25 mm->context.asce_limit = STACK_TOP_MAX; 28 mm->context.asce_limit = STACK_TOP_MAX;
26 crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); 29 crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
27 return 0; 30 return 0;
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 51e7fb634ebc..7b7858f158b4 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -21,6 +21,7 @@ void crst_table_free(struct mm_struct *, unsigned long *);
21unsigned long *page_table_alloc(struct mm_struct *); 21unsigned long *page_table_alloc(struct mm_struct *);
22void page_table_free(struct mm_struct *, unsigned long *); 22void page_table_free(struct mm_struct *, unsigned long *);
23void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long); 23void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
24extern int page_table_allocate_pgste;
24 25
25int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, 26int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
26 unsigned long key, bool nq); 27 unsigned long key, bool nq);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 989cfae9e202..fc642399b489 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -12,12 +12,9 @@
12#define _ASM_S390_PGTABLE_H 12#define _ASM_S390_PGTABLE_H
13 13
14/* 14/*
15 * The Linux memory management assumes a three-level page table setup. For 15 * The Linux memory management assumes a three-level page table setup.
16 * s390 31 bit we "fold" the mid level into the top-level page table, so 16 * For s390 64 bit we use up to four of the five levels the hardware
17 * that we physically have the same two-level page table as the s390 mmu 17 * provides (region first tables are not used).
18 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
19 * the hardware provides (region first and region second tables are not
20 * used).
21 * 18 *
22 * The "pgd_xxx()" functions are trivial for a folded two-level 19 * The "pgd_xxx()" functions are trivial for a folded two-level
23 * setup: the pgd is never bad, and a pmd always exists (as it's folded 20 * setup: the pgd is never bad, and a pmd always exists (as it's folded
@@ -101,8 +98,8 @@ extern unsigned long zero_page_mask;
101 98
102#ifndef __ASSEMBLY__ 99#ifndef __ASSEMBLY__
103/* 100/*
104 * The vmalloc and module area will always be on the topmost area of the kernel 101 * The vmalloc and module area will always be on the topmost area of the
105 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules. 102 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
106 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where 103 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
107 * modules will reside. That makes sure that inter module branches always 104 * modules will reside. That makes sure that inter module branches always
108 * happen without trampolines and in addition the placement within a 2GB frame 105 * happen without trampolines and in addition the placement within a 2GB frame
@@ -131,38 +128,6 @@ static inline int is_module_addr(void *addr)
131} 128}
132 129
133/* 130/*
134 * A 31 bit pagetable entry of S390 has following format:
135 * | PFRA | | OS |
136 * 0 0IP0
137 * 00000000001111111111222222222233
138 * 01234567890123456789012345678901
139 *
140 * I Page-Invalid Bit: Page is not available for address-translation
141 * P Page-Protection Bit: Store access not possible for page
142 *
143 * A 31 bit segmenttable entry of S390 has following format:
144 * | P-table origin | |PTL
145 * 0 IC
146 * 00000000001111111111222222222233
147 * 01234567890123456789012345678901
148 *
149 * I Segment-Invalid Bit: Segment is not available for address-translation
150 * C Common-Segment Bit: Segment is not private (PoP 3-30)
151 * PTL Page-Table-Length: Page-table length (PTL+1*16 entries -> up to 256)
152 *
153 * The 31 bit segmenttable origin of S390 has following format:
154 *
155 * |S-table origin | | STL |
156 * X **GPS
157 * 00000000001111111111222222222233
158 * 01234567890123456789012345678901
159 *
160 * X Space-Switch event:
161 * G Segment-Invalid Bit: *
162 * P Private-Space Bit: Segment is not private (PoP 3-30)
163 * S Storage-Alteration:
164 * STL Segment-Table-Length: Segment-table length (STL+1*16 entries -> up to 2048)
165 *
166 * A 64 bit pagetable entry of S390 has following format: 131 * A 64 bit pagetable entry of S390 has following format:
167 * | PFRA |0IPC| OS | 132 * | PFRA |0IPC| OS |
168 * 0000000000111111111122222222223333333333444444444455555555556666 133 * 0000000000111111111122222222223333333333444444444455555555556666
@@ -220,7 +185,6 @@ static inline int is_module_addr(void *addr)
220 185
221/* Software bits in the page table entry */ 186/* Software bits in the page table entry */
222#define _PAGE_PRESENT 0x001 /* SW pte present bit */ 187#define _PAGE_PRESENT 0x001 /* SW pte present bit */
223#define _PAGE_TYPE 0x002 /* SW pte type bit */
224#define _PAGE_YOUNG 0x004 /* SW pte young bit */ 188#define _PAGE_YOUNG 0x004 /* SW pte young bit */
225#define _PAGE_DIRTY 0x008 /* SW pte dirty bit */ 189#define _PAGE_DIRTY 0x008 /* SW pte dirty bit */
226#define _PAGE_READ 0x010 /* SW pte read bit */ 190#define _PAGE_READ 0x010 /* SW pte read bit */
@@ -240,31 +204,34 @@ static inline int is_module_addr(void *addr)
240 * table lock held. 204 * table lock held.
241 * 205 *
242 * The following table gives the different possible bit combinations for 206 * The following table gives the different possible bit combinations for
243 * the pte hardware and software bits in the last 12 bits of a pte: 207 * the pte hardware and software bits in the last 12 bits of a pte
208 * (. unassigned bit, x don't care, t swap type):
244 * 209 *
245 * 842100000000 210 * 842100000000
246 * 000084210000 211 * 000084210000
247 * 000000008421 212 * 000000008421
248 * .IR...wrdytp 213 * .IR.uswrdy.p
249 * empty .10...000000 214 * empty .10.00000000
250 * swap .10...xxxx10 215 * swap .11..ttttt.0
251 * file .11...xxxxx0 216 * prot-none, clean, old .11.xx0000.1
252 * prot-none, clean, old .11...000001 217 * prot-none, clean, young .11.xx0001.1
253 * prot-none, clean, young .11...000101 218 * prot-none, dirty, old .10.xx0010.1
254 * prot-none, dirty, old .10...001001 219 * prot-none, dirty, young .10.xx0011.1
255 * prot-none, dirty, young .10...001101 220 * read-only, clean, old .11.xx0100.1
256 * read-only, clean, old .11...010001 221 * read-only, clean, young .01.xx0101.1
257 * read-only, clean, young .01...010101 222 * read-only, dirty, old .11.xx0110.1
258 * read-only, dirty, old .11...011001 223 * read-only, dirty, young .01.xx0111.1
259 * read-only, dirty, young .01...011101 224 * read-write, clean, old .11.xx1100.1
260 * read-write, clean, old .11...110001 225 * read-write, clean, young .01.xx1101.1
261 * read-write, clean, young .01...110101 226 * read-write, dirty, old .10.xx1110.1
262 * read-write, dirty, old .10...111001 227 * read-write, dirty, young .00.xx1111.1
263 * read-write, dirty, young .00...111101 228 * HW-bits: R read-only, I invalid
229 * SW-bits: p present, y young, d dirty, r read, w write, s special,
230 * u unused, l large
264 * 231 *
265 * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001 232 * pte_none is true for the bit pattern .10.00000000, pte == 0x400
266 * pte_none is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400 233 * pte_swap is true for the bit pattern .11..ooooo.0, (pte & 0x201) == 0x200
267 * pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402 234 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
268 */ 235 */
269 236
270/* Bits in the segment/region table address-space-control-element */ 237/* Bits in the segment/region table address-space-control-element */
@@ -335,6 +302,8 @@ static inline int is_module_addr(void *addr)
335 * read-write, dirty, young 11..0...0...11 302 * read-write, dirty, young 11..0...0...11
336 * The segment table origin is used to distinguish empty (origin==0) from 303 * The segment table origin is used to distinguish empty (origin==0) from
337 * read-write, old segment table entries (origin!=0) 304 * read-write, old segment table entries (origin!=0)
305 * HW-bits: R read-only, I invalid
306 * SW-bits: y young, d dirty, r read, w write
338 */ 307 */
339 308
340#define _SEGMENT_ENTRY_SPLIT_BIT 11 /* THP splitting bit number */ 309#define _SEGMENT_ENTRY_SPLIT_BIT 11 /* THP splitting bit number */
@@ -423,6 +392,15 @@ static inline int mm_has_pgste(struct mm_struct *mm)
423 return 0; 392 return 0;
424} 393}
425 394
395static inline int mm_alloc_pgste(struct mm_struct *mm)
396{
397#ifdef CONFIG_PGSTE
398 if (unlikely(mm->context.alloc_pgste))
399 return 1;
400#endif
401 return 0;
402}
403
426/* 404/*
427 * In the case that a guest uses storage keys 405 * In the case that a guest uses storage keys
428 * faults should no longer be backed by zero pages 406 * faults should no longer be backed by zero pages
@@ -582,10 +560,9 @@ static inline int pte_none(pte_t pte)
582 560
583static inline int pte_swap(pte_t pte) 561static inline int pte_swap(pte_t pte)
584{ 562{
585 /* Bit pattern: (pte & 0x603) == 0x402 */ 563 /* Bit pattern: (pte & 0x201) == 0x200 */
586 return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | 564 return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
587 _PAGE_TYPE | _PAGE_PRESENT)) 565 == _PAGE_PROTECT;
588 == (_PAGE_INVALID | _PAGE_TYPE);
589} 566}
590 567
591static inline int pte_special(pte_t pte) 568static inline int pte_special(pte_t pte)
@@ -1586,51 +1563,51 @@ static inline int has_transparent_hugepage(void)
1586#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1563#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1587 1564
1588/* 1565/*
1589 * 31 bit swap entry format:
1590 * A page-table entry has some bits we have to treat in a special way.
1591 * Bits 0, 20 and bit 23 have to be zero, otherwise an specification
1592 * exception will occur instead of a page translation exception. The
1593 * specifiation exception has the bad habit not to store necessary
1594 * information in the lowcore.
1595 * Bits 21, 22, 30 and 31 are used to indicate the page type.
1596 * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
1597 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
1598 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
1599 * plus 24 for the offset.
1600 * 0| offset |0110|o|type |00|
1601 * 0 0000000001111111111 2222 2 22222 33
1602 * 0 1234567890123456789 0123 4 56789 01
1603 *
1604 * 64 bit swap entry format: 1566 * 64 bit swap entry format:
1605 * A page-table entry has some bits we have to treat in a special way. 1567 * A page-table entry has some bits we have to treat in a special way.
 1606 * Bits 52 and bit 55 have to be zero, otherwise a specification 1568 * Bits 52 and bit 55 have to be zero, otherwise a specification
1607 * exception will occur instead of a page translation exception. The 1569 * exception will occur instead of a page translation exception. The
 1608 * specification exception has the bad habit not to store necessary 1570 * specification exception has the bad habit not to store necessary
1609 * information in the lowcore. 1571 * information in the lowcore.
1610 * Bits 53, 54, 62 and 63 are used to indicate the page type. 1572 * Bits 54 and 63 are used to indicate the page type.
1611 * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402 1573 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
1612 * This leaves the bits 0-51 and bits 56-61 to store type and offset. 1574 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
1613 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51 1575 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
1614 * plus 56 for the offset. 1576 * for the offset.
1615 * | offset |0110|o|type |00| 1577 * | offset |01100|type |00|
1616 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66 1578 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
1617 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23 1579 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
1618 */ 1580 */
1619 1581
1620#define __SWP_OFFSET_MASK (~0UL >> 11) 1582#define __SWP_OFFSET_MASK ((1UL << 52) - 1)
1583#define __SWP_OFFSET_SHIFT 12
1584#define __SWP_TYPE_MASK ((1UL << 5) - 1)
1585#define __SWP_TYPE_SHIFT 2
1621 1586
1622static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) 1587static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
1623{ 1588{
1624 pte_t pte; 1589 pte_t pte;
1625 offset &= __SWP_OFFSET_MASK; 1590
1626 pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) | 1591 pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
1627 ((offset & 1UL) << 7) | ((offset & ~1UL) << 11); 1592 pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
1593 pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
1628 return pte; 1594 return pte;
1629} 1595}
1630 1596
1631#define __swp_type(entry) (((entry).val >> 2) & 0x1f) 1597static inline unsigned long __swp_type(swp_entry_t entry)
1632#define __swp_offset(entry) (((entry).val >> 11) | (((entry).val >> 7) & 1)) 1598{
1633#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) }) 1599 return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
1600}
1601
1602static inline unsigned long __swp_offset(swp_entry_t entry)
1603{
1604 return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
1605}
1606
1607static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
1608{
1609 return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
1610}
1634 1611
1635#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 1612#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
1636#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 1613#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
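
The new swap entry layout shifts the 5-bit type up by 2 and the 52-bit offset up by 12 on top of the invalid|protect bit pattern, so encoding and decoding are plain shift-and-mask operations. A user-space round-trip check of that layout; the two flag values (0x400 invalid, 0x200 protect) are taken from the pte_none/pte_swap bit patterns quoted in the comment, and the macro names here only mirror the kernel's __SWP_* definitions:

#include <assert.h>
#include <stdio.h>

#define PAGE_INVALID        0x400ULL
#define PAGE_PROTECT        0x200ULL
#define SWP_OFFSET_MASK     ((1ULL << 52) - 1)
#define SWP_OFFSET_SHIFT    12
#define SWP_TYPE_MASK       ((1ULL << 5) - 1)
#define SWP_TYPE_SHIFT      2

static unsigned long long mk_swap_pte(unsigned long long type,
                                      unsigned long long offset)
{
        unsigned long long pte = PAGE_INVALID | PAGE_PROTECT;

        pte |= (offset & SWP_OFFSET_MASK) << SWP_OFFSET_SHIFT;
        pte |= (type & SWP_TYPE_MASK) << SWP_TYPE_SHIFT;
        return pte;
}

int main(void)
{
        unsigned long long type = 17, offset = 0x123456789abULL;
        unsigned long long pte = mk_swap_pte(type, offset);

        /* decode with the same shift/mask pairs and verify the round trip */
        assert(((pte >> SWP_TYPE_SHIFT) & SWP_TYPE_MASK) == type);
        assert(((pte >> SWP_OFFSET_SHIFT) & SWP_OFFSET_MASK) == offset);
        /* a swap pte must match the pattern (pte & 0x201) == 0x200 */
        assert((pte & 0x201ULL) == 0x200ULL);
        printf("pte 0x%llx -> type %llu, offset 0x%llx\n", pte, type, offset);
        return 0;
}
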
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 210ffede0153..e617e74b7be2 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -14,20 +14,23 @@ static inline pmd_t __pte_to_pmd(pte_t pte)
14 14
15 /* 15 /*
16 * Convert encoding pte bits pmd bits 16 * Convert encoding pte bits pmd bits
17 * .IR...wrdytp dy..R...I...wr 17 * lIR.uswrdy.p dy..R...I...wr
18 * empty .10...000000 -> 00..0...1...00 18 * empty 010.000000.0 -> 00..0...1...00
19 * prot-none, clean, old .11...000001 -> 00..1...1...00 19 * prot-none, clean, old 111.000000.1 -> 00..1...1...00
20 * prot-none, clean, young .11...000101 -> 01..1...1...00 20 * prot-none, clean, young 111.000001.1 -> 01..1...1...00
21 * prot-none, dirty, old .10...001001 -> 10..1...1...00 21 * prot-none, dirty, old 111.000010.1 -> 10..1...1...00
22 * prot-none, dirty, young .10...001101 -> 11..1...1...00 22 * prot-none, dirty, young 111.000011.1 -> 11..1...1...00
23 * read-only, clean, old .11...010001 -> 00..1...1...01 23 * read-only, clean, old 111.000100.1 -> 00..1...1...01
24 * read-only, clean, young .01...010101 -> 01..1...0...01 24 * read-only, clean, young 101.000101.1 -> 01..1...0...01
25 * read-only, dirty, old .11...011001 -> 10..1...1...01 25 * read-only, dirty, old 111.000110.1 -> 10..1...1...01
26 * read-only, dirty, young .01...011101 -> 11..1...0...01 26 * read-only, dirty, young 101.000111.1 -> 11..1...0...01
27 * read-write, clean, old .11...110001 -> 00..0...1...11 27 * read-write, clean, old 111.001100.1 -> 00..1...1...11
28 * read-write, clean, young .01...110101 -> 01..0...0...11 28 * read-write, clean, young 101.001101.1 -> 01..1...0...11
29 * read-write, dirty, old .10...111001 -> 10..0...1...11 29 * read-write, dirty, old 110.001110.1 -> 10..0...1...11
30 * read-write, dirty, young .00...111101 -> 11..0...0...11 30 * read-write, dirty, young 100.001111.1 -> 11..0...0...11
31 * HW-bits: R read-only, I invalid
32 * SW-bits: p present, y young, d dirty, r read, w write, s special,
33 * u unused, l large
31 */ 34 */
32 if (pte_present(pte)) { 35 if (pte_present(pte)) {
33 pmd_val(pmd) = pte_val(pte) & PAGE_MASK; 36 pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
@@ -48,20 +51,23 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
48 51
49 /* 52 /*
50 * Convert encoding pmd bits pte bits 53 * Convert encoding pmd bits pte bits
51 * dy..R...I...wr .IR...wrdytp 54 * dy..R...I...wr lIR.uswrdy.p
52 * empty 00..0...1...00 -> .10...001100 55 * empty 00..0...1...00 -> 010.000000.0
53 * prot-none, clean, old 00..0...1...00 -> .10...000001 56 * prot-none, clean, old 00..1...1...00 -> 111.000000.1
54 * prot-none, clean, young 01..0...1...00 -> .10...000101 57 * prot-none, clean, young 01..1...1...00 -> 111.000001.1
55 * prot-none, dirty, old 10..0...1...00 -> .10...001001 58 * prot-none, dirty, old 10..1...1...00 -> 111.000010.1
56 * prot-none, dirty, young 11..0...1...00 -> .10...001101 59 * prot-none, dirty, young 11..1...1...00 -> 111.000011.1
57 * read-only, clean, old 00..1...1...01 -> .11...010001 60 * read-only, clean, old 00..1...1...01 -> 111.000100.1
58 * read-only, clean, young 01..1...1...01 -> .11...010101 61 * read-only, clean, young 01..1...0...01 -> 101.000101.1
59 * read-only, dirty, old 10..1...1...01 -> .11...011001 62 * read-only, dirty, old 10..1...1...01 -> 111.000110.1
60 * read-only, dirty, young 11..1...1...01 -> .11...011101 63 * read-only, dirty, young 11..1...0...01 -> 101.000111.1
61 * read-write, clean, old 00..0...1...11 -> .10...110001 64 * read-write, clean, old 00..1...1...11 -> 111.001100.1
62 * read-write, clean, young 01..0...1...11 -> .10...110101 65 * read-write, clean, young 01..1...0...11 -> 101.001101.1
63 * read-write, dirty, old 10..0...1...11 -> .10...111001 66 * read-write, dirty, old 10..0...1...11 -> 110.001110.1
64 * read-write, dirty, young 11..0...1...11 -> .10...111101 67 * read-write, dirty, young 11..0...0...11 -> 100.001111.1
68 * HW-bits: R read-only, I invalid
69 * SW-bits: p present, y young, d dirty, r read, w write, s special,
70 * u unused, l large
65 */ 71 */
66 if (pmd_present(pmd)) { 72 if (pmd_present(pmd)) {
67 pte_val(pte) = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN_LARGE; 73 pte_val(pte) = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN_LARGE;
@@ -70,8 +76,8 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
70 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) << 4; 76 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) << 4;
71 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) << 5; 77 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) << 5;
72 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT); 78 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT);
73 pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10; 79 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) >> 10;
74 pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10; 80 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) >> 10;
75 } else 81 } else
76 pte_val(pte) = _PAGE_INVALID; 82 pte_val(pte) = _PAGE_INVALID;
77 return pte; 83 return pte;
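
The corrected lines in __pmd_to_pte() copy the segment-level dirty and young bits down into the pte software bits instead of ORing them back into the pmd. Taking _PAGE_YOUNG (0x004) and _PAGE_DIRTY (0x008) from the pgtable.h hunk earlier in this patch, the >> 10 shift places the corresponding segment bits at 0x1000 and 0x2000; a small check of that relocation, with the segment bit values inferred from the shift rather than quoted from the header:

#include <assert.h>
#include <stdio.h>

#define PAGE_YOUNG              0x004UL
#define PAGE_DIRTY              0x008UL
/* inferred from the "<< 10" / ">> 10" conversions in the patch */
#define SEGMENT_ENTRY_YOUNG     (PAGE_YOUNG << 10)      /* 0x1000 */
#define SEGMENT_ENTRY_DIRTY     (PAGE_DIRTY << 10)      /* 0x2000 */

int main(void)
{
        unsigned long pmd = SEGMENT_ENTRY_YOUNG | SEGMENT_ENTRY_DIRTY;
        unsigned long pte = 0;

        /* the corrected direction: pmd bits feed the pte, shifted down */
        pte |= (pmd & SEGMENT_ENTRY_DIRTY) >> 10;
        pte |= (pmd & SEGMENT_ENTRY_YOUNG) >> 10;

        assert(pte == (PAGE_DIRTY | PAGE_YOUNG));
        printf("pmd 0x%lx -> pte dirty/young bits 0x%lx\n", pmd, pte);
        return 0;
}
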
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 33f589459113..b33f66110ca9 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -18,6 +18,7 @@
18#include <linux/rcupdate.h> 18#include <linux/rcupdate.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/swapops.h> 20#include <linux/swapops.h>
21#include <linux/sysctl.h>
21#include <linux/ksm.h> 22#include <linux/ksm.h>
22#include <linux/mman.h> 23#include <linux/mman.h>
23 24
@@ -920,6 +921,40 @@ unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
920} 921}
921EXPORT_SYMBOL(get_guest_storage_key); 922EXPORT_SYMBOL(get_guest_storage_key);
922 923
924static int page_table_allocate_pgste_min = 0;
925static int page_table_allocate_pgste_max = 1;
926int page_table_allocate_pgste = 0;
927EXPORT_SYMBOL(page_table_allocate_pgste);
928
929static struct ctl_table page_table_sysctl[] = {
930 {
931 .procname = "allocate_pgste",
932 .data = &page_table_allocate_pgste,
933 .maxlen = sizeof(int),
934 .mode = S_IRUGO | S_IWUSR,
935 .proc_handler = proc_dointvec,
936 .extra1 = &page_table_allocate_pgste_min,
937 .extra2 = &page_table_allocate_pgste_max,
938 },
939 { }
940};
941
942static struct ctl_table page_table_sysctl_dir[] = {
943 {
944 .procname = "vm",
945 .maxlen = 0,
946 .mode = 0555,
947 .child = page_table_sysctl,
948 },
949 { }
950};
951
952static int __init page_table_register_sysctl(void)
953{
954 return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
955}
956__initcall(page_table_register_sysctl);
957
923#else /* CONFIG_PGSTE */ 958#else /* CONFIG_PGSTE */
924 959
925static inline int page_table_with_pgste(struct page *page) 960static inline int page_table_with_pgste(struct page *page)
@@ -963,7 +998,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
963 struct page *uninitialized_var(page); 998 struct page *uninitialized_var(page);
964 unsigned int mask, bit; 999 unsigned int mask, bit;
965 1000
966 if (mm_has_pgste(mm)) 1001 if (mm_alloc_pgste(mm))
967 return page_table_alloc_pgste(mm); 1002 return page_table_alloc_pgste(mm);
968 /* Allocate fragments of a 4K page as 1K/2K page table */ 1003 /* Allocate fragments of a 4K page as 1K/2K page table */
969 spin_lock_bh(&mm->context.list_lock); 1004 spin_lock_bh(&mm->context.list_lock);
@@ -1165,116 +1200,25 @@ static inline void thp_split_mm(struct mm_struct *mm)
1165} 1200}
1166#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1201#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1167 1202
1168static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
1169 struct mm_struct *mm, pud_t *pud,
1170 unsigned long addr, unsigned long end)
1171{
1172 unsigned long next, *table, *new;
1173 struct page *page;
1174 spinlock_t *ptl;
1175 pmd_t *pmd;
1176
1177 pmd = pmd_offset(pud, addr);
1178 do {
1179 next = pmd_addr_end(addr, end);
1180again:
1181 if (pmd_none_or_clear_bad(pmd))
1182 continue;
1183 table = (unsigned long *) pmd_deref(*pmd);
1184 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
1185 if (page_table_with_pgste(page))
1186 continue;
1187 /* Allocate new page table with pgstes */
1188 new = page_table_alloc_pgste(mm);
1189 if (!new)
1190 return -ENOMEM;
1191
1192 ptl = pmd_lock(mm, pmd);
1193 if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
1194 /* Nuke pmd entry pointing to the "short" page table */
1195 pmdp_flush_lazy(mm, addr, pmd);
1196 pmd_clear(pmd);
1197 /* Copy ptes from old table to new table */
1198 memcpy(new, table, PAGE_SIZE/2);
1199 clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
1200 /* Establish new table */
1201 pmd_populate(mm, pmd, (pte_t *) new);
1202 /* Free old table with rcu, there might be a walker! */
1203 page_table_free_rcu(tlb, table, addr);
1204 new = NULL;
1205 }
1206 spin_unlock(ptl);
1207 if (new) {
1208 page_table_free_pgste(new);
1209 goto again;
1210 }
1211 } while (pmd++, addr = next, addr != end);
1212
1213 return addr;
1214}
1215
1216static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
1217 struct mm_struct *mm, pgd_t *pgd,
1218 unsigned long addr, unsigned long end)
1219{
1220 unsigned long next;
1221 pud_t *pud;
1222
1223 pud = pud_offset(pgd, addr);
1224 do {
1225 next = pud_addr_end(addr, end);
1226 if (pud_none_or_clear_bad(pud))
1227 continue;
1228 next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
1229 if (unlikely(IS_ERR_VALUE(next)))
1230 return next;
1231 } while (pud++, addr = next, addr != end);
1232
1233 return addr;
1234}
1235
1236static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
1237 unsigned long addr, unsigned long end)
1238{
1239 unsigned long next;
1240 pgd_t *pgd;
1241
1242 pgd = pgd_offset(mm, addr);
1243 do {
1244 next = pgd_addr_end(addr, end);
1245 if (pgd_none_or_clear_bad(pgd))
1246 continue;
1247 next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
1248 if (unlikely(IS_ERR_VALUE(next)))
1249 return next;
1250 } while (pgd++, addr = next, addr != end);
1251
1252 return 0;
1253}
1254
1255/* 1203/*
1256 * switch on pgstes for its userspace process (for kvm) 1204 * switch on pgstes for its userspace process (for kvm)
1257 */ 1205 */
1258int s390_enable_sie(void) 1206int s390_enable_sie(void)
1259{ 1207{
1260 struct task_struct *tsk = current; 1208 struct mm_struct *mm = current->mm;
1261 struct mm_struct *mm = tsk->mm;
1262 struct mmu_gather tlb;
1263 1209
1264 /* Do we have pgstes? if yes, we are done */ 1210 /* Do we have pgstes? if yes, we are done */
1265 if (mm_has_pgste(tsk->mm)) 1211 if (mm_has_pgste(mm))
1266 return 0; 1212 return 0;
1267 1213 /* Fail if the page tables are 2K */
1214 if (!mm_alloc_pgste(mm))
1215 return -EINVAL;
1268 down_write(&mm->mmap_sem); 1216 down_write(&mm->mmap_sem);
1217 mm->context.has_pgste = 1;
1269 /* split thp mappings and disable thp for future mappings */ 1218 /* split thp mappings and disable thp for future mappings */
1270 thp_split_mm(mm); 1219 thp_split_mm(mm);
1271 /* Reallocate the page tables with pgstes */
1272 tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
1273 if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE))
1274 mm->context.has_pgste = 1;
1275 tlb_finish_mmu(&tlb, 0, TASK_SIZE);
1276 up_write(&mm->mmap_sem); 1220 up_write(&mm->mmap_sem);
1277 return mm->context.has_pgste ? 0 : -ENOMEM; 1221 return 0;
1278} 1222}
1279EXPORT_SYMBOL_GPL(s390_enable_sie); 1223EXPORT_SYMBOL_GPL(s390_enable_sie);
1280 1224
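
With the on-the-fly page table reallocation removed, s390_enable_sie() now only succeeds if the process was started while the new vm.allocate_pgste sysctl was set, so that 4K page tables with pgstes were allocated from the beginning. A sketch of flipping that switch from a launcher before it starts the KVM user process; it needs CAP_SYS_ADMIN and a CONFIG_PGSTE kernel, and the path follows from the sysctl table above:

#include <stdio.h>

int main(void)
{
        /* must happen before the VM process is created, because the
         * setting is sampled when the new mm's page tables are set up */
        const char *path = "/proc/sys/vm/allocate_pgste";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return 1;
        }
        if (fputs("1\n", f) == EOF) {
                perror("write");
                fclose(f);
                return 1;
        }
        fclose(f);
        printf("allocate_pgste enabled\n");
        return 0;
}
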
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 6873f006f7d0..d366675e4bf8 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -774,7 +774,7 @@ static void __init zone_sizes_init(void)
774 * though, there'll be no lowmem, so we just alloc_bootmem 774 * though, there'll be no lowmem, so we just alloc_bootmem
775 * the memmap. There will be no percpu memory either. 775 * the memmap. There will be no percpu memory either.
776 */ 776 */
777 if (i != 0 && cpumask_test_cpu(i, &isolnodes)) { 777 if (i != 0 && node_isset(i, isolnodes)) {
778 node_memmap_pfn[i] = 778 node_memmap_pfn[i] =
779 alloc_bootmem_pfn(0, memmap_size, 0); 779 alloc_bootmem_pfn(0, memmap_size, 0);
780 BUG_ON(node_percpu[i] != 0); 780 BUG_ON(node_percpu[i] != 0);
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 25b1cc07d496..d6b078e9fa28 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -95,7 +95,6 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
95 95
96struct pvclock_vsyscall_time_info { 96struct pvclock_vsyscall_time_info {
97 struct pvclock_vcpu_time_info pvti; 97 struct pvclock_vcpu_time_info pvti;
98 u32 migrate_count;
99} __attribute__((__aligned__(SMP_CACHE_BYTES))); 98} __attribute__((__aligned__(SMP_CACHE_BYTES)));
100 99
101#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info) 100#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index e5ecd20e72dd..2f355d229a58 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -141,46 +141,7 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
141 set_normalized_timespec(ts, now.tv_sec, now.tv_nsec); 141 set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
142} 142}
143 143
144static struct pvclock_vsyscall_time_info *pvclock_vdso_info;
145
146static struct pvclock_vsyscall_time_info *
147pvclock_get_vsyscall_user_time_info(int cpu)
148{
149 if (!pvclock_vdso_info) {
150 BUG();
151 return NULL;
152 }
153
154 return &pvclock_vdso_info[cpu];
155}
156
157struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu)
158{
159 return &pvclock_get_vsyscall_user_time_info(cpu)->pvti;
160}
161
162#ifdef CONFIG_X86_64 144#ifdef CONFIG_X86_64
163static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l,
164 void *v)
165{
166 struct task_migration_notifier *mn = v;
167 struct pvclock_vsyscall_time_info *pvti;
168
169 pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu);
170
171 /* this is NULL when pvclock vsyscall is not initialized */
172 if (unlikely(pvti == NULL))
173 return NOTIFY_DONE;
174
175 pvti->migrate_count++;
176
177 return NOTIFY_DONE;
178}
179
180static struct notifier_block pvclock_migrate = {
181 .notifier_call = pvclock_task_migrate,
182};
183
184/* 145/*
185 * Initialize the generic pvclock vsyscall state. This will allocate 146 * Initialize the generic pvclock vsyscall state. This will allocate
186 * a/some page(s) for the per-vcpu pvclock information, set up a 147 * a/some page(s) for the per-vcpu pvclock information, set up a
@@ -194,17 +155,12 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
194 155
195 WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE); 156 WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE);
196 157
197 pvclock_vdso_info = i;
198
199 for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) { 158 for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
200 __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx, 159 __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
201 __pa(i) + (idx*PAGE_SIZE), 160 __pa(i) + (idx*PAGE_SIZE),
202 PAGE_KERNEL_VVAR); 161 PAGE_KERNEL_VVAR);
203 } 162 }
204 163
205
206 register_task_migration_notifier(&pvclock_migrate);
207
208 return 0; 164 return 0;
209} 165}
210#endif 166#endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ed31c31b2485..c73efcd03e29 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1669,12 +1669,28 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
1669 &guest_hv_clock, sizeof(guest_hv_clock)))) 1669 &guest_hv_clock, sizeof(guest_hv_clock))))
1670 return 0; 1670 return 0;
1671 1671
1672 /* 1672 /* This VCPU is paused, but it's legal for a guest to read another
1673 * The interface expects us to write an even number signaling that the 1673 * VCPU's kvmclock, so we really have to follow the specification where
1674 * update is finished. Since the guest won't see the intermediate 1674 * it says that version is odd if data is being modified, and even after
1675 * state, we just increase by 2 at the end. 1675 * it is consistent.
1676 *
1677 * Version field updates must be kept separate. This is because
1678 * kvm_write_guest_cached might use a "rep movs" instruction, and
1679 * writes within a string instruction are weakly ordered. So there
1680 * are three writes overall.
1681 *
1682 * As a small optimization, only write the version field in the first
1683 * and third write. The vcpu->pv_time cache is still valid, because the
1684 * version field is the first in the struct.
1676 */ 1685 */
1677 vcpu->hv_clock.version = guest_hv_clock.version + 2; 1686 BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
1687
1688 vcpu->hv_clock.version = guest_hv_clock.version + 1;
1689 kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1690 &vcpu->hv_clock,
1691 sizeof(vcpu->hv_clock.version));
1692
1693 smp_wmb();
1678 1694
1679 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ 1695 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
1680 pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); 1696 pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
@@ -1695,6 +1711,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
1695 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, 1711 kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1696 &vcpu->hv_clock, 1712 &vcpu->hv_clock,
1697 sizeof(vcpu->hv_clock)); 1713 sizeof(vcpu->hv_clock));
1714
1715 smp_wmb();
1716
1717 vcpu->hv_clock.version++;
1718 kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1719 &vcpu->hv_clock,
1720 sizeof(vcpu->hv_clock.version));
1698 return 0; 1721 return 0;
1699} 1722}
1700 1723
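
The three kvm_write_guest_cached() calls implement a sequence-count style publication: the version is bumped to an odd value, the payload is written, and the version is bumped back to even, with a write barrier between the steps so a reader never consumes a half-updated structure. A user-space sketch of the same writer protocol with C11 atomics, as an illustration of the idea rather than the KVM code (a single writer is assumed, as in the per-vCPU kvmclock case):

#include <stdatomic.h>
#include <stdio.h>

/* illustrative stand-in for pvclock_vcpu_time_info */
struct pv_clock {
        atomic_uint version;            /* odd while an update is in flight */
        atomic_ullong tsc_timestamp;
        atomic_ullong system_time;
};

static void pv_clock_publish(struct pv_clock *c,
                             unsigned long long tsc, unsigned long long sys)
{
        unsigned int v = atomic_load_explicit(&c->version, memory_order_relaxed);

        /* write 1: make the version odd so readers see an update in progress */
        atomic_store_explicit(&c->version, v + 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);

        /* write 2: the payload */
        atomic_store_explicit(&c->tsc_timestamp, tsc, memory_order_relaxed);
        atomic_store_explicit(&c->system_time, sys, memory_order_relaxed);

        /* write 3: back to even, publishing the new data */
        atomic_store_explicit(&c->version, v + 2, memory_order_release);
}

int main(void)
{
        struct pv_clock c;

        atomic_init(&c.version, 0);
        atomic_init(&c.tsc_timestamp, 0);
        atomic_init(&c.system_time, 0);
        pv_clock_publish(&c, 123456789ULL, 42ULL);
        printf("version=%u system_time=%llu\n",
               atomic_load(&c.version),
               (unsigned long long)atomic_load(&c.system_time));
        return 0;
}
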
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 40d2473836c9..9793322751e0 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -82,15 +82,18 @@ static notrace cycle_t vread_pvclock(int *mode)
82 cycle_t ret; 82 cycle_t ret;
83 u64 last; 83 u64 last;
84 u32 version; 84 u32 version;
85 u32 migrate_count;
86 u8 flags; 85 u8 flags;
87 unsigned cpu, cpu1; 86 unsigned cpu, cpu1;
88 87
89 88
90 /* 89 /*
91 * When looping to get a consistent (time-info, tsc) pair, we 90 * Note: hypervisor must guarantee that:
92 * also need to deal with the possibility we can switch vcpus, 91 * 1. cpu ID number maps 1:1 to per-CPU pvclock time info.
93 * so make sure we always re-fetch time-info for the current vcpu. 92 * 2. that per-CPU pvclock time info is updated if the
93 * underlying CPU changes.
94 * 3. that version is increased whenever underlying CPU
95 * changes.
96 *
94 */ 97 */
95 do { 98 do {
96 cpu = __getcpu() & VGETCPU_CPU_MASK; 99 cpu = __getcpu() & VGETCPU_CPU_MASK;
@@ -99,27 +102,20 @@ static notrace cycle_t vread_pvclock(int *mode)
99 * __getcpu() calls (Gleb). 102 * __getcpu() calls (Gleb).
100 */ 103 */
101 104
102 /* Make sure migrate_count will change if we leave the VCPU. */ 105 pvti = get_pvti(cpu);
103 do {
104 pvti = get_pvti(cpu);
105 migrate_count = pvti->migrate_count;
106
107 cpu1 = cpu;
108 cpu = __getcpu() & VGETCPU_CPU_MASK;
109 } while (unlikely(cpu != cpu1));
110 106
111 version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags); 107 version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);
112 108
113 /* 109 /*
114 * Test we're still on the cpu as well as the version. 110 * Test we're still on the cpu as well as the version.
115 * - We must read TSC of pvti's VCPU. 111 * We could have been migrated just after the first
116 * - KVM doesn't follow the versioning protocol, so data could 112 * vgetcpu but before fetching the version, so we
117 * change before version if we left the VCPU. 113 * wouldn't notice a version change.
118 */ 114 */
119 smp_rmb(); 115 cpu1 = __getcpu() & VGETCPU_CPU_MASK;
120 } while (unlikely((pvti->pvti.version & 1) || 116 } while (unlikely(cpu != cpu1 ||
121 pvti->pvti.version != version || 117 (pvti->pvti.version & 1) ||
122 pvti->migrate_count != migrate_count)); 118 pvti->pvti.version != version));
123 119
124 if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT))) 120 if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
125 *mode = VCLOCK_NONE; 121 *mode = VCLOCK_NONE;
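
The vDSO loop above is the matching reader side: sample the version, copy the data, then re-check that the version is unchanged and even, retrying otherwise (plus the extra CPU re-check that the pvclock case needs). A user-space sketch of just the version handshake with C11 atomics, as a generic analogue rather than the vDSO code:

#include <stdatomic.h>
#include <stdio.h>

struct pv_clock {
        atomic_uint version;            /* odd while the writer is updating */
        atomic_ullong tsc_timestamp;
        atomic_ullong system_time;
};

/* retry until two version reads match and the value is even */
static unsigned long long pv_clock_read(struct pv_clock *c)
{
        unsigned int v1, v2;
        unsigned long long sys;

        do {
                v1 = atomic_load_explicit(&c->version, memory_order_acquire);
                sys = atomic_load_explicit(&c->system_time, memory_order_relaxed);
                atomic_thread_fence(memory_order_acquire);
                v2 = atomic_load_explicit(&c->version, memory_order_relaxed);
        } while ((v1 & 1) || v1 != v2);

        return sys;
}

int main(void)
{
        struct pv_clock c;

        atomic_init(&c.version, 2);             /* even: no update in flight */
        atomic_init(&c.tsc_timestamp, 0);
        atomic_init(&c.system_time, 42);
        printf("system_time=%llu\n", pv_clock_read(&c));
        return 0;
}
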
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index cd827625cf07..01504c819e8f 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -684,7 +684,7 @@ static int acpi_sbs_add(struct acpi_device *device)
684 if (!sbs_manager_broken) { 684 if (!sbs_manager_broken) {
685 result = acpi_manager_get_info(sbs); 685 result = acpi_manager_get_info(sbs);
686 if (!result) { 686 if (!result) {
687 sbs->manager_present = 0; 687 sbs->manager_present = 1;
688 for (id = 0; id < MAX_SBS_BAT; ++id) 688 for (id = 0; id < MAX_SBS_BAT; ++id)
689 if ((sbs->batteries_supported & (1 << id))) 689 if ((sbs->batteries_supported & (1 << id)))
690 acpi_battery_add(sbs, id); 690 acpi_battery_add(sbs, id);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 812523330a78..ec6c5c6e1ac9 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2264,6 +2264,11 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2264 result, xferred); 2264 result, xferred);
2265 if (!img_request->result) 2265 if (!img_request->result)
2266 img_request->result = result; 2266 img_request->result = result;
2267 /*
2268 * Need to end I/O on the entire obj_request worth of
2269 * bytes in case of error.
2270 */
2271 xferred = obj_request->length;
2267 } 2272 }
2268 2273
2269 /* Image object requests don't own their page array */ 2274 /* Image object requests don't own their page array */
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 7a73a279e179..61c417b9e53f 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -158,9 +158,18 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
158 int entered_state; 158 int entered_state;
159 159
160 struct cpuidle_state *target_state = &drv->states[index]; 160 struct cpuidle_state *target_state = &drv->states[index];
161 bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
161 ktime_t time_start, time_end; 162 ktime_t time_start, time_end;
162 s64 diff; 163 s64 diff;
163 164
165 /*
166 * Tell the time framework to switch to a broadcast timer because our
167 * local timer will be shut down. If a local timer is used from another
168 * CPU as a broadcast timer, this call may fail if it is not available.
169 */
170 if (broadcast && tick_broadcast_enter())
171 return -EBUSY;
172
164 trace_cpu_idle_rcuidle(index, dev->cpu); 173 trace_cpu_idle_rcuidle(index, dev->cpu);
165 time_start = ktime_get(); 174 time_start = ktime_get();
166 175
@@ -169,6 +178,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
169 time_end = ktime_get(); 178 time_end = ktime_get();
170 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); 179 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
171 180
181 if (broadcast) {
182 if (WARN_ON_ONCE(!irqs_disabled()))
183 local_irq_disable();
184
185 tick_broadcast_exit();
186 }
187
172 if (!cpuidle_state_is_coupled(dev, drv, entered_state)) 188 if (!cpuidle_state_is_coupled(dev, drv, entered_state))
173 local_irq_enable(); 189 local_irq_enable();
174 190
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index fd7ac13f2574..bda2cb06dc7a 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -437,6 +437,7 @@ config IMG_MDC_DMA
437 437
438config XGENE_DMA 438config XGENE_DMA
439 tristate "APM X-Gene DMA support" 439 tristate "APM X-Gene DMA support"
440 depends on ARCH_XGENE || COMPILE_TEST
440 select DMA_ENGINE 441 select DMA_ENGINE
441 select DMA_ENGINE_RAID 442 select DMA_ENGINE_RAID
442 select ASYNC_TX_ENABLE_CHANNEL_SWITCH 443 select ASYNC_TX_ENABLE_CHANNEL_SWITCH
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 0e035a8cf401..2890d744bb1b 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -571,11 +571,15 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
571 571
572 chan = private_candidate(&mask, device, NULL, NULL); 572 chan = private_candidate(&mask, device, NULL, NULL);
573 if (chan) { 573 if (chan) {
574 dma_cap_set(DMA_PRIVATE, device->cap_mask);
575 device->privatecnt++;
574 err = dma_chan_get(chan); 576 err = dma_chan_get(chan);
575 if (err) { 577 if (err) {
576 pr_debug("%s: failed to get %s: (%d)\n", 578 pr_debug("%s: failed to get %s: (%d)\n",
577 __func__, dma_chan_name(chan), err); 579 __func__, dma_chan_name(chan), err);
578 chan = NULL; 580 chan = NULL;
581 if (--device->privatecnt == 0)
582 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
579 } 583 }
580 } 584 }
581 585
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index f705798ce3eb..ebd8a5f398b0 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -673,6 +673,7 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
673 * Power management 673 * Power management
674 */ 674 */
675 675
676#ifdef CONFIG_PM
676static int usb_dmac_runtime_suspend(struct device *dev) 677static int usb_dmac_runtime_suspend(struct device *dev)
677{ 678{
678 struct usb_dmac *dmac = dev_get_drvdata(dev); 679 struct usb_dmac *dmac = dev_get_drvdata(dev);
@@ -690,6 +691,7 @@ static int usb_dmac_runtime_resume(struct device *dev)
690 691
691 return usb_dmac_init(dmac); 692 return usb_dmac_init(dmac);
692} 693}
694#endif /* CONFIG_PM */
693 695
694static const struct dev_pm_ops usb_dmac_pm = { 696static const struct dev_pm_ops usb_dmac_pm = {
695 SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume, 697 SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
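Guarding the runtime PM callbacks with #ifdef CONFIG_PM mirrors SET_RUNTIME_PM_OPS(), which references its arguments only when CONFIG_PM is set; without the guard the two functions become defined-but-unused in !CONFIG_PM builds and trigger compiler warnings. A generic sketch of the same pattern, using a made-up FEATURE_X switch rather than the kernel's PM macros:

#include <stdio.h>

/* Hypothetical config switch; comment it out to mimic a CONFIG_PM=n build. */
#define FEATURE_X 1

#ifdef FEATURE_X
#define FEATURE_X_OPS(fn) fn      /* callback is actually referenced */
#else
#define FEATURE_X_OPS(fn) NULL    /* expands to nothing useful */
#endif

#ifdef FEATURE_X
/* Only compiled when something can reference it, which avoids
 * "defined but not used" warnings in the FEATURE_X=n build. */
static int feature_x_suspend(void)
{
        return 0;
}
#endif

static int (*const ops)(void) = FEATURE_X_OPS(feature_x_suspend);

int main(void)
{
        printf("%s\n", ops ? "feature X ops wired up" : "feature X disabled");
        return 0;
}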
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3da1af46625c..773d1d24e604 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6074,6 +6074,8 @@ enum skl_disp_power_wells {
6074#define GTFIFOCTL 0x120008 6074#define GTFIFOCTL 0x120008
6075#define GT_FIFO_FREE_ENTRIES_MASK 0x7f 6075#define GT_FIFO_FREE_ENTRIES_MASK 0x7f
6076#define GT_FIFO_NUM_RESERVED_ENTRIES 20 6076#define GT_FIFO_NUM_RESERVED_ENTRIES 20
6077#define GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL (1 << 12)
6078#define GT_FIFO_CTL_RC6_POLICY_STALL (1 << 11)
6077 6079
6078#define HSW_IDICR 0x9008 6080#define HSW_IDICR 0x9008
6079#define IDIHASHMSK(x) (((x) & 0x3f) << 16) 6081#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index ab5cc94588e1..ff2a74651dd4 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -360,6 +360,14 @@ static void __intel_uncore_early_sanitize(struct drm_device *dev,
360 __raw_i915_write32(dev_priv, GTFIFODBG, 360 __raw_i915_write32(dev_priv, GTFIFODBG,
361 __raw_i915_read32(dev_priv, GTFIFODBG)); 361 __raw_i915_read32(dev_priv, GTFIFODBG));
362 362
363 /* WaDisableShadowRegForCpd:chv */
364 if (IS_CHERRYVIEW(dev)) {
365 __raw_i915_write32(dev_priv, GTFIFOCTL,
366 __raw_i915_read32(dev_priv, GTFIFOCTL) |
367 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
368 GT_FIFO_CTL_RC6_POLICY_STALL);
369 }
370
363 intel_uncore_forcewake_reset(dev, restore_forcewake); 371 intel_uncore_forcewake_reset(dev, restore_forcewake);
364} 372}
365 373
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index dac78ad24b31..42b2ea3fdcf3 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -580,6 +580,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
580 else 580 else
581 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 581 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
582 582
583 /* if there is no audio, set MINM_OVER_MAXP */
584 if (!drm_detect_monitor_audio(radeon_connector_edid(connector)))
585 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
583 if (rdev->family < CHIP_RV770) 586 if (rdev->family < CHIP_RV770)
584 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; 587 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
585 /* use frac fb div on APUs */ 588 /* use frac fb div on APUs */
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index f57c1ab617bc..dd39f434b4a7 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1761,17 +1761,15 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1761 struct drm_device *dev = encoder->dev; 1761 struct drm_device *dev = encoder->dev;
1762 struct radeon_device *rdev = dev->dev_private; 1762 struct radeon_device *rdev = dev->dev_private;
1763 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1763 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1764 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1765 int encoder_mode = atombios_get_encoder_mode(encoder); 1764 int encoder_mode = atombios_get_encoder_mode(encoder);
1766 1765
1767 DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", 1766 DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
1768 radeon_encoder->encoder_id, mode, radeon_encoder->devices, 1767 radeon_encoder->encoder_id, mode, radeon_encoder->devices,
1769 radeon_encoder->active_device); 1768 radeon_encoder->active_device);
1770 1769
1771 if (connector && (radeon_audio != 0) && 1770 if ((radeon_audio != 0) &&
1772 ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || 1771 ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
1773 (ENCODER_MODE_IS_DP(encoder_mode) && 1772 ENCODER_MODE_IS_DP(encoder_mode)))
1774 drm_detect_monitor_audio(radeon_connector_edid(connector)))))
1775 radeon_audio_dpms(encoder, mode); 1773 radeon_audio_dpms(encoder, mode);
1776 1774
1777 switch (radeon_encoder->encoder_id) { 1775 switch (radeon_encoder->encoder_id) {
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 3adc2afe32aa..68fd9fc677e3 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -295,28 +295,3 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
295 WREG32(DCCG_AUDIO_DTO1_MODULE, clock); 295 WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
296 } 296 }
297} 297}
298
299void dce6_dp_enable(struct drm_encoder *encoder, bool enable)
300{
301 struct drm_device *dev = encoder->dev;
302 struct radeon_device *rdev = dev->dev_private;
303 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
304 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
305
306 if (!dig || !dig->afmt)
307 return;
308
309 if (enable) {
310 WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
311 EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
312 WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
313 EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */
314 EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */
315 EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
316 EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
317 } else {
318 WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
319 }
320
321 dig->afmt->enabled = enable;
322}
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index c18d4ecbd95d..0926739c9fa7 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -219,13 +219,9 @@ void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset,
219 WREG32(AFMT_AVI_INFO3 + offset, 219 WREG32(AFMT_AVI_INFO3 + offset,
220 frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24)); 220 frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24));
221 221
222 WREG32_OR(HDMI_INFOFRAME_CONTROL0 + offset,
223 HDMI_AVI_INFO_SEND | /* enable AVI info frames */
224 HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
225
226 WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset, 222 WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset,
227 HDMI_AVI_INFO_LINE(2), /* anything other than 0 */ 223 HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
228 ~HDMI_AVI_INFO_LINE_MASK); 224 ~HDMI_AVI_INFO_LINE_MASK);
229} 225}
230 226
231void dce4_hdmi_audio_set_dto(struct radeon_device *rdev, 227void dce4_hdmi_audio_set_dto(struct radeon_device *rdev,
@@ -370,9 +366,13 @@ void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset)
370 WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset, 366 WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
371 AFMT_AUDIO_CHANNEL_ENABLE(0xff)); 367 AFMT_AUDIO_CHANNEL_ENABLE(0xff));
372 368
369 WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
370 HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
	 371	   HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
372
373 /* allow 60958 channel status and send audio packets fields to be updated */ 373 /* allow 60958 channel status and send audio packets fields to be updated */
374 WREG32(AFMT_AUDIO_PACKET_CONTROL + offset, 374 WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset,
375 AFMT_AUDIO_SAMPLE_SEND | AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE); 375 AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE);
376} 376}
377 377
378 378
@@ -398,17 +398,26 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
398 return; 398 return;
399 399
400 if (enable) { 400 if (enable) {
401 WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, 401 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
402 HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
403
404 WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset,
405 HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
406 HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
407 402
408 WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 403 if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
409 HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ 404 WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
410 HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */ 405 HDMI_AVI_INFO_SEND | /* enable AVI info frames */
406 HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */
407 HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
408 HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
409 WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
410 AFMT_AUDIO_SAMPLE_SEND);
411 } else {
412 WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
413 HDMI_AVI_INFO_SEND | /* enable AVI info frames */
414 HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
415 WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
416 ~AFMT_AUDIO_SAMPLE_SEND);
417 }
411 } else { 418 } else {
419 WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
420 ~AFMT_AUDIO_SAMPLE_SEND);
412 WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0); 421 WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0);
413 } 422 }
414 423
@@ -424,20 +433,24 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
424 struct radeon_device *rdev = dev->dev_private; 433 struct radeon_device *rdev = dev->dev_private;
425 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 434 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
426 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 435 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
436 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
427 437
428 if (!dig || !dig->afmt) 438 if (!dig || !dig->afmt)
429 return; 439 return;
430 440
431 if (enable) { 441 if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
432 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 442 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
433 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 443 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
434 struct radeon_connector_atom_dig *dig_connector; 444 struct radeon_connector_atom_dig *dig_connector;
435 uint32_t val; 445 uint32_t val;
436 446
447 WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
448 AFMT_AUDIO_SAMPLE_SEND);
449
437 WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset, 450 WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
438 EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); 451 EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
439 452
440 if (radeon_connector->con_priv) { 453 if (!ASIC_IS_DCE6(rdev) && radeon_connector->con_priv) {
441 dig_connector = radeon_connector->con_priv; 454 dig_connector = radeon_connector->con_priv;
442 val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset); 455 val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset);
443 val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf); 456 val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf);
@@ -457,6 +470,8 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
457 EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ 470 EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
458 } else { 471 } else {
459 WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0); 472 WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
473 WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
474 ~AFMT_AUDIO_SAMPLE_SEND);
460 } 475 }
461 476
462 dig->afmt->enabled = enable; 477 dig->afmt->enabled = enable;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index dd6606b8e23c..e85894ade95c 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -228,12 +228,13 @@ void r600_set_avi_packet(struct radeon_device *rdev, u32 offset,
228 WREG32(HDMI0_AVI_INFO3 + offset, 228 WREG32(HDMI0_AVI_INFO3 + offset,
229 frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24)); 229 frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24));
230 230
231 WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset,
232 HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */
233
231 WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset, 234 WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
232 HDMI0_AVI_INFO_SEND | /* enable AVI info frames */ 235 HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
233 HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */ 236 HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */
234 237
235 WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset,
236 HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */
237} 238}
238 239
239/* 240/*
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index 48d49e651a30..8b82abb78df1 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -102,7 +102,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
102void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); 102void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
103void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); 103void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
104void evergreen_dp_enable(struct drm_encoder *encoder, bool enable); 104void evergreen_dp_enable(struct drm_encoder *encoder, bool enable);
105void dce6_dp_enable(struct drm_encoder *encoder, bool enable);
106 105
107static const u32 pin_offsets[7] = 106static const u32 pin_offsets[7] =
108{ 107{
@@ -240,7 +239,7 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
240 .set_avi_packet = evergreen_set_avi_packet, 239 .set_avi_packet = evergreen_set_avi_packet,
241 .set_audio_packet = dce4_set_audio_packet, 240 .set_audio_packet = dce4_set_audio_packet,
242 .mode_set = radeon_audio_dp_mode_set, 241 .mode_set = radeon_audio_dp_mode_set,
243 .dpms = dce6_dp_enable, 242 .dpms = evergreen_dp_enable,
244}; 243};
245 244
246static void radeon_audio_interface_init(struct radeon_device *rdev) 245static void radeon_audio_interface_init(struct radeon_device *rdev)
@@ -461,30 +460,33 @@ void radeon_audio_detect(struct drm_connector *connector,
461 if (!connector || !connector->encoder) 460 if (!connector || !connector->encoder)
462 return; 461 return;
463 462
463 if (!radeon_encoder_is_digital(connector->encoder))
464 return;
465
464 rdev = connector->encoder->dev->dev_private; 466 rdev = connector->encoder->dev->dev_private;
465 radeon_encoder = to_radeon_encoder(connector->encoder); 467 radeon_encoder = to_radeon_encoder(connector->encoder);
466 dig = radeon_encoder->enc_priv; 468 dig = radeon_encoder->enc_priv;
467 469
468 if (status == connector_status_connected) { 470 if (!dig->afmt)
469 struct radeon_connector *radeon_connector; 471 return;
470 int sink_type;
471
472 if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
473 radeon_encoder->audio = NULL;
474 return;
475 }
476 472
477 radeon_connector = to_radeon_connector(connector); 473 if (status == connector_status_connected) {
478 sink_type = radeon_dp_getsinktype(radeon_connector); 474 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
479 475
480 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort && 476 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
481 sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) 477 radeon_dp_getsinktype(radeon_connector) ==
478 CONNECTOR_OBJECT_ID_DISPLAYPORT)
482 radeon_encoder->audio = rdev->audio.dp_funcs; 479 radeon_encoder->audio = rdev->audio.dp_funcs;
483 else 480 else
484 radeon_encoder->audio = rdev->audio.hdmi_funcs; 481 radeon_encoder->audio = rdev->audio.hdmi_funcs;
485 482
486 dig->afmt->pin = radeon_audio_get_pin(connector->encoder); 483 dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
487 radeon_audio_enable(rdev, dig->afmt->pin, 0xf); 484 if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
485 radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
486 } else {
487 radeon_audio_enable(rdev, dig->afmt->pin, 0);
488 dig->afmt->pin = NULL;
489 }
488 } else { 490 } else {
489 radeon_audio_enable(rdev, dig->afmt->pin, 0); 491 radeon_audio_enable(rdev, dig->afmt->pin, 0);
490 dig->afmt->pin = NULL; 492 dig->afmt->pin = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cebb65e07e1d..d17d251dbd4f 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1379,8 +1379,10 @@ out:
1379 /* updated in get modes as well since we need to know if it's analog or digital */ 1379 /* updated in get modes as well since we need to know if it's analog or digital */
1380 radeon_connector_update_scratch_regs(connector, ret); 1380 radeon_connector_update_scratch_regs(connector, ret);
1381 1381
1382 if (radeon_audio != 0) 1382 if (radeon_audio != 0) {
1383 radeon_connector_get_edid(connector);
1383 radeon_audio_detect(connector, ret); 1384 radeon_audio_detect(connector, ret);
1385 }
1384 1386
1385exit: 1387exit:
1386 pm_runtime_mark_last_busy(connector->dev->dev); 1388 pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1717,8 +1719,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1717 1719
1718 radeon_connector_update_scratch_regs(connector, ret); 1720 radeon_connector_update_scratch_regs(connector, ret);
1719 1721
1720 if (radeon_audio != 0) 1722 if (radeon_audio != 0) {
1723 radeon_connector_get_edid(connector);
1721 radeon_audio_detect(connector, ret); 1724 radeon_audio_detect(connector, ret);
1725 }
1722 1726
1723out: 1727out:
1724 pm_runtime_mark_last_busy(connector->dev->dev); 1728 pm_runtime_mark_last_busy(connector->dev->dev);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 4d0f96cc3da4..ab39b85e0f76 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -88,7 +88,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
88 p->dma_reloc_idx = 0; 88 p->dma_reloc_idx = 0;
89 /* FIXME: we assume that each relocs use 4 dwords */ 89 /* FIXME: we assume that each relocs use 4 dwords */
90 p->nrelocs = chunk->length_dw / 4; 90 p->nrelocs = chunk->length_dw / 4;
91 p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL); 91 p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list));
92 if (p->relocs == NULL) { 92 if (p->relocs == NULL) {
93 return -ENOMEM; 93 return -ENOMEM;
94 } 94 }
@@ -428,7 +428,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
428 } 428 }
429 } 429 }
430 kfree(parser->track); 430 kfree(parser->track);
431 kfree(parser->relocs); 431 drm_free_large(parser->relocs);
432 drm_free_large(parser->vm_bos); 432 drm_free_large(parser->vm_bos);
433 for (i = 0; i < parser->nchunks; i++) 433 for (i = 0; i < parser->nchunks; i++)
434 drm_free_large(parser->chunks[i].kdata); 434 drm_free_large(parser->chunks[i].kdata);
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 01701376b239..535bf404b725 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -135,7 +135,7 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
135 while (it) { 135 while (it) {
136 struct radeon_mn_node *node; 136 struct radeon_mn_node *node;
137 struct radeon_bo *bo; 137 struct radeon_bo *bo;
138 int r; 138 long r;
139 139
140 node = container_of(it, struct radeon_mn_node, it); 140 node = container_of(it, struct radeon_mn_node, it);
141 it = interval_tree_iter_next(it, start, end); 141 it = interval_tree_iter_next(it, start, end);
@@ -144,19 +144,19 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
144 144
145 r = radeon_bo_reserve(bo, true); 145 r = radeon_bo_reserve(bo, true);
146 if (r) { 146 if (r) {
147 DRM_ERROR("(%d) failed to reserve user bo\n", r); 147 DRM_ERROR("(%ld) failed to reserve user bo\n", r);
148 continue; 148 continue;
149 } 149 }
150 150
151 r = reservation_object_wait_timeout_rcu(bo->tbo.resv, 151 r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
152 true, false, MAX_SCHEDULE_TIMEOUT); 152 true, false, MAX_SCHEDULE_TIMEOUT);
153 if (r) 153 if (r <= 0)
154 DRM_ERROR("(%d) failed to wait for user bo\n", r); 154 DRM_ERROR("(%ld) failed to wait for user bo\n", r);
155 155
156 radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); 156 radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
157 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 157 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
158 if (r) 158 if (r)
159 DRM_ERROR("(%d) failed to validate user bo\n", r); 159 DRM_ERROR("(%ld) failed to validate user bo\n", r);
160 160
161 radeon_bo_unreserve(bo); 161 radeon_bo_unreserve(bo);
162 } 162 }
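reservation_object_wait_timeout_rcu() follows the wait_*_timeout convention: negative on error, 0 when the timeout expired, and the remaining time on success, which is why r becomes a long and the failure test is r <= 0 rather than r != 0. A compact model of handling that return convention; wait_remaining() is a stand-in, not the kernel function:

#include <stdio.h>

/* Stand-in for a wait that returns <0 on error, 0 on timeout,
 * or the time left when the condition was met before the deadline. */
static long wait_remaining(long outcome) { return outcome; }

static void check_wait(long outcome)
{
        long r = wait_remaining(outcome);

        if (r <= 0)                     /* error and timeout are both failures */
                printf("(%ld) failed to wait\n", r);
        else
                printf("done, %ld ticks to spare\n", r);
}

int main(void)
{
        check_wait(-512);       /* error            -> failure path */
        check_wait(0);          /* timed out        -> failure path */
        check_wait(100);        /* finished in time -> success path */
        return 0;
}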
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 2a5a4a9e772d..de42fc4a22b8 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -473,6 +473,23 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
473 } 473 }
474 474
475 mutex_lock(&vm->mutex); 475 mutex_lock(&vm->mutex);
476 soffset /= RADEON_GPU_PAGE_SIZE;
477 eoffset /= RADEON_GPU_PAGE_SIZE;
478 if (soffset || eoffset) {
479 struct interval_tree_node *it;
480 it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
481 if (it && it != &bo_va->it) {
482 struct radeon_bo_va *tmp;
483 tmp = container_of(it, struct radeon_bo_va, it);
484 /* bo and tmp overlap, invalid offset */
485 dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
486 "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
487 soffset, tmp->bo, tmp->it.start, tmp->it.last);
488 mutex_unlock(&vm->mutex);
489 return -EINVAL;
490 }
491 }
492
476 if (bo_va->it.start || bo_va->it.last) { 493 if (bo_va->it.start || bo_va->it.last) {
477 if (bo_va->addr) { 494 if (bo_va->addr) {
478 /* add a clone of the bo_va to clear the old address */ 495 /* add a clone of the bo_va to clear the old address */
@@ -490,6 +507,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
490 spin_lock(&vm->status_lock); 507 spin_lock(&vm->status_lock);
491 list_add(&tmp->vm_status, &vm->freed); 508 list_add(&tmp->vm_status, &vm->freed);
492 spin_unlock(&vm->status_lock); 509 spin_unlock(&vm->status_lock);
510
511 bo_va->addr = 0;
493 } 512 }
494 513
495 interval_tree_remove(&bo_va->it, &vm->va); 514 interval_tree_remove(&bo_va->it, &vm->va);
@@ -497,21 +516,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
497 bo_va->it.last = 0; 516 bo_va->it.last = 0;
498 } 517 }
499 518
500 soffset /= RADEON_GPU_PAGE_SIZE;
501 eoffset /= RADEON_GPU_PAGE_SIZE;
502 if (soffset || eoffset) { 519 if (soffset || eoffset) {
503 struct interval_tree_node *it;
504 it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
505 if (it) {
506 struct radeon_bo_va *tmp;
507 tmp = container_of(it, struct radeon_bo_va, it);
508 /* bo and tmp overlap, invalid offset */
509 dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
510 "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
511 soffset, tmp->bo, tmp->it.start, tmp->it.last);
512 mutex_unlock(&vm->mutex);
513 return -EINVAL;
514 }
515 bo_va->it.start = soffset; 520 bo_va->it.start = soffset;
516 bo_va->it.last = eoffset - 1; 521 bo_va->it.last = eoffset - 1;
517 interval_tree_insert(&bo_va->it, &vm->va); 522 interval_tree_insert(&bo_va->it, &vm->va);
@@ -1107,7 +1112,8 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
1107 list_del(&bo_va->bo_list); 1112 list_del(&bo_va->bo_list);
1108 1113
1109 mutex_lock(&vm->mutex); 1114 mutex_lock(&vm->mutex);
1110 interval_tree_remove(&bo_va->it, &vm->va); 1115 if (bo_va->it.start || bo_va->it.last)
1116 interval_tree_remove(&bo_va->it, &vm->va);
1111 spin_lock(&vm->status_lock); 1117 spin_lock(&vm->status_lock);
1112 list_del(&bo_va->vm_status); 1118 list_del(&bo_va->vm_status);
1113 1119
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index b35bccfeef79..ff8b83f5e929 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2924,6 +2924,7 @@ struct si_dpm_quirk {
2924static struct si_dpm_quirk si_dpm_quirk_list[] = { 2924static struct si_dpm_quirk si_dpm_quirk_list[] = {
2925 /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ 2925 /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
2926 { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, 2926 { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
2927 { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
2927 { 0, 0, 0, 0 }, 2928 { 0, 0, 0, 0 },
2928}; 2929};
2929 2930
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index ccb0ce073ef2..4557f335a8a5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1409,7 +1409,7 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
1409 struct vop *vop; 1409 struct vop *vop;
1410 struct resource *res; 1410 struct resource *res;
1411 size_t alloc_size; 1411 size_t alloc_size;
1412 int ret; 1412 int ret, irq;
1413 1413
1414 of_id = of_match_device(vop_driver_dt_match, dev); 1414 of_id = of_match_device(vop_driver_dt_match, dev);
1415 vop_data = of_id->data; 1415 vop_data = of_id->data;
@@ -1445,11 +1445,12 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
1445 return ret; 1445 return ret;
1446 } 1446 }
1447 1447
1448 vop->irq = platform_get_irq(pdev, 0); 1448 irq = platform_get_irq(pdev, 0);
1449 if (vop->irq < 0) { 1449 if (irq < 0) {
1450 dev_err(dev, "cannot find irq for vop\n"); 1450 dev_err(dev, "cannot find irq for vop\n");
1451 return vop->irq; 1451 return irq;
1452 } 1452 }
1453 vop->irq = (unsigned int)irq;
1453 1454
1454 spin_lock_init(&vop->reg_lock); 1455 spin_lock_init(&vop->reg_lock);
1455 spin_lock_init(&vop->irq_lock); 1456 spin_lock_init(&vop->irq_lock);
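platform_get_irq() reports failure as a negative errno, but vop->irq is unsigned, so the old test `if (vop->irq < 0)` could never be true; the fix keeps the value in a signed int until it has been checked. A two-case illustration of why the unsigned comparison is dead code (get_irq_stub() and vop_model are hypothetical):

#include <stdio.h>

static int get_irq_stub(void) { return -6; }    /* pretend lookup failed (-ENXIO) */

struct vop_model { unsigned int irq; };

int main(void)
{
        struct vop_model vop;

        /* Broken pattern: the negative errno wraps to a huge unsigned value,
         * so the check is always false (compilers usually warn about it). */
        vop.irq = get_irq_stub();
        printf("unsigned check fires: %s\n", vop.irq < 0 ? "yes" : "no"); /* no */

        /* Fixed pattern: keep the result signed until it has been checked. */
        int irq = get_irq_stub();
        if (irq < 0) {
                printf("signed check fires: yes (err %d)\n", irq);
                return 1;
        }
        vop.irq = (unsigned int)irq;
        return 0;
}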
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index c8a18e4ee9dc..720ceeb7fa9b 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1298,21 +1298,22 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
1298 goto err_unlock_md_type; 1298 goto err_unlock_md_type;
1299 } 1299 }
1300 1300
1301 if (dm_get_md_type(md) == DM_TYPE_NONE) 1301 if (dm_get_md_type(md) == DM_TYPE_NONE) {
1302 /* Initial table load: acquire type of table. */ 1302 /* Initial table load: acquire type of table. */
1303 dm_set_md_type(md, dm_table_get_type(t)); 1303 dm_set_md_type(md, dm_table_get_type(t));
1304 else if (dm_get_md_type(md) != dm_table_get_type(t)) { 1304
1305 /* setup md->queue to reflect md's type (may block) */
1306 r = dm_setup_md_queue(md);
1307 if (r) {
1308 DMWARN("unable to set up device queue for new table.");
1309 goto err_unlock_md_type;
1310 }
1311 } else if (dm_get_md_type(md) != dm_table_get_type(t)) {
1305 DMWARN("can't change device type after initial table load."); 1312 DMWARN("can't change device type after initial table load.");
1306 r = -EINVAL; 1313 r = -EINVAL;
1307 goto err_unlock_md_type; 1314 goto err_unlock_md_type;
1308 } 1315 }
1309 1316
1310 /* setup md->queue to reflect md's type (may block) */
1311 r = dm_setup_md_queue(md);
1312 if (r) {
1313 DMWARN("unable to set up device queue for new table.");
1314 goto err_unlock_md_type;
1315 }
1316 dm_unlock_md_type(md); 1317 dm_unlock_md_type(md);
1317 1318
1318 /* stage inactive table */ 1319 /* stage inactive table */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f8c7ca3e8947..a930b72314ac 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1082,18 +1082,26 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
1082 dm_put(md); 1082 dm_put(md);
1083} 1083}
1084 1084
1085static void free_rq_clone(struct request *clone) 1085static void free_rq_clone(struct request *clone, bool must_be_mapped)
1086{ 1086{
1087 struct dm_rq_target_io *tio = clone->end_io_data; 1087 struct dm_rq_target_io *tio = clone->end_io_data;
1088 struct mapped_device *md = tio->md; 1088 struct mapped_device *md = tio->md;
1089 1089
1090 WARN_ON_ONCE(must_be_mapped && !clone->q);
1091
1090 blk_rq_unprep_clone(clone); 1092 blk_rq_unprep_clone(clone);
1091 1093
1092 if (clone->q->mq_ops) 1094 if (md->type == DM_TYPE_MQ_REQUEST_BASED)
1095 /* stacked on blk-mq queue(s) */
1093 tio->ti->type->release_clone_rq(clone); 1096 tio->ti->type->release_clone_rq(clone);
1094 else if (!md->queue->mq_ops) 1097 else if (!md->queue->mq_ops)
1095 /* request_fn queue stacked on request_fn queue(s) */ 1098 /* request_fn queue stacked on request_fn queue(s) */
1096 free_clone_request(md, clone); 1099 free_clone_request(md, clone);
1100 /*
1101 * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
1102 * no need to call free_clone_request() because we leverage blk-mq by
1103 * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
1104 */
1097 1105
1098 if (!md->queue->mq_ops) 1106 if (!md->queue->mq_ops)
1099 free_rq_tio(tio); 1107 free_rq_tio(tio);
@@ -1124,7 +1132,7 @@ static void dm_end_request(struct request *clone, int error)
1124 rq->sense_len = clone->sense_len; 1132 rq->sense_len = clone->sense_len;
1125 } 1133 }
1126 1134
1127 free_rq_clone(clone); 1135 free_rq_clone(clone, true);
1128 if (!rq->q->mq_ops) 1136 if (!rq->q->mq_ops)
1129 blk_end_request_all(rq, error); 1137 blk_end_request_all(rq, error);
1130 else 1138 else
@@ -1143,7 +1151,7 @@ static void dm_unprep_request(struct request *rq)
1143 } 1151 }
1144 1152
1145 if (clone) 1153 if (clone)
1146 free_rq_clone(clone); 1154 free_rq_clone(clone, false);
1147} 1155}
1148 1156
1149/* 1157/*
@@ -2662,9 +2670,6 @@ static int dm_init_request_based_queue(struct mapped_device *md)
2662{ 2670{
2663 struct request_queue *q = NULL; 2671 struct request_queue *q = NULL;
2664 2672
2665 if (md->queue->elevator)
2666 return 0;
2667
2668 /* Fully initialize the queue */ 2673 /* Fully initialize the queue */
2669 q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL); 2674 q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
2670 if (!q) 2675 if (!q)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 78dde56ae6e6..d5fe5d5f490f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -82,6 +82,8 @@
82#include <net/bond_3ad.h> 82#include <net/bond_3ad.h>
83#include <net/bond_alb.h> 83#include <net/bond_alb.h>
84 84
85#include "bonding_priv.h"
86
85/*---------------------------- Module parameters ----------------------------*/ 87/*---------------------------- Module parameters ----------------------------*/
86 88
87/* monitor all links that often (in milliseconds). <=0 disables monitoring */ 89/* monitor all links that often (in milliseconds). <=0 disables monitoring */
@@ -4542,6 +4544,8 @@ unsigned int bond_get_num_tx_queues(void)
4542int bond_create(struct net *net, const char *name) 4544int bond_create(struct net *net, const char *name)
4543{ 4545{
4544 struct net_device *bond_dev; 4546 struct net_device *bond_dev;
4547 struct bonding *bond;
4548 struct alb_bond_info *bond_info;
4545 int res; 4549 int res;
4546 4550
4547 rtnl_lock(); 4551 rtnl_lock();
@@ -4555,6 +4559,14 @@ int bond_create(struct net *net, const char *name)
4555 return -ENOMEM; 4559 return -ENOMEM;
4556 } 4560 }
4557 4561
4562 /*
4563 * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX.
4564 * It is set to 0 by default which is wrong.
4565 */
4566 bond = netdev_priv(bond_dev);
4567 bond_info = &(BOND_ALB_INFO(bond));
4568 bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
4569
4558 dev_net_set(bond_dev, net); 4570 dev_net_set(bond_dev, net);
4559 bond_dev->rtnl_link_ops = &bond_link_ops; 4571 bond_dev->rtnl_link_ops = &bond_link_ops;
4560 4572
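The bonding hunk sets rx_hashtbl_used_head to RLB_NULL_INDEX right after allocation because index 0 is a valid hash-table slot, so the zeroed default would make an empty list appear to start at entry 0. A generic sketch of that sentinel-versus-zero distinction; TABLE_SIZE and NULL_INDEX here are illustrative, not the bonding driver's values:

#include <stdio.h>

#define TABLE_SIZE 8
#define NULL_INDEX 0xffffffffu          /* sentinel meaning "no entry", unlike slot 0 */

struct table {
        unsigned int used_head;         /* index of first used slot, or NULL_INDEX */
        int slots[TABLE_SIZE];
};

static void table_init(struct table *t)
{
        t->used_head = NULL_INDEX;      /* explicitly empty; 0 would alias slot 0 */
}

int main(void)
{
        struct table t = { 0 };         /* zeroed, like freshly allocated memory */

        printf("zeroed head looks used: %s\n",
               t.used_head != NULL_INDEX ? "yes (bug)" : "no");
        table_init(&t);
        printf("after init, empty: %s\n",
               t.used_head == NULL_INDEX ? "yes" : "no");
        return 0;
}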
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 62694cfc05b6..b20b35acb47d 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -4,6 +4,7 @@
4#include <net/netns/generic.h> 4#include <net/netns/generic.h>
5#include <net/bonding.h> 5#include <net/bonding.h>
6 6
7#include "bonding_priv.h"
7 8
8static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) 9static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
9 __acquires(RCU) 10 __acquires(RCU)
diff --git a/drivers/net/bonding/bonding_priv.h b/drivers/net/bonding/bonding_priv.h
new file mode 100644
index 000000000000..5a4d81a9437c
--- /dev/null
+++ b/drivers/net/bonding/bonding_priv.h
@@ -0,0 +1,25 @@
1/*
2 * Bond several ethernet interfaces into a Cisco, running 'Etherchannel'.
3 *
4 * Portions are (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
5 * NCM: Network and Communications Management, Inc.
6 *
7 * BUT, I'm the one who modified it for ethernet, so:
8 * (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov
9 *
10 * This software may be used and distributed according to the terms
11 * of the GNU Public License, incorporated herein by reference.
12 *
13 */
14
15#ifndef _BONDING_PRIV_H
16#define _BONDING_PRIV_H
17
18#define DRV_VERSION "3.7.1"
19#define DRV_RELDATE "April 27, 2011"
20#define DRV_NAME "bonding"
21#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
22
23#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
24
25#endif
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 58808f651452..e8c96b8e86f4 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -112,7 +112,7 @@ config PCH_CAN
112 112
113config CAN_GRCAN 113config CAN_GRCAN
114 tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices" 114 tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices"
115 depends on OF 115 depends on OF && HAS_DMA
116 ---help--- 116 ---help---
117 Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN. 117 Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN.
118 Note that the driver supports little endian, even though little 118 Note that the driver supports little endian, even though little
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 4643914859b2..8b17a9065b0b 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -1102,7 +1102,7 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
1102 1102
1103 if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | 1103 if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
1104 MSG_FLAG_NERR)) { 1104 MSG_FLAG_NERR)) {
1105 netdev_err(priv->netdev, "Unknow error (flags: 0x%02x)\n", 1105 netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n",
1106 msg->u.rx_can_header.flag); 1106 msg->u.rx_can_header.flag);
1107 1107
1108 stats->rx_errors++; 1108 stats->rx_errors++;
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index b36ee9e0d220..d686b9cac29f 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -523,7 +523,7 @@ static int etherh_addr(char *addr, struct expansion_card *ec)
523 char *s; 523 char *s;
524 524
525 if (!ecard_readchunk(&cd, ec, 0xf5, 0)) { 525 if (!ecard_readchunk(&cd, ec, 0xf5, 0)) {
526 printk(KERN_ERR "%s: unable to read podule description string\n", 526 printk(KERN_ERR "%s: unable to read module description string\n",
527 dev_name(&ec->dev)); 527 dev_name(&ec->dev));
528 goto no_addr; 528 goto no_addr;
529 } 529 }
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h
index eba070f16782..89cd11d86642 100644
--- a/drivers/net/ethernet/altera/altera_msgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_msgdmahw.h
@@ -58,15 +58,12 @@ struct msgdma_extended_desc {
58/* Tx buffer control flags 58/* Tx buffer control flags
59 */ 59 */
60#define MSGDMA_DESC_CTL_TX_FIRST (MSGDMA_DESC_CTL_GEN_SOP | \ 60#define MSGDMA_DESC_CTL_TX_FIRST (MSGDMA_DESC_CTL_GEN_SOP | \
61 MSGDMA_DESC_CTL_TR_ERR_IRQ | \
62 MSGDMA_DESC_CTL_GO) 61 MSGDMA_DESC_CTL_GO)
63 62
64#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_TR_ERR_IRQ | \ 63#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_GO)
65 MSGDMA_DESC_CTL_GO)
66 64
67#define MSGDMA_DESC_CTL_TX_LAST (MSGDMA_DESC_CTL_GEN_EOP | \ 65#define MSGDMA_DESC_CTL_TX_LAST (MSGDMA_DESC_CTL_GEN_EOP | \
68 MSGDMA_DESC_CTL_TR_COMP_IRQ | \ 66 MSGDMA_DESC_CTL_TR_COMP_IRQ | \
69 MSGDMA_DESC_CTL_TR_ERR_IRQ | \
70 MSGDMA_DESC_CTL_GO) 67 MSGDMA_DESC_CTL_GO)
71 68
72#define MSGDMA_DESC_CTL_TX_SINGLE (MSGDMA_DESC_CTL_GEN_SOP | \ 69#define MSGDMA_DESC_CTL_TX_SINGLE (MSGDMA_DESC_CTL_GEN_SOP | \
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 90a76306ad0f..da48e66377b5 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -391,6 +391,12 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
391 "RCV pktstatus %08X pktlength %08X\n", 391 "RCV pktstatus %08X pktlength %08X\n",
392 pktstatus, pktlength); 392 pktstatus, pktlength);
393 393
	 394	   /* DMA transfer from TSE starts with 2 additional bytes for
395 * IP payload alignment. Status returned by get_rx_status()
396 * contains DMA transfer length. Packet is 2 bytes shorter.
397 */
398 pktlength -= 2;
399
394 count++; 400 count++;
395 next_entry = (++priv->rx_cons) % priv->rx_ring_size; 401 next_entry = (++priv->rx_cons) % priv->rx_ring_size;
396 402
@@ -777,6 +783,8 @@ static int init_phy(struct net_device *dev)
777 struct altera_tse_private *priv = netdev_priv(dev); 783 struct altera_tse_private *priv = netdev_priv(dev);
778 struct phy_device *phydev; 784 struct phy_device *phydev;
779 struct device_node *phynode; 785 struct device_node *phynode;
786 bool fixed_link = false;
787 int rc = 0;
780 788
781 /* Avoid init phy in case of no phy present */ 789 /* Avoid init phy in case of no phy present */
782 if (!priv->phy_iface) 790 if (!priv->phy_iface)
@@ -789,13 +797,32 @@ static int init_phy(struct net_device *dev)
789 phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0); 797 phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
790 798
791 if (!phynode) { 799 if (!phynode) {
792 netdev_dbg(dev, "no phy-handle found\n"); 800 /* check if a fixed-link is defined in device-tree */
793 if (!priv->mdio) { 801 if (of_phy_is_fixed_link(priv->device->of_node)) {
794 netdev_err(dev, 802 rc = of_phy_register_fixed_link(priv->device->of_node);
795 "No phy-handle nor local mdio specified\n"); 803 if (rc < 0) {
796 return -ENODEV; 804 netdev_err(dev, "cannot register fixed PHY\n");
805 return rc;
806 }
807
808 /* In the case of a fixed PHY, the DT node associated
809 * to the PHY is the Ethernet MAC DT node.
810 */
811 phynode = of_node_get(priv->device->of_node);
812 fixed_link = true;
813
814 netdev_dbg(dev, "fixed-link detected\n");
815 phydev = of_phy_connect(dev, phynode,
816 &altera_tse_adjust_link,
817 0, priv->phy_iface);
818 } else {
819 netdev_dbg(dev, "no phy-handle found\n");
820 if (!priv->mdio) {
821 netdev_err(dev, "No phy-handle nor local mdio specified\n");
822 return -ENODEV;
823 }
824 phydev = connect_local_phy(dev);
797 } 825 }
798 phydev = connect_local_phy(dev);
799 } else { 826 } else {
800 netdev_dbg(dev, "phy-handle found\n"); 827 netdev_dbg(dev, "phy-handle found\n");
801 phydev = of_phy_connect(dev, phynode, 828 phydev = of_phy_connect(dev, phynode,
@@ -819,10 +846,10 @@ static int init_phy(struct net_device *dev)
819 /* Broken HW is sometimes missing the pull-up resistor on the 846 /* Broken HW is sometimes missing the pull-up resistor on the
820 * MDIO line, which results in reads to non-existent devices returning 847 * MDIO line, which results in reads to non-existent devices returning
821 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent 848 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
822 * device as well. 849 * device as well. If a fixed-link is used the phy_id is always 0.
823 * Note: phydev->phy_id is the result of reading the UID PHY registers. 850 * Note: phydev->phy_id is the result of reading the UID PHY registers.
824 */ 851 */
825 if (phydev->phy_id == 0) { 852 if ((phydev->phy_id == 0) && !fixed_link) {
826 netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id); 853 netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
827 phy_disconnect(phydev); 854 phy_disconnect(phydev);
828 return -ENODEV; 855 return -ENODEV;
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index c638c85f3954..089c269637b7 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -179,7 +179,7 @@ config SUNLANCE
179 179
180config AMD_XGBE 180config AMD_XGBE
181 tristate "AMD 10GbE Ethernet driver" 181 tristate "AMD 10GbE Ethernet driver"
182 depends on (OF_NET || ACPI) && HAS_IOMEM 182 depends on (OF_NET || ACPI) && HAS_IOMEM && HAS_DMA
183 select PHYLIB 183 select PHYLIB
184 select AMD_XGBE_PHY 184 select AMD_XGBE_PHY
185 select BITREVERSE 185 select BITREVERSE
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index 8e262e2b39b6..dea29ee24da4 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -25,8 +25,7 @@ config ARC_EMAC_CORE
25config ARC_EMAC 25config ARC_EMAC
26 tristate "ARC EMAC support" 26 tristate "ARC EMAC support"
27 select ARC_EMAC_CORE 27 select ARC_EMAC_CORE
28 depends on OF_IRQ 28 depends on OF_IRQ && OF_NET && HAS_DMA
29 depends on OF_NET
30 ---help--- 29 ---help---
31 On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x 30 On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
32 non-standard on-chip ethernet device ARC EMAC 10/100 is used. 31 non-standard on-chip ethernet device ARC EMAC 10/100 is used.
@@ -35,7 +34,7 @@ config ARC_EMAC
35config EMAC_ROCKCHIP 34config EMAC_ROCKCHIP
36 tristate "Rockchip EMAC support" 35 tristate "Rockchip EMAC support"
37 select ARC_EMAC_CORE 36 select ARC_EMAC_CORE
38 depends on OF_IRQ && OF_NET && REGULATOR 37 depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA
39 ---help--- 38 ---help---
40 Support for Rockchip RK3066/RK3188 EMAC ethernet controllers. 39 Support for Rockchip RK3066/RK3188 EMAC ethernet controllers.
41 This selects Rockchip SoC glue layer support for the 40 This selects Rockchip SoC glue layer support for the
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h
index 74df16aef793..88a6271de5bc 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h
@@ -129,7 +129,7 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
129#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8 129#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8
130#define TWSI_CTRL_SW_LDSTART 0x800 130#define TWSI_CTRL_SW_LDSTART 0x800
131#define TWSI_CTRL_HW_LDSTART 0x1000 131#define TWSI_CTRL_HW_LDSTART 0x1000
132#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x0x7F 132#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F
133#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15 133#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15
134#define TWSI_CTRL_LD_EXIST 0x400000 134#define TWSI_CTRL_LD_EXIST 0x400000
135#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3 135#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 7e3d87a88c76..e2c043eabbf3 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -543,7 +543,7 @@ struct bcm_sysport_tx_counters {
543 u32 jbr; /* RO # of xmited jabber count*/ 543 u32 jbr; /* RO # of xmited jabber count*/
544 u32 bytes; /* RO # of xmited byte count */ 544 u32 bytes; /* RO # of xmited byte count */
545 u32 pok; /* RO # of xmited good pkt */ 545 u32 pok; /* RO # of xmited good pkt */
546 u32 uc; /* RO (0x0x4f0)# of xmited unitcast pkt */ 546 u32 uc; /* RO (0x4f0) # of xmited unicast pkt */
547}; 547};
548 548
549struct bcm_sysport_mib { 549struct bcm_sysport_mib {
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index de77d3a74abc..21e3c38c7c75 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1260,7 +1260,7 @@ static int bgmac_poll(struct napi_struct *napi, int weight)
1260 1260
1261 /* Poll again if more events arrived in the meantime */ 1261 /* Poll again if more events arrived in the meantime */
1262 if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX)) 1262 if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
1263 return handled; 1263 return weight;
1264 1264
1265 if (handled < weight) { 1265 if (handled < weight) {
1266 napi_complete(napi); 1266 napi_complete(napi);
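Returning handled while the interrupt status still shows pending RX/TX work can stop NAPI polling early; returning the full weight tells the core the budget was exhausted so it schedules the poll function again. A stripped-down model of that contract; process(), more_pending() and the backlog counter are stand-ins for the driver's ring state:

#include <stdbool.h>
#include <stdio.h>

static int backlog = 5;                 /* packets waiting in the ring */

static int process(int budget)
{
        int n = backlog < budget ? backlog : budget;
        backlog -= n;
        return n;
}

static bool more_pending(void) { return backlog > 0; }

/* NAPI-style poll: returning the full budget means "keep polling me". */
static int poll(int budget)
{
        int handled = process(budget);

        if (more_pending())
                return budget;          /* new events arrived, don't complete */
        if (handled < budget)
                printf("napi_complete()\n");
        return handled;
}

int main(void)
{
        int budget = 4, ret;

        do {
                ret = poll(budget);
                printf("poll returned %d\n", ret);
        } while (ret == budget);        /* the core re-polls while the budget is used up */
        return 0;
}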
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 355d5fea5be9..a3b0f7a0c61e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -521,6 +521,7 @@ struct bnx2x_fp_txdata {
521}; 521};
522 522
523enum bnx2x_tpa_mode_t { 523enum bnx2x_tpa_mode_t {
524 TPA_MODE_DISABLED,
524 TPA_MODE_LRO, 525 TPA_MODE_LRO,
525 TPA_MODE_GRO 526 TPA_MODE_GRO
526}; 527};
@@ -589,7 +590,6 @@ struct bnx2x_fastpath {
589 590
590 /* TPA related */ 591 /* TPA related */
591 struct bnx2x_agg_info *tpa_info; 592 struct bnx2x_agg_info *tpa_info;
592 u8 disable_tpa;
593#ifdef BNX2X_STOP_ON_ERROR 593#ifdef BNX2X_STOP_ON_ERROR
594 u64 tpa_queue_used; 594 u64 tpa_queue_used;
595#endif 595#endif
@@ -1545,9 +1545,7 @@ struct bnx2x {
1545#define USING_MSIX_FLAG (1 << 5) 1545#define USING_MSIX_FLAG (1 << 5)
1546#define USING_MSI_FLAG (1 << 6) 1546#define USING_MSI_FLAG (1 << 6)
1547#define DISABLE_MSI_FLAG (1 << 7) 1547#define DISABLE_MSI_FLAG (1 << 7)
1548#define TPA_ENABLE_FLAG (1 << 8)
1549#define NO_MCP_FLAG (1 << 9) 1548#define NO_MCP_FLAG (1 << 9)
1550#define GRO_ENABLE_FLAG (1 << 10)
1551#define MF_FUNC_DIS (1 << 11) 1549#define MF_FUNC_DIS (1 << 11)
1552#define OWN_CNIC_IRQ (1 << 12) 1550#define OWN_CNIC_IRQ (1 << 12)
1553#define NO_ISCSI_OOO_FLAG (1 << 13) 1551#define NO_ISCSI_OOO_FLAG (1 << 13)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 2f63467bce46..a8bb8f664d3d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -947,10 +947,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
947 u16 frag_size, pages; 947 u16 frag_size, pages;
948#ifdef BNX2X_STOP_ON_ERROR 948#ifdef BNX2X_STOP_ON_ERROR
949 /* sanity check */ 949 /* sanity check */
950 if (fp->disable_tpa && 950 if (fp->mode == TPA_MODE_DISABLED &&
951 (CQE_TYPE_START(cqe_fp_type) || 951 (CQE_TYPE_START(cqe_fp_type) ||
952 CQE_TYPE_STOP(cqe_fp_type))) 952 CQE_TYPE_STOP(cqe_fp_type)))
953 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n", 953 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
954 CQE_TYPE(cqe_fp_type)); 954 CQE_TYPE(cqe_fp_type));
955#endif 955#endif
956 956
@@ -1396,7 +1396,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1396 DP(NETIF_MSG_IFUP, 1396 DP(NETIF_MSG_IFUP,
1397 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); 1397 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1398 1398
1399 if (!fp->disable_tpa) { 1399 if (fp->mode != TPA_MODE_DISABLED) {
1400 /* Fill the per-aggregation pool */ 1400 /* Fill the per-aggregation pool */
1401 for (i = 0; i < MAX_AGG_QS(bp); i++) { 1401 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1402 struct bnx2x_agg_info *tpa_info = 1402 struct bnx2x_agg_info *tpa_info =
@@ -1410,7 +1410,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1410 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n", 1410 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1411 j); 1411 j);
1412 bnx2x_free_tpa_pool(bp, fp, i); 1412 bnx2x_free_tpa_pool(bp, fp, i);
1413 fp->disable_tpa = 1; 1413 fp->mode = TPA_MODE_DISABLED;
1414 break; 1414 break;
1415 } 1415 }
1416 dma_unmap_addr_set(first_buf, mapping, 0); 1416 dma_unmap_addr_set(first_buf, mapping, 0);
@@ -1438,7 +1438,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1438 ring_prod); 1438 ring_prod);
1439 bnx2x_free_tpa_pool(bp, fp, 1439 bnx2x_free_tpa_pool(bp, fp,
1440 MAX_AGG_QS(bp)); 1440 MAX_AGG_QS(bp));
1441 fp->disable_tpa = 1; 1441 fp->mode = TPA_MODE_DISABLED;
1442 ring_prod = 0; 1442 ring_prod = 0;
1443 break; 1443 break;
1444 } 1444 }
@@ -1560,7 +1560,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1560 1560
1561 bnx2x_free_rx_bds(fp); 1561 bnx2x_free_rx_bds(fp);
1562 1562
1563 if (!fp->disable_tpa) 1563 if (fp->mode != TPA_MODE_DISABLED)
1564 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); 1564 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1565 } 1565 }
1566} 1566}
@@ -2477,17 +2477,19 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2477 /* set the tpa flag for each queue. The tpa flag determines the queue 2477 /* set the tpa flag for each queue. The tpa flag determines the queue
2478 * minimal size so it must be set prior to queue memory allocation 2478 * minimal size so it must be set prior to queue memory allocation
2479 */ 2479 */
2480 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG || 2480 if (bp->dev->features & NETIF_F_LRO)
2481 (bp->flags & GRO_ENABLE_FLAG &&
2482 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2483 if (bp->flags & TPA_ENABLE_FLAG)
2484 fp->mode = TPA_MODE_LRO; 2481 fp->mode = TPA_MODE_LRO;
2485 else if (bp->flags & GRO_ENABLE_FLAG) 2482 else if (bp->dev->features & NETIF_F_GRO &&
2483 bnx2x_mtu_allows_gro(bp->dev->mtu))
2486 fp->mode = TPA_MODE_GRO; 2484 fp->mode = TPA_MODE_GRO;
2485 else
2486 fp->mode = TPA_MODE_DISABLED;
2487 2487
2488 /* We don't want TPA on an FCoE L2 ring */ 2488 /* We don't want TPA if it's disabled in bp
2489 if (IS_FCOE_FP(fp)) 2489 * or if this is an FCoE L2 ring.
2490 fp->disable_tpa = 1; 2490 */
2491 if (bp->disable_tpa || IS_FCOE_FP(fp))
2492 fp->mode = TPA_MODE_DISABLED;
2491} 2493}
2492 2494
2493int bnx2x_load_cnic(struct bnx2x *bp) 2495int bnx2x_load_cnic(struct bnx2x *bp)
@@ -2608,7 +2610,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2608 /* 2610 /*
2609 * Zero fastpath structures preserving invariants like napi, which are 2611 * Zero fastpath structures preserving invariants like napi, which are
2610 * allocated only once, fp index, max_cos, bp pointer. 2612 * allocated only once, fp index, max_cos, bp pointer.
2611 * Also set fp->disable_tpa and txdata_ptr. 2613 * Also set fp->mode and txdata_ptr.
2612 */ 2614 */
2613 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); 2615 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2614 for_each_queue(bp, i) 2616 for_each_queue(bp, i)
@@ -3247,7 +3249,7 @@ int bnx2x_low_latency_recv(struct napi_struct *napi)
3247 3249
3248 if ((bp->state == BNX2X_STATE_CLOSED) || 3250 if ((bp->state == BNX2X_STATE_CLOSED) ||
3249 (bp->state == BNX2X_STATE_ERROR) || 3251 (bp->state == BNX2X_STATE_ERROR) ||
3250 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG))) 3252 (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO)))
3251 return LL_FLUSH_FAILED; 3253 return LL_FLUSH_FAILED;
3252 3254
3253 if (!bnx2x_fp_lock_poll(fp)) 3255 if (!bnx2x_fp_lock_poll(fp))
@@ -4543,7 +4545,7 @@ alloc_mem_err:
4543 * In these cases we disable the queue 4545 * In these cases we disable the queue
4544 * Min size is different for OOO, TPA and non-TPA queues 4546 * Min size is different for OOO, TPA and non-TPA queues
4545 */ 4547 */
4546 if (ring_size < (fp->disable_tpa ? 4548 if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4547 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) { 4549 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4548 /* release memory allocated for this queue */ 4550 /* release memory allocated for this queue */
4549 bnx2x_free_fp_mem_at(bp, index); 4551 bnx2x_free_fp_mem_at(bp, index);
@@ -4809,66 +4811,71 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev,
4809{ 4811{
4810 struct bnx2x *bp = netdev_priv(dev); 4812 struct bnx2x *bp = netdev_priv(dev);
4811 4813
4814 if (pci_num_vf(bp->pdev)) {
4815 netdev_features_t changed = dev->features ^ features;
4816
4817 /* Revert the requested changes in features if they
4818 * would require internal reload of PF in bnx2x_set_features().
4819 */
4820 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4821 features &= ~NETIF_F_RXCSUM;
4822 features |= dev->features & NETIF_F_RXCSUM;
4823 }
4824
4825 if (changed & NETIF_F_LOOPBACK) {
4826 features &= ~NETIF_F_LOOPBACK;
4827 features |= dev->features & NETIF_F_LOOPBACK;
4828 }
4829 }
4830
4812 /* TPA requires Rx CSUM offloading */ 4831 /* TPA requires Rx CSUM offloading */
4813 if (!(features & NETIF_F_RXCSUM)) { 4832 if (!(features & NETIF_F_RXCSUM)) {
4814 features &= ~NETIF_F_LRO; 4833 features &= ~NETIF_F_LRO;
4815 features &= ~NETIF_F_GRO; 4834 features &= ~NETIF_F_GRO;
4816 } 4835 }
4817 4836
4818 /* Note: do not disable SW GRO in kernel when HW GRO is off */
4819 if (bp->disable_tpa)
4820 features &= ~NETIF_F_LRO;
4821
4822 return features; 4837 return features;
4823} 4838}
4824 4839
4825int bnx2x_set_features(struct net_device *dev, netdev_features_t features) 4840int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4826{ 4841{
4827 struct bnx2x *bp = netdev_priv(dev); 4842 struct bnx2x *bp = netdev_priv(dev);
4828 u32 flags = bp->flags; 4843 netdev_features_t changes = features ^ dev->features;
4829 u32 changes;
4830 bool bnx2x_reload = false; 4844 bool bnx2x_reload = false;
4845 int rc;
4831 4846
4832 if (features & NETIF_F_LRO) 4847 /* VFs or non SRIOV PFs should be able to change loopback feature */
4833 flags |= TPA_ENABLE_FLAG; 4848 if (!pci_num_vf(bp->pdev)) {
4834 else 4849 if (features & NETIF_F_LOOPBACK) {
4835 flags &= ~TPA_ENABLE_FLAG; 4850 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4836 4851 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4837 if (features & NETIF_F_GRO) 4852 bnx2x_reload = true;
4838 flags |= GRO_ENABLE_FLAG; 4853 }
4839 else 4854 } else {
4840 flags &= ~GRO_ENABLE_FLAG; 4855 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4841 4856 bp->link_params.loopback_mode = LOOPBACK_NONE;
4842 if (features & NETIF_F_LOOPBACK) { 4857 bnx2x_reload = true;
4843 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { 4858 }
4844 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4845 bnx2x_reload = true;
4846 }
4847 } else {
4848 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4849 bp->link_params.loopback_mode = LOOPBACK_NONE;
4850 bnx2x_reload = true;
4851 } 4859 }
4852 } 4860 }
4853 4861
4854 changes = flags ^ bp->flags;
4855
4856 /* if GRO is changed while LRO is enabled, don't force a reload */ 4862 /* if GRO is changed while LRO is enabled, don't force a reload */
4857 if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG)) 4863 if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
4858 changes &= ~GRO_ENABLE_FLAG; 4864 changes &= ~NETIF_F_GRO;
4859 4865
4860 /* if GRO is changed while HW TPA is off, don't force a reload */ 4866 /* if GRO is changed while HW TPA is off, don't force a reload */
4861 if ((changes & GRO_ENABLE_FLAG) && bp->disable_tpa) 4867 if ((changes & NETIF_F_GRO) && bp->disable_tpa)
4862 changes &= ~GRO_ENABLE_FLAG; 4868 changes &= ~NETIF_F_GRO;
4863 4869
4864 if (changes) 4870 if (changes)
4865 bnx2x_reload = true; 4871 bnx2x_reload = true;
4866 4872
4867 bp->flags = flags;
4868
4869 if (bnx2x_reload) { 4873 if (bnx2x_reload) {
4870 if (bp->recovery_state == BNX2X_RECOVERY_DONE) 4874 if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4871 return bnx2x_reload_if_running(dev); 4875 dev->features = features;
4876 rc = bnx2x_reload_if_running(dev);
4877 return rc ? rc : 1;
4878 }
4872 /* else: bnx2x_nic_load() will be called at end of recovery */ 4879 /* else: bnx2x_nic_load() will be called at end of recovery */
4873 } 4880 }
4874 4881
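[Annotation] The bnx2x_set_features() hunk above drops the driver-private TPA_ENABLE_FLAG/GRO_ENABLE_FLAG bookkeeping and derives the change set directly from netdev feature bits (features ^ dev->features), assigning dev->features itself and returning a positive value when it triggers a reload. The standalone C sketch below models only the XOR-based "does this need a reload" decision; the F_LRO/F_GRO constants and needs_reload() name are illustrative stand-ins, not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in feature bits; the real NETIF_F_* values live in netdev_features.h. */
#define F_LRO (1ULL << 0)
#define F_GRO (1ULL << 1)

/* Mirror the reload decision in the reworked bnx2x_set_features():
 * a GRO toggle is ignored while LRO stays enabled, or while hardware
 * TPA is administratively disabled.
 */
static bool needs_reload(uint64_t old_features, uint64_t new_features,
			 bool hw_tpa_disabled)
{
	uint64_t changes = old_features ^ new_features;

	if ((changes & F_GRO) && (new_features & F_LRO))
		changes &= ~F_GRO;
	if ((changes & F_GRO) && hw_tpa_disabled)
		changes &= ~F_GRO;

	return changes != 0;
}

int main(void)
{
	/* Turning GRO off while LRO remains enabled: no reload needed. */
	printf("%d\n", needs_reload(F_LRO | F_GRO, F_LRO, false)); /* 0 */
	/* Turning LRO off: reload needed. */
	printf("%d\n", needs_reload(F_LRO, 0, false));             /* 1 */
	return 0;
}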
@@ -4931,6 +4938,11 @@ int bnx2x_resume(struct pci_dev *pdev)
4931 } 4938 }
4932 bp = netdev_priv(dev); 4939 bp = netdev_priv(dev);
4933 4940
4941 if (pci_num_vf(bp->pdev)) {
4942 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4943 return -EPERM;
4944 }
4945
4934 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 4946 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4935 BNX2X_ERR("Handling parity error recovery. Try again later\n"); 4947 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4936 return -EAGAIN; 4948 return -EAGAIN;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index adcacda7af7b..d7a71758e876 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -969,7 +969,7 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
969{ 969{
970 int i; 970 int i;
971 971
972 if (fp->disable_tpa) 972 if (fp->mode == TPA_MODE_DISABLED)
973 return; 973 return;
974 974
975 for (i = 0; i < last; i++) 975 for (i = 0; i < last; i++)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index e3d853cab7c9..48ed005ba73f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1843,6 +1843,12 @@ static int bnx2x_set_ringparam(struct net_device *dev,
1843 "set ring params command parameters: rx_pending = %d, tx_pending = %d\n", 1843 "set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
1844 ering->rx_pending, ering->tx_pending); 1844 ering->rx_pending, ering->tx_pending);
1845 1845
1846 if (pci_num_vf(bp->pdev)) {
1847 DP(BNX2X_MSG_IOV,
1848 "VFs are enabled, can not change ring parameters\n");
1849 return -EPERM;
1850 }
1851
1846 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 1852 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
1847 DP(BNX2X_MSG_ETHTOOL, 1853 DP(BNX2X_MSG_ETHTOOL,
1848 "Handling parity error recovery. Try again later\n"); 1854 "Handling parity error recovery. Try again later\n");
@@ -2899,6 +2905,12 @@ static void bnx2x_self_test(struct net_device *dev,
2899 u8 is_serdes, link_up; 2905 u8 is_serdes, link_up;
2900 int rc, cnt = 0; 2906 int rc, cnt = 0;
2901 2907
2908 if (pci_num_vf(bp->pdev)) {
2909 DP(BNX2X_MSG_IOV,
2910 "VFs are enabled, can not perform self test\n");
2911 return;
2912 }
2913
2902 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 2914 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2903 netdev_err(bp->dev, 2915 netdev_err(bp->dev,
2904 "Handling parity error recovery. Try again later\n"); 2916 "Handling parity error recovery. Try again later\n");
@@ -3468,6 +3480,11 @@ static int bnx2x_set_channels(struct net_device *dev,
3468 channels->rx_count, channels->tx_count, channels->other_count, 3480 channels->rx_count, channels->tx_count, channels->other_count,
3469 channels->combined_count); 3481 channels->combined_count);
3470 3482
3483 if (pci_num_vf(bp->pdev)) {
3484 DP(BNX2X_MSG_IOV, "VFs are enabled, can not set channels\n");
3485 return -EPERM;
3486 }
3487
3471 /* We don't support separate rx / tx channels. 3488 /* We don't support separate rx / tx channels.
3472 * We don't allow setting 'other' channels. 3489 * We don't allow setting 'other' channels.
3473 */ 3490 */
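[Annotation] The three ethtool hunks above (set_ringparam, self_test, set_channels) all open with the same SR-IOV guard. The fragment below condenses that repeated pattern into one hypothetical helper, bnx2x_reject_if_vfs(), which is not in the patch; pci_num_vf() and the DP() logging macro are used exactly as they appear in the hunks.

/* Refuse PF-side reconfiguration while SR-IOV VFs are instantiated,
 * since the resulting reload would pull resources out from under the
 * active VFs.
 */
static int bnx2x_reject_if_vfs(struct bnx2x *bp)
{
	if (pci_num_vf(bp->pdev)) {
		DP(BNX2X_MSG_IOV, "VFs are enabled, operation not permitted\n");
		return -EPERM;
	}
	return 0;
}

In the patch itself each entry point carries the check inline rather than through a helper.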
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b9f85fccb419..556dcc162a62 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -3128,7 +3128,7 @@ static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
3128 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags); 3128 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
3129 } 3129 }
3130 3130
3131 if (!fp->disable_tpa) { 3131 if (fp->mode != TPA_MODE_DISABLED) {
3132 __set_bit(BNX2X_Q_FLG_TPA, &flags); 3132 __set_bit(BNX2X_Q_FLG_TPA, &flags);
3133 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags); 3133 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
3134 if (fp->mode == TPA_MODE_GRO) 3134 if (fp->mode == TPA_MODE_GRO)
@@ -3176,7 +3176,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3176 u16 sge_sz = 0; 3176 u16 sge_sz = 0;
3177 u16 tpa_agg_size = 0; 3177 u16 tpa_agg_size = 0;
3178 3178
3179 if (!fp->disable_tpa) { 3179 if (fp->mode != TPA_MODE_DISABLED) {
3180 pause->sge_th_lo = SGE_TH_LO(bp); 3180 pause->sge_th_lo = SGE_TH_LO(bp);
3181 pause->sge_th_hi = SGE_TH_HI(bp); 3181 pause->sge_th_hi = SGE_TH_HI(bp);
3182 3182
@@ -3304,7 +3304,7 @@ static void bnx2x_pf_init(struct bnx2x *bp)
3304 /* This flag is relevant for E1x only. 3304 /* This flag is relevant for E1x only.
3305 * E2 doesn't have a TPA configuration in a function level. 3305 * E2 doesn't have a TPA configuration in a function level.
3306 */ 3306 */
3307 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0; 3307 flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0;
3308 3308
3309 func_init.func_flgs = flags; 3309 func_init.func_flgs = flags;
3310 func_init.pf_id = BP_FUNC(bp); 3310 func_init.pf_id = BP_FUNC(bp);
@@ -12107,11 +12107,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
12107 12107
12108 /* Set TPA flags */ 12108 /* Set TPA flags */
12109 if (bp->disable_tpa) { 12109 if (bp->disable_tpa) {
12110 bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG); 12110 bp->dev->hw_features &= ~NETIF_F_LRO;
12111 bp->dev->features &= ~NETIF_F_LRO; 12111 bp->dev->features &= ~NETIF_F_LRO;
12112 } else {
12113 bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
12114 bp->dev->features |= NETIF_F_LRO;
12115 } 12112 }
12116 12113
12117 if (CHIP_IS_E1(bp)) 12114 if (CHIP_IS_E1(bp))
@@ -13371,6 +13368,12 @@ static int bnx2x_init_one(struct pci_dev *pdev,
13371 bool is_vf; 13368 bool is_vf;
13372 int cnic_cnt; 13369 int cnic_cnt;
13373 13370
13371 /* Management FW 'remembers' living interfaces. Allow it some time
13372 * to forget previously living interfaces, allowing a proper re-load.
13373 */
13374 if (is_kdump_kernel())
13375 msleep(5000);
13376
13374 /* An estimated maximum supported CoS number according to the chip 13377 /* An estimated maximum supported CoS number according to the chip
13375 * version. 13378 * version.
13376 * We will try to roughly estimate the maximum number of CoSes this chip 13379 * We will try to roughly estimate the maximum number of CoSes this chip
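[Annotation] Several hunks in this file (and in bnx2x_cmn.c/bnx2x_vfpf.c above) replace the old fp->disable_tpa boolean with a comparison against TPA_MODE_DISABLED, so a single fp->mode field carries the whole aggregation state. The sketch below is a minimal userspace model of that refactor; TPA_MODE_DISABLED and TPA_MODE_GRO appear in the diff, while TPA_MODE_LRO is an assumed third enumerator.

#include <stdio.h>

/* One field instead of a bool + mode pair: DISABLED folds the old
 * "disable_tpa" flag into the same enum as the aggregation flavour.
 */
enum tpa_mode { TPA_MODE_DISABLED, TPA_MODE_LRO, TPA_MODE_GRO };

static const char *queue_flags(enum tpa_mode mode)
{
	if (mode == TPA_MODE_DISABLED)
		return "no TPA flags";
	return mode == TPA_MODE_GRO ? "TPA + GRO flags" : "TPA flags";
}

int main(void)
{
	enum tpa_mode m;

	for (m = TPA_MODE_DISABLED; m <= TPA_MODE_GRO; m++)
		printf("%d -> %s\n", (int)m, queue_flags(m));
	return 0;
}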
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 15b2d1647560..06b8c0d8fd3b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -594,7 +594,7 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
594 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req)); 594 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
595 595
596 /* select tpa mode to request */ 596 /* select tpa mode to request */
597 if (!fp->disable_tpa) { 597 if (fp->mode != TPA_MODE_DISABLED) {
598 flags |= VFPF_QUEUE_FLG_TPA; 598 flags |= VFPF_QUEUE_FLG_TPA;
599 flags |= VFPF_QUEUE_FLG_TPA_IPV6; 599 flags |= VFPF_QUEUE_FLG_TPA_IPV6;
600 if (fp->mode == TPA_MODE_GRO) 600 if (fp->mode == TPA_MODE_GRO)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 1270b189a9a2..069952fa5d64 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -18129,7 +18129,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18129 18129
18130 rtnl_lock(); 18130 rtnl_lock();
18131 18131
18132 tp->pcierr_recovery = true; 18132 /* We needn't recover from permanent error */
18133 if (state == pci_channel_io_frozen)
18134 tp->pcierr_recovery = true;
18133 18135
18134 /* We probably don't have netdev yet */ 18136 /* We probably don't have netdev yet */
18135 if (!netdev || !netif_running(netdev)) 18137 if (!netdev || !netif_running(netdev))
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 9f5387249f24..4104d49f005d 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -707,6 +707,9 @@ static void gem_rx_refill(struct macb *bp)
707 707
708 /* properly align Ethernet header */ 708 /* properly align Ethernet header */
709 skb_reserve(skb, NET_IP_ALIGN); 709 skb_reserve(skb, NET_IP_ALIGN);
710 } else {
711 bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
712 bp->rx_ring[entry].ctrl = 0;
710 } 713 }
711 } 714 }
712 715
@@ -1473,9 +1476,9 @@ static void macb_init_rings(struct macb *bp)
1473 for (i = 0; i < TX_RING_SIZE; i++) { 1476 for (i = 0; i < TX_RING_SIZE; i++) {
1474 bp->queues[0].tx_ring[i].addr = 0; 1477 bp->queues[0].tx_ring[i].addr = 0;
1475 bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED); 1478 bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
1476 bp->queues[0].tx_head = 0;
1477 bp->queues[0].tx_tail = 0;
1478 } 1479 }
1480 bp->queues[0].tx_head = 0;
1481 bp->queues[0].tx_tail = 0;
1479 bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); 1482 bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
1480 1483
1481 bp->rx_tail = 0; 1484 bp->rx_tail = 0;
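[Annotation] The macb_init_rings() hunk hoists the tx_head/tx_tail reset out of the per-descriptor loop, where it was being repeated TX_RING_SIZE times. Below is a compact, self-contained model of the corrected shape; the ring size, bit value and field names are illustrative, not taken from the driver headers.

#include <stdio.h>

#define TX_RING_SIZE 8
#define TX_USED 0x80000000u

struct tx_desc { unsigned int addr, ctrl; };

struct tx_queue {
	struct tx_desc ring[TX_RING_SIZE];
	unsigned int head, tail;
};

static void init_tx_ring(struct tx_queue *q)
{
	int i;

	/* Per-descriptor work stays in the loop... */
	for (i = 0; i < TX_RING_SIZE; i++) {
		q->ring[i].addr = 0;
		q->ring[i].ctrl = TX_USED;
	}
	/* ...queue-wide state is set exactly once, as in the fixed hunk. */
	q->head = 0;
	q->tail = 0;
}

int main(void)
{
	struct tx_queue q;

	init_tx_ring(&q);
	printf("head=%u tail=%u ctrl[0]=0x%x\n", q.head, q.tail, q.ring[0].ctrl);
	return 0;
}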
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 5959e3ae72da..e8578a742f2a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -492,7 +492,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
492 memoffset = (mtype * (edc_size * 1024 * 1024)); 492 memoffset = (mtype * (edc_size * 1024 * 1024));
493 else { 493 else {
494 mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap, 494 mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
495 MA_EXT_MEMORY1_BAR_A)); 495 MA_EXT_MEMORY0_BAR_A));
496 memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; 496 memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
497 } 497 }
498 498
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index fb0bc3c3620e..a6dcbf850c1f 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4846,7 +4846,8 @@ err:
4846} 4846}
4847 4847
4848static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 4848static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4849 struct net_device *dev, u32 filter_mask) 4849 struct net_device *dev, u32 filter_mask,
4850 int nlflags)
4850{ 4851{
4851 struct be_adapter *adapter = netdev_priv(dev); 4852 struct be_adapter *adapter = netdev_priv(dev);
4852 int status = 0; 4853 int status = 0;
@@ -4868,7 +4869,7 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4868 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, 4869 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4869 hsw_mode == PORT_FWD_TYPE_VEPA ? 4870 hsw_mode == PORT_FWD_TYPE_VEPA ?
4870 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB, 4871 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
4871 0, 0); 4872 0, 0, nlflags);
4872} 4873}
4873 4874
4874#ifdef CONFIG_BE2NET_VXLAN 4875#ifdef CONFIG_BE2NET_VXLAN
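[Annotation] be2net is one of several drivers in this series (i40e, ixgbe and rocker follow below) updated for the extra nlflags argument on ndo_bridge_getlink and ndo_dflt_bridge_getlink. The fragment below condenses the resulting callback shape as a sketch; example_bridge_getlink() is a made-up name, the hard-coded BRIDGE_MODE_VEB is a placeholder for the mode the real drivers query from hardware, and the helper's argument order is taken from the hunks in this patch.

/* ndo_bridge_getlink now receives the netlink dump flags and must pass
 * them through so the core helper can set NLM_F_* bits on the reply.
 */
static int example_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				  struct net_device *dev, u32 filter_mask,
				  int nlflags)
{
	u16 mode = BRIDGE_MODE_VEB;	/* placeholder; drivers query the HW */

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
				       0, 0, nlflags);
}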
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f6a3a7abd468..66d47e448e4d 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -988,7 +988,10 @@ fec_restart(struct net_device *ndev)
988 rcntl |= 0x40000000 | 0x00000020; 988 rcntl |= 0x40000000 | 0x00000020;
989 989
990 /* RGMII, RMII or MII */ 990 /* RGMII, RMII or MII */
991 if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII) 991 if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
992 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
993 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
994 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
992 rcntl |= (1 << 6); 995 rcntl |= (1 << 6);
993 else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) 996 else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
994 rcntl |= (1 << 8); 997 rcntl |= (1 << 8);
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 291c87036e17..2a0dc127df3f 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3347,7 +3347,7 @@ static int ehea_register_memory_hooks(void)
3347{ 3347{
3348 int ret = 0; 3348 int ret = 0;
3349 3349
3350 if (atomic_inc_and_test(&ehea_memory_hooks_registered)) 3350 if (atomic_inc_return(&ehea_memory_hooks_registered) > 1)
3351 return 0; 3351 return 0;
3352 3352
3353 ret = ehea_create_busmap(); 3353 ret = ehea_create_busmap();
@@ -3381,12 +3381,14 @@ out3:
3381out2: 3381out2:
3382 unregister_reboot_notifier(&ehea_reboot_nb); 3382 unregister_reboot_notifier(&ehea_reboot_nb);
3383out: 3383out:
3384 atomic_dec(&ehea_memory_hooks_registered);
3384 return ret; 3385 return ret;
3385} 3386}
3386 3387
3387static void ehea_unregister_memory_hooks(void) 3388static void ehea_unregister_memory_hooks(void)
3388{ 3389{
3389 if (atomic_read(&ehea_memory_hooks_registered)) 3390 /* Only remove the hooks if we've registered them */
3391 if (atomic_read(&ehea_memory_hooks_registered) == 0)
3390 return; 3392 return;
3391 3393
3392 unregister_reboot_notifier(&ehea_reboot_nb); 3394 unregister_reboot_notifier(&ehea_reboot_nb);
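[Annotation] The ehea hunks fix a broken "register once" guard: atomic_inc_and_test() is only true when the counter reaches zero, so starting from zero it never fired and repeat callers re-registered the memory hooks; the fix tests atomic_inc_return() > 1, drops the count again on a failed registration, and only unregisters if the count is non-zero. The standalone sketch below reproduces that pattern with C11 atomics; the function names are illustrative.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int hooks_registered = 0;

/* First caller does the real registration; later callers only bump the
 * count, mirroring the atomic_inc_return() > 1 test in the fix.
 */
static int register_hooks(void)
{
	if (atomic_fetch_add(&hooks_registered, 1) + 1 > 1)
		return 0;	/* someone else already registered */

	printf("registering hooks\n");
	/* on a registration failure the counter must be dropped again,
	 * which is what the added atomic_dec() in the error path does */
	return 0;
}

static void unregister_hooks(void)
{
	if (atomic_load(&hooks_registered) == 0)
		return;		/* nothing was ever registered */
	printf("unregistering hooks\n");
}

int main(void)
{
	register_hooks();
	register_hooks();	/* no second "registering" line */
	unregister_hooks();
	return 0;
}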
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index cd7675ac5bf9..18134766a114 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1238,7 +1238,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1238 return -EINVAL; 1238 return -EINVAL;
1239 1239
1240 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) 1240 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1241 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) 1241 if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
1242 break; 1242 break;
1243 1243
1244 if (i == IBMVETH_NUM_BUFF_POOLS) 1244 if (i == IBMVETH_NUM_BUFF_POOLS)
@@ -1257,7 +1257,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1257 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { 1257 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1258 adapter->rx_buff_pool[i].active = 1; 1258 adapter->rx_buff_pool[i].active = 1;
1259 1259
1260 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { 1260 if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
1261 dev->mtu = new_mtu; 1261 dev->mtu = new_mtu;
1262 vio_cmo_set_dev_desired(viodev, 1262 vio_cmo_set_dev_desired(viodev,
1263 ibmveth_get_desired_dma 1263 ibmveth_get_desired_dma
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 24481cd7e59a..a54c14491e3b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -8053,10 +8053,10 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
8053#ifdef HAVE_BRIDGE_FILTER 8053#ifdef HAVE_BRIDGE_FILTER
8054static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 8054static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8055 struct net_device *dev, 8055 struct net_device *dev,
8056 u32 __always_unused filter_mask) 8056 u32 __always_unused filter_mask, int nlflags)
8057#else 8057#else
8058static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 8058static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8059 struct net_device *dev) 8059 struct net_device *dev, int nlflags)
8060#endif /* HAVE_BRIDGE_FILTER */ 8060#endif /* HAVE_BRIDGE_FILTER */
8061{ 8061{
8062 struct i40e_netdev_priv *np = netdev_priv(dev); 8062 struct i40e_netdev_priv *np = netdev_priv(dev);
@@ -8078,7 +8078,8 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8078 if (!veb) 8078 if (!veb)
8079 return 0; 8079 return 0;
8080 8080
8081 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode); 8081 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
8082 nlflags);
8082} 8083}
8083#endif /* HAVE_BRIDGE_ATTRIBS */ 8084#endif /* HAVE_BRIDGE_ATTRIBS */
8084 8085
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d3f4b0ceb3f7..5be12a00e1f4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8044,7 +8044,7 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
8044 8044
8045static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 8045static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8046 struct net_device *dev, 8046 struct net_device *dev,
8047 u32 filter_mask) 8047 u32 filter_mask, int nlflags)
8048{ 8048{
8049 struct ixgbe_adapter *adapter = netdev_priv(dev); 8049 struct ixgbe_adapter *adapter = netdev_priv(dev);
8050 8050
@@ -8052,7 +8052,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8052 return 0; 8052 return 0;
8053 8053
8054 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, 8054 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
8055 adapter->bridge_mode, 0, 0); 8055 adapter->bridge_mode, 0, 0, nlflags);
8056} 8056}
8057 8057
8058static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) 8058static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index af829c578400..7ace07dad6a3 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1508,7 +1508,8 @@ static int pxa168_eth_probe(struct platform_device *pdev)
1508 np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); 1508 np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
1509 if (!np) { 1509 if (!np) {
1510 dev_err(&pdev->dev, "missing phy-handle\n"); 1510 dev_err(&pdev->dev, "missing phy-handle\n");
1511 return -EINVAL; 1511 err = -EINVAL;
1512 goto err_netdev;
1512 } 1513 }
1513 of_property_read_u32(np, "reg", &pep->phy_addr); 1514 of_property_read_u32(np, "reg", &pep->phy_addr);
1514 pep->phy_intf = of_get_phy_mode(pdev->dev.of_node); 1515 pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
@@ -1526,7 +1527,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
1526 pep->smi_bus = mdiobus_alloc(); 1527 pep->smi_bus = mdiobus_alloc();
1527 if (pep->smi_bus == NULL) { 1528 if (pep->smi_bus == NULL) {
1528 err = -ENOMEM; 1529 err = -ENOMEM;
1529 goto err_base; 1530 goto err_netdev;
1530 } 1531 }
1531 pep->smi_bus->priv = pep; 1532 pep->smi_bus->priv = pep;
1532 pep->smi_bus->name = "pxa168_eth smi"; 1533 pep->smi_bus->name = "pxa168_eth smi";
@@ -1551,13 +1552,10 @@ err_mdiobus:
1551 mdiobus_unregister(pep->smi_bus); 1552 mdiobus_unregister(pep->smi_bus);
1552err_free_mdio: 1553err_free_mdio:
1553 mdiobus_free(pep->smi_bus); 1554 mdiobus_free(pep->smi_bus);
1554err_base:
1555 iounmap(pep->base);
1556err_netdev: 1555err_netdev:
1557 free_netdev(dev); 1556 free_netdev(dev);
1558err_clk: 1557err_clk:
1559 clk_disable(clk); 1558 clk_disable_unprepare(clk);
1560 clk_put(clk);
1561 return err; 1559 return err;
1562} 1560}
1563 1561
@@ -1574,13 +1572,9 @@ static int pxa168_eth_remove(struct platform_device *pdev)
1574 if (pep->phy) 1572 if (pep->phy)
1575 phy_disconnect(pep->phy); 1573 phy_disconnect(pep->phy);
1576 if (pep->clk) { 1574 if (pep->clk) {
1577 clk_disable(pep->clk); 1575 clk_disable_unprepare(pep->clk);
1578 clk_put(pep->clk);
1579 pep->clk = NULL;
1580 } 1576 }
1581 1577
1582 iounmap(pep->base);
1583 pep->base = NULL;
1584 mdiobus_unregister(pep->smi_bus); 1578 mdiobus_unregister(pep->smi_bus);
1585 mdiobus_free(pep->smi_bus); 1579 mdiobus_free(pep->smi_bus);
1586 unregister_netdev(dev); 1580 unregister_netdev(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 3f44e2bbb982..a2ddf3d75ff8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1102,20 +1102,21 @@ static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
1102 struct mlx4_en_priv *priv = netdev_priv(dev); 1102 struct mlx4_en_priv *priv = netdev_priv(dev);
1103 1103
1104 /* check if requested function is supported by the device */ 1104 /* check if requested function is supported by the device */
1105 if ((hfunc == ETH_RSS_HASH_TOP && 1105 if (hfunc == ETH_RSS_HASH_TOP) {
1106 !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) || 1106 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP))
1107 (hfunc == ETH_RSS_HASH_XOR && 1107 return -EINVAL;
1108 !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))) 1108 if (!(dev->features & NETIF_F_RXHASH))
1109 return -EINVAL; 1109 en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
1110 return 0;
1111 } else if (hfunc == ETH_RSS_HASH_XOR) {
1112 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))
1113 return -EINVAL;
1114 if (dev->features & NETIF_F_RXHASH)
1115 en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
1116 return 0;
1117 }
1110 1118
1111 priv->rss_hash_fn = hfunc; 1119 return -EINVAL;
1112 if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH))
1113 en_warn(priv,
1114 "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
1115 if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH))
1116 en_warn(priv,
1117 "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
1118 return 0;
1119} 1120}
1120 1121
1121static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key, 1122static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
@@ -1189,6 +1190,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
1189 priv->prof->rss_rings = rss_rings; 1190 priv->prof->rss_rings = rss_rings;
1190 if (key) 1191 if (key)
1191 memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE); 1192 memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
1193 if (hfunc != ETH_RSS_HASH_NO_CHANGE)
1194 priv->rss_hash_fn = hfunc;
1192 1195
1193 if (port_up) { 1196 if (port_up) {
1194 err = mlx4_en_start_port(dev); 1197 err = mlx4_en_start_port(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 0f1afc085d58..32f5ec737472 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1467,6 +1467,7 @@ static void mlx4_en_service_task(struct work_struct *work)
1467 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 1467 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
1468 mlx4_en_ptp_overflow_check(mdev); 1468 mlx4_en_ptp_overflow_check(mdev);
1469 1469
1470 mlx4_en_recover_from_oom(priv);
1470 queue_delayed_work(mdev->workqueue, &priv->service_task, 1471 queue_delayed_work(mdev->workqueue, &priv->service_task,
1471 SERVICE_TASK_DELAY); 1472 SERVICE_TASK_DELAY);
1472 } 1473 }
@@ -1721,7 +1722,7 @@ mac_err:
1721cq_err: 1722cq_err:
1722 while (rx_index--) { 1723 while (rx_index--) {
1723 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); 1724 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
1724 mlx4_en_free_affinity_hint(priv, i); 1725 mlx4_en_free_affinity_hint(priv, rx_index);
1725 } 1726 }
1726 for (i = 0; i < priv->rx_ring_num; i++) 1727 for (i = 0; i < priv->rx_ring_num; i++)
1727 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1728 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 4fdd3c37e47b..2a77a6b19121 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -244,6 +244,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
244 return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp); 244 return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
245} 245}
246 246
247static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
248{
249 BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
250 return ring->prod == ring->cons;
251}
252
247static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) 253static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
248{ 254{
249 *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff); 255 *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
@@ -315,8 +321,7 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
315 ring->cons, ring->prod); 321 ring->cons, ring->prod);
316 322
317 /* Unmap and free Rx buffers */ 323 /* Unmap and free Rx buffers */
318 BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size); 324 while (!mlx4_en_is_ring_empty(ring)) {
319 while (ring->cons != ring->prod) {
320 index = ring->cons & ring->size_mask; 325 index = ring->cons & ring->size_mask;
321 en_dbg(DRV, priv, "Processing descriptor:%d\n", index); 326 en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
322 mlx4_en_free_rx_desc(priv, ring, index); 327 mlx4_en_free_rx_desc(priv, ring, index);
@@ -491,6 +496,23 @@ err_allocator:
491 return err; 496 return err;
492} 497}
493 498
499/* We recover from out of memory by scheduling our napi poll
500 * function (mlx4_en_process_cq), which tries to allocate
501 * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
502 */
503void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
504{
505 int ring;
506
507 if (!priv->port_up)
508 return;
509
510 for (ring = 0; ring < priv->rx_ring_num; ring++) {
511 if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
512 napi_reschedule(&priv->rx_cq[ring]->napi);
513 }
514}
515
494void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, 516void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
495 struct mlx4_en_rx_ring **pring, 517 struct mlx4_en_rx_ring **pring,
496 u32 size, u16 stride) 518 u32 size, u16 stride)
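[Annotation] The en_rx.c hunks add mlx4_en_is_ring_empty() and an OOM-recovery path that lets the periodic service task reschedule NAPI for any RX ring that has run completely dry, so the refill logic gets another chance after an allocation failure. The self-contained sketch below models just the emptiness test on free-running producer/consumer counters; the struct layout is simplified and the names are illustrative.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rx_ring {
	uint32_t prod;		/* free-running producer counter */
	uint32_t cons;		/* free-running consumer counter */
	uint32_t actual_size;	/* number of descriptors */
};

/* Same test as the new mlx4_en_is_ring_empty(): with free-running
 * counters, (prod - cons) in 32-bit unsigned arithmetic is the fill
 * level even across wraparound, and the ring is empty when it is zero.
 */
static bool ring_empty(const struct rx_ring *ring)
{
	assert((uint32_t)(ring->prod - ring->cons) <= ring->actual_size);
	return ring->prod == ring->cons;
}

int main(void)
{
	struct rx_ring r = { .prod = 0xfffffffe, .cons = 0xfffffffe,
			     .actual_size = 256 };

	printf("%d\n", ring_empty(&r));	/* 1: nothing posted */
	r.prod += 3;			/* wraps past 0 */
	printf("%d\n", ring_empty(&r));	/* 0: three buffers posted */
	return 0;
}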
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 1783705273d8..f7bf312fb443 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -143,8 +143,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
143 ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type; 143 ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
144 ring->queue_index = queue_index; 144 ring->queue_index = queue_index;
145 145
146 if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index)) 146 if (queue_index < priv->num_tx_rings_p_up)
147 cpumask_set_cpu(queue_index, &ring->affinity_mask); 147 cpumask_set_cpu_local_first(queue_index,
148 priv->mdev->dev->numa_node,
149 &ring->affinity_mask);
148 150
149 *pring = ring; 151 *pring = ring;
150 return 0; 152 return 0;
@@ -213,7 +215,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
213 215
214 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, 216 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
215 &ring->qp, &ring->qp_state); 217 &ring->qp, &ring->qp_state);
216 if (!user_prio && cpu_online(ring->queue_index)) 218 if (!cpumask_empty(&ring->affinity_mask))
217 netif_set_xps_queue(priv->dev, &ring->affinity_mask, 219 netif_set_xps_queue(priv->dev, &ring->affinity_mask,
218 ring->queue_index); 220 ring->queue_index);
219 221
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index a4079811b176..e30bf57ad7a1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -56,11 +56,13 @@ MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)");
56#define MLX4_GET(dest, source, offset) \ 56#define MLX4_GET(dest, source, offset) \
57 do { \ 57 do { \
58 void *__p = (char *) (source) + (offset); \ 58 void *__p = (char *) (source) + (offset); \
59 u64 val; \
59 switch (sizeof (dest)) { \ 60 switch (sizeof (dest)) { \
60 case 1: (dest) = *(u8 *) __p; break; \ 61 case 1: (dest) = *(u8 *) __p; break; \
61 case 2: (dest) = be16_to_cpup(__p); break; \ 62 case 2: (dest) = be16_to_cpup(__p); break; \
62 case 4: (dest) = be32_to_cpup(__p); break; \ 63 case 4: (dest) = be32_to_cpup(__p); break; \
63 case 8: (dest) = be64_to_cpup(__p); break; \ 64 case 8: val = get_unaligned((u64 *)__p); \
65 (dest) = be64_to_cpu(val); break; \
64 default: __buggy_use_of_MLX4_GET(); \ 66 default: __buggy_use_of_MLX4_GET(); \
65 } \ 67 } \
66 } while (0) 68 } while (0)
@@ -1605,9 +1607,17 @@ static void get_board_id(void *vsd, char *board_id)
1605 * swaps each 4-byte word before passing it back to 1607 * swaps each 4-byte word before passing it back to
1606 * us. Therefore we need to swab it before printing. 1608 * us. Therefore we need to swab it before printing.
1607 */ 1609 */
1608 for (i = 0; i < 4; ++i) 1610 u32 *bid_u32 = (u32 *)board_id;
1609 ((u32 *) board_id)[i] = 1611
1610 swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4)); 1612 for (i = 0; i < 4; ++i) {
1613 u32 *addr;
1614 u32 val;
1615
1616 addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4);
1617 val = get_unaligned(addr);
1618 val = swab32(val);
1619 put_unaligned(val, &bid_u32[i]);
1620 }
1611 } 1621 }
1612} 1622}
1613 1623
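[Annotation] Both fw.c hunks replace direct dereferences of possibly unaligned mailbox fields with get_unaligned()/put_unaligned() accesses before byte-swapping. The standalone sketch below shows the same idea in userspace: copy the bytes out with memcpy() instead of casting to a wider pointer, then assemble the big-endian value. The helper name mirrors the kernel's get_unaligned_be64(), but this is a local reimplementation for the demo.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* memcpy() into a local avoids the unaligned dereference that the old
 * MLX4_GET() case 8 performed directly on the mailbox buffer.
 */
static uint64_t get_unaligned_be64(const void *p)
{
	unsigned char b[8];
	uint64_t v = 0;
	int i;

	memcpy(b, p, 8);
	for (i = 0; i < 8; i++)
		v = (v << 8) | b[i];
	return v;
}

int main(void)
{
	/* 9-byte buffer so the field at offset 1 is deliberately unaligned */
	unsigned char buf[9] = { 0, 0x01, 0x02, 0x03, 0x04,
				 0x05, 0x06, 0x07, 0x08 };

	printf("0x%016llx\n",
	       (unsigned long long)get_unaligned_be64(buf + 1));
	return 0;
}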
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 9de30216b146..d021f079f181 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -774,6 +774,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
774void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, 774void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
775 struct mlx4_en_tx_ring *ring); 775 struct mlx4_en_tx_ring *ring);
776void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev); 776void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
777void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
777int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, 778int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
778 struct mlx4_en_rx_ring **pring, 779 struct mlx4_en_rx_ring **pring,
779 u32 size, u16 stride, int node); 780 u32 size, u16 stride, int node);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 1412f5af05ec..2bae50292dcd 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -69,11 +69,7 @@
69#include <net/ip.h> 69#include <net/ip.h>
70#include <net/tcp.h> 70#include <net/tcp.h>
71#include <asm/byteorder.h> 71#include <asm/byteorder.h>
72#include <asm/io.h>
73#include <asm/processor.h> 72#include <asm/processor.h>
74#ifdef CONFIG_MTRR
75#include <asm/mtrr.h>
76#endif
77#include <net/busy_poll.h> 73#include <net/busy_poll.h>
78 74
79#include "myri10ge_mcp.h" 75#include "myri10ge_mcp.h"
@@ -242,8 +238,7 @@ struct myri10ge_priv {
242 unsigned int rdma_tags_available; 238 unsigned int rdma_tags_available;
243 int intr_coal_delay; 239 int intr_coal_delay;
244 __be32 __iomem *intr_coal_delay_ptr; 240 __be32 __iomem *intr_coal_delay_ptr;
245 int mtrr; 241 int wc_cookie;
246 int wc_enabled;
247 int down_cnt; 242 int down_cnt;
248 wait_queue_head_t down_wq; 243 wait_queue_head_t down_wq;
249 struct work_struct watchdog_work; 244 struct work_struct watchdog_work;
@@ -1905,7 +1900,7 @@ static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
1905 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", 1900 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
1906 "tx_heartbeat_errors", "tx_window_errors", 1901 "tx_heartbeat_errors", "tx_window_errors",
1907 /* device-specific stats */ 1902 /* device-specific stats */
1908 "tx_boundary", "WC", "irq", "MSI", "MSIX", 1903 "tx_boundary", "irq", "MSI", "MSIX",
1909 "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", 1904 "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
1910 "serial_number", "watchdog_resets", 1905 "serial_number", "watchdog_resets",
1911#ifdef CONFIG_MYRI10GE_DCA 1906#ifdef CONFIG_MYRI10GE_DCA
@@ -1984,7 +1979,6 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
1984 data[i] = ((u64 *)&link_stats)[i]; 1979 data[i] = ((u64 *)&link_stats)[i];
1985 1980
1986 data[i++] = (unsigned int)mgp->tx_boundary; 1981 data[i++] = (unsigned int)mgp->tx_boundary;
1987 data[i++] = (unsigned int)mgp->wc_enabled;
1988 data[i++] = (unsigned int)mgp->pdev->irq; 1982 data[i++] = (unsigned int)mgp->pdev->irq;
1989 data[i++] = (unsigned int)mgp->msi_enabled; 1983 data[i++] = (unsigned int)mgp->msi_enabled;
1990 data[i++] = (unsigned int)mgp->msix_enabled; 1984 data[i++] = (unsigned int)mgp->msix_enabled;
@@ -4040,14 +4034,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4040 4034
4041 mgp->board_span = pci_resource_len(pdev, 0); 4035 mgp->board_span = pci_resource_len(pdev, 0);
4042 mgp->iomem_base = pci_resource_start(pdev, 0); 4036 mgp->iomem_base = pci_resource_start(pdev, 0);
4043 mgp->mtrr = -1; 4037 mgp->wc_cookie = arch_phys_wc_add(mgp->iomem_base, mgp->board_span);
4044 mgp->wc_enabled = 0;
4045#ifdef CONFIG_MTRR
4046 mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span,
4047 MTRR_TYPE_WRCOMB, 1);
4048 if (mgp->mtrr >= 0)
4049 mgp->wc_enabled = 1;
4050#endif
4051 mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span); 4038 mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span);
4052 if (mgp->sram == NULL) { 4039 if (mgp->sram == NULL) {
4053 dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n", 4040 dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
@@ -4146,14 +4133,14 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4146 goto abort_with_state; 4133 goto abort_with_state;
4147 } 4134 }
4148 if (mgp->msix_enabled) 4135 if (mgp->msix_enabled)
4149 dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, WC %s\n", 4136 dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
4150 mgp->num_slices, mgp->tx_boundary, mgp->fw_name, 4137 mgp->num_slices, mgp->tx_boundary, mgp->fw_name,
4151 (mgp->wc_enabled ? "Enabled" : "Disabled")); 4138 (mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));
4152 else 4139 else
4153 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", 4140 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
4154 mgp->msi_enabled ? "MSI" : "xPIC", 4141 mgp->msi_enabled ? "MSI" : "xPIC",
4155 pdev->irq, mgp->tx_boundary, mgp->fw_name, 4142 pdev->irq, mgp->tx_boundary, mgp->fw_name,
4156 (mgp->wc_enabled ? "Enabled" : "Disabled")); 4143 (mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));
4157 4144
4158 board_number++; 4145 board_number++;
4159 return 0; 4146 return 0;
@@ -4175,10 +4162,7 @@ abort_with_ioremap:
4175 iounmap(mgp->sram); 4162 iounmap(mgp->sram);
4176 4163
4177abort_with_mtrr: 4164abort_with_mtrr:
4178#ifdef CONFIG_MTRR 4165 arch_phys_wc_del(mgp->wc_cookie);
4179 if (mgp->mtrr >= 0)
4180 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
4181#endif
4182 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), 4166 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
4183 mgp->cmd, mgp->cmd_bus); 4167 mgp->cmd, mgp->cmd_bus);
4184 4168
@@ -4220,11 +4204,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
4220 pci_restore_state(pdev); 4204 pci_restore_state(pdev);
4221 4205
4222 iounmap(mgp->sram); 4206 iounmap(mgp->sram);
4223 4207 arch_phys_wc_del(mgp->wc_cookie);
4224#ifdef CONFIG_MTRR
4225 if (mgp->mtrr >= 0)
4226 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
4227#endif
4228 myri10ge_free_slices(mgp); 4208 myri10ge_free_slices(mgp);
4229 kfree(mgp->msix_vectors); 4209 kfree(mgp->msix_vectors);
4230 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), 4210 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 5c4068353f66..8da7c3faf817 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -135,7 +135,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
135 int i, j; 135 int i, j;
136 struct nx_host_tx_ring *tx_ring = adapter->tx_ring; 136 struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
137 137
138 spin_lock(&adapter->tx_clean_lock); 138 spin_lock_bh(&adapter->tx_clean_lock);
139 cmd_buf = tx_ring->cmd_buf_arr; 139 cmd_buf = tx_ring->cmd_buf_arr;
140 for (i = 0; i < tx_ring->num_desc; i++) { 140 for (i = 0; i < tx_ring->num_desc; i++) {
141 buffrag = cmd_buf->frag_array; 141 buffrag = cmd_buf->frag_array;
@@ -159,7 +159,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
159 } 159 }
160 cmd_buf++; 160 cmd_buf++;
161 } 161 }
162 spin_unlock(&adapter->tx_clean_lock); 162 spin_unlock_bh(&adapter->tx_clean_lock);
163} 163}
164 164
165void netxen_free_sw_resources(struct netxen_adapter *adapter) 165void netxen_free_sw_resources(struct netxen_adapter *adapter)
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index a570a60533be..ec251531bd9f 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -4176,14 +4176,15 @@ static int rocker_port_bridge_setlink(struct net_device *dev,
4176 4176
4177static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 4177static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4178 struct net_device *dev, 4178 struct net_device *dev,
4179 u32 filter_mask) 4179 u32 filter_mask, int nlflags)
4180{ 4180{
4181 struct rocker_port *rocker_port = netdev_priv(dev); 4181 struct rocker_port *rocker_port = netdev_priv(dev);
4182 u16 mode = BRIDGE_MODE_UNDEF; 4182 u16 mode = BRIDGE_MODE_UNDEF;
4183 u32 mask = BR_LEARNING | BR_LEARNING_SYNC; 4183 u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
4184 4184
4185 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 4185 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
4186 rocker_port->brport_flags, mask); 4186 rocker_port->brport_flags, mask,
4187 nlflags);
4187} 4188}
4188 4189
4189static int rocker_port_get_phys_port_name(struct net_device *dev, 4190static int rocker_port_get_phys_port_name(struct net_device *dev,
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 2bef655279f3..9b7e0a34c98b 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -1765,7 +1765,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
1765 ALE_PORT_STATE, 1765 ALE_PORT_STATE,
1766 ALE_PORT_STATE_FORWARD); 1766 ALE_PORT_STATE_FORWARD);
1767 1767
1768 if (ndev && slave->open) 1768 if (ndev && slave->open &&
1769 slave->link_interface != SGMII_LINK_MAC_PHY &&
1770 slave->link_interface != XGMII_LINK_MAC_PHY)
1769 netif_carrier_on(ndev); 1771 netif_carrier_on(ndev);
1770 } else { 1772 } else {
1771 writel(mac_control, GBE_REG_ADDR(slave, emac_regs, 1773 writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
@@ -1773,7 +1775,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
1773 cpsw_ale_control_set(gbe_dev->ale, slave->port_num, 1775 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
1774 ALE_PORT_STATE, 1776 ALE_PORT_STATE,
1775 ALE_PORT_STATE_DISABLE); 1777 ALE_PORT_STATE_DISABLE);
1776 if (ndev) 1778 if (ndev &&
1779 slave->link_interface != SGMII_LINK_MAC_PHY &&
1780 slave->link_interface != XGMII_LINK_MAC_PHY)
1777 netif_carrier_off(ndev); 1781 netif_carrier_off(ndev);
1778 } 1782 }
1779 1783
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index a10b31664709..41071d32bc8e 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -128,7 +128,6 @@ struct ndis_tcp_ip_checksum_info;
128struct hv_netvsc_packet { 128struct hv_netvsc_packet {
129 /* Bookkeeping stuff */ 129 /* Bookkeeping stuff */
130 u32 status; 130 u32 status;
131 bool part_of_skb;
132 131
133 bool is_data_pkt; 132 bool is_data_pkt;
134 bool xmit_more; /* from skb */ 133 bool xmit_more; /* from skb */
@@ -612,6 +611,15 @@ struct multi_send_data {
612 u32 count; /* counter of batched packets */ 611 u32 count; /* counter of batched packets */
613}; 612};
614 613
614/* The context of the netvsc device */
615struct net_device_context {
616 /* point back to our device context */
617 struct hv_device *device_ctx;
618 struct delayed_work dwork;
619 struct work_struct work;
620 u32 msg_enable; /* debug level */
621};
622
615/* Per netvsc device */ 623/* Per netvsc device */
616struct netvsc_device { 624struct netvsc_device {
617 struct hv_device *dev; 625 struct hv_device *dev;
@@ -667,6 +675,9 @@ struct netvsc_device {
667 struct multi_send_data msd[NR_CPUS]; 675 struct multi_send_data msd[NR_CPUS];
668 u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ 676 u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
669 u32 pkt_align; /* alignment bytes, e.g. 8 */ 677 u32 pkt_align; /* alignment bytes, e.g. 8 */
678
679 /* The net device context */
680 struct net_device_context *nd_ctx;
670}; 681};
671 682
672/* NdisInitialize message */ 683/* NdisInitialize message */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 2e8ad0636b46..2d9ef533cc48 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -889,11 +889,6 @@ int netvsc_send(struct hv_device *device,
889 } else { 889 } else {
890 packet->page_buf_cnt = 0; 890 packet->page_buf_cnt = 0;
891 packet->total_data_buflen += msd_len; 891 packet->total_data_buflen += msd_len;
892 if (!packet->part_of_skb) {
893 skb = (struct sk_buff *)(unsigned long)packet->
894 send_completion_tid;
895 packet->send_completion_tid = 0;
896 }
897 } 892 }
898 893
899 if (msdp->pkt) 894 if (msdp->pkt)
@@ -1197,6 +1192,9 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
1197 */ 1192 */
1198 ndev = net_device->ndev; 1193 ndev = net_device->ndev;
1199 1194
1195 /* Add netvsc_device context to netvsc_device */
1196 net_device->nd_ctx = netdev_priv(ndev);
1197
1200 /* Initialize the NetVSC channel extension */ 1198 /* Initialize the NetVSC channel extension */
1201 init_completion(&net_device->channel_init_wait); 1199 init_completion(&net_device->channel_init_wait);
1202 1200
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index a3a9d3898a6e..5993c7e2d723 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -40,18 +40,21 @@
40 40
41#include "hyperv_net.h" 41#include "hyperv_net.h"
42 42
43struct net_device_context {
44 /* point back to our device context */
45 struct hv_device *device_ctx;
46 struct delayed_work dwork;
47 struct work_struct work;
48};
49 43
50#define RING_SIZE_MIN 64 44#define RING_SIZE_MIN 64
51static int ring_size = 128; 45static int ring_size = 128;
52module_param(ring_size, int, S_IRUGO); 46module_param(ring_size, int, S_IRUGO);
53MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); 47MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
54 48
49static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
50 NETIF_MSG_LINK | NETIF_MSG_IFUP |
51 NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
52 NETIF_MSG_TX_ERR;
53
54static int debug = -1;
55module_param(debug, int, S_IRUGO);
56MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
57
55static void do_set_multicast(struct work_struct *w) 58static void do_set_multicast(struct work_struct *w)
56{ 59{
57 struct net_device_context *ndevctx = 60 struct net_device_context *ndevctx =
@@ -235,9 +238,6 @@ void netvsc_xmit_completion(void *context)
235 struct sk_buff *skb = (struct sk_buff *) 238 struct sk_buff *skb = (struct sk_buff *)
236 (unsigned long)packet->send_completion_tid; 239 (unsigned long)packet->send_completion_tid;
237 240
238 if (!packet->part_of_skb)
239 kfree(packet);
240
241 if (skb) 241 if (skb)
242 dev_kfree_skb_any(skb); 242 dev_kfree_skb_any(skb);
243} 243}
@@ -389,7 +389,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
389 u32 net_trans_info; 389 u32 net_trans_info;
390 u32 hash; 390 u32 hash;
391 u32 skb_length; 391 u32 skb_length;
392 u32 head_room;
393 u32 pkt_sz; 392 u32 pkt_sz;
394 struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; 393 struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
395 394
@@ -402,7 +401,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
402 401
403check_size: 402check_size:
404 skb_length = skb->len; 403 skb_length = skb->len;
405 head_room = skb_headroom(skb);
406 num_data_pgs = netvsc_get_slots(skb) + 2; 404 num_data_pgs = netvsc_get_slots(skb) + 2;
407 if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) { 405 if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) {
408 net_alert_ratelimited("packet too big: %u pages (%u bytes)\n", 406 net_alert_ratelimited("packet too big: %u pages (%u bytes)\n",
@@ -421,20 +419,14 @@ check_size:
421 419
422 pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE; 420 pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE;
423 421
424 if (head_room < pkt_sz) { 422 ret = skb_cow_head(skb, pkt_sz);
425 packet = kmalloc(pkt_sz, GFP_ATOMIC); 423 if (ret) {
426 if (!packet) { 424 netdev_err(net, "unable to alloc hv_netvsc_packet\n");
427 /* out of memory, drop packet */ 425 ret = -ENOMEM;
428 netdev_err(net, "unable to alloc hv_netvsc_packet\n"); 426 goto drop;
429 ret = -ENOMEM;
430 goto drop;
431 }
432 packet->part_of_skb = false;
433 } else {
434 /* Use the headroom for building up the packet */
435 packet = (struct hv_netvsc_packet *)skb->head;
436 packet->part_of_skb = true;
437 } 427 }
428 /* Use the headroom for building up the packet */
429 packet = (struct hv_netvsc_packet *)skb->head;
438 430
439 packet->status = 0; 431 packet->status = 0;
440 packet->xmit_more = skb->xmit_more; 432 packet->xmit_more = skb->xmit_more;
@@ -591,8 +583,6 @@ drop:
591 net->stats.tx_bytes += skb_length; 583 net->stats.tx_bytes += skb_length;
592 net->stats.tx_packets++; 584 net->stats.tx_packets++;
593 } else { 585 } else {
594 if (packet && !packet->part_of_skb)
595 kfree(packet);
596 if (ret != -EAGAIN) { 586 if (ret != -EAGAIN) {
597 dev_kfree_skb_any(skb); 587 dev_kfree_skb_any(skb);
598 net->stats.tx_dropped++; 588 net->stats.tx_dropped++;
@@ -888,6 +878,11 @@ static int netvsc_probe(struct hv_device *dev,
888 878
889 net_device_ctx = netdev_priv(net); 879 net_device_ctx = netdev_priv(net);
890 net_device_ctx->device_ctx = dev; 880 net_device_ctx->device_ctx = dev;
881 net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
882 if (netif_msg_probe(net_device_ctx))
883 netdev_dbg(net, "netvsc msg_enable: %d\n",
884 net_device_ctx->msg_enable);
885
891 hv_set_drvdata(dev, net); 886 hv_set_drvdata(dev, net);
892 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); 887 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
893 INIT_WORK(&net_device_ctx->work, do_set_multicast); 888 INIT_WORK(&net_device_ctx->work, do_set_multicast);
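[Annotation] The netvsc_start_xmit() hunk replaces the "kmalloc a packet unless the skb has enough headroom" branch with a single skb_cow_head() call, after which the packet descriptor always lives in the (now guaranteed writable) headroom and the part_of_skb bookkeeping disappears. The fragment below condenses that step as a sketch; netvsc_pkt_from_headroom() is a hypothetical helper, not a function in the patch.

/* Guarantee writable headroom once via skb_cow_head() instead of
 * branching between kmalloc() and skb->head.
 */
static struct hv_netvsc_packet *netvsc_pkt_from_headroom(struct sk_buff *skb,
							 u32 pkt_sz)
{
	if (skb_cow_head(skb, pkt_sz))
		return NULL;	/* caller drops the skb, as in the hunk */

	/* the headroom is now private to us, so the packet can live there */
	return (struct hv_netvsc_packet *)skb->head;
}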
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 0d92efefd796..9118cea91882 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -429,7 +429,8 @@ int rndis_filter_receive(struct hv_device *dev,
429 429
430 rndis_msg = pkt->data; 430 rndis_msg = pkt->data;
431 431
432 dump_rndis_message(dev, rndis_msg); 432 if (netif_msg_rx_err(net_dev->nd_ctx))
433 dump_rndis_message(dev, rndis_msg);
433 434
434 switch (rndis_msg->ndis_msg_type) { 435 switch (rndis_msg->ndis_msg_type) {
435 case RNDIS_MSG_PACKET: 436 case RNDIS_MSG_PACKET:
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 49ce7ece5af3..c9cb486c753d 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -80,7 +80,8 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
80 * assume the pin serves as pull-up. If direction is 80 * assume the pin serves as pull-up. If direction is
81 * output, the default value is high. 81 * output, the default value is high.
82 */ 82 */
83 gpio_set_value(bitbang->mdo, 1 ^ bitbang->mdo_active_low); 83 gpio_set_value_cansleep(bitbang->mdo,
84 1 ^ bitbang->mdo_active_low);
84 return; 85 return;
85 } 86 }
86 87
@@ -96,7 +97,8 @@ static int mdio_get(struct mdiobb_ctrl *ctrl)
96 struct mdio_gpio_info *bitbang = 97 struct mdio_gpio_info *bitbang =
97 container_of(ctrl, struct mdio_gpio_info, ctrl); 98 container_of(ctrl, struct mdio_gpio_info, ctrl);
98 99
99 return gpio_get_value(bitbang->mdio) ^ bitbang->mdio_active_low; 100 return gpio_get_value_cansleep(bitbang->mdio) ^
101 bitbang->mdio_active_low;
100} 102}
101 103
102static void mdio_set(struct mdiobb_ctrl *ctrl, int what) 104static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
@@ -105,9 +107,11 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
105 container_of(ctrl, struct mdio_gpio_info, ctrl); 107 container_of(ctrl, struct mdio_gpio_info, ctrl);
106 108
107 if (bitbang->mdo) 109 if (bitbang->mdo)
108 gpio_set_value(bitbang->mdo, what ^ bitbang->mdo_active_low); 110 gpio_set_value_cansleep(bitbang->mdo,
111 what ^ bitbang->mdo_active_low);
109 else 112 else
110 gpio_set_value(bitbang->mdio, what ^ bitbang->mdio_active_low); 113 gpio_set_value_cansleep(bitbang->mdio,
114 what ^ bitbang->mdio_active_low);
111} 115}
112 116
113static void mdc_set(struct mdiobb_ctrl *ctrl, int what) 117static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
@@ -115,7 +119,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
115 struct mdio_gpio_info *bitbang = 119 struct mdio_gpio_info *bitbang =
116 container_of(ctrl, struct mdio_gpio_info, ctrl); 120 container_of(ctrl, struct mdio_gpio_info, ctrl);
117 121
118 gpio_set_value(bitbang->mdc, what ^ bitbang->mdc_active_low); 122 gpio_set_value_cansleep(bitbang->mdc, what ^ bitbang->mdc_active_low);
119} 123}
120 124
121static struct mdiobb_ops mdio_gpio_ops = { 125static struct mdiobb_ops mdio_gpio_ops = {
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index 1a87a585e74d..66edd99bc302 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -12,33 +12,30 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/phy.h> 13#include <linux/phy.h>
14#include <linux/mdio-mux.h> 14#include <linux/mdio-mux.h>
15#include <linux/of_gpio.h> 15#include <linux/gpio/consumer.h>
16 16
17#define DRV_VERSION "1.1" 17#define DRV_VERSION "1.1"
18#define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver" 18#define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver"
19 19
20#define MDIO_MUX_GPIO_MAX_BITS 8
21
22struct mdio_mux_gpio_state { 20struct mdio_mux_gpio_state {
23 struct gpio_desc *gpio[MDIO_MUX_GPIO_MAX_BITS]; 21 struct gpio_descs *gpios;
24 unsigned int num_gpios;
25 void *mux_handle; 22 void *mux_handle;
26}; 23};
27 24
28static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, 25static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
29 void *data) 26 void *data)
30{ 27{
31 int values[MDIO_MUX_GPIO_MAX_BITS];
32 unsigned int n;
33 struct mdio_mux_gpio_state *s = data; 28 struct mdio_mux_gpio_state *s = data;
29 int values[s->gpios->ndescs];
30 unsigned int n;
34 31
35 if (current_child == desired_child) 32 if (current_child == desired_child)
36 return 0; 33 return 0;
37 34
38 for (n = 0; n < s->num_gpios; n++) { 35 for (n = 0; n < s->gpios->ndescs; n++)
39 values[n] = (desired_child >> n) & 1; 36 values[n] = (desired_child >> n) & 1;
40 } 37
41 gpiod_set_array_cansleep(s->num_gpios, s->gpio, values); 38 gpiod_set_array_cansleep(s->gpios->ndescs, s->gpios->desc, values);
42 39
43 return 0; 40 return 0;
44} 41}
@@ -46,56 +43,33 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
46static int mdio_mux_gpio_probe(struct platform_device *pdev) 43static int mdio_mux_gpio_probe(struct platform_device *pdev)
47{ 44{
48 struct mdio_mux_gpio_state *s; 45 struct mdio_mux_gpio_state *s;
49 int num_gpios;
50 unsigned int n;
51 int r; 46 int r;
52 47
53 if (!pdev->dev.of_node)
54 return -ENODEV;
55
56 num_gpios = of_gpio_count(pdev->dev.of_node);
57 if (num_gpios <= 0 || num_gpios > MDIO_MUX_GPIO_MAX_BITS)
58 return -ENODEV;
59
60 s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); 48 s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
61 if (!s) 49 if (!s)
62 return -ENOMEM; 50 return -ENOMEM;
63 51
64 s->num_gpios = num_gpios; 52 s->gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW);
65 53 if (IS_ERR(s->gpios))
66 for (n = 0; n < num_gpios; ) { 54 return PTR_ERR(s->gpios);
67 struct gpio_desc *gpio = gpiod_get_index(&pdev->dev, NULL, n,
68 GPIOD_OUT_LOW);
69 if (IS_ERR(gpio)) {
70 r = PTR_ERR(gpio);
71 goto err;
72 }
73 s->gpio[n] = gpio;
74 n++;
75 }
76 55
77 r = mdio_mux_init(&pdev->dev, 56 r = mdio_mux_init(&pdev->dev,
78 mdio_mux_gpio_switch_fn, &s->mux_handle, s); 57 mdio_mux_gpio_switch_fn, &s->mux_handle, s);
79 58
80 if (r == 0) { 59 if (r != 0) {
81 pdev->dev.platform_data = s; 60 gpiod_put_array(s->gpios);
82 return 0; 61 return r;
83 }
84err:
85 while (n) {
86 n--;
87 gpiod_put(s->gpio[n]);
88 } 62 }
89 return r; 63
64 pdev->dev.platform_data = s;
65 return 0;
90} 66}
91 67
92static int mdio_mux_gpio_remove(struct platform_device *pdev) 68static int mdio_mux_gpio_remove(struct platform_device *pdev)
93{ 69{
94 unsigned int n;
95 struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev); 70 struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev);
96 mdio_mux_uninit(s->mux_handle); 71 mdio_mux_uninit(s->mux_handle);
97 for (n = 0; n < s->num_gpios; n++) 72 gpiod_put_array(s->gpios);
98 gpiod_put(s->gpio[n]);
99 return 0; 73 return 0;
100} 74}
101 75
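[Annotation] The mdio-mux-gpio rework above swaps per-index gpiod_get_index()/gpiod_put() calls for the array API: gpiod_get_array() in probe, gpiod_set_array_cansleep() in the switch function, and gpiod_put_array() on the error path and in remove. The fragment below condenses the switch step as a sketch; mux_select() is a made-up name, while the gpiod_* calls and the gpio_descs fields (ndescs, desc) are used exactly as in the hunk.

/* Drive all mux select lines in one call, one bit of the desired child
 * index per descriptor.
 */
static int mux_select(struct mdio_mux_gpio_state *s, int desired_child)
{
	int values[s->gpios->ndescs];	/* VLA, as in the reworked driver */
	unsigned int n;

	for (n = 0; n < s->gpios->ndescs; n++)
		values[n] = (desired_child >> n) & 1;

	gpiod_set_array_cansleep(s->gpios->ndescs, s->gpios->desc, values);
	return 0;
}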
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index 911b21602ff2..05005c660d4d 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -478,7 +478,6 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
478 struct blkcipher_desc desc = { .tfm = state->arc4 }; 478 struct blkcipher_desc desc = { .tfm = state->arc4 };
479 unsigned ccount; 479 unsigned ccount;
480 int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED; 480 int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
481 int sanity = 0;
482 struct scatterlist sg_in[1], sg_out[1]; 481 struct scatterlist sg_in[1], sg_out[1];
483 482
484 if (isize <= PPP_HDRLEN + MPPE_OVHD) { 483 if (isize <= PPP_HDRLEN + MPPE_OVHD) {
@@ -514,31 +513,19 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
514 "mppe_decompress[%d]: ENCRYPTED bit not set!\n", 513 "mppe_decompress[%d]: ENCRYPTED bit not set!\n",
515 state->unit); 514 state->unit);
516 state->sanity_errors += 100; 515 state->sanity_errors += 100;
517 sanity = 1; 516 goto sanity_error;
518 } 517 }
519 if (!state->stateful && !flushed) { 518 if (!state->stateful && !flushed) {
520 printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in " 519 printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in "
521 "stateless mode!\n", state->unit); 520 "stateless mode!\n", state->unit);
522 state->sanity_errors += 100; 521 state->sanity_errors += 100;
523 sanity = 1; 522 goto sanity_error;
524 } 523 }
525 if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) { 524 if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) {
526 printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on " 525 printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on "
527 "flag packet!\n", state->unit); 526 "flag packet!\n", state->unit);
528 state->sanity_errors += 100; 527 state->sanity_errors += 100;
529 sanity = 1; 528 goto sanity_error;
530 }
531
532 if (sanity) {
533 if (state->sanity_errors < SANITY_MAX)
534 return DECOMP_ERROR;
535 else
536 /*
537 * Take LCP down if the peer is sending too many bogons.
538 * We don't want to do this for a single or just a few
539 * instances since it could just be due to packet corruption.
540 */
541 return DECOMP_FATALERROR;
542 } 529 }
543 530
544 /* 531 /*
@@ -546,6 +533,13 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
546 */ 533 */
547 534
548 if (!state->stateful) { 535 if (!state->stateful) {
536 /* Discard late packet */
537 if ((ccount - state->ccount) % MPPE_CCOUNT_SPACE
538 > MPPE_CCOUNT_SPACE / 2) {
539 state->sanity_errors++;
540 goto sanity_error;
541 }
542
549 /* RFC 3078, sec 8.1. Rekey for every packet. */ 543 /* RFC 3078, sec 8.1. Rekey for every packet. */
550 while (state->ccount != ccount) { 544 while (state->ccount != ccount) {
551 mppe_rekey(state, 0); 545 mppe_rekey(state, 0);
@@ -649,6 +643,16 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
649 state->sanity_errors >>= 1; 643 state->sanity_errors >>= 1;
650 644
651 return osize; 645 return osize;
646
647sanity_error:
648 if (state->sanity_errors < SANITY_MAX)
649 return DECOMP_ERROR;
650 else
651 /* Take LCP down if the peer is sending too many bogons.
652 * We don't want to do this for a single or just a few
653 * instances since it could just be due to packet corruption.
654 */
655 return DECOMP_FATALERROR;
652} 656}
653 657
654/* 658/*
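The ppp_mppe rework above replaces the accumulated "sanity" flag with a single sanity_error label that decides between DECOMP_ERROR and DECOMP_FATALERROR, and it discards late packets in stateless mode before rekeying. A stand-alone sketch of that error-funnel shape; the names, return values and the SANITY_MAX threshold are illustrative, not the driver's.

#include <stdio.h>

#define ERR_SOFT	(-1)	/* cf. DECOMP_ERROR */
#define ERR_FATAL	(-2)	/* cf. DECOMP_FATALERROR */
#define SANITY_MAX	1600	/* illustrative error budget */

static int sanity_errors;

/* Every failed check funnels into one exit that escalates from a soft
 * error to a fatal one once the error budget is exhausted, the same
 * shape as the reworked mppe_decompress(). */
static int check_packet(int encrypted, int flushed, int stateful)
{
	if (!encrypted) {
		sanity_errors += 100;
		goto sanity_error;
	}
	if (!stateful && !flushed) {
		sanity_errors += 100;
		goto sanity_error;
	}
	return 0;

sanity_error:
	return sanity_errors < SANITY_MAX ? ERR_SOFT : ERR_FATAL;
}

int main(void)
{
	printf("%d\n", check_packet(0, 1, 0));	/* first bogon: soft error */
	return 0;
}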
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 154116aafd0d..27a5f954f8e9 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -730,12 +730,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
730 /* Only change unicasts */ 730 /* Only change unicasts */
731 if (!(is_multicast_ether_addr(f->eth_addr) || 731 if (!(is_multicast_ether_addr(f->eth_addr) ||
732 is_zero_ether_addr(f->eth_addr))) { 732 is_zero_ether_addr(f->eth_addr))) {
733 int rc = vxlan_fdb_replace(f, ip, port, vni, 733 notify |= vxlan_fdb_replace(f, ip, port, vni,
734 ifindex); 734 ifindex);
735
736 if (rc < 0)
737 return rc;
738 notify |= rc;
739 } else 735 } else
740 return -EOPNOTSUPP; 736 return -EOPNOTSUPP;
741 } 737 }
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index c43aca69fb30..0fc3fe5fd5b8 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -667,6 +667,8 @@ static struct raw3215_info *raw3215_alloc_info(void)
667 info->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA); 667 info->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
668 info->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA); 668 info->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA);
669 if (!info->buffer || !info->inbuf) { 669 if (!info->buffer || !info->inbuf) {
670 kfree(info->inbuf);
671 kfree(info->buffer);
670 kfree(info); 672 kfree(info);
671 return NULL; 673 return NULL;
672 } 674 }
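The two added kfree() calls above cover the case where only one of the two buffers allocates; kfree(NULL) is a no-op, so both can be freed unconditionally before the containing structure. A stand-alone sketch of that partial-allocation cleanup, with malloc/free standing in for kzalloc/kfree and illustrative sizes.

#include <stdlib.h>

struct info {
	void *buffer;
	void *inbuf;
};

/* Allocate two buffers; on partial failure free whatever did succeed.
 * free(NULL) is defined to do nothing, just like kfree(NULL). */
struct info *alloc_info(void)
{
	struct info *info = calloc(1, sizeof(*info));

	if (!info)
		return NULL;

	info->buffer = malloc(65536);
	info->inbuf = malloc(256);
	if (!info->buffer || !info->inbuf) {
		free(info->inbuf);	/* safe even if this one is NULL */
		free(info->buffer);
		free(info);
		return NULL;
	}
	return info;
}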
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 7600639db4c4..add419d6ff34 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -149,7 +149,6 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
149static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg); 149static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
150static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id); 150static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
151static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code); 151static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
152static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);
153 152
154/* Functions */ 153/* Functions */
155 154
@@ -1340,11 +1339,11 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1340 } 1339 }
1341 1340
1342 /* Now complete the io */ 1341 /* Now complete the io */
1342 scsi_dma_unmap(cmd);
1343 cmd->scsi_done(cmd);
1343 tw_dev->state[request_id] = TW_S_COMPLETED; 1344 tw_dev->state[request_id] = TW_S_COMPLETED;
1344 twa_free_request_id(tw_dev, request_id); 1345 twa_free_request_id(tw_dev, request_id);
1345 tw_dev->posted_request_count--; 1346 tw_dev->posted_request_count--;
1346 tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
1347 twa_unmap_scsi_data(tw_dev, request_id);
1348 } 1347 }
1349 1348
1350 /* Check for valid status after each drain */ 1349 /* Check for valid status after each drain */
@@ -1402,26 +1401,6 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
1402 } 1401 }
1403} /* End twa_load_sgl() */ 1402} /* End twa_load_sgl() */
1404 1403
1405/* This function will perform a pci-dma mapping for a scatter gather list */
1406static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
1407{
1408 int use_sg;
1409 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1410
1411 use_sg = scsi_dma_map(cmd);
1412 if (!use_sg)
1413 return 0;
1414 else if (use_sg < 0) {
1415 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
1416 return 0;
1417 }
1418
1419 cmd->SCp.phase = TW_PHASE_SGLIST;
1420 cmd->SCp.have_data_in = use_sg;
1421
1422 return use_sg;
1423} /* End twa_map_scsi_sg_data() */
1424
1425/* This function will poll for a response interrupt of a request */ 1404/* This function will poll for a response interrupt of a request */
1426static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds) 1405static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1427{ 1406{
@@ -1600,9 +1579,11 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1600 (tw_dev->state[i] != TW_S_INITIAL) && 1579 (tw_dev->state[i] != TW_S_INITIAL) &&
1601 (tw_dev->state[i] != TW_S_COMPLETED)) { 1580 (tw_dev->state[i] != TW_S_COMPLETED)) {
1602 if (tw_dev->srb[i]) { 1581 if (tw_dev->srb[i]) {
1603 tw_dev->srb[i]->result = (DID_RESET << 16); 1582 struct scsi_cmnd *cmd = tw_dev->srb[i];
1604 tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); 1583
1605 twa_unmap_scsi_data(tw_dev, i); 1584 cmd->result = (DID_RESET << 16);
1585 scsi_dma_unmap(cmd);
1586 cmd->scsi_done(cmd);
1606 } 1587 }
1607 } 1588 }
1608 } 1589 }
@@ -1781,21 +1762,18 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
1781 /* Save the scsi command for use by the ISR */ 1762 /* Save the scsi command for use by the ISR */
1782 tw_dev->srb[request_id] = SCpnt; 1763 tw_dev->srb[request_id] = SCpnt;
1783 1764
1784 /* Initialize phase to zero */
1785 SCpnt->SCp.phase = TW_PHASE_INITIAL;
1786
1787 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); 1765 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1788 switch (retval) { 1766 switch (retval) {
1789 case SCSI_MLQUEUE_HOST_BUSY: 1767 case SCSI_MLQUEUE_HOST_BUSY:
1768 scsi_dma_unmap(SCpnt);
1790 twa_free_request_id(tw_dev, request_id); 1769 twa_free_request_id(tw_dev, request_id);
1791 twa_unmap_scsi_data(tw_dev, request_id);
1792 break; 1770 break;
1793 case 1: 1771 case 1:
1794 tw_dev->state[request_id] = TW_S_COMPLETED;
1795 twa_free_request_id(tw_dev, request_id);
1796 twa_unmap_scsi_data(tw_dev, request_id);
1797 SCpnt->result = (DID_ERROR << 16); 1772 SCpnt->result = (DID_ERROR << 16);
1773 scsi_dma_unmap(SCpnt);
1798 done(SCpnt); 1774 done(SCpnt);
1775 tw_dev->state[request_id] = TW_S_COMPLETED;
1776 twa_free_request_id(tw_dev, request_id);
1799 retval = 0; 1777 retval = 0;
1800 } 1778 }
1801out: 1779out:
@@ -1863,8 +1841,8 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
1863 command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); 1841 command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1864 command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH); 1842 command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1865 } else { 1843 } else {
1866 sg_count = twa_map_scsi_sg_data(tw_dev, request_id); 1844 sg_count = scsi_dma_map(srb);
1867 if (sg_count == 0) 1845 if (sg_count < 0)
1868 goto out; 1846 goto out;
1869 1847
1870 scsi_for_each_sg(srb, sg, sg_count, i) { 1848 scsi_for_each_sg(srb, sg, sg_count, i) {
@@ -1979,15 +1957,6 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1979 return(table[index].text); 1957 return(table[index].text);
1980} /* End twa_string_lookup() */ 1958} /* End twa_string_lookup() */
1981 1959
1982/* This function will perform a pci-dma unmap */
1983static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
1984{
1985 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1986
1987 if (cmd->SCp.phase == TW_PHASE_SGLIST)
1988 scsi_dma_unmap(cmd);
1989} /* End twa_unmap_scsi_data() */
1990
1991/* This function gets called when a disk is coming on-line */ 1960/* This function gets called when a disk is coming on-line */
1992static int twa_slave_configure(struct scsi_device *sdev) 1961static int twa_slave_configure(struct scsi_device *sdev)
1993{ 1962{
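The 3w-9xxx changes drop the driver-private map/unmap wrappers and the SCp.phase bookkeeping in favour of calling scsi_dma_map()/scsi_dma_unmap() directly, unmapping before the command is completed in every path. A hedged sketch of that calling convention; issue_to_hw() is a hypothetical stand-in for the firmware submission, while the scsi_dma_* calls and their return convention (negative on mapping failure, 0 for no data, otherwise the SG entry count) are the real ones.

#include <scsi/scsi_cmnd.h>

static int issue_to_hw(struct scsi_cmnd *cmd, int sg_count)
{
	return 0;			/* hypothetical stand-in for the real submission path */
}

static int my_queue_cmd(struct scsi_cmnd *cmd)
{
	int sg_count = scsi_dma_map(cmd);

	if (sg_count < 0)		/* mapping failed */
		return sg_count;

	/* sg_count == 0 means no data phase; > 0 is the mapped SG list. */
	return issue_to_hw(cmd, sg_count);
}

static void my_complete_cmd(struct scsi_cmnd *cmd, int result)
{
	scsi_dma_unmap(cmd);		/* always undo the mapping first */
	cmd->result = result;
	cmd->scsi_done(cmd);		/* then hand the command back */
}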
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index 040f7214e5b7..0fdc83cfa0e1 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -324,11 +324,6 @@ static twa_message_type twa_error_table[] = {
324#define TW_CURRENT_DRIVER_BUILD 0 324#define TW_CURRENT_DRIVER_BUILD 0
325#define TW_CURRENT_DRIVER_BRANCH 0 325#define TW_CURRENT_DRIVER_BRANCH 0
326 326
327/* Phase defines */
328#define TW_PHASE_INITIAL 0
329#define TW_PHASE_SINGLE 1
330#define TW_PHASE_SGLIST 2
331
332/* Misc defines */ 327/* Misc defines */
333#define TW_9550SX_DRAIN_COMPLETED 0xFFFF 328#define TW_9550SX_DRAIN_COMPLETED 0xFFFF
334#define TW_SECTOR_SIZE 512 329#define TW_SECTOR_SIZE 512
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 2361772d5909..f8374850f714 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -290,26 +290,6 @@ static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
290 return 0; 290 return 0;
291} /* End twl_post_command_packet() */ 291} /* End twl_post_command_packet() */
292 292
293/* This function will perform a pci-dma mapping for a scatter gather list */
294static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
295{
296 int use_sg;
297 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
298
299 use_sg = scsi_dma_map(cmd);
300 if (!use_sg)
301 return 0;
302 else if (use_sg < 0) {
303 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list");
304 return 0;
305 }
306
307 cmd->SCp.phase = TW_PHASE_SGLIST;
308 cmd->SCp.have_data_in = use_sg;
309
310 return use_sg;
311} /* End twl_map_scsi_sg_data() */
312
313/* This function hands scsi cdb's to the firmware */ 293/* This function hands scsi cdb's to the firmware */
314static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg) 294static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
315{ 295{
@@ -357,8 +337,8 @@ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
357 if (!sglistarg) { 337 if (!sglistarg) {
358 /* Map sglist from scsi layer to cmd packet */ 338 /* Map sglist from scsi layer to cmd packet */
359 if (scsi_sg_count(srb)) { 339 if (scsi_sg_count(srb)) {
360 sg_count = twl_map_scsi_sg_data(tw_dev, request_id); 340 sg_count = scsi_dma_map(srb);
361 if (sg_count == 0) 341 if (sg_count <= 0)
362 goto out; 342 goto out;
363 343
364 scsi_for_each_sg(srb, sg, sg_count, i) { 344 scsi_for_each_sg(srb, sg, sg_count, i) {
@@ -1102,15 +1082,6 @@ out:
1102 return retval; 1082 return retval;
1103} /* End twl_initialize_device_extension() */ 1083} /* End twl_initialize_device_extension() */
1104 1084
1105/* This function will perform a pci-dma unmap */
1106static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
1107{
1108 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1109
1110 if (cmd->SCp.phase == TW_PHASE_SGLIST)
1111 scsi_dma_unmap(cmd);
1112} /* End twl_unmap_scsi_data() */
1113
1114/* This function will handle attention interrupts */ 1085/* This function will handle attention interrupts */
1115static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev) 1086static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev)
1116{ 1087{
@@ -1251,11 +1222,11 @@ static irqreturn_t twl_interrupt(int irq, void *dev_instance)
1251 } 1222 }
1252 1223
1253 /* Now complete the io */ 1224 /* Now complete the io */
1225 scsi_dma_unmap(cmd);
1226 cmd->scsi_done(cmd);
1254 tw_dev->state[request_id] = TW_S_COMPLETED; 1227 tw_dev->state[request_id] = TW_S_COMPLETED;
1255 twl_free_request_id(tw_dev, request_id); 1228 twl_free_request_id(tw_dev, request_id);
1256 tw_dev->posted_request_count--; 1229 tw_dev->posted_request_count--;
1257 tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
1258 twl_unmap_scsi_data(tw_dev, request_id);
1259 } 1230 }
1260 1231
1261 /* Check for another response interrupt */ 1232 /* Check for another response interrupt */
@@ -1400,10 +1371,12 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res
1400 if ((tw_dev->state[i] != TW_S_FINISHED) && 1371 if ((tw_dev->state[i] != TW_S_FINISHED) &&
1401 (tw_dev->state[i] != TW_S_INITIAL) && 1372 (tw_dev->state[i] != TW_S_INITIAL) &&
1402 (tw_dev->state[i] != TW_S_COMPLETED)) { 1373 (tw_dev->state[i] != TW_S_COMPLETED)) {
1403 if (tw_dev->srb[i]) { 1374 struct scsi_cmnd *cmd = tw_dev->srb[i];
1404 tw_dev->srb[i]->result = (DID_RESET << 16); 1375
1405 tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); 1376 if (cmd) {
1406 twl_unmap_scsi_data(tw_dev, i); 1377 cmd->result = (DID_RESET << 16);
1378 scsi_dma_unmap(cmd);
1379 cmd->scsi_done(cmd);
1407 } 1380 }
1408 } 1381 }
1409 } 1382 }
@@ -1507,9 +1480,6 @@ static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
1507 /* Save the scsi command for use by the ISR */ 1480 /* Save the scsi command for use by the ISR */
1508 tw_dev->srb[request_id] = SCpnt; 1481 tw_dev->srb[request_id] = SCpnt;
1509 1482
1510 /* Initialize phase to zero */
1511 SCpnt->SCp.phase = TW_PHASE_INITIAL;
1512
1513 retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); 1483 retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1514 if (retval) { 1484 if (retval) {
1515 tw_dev->state[request_id] = TW_S_COMPLETED; 1485 tw_dev->state[request_id] = TW_S_COMPLETED;
diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
index d474892701d4..fec6449c7595 100644
--- a/drivers/scsi/3w-sas.h
+++ b/drivers/scsi/3w-sas.h
@@ -103,10 +103,6 @@ static char *twl_aen_severity_table[] =
103#define TW_CURRENT_DRIVER_BUILD 0 103#define TW_CURRENT_DRIVER_BUILD 0
104#define TW_CURRENT_DRIVER_BRANCH 0 104#define TW_CURRENT_DRIVER_BRANCH 0
105 105
106/* Phase defines */
107#define TW_PHASE_INITIAL 0
108#define TW_PHASE_SGLIST 2
109
110/* Misc defines */ 106/* Misc defines */
111#define TW_SECTOR_SIZE 512 107#define TW_SECTOR_SIZE 512
112#define TW_MAX_UNITS 32 108#define TW_MAX_UNITS 32
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index c75f2048319f..2940bd769936 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1271,32 +1271,6 @@ static int tw_initialize_device_extension(TW_Device_Extension *tw_dev)
1271 return 0; 1271 return 0;
1272} /* End tw_initialize_device_extension() */ 1272} /* End tw_initialize_device_extension() */
1273 1273
1274static int tw_map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
1275{
1276 int use_sg;
1277
1278 dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n");
1279
1280 use_sg = scsi_dma_map(cmd);
1281 if (use_sg < 0) {
1282 printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n");
1283 return 0;
1284 }
1285
1286 cmd->SCp.phase = TW_PHASE_SGLIST;
1287 cmd->SCp.have_data_in = use_sg;
1288
1289 return use_sg;
1290} /* End tw_map_scsi_sg_data() */
1291
1292static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
1293{
1294 dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
1295
1296 if (cmd->SCp.phase == TW_PHASE_SGLIST)
1297 scsi_dma_unmap(cmd);
1298} /* End tw_unmap_scsi_data() */
1299
1300/* This function will reset a device extension */ 1274/* This function will reset a device extension */
1301static int tw_reset_device_extension(TW_Device_Extension *tw_dev) 1275static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
1302{ 1276{
@@ -1319,8 +1293,8 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
1319 srb = tw_dev->srb[i]; 1293 srb = tw_dev->srb[i];
1320 if (srb != NULL) { 1294 if (srb != NULL) {
1321 srb->result = (DID_RESET << 16); 1295 srb->result = (DID_RESET << 16);
1322 tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); 1296 scsi_dma_unmap(srb);
1323 tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[i]); 1297 srb->scsi_done(srb);
1324 } 1298 }
1325 } 1299 }
1326 } 1300 }
@@ -1767,8 +1741,8 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
1767 command_packet->byte8.io.lba = lba; 1741 command_packet->byte8.io.lba = lba;
1768 command_packet->byte6.block_count = num_sectors; 1742 command_packet->byte6.block_count = num_sectors;
1769 1743
1770 use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]); 1744 use_sg = scsi_dma_map(srb);
1771 if (!use_sg) 1745 if (use_sg <= 0)
1772 return 1; 1746 return 1;
1773 1747
1774 scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) { 1748 scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) {
@@ -1955,9 +1929,6 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
1955 /* Save the scsi command for use by the ISR */ 1929 /* Save the scsi command for use by the ISR */
1956 tw_dev->srb[request_id] = SCpnt; 1930 tw_dev->srb[request_id] = SCpnt;
1957 1931
1958 /* Initialize phase to zero */
1959 SCpnt->SCp.phase = TW_PHASE_INITIAL;
1960
1961 switch (*command) { 1932 switch (*command) {
1962 case READ_10: 1933 case READ_10:
1963 case READ_6: 1934 case READ_6:
@@ -2185,12 +2156,11 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance)
2185 2156
2186 /* Now complete the io */ 2157 /* Now complete the io */
2187 if ((error != TW_ISR_DONT_COMPLETE)) { 2158 if ((error != TW_ISR_DONT_COMPLETE)) {
2159 scsi_dma_unmap(tw_dev->srb[request_id]);
2160 tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
2188 tw_dev->state[request_id] = TW_S_COMPLETED; 2161 tw_dev->state[request_id] = TW_S_COMPLETED;
2189 tw_state_request_finish(tw_dev, request_id); 2162 tw_state_request_finish(tw_dev, request_id);
2190 tw_dev->posted_request_count--; 2163 tw_dev->posted_request_count--;
2191 tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
2192
2193 tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
2194 } 2164 }
2195 } 2165 }
2196 2166
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 29b0b84ed69e..6f65e663d393 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -195,11 +195,6 @@ static unsigned char tw_sense_table[][4] =
195#define TW_AEN_SMART_FAIL 0x000F 195#define TW_AEN_SMART_FAIL 0x000F
196#define TW_AEN_SBUF_FAIL 0x0024 196#define TW_AEN_SBUF_FAIL 0x0024
197 197
198/* Phase defines */
199#define TW_PHASE_INITIAL 0
200#define TW_PHASE_SINGLE 1
201#define TW_PHASE_SGLIST 2
202
203/* Misc defines */ 198/* Misc defines */
204#define TW_ALIGNMENT_6000 64 /* 64 bytes */ 199#define TW_ALIGNMENT_6000 64 /* 64 bytes */
205#define TW_ALIGNMENT_7000 4 /* 4 bytes */ 200#define TW_ALIGNMENT_7000 4 /* 4 bytes */
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index ec432763a29a..b95d2779f467 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -375,9 +375,10 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
375 u8 lun = cmd->device->lun; 375 u8 lun = cmd->device->lun;
376 unsigned long flags; 376 unsigned long flags;
377 int bufflen = scsi_bufflen(cmd); 377 int bufflen = scsi_bufflen(cmd);
378 int mbo; 378 int mbo, sg_count;
379 struct mailbox *mb = aha1542->mb; 379 struct mailbox *mb = aha1542->mb;
380 struct ccb *ccb = aha1542->ccb; 380 struct ccb *ccb = aha1542->ccb;
381 struct chain *cptr;
381 382
382 if (*cmd->cmnd == REQUEST_SENSE) { 383 if (*cmd->cmnd == REQUEST_SENSE) {
383 /* Don't do the command - we have the sense data already */ 384 /* Don't do the command - we have the sense data already */
@@ -397,6 +398,13 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
397 print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len); 398 print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len);
398 } 399 }
399#endif 400#endif
401 if (bufflen) { /* allocate memory before taking host_lock */
402 sg_count = scsi_sg_count(cmd);
403 cptr = kmalloc(sizeof(*cptr) * sg_count, GFP_KERNEL | GFP_DMA);
404 if (!cptr)
405 return SCSI_MLQUEUE_HOST_BUSY;
406 }
407
400 /* Use the outgoing mailboxes in a round-robin fashion, because this 408 /* Use the outgoing mailboxes in a round-robin fashion, because this
401 is how the host adapter will scan for them */ 409 is how the host adapter will scan for them */
402 410
@@ -441,19 +449,10 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
441 449
442 if (bufflen) { 450 if (bufflen) {
443 struct scatterlist *sg; 451 struct scatterlist *sg;
444 struct chain *cptr; 452 int i;
445 int i, sg_count = scsi_sg_count(cmd);
446 453
447 ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */ 454 ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */
448 cmd->host_scribble = kmalloc(sizeof(*cptr)*sg_count, 455 cmd->host_scribble = (void *)cptr;
449 GFP_KERNEL | GFP_DMA);
450 cptr = (struct chain *) cmd->host_scribble;
451 if (cptr == NULL) {
452 /* free the claimed mailbox slot */
453 aha1542->int_cmds[mbo] = NULL;
454 spin_unlock_irqrestore(sh->host_lock, flags);
455 return SCSI_MLQUEUE_HOST_BUSY;
456 }
457 scsi_for_each_sg(cmd, sg, sg_count, i) { 456 scsi_for_each_sg(cmd, sg, sg_count, i) {
458 any2scsi(cptr[i].dataptr, isa_page_to_bus(sg_page(sg)) 457 any2scsi(cptr[i].dataptr, isa_page_to_bus(sg_page(sg))
459 + sg->offset); 458 + sg->offset);
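The aha1542 change moves the GFP_KERNEL allocation of the scatter-gather chain out from under host_lock: the buffer is allocated up front, where sleeping is allowed, and only attached to the command once the mailbox slot is claimed. A minimal sketch of that allocate-before-lock pattern; the lock, the slot variable and the error value are illustrative, not the driver's.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static void *slot;			/* illustrative shared state guarded by my_lock */

static int queue_work_item(size_t n)
{
	/* GFP_KERNEL may sleep, so allocate before taking the spinlock. */
	void *buf = kmalloc(n, GFP_KERNEL | GFP_DMA);
	unsigned long flags;

	if (!buf)
		return -ENOMEM;		/* the driver returns SCSI_MLQUEUE_HOST_BUSY here */

	spin_lock_irqsave(&my_lock, flags);
	slot = buf;			/* only cheap, non-sleeping work under the lock */
	spin_unlock_irqrestore(&my_lock, flags);
	return 0;
}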
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 262ab837a704..9f77d23239a2 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -226,6 +226,7 @@ static struct {
226 {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 226 {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
227 {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, 227 {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
228 {"Promise", "", NULL, BLIST_SPARSELUN}, 228 {"Promise", "", NULL, BLIST_SPARSELUN},
229 {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
229 {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, 230 {"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
230 {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN}, 231 {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
231 {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN}, 232 {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 60aae01caa89..6efab1c455e1 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -897,6 +897,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
897 */ 897 */
898 if (*bflags & BLIST_MAX_512) 898 if (*bflags & BLIST_MAX_512)
899 blk_queue_max_hw_sectors(sdev->request_queue, 512); 899 blk_queue_max_hw_sectors(sdev->request_queue, 512);
900 /*
901 * Max 1024 sector transfer length for targets that report incorrect
902 * max/optimal lengths and relied on the old block layer safe default
903 */
904 else if (*bflags & BLIST_MAX_1024)
905 blk_queue_max_hw_sectors(sdev->request_queue, 1024);
900 906
901 /* 907 /*
902 * Some devices may not want to have a start command automatically 908 * Some devices may not want to have a start command automatically
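Taken together, the scsi_devinfo and scsi_scan hunks add a BLIST_MAX_1024 blacklist flag for the QNAP iSCSI target and cap its request queue at 1024 sectors, mirroring the existing BLIST_MAX_512 handling (1024 sectors of 512 bytes is a 512 KiB request limit). A short sketch of how such a flag is consumed; the helper name is illustrative, the flags and blk_queue_max_hw_sectors() are the ones used above.

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_devinfo.h>

/* Sketch: blacklist-driven transfer cap, as in scsi_add_lun() above.
 * bflags comes from the devinfo match on the device's vendor/model. */
static void apply_transfer_cap(struct scsi_device *sdev, int bflags)
{
	if (bflags & BLIST_MAX_512)
		blk_queue_max_hw_sectors(sdev->request_queue, 512);
	else if (bflags & BLIST_MAX_1024)
		blk_queue_max_hw_sectors(sdev->request_queue, 1024);
}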
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index cd4c293f0dd0..fe8875f0d7be 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -80,9 +80,10 @@ static int __init sh_pm_runtime_init(void)
80 if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { 80 if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
81 if (!of_machine_is_compatible("renesas,emev2") && 81 if (!of_machine_is_compatible("renesas,emev2") &&
82 !of_machine_is_compatible("renesas,r7s72100") && 82 !of_machine_is_compatible("renesas,r7s72100") &&
83 !of_machine_is_compatible("renesas,r8a73a4") &&
84#ifndef CONFIG_PM_GENERIC_DOMAINS_OF 83#ifndef CONFIG_PM_GENERIC_DOMAINS_OF
84 !of_machine_is_compatible("renesas,r8a73a4") &&
85 !of_machine_is_compatible("renesas,r8a7740") && 85 !of_machine_is_compatible("renesas,r8a7740") &&
86 !of_machine_is_compatible("renesas,sh73a0") &&
86#endif 87#endif
87 !of_machine_is_compatible("renesas,r8a7778") && 88 !of_machine_is_compatible("renesas,r8a7778") &&
88 !of_machine_is_compatible("renesas,r8a7779") && 89 !of_machine_is_compatible("renesas,r8a7779") &&
@@ -90,9 +91,7 @@ static int __init sh_pm_runtime_init(void)
90 !of_machine_is_compatible("renesas,r8a7791") && 91 !of_machine_is_compatible("renesas,r8a7791") &&
91 !of_machine_is_compatible("renesas,r8a7792") && 92 !of_machine_is_compatible("renesas,r8a7792") &&
92 !of_machine_is_compatible("renesas,r8a7793") && 93 !of_machine_is_compatible("renesas,r8a7793") &&
93 !of_machine_is_compatible("renesas,r8a7794") && 94 !of_machine_is_compatible("renesas,r8a7794"))
94 !of_machine_is_compatible("renesas,sh7372") &&
95 !of_machine_is_compatible("renesas,sh73a0"))
96 return 0; 95 return 0;
97 } 96 }
98 97
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 08da4d3e2162..46bcebba54b2 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1998,6 +1998,8 @@ pci_wch_ch38x_setup(struct serial_private *priv,
1998#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250 1998#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
1999#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470 1999#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
2000 2000
2001#define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358
2002
2001/* Unknown vendors/cards - this should not be in linux/pci_ids.h */ 2003/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
2002#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584 2004#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
2003#define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588 2005#define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588
@@ -2520,6 +2522,13 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
2520 .subdevice = PCI_ANY_ID, 2522 .subdevice = PCI_ANY_ID,
2521 .setup = pci_xr17v35x_setup, 2523 .setup = pci_xr17v35x_setup,
2522 }, 2524 },
2525 {
2526 .vendor = PCI_VENDOR_ID_EXAR,
2527 .device = PCI_DEVICE_ID_EXAR_XR17V8358,
2528 .subvendor = PCI_ANY_ID,
2529 .subdevice = PCI_ANY_ID,
2530 .setup = pci_xr17v35x_setup,
2531 },
2523 /* 2532 /*
2524 * Xircom cards 2533 * Xircom cards
2525 */ 2534 */
@@ -2999,6 +3008,7 @@ enum pci_board_num_t {
2999 pbn_exar_XR17V352, 3008 pbn_exar_XR17V352,
3000 pbn_exar_XR17V354, 3009 pbn_exar_XR17V354,
3001 pbn_exar_XR17V358, 3010 pbn_exar_XR17V358,
3011 pbn_exar_XR17V8358,
3002 pbn_exar_ibm_saturn, 3012 pbn_exar_ibm_saturn,
3003 pbn_pasemi_1682M, 3013 pbn_pasemi_1682M,
3004 pbn_ni8430_2, 3014 pbn_ni8430_2,
@@ -3685,6 +3695,14 @@ static struct pciserial_board pci_boards[] = {
3685 .reg_shift = 0, 3695 .reg_shift = 0,
3686 .first_offset = 0, 3696 .first_offset = 0,
3687 }, 3697 },
3698 [pbn_exar_XR17V8358] = {
3699 .flags = FL_BASE0,
3700 .num_ports = 16,
3701 .base_baud = 7812500,
3702 .uart_offset = 0x400,
3703 .reg_shift = 0,
3704 .first_offset = 0,
3705 },
3688 [pbn_exar_ibm_saturn] = { 3706 [pbn_exar_ibm_saturn] = {
3689 .flags = FL_BASE0, 3707 .flags = FL_BASE0,
3690 .num_ports = 1, 3708 .num_ports = 1,
@@ -5080,7 +5098,7 @@ static struct pci_device_id serial_pci_tbl[] = {
5080 0, 5098 0,
5081 0, pbn_exar_XR17C158 }, 5099 0, pbn_exar_XR17C158 },
5082 /* 5100 /*
5083 * Exar Corp. XR17V35[248] Dual/Quad/Octal PCIe UARTs 5101 * Exar Corp. XR17V[48]35[248] Dual/Quad/Octal/Hexa PCIe UARTs
5084 */ 5102 */
5085 { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V352, 5103 { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V352,
5086 PCI_ANY_ID, PCI_ANY_ID, 5104 PCI_ANY_ID, PCI_ANY_ID,
@@ -5094,7 +5112,10 @@ static struct pci_device_id serial_pci_tbl[] = {
5094 PCI_ANY_ID, PCI_ANY_ID, 5112 PCI_ANY_ID, PCI_ANY_ID,
5095 0, 5113 0,
5096 0, pbn_exar_XR17V358 }, 5114 0, pbn_exar_XR17V358 },
5097 5115 { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V8358,
5116 PCI_ANY_ID, PCI_ANY_ID,
5117 0,
5118 0, pbn_exar_XR17V8358 },
5098 /* 5119 /*
5099 * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke) 5120 * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
5100 */ 5121 */
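Supporting the 16-port Exar XR17V8358 takes three coordinated additions above: the device ID, a quirk entry reusing pci_xr17v35x_setup(), and a pciserial_board entry. The base_baud of 7812500 in that entry follows the usual 8250 relationship of UART clock divided by the 16x oversampling rate; a tiny stand-alone check, assuming the 125 MHz internal clock of the XR17V35x family (an assumption, not stated in the hunk):

#include <stdio.h>

int main(void)
{
	unsigned long uartclk = 125000000UL;	/* assumed XR17V8358 internal clock */

	/* 8250-style base_baud = clock / 16, giving 7812500 as in pbn_exar_XR17V8358 */
	printf("base_baud = %lu\n", uartclk / 16);
	return 0;
}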
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index d58fe4763d9e..27dade29646b 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -880,6 +880,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
880 config.direction = DMA_MEM_TO_DEV; 880 config.direction = DMA_MEM_TO_DEV;
881 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 881 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
882 config.dst_addr = port->mapbase + ATMEL_US_THR; 882 config.dst_addr = port->mapbase + ATMEL_US_THR;
883 config.dst_maxburst = 1;
883 884
884 ret = dmaengine_slave_config(atmel_port->chan_tx, 885 ret = dmaengine_slave_config(atmel_port->chan_tx,
885 &config); 886 &config);
@@ -1059,6 +1060,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
1059 config.direction = DMA_DEV_TO_MEM; 1060 config.direction = DMA_DEV_TO_MEM;
1060 config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 1061 config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1061 config.src_addr = port->mapbase + ATMEL_US_RHR; 1062 config.src_addr = port->mapbase + ATMEL_US_RHR;
1063 config.src_maxburst = 1;
1062 1064
1063 ret = dmaengine_slave_config(atmel_port->chan_rx, 1065 ret = dmaengine_slave_config(atmel_port->chan_rx,
1064 &config); 1066 &config);
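Both atmel_serial hunks add a maxburst of 1 to the DMA slave configuration; left at zero, some DMA controller drivers can reject the configuration or pick a burst length unsuited to the single-byte UART registers. A minimal sketch of a complete TX slave configuration for a byte-wide FIFO; the helper name and the register argument are illustrative, the dmaengine API is the one used above.

#include <linux/dmaengine.h>

/* Sketch: one byte per beat, one beat per burst, written to the UART
 * transmit register, which is the shape the patch completes. */
static int setup_uart_tx_dma(struct dma_chan *chan, dma_addr_t thr_reg)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= thr_reg,		/* e.g. mapbase + THR offset */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.dst_maxburst	= 1,			/* the field the patch adds */
	};

	return dmaengine_slave_config(chan, &cfg);
}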
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index 5b73afb9f9f3..137381e649e5 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -346,7 +346,6 @@ static const struct of_device_id of_platform_serial_table[] = {
346 { .compatible = "ibm,qpace-nwp-serial", 346 { .compatible = "ibm,qpace-nwp-serial",
347 .data = (void *)PORT_NWPSERIAL, }, 347 .data = (void *)PORT_NWPSERIAL, },
348#endif 348#endif
349 { .type = "serial", .data = (void *)PORT_UNKNOWN, },
350 { /* end of list */ }, 349 { /* end of list */ },
351}; 350};
352 351
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index cf08876922f1..a0ae942d9562 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1068,8 +1068,9 @@ static int s3c64xx_serial_startup(struct uart_port *port)
1068 spin_lock_irqsave(&port->lock, flags); 1068 spin_lock_irqsave(&port->lock, flags);
1069 1069
1070 ufcon = rd_regl(port, S3C2410_UFCON); 1070 ufcon = rd_regl(port, S3C2410_UFCON);
1071 ufcon |= S3C2410_UFCON_RESETRX | S3C2410_UFCON_RESETTX | 1071 ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8;
1072 S5PV210_UFCON_RXTRIG8; 1072 if (!uart_console(port))
1073 ufcon |= S3C2410_UFCON_RESETTX;
1073 wr_regl(port, S3C2410_UFCON, ufcon); 1074 wr_regl(port, S3C2410_UFCON, ufcon);
1074 1075
1075 enable_rx_pio(ourport); 1076 enable_rx_pio(ourport);
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index eb5b03be9dfd..0b7bb12dfc68 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1770,7 +1770,7 @@ static const struct file_operations uart_proc_fops = {
1770 * @port: the port to write the message 1770 * @port: the port to write the message
1771 * @s: array of characters 1771 * @s: array of characters
1772 * @count: number of characters in string to write 1772 * @count: number of characters in string to write
1773 * @write: function to write character to port 1773 * @putchar: function to write character to port
1774 */ 1774 */
1775void uart_console_write(struct uart_port *port, const char *s, 1775void uart_console_write(struct uart_port *port, const char *s,
1776 unsigned int count, 1776 unsigned int count,
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 708eead850b0..b1c6bd3d483f 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -632,7 +632,8 @@ MODULE_DEVICE_TABLE(of, ulite_of_match);
632 632
633static int ulite_probe(struct platform_device *pdev) 633static int ulite_probe(struct platform_device *pdev)
634{ 634{
635 struct resource *res, *res2; 635 struct resource *res;
636 int irq;
636 int id = pdev->id; 637 int id = pdev->id;
637#ifdef CONFIG_OF 638#ifdef CONFIG_OF
638 const __be32 *prop; 639 const __be32 *prop;
@@ -646,11 +647,11 @@ static int ulite_probe(struct platform_device *pdev)
646 if (!res) 647 if (!res)
647 return -ENODEV; 648 return -ENODEV;
648 649
649 res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 650 irq = platform_get_irq(pdev, 0);
650 if (!res2) 651 if (irq <= 0)
651 return -ENODEV; 652 return -ENXIO;
652 653
653 return ulite_assign(&pdev->dev, id, res->start, res2->start); 654 return ulite_assign(&pdev->dev, id, res->start, irq);
654} 655}
655 656
656static int ulite_remove(struct platform_device *pdev) 657static int ulite_remove(struct platform_device *pdev)
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index f218ec658f5d..3ddbac767db3 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1331,9 +1331,9 @@ static SIMPLE_DEV_PM_OPS(cdns_uart_dev_pm_ops, cdns_uart_suspend,
1331 */ 1331 */
1332static int cdns_uart_probe(struct platform_device *pdev) 1332static int cdns_uart_probe(struct platform_device *pdev)
1333{ 1333{
1334 int rc, id; 1334 int rc, id, irq;
1335 struct uart_port *port; 1335 struct uart_port *port;
1336 struct resource *res, *res2; 1336 struct resource *res;
1337 struct cdns_uart *cdns_uart_data; 1337 struct cdns_uart *cdns_uart_data;
1338 1338
1339 cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data), 1339 cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data),
@@ -1380,9 +1380,9 @@ static int cdns_uart_probe(struct platform_device *pdev)
1380 goto err_out_clk_disable; 1380 goto err_out_clk_disable;
1381 } 1381 }
1382 1382
1383 res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1383 irq = platform_get_irq(pdev, 0);
1384 if (!res2) { 1384 if (irq <= 0) {
1385 rc = -ENODEV; 1385 rc = -ENXIO;
1386 goto err_out_clk_disable; 1386 goto err_out_clk_disable;
1387 } 1387 }
1388 1388
@@ -1411,7 +1411,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
1411 * and triggers invocation of the config_port() entry point. 1411 * and triggers invocation of the config_port() entry point.
1412 */ 1412 */
1413 port->mapbase = res->start; 1413 port->mapbase = res->start;
1414 port->irq = res2->start; 1414 port->irq = irq;
1415 port->dev = &pdev->dev; 1415 port->dev = &pdev->dev;
1416 port->uartclk = clk_get_rate(cdns_uart_data->uartclk); 1416 port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
1417 port->private_data = cdns_uart_data; 1417 port->private_data = cdns_uart_data;
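Both ulite_probe() above and cdns_uart_probe() here switch from platform_get_resource(IORESOURCE_IRQ, 0) to platform_get_irq(), which also covers interrupts that are described only in the device tree and mapped at lookup time. A minimal probe sketch of that lookup; my_setup() is a hypothetical driver-specific hook, the platform_* calls are the ones used above.

#include <linux/platform_device.h>

static int my_setup(struct device *dev, resource_size_t base, int irq)
{
	return 0;			/* hypothetical stand-in for the driver's own init */
}

static int my_probe(struct platform_device *pdev)
{
	struct resource *res;
	int irq;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	irq = platform_get_irq(pdev, 0);	/* resolves DT interrupts too */
	if (irq <= 0)
		return -ENXIO;

	return my_setup(&pdev->dev, res->start, irq);
}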
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 632fc8152061..8e53fe469664 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -536,7 +536,7 @@ EXPORT_SYMBOL(tty_termios_hw_change);
536 * Locking: termios_rwsem 536 * Locking: termios_rwsem
537 */ 537 */
538 538
539static int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios) 539int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
540{ 540{
541 struct ktermios old_termios; 541 struct ktermios old_termios;
542 struct tty_ldisc *ld; 542 struct tty_ldisc *ld;
@@ -569,6 +569,7 @@ static int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
569 up_write(&tty->termios_rwsem); 569 up_write(&tty->termios_rwsem);
570 return 0; 570 return 0;
571} 571}
572EXPORT_SYMBOL_GPL(tty_set_termios);
572 573
573/** 574/**
574 * set_termios - set termios values for a tty 575 * set_termios - set termios values for a tty
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index edba5348be18..6b486a36863c 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -65,8 +65,8 @@
65#define USBOTGSS_IRQENABLE_SET_MISC 0x003c 65#define USBOTGSS_IRQENABLE_SET_MISC 0x003c
66#define USBOTGSS_IRQENABLE_CLR_MISC 0x0040 66#define USBOTGSS_IRQENABLE_CLR_MISC 0x0040
67#define USBOTGSS_IRQMISC_OFFSET 0x03fc 67#define USBOTGSS_IRQMISC_OFFSET 0x03fc
68#define USBOTGSS_UTMI_OTG_CTRL 0x0080 68#define USBOTGSS_UTMI_OTG_STATUS 0x0080
69#define USBOTGSS_UTMI_OTG_STATUS 0x0084 69#define USBOTGSS_UTMI_OTG_CTRL 0x0084
70#define USBOTGSS_UTMI_OTG_OFFSET 0x0480 70#define USBOTGSS_UTMI_OTG_OFFSET 0x0480
71#define USBOTGSS_TXFIFO_DEPTH 0x0508 71#define USBOTGSS_TXFIFO_DEPTH 0x0508
72#define USBOTGSS_RXFIFO_DEPTH 0x050c 72#define USBOTGSS_RXFIFO_DEPTH 0x050c
@@ -98,20 +98,20 @@
98#define USBOTGSS_IRQMISC_DISCHRGVBUS_FALL (1 << 3) 98#define USBOTGSS_IRQMISC_DISCHRGVBUS_FALL (1 << 3)
99#define USBOTGSS_IRQMISC_IDPULLUP_FALL (1 << 0) 99#define USBOTGSS_IRQMISC_IDPULLUP_FALL (1 << 0)
100 100
101/* UTMI_OTG_CTRL REGISTER */
102#define USBOTGSS_UTMI_OTG_CTRL_DRVVBUS (1 << 5)
103#define USBOTGSS_UTMI_OTG_CTRL_CHRGVBUS (1 << 4)
104#define USBOTGSS_UTMI_OTG_CTRL_DISCHRGVBUS (1 << 3)
105#define USBOTGSS_UTMI_OTG_CTRL_IDPULLUP (1 << 0)
106
107/* UTMI_OTG_STATUS REGISTER */ 101/* UTMI_OTG_STATUS REGISTER */
108#define USBOTGSS_UTMI_OTG_STATUS_SW_MODE (1 << 31) 102#define USBOTGSS_UTMI_OTG_STATUS_DRVVBUS (1 << 5)
109#define USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT (1 << 9) 103#define USBOTGSS_UTMI_OTG_STATUS_CHRGVBUS (1 << 4)
110#define USBOTGSS_UTMI_OTG_STATUS_TXBITSTUFFENABLE (1 << 8) 104#define USBOTGSS_UTMI_OTG_STATUS_DISCHRGVBUS (1 << 3)
111#define USBOTGSS_UTMI_OTG_STATUS_IDDIG (1 << 4) 105#define USBOTGSS_UTMI_OTG_STATUS_IDPULLUP (1 << 0)
112#define USBOTGSS_UTMI_OTG_STATUS_SESSEND (1 << 3) 106
113#define USBOTGSS_UTMI_OTG_STATUS_SESSVALID (1 << 2) 107/* UTMI_OTG_CTRL REGISTER */
114#define USBOTGSS_UTMI_OTG_STATUS_VBUSVALID (1 << 1) 108#define USBOTGSS_UTMI_OTG_CTRL_SW_MODE (1 << 31)
109#define USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT (1 << 9)
110#define USBOTGSS_UTMI_OTG_CTRL_TXBITSTUFFENABLE (1 << 8)
111#define USBOTGSS_UTMI_OTG_CTRL_IDDIG (1 << 4)
112#define USBOTGSS_UTMI_OTG_CTRL_SESSEND (1 << 3)
113#define USBOTGSS_UTMI_OTG_CTRL_SESSVALID (1 << 2)
114#define USBOTGSS_UTMI_OTG_CTRL_VBUSVALID (1 << 1)
115 115
116struct dwc3_omap { 116struct dwc3_omap {
117 struct device *dev; 117 struct device *dev;
@@ -119,7 +119,7 @@ struct dwc3_omap {
119 int irq; 119 int irq;
120 void __iomem *base; 120 void __iomem *base;
121 121
122 u32 utmi_otg_status; 122 u32 utmi_otg_ctrl;
123 u32 utmi_otg_offset; 123 u32 utmi_otg_offset;
124 u32 irqmisc_offset; 124 u32 irqmisc_offset;
125 u32 irq_eoi_offset; 125 u32 irq_eoi_offset;
@@ -153,15 +153,15 @@ static inline void dwc3_omap_writel(void __iomem *base, u32 offset, u32 value)
153 writel(value, base + offset); 153 writel(value, base + offset);
154} 154}
155 155
156static u32 dwc3_omap_read_utmi_status(struct dwc3_omap *omap) 156static u32 dwc3_omap_read_utmi_ctrl(struct dwc3_omap *omap)
157{ 157{
158 return dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_STATUS + 158 return dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_CTRL +
159 omap->utmi_otg_offset); 159 omap->utmi_otg_offset);
160} 160}
161 161
162static void dwc3_omap_write_utmi_status(struct dwc3_omap *omap, u32 value) 162static void dwc3_omap_write_utmi_ctrl(struct dwc3_omap *omap, u32 value)
163{ 163{
164 dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_STATUS + 164 dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_CTRL +
165 omap->utmi_otg_offset, value); 165 omap->utmi_otg_offset, value);
166 166
167} 167}
@@ -235,25 +235,25 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
235 } 235 }
236 } 236 }
237 237
238 val = dwc3_omap_read_utmi_status(omap); 238 val = dwc3_omap_read_utmi_ctrl(omap);
239 val &= ~(USBOTGSS_UTMI_OTG_STATUS_IDDIG 239 val &= ~(USBOTGSS_UTMI_OTG_CTRL_IDDIG
240 | USBOTGSS_UTMI_OTG_STATUS_VBUSVALID 240 | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
241 | USBOTGSS_UTMI_OTG_STATUS_SESSEND); 241 | USBOTGSS_UTMI_OTG_CTRL_SESSEND);
242 val |= USBOTGSS_UTMI_OTG_STATUS_SESSVALID 242 val |= USBOTGSS_UTMI_OTG_CTRL_SESSVALID
243 | USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT; 243 | USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT;
244 dwc3_omap_write_utmi_status(omap, val); 244 dwc3_omap_write_utmi_ctrl(omap, val);
245 break; 245 break;
246 246
247 case OMAP_DWC3_VBUS_VALID: 247 case OMAP_DWC3_VBUS_VALID:
248 dev_dbg(omap->dev, "VBUS Connect\n"); 248 dev_dbg(omap->dev, "VBUS Connect\n");
249 249
250 val = dwc3_omap_read_utmi_status(omap); 250 val = dwc3_omap_read_utmi_ctrl(omap);
251 val &= ~USBOTGSS_UTMI_OTG_STATUS_SESSEND; 251 val &= ~USBOTGSS_UTMI_OTG_CTRL_SESSEND;
252 val |= USBOTGSS_UTMI_OTG_STATUS_IDDIG 252 val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG
253 | USBOTGSS_UTMI_OTG_STATUS_VBUSVALID 253 | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
254 | USBOTGSS_UTMI_OTG_STATUS_SESSVALID 254 | USBOTGSS_UTMI_OTG_CTRL_SESSVALID
255 | USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT; 255 | USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT;
256 dwc3_omap_write_utmi_status(omap, val); 256 dwc3_omap_write_utmi_ctrl(omap, val);
257 break; 257 break;
258 258
259 case OMAP_DWC3_ID_FLOAT: 259 case OMAP_DWC3_ID_FLOAT:
@@ -263,13 +263,13 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
263 case OMAP_DWC3_VBUS_OFF: 263 case OMAP_DWC3_VBUS_OFF:
264 dev_dbg(omap->dev, "VBUS Disconnect\n"); 264 dev_dbg(omap->dev, "VBUS Disconnect\n");
265 265
266 val = dwc3_omap_read_utmi_status(omap); 266 val = dwc3_omap_read_utmi_ctrl(omap);
267 val &= ~(USBOTGSS_UTMI_OTG_STATUS_SESSVALID 267 val &= ~(USBOTGSS_UTMI_OTG_CTRL_SESSVALID
268 | USBOTGSS_UTMI_OTG_STATUS_VBUSVALID 268 | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
269 | USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT); 269 | USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT);
270 val |= USBOTGSS_UTMI_OTG_STATUS_SESSEND 270 val |= USBOTGSS_UTMI_OTG_CTRL_SESSEND
271 | USBOTGSS_UTMI_OTG_STATUS_IDDIG; 271 | USBOTGSS_UTMI_OTG_CTRL_IDDIG;
272 dwc3_omap_write_utmi_status(omap, val); 272 dwc3_omap_write_utmi_ctrl(omap, val);
273 break; 273 break;
274 274
275 default: 275 default:
@@ -422,22 +422,22 @@ static void dwc3_omap_set_utmi_mode(struct dwc3_omap *omap)
422 struct device_node *node = omap->dev->of_node; 422 struct device_node *node = omap->dev->of_node;
423 int utmi_mode = 0; 423 int utmi_mode = 0;
424 424
425 reg = dwc3_omap_read_utmi_status(omap); 425 reg = dwc3_omap_read_utmi_ctrl(omap);
426 426
427 of_property_read_u32(node, "utmi-mode", &utmi_mode); 427 of_property_read_u32(node, "utmi-mode", &utmi_mode);
428 428
429 switch (utmi_mode) { 429 switch (utmi_mode) {
430 case DWC3_OMAP_UTMI_MODE_SW: 430 case DWC3_OMAP_UTMI_MODE_SW:
431 reg |= USBOTGSS_UTMI_OTG_STATUS_SW_MODE; 431 reg |= USBOTGSS_UTMI_OTG_CTRL_SW_MODE;
432 break; 432 break;
433 case DWC3_OMAP_UTMI_MODE_HW: 433 case DWC3_OMAP_UTMI_MODE_HW:
434 reg &= ~USBOTGSS_UTMI_OTG_STATUS_SW_MODE; 434 reg &= ~USBOTGSS_UTMI_OTG_CTRL_SW_MODE;
435 break; 435 break;
436 default: 436 default:
437 dev_dbg(omap->dev, "UNKNOWN utmi mode %d\n", utmi_mode); 437 dev_dbg(omap->dev, "UNKNOWN utmi mode %d\n", utmi_mode);
438 } 438 }
439 439
440 dwc3_omap_write_utmi_status(omap, reg); 440 dwc3_omap_write_utmi_ctrl(omap, reg);
441} 441}
442 442
443static int dwc3_omap_extcon_register(struct dwc3_omap *omap) 443static int dwc3_omap_extcon_register(struct dwc3_omap *omap)
@@ -614,7 +614,7 @@ static int dwc3_omap_suspend(struct device *dev)
614{ 614{
615 struct dwc3_omap *omap = dev_get_drvdata(dev); 615 struct dwc3_omap *omap = dev_get_drvdata(dev);
616 616
617 omap->utmi_otg_status = dwc3_omap_read_utmi_status(omap); 617 omap->utmi_otg_ctrl = dwc3_omap_read_utmi_ctrl(omap);
618 dwc3_omap_disable_irqs(omap); 618 dwc3_omap_disable_irqs(omap);
619 619
620 return 0; 620 return 0;
@@ -624,7 +624,7 @@ static int dwc3_omap_resume(struct device *dev)
624{ 624{
625 struct dwc3_omap *omap = dev_get_drvdata(dev); 625 struct dwc3_omap *omap = dev_get_drvdata(dev);
626 626
627 dwc3_omap_write_utmi_status(omap, omap->utmi_otg_status); 627 dwc3_omap_write_utmi_ctrl(omap, omap->utmi_otg_ctrl);
628 dwc3_omap_enable_irqs(omap); 628 dwc3_omap_enable_irqs(omap);
629 629
630 pm_runtime_disable(dev); 630 pm_runtime_disable(dev);
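The dwc3-omap change is a rename-and-swap: the register at offset 0x0080 is really UTMI_OTG_STATUS and 0x0084 is UTMI_OTG_CTRL, so every read-modify-write that used to name "STATUS" now targets CTRL, with the bit macros renamed to match. A hedged sketch of the read-modify-write shape those helpers share; the offsets and the SW_MODE bit are taken from the hunk, the function name is illustrative.

#include <linux/io.h>
#include <linux/types.h>

#define UTMI_OTG_STATUS		0x0080	/* read-only status, per the fix */
#define UTMI_OTG_CTRL		0x0084	/* the register software actually drives */
#define UTMI_OTG_CTRL_SW_MODE	(1u << 31)

/* Sketch: flip SW_MODE with a read-modify-write on CTRL, the same shape
 * as dwc3_omap_set_utmi_mode() above. */
static void set_sw_mode(void __iomem *base, bool enable)
{
	u32 reg = readl(base + UTMI_OTG_CTRL);

	if (enable)
		reg |= UTMI_OTG_CTRL_SW_MODE;
	else
		reg &= ~UTMI_OTG_CTRL_SW_MODE;

	writel(reg, base + UTMI_OTG_CTRL);
}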
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index c42765b3a060..0495c94a23d7 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1295,6 +1295,7 @@ static void purge_configs_funcs(struct gadget_info *gi)
1295 } 1295 }
1296 } 1296 }
1297 c->next_interface_id = 0; 1297 c->next_interface_id = 0;
1298 memset(c->interface, 0, sizeof(c->interface));
1298 c->superspeed = 0; 1299 c->superspeed = 0;
1299 c->highspeed = 0; 1300 c->highspeed = 0;
1300 c->fullspeed = 0; 1301 c->fullspeed = 0;
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 13dfc9915b1d..f7f35a36c09a 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -437,12 +437,20 @@ static int hidg_setup(struct usb_function *f,
437 | USB_REQ_GET_DESCRIPTOR): 437 | USB_REQ_GET_DESCRIPTOR):
438 switch (value >> 8) { 438 switch (value >> 8) {
439 case HID_DT_HID: 439 case HID_DT_HID:
440 {
441 struct hid_descriptor hidg_desc_copy = hidg_desc;
442
440 VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n"); 443 VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n");
444 hidg_desc_copy.desc[0].bDescriptorType = HID_DT_REPORT;
445 hidg_desc_copy.desc[0].wDescriptorLength =
446 cpu_to_le16(hidg->report_desc_length);
447
441 length = min_t(unsigned short, length, 448 length = min_t(unsigned short, length,
442 hidg_desc.bLength); 449 hidg_desc_copy.bLength);
443 memcpy(req->buf, &hidg_desc, length); 450 memcpy(req->buf, &hidg_desc_copy, length);
444 goto respond; 451 goto respond;
445 break; 452 break;
453 }
446 case HID_DT_REPORT: 454 case HID_DT_REPORT:
447 VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n"); 455 VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n");
448 length = min_t(unsigned short, length, 456 length = min_t(unsigned short, length,
@@ -632,6 +640,10 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
632 hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); 640 hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
633 hidg_hs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); 641 hidg_hs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
634 hidg_fs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); 642 hidg_fs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
643 /*
644 * We can use hidg_desc struct here but we should not relay
645 * that its content won't change after returning from this function.
646 */
635 hidg_desc.desc[0].bDescriptorType = HID_DT_REPORT; 647 hidg_desc.desc[0].bDescriptorType = HID_DT_REPORT;
636 hidg_desc.desc[0].wDescriptorLength = 648 hidg_desc.desc[0].wDescriptorLength =
637 cpu_to_le16(hidg->report_desc_length); 649 cpu_to_le16(hidg->report_desc_length);
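Rather than patching the shared hidg_desc while answering a control request (other code may still be reading it, as the new comment warns), the handler above fills in a stack copy and sends that. A stand-alone sketch of the copy-then-modify pattern; the descriptor struct is a simplified stand-in, not the gadget's real hid_descriptor.

#include <stdint.h>
#include <string.h>

struct my_desc {
	uint8_t  bLength;
	uint8_t  bDescriptorType;
	uint16_t wDescriptorLength;
};

static struct my_desc shared_template = { sizeof(struct my_desc), 0x21, 0 };

/* Never scribble on the shared template while building a response:
 * copy it, patch the copy, hand out the copy. */
size_t fill_response(void *buf, size_t maxlen, uint16_t report_len)
{
	struct my_desc copy = shared_template;	/* private, per-request copy */
	size_t len;

	copy.bDescriptorType = 0x22;		/* e.g. a report descriptor */
	copy.wDescriptorLength = report_len;

	len = maxlen < copy.bLength ? maxlen : copy.bLength;
	memcpy(buf, &copy, len);
	return len;
}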
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 89179ab20c10..7ee057930ae7 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -113,6 +113,7 @@ struct gs_port {
113 int write_allocated; 113 int write_allocated;
114 struct gs_buf port_write_buf; 114 struct gs_buf port_write_buf;
115 wait_queue_head_t drain_wait; /* wait while writes drain */ 115 wait_queue_head_t drain_wait; /* wait while writes drain */
116 bool write_busy;
116 117
117 /* REVISIT this state ... */ 118 /* REVISIT this state ... */
118 struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */ 119 struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
@@ -363,7 +364,7 @@ __acquires(&port->port_lock)
363 int status = 0; 364 int status = 0;
364 bool do_tty_wake = false; 365 bool do_tty_wake = false;
365 366
366 while (!list_empty(pool)) { 367 while (!port->write_busy && !list_empty(pool)) {
367 struct usb_request *req; 368 struct usb_request *req;
368 int len; 369 int len;
369 370
@@ -393,9 +394,11 @@ __acquires(&port->port_lock)
393 * NOTE that we may keep sending data for a while after 394 * NOTE that we may keep sending data for a while after
394 * the TTY closed (dev->ioport->port_tty is NULL). 395 * the TTY closed (dev->ioport->port_tty is NULL).
395 */ 396 */
397 port->write_busy = true;
396 spin_unlock(&port->port_lock); 398 spin_unlock(&port->port_lock);
397 status = usb_ep_queue(in, req, GFP_ATOMIC); 399 status = usb_ep_queue(in, req, GFP_ATOMIC);
398 spin_lock(&port->port_lock); 400 spin_lock(&port->port_lock);
401 port->write_busy = false;
399 402
400 if (status) { 403 if (status) {
401 pr_debug("%s: %s %s err %d\n", 404 pr_debug("%s: %s %s err %d\n",
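The new write_busy flag closes a re-entrancy window: gs_start_tx() drops port_lock around usb_ep_queue(), and the request's completion callback can call back into gs_start_tx() before the first caller retakes the lock. A stand-alone sketch of the guard, with a pthread mutex standing in for the spinlock and trivial stubs for the queue and data-pending checks; all names are illustrative.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;
static bool write_busy;
static int remaining = 2;		/* pretend two buffers are pending */

static bool more_data_pending(void) { return remaining > 0; }
static int queue_one_request(void) { remaining--; return 0; }

/* Sketch of the guard added above: a re-entrant call sees write_busy
 * set and bails out instead of racing over the request pool. */
static void start_tx(void)
{
	pthread_mutex_lock(&port_lock);
	while (!write_busy && more_data_pending()) {
		write_busy = true;
		pthread_mutex_unlock(&port_lock);
		queue_one_request();	/* lock dropped, like usb_ep_queue() */
		pthread_mutex_lock(&port_lock);
		write_busy = false;
	}
	pthread_mutex_unlock(&port_lock);
}

int main(void)
{
	start_tx();
	return 0;
}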
diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
index c30b7b572465..1194b09ae746 100644
--- a/drivers/usb/gadget/legacy/acm_ms.c
+++ b/drivers/usb/gadget/legacy/acm_ms.c
@@ -121,7 +121,7 @@ static struct usb_function *f_msg;
121/* 121/*
122 * We _always_ have both ACM and mass storage functions. 122 * We _always_ have both ACM and mass storage functions.
123 */ 123 */
124static int __init acm_ms_do_config(struct usb_configuration *c) 124static int acm_ms_do_config(struct usb_configuration *c)
125{ 125{
126 struct fsg_opts *opts; 126 struct fsg_opts *opts;
127 int status; 127 int status;
@@ -174,7 +174,7 @@ static struct usb_configuration acm_ms_config_driver = {
174 174
175/*-------------------------------------------------------------------------*/ 175/*-------------------------------------------------------------------------*/
176 176
177static int __init acm_ms_bind(struct usb_composite_dev *cdev) 177static int acm_ms_bind(struct usb_composite_dev *cdev)
178{ 178{
179 struct usb_gadget *gadget = cdev->gadget; 179 struct usb_gadget *gadget = cdev->gadget;
180 struct fsg_opts *opts; 180 struct fsg_opts *opts;
@@ -249,7 +249,7 @@ fail_get_msg:
249 return status; 249 return status;
250} 250}
251 251
252static int __exit acm_ms_unbind(struct usb_composite_dev *cdev) 252static int acm_ms_unbind(struct usb_composite_dev *cdev)
253{ 253{
254 usb_put_function(f_msg); 254 usb_put_function(f_msg);
255 usb_put_function_instance(fi_msg); 255 usb_put_function_instance(fi_msg);
@@ -258,13 +258,13 @@ static int __exit acm_ms_unbind(struct usb_composite_dev *cdev)
258 return 0; 258 return 0;
259} 259}
260 260
261static __refdata struct usb_composite_driver acm_ms_driver = { 261static struct usb_composite_driver acm_ms_driver = {
262 .name = "g_acm_ms", 262 .name = "g_acm_ms",
263 .dev = &device_desc, 263 .dev = &device_desc,
264 .max_speed = USB_SPEED_SUPER, 264 .max_speed = USB_SPEED_SUPER,
265 .strings = dev_strings, 265 .strings = dev_strings,
266 .bind = acm_ms_bind, 266 .bind = acm_ms_bind,
267 .unbind = __exit_p(acm_ms_unbind), 267 .unbind = acm_ms_unbind,
268}; 268};
269 269
270module_usb_composite_driver(acm_ms_driver); 270module_usb_composite_driver(acm_ms_driver);
diff --git a/drivers/usb/gadget/legacy/audio.c b/drivers/usb/gadget/legacy/audio.c
index f46a3956e43d..f289caf18a45 100644
--- a/drivers/usb/gadget/legacy/audio.c
+++ b/drivers/usb/gadget/legacy/audio.c
@@ -167,7 +167,7 @@ static const struct usb_descriptor_header *otg_desc[] = {
167 167
168/*-------------------------------------------------------------------------*/ 168/*-------------------------------------------------------------------------*/
169 169
170static int __init audio_do_config(struct usb_configuration *c) 170static int audio_do_config(struct usb_configuration *c)
171{ 171{
172 int status; 172 int status;
173 173
@@ -216,7 +216,7 @@ static struct usb_configuration audio_config_driver = {
216 216
217/*-------------------------------------------------------------------------*/ 217/*-------------------------------------------------------------------------*/
218 218
219static int __init audio_bind(struct usb_composite_dev *cdev) 219static int audio_bind(struct usb_composite_dev *cdev)
220{ 220{
221#ifndef CONFIG_GADGET_UAC1 221#ifndef CONFIG_GADGET_UAC1
222 struct f_uac2_opts *uac2_opts; 222 struct f_uac2_opts *uac2_opts;
@@ -276,7 +276,7 @@ fail:
276 return status; 276 return status;
277} 277}
278 278
279static int __exit audio_unbind(struct usb_composite_dev *cdev) 279static int audio_unbind(struct usb_composite_dev *cdev)
280{ 280{
281#ifdef CONFIG_GADGET_UAC1 281#ifdef CONFIG_GADGET_UAC1
282 if (!IS_ERR_OR_NULL(f_uac1)) 282 if (!IS_ERR_OR_NULL(f_uac1))
@@ -292,13 +292,13 @@ static int __exit audio_unbind(struct usb_composite_dev *cdev)
292 return 0; 292 return 0;
293} 293}
294 294
295static __refdata struct usb_composite_driver audio_driver = { 295static struct usb_composite_driver audio_driver = {
296 .name = "g_audio", 296 .name = "g_audio",
297 .dev = &device_desc, 297 .dev = &device_desc,
298 .strings = audio_strings, 298 .strings = audio_strings,
299 .max_speed = USB_SPEED_HIGH, 299 .max_speed = USB_SPEED_HIGH,
300 .bind = audio_bind, 300 .bind = audio_bind,
301 .unbind = __exit_p(audio_unbind), 301 .unbind = audio_unbind,
302}; 302};
303 303
304module_usb_composite_driver(audio_driver); 304module_usb_composite_driver(audio_driver);
diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c
index 2e85d9473478..afd3e37921a7 100644
--- a/drivers/usb/gadget/legacy/cdc2.c
+++ b/drivers/usb/gadget/legacy/cdc2.c
@@ -104,7 +104,7 @@ static struct usb_function_instance *fi_ecm;
104/* 104/*
105 * We _always_ have both CDC ECM and CDC ACM functions. 105 * We _always_ have both CDC ECM and CDC ACM functions.
106 */ 106 */
107static int __init cdc_do_config(struct usb_configuration *c) 107static int cdc_do_config(struct usb_configuration *c)
108{ 108{
109 int status; 109 int status;
110 110
@@ -153,7 +153,7 @@ static struct usb_configuration cdc_config_driver = {
153 153
154/*-------------------------------------------------------------------------*/ 154/*-------------------------------------------------------------------------*/
155 155
156static int __init cdc_bind(struct usb_composite_dev *cdev) 156static int cdc_bind(struct usb_composite_dev *cdev)
157{ 157{
158 struct usb_gadget *gadget = cdev->gadget; 158 struct usb_gadget *gadget = cdev->gadget;
159 struct f_ecm_opts *ecm_opts; 159 struct f_ecm_opts *ecm_opts;
@@ -211,7 +211,7 @@ fail:
211 return status; 211 return status;
212} 212}
213 213
214static int __exit cdc_unbind(struct usb_composite_dev *cdev) 214static int cdc_unbind(struct usb_composite_dev *cdev)
215{ 215{
216 usb_put_function(f_acm); 216 usb_put_function(f_acm);
217 usb_put_function_instance(fi_serial); 217 usb_put_function_instance(fi_serial);
@@ -222,13 +222,13 @@ static int __exit cdc_unbind(struct usb_composite_dev *cdev)
222 return 0; 222 return 0;
223} 223}
224 224
225static __refdata struct usb_composite_driver cdc_driver = { 225static struct usb_composite_driver cdc_driver = {
226 .name = "g_cdc", 226 .name = "g_cdc",
227 .dev = &device_desc, 227 .dev = &device_desc,
228 .strings = dev_strings, 228 .strings = dev_strings,
229 .max_speed = USB_SPEED_HIGH, 229 .max_speed = USB_SPEED_HIGH,
230 .bind = cdc_bind, 230 .bind = cdc_bind,
231 .unbind = __exit_p(cdc_unbind), 231 .unbind = cdc_unbind,
232}; 232};
233 233
234module_usb_composite_driver(cdc_driver); 234module_usb_composite_driver(cdc_driver);
diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c
index 633683a72a11..204b10b1a7e7 100644
--- a/drivers/usb/gadget/legacy/dbgp.c
+++ b/drivers/usb/gadget/legacy/dbgp.c
@@ -284,7 +284,7 @@ fail_1:
284 return -ENODEV; 284 return -ENODEV;
285} 285}
286 286
287static int __init dbgp_bind(struct usb_gadget *gadget, 287static int dbgp_bind(struct usb_gadget *gadget,
288 struct usb_gadget_driver *driver) 288 struct usb_gadget_driver *driver)
289{ 289{
290 int err, stp; 290 int err, stp;
@@ -406,7 +406,7 @@ fail:
406 return err; 406 return err;
407} 407}
408 408
409static __refdata struct usb_gadget_driver dbgp_driver = { 409static struct usb_gadget_driver dbgp_driver = {
410 .function = "dbgp", 410 .function = "dbgp",
411 .max_speed = USB_SPEED_HIGH, 411 .max_speed = USB_SPEED_HIGH,
412 .bind = dbgp_bind, 412 .bind = dbgp_bind,
diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c
index c5fdc61cdc4a..a3323dca218f 100644
--- a/drivers/usb/gadget/legacy/ether.c
+++ b/drivers/usb/gadget/legacy/ether.c
@@ -222,7 +222,7 @@ static struct usb_function *f_rndis;
222 * the first one present. That's to make Microsoft's drivers happy, 222 * the first one present. That's to make Microsoft's drivers happy,
223 * and to follow DOCSIS 1.0 (cable modem standard). 223 * and to follow DOCSIS 1.0 (cable modem standard).
224 */ 224 */
225static int __init rndis_do_config(struct usb_configuration *c) 225static int rndis_do_config(struct usb_configuration *c)
226{ 226{
227 int status; 227 int status;
228 228
@@ -264,7 +264,7 @@ MODULE_PARM_DESC(use_eem, "use CDC EEM mode");
264/* 264/*
265 * We _always_ have an ECM, CDC Subset, or EEM configuration. 265 * We _always_ have an ECM, CDC Subset, or EEM configuration.
266 */ 266 */
267static int __init eth_do_config(struct usb_configuration *c) 267static int eth_do_config(struct usb_configuration *c)
268{ 268{
269 int status = 0; 269 int status = 0;
270 270
@@ -318,7 +318,7 @@ static struct usb_configuration eth_config_driver = {
318 318
319/*-------------------------------------------------------------------------*/ 319/*-------------------------------------------------------------------------*/
320 320
321static int __init eth_bind(struct usb_composite_dev *cdev) 321static int eth_bind(struct usb_composite_dev *cdev)
322{ 322{
323 struct usb_gadget *gadget = cdev->gadget; 323 struct usb_gadget *gadget = cdev->gadget;
324 struct f_eem_opts *eem_opts = NULL; 324 struct f_eem_opts *eem_opts = NULL;
@@ -447,7 +447,7 @@ fail:
447 return status; 447 return status;
448} 448}
449 449
450static int __exit eth_unbind(struct usb_composite_dev *cdev) 450static int eth_unbind(struct usb_composite_dev *cdev)
451{ 451{
452 if (has_rndis()) { 452 if (has_rndis()) {
453 usb_put_function(f_rndis); 453 usb_put_function(f_rndis);
@@ -466,13 +466,13 @@ static int __exit eth_unbind(struct usb_composite_dev *cdev)
466 return 0; 466 return 0;
467} 467}
468 468
469static __refdata struct usb_composite_driver eth_driver = { 469static struct usb_composite_driver eth_driver = {
470 .name = "g_ether", 470 .name = "g_ether",
471 .dev = &device_desc, 471 .dev = &device_desc,
472 .strings = dev_strings, 472 .strings = dev_strings,
473 .max_speed = USB_SPEED_SUPER, 473 .max_speed = USB_SPEED_SUPER,
474 .bind = eth_bind, 474 .bind = eth_bind,
475 .unbind = __exit_p(eth_unbind), 475 .unbind = eth_unbind,
476}; 476};
477 477
478module_usb_composite_driver(eth_driver); 478module_usb_composite_driver(eth_driver);
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index b01b88e1b716..7b9ef7e257d2 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -163,7 +163,7 @@ static int gfs_unbind(struct usb_composite_dev *cdev);
163static int gfs_do_config(struct usb_configuration *c); 163static int gfs_do_config(struct usb_configuration *c);
164 164
165 165
166static __refdata struct usb_composite_driver gfs_driver = { 166static struct usb_composite_driver gfs_driver = {
167 .name = DRIVER_NAME, 167 .name = DRIVER_NAME,
168 .dev = &gfs_dev_desc, 168 .dev = &gfs_dev_desc,
169 .strings = gfs_dev_strings, 169 .strings = gfs_dev_strings,
diff --git a/drivers/usb/gadget/legacy/gmidi.c b/drivers/usb/gadget/legacy/gmidi.c
index e02a095294ac..da19c486b61e 100644
--- a/drivers/usb/gadget/legacy/gmidi.c
+++ b/drivers/usb/gadget/legacy/gmidi.c
@@ -118,7 +118,7 @@ static struct usb_gadget_strings *dev_strings[] = {
118static struct usb_function_instance *fi_midi; 118static struct usb_function_instance *fi_midi;
119static struct usb_function *f_midi; 119static struct usb_function *f_midi;
120 120
121static int __exit midi_unbind(struct usb_composite_dev *dev) 121static int midi_unbind(struct usb_composite_dev *dev)
122{ 122{
123 usb_put_function(f_midi); 123 usb_put_function(f_midi);
124 usb_put_function_instance(fi_midi); 124 usb_put_function_instance(fi_midi);
@@ -133,7 +133,7 @@ static struct usb_configuration midi_config = {
133 .MaxPower = CONFIG_USB_GADGET_VBUS_DRAW, 133 .MaxPower = CONFIG_USB_GADGET_VBUS_DRAW,
134}; 134};
135 135
136static int __init midi_bind_config(struct usb_configuration *c) 136static int midi_bind_config(struct usb_configuration *c)
137{ 137{
138 int status; 138 int status;
139 139
@@ -150,7 +150,7 @@ static int __init midi_bind_config(struct usb_configuration *c)
150 return 0; 150 return 0;
151} 151}
152 152
153static int __init midi_bind(struct usb_composite_dev *cdev) 153static int midi_bind(struct usb_composite_dev *cdev)
154{ 154{
155 struct f_midi_opts *midi_opts; 155 struct f_midi_opts *midi_opts;
156 int status; 156 int status;
@@ -185,13 +185,13 @@ put:
185 return status; 185 return status;
186} 186}
187 187
188static __refdata struct usb_composite_driver midi_driver = { 188static struct usb_composite_driver midi_driver = {
189 .name = (char *) longname, 189 .name = (char *) longname,
190 .dev = &device_desc, 190 .dev = &device_desc,
191 .strings = dev_strings, 191 .strings = dev_strings,
192 .max_speed = USB_SPEED_HIGH, 192 .max_speed = USB_SPEED_HIGH,
193 .bind = midi_bind, 193 .bind = midi_bind,
194 .unbind = __exit_p(midi_unbind), 194 .unbind = midi_unbind,
195}; 195};
196 196
197module_usb_composite_driver(midi_driver); 197module_usb_composite_driver(midi_driver);
diff --git a/drivers/usb/gadget/legacy/hid.c b/drivers/usb/gadget/legacy/hid.c
index 614b06d80b41..2baa572686c6 100644
--- a/drivers/usb/gadget/legacy/hid.c
+++ b/drivers/usb/gadget/legacy/hid.c
@@ -106,7 +106,7 @@ static struct usb_gadget_strings *dev_strings[] = {
106 106
107/****************************** Configurations ******************************/ 107/****************************** Configurations ******************************/
108 108
109static int __init do_config(struct usb_configuration *c) 109static int do_config(struct usb_configuration *c)
110{ 110{
111 struct hidg_func_node *e, *n; 111 struct hidg_func_node *e, *n;
112 int status = 0; 112 int status = 0;
@@ -147,7 +147,7 @@ static struct usb_configuration config_driver = {
147 147
148/****************************** Gadget Bind ******************************/ 148/****************************** Gadget Bind ******************************/
149 149
150static int __init hid_bind(struct usb_composite_dev *cdev) 150static int hid_bind(struct usb_composite_dev *cdev)
151{ 151{
152 struct usb_gadget *gadget = cdev->gadget; 152 struct usb_gadget *gadget = cdev->gadget;
153 struct list_head *tmp; 153 struct list_head *tmp;
@@ -205,7 +205,7 @@ put:
205 return status; 205 return status;
206} 206}
207 207
208static int __exit hid_unbind(struct usb_composite_dev *cdev) 208static int hid_unbind(struct usb_composite_dev *cdev)
209{ 209{
210 struct hidg_func_node *n; 210 struct hidg_func_node *n;
211 211
@@ -216,7 +216,7 @@ static int __exit hid_unbind(struct usb_composite_dev *cdev)
216 return 0; 216 return 0;
217} 217}
218 218
219static int __init hidg_plat_driver_probe(struct platform_device *pdev) 219static int hidg_plat_driver_probe(struct platform_device *pdev)
220{ 220{
221 struct hidg_func_descriptor *func = dev_get_platdata(&pdev->dev); 221 struct hidg_func_descriptor *func = dev_get_platdata(&pdev->dev);
222 struct hidg_func_node *entry; 222 struct hidg_func_node *entry;
@@ -252,13 +252,13 @@ static int hidg_plat_driver_remove(struct platform_device *pdev)
252/****************************** Some noise ******************************/ 252/****************************** Some noise ******************************/
253 253
254 254
255static __refdata struct usb_composite_driver hidg_driver = { 255static struct usb_composite_driver hidg_driver = {
256 .name = "g_hid", 256 .name = "g_hid",
257 .dev = &device_desc, 257 .dev = &device_desc,
258 .strings = dev_strings, 258 .strings = dev_strings,
259 .max_speed = USB_SPEED_HIGH, 259 .max_speed = USB_SPEED_HIGH,
260 .bind = hid_bind, 260 .bind = hid_bind,
261 .unbind = __exit_p(hid_unbind), 261 .unbind = hid_unbind,
262}; 262};
263 263
264static struct platform_driver hidg_plat_driver = { 264static struct platform_driver hidg_plat_driver = {
diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
index 8e27a8c96444..e7bfb081f111 100644
--- a/drivers/usb/gadget/legacy/mass_storage.c
+++ b/drivers/usb/gadget/legacy/mass_storage.c
@@ -130,7 +130,7 @@ static int msg_thread_exits(struct fsg_common *common)
130 return 0; 130 return 0;
131} 131}
132 132
133static int __init msg_do_config(struct usb_configuration *c) 133static int msg_do_config(struct usb_configuration *c)
134{ 134{
135 struct fsg_opts *opts; 135 struct fsg_opts *opts;
136 int ret; 136 int ret;
@@ -170,7 +170,7 @@ static struct usb_configuration msg_config_driver = {
170 170
171/****************************** Gadget Bind ******************************/ 171/****************************** Gadget Bind ******************************/
172 172
173static int __init msg_bind(struct usb_composite_dev *cdev) 173static int msg_bind(struct usb_composite_dev *cdev)
174{ 174{
175 static const struct fsg_operations ops = { 175 static const struct fsg_operations ops = {
176 .thread_exits = msg_thread_exits, 176 .thread_exits = msg_thread_exits,
@@ -248,7 +248,7 @@ static int msg_unbind(struct usb_composite_dev *cdev)
248 248
249/****************************** Some noise ******************************/ 249/****************************** Some noise ******************************/
250 250
251static __refdata struct usb_composite_driver msg_driver = { 251static struct usb_composite_driver msg_driver = {
252 .name = "g_mass_storage", 252 .name = "g_mass_storage",
253 .dev = &msg_device_desc, 253 .dev = &msg_device_desc,
254 .max_speed = USB_SPEED_SUPER, 254 .max_speed = USB_SPEED_SUPER,
diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c
index 39d27bb343b4..b21b51f0c9fa 100644
--- a/drivers/usb/gadget/legacy/multi.c
+++ b/drivers/usb/gadget/legacy/multi.c
@@ -149,7 +149,7 @@ static struct usb_function *f_acm_rndis;
149static struct usb_function *f_rndis; 149static struct usb_function *f_rndis;
150static struct usb_function *f_msg_rndis; 150static struct usb_function *f_msg_rndis;
151 151
152static __init int rndis_do_config(struct usb_configuration *c) 152static int rndis_do_config(struct usb_configuration *c)
153{ 153{
154 struct fsg_opts *fsg_opts; 154 struct fsg_opts *fsg_opts;
155 int ret; 155 int ret;
@@ -237,7 +237,7 @@ static struct usb_function *f_acm_multi;
237static struct usb_function *f_ecm; 237static struct usb_function *f_ecm;
238static struct usb_function *f_msg_multi; 238static struct usb_function *f_msg_multi;
239 239
240static __init int cdc_do_config(struct usb_configuration *c) 240static int cdc_do_config(struct usb_configuration *c)
241{ 241{
242 struct fsg_opts *fsg_opts; 242 struct fsg_opts *fsg_opts;
243 int ret; 243 int ret;
@@ -466,7 +466,7 @@ fail:
466 return status; 466 return status;
467} 467}
468 468
469static int __exit multi_unbind(struct usb_composite_dev *cdev) 469static int multi_unbind(struct usb_composite_dev *cdev)
470{ 470{
471#ifdef CONFIG_USB_G_MULTI_CDC 471#ifdef CONFIG_USB_G_MULTI_CDC
472 usb_put_function(f_msg_multi); 472 usb_put_function(f_msg_multi);
@@ -497,13 +497,13 @@ static int __exit multi_unbind(struct usb_composite_dev *cdev)
497/****************************** Some noise ******************************/ 497/****************************** Some noise ******************************/
498 498
499 499
500static __refdata struct usb_composite_driver multi_driver = { 500static struct usb_composite_driver multi_driver = {
501 .name = "g_multi", 501 .name = "g_multi",
502 .dev = &device_desc, 502 .dev = &device_desc,
503 .strings = dev_strings, 503 .strings = dev_strings,
504 .max_speed = USB_SPEED_HIGH, 504 .max_speed = USB_SPEED_HIGH,
505 .bind = multi_bind, 505 .bind = multi_bind,
506 .unbind = __exit_p(multi_unbind), 506 .unbind = multi_unbind,
507 .needs_serial = 1, 507 .needs_serial = 1,
508}; 508};
509 509
diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c
index e90e23db2acb..6ce7421412e9 100644
--- a/drivers/usb/gadget/legacy/ncm.c
+++ b/drivers/usb/gadget/legacy/ncm.c
@@ -107,7 +107,7 @@ static struct usb_function *f_ncm;
107 107
108/*-------------------------------------------------------------------------*/ 108/*-------------------------------------------------------------------------*/
109 109
110static int __init ncm_do_config(struct usb_configuration *c) 110static int ncm_do_config(struct usb_configuration *c)
111{ 111{
112 int status; 112 int status;
113 113
@@ -143,7 +143,7 @@ static struct usb_configuration ncm_config_driver = {
143 143
144/*-------------------------------------------------------------------------*/ 144/*-------------------------------------------------------------------------*/
145 145
146static int __init gncm_bind(struct usb_composite_dev *cdev) 146static int gncm_bind(struct usb_composite_dev *cdev)
147{ 147{
148 struct usb_gadget *gadget = cdev->gadget; 148 struct usb_gadget *gadget = cdev->gadget;
149 struct f_ncm_opts *ncm_opts; 149 struct f_ncm_opts *ncm_opts;
@@ -186,7 +186,7 @@ fail:
186 return status; 186 return status;
187} 187}
188 188
189static int __exit gncm_unbind(struct usb_composite_dev *cdev) 189static int gncm_unbind(struct usb_composite_dev *cdev)
190{ 190{
191 if (!IS_ERR_OR_NULL(f_ncm)) 191 if (!IS_ERR_OR_NULL(f_ncm))
192 usb_put_function(f_ncm); 192 usb_put_function(f_ncm);
@@ -195,13 +195,13 @@ static int __exit gncm_unbind(struct usb_composite_dev *cdev)
195 return 0; 195 return 0;
196} 196}
197 197
198static __refdata struct usb_composite_driver ncm_driver = { 198static struct usb_composite_driver ncm_driver = {
199 .name = "g_ncm", 199 .name = "g_ncm",
200 .dev = &device_desc, 200 .dev = &device_desc,
201 .strings = dev_strings, 201 .strings = dev_strings,
202 .max_speed = USB_SPEED_HIGH, 202 .max_speed = USB_SPEED_HIGH,
203 .bind = gncm_bind, 203 .bind = gncm_bind,
204 .unbind = __exit_p(gncm_unbind), 204 .unbind = gncm_unbind,
205}; 205};
206 206
207module_usb_composite_driver(ncm_driver); 207module_usb_composite_driver(ncm_driver);
diff --git a/drivers/usb/gadget/legacy/nokia.c b/drivers/usb/gadget/legacy/nokia.c
index 9b8fd701648c..4bb498a38a1c 100644
--- a/drivers/usb/gadget/legacy/nokia.c
+++ b/drivers/usb/gadget/legacy/nokia.c
@@ -118,7 +118,7 @@ static struct usb_function_instance *fi_obex1;
118static struct usb_function_instance *fi_obex2; 118static struct usb_function_instance *fi_obex2;
119static struct usb_function_instance *fi_phonet; 119static struct usb_function_instance *fi_phonet;
120 120
121static int __init nokia_bind_config(struct usb_configuration *c) 121static int nokia_bind_config(struct usb_configuration *c)
122{ 122{
123 struct usb_function *f_acm; 123 struct usb_function *f_acm;
124 struct usb_function *f_phonet = NULL; 124 struct usb_function *f_phonet = NULL;
@@ -224,7 +224,7 @@ err_get_acm:
224 return status; 224 return status;
225} 225}
226 226
227static int __init nokia_bind(struct usb_composite_dev *cdev) 227static int nokia_bind(struct usb_composite_dev *cdev)
228{ 228{
229 struct usb_gadget *gadget = cdev->gadget; 229 struct usb_gadget *gadget = cdev->gadget;
230 int status; 230 int status;
@@ -307,7 +307,7 @@ err_usb:
307 return status; 307 return status;
308} 308}
309 309
310static int __exit nokia_unbind(struct usb_composite_dev *cdev) 310static int nokia_unbind(struct usb_composite_dev *cdev)
311{ 311{
312 if (!IS_ERR_OR_NULL(f_obex1_cfg2)) 312 if (!IS_ERR_OR_NULL(f_obex1_cfg2))
313 usb_put_function(f_obex1_cfg2); 313 usb_put_function(f_obex1_cfg2);
@@ -338,13 +338,13 @@ static int __exit nokia_unbind(struct usb_composite_dev *cdev)
338 return 0; 338 return 0;
339} 339}
340 340
341static __refdata struct usb_composite_driver nokia_driver = { 341static struct usb_composite_driver nokia_driver = {
342 .name = "g_nokia", 342 .name = "g_nokia",
343 .dev = &device_desc, 343 .dev = &device_desc,
344 .strings = dev_strings, 344 .strings = dev_strings,
345 .max_speed = USB_SPEED_HIGH, 345 .max_speed = USB_SPEED_HIGH,
346 .bind = nokia_bind, 346 .bind = nokia_bind,
347 .unbind = __exit_p(nokia_unbind), 347 .unbind = nokia_unbind,
348}; 348};
349 349
350module_usb_composite_driver(nokia_driver); 350module_usb_composite_driver(nokia_driver);
diff --git a/drivers/usb/gadget/legacy/printer.c b/drivers/usb/gadget/legacy/printer.c
index d5b6ee725a2a..1ce7df1060a5 100644
--- a/drivers/usb/gadget/legacy/printer.c
+++ b/drivers/usb/gadget/legacy/printer.c
@@ -126,7 +126,7 @@ static struct usb_configuration printer_cfg_driver = {
126 .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER, 126 .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
127}; 127};
128 128
129static int __init printer_do_config(struct usb_configuration *c) 129static int printer_do_config(struct usb_configuration *c)
130{ 130{
131 struct usb_gadget *gadget = c->cdev->gadget; 131 struct usb_gadget *gadget = c->cdev->gadget;
132 int status = 0; 132 int status = 0;
@@ -152,7 +152,7 @@ static int __init printer_do_config(struct usb_configuration *c)
152 return status; 152 return status;
153} 153}
154 154
155static int __init printer_bind(struct usb_composite_dev *cdev) 155static int printer_bind(struct usb_composite_dev *cdev)
156{ 156{
157 struct f_printer_opts *opts; 157 struct f_printer_opts *opts;
158 int ret, len; 158 int ret, len;
@@ -191,7 +191,7 @@ static int __init printer_bind(struct usb_composite_dev *cdev)
191 return ret; 191 return ret;
192} 192}
193 193
194static int __exit printer_unbind(struct usb_composite_dev *cdev) 194static int printer_unbind(struct usb_composite_dev *cdev)
195{ 195{
196 usb_put_function(f_printer); 196 usb_put_function(f_printer);
197 usb_put_function_instance(fi_printer); 197 usb_put_function_instance(fi_printer);
@@ -199,7 +199,7 @@ static int __exit printer_unbind(struct usb_composite_dev *cdev)
199 return 0; 199 return 0;
200} 200}
201 201
202static __refdata struct usb_composite_driver printer_driver = { 202static struct usb_composite_driver printer_driver = {
203 .name = shortname, 203 .name = shortname,
204 .dev = &device_desc, 204 .dev = &device_desc,
205 .strings = dev_strings, 205 .strings = dev_strings,
diff --git a/drivers/usb/gadget/legacy/serial.c b/drivers/usb/gadget/legacy/serial.c
index 1f5f978d35d5..8b7528f9b78e 100644
--- a/drivers/usb/gadget/legacy/serial.c
+++ b/drivers/usb/gadget/legacy/serial.c
@@ -174,7 +174,7 @@ out:
174 return ret; 174 return ret;
175} 175}
176 176
177static int __init gs_bind(struct usb_composite_dev *cdev) 177static int gs_bind(struct usb_composite_dev *cdev)
178{ 178{
179 int status; 179 int status;
180 180
@@ -230,7 +230,7 @@ static int gs_unbind(struct usb_composite_dev *cdev)
230 return 0; 230 return 0;
231} 231}
232 232
233static __refdata struct usb_composite_driver gserial_driver = { 233static struct usb_composite_driver gserial_driver = {
234 .name = "g_serial", 234 .name = "g_serial",
235 .dev = &device_desc, 235 .dev = &device_desc,
236 .strings = dev_strings, 236 .strings = dev_strings,
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
index 8b80addc4ce6..f9b4882fce52 100644
--- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
@@ -2397,7 +2397,7 @@ static int usb_target_bind(struct usb_composite_dev *cdev)
2397 return 0; 2397 return 0;
2398} 2398}
2399 2399
2400static __refdata struct usb_composite_driver usbg_driver = { 2400static struct usb_composite_driver usbg_driver = {
2401 .name = "g_target", 2401 .name = "g_target",
2402 .dev = &usbg_device_desc, 2402 .dev = &usbg_device_desc,
2403 .strings = usbg_strings, 2403 .strings = usbg_strings,
diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c
index 04a3da20f742..72c976bf3530 100644
--- a/drivers/usb/gadget/legacy/webcam.c
+++ b/drivers/usb/gadget/legacy/webcam.c
@@ -334,7 +334,7 @@ static const struct uvc_descriptor_header * const uvc_ss_streaming_cls[] = {
334 * USB configuration 334 * USB configuration
335 */ 335 */
336 336
337static int __init 337static int
338webcam_config_bind(struct usb_configuration *c) 338webcam_config_bind(struct usb_configuration *c)
339{ 339{
340 int status = 0; 340 int status = 0;
@@ -358,7 +358,7 @@ static struct usb_configuration webcam_config_driver = {
358 .MaxPower = CONFIG_USB_GADGET_VBUS_DRAW, 358 .MaxPower = CONFIG_USB_GADGET_VBUS_DRAW,
359}; 359};
360 360
361static int /* __init_or_exit */ 361static int
362webcam_unbind(struct usb_composite_dev *cdev) 362webcam_unbind(struct usb_composite_dev *cdev)
363{ 363{
364 if (!IS_ERR_OR_NULL(f_uvc)) 364 if (!IS_ERR_OR_NULL(f_uvc))
@@ -368,7 +368,7 @@ webcam_unbind(struct usb_composite_dev *cdev)
368 return 0; 368 return 0;
369} 369}
370 370
371static int __init 371static int
372webcam_bind(struct usb_composite_dev *cdev) 372webcam_bind(struct usb_composite_dev *cdev)
373{ 373{
374 struct f_uvc_opts *uvc_opts; 374 struct f_uvc_opts *uvc_opts;
@@ -422,7 +422,7 @@ error:
422 * Driver 422 * Driver
423 */ 423 */
424 424
425static __refdata struct usb_composite_driver webcam_driver = { 425static struct usb_composite_driver webcam_driver = {
426 .name = "g_webcam", 426 .name = "g_webcam",
427 .dev = &webcam_device_descriptor, 427 .dev = &webcam_device_descriptor,
428 .strings = webcam_device_strings, 428 .strings = webcam_device_strings,
diff --git a/drivers/usb/gadget/legacy/zero.c b/drivers/usb/gadget/legacy/zero.c
index 5ee95152493c..c986e8addb90 100644
--- a/drivers/usb/gadget/legacy/zero.c
+++ b/drivers/usb/gadget/legacy/zero.c
@@ -272,7 +272,7 @@ static struct usb_function_instance *func_inst_lb;
272module_param_named(qlen, gzero_options.qlen, uint, S_IRUGO|S_IWUSR); 272module_param_named(qlen, gzero_options.qlen, uint, S_IRUGO|S_IWUSR);
273MODULE_PARM_DESC(qlen, "depth of loopback queue"); 273MODULE_PARM_DESC(qlen, "depth of loopback queue");
274 274
275static int __init zero_bind(struct usb_composite_dev *cdev) 275static int zero_bind(struct usb_composite_dev *cdev)
276{ 276{
277 struct f_ss_opts *ss_opts; 277 struct f_ss_opts *ss_opts;
278 struct f_lb_opts *lb_opts; 278 struct f_lb_opts *lb_opts;
@@ -400,7 +400,7 @@ static int zero_unbind(struct usb_composite_dev *cdev)
400 return 0; 400 return 0;
401} 401}
402 402
403static __refdata struct usb_composite_driver zero_driver = { 403static struct usb_composite_driver zero_driver = {
404 .name = "zero", 404 .name = "zero",
405 .dev = &device_desc, 405 .dev = &device_desc,
406 .strings = dev_strings, 406 .strings = dev_strings,
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index 2fbedca3c2b4..fc4226462f8f 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -1942,7 +1942,7 @@ err_unprepare_fclk:
1942 return retval; 1942 return retval;
1943} 1943}
1944 1944
1945static int __exit at91udc_remove(struct platform_device *pdev) 1945static int at91udc_remove(struct platform_device *pdev)
1946{ 1946{
1947 struct at91_udc *udc = platform_get_drvdata(pdev); 1947 struct at91_udc *udc = platform_get_drvdata(pdev);
1948 unsigned long flags; 1948 unsigned long flags;
@@ -2018,7 +2018,7 @@ static int at91udc_resume(struct platform_device *pdev)
2018#endif 2018#endif
2019 2019
2020static struct platform_driver at91_udc_driver = { 2020static struct platform_driver at91_udc_driver = {
2021 .remove = __exit_p(at91udc_remove), 2021 .remove = at91udc_remove,
2022 .shutdown = at91udc_shutdown, 2022 .shutdown = at91udc_shutdown,
2023 .suspend = at91udc_suspend, 2023 .suspend = at91udc_suspend,
2024 .resume = at91udc_resume, 2024 .resume = at91udc_resume,
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 4c01953a0869..351d48550c33 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -2186,7 +2186,7 @@ static int usba_udc_probe(struct platform_device *pdev)
2186 return 0; 2186 return 0;
2187} 2187}
2188 2188
2189static int __exit usba_udc_remove(struct platform_device *pdev) 2189static int usba_udc_remove(struct platform_device *pdev)
2190{ 2190{
2191 struct usba_udc *udc; 2191 struct usba_udc *udc;
2192 int i; 2192 int i;
@@ -2258,7 +2258,7 @@ static int usba_udc_resume(struct device *dev)
2258static SIMPLE_DEV_PM_OPS(usba_udc_pm_ops, usba_udc_suspend, usba_udc_resume); 2258static SIMPLE_DEV_PM_OPS(usba_udc_pm_ops, usba_udc_suspend, usba_udc_resume);
2259 2259
2260static struct platform_driver udc_driver = { 2260static struct platform_driver udc_driver = {
2261 .remove = __exit_p(usba_udc_remove), 2261 .remove = usba_udc_remove,
2262 .driver = { 2262 .driver = {
2263 .name = "atmel_usba_udc", 2263 .name = "atmel_usba_udc",
2264 .pm = &usba_udc_pm_ops, 2264 .pm = &usba_udc_pm_ops,
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index 55fcb930f92e..c60022b46a48 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -2525,7 +2525,7 @@ err_kfree:
2525/* Driver removal function 2525/* Driver removal function
2526 * Free resources and finish pending transactions 2526 * Free resources and finish pending transactions
2527 */ 2527 */
2528static int __exit fsl_udc_remove(struct platform_device *pdev) 2528static int fsl_udc_remove(struct platform_device *pdev)
2529{ 2529{
2530 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2530 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2531 struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev); 2531 struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -2663,7 +2663,7 @@ static const struct platform_device_id fsl_udc_devtype[] = {
2663}; 2663};
2664MODULE_DEVICE_TABLE(platform, fsl_udc_devtype); 2664MODULE_DEVICE_TABLE(platform, fsl_udc_devtype);
2665static struct platform_driver udc_driver = { 2665static struct platform_driver udc_driver = {
2666 .remove = __exit_p(fsl_udc_remove), 2666 .remove = fsl_udc_remove,
2667 /* Just for FSL i.mx SoC currently */ 2667 /* Just for FSL i.mx SoC currently */
2668 .id_table = fsl_udc_devtype, 2668 .id_table = fsl_udc_devtype,
2669 /* these suspend and resume are not usb suspend and resume */ 2669 /* these suspend and resume are not usb suspend and resume */
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
index fb4df159d32d..3970f453de49 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.c
+++ b/drivers/usb/gadget/udc/fusb300_udc.c
@@ -1342,7 +1342,7 @@ static const struct usb_gadget_ops fusb300_gadget_ops = {
1342 .udc_stop = fusb300_udc_stop, 1342 .udc_stop = fusb300_udc_stop,
1343}; 1343};
1344 1344
1345static int __exit fusb300_remove(struct platform_device *pdev) 1345static int fusb300_remove(struct platform_device *pdev)
1346{ 1346{
1347 struct fusb300 *fusb300 = platform_get_drvdata(pdev); 1347 struct fusb300 *fusb300 = platform_get_drvdata(pdev);
1348 1348
@@ -1492,7 +1492,7 @@ clean_up:
1492} 1492}
1493 1493
1494static struct platform_driver fusb300_driver = { 1494static struct platform_driver fusb300_driver = {
1495 .remove = __exit_p(fusb300_remove), 1495 .remove = fusb300_remove,
1496 .driver = { 1496 .driver = {
1497 .name = (char *) udc_name, 1497 .name = (char *) udc_name,
1498 }, 1498 },
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index 8c7c83c93713..309706fe4bf0 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -1528,7 +1528,7 @@ static const struct usb_gadget_ops m66592_gadget_ops = {
1528 .pullup = m66592_pullup, 1528 .pullup = m66592_pullup,
1529}; 1529};
1530 1530
1531static int __exit m66592_remove(struct platform_device *pdev) 1531static int m66592_remove(struct platform_device *pdev)
1532{ 1532{
1533 struct m66592 *m66592 = platform_get_drvdata(pdev); 1533 struct m66592 *m66592 = platform_get_drvdata(pdev);
1534 1534
@@ -1695,7 +1695,7 @@ clean_up:
1695 1695
1696/*-------------------------------------------------------------------------*/ 1696/*-------------------------------------------------------------------------*/
1697static struct platform_driver m66592_driver = { 1697static struct platform_driver m66592_driver = {
1698 .remove = __exit_p(m66592_remove), 1698 .remove = m66592_remove,
1699 .driver = { 1699 .driver = {
1700 .name = (char *) udc_name, 1700 .name = (char *) udc_name,
1701 }, 1701 },
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 2495fe9c95c5..0293f7169dee 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1820,7 +1820,7 @@ static const struct usb_gadget_ops r8a66597_gadget_ops = {
1820 .set_selfpowered = r8a66597_set_selfpowered, 1820 .set_selfpowered = r8a66597_set_selfpowered,
1821}; 1821};
1822 1822
1823static int __exit r8a66597_remove(struct platform_device *pdev) 1823static int r8a66597_remove(struct platform_device *pdev)
1824{ 1824{
1825 struct r8a66597 *r8a66597 = platform_get_drvdata(pdev); 1825 struct r8a66597 *r8a66597 = platform_get_drvdata(pdev);
1826 1826
@@ -1974,7 +1974,7 @@ clean_up2:
1974 1974
1975/*-------------------------------------------------------------------------*/ 1975/*-------------------------------------------------------------------------*/
1976static struct platform_driver r8a66597_driver = { 1976static struct platform_driver r8a66597_driver = {
1977 .remove = __exit_p(r8a66597_remove), 1977 .remove = r8a66597_remove,
1978 .driver = { 1978 .driver = {
1979 .name = (char *) udc_name, 1979 .name = (char *) udc_name,
1980 }, 1980 },
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index dd3e9fd31b80..1f24274477ab 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -2071,8 +2071,8 @@ static int xudc_probe(struct platform_device *pdev)
2071 /* Map the registers */ 2071 /* Map the registers */
2072 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2072 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2073 udc->addr = devm_ioremap_resource(&pdev->dev, res); 2073 udc->addr = devm_ioremap_resource(&pdev->dev, res);
2074 if (!udc->addr) 2074 if (IS_ERR(udc->addr))
2075 return -ENOMEM; 2075 return PTR_ERR(udc->addr);
2076 2076
2077 irq = platform_get_irq(pdev, 0); 2077 irq = platform_get_irq(pdev, 0);
2078 if (irq < 0) { 2078 if (irq < 0) {
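The udc-xilinx hunk reflects that devm_ioremap_resource() reports failure through an ERR_PTR()-encoded errno rather than NULL, so the old !udc->addr test could never trigger. Below is a rough user-space sketch of that convention; ERR_PTR/IS_ERR/PTR_ERR are re-implemented locally for the demo and are only approximations of the kernel helpers.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    /* Errors live in the top MAX_ERRNO values of the pointer range. */
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical mapper that fails the way devm_ioremap_resource() would. */
static void *map_registers(int fail)
{
    static unsigned char fake_regs[16];

    return fail ? ERR_PTR(-ENOMEM) : (void *)fake_regs;
}

int main(void)
{
    void *addr = map_registers(1);

    if (IS_ERR(addr)) {
        printf("mapping failed: %ld\n", PTR_ERR(addr));  /* -ENOMEM */
        return 1;
    }
    return 0;
}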
diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
index 1e0e10dd6ba5..3af263cc0caa 100644
--- a/drivers/usb/phy/phy-isp1301-omap.c
+++ b/drivers/usb/phy/phy-isp1301-omap.c
@@ -94,7 +94,7 @@ struct isp1301 {
94 94
95#if defined(CONFIG_MACH_OMAP_H2) || defined(CONFIG_MACH_OMAP_H3) 95#if defined(CONFIG_MACH_OMAP_H2) || defined(CONFIG_MACH_OMAP_H3)
96 96
97#if defined(CONFIG_TPS65010) || defined(CONFIG_TPS65010_MODULE) 97#if defined(CONFIG_TPS65010) || (defined(CONFIG_TPS65010_MODULE) && defined(MODULE))
98 98
99#include <linux/i2c/tps65010.h> 99#include <linux/i2c/tps65010.h>
100 100
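The isp1301 guard is tightened so that the tps65010 calls are only compiled in when they can actually be linked: either tps65010 is built in, or both tps65010 and this driver are modules. The kernel's IS_REACHABLE() helper encodes essentially the same condition. A toy preprocessor sketch of the rule, with made-up config values:

#include <stdio.h>

/* Pretend configuration: tps65010 built as a module, this file built in. */
#define CONFIG_TPS65010_MODULE 1
/* #define MODULE 1 */

#if defined(CONFIG_TPS65010) || (defined(CONFIG_TPS65010_MODULE) && defined(MODULE))
#define CAN_CALL_TPS65010 1
#else
#define CAN_CALL_TPS65010 0
#endif

int main(void)
{
    printf("tps65010 calls reachable: %d\n", CAN_CALL_TPS65010);
    return 0;
}

Flip the MODULE define on and the calls become reachable again, matching the (defined(CONFIG_TPS65010_MODULE) && defined(MODULE)) half of the new test.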
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index cde698a07d21..a2ae42720a6a 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1802,6 +1802,8 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1802 set_nlink(inode, btrfs_stack_inode_nlink(inode_item)); 1802 set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1803 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item)); 1803 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1804 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item); 1804 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1805 BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1806
1805 inode->i_version = btrfs_stack_inode_sequence(inode_item); 1807 inode->i_version = btrfs_stack_inode_sequence(inode_item);
1806 inode->i_rdev = 0; 1808 inode->i_rdev = 0;
1807 *rdev = btrfs_stack_inode_rdev(inode_item); 1809 *rdev = btrfs_stack_inode_rdev(inode_item);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 1eef4ee01d1a..0ec8e228b89f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3178,8 +3178,8 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
3178 bi = btrfs_item_ptr_offset(leaf, path->slots[0]); 3178 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3179 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item)); 3179 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3180 btrfs_mark_buffer_dirty(leaf); 3180 btrfs_mark_buffer_dirty(leaf);
3181 btrfs_release_path(path);
3182fail: 3181fail:
3182 btrfs_release_path(path);
3183 if (ret) 3183 if (ret)
3184 btrfs_abort_transaction(trans, root, ret); 3184 btrfs_abort_transaction(trans, root, ret);
3185 return ret; 3185 return ret;
@@ -3305,8 +3305,7 @@ again:
3305 3305
3306 spin_lock(&block_group->lock); 3306 spin_lock(&block_group->lock);
3307 if (block_group->cached != BTRFS_CACHE_FINISHED || 3307 if (block_group->cached != BTRFS_CACHE_FINISHED ||
3308 !btrfs_test_opt(root, SPACE_CACHE) || 3308 !btrfs_test_opt(root, SPACE_CACHE)) {
3309 block_group->delalloc_bytes) {
3310 /* 3309 /*
3311 * don't bother trying to write stuff out _if_ 3310 * don't bother trying to write stuff out _if_
3312 * a) we're not cached, 3311 * a) we're not cached,
@@ -3408,17 +3407,14 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3408 int loops = 0; 3407 int loops = 0;
3409 3408
3410 spin_lock(&cur_trans->dirty_bgs_lock); 3409 spin_lock(&cur_trans->dirty_bgs_lock);
3411 if (!list_empty(&cur_trans->dirty_bgs)) { 3410 if (list_empty(&cur_trans->dirty_bgs)) {
3412 list_splice_init(&cur_trans->dirty_bgs, &dirty); 3411 spin_unlock(&cur_trans->dirty_bgs_lock);
3412 return 0;
3413 } 3413 }
3414 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3414 spin_unlock(&cur_trans->dirty_bgs_lock); 3415 spin_unlock(&cur_trans->dirty_bgs_lock);
3415 3416
3416again: 3417again:
3417 if (list_empty(&dirty)) {
3418 btrfs_free_path(path);
3419 return 0;
3420 }
3421
3422 /* 3418 /*
3423 * make sure all the block groups on our dirty list actually 3419 * make sure all the block groups on our dirty list actually
3424 * exist 3420 * exist
@@ -3431,18 +3427,16 @@ again:
3431 return -ENOMEM; 3427 return -ENOMEM;
3432 } 3428 }
3433 3429
3430 /*
3431 * cache_write_mutex is here only to save us from balance or automatic
3432 * removal of empty block groups deleting this block group while we are
3433 * writing out the cache
3434 */
3435 mutex_lock(&trans->transaction->cache_write_mutex);
3434 while (!list_empty(&dirty)) { 3436 while (!list_empty(&dirty)) {
3435 cache = list_first_entry(&dirty, 3437 cache = list_first_entry(&dirty,
3436 struct btrfs_block_group_cache, 3438 struct btrfs_block_group_cache,
3437 dirty_list); 3439 dirty_list);
3438
3439 /*
3440 * cache_write_mutex is here only to save us from balance
3441 * deleting this block group while we are writing out the
3442 * cache
3443 */
3444 mutex_lock(&trans->transaction->cache_write_mutex);
3445
3446 /* 3440 /*
3447 * this can happen if something re-dirties a block 3441 * this can happen if something re-dirties a block
3448 * group that is already under IO. Just wait for it to 3442 * group that is already under IO. Just wait for it to
@@ -3495,7 +3489,6 @@ again:
3495 } 3489 }
3496 if (!ret) 3490 if (!ret)
3497 ret = write_one_cache_group(trans, root, path, cache); 3491 ret = write_one_cache_group(trans, root, path, cache);
3498 mutex_unlock(&trans->transaction->cache_write_mutex);
3499 3492
3500 /* if its not on the io list, we need to put the block group */ 3493 /* if its not on the io list, we need to put the block group */
3501 if (should_put) 3494 if (should_put)
@@ -3503,7 +3496,16 @@ again:
3503 3496
3504 if (ret) 3497 if (ret)
3505 break; 3498 break;
3499
3500 /*
3501 * Avoid blocking other tasks for too long. It might even save
3502 * us from writing caches for block groups that are going to be
3503 * removed.
3504 */
3505 mutex_unlock(&trans->transaction->cache_write_mutex);
3506 mutex_lock(&trans->transaction->cache_write_mutex);
3506 } 3507 }
3508 mutex_unlock(&trans->transaction->cache_write_mutex);
3507 3509
3508 /* 3510 /*
3509 * go through delayed refs for all the stuff we've just kicked off 3511 * go through delayed refs for all the stuff we've just kicked off
@@ -3514,8 +3516,15 @@ again:
3514 loops++; 3516 loops++;
3515 spin_lock(&cur_trans->dirty_bgs_lock); 3517 spin_lock(&cur_trans->dirty_bgs_lock);
3516 list_splice_init(&cur_trans->dirty_bgs, &dirty); 3518 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3519 /*
3520 * dirty_bgs_lock protects us from concurrent block group
3521 * deletes too (not just cache_write_mutex).
3522 */
3523 if (!list_empty(&dirty)) {
3524 spin_unlock(&cur_trans->dirty_bgs_lock);
3525 goto again;
3526 }
3517 spin_unlock(&cur_trans->dirty_bgs_lock); 3527 spin_unlock(&cur_trans->dirty_bgs_lock);
3518 goto again;
3519 } 3528 }
3520 3529
3521 btrfs_free_path(path); 3530 btrfs_free_path(path);
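In the rewritten loop above, cache_write_mutex is taken once before the while loop and, per the new comment, dropped and immediately re-acquired at the end of each iteration so balance or block-group removal is not blocked for the whole write-out. A toy pthread version of that pattern (not btrfs code, names are local):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cache_write_mutex = PTHREAD_MUTEX_INITIALIZER;

static void write_out_items(int nr_items)
{
    int i;

    pthread_mutex_lock(&cache_write_mutex);
    for (i = 0; i < nr_items; i++) {
        printf("writing item %d under the lock\n", i);

        /* Let other waiters in between items, as the hunk above does. */
        pthread_mutex_unlock(&cache_write_mutex);
        pthread_mutex_lock(&cache_write_mutex);
    }
    pthread_mutex_unlock(&cache_write_mutex);
}

int main(void)
{
    write_out_items(3);
    return 0;
}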
@@ -7537,7 +7546,7 @@ static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7537 * returns the key for the extent through ins, and a tree buffer for 7546 * returns the key for the extent through ins, and a tree buffer for
7538 * the first block of the extent through buf. 7547 * the first block of the extent through buf.
7539 * 7548 *
7540 * returns the tree buffer or NULL. 7549 * returns the tree buffer or an ERR_PTR on error.
7541 */ 7550 */
7542struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, 7551struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7543 struct btrfs_root *root, 7552 struct btrfs_root *root,
@@ -7548,6 +7557,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7548 struct btrfs_key ins; 7557 struct btrfs_key ins;
7549 struct btrfs_block_rsv *block_rsv; 7558 struct btrfs_block_rsv *block_rsv;
7550 struct extent_buffer *buf; 7559 struct extent_buffer *buf;
7560 struct btrfs_delayed_extent_op *extent_op;
7551 u64 flags = 0; 7561 u64 flags = 0;
7552 int ret; 7562 int ret;
7553 u32 blocksize = root->nodesize; 7563 u32 blocksize = root->nodesize;
@@ -7568,13 +7578,14 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7568 7578
7569 ret = btrfs_reserve_extent(root, blocksize, blocksize, 7579 ret = btrfs_reserve_extent(root, blocksize, blocksize,
7570 empty_size, hint, &ins, 0, 0); 7580 empty_size, hint, &ins, 0, 0);
7571 if (ret) { 7581 if (ret)
7572 unuse_block_rsv(root->fs_info, block_rsv, blocksize); 7582 goto out_unuse;
7573 return ERR_PTR(ret);
7574 }
7575 7583
7576 buf = btrfs_init_new_buffer(trans, root, ins.objectid, level); 7584 buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
7577 BUG_ON(IS_ERR(buf)); /* -ENOMEM */ 7585 if (IS_ERR(buf)) {
7586 ret = PTR_ERR(buf);
7587 goto out_free_reserved;
7588 }
7578 7589
7579 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { 7590 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7580 if (parent == 0) 7591 if (parent == 0)
@@ -7584,9 +7595,11 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7584 BUG_ON(parent > 0); 7595 BUG_ON(parent > 0);
7585 7596
7586 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { 7597 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7587 struct btrfs_delayed_extent_op *extent_op;
7588 extent_op = btrfs_alloc_delayed_extent_op(); 7598 extent_op = btrfs_alloc_delayed_extent_op();
7589 BUG_ON(!extent_op); /* -ENOMEM */ 7599 if (!extent_op) {
7600 ret = -ENOMEM;
7601 goto out_free_buf;
7602 }
7590 if (key) 7603 if (key)
7591 memcpy(&extent_op->key, key, sizeof(extent_op->key)); 7604 memcpy(&extent_op->key, key, sizeof(extent_op->key));
7592 else 7605 else
@@ -7601,13 +7614,24 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7601 extent_op->level = level; 7614 extent_op->level = level;
7602 7615
7603 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans, 7616 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7604 ins.objectid, 7617 ins.objectid, ins.offset,
7605 ins.offset, parent, root_objectid, 7618 parent, root_objectid, level,
7606 level, BTRFS_ADD_DELAYED_EXTENT, 7619 BTRFS_ADD_DELAYED_EXTENT,
7607 extent_op, 0); 7620 extent_op, 0);
7608 BUG_ON(ret); /* -ENOMEM */ 7621 if (ret)
7622 goto out_free_delayed;
7609 } 7623 }
7610 return buf; 7624 return buf;
7625
7626out_free_delayed:
7627 btrfs_free_delayed_extent_op(extent_op);
7628out_free_buf:
7629 free_extent_buffer(buf);
7630out_free_reserved:
7631 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
7632out_unuse:
7633 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7634 return ERR_PTR(ret);
7611} 7635}
7612 7636
7613struct walk_control { 7637struct walk_control {
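btrfs_alloc_tree_block() now unwinds through a chain of error labels instead of BUG_ON()-ing on allocation failures: the delayed extent op, the new buffer, the reserved extent and the block reservation are released in reverse order of acquisition. A self-contained sketch of that unwind style, with plain malloc/free standing in for the btrfs helpers:

#include <errno.h>
#include <stdlib.h>

static int setup_three_resources(void)
{
    char *a, *b, *c;
    int ret;

    a = malloc(32);
    if (!a)
        return -ENOMEM;

    b = malloc(32);
    if (!b) {
        ret = -ENOMEM;
        goto out_free_a;
    }

    c = malloc(32);
    if (!c) {
        ret = -ENOMEM;
        goto out_free_b;
    }

    /* Success path of the demo: release everything and report 0. */
    free(c);
    free(b);
    free(a);
    return 0;

out_free_b:
    free(b);
out_free_a:
    free(a);
    return ret;
}

int main(void)
{
    return setup_three_resources() ? 1 : 0;
}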
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 782f3bc4651d..43af5a61ad25 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4560,36 +4560,37 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
4560 do { 4560 do {
4561 index--; 4561 index--;
4562 page = eb->pages[index]; 4562 page = eb->pages[index];
4563 if (page && mapped) { 4563 if (!page)
4564 continue;
4565 if (mapped)
4564 spin_lock(&page->mapping->private_lock); 4566 spin_lock(&page->mapping->private_lock);
4567 /*
4568 * We do this since we'll remove the pages after we've
4569 * removed the eb from the radix tree, so we could race
4570 * and have this page now attached to the new eb. So
4571 * only clear page_private if it's still connected to
4572 * this eb.
4573 */
4574 if (PagePrivate(page) &&
4575 page->private == (unsigned long)eb) {
4576 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4577 BUG_ON(PageDirty(page));
4578 BUG_ON(PageWriteback(page));
4565 /* 4579 /*
4566 * We do this since we'll remove the pages after we've 4580 * We need to make sure we haven't be attached
4567 * removed the eb from the radix tree, so we could race 4581 * to a new eb.
4568 * and have this page now attached to the new eb. So
4569 * only clear page_private if it's still connected to
4570 * this eb.
4571 */ 4582 */
4572 if (PagePrivate(page) && 4583 ClearPagePrivate(page);
4573 page->private == (unsigned long)eb) { 4584 set_page_private(page, 0);
4574 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); 4585 /* One for the page private */
4575 BUG_ON(PageDirty(page));
4576 BUG_ON(PageWriteback(page));
4577 /*
4578 * We need to make sure we haven't be attached
4579 * to a new eb.
4580 */
4581 ClearPagePrivate(page);
4582 set_page_private(page, 0);
4583 /* One for the page private */
4584 page_cache_release(page);
4585 }
4586 spin_unlock(&page->mapping->private_lock);
4587
4588 }
4589 if (page) {
4590 /* One for when we alloced the page */
4591 page_cache_release(page); 4586 page_cache_release(page);
4592 } 4587 }
4588
4589 if (mapped)
4590 spin_unlock(&page->mapping->private_lock);
4591
4592 /* One for when we alloced the page */
4593 page_cache_release(page);
4593 } while (index != 0); 4594 } while (index != 0);
4594} 4595}
4595 4596
@@ -4870,6 +4871,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
4870 mark_extent_buffer_accessed(exists, p); 4871 mark_extent_buffer_accessed(exists, p);
4871 goto free_eb; 4872 goto free_eb;
4872 } 4873 }
4874 exists = NULL;
4873 4875
4874 /* 4876 /*
4875 * Do this so attach doesn't complain and we need to 4877 * Do this so attach doesn't complain and we need to
@@ -4933,12 +4935,12 @@ again:
4933 return eb; 4935 return eb;
4934 4936
4935free_eb: 4937free_eb:
4938 WARN_ON(!atomic_dec_and_test(&eb->refs));
4936 for (i = 0; i < num_pages; i++) { 4939 for (i = 0; i < num_pages; i++) {
4937 if (eb->pages[i]) 4940 if (eb->pages[i])
4938 unlock_page(eb->pages[i]); 4941 unlock_page(eb->pages[i]);
4939 } 4942 }
4940 4943
4941 WARN_ON(!atomic_dec_and_test(&eb->refs));
4942 btrfs_release_extent_buffer(eb); 4944 btrfs_release_extent_buffer(eb);
4943 return exists; 4945 return exists;
4944} 4946}
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 81fa75a8e1f3..41c510b7cc11 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1218,7 +1218,7 @@ out:
1218 * 1218 *
1219 * This function writes out a free space cache struct to disk for quick recovery 1219 * This function writes out a free space cache struct to disk for quick recovery
1220 * on mount. This will return 0 if it was successfull in writing the cache out, 1220 * on mount. This will return 0 if it was successfull in writing the cache out,
1221 * and -1 if it was not. 1221 * or an errno if it was not.
1222 */ 1222 */
1223static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, 1223static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
1224 struct btrfs_free_space_ctl *ctl, 1224 struct btrfs_free_space_ctl *ctl,
@@ -1235,12 +1235,12 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
1235 int must_iput = 0; 1235 int must_iput = 0;
1236 1236
1237 if (!i_size_read(inode)) 1237 if (!i_size_read(inode))
1238 return -1; 1238 return -EIO;
1239 1239
1240 WARN_ON(io_ctl->pages); 1240 WARN_ON(io_ctl->pages);
1241 ret = io_ctl_init(io_ctl, inode, root, 1); 1241 ret = io_ctl_init(io_ctl, inode, root, 1);
1242 if (ret) 1242 if (ret)
1243 return -1; 1243 return ret;
1244 1244
1245 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) { 1245 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
1246 down_write(&block_group->data_rwsem); 1246 down_write(&block_group->data_rwsem);
@@ -1258,7 +1258,9 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
1258 } 1258 }
1259 1259
1260 /* Lock all pages first so we can lock the extent safely. */ 1260 /* Lock all pages first so we can lock the extent safely. */
1261 io_ctl_prepare_pages(io_ctl, inode, 0); 1261 ret = io_ctl_prepare_pages(io_ctl, inode, 0);
1262 if (ret)
1263 goto out;
1262 1264
1263 lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, 1265 lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
1264 0, &cached_state); 1266 0, &cached_state);
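__btrfs_write_out_cache() now fails with a real negative errno (and propagates what io_ctl_init() and io_ctl_prepare_pages() return) instead of a flat -1, matching the updated comment. A tiny sketch of why callers prefer that convention; the function here is illustrative only:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int write_out_cache(long i_size)
{
    if (i_size == 0)
        return -EIO;    /* was a bare "return -1" before the change */
    return 0;
}

int main(void)
{
    int ret = write_out_cache(0);

    if (ret < 0)
        fprintf(stderr, "cache write failed: %s\n", strerror(-ret));
    return 0;
}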
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index ada4d24ed11b..8bb013672aee 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3632,25 +3632,28 @@ static void btrfs_read_locked_inode(struct inode *inode)
3632 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); 3632 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3633 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); 3633 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3634 3634
3635 inode->i_version = btrfs_inode_sequence(leaf, inode_item);
3636 inode->i_generation = BTRFS_I(inode)->generation;
3637 inode->i_rdev = 0;
3638 rdev = btrfs_inode_rdev(leaf, inode_item);
3639
3640 BTRFS_I(inode)->index_cnt = (u64)-1;
3641 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3642
3643cache_index:
3635 /* 3644 /*
3636 * If we were modified in the current generation and evicted from memory 3645 * If we were modified in the current generation and evicted from memory
3637 * and then re-read we need to do a full sync since we don't have any 3646 * and then re-read we need to do a full sync since we don't have any
3638 * idea about which extents were modified before we were evicted from 3647 * idea about which extents were modified before we were evicted from
3639 * cache. 3648 * cache.
3649 *
3650 * This is required for both inode re-read from disk and delayed inode
3651 * in delayed_nodes_tree.
3640 */ 3652 */
3641 if (BTRFS_I(inode)->last_trans == root->fs_info->generation) 3653 if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
3642 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 3654 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3643 &BTRFS_I(inode)->runtime_flags); 3655 &BTRFS_I(inode)->runtime_flags);
3644 3656
3645 inode->i_version = btrfs_inode_sequence(leaf, inode_item);
3646 inode->i_generation = BTRFS_I(inode)->generation;
3647 inode->i_rdev = 0;
3648 rdev = btrfs_inode_rdev(leaf, inode_item);
3649
3650 BTRFS_I(inode)->index_cnt = (u64)-1;
3651 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3652
3653cache_index:
3654 path->slots[0]++; 3657 path->slots[0]++;
3655 if (inode->i_nlink != 1 || 3658 if (inode->i_nlink != 1 ||
3656 path->slots[0] >= btrfs_header_nritems(leaf)) 3659 path->slots[0] >= btrfs_header_nritems(leaf))
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index b05653f182c2..1c22c6518504 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2410,7 +2410,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
2410 "Attempt to delete subvolume %llu during send", 2410 "Attempt to delete subvolume %llu during send",
2411 dest->root_key.objectid); 2411 dest->root_key.objectid);
2412 err = -EPERM; 2412 err = -EPERM;
2413 goto out_dput; 2413 goto out_unlock_inode;
2414 } 2414 }
2415 2415
2416 d_invalidate(dentry); 2416 d_invalidate(dentry);
@@ -2505,6 +2505,7 @@ out_up_write:
2505 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD); 2505 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
2506 spin_unlock(&dest->root_item_lock); 2506 spin_unlock(&dest->root_item_lock);
2507 } 2507 }
2508out_unlock_inode:
2508 mutex_unlock(&inode->i_mutex); 2509 mutex_unlock(&inode->i_mutex);
2509 if (!err) { 2510 if (!err) {
2510 shrink_dcache_sb(root->fs_info->sb); 2511 shrink_dcache_sb(root->fs_info->sb);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 8bcd2a007517..96aebf3bcd5b 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1058,6 +1058,7 @@ static int contains_pending_extent(struct btrfs_trans_handle *trans,
1058 struct extent_map *em; 1058 struct extent_map *em;
1059 struct list_head *search_list = &trans->transaction->pending_chunks; 1059 struct list_head *search_list = &trans->transaction->pending_chunks;
1060 int ret = 0; 1060 int ret = 0;
1061 u64 physical_start = *start;
1061 1062
1062again: 1063again:
1063 list_for_each_entry(em, search_list, list) { 1064 list_for_each_entry(em, search_list, list) {
@@ -1068,9 +1069,9 @@ again:
1068 for (i = 0; i < map->num_stripes; i++) { 1069 for (i = 0; i < map->num_stripes; i++) {
1069 if (map->stripes[i].dev != device) 1070 if (map->stripes[i].dev != device)
1070 continue; 1071 continue;
1071 if (map->stripes[i].physical >= *start + len || 1072 if (map->stripes[i].physical >= physical_start + len ||
1072 map->stripes[i].physical + em->orig_block_len <= 1073 map->stripes[i].physical + em->orig_block_len <=
1073 *start) 1074 physical_start)
1074 continue; 1075 continue;
1075 *start = map->stripes[i].physical + 1076 *start = map->stripes[i].physical +
1076 em->orig_block_len; 1077 em->orig_block_len;
@@ -1193,8 +1194,14 @@ again:
1193 */ 1194 */
1194 if (contains_pending_extent(trans, device, 1195 if (contains_pending_extent(trans, device,
1195 &search_start, 1196 &search_start,
1196 hole_size)) 1197 hole_size)) {
1197 hole_size = 0; 1198 if (key.offset >= search_start) {
1199 hole_size = key.offset - search_start;
1200 } else {
1201 WARN_ON_ONCE(1);
1202 hole_size = 0;
1203 }
1204 }
1198 1205
1199 if (hole_size > max_hole_size) { 1206 if (hole_size > max_hole_size) {
1200 max_hole_start = search_start; 1207 max_hole_start = search_start;
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 18228c201f7f..024f2284d3f6 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -64,8 +64,8 @@ config EXT4_FS_SECURITY
64 If you are not using a security module that requires using 64 If you are not using a security module that requires using
65 extended attributes for file security labels, say N. 65 extended attributes for file security labels, say N.
66 66
67config EXT4_FS_ENCRYPTION 67config EXT4_ENCRYPTION
68 bool "Ext4 Encryption" 68 tristate "Ext4 Encryption"
69 depends on EXT4_FS 69 depends on EXT4_FS
70 select CRYPTO_AES 70 select CRYPTO_AES
71 select CRYPTO_CBC 71 select CRYPTO_CBC
@@ -81,6 +81,11 @@ config EXT4_FS_ENCRYPTION
81 efficient since it avoids caching the encrypted and 81 efficient since it avoids caching the encrypted and
82 decrypted pages in the page cache. 82 decrypted pages in the page cache.
83 83
84config EXT4_FS_ENCRYPTION
85 bool
86 default y
87 depends on EXT4_ENCRYPTION
88
84config EXT4_DEBUG 89config EXT4_DEBUG
85 bool "EXT4 debugging support" 90 bool "EXT4 debugging support"
86 depends on EXT4_FS 91 depends on EXT4_FS
diff --git a/fs/ext4/crypto_fname.c b/fs/ext4/crypto_fname.c
index ca2f5948c1ac..fded02f72299 100644
--- a/fs/ext4/crypto_fname.c
+++ b/fs/ext4/crypto_fname.c
@@ -66,6 +66,7 @@ static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx,
66 int res = 0; 66 int res = 0;
67 char iv[EXT4_CRYPTO_BLOCK_SIZE]; 67 char iv[EXT4_CRYPTO_BLOCK_SIZE];
68 struct scatterlist sg[1]; 68 struct scatterlist sg[1];
69 int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK);
69 char *workbuf; 70 char *workbuf;
70 71
71 if (iname->len <= 0 || iname->len > ctx->lim) 72 if (iname->len <= 0 || iname->len > ctx->lim)
@@ -73,6 +74,7 @@ static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx,
73 74
74 ciphertext_len = (iname->len < EXT4_CRYPTO_BLOCK_SIZE) ? 75 ciphertext_len = (iname->len < EXT4_CRYPTO_BLOCK_SIZE) ?
75 EXT4_CRYPTO_BLOCK_SIZE : iname->len; 76 EXT4_CRYPTO_BLOCK_SIZE : iname->len;
77 ciphertext_len = ext4_fname_crypto_round_up(ciphertext_len, padding);
76 ciphertext_len = (ciphertext_len > ctx->lim) 78 ciphertext_len = (ciphertext_len > ctx->lim)
77 ? ctx->lim : ciphertext_len; 79 ? ctx->lim : ciphertext_len;
78 80
@@ -101,7 +103,7 @@ static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx,
101 /* Create encryption request */ 103 /* Create encryption request */
102 sg_init_table(sg, 1); 104 sg_init_table(sg, 1);
103 sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0); 105 sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0);
104 ablkcipher_request_set_crypt(req, sg, sg, iname->len, iv); 106 ablkcipher_request_set_crypt(req, sg, sg, ciphertext_len, iv);
105 res = crypto_ablkcipher_encrypt(req); 107 res = crypto_ablkcipher_encrypt(req);
106 if (res == -EINPROGRESS || res == -EBUSY) { 108 if (res == -EINPROGRESS || res == -EBUSY) {
107 BUG_ON(req->base.data != &ecr); 109 BUG_ON(req->base.data != &ecr);
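The encryption path now derives a per-policy padding of 4 << (flags & EXT4_POLICY_FLAGS_PAD_MASK) (presumably 4, 8, 16 or 32 bytes) and rounds the ciphertext length up to that multiple before setting up the cipher request. The helper below re-implements what ext4_fname_crypto_round_up() is assumed to do, purely for illustration:

#include <stdio.h>

/* Assumed behaviour: round size up to the next multiple of blksize. */
static unsigned int fname_crypto_round_up(unsigned int size, unsigned int blksize)
{
    return ((size + blksize - 1) / blksize) * blksize;
}

int main(void)
{
    unsigned int padding = 4 << 2;   /* pad flag 2 -> 16-byte padding */

    printf("%u\n", fname_crypto_round_up(13, padding));   /* prints 16 */
    return 0;
}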
@@ -198,106 +200,57 @@ static int ext4_fname_decrypt(struct ext4_fname_crypto_ctx *ctx,
198 return oname->len; 200 return oname->len;
199} 201}
200 202
203static const char *lookup_table =
204 "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";
205
201/** 206/**
202 * ext4_fname_encode_digest() - 207 * ext4_fname_encode_digest() -
203 * 208 *
204 * Encodes the input digest using characters from the set [a-zA-Z0-9_+]. 209 * Encodes the input digest using characters from the set [a-zA-Z0-9_+].
205 * The encoded string is roughly 4/3 times the size of the input string. 210 * The encoded string is roughly 4/3 times the size of the input string.
206 */ 211 */
207int ext4_fname_encode_digest(char *dst, char *src, u32 len) 212static int digest_encode(const char *src, int len, char *dst)
208{ 213{
209 static const char *lookup_table = 214 int i = 0, bits = 0, ac = 0;
210 "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_+"; 215 char *cp = dst;
211 u32 current_chunk, num_chunks, i; 216
212 char tmp_buf[3]; 217 while (i < len) {
213 u32 c0, c1, c2, c3; 218 ac += (((unsigned char) src[i]) << bits);
214 219 bits += 8;
215 current_chunk = 0; 220 do {
216 num_chunks = len/3; 221 *cp++ = lookup_table[ac & 0x3f];
217 for (i = 0; i < num_chunks; i++) { 222 ac >>= 6;
218 c0 = src[3*i] & 0x3f; 223 bits -= 6;
219 c1 = (((src[3*i]>>6)&0x3) | ((src[3*i+1] & 0xf)<<2)) & 0x3f; 224 } while (bits >= 6);
220 c2 = (((src[3*i+1]>>4)&0xf) | ((src[3*i+2] & 0x3)<<4)) & 0x3f;
221 c3 = (src[3*i+2]>>2) & 0x3f;
222 dst[4*i] = lookup_table[c0];
223 dst[4*i+1] = lookup_table[c1];
224 dst[4*i+2] = lookup_table[c2];
225 dst[4*i+3] = lookup_table[c3];
226 }
227 if (i*3 < len) {
228 memset(tmp_buf, 0, 3);
229 memcpy(tmp_buf, &src[3*i], len-3*i);
230 c0 = tmp_buf[0] & 0x3f;
231 c1 = (((tmp_buf[0]>>6)&0x3) | ((tmp_buf[1] & 0xf)<<2)) & 0x3f;
232 c2 = (((tmp_buf[1]>>4)&0xf) | ((tmp_buf[2] & 0x3)<<4)) & 0x3f;
233 c3 = (tmp_buf[2]>>2) & 0x3f;
234 dst[4*i] = lookup_table[c0];
235 dst[4*i+1] = lookup_table[c1];
236 dst[4*i+2] = lookup_table[c2];
237 dst[4*i+3] = lookup_table[c3];
238 i++; 225 i++;
239 } 226 }
240 return (i * 4); 227 if (bits)
228 *cp++ = lookup_table[ac & 0x3f];
229 return cp - dst;
241} 230}
242 231
243/** 232static int digest_decode(const char *src, int len, char *dst)
244 * ext4_fname_hash() -
245 *
246 * This function computes the hash of the input filename, and sets the output
247 * buffer to the *encoded* digest. It returns the length of the digest as its
248 * return value. Errors are returned as negative numbers. We trust the caller
249 * to allocate sufficient memory to oname string.
250 */
251static int ext4_fname_hash(struct ext4_fname_crypto_ctx *ctx,
252 const struct ext4_str *iname,
253 struct ext4_str *oname)
254{ 233{
255 struct scatterlist sg; 234 int i = 0, bits = 0, ac = 0;
256 struct hash_desc desc = { 235 const char *p;
257 .tfm = (struct crypto_hash *)ctx->htfm, 236 char *cp = dst;
258 .flags = CRYPTO_TFM_REQ_MAY_SLEEP 237
259 }; 238 while (i < len) {
260 int res = 0; 239 p = strchr(lookup_table, src[i]);
261 240 if (p == NULL || src[i] == 0)
262 if (iname->len <= EXT4_FNAME_CRYPTO_DIGEST_SIZE) { 241 return -2;
263 res = ext4_fname_encode_digest(oname->name, iname->name, 242 ac += (p - lookup_table) << bits;
264 iname->len); 243 bits += 6;
265 oname->len = res; 244 if (bits >= 8) {
266 return res; 245 *cp++ = ac & 0xff;
267 } 246 ac >>= 8;
268 247 bits -= 8;
269 sg_init_one(&sg, iname->name, iname->len); 248 }
270 res = crypto_hash_init(&desc); 249 i++;
271 if (res) {
272 printk(KERN_ERR
273 "%s: Error initializing crypto hash; res = [%d]\n",
274 __func__, res);
275 goto out;
276 }
277 res = crypto_hash_update(&desc, &sg, iname->len);
278 if (res) {
279 printk(KERN_ERR
280 "%s: Error updating crypto hash; res = [%d]\n",
281 __func__, res);
282 goto out;
283 }
284 res = crypto_hash_final(&desc,
285 &oname->name[EXT4_FNAME_CRYPTO_DIGEST_SIZE]);
286 if (res) {
287 printk(KERN_ERR
288 "%s: Error finalizing crypto hash; res = [%d]\n",
289 __func__, res);
290 goto out;
291 } 250 }
292 /* Encode the digest as a printable string--this will increase the 251 if (ac)
293 * size of the digest */ 252 return -1;
294 oname->name[0] = 'I'; 253 return cp - dst;
295 res = ext4_fname_encode_digest(oname->name+1,
296 &oname->name[EXT4_FNAME_CRYPTO_DIGEST_SIZE],
297 EXT4_FNAME_CRYPTO_DIGEST_SIZE) + 1;
298 oname->len = res;
299out:
300 return res;
301} 254}
302 255
303/** 256/**
@@ -405,6 +358,7 @@ struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx(
405 if (IS_ERR(ctx)) 358 if (IS_ERR(ctx))
406 return ctx; 359 return ctx;
407 360
361 ctx->flags = ei->i_crypt_policy_flags;
408 if (ctx->has_valid_key) { 362 if (ctx->has_valid_key) {
409 if (ctx->key.mode != EXT4_ENCRYPTION_MODE_AES_256_CTS) { 363 if (ctx->key.mode != EXT4_ENCRYPTION_MODE_AES_256_CTS) {
410 printk_once(KERN_WARNING 364 printk_once(KERN_WARNING
@@ -517,6 +471,7 @@ int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx,
517 u32 namelen) 471 u32 namelen)
518{ 472{
519 u32 ciphertext_len; 473 u32 ciphertext_len;
474 int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK);
520 475
521 if (ctx == NULL) 476 if (ctx == NULL)
522 return -EIO; 477 return -EIO;
@@ -524,6 +479,7 @@ int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx,
524 return -EACCES; 479 return -EACCES;
525 ciphertext_len = (namelen < EXT4_CRYPTO_BLOCK_SIZE) ? 480 ciphertext_len = (namelen < EXT4_CRYPTO_BLOCK_SIZE) ?
526 EXT4_CRYPTO_BLOCK_SIZE : namelen; 481 EXT4_CRYPTO_BLOCK_SIZE : namelen;
482 ciphertext_len = ext4_fname_crypto_round_up(ciphertext_len, padding);
527 ciphertext_len = (ciphertext_len > ctx->lim) 483 ciphertext_len = (ciphertext_len > ctx->lim)
528 ? ctx->lim : ciphertext_len; 484 ? ctx->lim : ciphertext_len;
529 return (int) ciphertext_len; 485 return (int) ciphertext_len;
@@ -539,10 +495,13 @@ int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx,
539 u32 ilen, struct ext4_str *crypto_str) 495 u32 ilen, struct ext4_str *crypto_str)
540{ 496{
541 unsigned int olen; 497 unsigned int olen;
498 int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK);
542 499
543 if (!ctx) 500 if (!ctx)
544 return -EIO; 501 return -EIO;
545 olen = ext4_fname_crypto_round_up(ilen, EXT4_CRYPTO_BLOCK_SIZE); 502 if (padding < EXT4_CRYPTO_BLOCK_SIZE)
503 padding = EXT4_CRYPTO_BLOCK_SIZE;
504 olen = ext4_fname_crypto_round_up(ilen, padding);
546 crypto_str->len = olen; 505 crypto_str->len = olen;
547 if (olen < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2) 506 if (olen < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2)
548 olen = EXT4_FNAME_CRYPTO_DIGEST_SIZE*2; 507 olen = EXT4_FNAME_CRYPTO_DIGEST_SIZE*2;
@@ -571,9 +530,13 @@ void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str)
571 * ext4_fname_disk_to_usr() - converts a filename from disk space to user space 530 * ext4_fname_disk_to_usr() - converts a filename from disk space to user space
572 */ 531 */
573int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, 532int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
574 const struct ext4_str *iname, 533 struct dx_hash_info *hinfo,
575 struct ext4_str *oname) 534 const struct ext4_str *iname,
535 struct ext4_str *oname)
576{ 536{
537 char buf[24];
538 int ret;
539
577 if (ctx == NULL) 540 if (ctx == NULL)
578 return -EIO; 541 return -EIO;
579 if (iname->len < 3) { 542 if (iname->len < 3) {
@@ -587,18 +550,33 @@ int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
587 } 550 }
588 if (ctx->has_valid_key) 551 if (ctx->has_valid_key)
589 return ext4_fname_decrypt(ctx, iname, oname); 552 return ext4_fname_decrypt(ctx, iname, oname);
590 else 553
591 return ext4_fname_hash(ctx, iname, oname); 554 if (iname->len <= EXT4_FNAME_CRYPTO_DIGEST_SIZE) {
555 ret = digest_encode(iname->name, iname->len, oname->name);
556 oname->len = ret;
557 return ret;
558 }
559 if (hinfo) {
560 memcpy(buf, &hinfo->hash, 4);
561 memcpy(buf+4, &hinfo->minor_hash, 4);
562 } else
563 memset(buf, 0, 8);
564 memcpy(buf + 8, iname->name + iname->len - 16, 16);
565 oname->name[0] = '_';
566 ret = digest_encode(buf, 24, oname->name+1);
567 oname->len = ret + 1;
568 return ret + 1;
592} 569}
593 570
594int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, 571int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
572 struct dx_hash_info *hinfo,
595 const struct ext4_dir_entry_2 *de, 573 const struct ext4_dir_entry_2 *de,
596 struct ext4_str *oname) 574 struct ext4_str *oname)
597{ 575{
598 struct ext4_str iname = {.name = (unsigned char *) de->name, 576 struct ext4_str iname = {.name = (unsigned char *) de->name,
599 .len = de->name_len }; 577 .len = de->name_len };
600 578
601 return _ext4_fname_disk_to_usr(ctx, &iname, oname); 579 return _ext4_fname_disk_to_usr(ctx, hinfo, &iname, oname);
602} 580}
603 581
604 582
@@ -640,10 +618,11 @@ int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx,
640 const struct qstr *iname, 618 const struct qstr *iname,
641 struct dx_hash_info *hinfo) 619 struct dx_hash_info *hinfo)
642{ 620{
643 struct ext4_str tmp, tmp2; 621 struct ext4_str tmp;
644 int ret = 0; 622 int ret = 0;
623 char buf[EXT4_FNAME_CRYPTO_DIGEST_SIZE+1];
645 624
646 if (!ctx || !ctx->has_valid_key || 625 if (!ctx ||
647 ((iname->name[0] == '.') && 626 ((iname->name[0] == '.') &&
648 ((iname->len == 1) || 627 ((iname->len == 1) ||
649 ((iname->name[1] == '.') && (iname->len == 2))))) { 628 ((iname->name[1] == '.') && (iname->len == 2))))) {
@@ -651,59 +630,90 @@ int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx,
651 return 0; 630 return 0;
652 } 631 }
653 632
633 if (!ctx->has_valid_key && iname->name[0] == '_') {
634 if (iname->len != 33)
635 return -ENOENT;
636 ret = digest_decode(iname->name+1, iname->len, buf);
637 if (ret != 24)
638 return -ENOENT;
639 memcpy(&hinfo->hash, buf, 4);
640 memcpy(&hinfo->minor_hash, buf + 4, 4);
641 return 0;
642 }
643
644 if (!ctx->has_valid_key && iname->name[0] != '_') {
645 if (iname->len > 43)
646 return -ENOENT;
647 ret = digest_decode(iname->name, iname->len, buf);
648 ext4fs_dirhash(buf, ret, hinfo);
649 return 0;
650 }
651
654 /* First encrypt the plaintext name */ 652 /* First encrypt the plaintext name */
655 ret = ext4_fname_crypto_alloc_buffer(ctx, iname->len, &tmp); 653 ret = ext4_fname_crypto_alloc_buffer(ctx, iname->len, &tmp);
656 if (ret < 0) 654 if (ret < 0)
657 return ret; 655 return ret;
658 656
659 ret = ext4_fname_encrypt(ctx, iname, &tmp); 657 ret = ext4_fname_encrypt(ctx, iname, &tmp);
660 if (ret < 0) 658 if (ret >= 0) {
661 goto out; 659 ext4fs_dirhash(tmp.name, tmp.len, hinfo);
662 660 ret = 0;
663 tmp2.len = (4 * ((EXT4_FNAME_CRYPTO_DIGEST_SIZE + 2) / 3)) + 1;
664 tmp2.name = kmalloc(tmp2.len + 1, GFP_KERNEL);
665 if (tmp2.name == NULL) {
666 ret = -ENOMEM;
667 goto out;
668 } 661 }
669 662
670 ret = ext4_fname_hash(ctx, &tmp, &tmp2);
671 if (ret > 0)
672 ext4fs_dirhash(tmp2.name, tmp2.len, hinfo);
673 ext4_fname_crypto_free_buffer(&tmp2);
674out:
675 ext4_fname_crypto_free_buffer(&tmp); 663 ext4_fname_crypto_free_buffer(&tmp);
676 return ret; 664 return ret;
677} 665}
678 666
679/** 667int ext4_fname_match(struct ext4_fname_crypto_ctx *ctx, struct ext4_str *cstr,
680 * ext4_fname_disk_to_htree() - converts a filename from disk space to htree-access string 668 int len, const char * const name,
681 */ 669 struct ext4_dir_entry_2 *de)
682int ext4_fname_disk_to_hash(struct ext4_fname_crypto_ctx *ctx,
683 const struct ext4_dir_entry_2 *de,
684 struct dx_hash_info *hinfo)
685{ 670{
686 struct ext4_str iname = {.name = (unsigned char *) de->name, 671 int ret = -ENOENT;
687 .len = de->name_len}; 672 int bigname = (*name == '_');
688 struct ext4_str tmp;
689 int ret;
690 673
691 if (!ctx || 674 if (ctx->has_valid_key) {
692 ((iname.name[0] == '.') && 675 if (cstr->name == NULL) {
693 ((iname.len == 1) || 676 struct qstr istr;
694 ((iname.name[1] == '.') && (iname.len == 2))))) { 677
695 ext4fs_dirhash(iname.name, iname.len, hinfo); 678 ret = ext4_fname_crypto_alloc_buffer(ctx, len, cstr);
696 return 0; 679 if (ret < 0)
680 goto errout;
681 istr.name = name;
682 istr.len = len;
683 ret = ext4_fname_encrypt(ctx, &istr, cstr);
684 if (ret < 0)
685 goto errout;
686 }
687 } else {
688 if (cstr->name == NULL) {
689 cstr->name = kmalloc(32, GFP_KERNEL);
690 if (cstr->name == NULL)
691 return -ENOMEM;
692 if ((bigname && (len != 33)) ||
693 (!bigname && (len > 43)))
694 goto errout;
695 ret = digest_decode(name+bigname, len-bigname,
696 cstr->name);
697 if (ret < 0) {
698 ret = -ENOENT;
699 goto errout;
700 }
701 cstr->len = ret;
702 }
703 if (bigname) {
704 if (de->name_len < 16)
705 return 0;
706 ret = memcmp(de->name + de->name_len - 16,
707 cstr->name + 8, 16);
708 return (ret == 0) ? 1 : 0;
709 }
697 } 710 }
698 711 if (de->name_len != cstr->len)
699 tmp.len = (4 * ((EXT4_FNAME_CRYPTO_DIGEST_SIZE + 2) / 3)) + 1; 712 return 0;
700 tmp.name = kmalloc(tmp.len + 1, GFP_KERNEL); 713 ret = memcmp(de->name, cstr->name, cstr->len);
701 if (tmp.name == NULL) 714 return (ret == 0) ? 1 : 0;
702 return -ENOMEM; 715errout:
703 716 kfree(cstr->name);
704 ret = ext4_fname_hash(ctx, &iname, &tmp); 717 cstr->name = NULL;
705 if (ret > 0)
706 ext4fs_dirhash(tmp.name, tmp.len, hinfo);
707 ext4_fname_crypto_free_buffer(&tmp);
708 return ret; 718 return ret;
709} 719}
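
A quick way to sanity-check the new digest_encode()/digest_decode() pair is to run the same bit-packing in userspace. The program below is a standalone re-creation, not the kernel code: it assumes the 64-character alphabet and 6-bits-per-character packing shown in the hunk above are the whole story, and it round-trips the 24-byte buffer (4-byte hash, 4-byte minor_hash, last 16 ciphertext bytes) that _ext4_fname_disk_to_usr() builds for keyless lookups. 24 bytes encode to exactly 32 characters, which is why ext4_fname_usr_to_hash() insists that '_'-prefixed names are 33 characters long; assuming EXT4_FNAME_CRYPTO_DIGEST_SIZE is 32, a full digest encodes to at most ceil(256/6) = 43 characters, matching the other length check.

/*
 * Standalone sketch of the coder introduced above (same alphabet,
 * 6 bits per output character).  Userspace illustration only, not
 * the kernel code itself.
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

static const char *lookup_table =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";

static int demo_encode(const unsigned char *src, int len, char *dst)
{
        int i = 0, bits = 0, ac = 0;
        char *cp = dst;

        while (i < len) {
                ac += src[i] << bits;           /* queue up 8 more input bits */
                bits += 8;
                do {                            /* drain 6 bits at a time */
                        *cp++ = lookup_table[ac & 0x3f];
                        ac >>= 6;
                        bits -= 6;
                } while (bits >= 6);
                i++;
        }
        if (bits)                               /* flush the 2 or 4 leftover bits */
                *cp++ = lookup_table[ac & 0x3f];
        return cp - dst;
}

static int demo_decode(const char *src, int len, unsigned char *dst)
{
        int i = 0, bits = 0, ac = 0;
        unsigned char *cp = dst;
        const char *p;

        while (i < len) {
                /* strchr() would match the NUL terminator, so reject it explicitly */
                p = strchr(lookup_table, src[i]);
                if (p == NULL || src[i] == 0)
                        return -1;              /* character outside the alphabet */
                ac += (int)(p - lookup_table) << bits;
                bits += 6;
                if (bits >= 8) {
                        *cp++ = ac & 0xff;
                        ac >>= 8;
                        bits -= 8;
                }
                i++;
        }
        if (ac)                                 /* stray bits: not a valid encoding */
                return -1;
        return cp - dst;
}

int main(void)
{
        unsigned char in[24], out[32];
        char enc[40];
        int n, m;

        for (n = 0; n < 24; n++)                /* hash + minor_hash + last 16 bytes */
                in[n] = (unsigned char)(n * 37 + 1);
        n = demo_encode(in, 24, enc);
        m = demo_decode(enc, n, out);
        assert(n == 32 && m == 24 && memcmp(in, out, 24) == 0);
        printf("24 bytes -> %d characters -> %d bytes\n", n, m);
        return 0;
}
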
diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c
index c8392af8abbb..52170d0b7c40 100644
--- a/fs/ext4/crypto_key.c
+++ b/fs/ext4/crypto_key.c
@@ -110,6 +110,7 @@ int ext4_generate_encryption_key(struct inode *inode)
110 } 110 }
111 res = 0; 111 res = 0;
112 112
113 ei->i_crypt_policy_flags = ctx.flags;
113 if (S_ISREG(inode->i_mode)) 114 if (S_ISREG(inode->i_mode))
114 crypt_key->mode = ctx.contents_encryption_mode; 115 crypt_key->mode = ctx.contents_encryption_mode;
115 else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 116 else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
diff --git a/fs/ext4/crypto_policy.c b/fs/ext4/crypto_policy.c
index 30eaf9e9864a..a6d6291aea16 100644
--- a/fs/ext4/crypto_policy.c
+++ b/fs/ext4/crypto_policy.c
@@ -37,6 +37,8 @@ static int ext4_is_encryption_context_consistent_with_policy(
37 return 0; 37 return 0;
38 return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor, 38 return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor,
39 EXT4_KEY_DESCRIPTOR_SIZE) == 0 && 39 EXT4_KEY_DESCRIPTOR_SIZE) == 0 &&
40 (ctx.flags ==
41 policy->flags) &&
40 (ctx.contents_encryption_mode == 42 (ctx.contents_encryption_mode ==
41 policy->contents_encryption_mode) && 43 policy->contents_encryption_mode) &&
42 (ctx.filenames_encryption_mode == 44 (ctx.filenames_encryption_mode ==
@@ -56,25 +58,25 @@ static int ext4_create_encryption_context_from_policy(
56 printk(KERN_WARNING 58 printk(KERN_WARNING
57 "%s: Invalid contents encryption mode %d\n", __func__, 59 "%s: Invalid contents encryption mode %d\n", __func__,
58 policy->contents_encryption_mode); 60 policy->contents_encryption_mode);
59 res = -EINVAL; 61 return -EINVAL;
60 goto out;
61 } 62 }
62 if (!ext4_valid_filenames_enc_mode(policy->filenames_encryption_mode)) { 63 if (!ext4_valid_filenames_enc_mode(policy->filenames_encryption_mode)) {
63 printk(KERN_WARNING 64 printk(KERN_WARNING
64 "%s: Invalid filenames encryption mode %d\n", __func__, 65 "%s: Invalid filenames encryption mode %d\n", __func__,
65 policy->filenames_encryption_mode); 66 policy->filenames_encryption_mode);
66 res = -EINVAL; 67 return -EINVAL;
67 goto out;
68 } 68 }
69 if (policy->flags & ~EXT4_POLICY_FLAGS_VALID)
70 return -EINVAL;
69 ctx.contents_encryption_mode = policy->contents_encryption_mode; 71 ctx.contents_encryption_mode = policy->contents_encryption_mode;
70 ctx.filenames_encryption_mode = policy->filenames_encryption_mode; 72 ctx.filenames_encryption_mode = policy->filenames_encryption_mode;
73 ctx.flags = policy->flags;
71 BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE); 74 BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE);
72 get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE); 75 get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE);
73 76
74 res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION, 77 res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
75 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, 78 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
76 sizeof(ctx), 0); 79 sizeof(ctx), 0);
77out:
78 if (!res) 80 if (!res)
79 ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT); 81 ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
80 return res; 82 return res;
@@ -115,6 +117,7 @@ int ext4_get_policy(struct inode *inode, struct ext4_encryption_policy *policy)
115 policy->version = 0; 117 policy->version = 0;
116 policy->contents_encryption_mode = ctx.contents_encryption_mode; 118 policy->contents_encryption_mode = ctx.contents_encryption_mode;
117 policy->filenames_encryption_mode = ctx.filenames_encryption_mode; 119 policy->filenames_encryption_mode = ctx.filenames_encryption_mode;
120 policy->flags = ctx.flags;
118 memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor, 121 memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor,
119 EXT4_KEY_DESCRIPTOR_SIZE); 122 EXT4_KEY_DESCRIPTOR_SIZE);
120 return 0; 123 return 0;
@@ -176,6 +179,7 @@ int ext4_inherit_context(struct inode *parent, struct inode *child)
176 EXT4_ENCRYPTION_MODE_AES_256_XTS; 179 EXT4_ENCRYPTION_MODE_AES_256_XTS;
177 ctx.filenames_encryption_mode = 180 ctx.filenames_encryption_mode =
178 EXT4_ENCRYPTION_MODE_AES_256_CTS; 181 EXT4_ENCRYPTION_MODE_AES_256_CTS;
182 ctx.flags = 0;
179 memset(ctx.master_key_descriptor, 0x42, 183 memset(ctx.master_key_descriptor, 0x42,
180 EXT4_KEY_DESCRIPTOR_SIZE); 184 EXT4_KEY_DESCRIPTOR_SIZE);
181 res = 0; 185 res = 0;
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 61db51a5ce4c..5665d82d2332 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -249,7 +249,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
249 } else { 249 } else {
250 /* Directory is encrypted */ 250 /* Directory is encrypted */
251 err = ext4_fname_disk_to_usr(enc_ctx, 251 err = ext4_fname_disk_to_usr(enc_ctx,
252 de, &fname_crypto_str); 252 NULL, de, &fname_crypto_str);
253 if (err < 0) 253 if (err < 0)
254 goto errout; 254 goto errout;
255 if (!dir_emit(ctx, 255 if (!dir_emit(ctx,
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index ef267adce19a..009a0590b20f 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -911,6 +911,7 @@ struct ext4_inode_info {
911 911
912 /* on-disk additional length */ 912 /* on-disk additional length */
913 __u16 i_extra_isize; 913 __u16 i_extra_isize;
914 char i_crypt_policy_flags;
914 915
915 /* Indicate the inline data space. */ 916 /* Indicate the inline data space. */
916 u16 i_inline_off; 917 u16 i_inline_off;
@@ -1066,12 +1067,6 @@ extern void ext4_set_bits(void *bm, int cur, int len);
1066/* Metadata checksum algorithm codes */ 1067/* Metadata checksum algorithm codes */
1067#define EXT4_CRC32C_CHKSUM 1 1068#define EXT4_CRC32C_CHKSUM 1
1068 1069
1069/* Encryption algorithms */
1070#define EXT4_ENCRYPTION_MODE_INVALID 0
1071#define EXT4_ENCRYPTION_MODE_AES_256_XTS 1
1072#define EXT4_ENCRYPTION_MODE_AES_256_GCM 2
1073#define EXT4_ENCRYPTION_MODE_AES_256_CBC 3
1074
1075/* 1070/*
1076 * Structure of the super block 1071 * Structure of the super block
1077 */ 1072 */
@@ -2093,9 +2088,11 @@ u32 ext4_fname_crypto_round_up(u32 size, u32 blksize);
2093int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx, 2088int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx,
2094 u32 ilen, struct ext4_str *crypto_str); 2089 u32 ilen, struct ext4_str *crypto_str);
2095int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, 2090int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
2091 struct dx_hash_info *hinfo,
2096 const struct ext4_str *iname, 2092 const struct ext4_str *iname,
2097 struct ext4_str *oname); 2093 struct ext4_str *oname);
2098int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, 2094int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
2095 struct dx_hash_info *hinfo,
2099 const struct ext4_dir_entry_2 *de, 2096 const struct ext4_dir_entry_2 *de,
2100 struct ext4_str *oname); 2097 struct ext4_str *oname);
2101int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx, 2098int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx,
@@ -2104,11 +2101,12 @@ int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx,
2104int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx, 2101int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx,
2105 const struct qstr *iname, 2102 const struct qstr *iname,
2106 struct dx_hash_info *hinfo); 2103 struct dx_hash_info *hinfo);
2107int ext4_fname_disk_to_hash(struct ext4_fname_crypto_ctx *ctx,
2108 const struct ext4_dir_entry_2 *de,
2109 struct dx_hash_info *hinfo);
2110int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx, 2104int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx,
2111 u32 namelen); 2105 u32 namelen);
2106int ext4_fname_match(struct ext4_fname_crypto_ctx *ctx, struct ext4_str *cstr,
2107 int len, const char * const name,
2108 struct ext4_dir_entry_2 *de);
2109
2112 2110
2113#ifdef CONFIG_EXT4_FS_ENCRYPTION 2111#ifdef CONFIG_EXT4_FS_ENCRYPTION
2114void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx); 2112void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx);
diff --git a/fs/ext4/ext4_crypto.h b/fs/ext4/ext4_crypto.h
index c2ba35a914b6..d75159c101ce 100644
--- a/fs/ext4/ext4_crypto.h
+++ b/fs/ext4/ext4_crypto.h
@@ -20,12 +20,20 @@ struct ext4_encryption_policy {
20 char version; 20 char version;
21 char contents_encryption_mode; 21 char contents_encryption_mode;
22 char filenames_encryption_mode; 22 char filenames_encryption_mode;
23 char flags;
23 char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE]; 24 char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
24} __attribute__((__packed__)); 25} __attribute__((__packed__));
25 26
26#define EXT4_ENCRYPTION_CONTEXT_FORMAT_V1 1 27#define EXT4_ENCRYPTION_CONTEXT_FORMAT_V1 1
27#define EXT4_KEY_DERIVATION_NONCE_SIZE 16 28#define EXT4_KEY_DERIVATION_NONCE_SIZE 16
28 29
30#define EXT4_POLICY_FLAGS_PAD_4 0x00
31#define EXT4_POLICY_FLAGS_PAD_8 0x01
32#define EXT4_POLICY_FLAGS_PAD_16 0x02
33#define EXT4_POLICY_FLAGS_PAD_32 0x03
34#define EXT4_POLICY_FLAGS_PAD_MASK 0x03
35#define EXT4_POLICY_FLAGS_VALID 0x03
36
29/** 37/**
30 * Encryption context for inode 38 * Encryption context for inode
31 * 39 *
@@ -41,7 +49,7 @@ struct ext4_encryption_context {
41 char format; 49 char format;
42 char contents_encryption_mode; 50 char contents_encryption_mode;
43 char filenames_encryption_mode; 51 char filenames_encryption_mode;
44 char reserved; 52 char flags;
45 char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE]; 53 char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
46 char nonce[EXT4_KEY_DERIVATION_NONCE_SIZE]; 54 char nonce[EXT4_KEY_DERIVATION_NONCE_SIZE];
47} __attribute__((__packed__)); 55} __attribute__((__packed__));
@@ -120,6 +128,7 @@ struct ext4_fname_crypto_ctx {
120 struct crypto_hash *htfm; 128 struct crypto_hash *htfm;
121 struct page *workpage; 129 struct page *workpage;
122 struct ext4_encryption_key key; 130 struct ext4_encryption_key key;
131 unsigned flags : 8;
123 unsigned has_valid_key : 1; 132 unsigned has_valid_key : 1;
124 unsigned ctfm_key_is_ready : 1; 133 unsigned ctfm_key_is_ready : 1;
125}; 134};
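
The new flags byte selects the on-disk filename padding. In the crypto_fname.c hunks above, 4 << (flags & EXT4_POLICY_FLAGS_PAD_MASK) maps PAD_4/PAD_8/PAD_16/PAD_32 to 4, 8, 16 and 32 bytes, and the ciphertext length is rounded up to that granularity via ext4_fname_crypto_round_up(). The sketch below mirrors that arithmetic in userspace; round_up_to() and the 16-byte EXT4_CRYPTO_BLOCK_SIZE value are assumptions for illustration, not definitions taken from this patch.

#include <stdio.h>

#define EXT4_POLICY_FLAGS_PAD_4    0x00
#define EXT4_POLICY_FLAGS_PAD_8    0x01
#define EXT4_POLICY_FLAGS_PAD_16   0x02
#define EXT4_POLICY_FLAGS_PAD_32   0x03
#define EXT4_POLICY_FLAGS_PAD_MASK 0x03

/* assumed behaviour of ext4_fname_crypto_round_up(): round size up to the
 * next multiple of blksize (blksize is a power of two here) */
static unsigned int round_up_to(unsigned int size, unsigned int blksize)
{
        return (size + blksize - 1) & ~(blksize - 1);
}

int main(void)
{
        unsigned int flags, namelen = 13;
        unsigned int blocksize = 16;    /* assumed EXT4_CRYPTO_BLOCK_SIZE */

        for (flags = EXT4_POLICY_FLAGS_PAD_4;
             flags <= EXT4_POLICY_FLAGS_PAD_32; flags++) {
                unsigned int padding = 4 << (flags & EXT4_POLICY_FLAGS_PAD_MASK);
                unsigned int len = namelen < blocksize ? blocksize : namelen;

                printf("flags=0x%02x padding=%2u: %u-byte name stored as %u bytes\n",
                       flags, padding, namelen, round_up_to(len, padding));
        }
        return 0;
}
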
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 973816bfe4a9..d74e08029643 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4927,13 +4927,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4927 if (ret) 4927 if (ret)
4928 return ret; 4928 return ret;
4929 4929
4930 /*
4931 * currently supporting (pre)allocate mode for extent-based
4932 * files _only_
4933 */
4934 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4935 return -EOPNOTSUPP;
4936
4937 if (mode & FALLOC_FL_COLLAPSE_RANGE) 4930 if (mode & FALLOC_FL_COLLAPSE_RANGE)
4938 return ext4_collapse_range(inode, offset, len); 4931 return ext4_collapse_range(inode, offset, len);
4939 4932
@@ -4955,6 +4948,14 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4955 4948
4956 mutex_lock(&inode->i_mutex); 4949 mutex_lock(&inode->i_mutex);
4957 4950
4951 /*
4952 * We only support preallocation for extent-based files only
4953 */
4954 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4955 ret = -EOPNOTSUPP;
4956 goto out;
4957 }
4958
4958 if (!(mode & FALLOC_FL_KEEP_SIZE) && 4959 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4959 offset + len > i_size_read(inode)) { 4960 offset + len > i_size_read(inode)) {
4960 new_size = offset + len; 4961 new_size = offset + len;
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index d33d5a6852b9..26724aeece73 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -703,6 +703,14 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
703 703
704 BUG_ON(end < lblk); 704 BUG_ON(end < lblk);
705 705
706 if ((status & EXTENT_STATUS_DELAYED) &&
707 (status & EXTENT_STATUS_WRITTEN)) {
708 ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
709 " delayed and written which can potentially "
710 " cause data loss.\n", lblk, len);
711 WARN_ON(1);
712 }
713
706 newes.es_lblk = lblk; 714 newes.es_lblk = lblk;
707 newes.es_len = len; 715 newes.es_len = len;
708 ext4_es_store_pblock_status(&newes, pblk, status); 716 ext4_es_store_pblock_status(&newes, pblk, status);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index cbd0654a2675..55b187c3bac1 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -531,6 +531,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
531 status = map->m_flags & EXT4_MAP_UNWRITTEN ? 531 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
532 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; 532 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
533 if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && 533 if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
534 !(status & EXTENT_STATUS_WRITTEN) &&
534 ext4_find_delalloc_range(inode, map->m_lblk, 535 ext4_find_delalloc_range(inode, map->m_lblk,
535 map->m_lblk + map->m_len - 1)) 536 map->m_lblk + map->m_len - 1))
536 status |= EXTENT_STATUS_DELAYED; 537 status |= EXTENT_STATUS_DELAYED;
@@ -635,6 +636,7 @@ found:
635 status = map->m_flags & EXT4_MAP_UNWRITTEN ? 636 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
636 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; 637 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
637 if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && 638 if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
639 !(status & EXTENT_STATUS_WRITTEN) &&
638 ext4_find_delalloc_range(inode, map->m_lblk, 640 ext4_find_delalloc_range(inode, map->m_lblk,
639 map->m_lblk + map->m_len - 1)) 641 map->m_lblk + map->m_len - 1))
640 status |= EXTENT_STATUS_DELAYED; 642 status |= EXTENT_STATUS_DELAYED;
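
The two ext4 hunks above (extents_status.c and inode.c) enforce the same invariant from both ends: an extent should never be tracked as both WRITTEN and DELAYED, so ext4_map_blocks() now skips the DELAYED bit for already-written extents and ext4_es_insert_extent() warns if the combination ever shows up. A minimal sketch of the idea, with placeholder bit values rather than the kernel's real EXTENT_STATUS_* flags:

#include <stdio.h>

#define STATUS_WRITTEN   (1u << 0)      /* placeholder bit values */
#define STATUS_UNWRITTEN (1u << 1)
#define STATUS_DELAYED   (1u << 2)

/* mirror of the amended ext4_map_blocks() logic: only extents that are
 * not already WRITTEN may pick up the DELAYED bit */
static unsigned int classify(unsigned int status, int in_delalloc_range)
{
        if (!(status & STATUS_WRITTEN) && in_delalloc_range)
                status |= STATUS_DELAYED;
        return status;
}

int main(void)
{
        unsigned int s = classify(STATUS_WRITTEN, 1);

        if ((s & STATUS_DELAYED) && (s & STATUS_WRITTEN))
                printf("would trigger the new ext4_warning()\n");
        else
                printf("status 0x%x is consistent\n", s);
        return 0;
}
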
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 7223b0b4bc38..814f3beb4369 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -640,7 +640,7 @@ static struct stats dx_show_leaf(struct inode *dir,
640 ext4_put_fname_crypto_ctx(&ctx); 640 ext4_put_fname_crypto_ctx(&ctx);
641 ctx = NULL; 641 ctx = NULL;
642 } 642 }
643 res = ext4_fname_disk_to_usr(ctx, de, 643 res = ext4_fname_disk_to_usr(ctx, NULL, de,
644 &fname_crypto_str); 644 &fname_crypto_str);
645 if (res < 0) { 645 if (res < 0) {
646 printk(KERN_WARNING "Error " 646 printk(KERN_WARNING "Error "
@@ -653,15 +653,8 @@ static struct stats dx_show_leaf(struct inode *dir,
653 name = fname_crypto_str.name; 653 name = fname_crypto_str.name;
654 len = fname_crypto_str.len; 654 len = fname_crypto_str.len;
655 } 655 }
656 res = ext4_fname_disk_to_hash(ctx, de, 656 ext4fs_dirhash(de->name, de->name_len,
657 &h); 657 &h);
658 if (res < 0) {
659 printk(KERN_WARNING "Error "
660 "converting filename "
661 "from disk to htree"
662 "\n");
663 h.hash = 0xDEADBEEF;
664 }
665 printk("%*.s:(E)%x.%u ", len, name, 658 printk("%*.s:(E)%x.%u ", len, name,
666 h.hash, (unsigned) ((char *) de 659 h.hash, (unsigned) ((char *) de
667 - base)); 660 - base));
@@ -1008,15 +1001,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
1008 /* silently ignore the rest of the block */ 1001 /* silently ignore the rest of the block */
1009 break; 1002 break;
1010 } 1003 }
1011#ifdef CONFIG_EXT4_FS_ENCRYPTION
1012 err = ext4_fname_disk_to_hash(ctx, de, hinfo);
1013 if (err < 0) {
1014 count = err;
1015 goto errout;
1016 }
1017#else
1018 ext4fs_dirhash(de->name, de->name_len, hinfo); 1004 ext4fs_dirhash(de->name, de->name_len, hinfo);
1019#endif
1020 if ((hinfo->hash < start_hash) || 1005 if ((hinfo->hash < start_hash) ||
1021 ((hinfo->hash == start_hash) && 1006 ((hinfo->hash == start_hash) &&
1022 (hinfo->minor_hash < start_minor_hash))) 1007 (hinfo->minor_hash < start_minor_hash)))
@@ -1032,7 +1017,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
1032 &tmp_str); 1017 &tmp_str);
1033 } else { 1018 } else {
1034 /* Directory is encrypted */ 1019 /* Directory is encrypted */
1035 err = ext4_fname_disk_to_usr(ctx, de, 1020 err = ext4_fname_disk_to_usr(ctx, hinfo, de,
1036 &fname_crypto_str); 1021 &fname_crypto_str);
1037 if (err < 0) { 1022 if (err < 0) {
1038 count = err; 1023 count = err;
@@ -1193,26 +1178,10 @@ static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
1193 int count = 0; 1178 int count = 0;
1194 char *base = (char *) de; 1179 char *base = (char *) de;
1195 struct dx_hash_info h = *hinfo; 1180 struct dx_hash_info h = *hinfo;
1196#ifdef CONFIG_EXT4_FS_ENCRYPTION
1197 struct ext4_fname_crypto_ctx *ctx = NULL;
1198 int err;
1199
1200 ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
1201 if (IS_ERR(ctx))
1202 return PTR_ERR(ctx);
1203#endif
1204 1181
1205 while ((char *) de < base + blocksize) { 1182 while ((char *) de < base + blocksize) {
1206 if (de->name_len && de->inode) { 1183 if (de->name_len && de->inode) {
1207#ifdef CONFIG_EXT4_FS_ENCRYPTION
1208 err = ext4_fname_disk_to_hash(ctx, de, &h);
1209 if (err < 0) {
1210 ext4_put_fname_crypto_ctx(&ctx);
1211 return err;
1212 }
1213#else
1214 ext4fs_dirhash(de->name, de->name_len, &h); 1184 ext4fs_dirhash(de->name, de->name_len, &h);
1215#endif
1216 map_tail--; 1185 map_tail--;
1217 map_tail->hash = h.hash; 1186 map_tail->hash = h.hash;
1218 map_tail->offs = ((char *) de - base)>>2; 1187 map_tail->offs = ((char *) de - base)>>2;
@@ -1223,9 +1192,6 @@ static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
1223 /* XXX: do we need to check rec_len == 0 case? -Chris */ 1192 /* XXX: do we need to check rec_len == 0 case? -Chris */
1224 de = ext4_next_entry(de, blocksize); 1193 de = ext4_next_entry(de, blocksize);
1225 } 1194 }
1226#ifdef CONFIG_EXT4_FS_ENCRYPTION
1227 ext4_put_fname_crypto_ctx(&ctx);
1228#endif
1229 return count; 1195 return count;
1230} 1196}
1231 1197
@@ -1287,16 +1253,8 @@ static inline int ext4_match(struct ext4_fname_crypto_ctx *ctx,
1287 return 0; 1253 return 0;
1288 1254
1289#ifdef CONFIG_EXT4_FS_ENCRYPTION 1255#ifdef CONFIG_EXT4_FS_ENCRYPTION
1290 if (ctx) { 1256 if (ctx)
1291 /* Directory is encrypted */ 1257 return ext4_fname_match(ctx, fname_crypto_str, len, name, de);
1292 res = ext4_fname_disk_to_usr(ctx, de, fname_crypto_str);
1293 if (res < 0)
1294 return res;
1295 if (len != res)
1296 return 0;
1297 res = memcmp(name, fname_crypto_str->name, len);
1298 return (res == 0) ? 1 : 0;
1299 }
1300#endif 1258#endif
1301 if (len != de->name_len) 1259 if (len != de->name_len)
1302 return 0; 1260 return 0;
@@ -1324,16 +1282,6 @@ int search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
1324 if (IS_ERR(ctx)) 1282 if (IS_ERR(ctx))
1325 return -1; 1283 return -1;
1326 1284
1327 if (ctx != NULL) {
1328 /* Allocate buffer to hold maximum name length */
1329 res = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN,
1330 &fname_crypto_str);
1331 if (res < 0) {
1332 ext4_put_fname_crypto_ctx(&ctx);
1333 return -1;
1334 }
1335 }
1336
1337 de = (struct ext4_dir_entry_2 *)search_buf; 1285 de = (struct ext4_dir_entry_2 *)search_buf;
1338 dlimit = search_buf + buf_size; 1286 dlimit = search_buf + buf_size;
1339 while ((char *) de < dlimit) { 1287 while ((char *) de < dlimit) {
@@ -1872,14 +1820,6 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode,
1872 return res; 1820 return res;
1873 } 1821 }
1874 reclen = EXT4_DIR_REC_LEN(res); 1822 reclen = EXT4_DIR_REC_LEN(res);
1875
1876 /* Allocate buffer to hold maximum name length */
1877 res = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN,
1878 &fname_crypto_str);
1879 if (res < 0) {
1880 ext4_put_fname_crypto_ctx(&ctx);
1881 return -1;
1882 }
1883 } 1823 }
1884 1824
1885 de = (struct ext4_dir_entry_2 *)buf; 1825 de = (struct ext4_dir_entry_2 *)buf;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 8a8ec6293b19..cf0c472047e3 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1432,12 +1432,15 @@ static int ext4_flex_group_add(struct super_block *sb,
1432 goto exit; 1432 goto exit;
1433 /* 1433 /*
1434 * We will always be modifying at least the superblock and GDT 1434 * We will always be modifying at least the superblock and GDT
1435 * block. If we are adding a group past the last current GDT block, 1435 * blocks. If we are adding a group past the last current GDT block,
1436 * we will also modify the inode and the dindirect block. If we 1436 * we will also modify the inode and the dindirect block. If we
1437 * are adding a group with superblock/GDT backups we will also 1437 * are adding a group with superblock/GDT backups we will also
1438 * modify each of the reserved GDT dindirect blocks. 1438 * modify each of the reserved GDT dindirect blocks.
1439 */ 1439 */
1440 credit = flex_gd->count * 4 + reserved_gdb; 1440 credit = 3; /* sb, resize inode, resize inode dindirect */
1441 /* GDT blocks */
1442 credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
1443 credit += reserved_gdb; /* Reserved GDT dindirect blocks */
1441 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit); 1444 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
1442 if (IS_ERR(handle)) { 1445 if (IS_ERR(handle)) {
1443 err = PTR_ERR(handle); 1446 err = PTR_ERR(handle);
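
The resize change replaces a journal-credit estimate that grew linearly with the number of added groups by one that counts the blocks actually dirtied: superblock, resize inode and its dindirect, the GDT blocks that hold the new descriptors, and the reserved GDT dindirects. A rough userspace calculation (the 128-descriptors-per-block figure and the sample counts are assumptions for illustration) shows how far apart the two estimates can be:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int desc_per_block = 128;      /* assumed EXT4_DESC_PER_BLOCK */
        unsigned int flex_count = 512;          /* groups added in this flex batch */
        unsigned int reserved_gdb = 2;          /* reserved GDT dindirect blocks */
        unsigned int credit;

        credit = 3;                             /* sb + resize inode + its dindirect */
        credit += 1 + DIV_ROUND_UP(flex_count, desc_per_block); /* GDT blocks */
        credit += reserved_gdb;
        printf("old estimate: %u credits, new estimate: %u credits\n",
               flex_count * 4 + reserved_gdb, credit);
        return 0;
}
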
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index 19f78f20975e..187b78920314 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -74,7 +74,7 @@ static void *ext4_follow_link(struct dentry *dentry, struct nameidata *nd)
74 goto errout; 74 goto errout;
75 } 75 }
76 pstr.name = paddr; 76 pstr.name = paddr;
77 res = _ext4_fname_disk_to_usr(ctx, &cstr, &pstr); 77 res = _ext4_fname_disk_to_usr(ctx, NULL, &cstr, &pstr);
78 if (res < 0) 78 if (res < 0)
79 goto errout; 79 goto errout;
80 /* Null-terminate the name */ 80 /* Null-terminate the name */
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index f5ca0e989bba..1c3002e1db20 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -124,7 +124,6 @@
124#ifndef ACPI_USE_SYSTEM_INTTYPES 124#ifndef ACPI_USE_SYSTEM_INTTYPES
125 125
126typedef unsigned char u8; 126typedef unsigned char u8;
127typedef unsigned char u8;
128typedef unsigned short u16; 127typedef unsigned short u16;
129typedef short s16; 128typedef short s16;
130typedef COMPILER_DEPENDENT_UINT64 u64; 129typedef COMPILER_DEPENDENT_UINT64 u64;
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index e60a745ac198..e804306ef5e8 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -40,6 +40,10 @@
40#error KEXEC_CONTROL_MEMORY_LIMIT not defined 40#error KEXEC_CONTROL_MEMORY_LIMIT not defined
41#endif 41#endif
42 42
43#ifndef KEXEC_CONTROL_MEMORY_GFP
44#define KEXEC_CONTROL_MEMORY_GFP GFP_KERNEL
45#endif
46
43#ifndef KEXEC_CONTROL_PAGE_SIZE 47#ifndef KEXEC_CONTROL_PAGE_SIZE
44#error KEXEC_CONTROL_PAGE_SIZE not defined 48#error KEXEC_CONTROL_PAGE_SIZE not defined
45#endif 49#endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index bcbde799ec69..1899c74a7127 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -60,6 +60,7 @@ struct phy_device;
60struct wireless_dev; 60struct wireless_dev;
61/* 802.15.4 specific */ 61/* 802.15.4 specific */
62struct wpan_dev; 62struct wpan_dev;
63struct mpls_dev;
63 64
64void netdev_set_default_ethtool_ops(struct net_device *dev, 65void netdev_set_default_ethtool_ops(struct net_device *dev,
65 const struct ethtool_ops *ops); 66 const struct ethtool_ops *ops);
@@ -976,7 +977,8 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
976 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, 977 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
977 * u16 flags) 978 * u16 flags)
978 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, 979 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
979 * struct net_device *dev, u32 filter_mask) 980 * struct net_device *dev, u32 filter_mask,
981 * int nlflags)
980 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh, 982 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
981 * u16 flags); 983 * u16 flags);
982 * 984 *
@@ -1172,7 +1174,8 @@ struct net_device_ops {
1172 int (*ndo_bridge_getlink)(struct sk_buff *skb, 1174 int (*ndo_bridge_getlink)(struct sk_buff *skb,
1173 u32 pid, u32 seq, 1175 u32 pid, u32 seq,
1174 struct net_device *dev, 1176 struct net_device *dev,
1175 u32 filter_mask); 1177 u32 filter_mask,
1178 int nlflags);
1176 int (*ndo_bridge_dellink)(struct net_device *dev, 1179 int (*ndo_bridge_dellink)(struct net_device *dev,
1177 struct nlmsghdr *nlh, 1180 struct nlmsghdr *nlh,
1178 u16 flags); 1181 u16 flags);
@@ -1627,6 +1630,9 @@ struct net_device {
1627 void *ax25_ptr; 1630 void *ax25_ptr;
1628 struct wireless_dev *ieee80211_ptr; 1631 struct wireless_dev *ieee80211_ptr;
1629 struct wpan_dev *ieee802154_ptr; 1632 struct wpan_dev *ieee802154_ptr;
1633#if IS_ENABLED(CONFIG_MPLS_ROUTING)
1634 struct mpls_dev __rcu *mpls_ptr;
1635#endif
1630 1636
1631/* 1637/*
1632 * Cache lines mostly used on receive path (including eth_type_trans()) 1638 * Cache lines mostly used on receive path (including eth_type_trans())
@@ -2021,10 +2027,10 @@ struct pcpu_sw_netstats {
2021({ \ 2027({ \
2022 typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \ 2028 typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
2023 if (pcpu_stats) { \ 2029 if (pcpu_stats) { \
2024 int i; \ 2030 int __cpu; \
2025 for_each_possible_cpu(i) { \ 2031 for_each_possible_cpu(__cpu) { \
2026 typeof(type) *stat; \ 2032 typeof(type) *stat; \
2027 stat = per_cpu_ptr(pcpu_stats, i); \ 2033 stat = per_cpu_ptr(pcpu_stats, __cpu); \
2028 u64_stats_init(&stat->syncp); \ 2034 u64_stats_init(&stat->syncp); \
2029 } \ 2035 } \
2030 } \ 2036 } \
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index ab8f76dba668..f2fdb5a52070 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -39,12 +39,24 @@ static inline void br_drop_fake_rtable(struct sk_buff *skb)
39 39
40static inline int nf_bridge_get_physinif(const struct sk_buff *skb) 40static inline int nf_bridge_get_physinif(const struct sk_buff *skb)
41{ 41{
42 return skb->nf_bridge ? skb->nf_bridge->physindev->ifindex : 0; 42 struct nf_bridge_info *nf_bridge;
43
44 if (skb->nf_bridge == NULL)
45 return 0;
46
47 nf_bridge = skb->nf_bridge;
48 return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0;
43} 49}
44 50
45static inline int nf_bridge_get_physoutif(const struct sk_buff *skb) 51static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
46{ 52{
47 return skb->nf_bridge ? skb->nf_bridge->physoutdev->ifindex : 0; 53 struct nf_bridge_info *nf_bridge;
54
55 if (skb->nf_bridge == NULL)
56 return 0;
57
58 nf_bridge = skb->nf_bridge;
59 return nf_bridge->physoutdev ? nf_bridge->physoutdev->ifindex : 0;
48} 60}
49 61
50static inline struct net_device * 62static inline struct net_device *
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index e23d242d1230..dbcbcc59aa92 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -282,7 +282,8 @@ static inline bool rht_shrink_below_30(const struct rhashtable *ht,
282static inline bool rht_grow_above_100(const struct rhashtable *ht, 282static inline bool rht_grow_above_100(const struct rhashtable *ht,
283 const struct bucket_table *tbl) 283 const struct bucket_table *tbl)
284{ 284{
285 return atomic_read(&ht->nelems) > tbl->size; 285 return atomic_read(&ht->nelems) > tbl->size &&
286 (!ht->p.max_size || tbl->size < ht->p.max_size);
286} 287}
287 288
288/* The bucket lock is selected based on the hash and protects mutations 289/* The bucket lock is selected based on the hash and protects mutations
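
The rhashtable tweak stops rht_grow_above_100() from requesting growth once the table has already reached the configured max_size (a max_size of 0 still means unlimited). A small standalone version of the predicate, using plain integers instead of the kernel structures:

#include <stdbool.h>
#include <stdio.h>

struct params { unsigned int max_size; };

static bool grow_above_100(unsigned int nelems, unsigned int tbl_size,
                           const struct params *p)
{
        /* same shape as the amended inline: over 100% load, but never
         * past an explicit max_size */
        return nelems > tbl_size &&
               (!p->max_size || tbl_size < p->max_size);
}

int main(void)
{
        struct params capped = { .max_size = 64 }, uncapped = { .max_size = 0 };

        printf("%d %d %d\n",
               grow_above_100(70, 64, &uncapped),       /* 1: grow as before */
               grow_above_100(70, 64, &capped),         /* 0: already at the cap */
               grow_above_100(70, 32, &capped));        /* 1: still below the cap */
        return 0;
}
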
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 2da5d1081ad9..7b8e260c4a27 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -122,5 +122,5 @@ extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
122 122
123extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 123extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
124 struct net_device *dev, u16 mode, 124 struct net_device *dev, u16 mode,
125 u32 flags, u32 mask); 125 u32 flags, u32 mask, int nlflags);
126#endif /* __LINUX_RTNETLINK_H */ 126#endif /* __LINUX_RTNETLINK_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8222ae40ecb0..26a2e6122734 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -175,14 +175,6 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
175extern void calc_global_load(unsigned long ticks); 175extern void calc_global_load(unsigned long ticks);
176extern void update_cpu_load_nohz(void); 176extern void update_cpu_load_nohz(void);
177 177
178/* Notifier for when a task gets migrated to a new CPU */
179struct task_migration_notifier {
180 struct task_struct *task;
181 int from_cpu;
182 int to_cpu;
183};
184extern void register_task_migration_notifier(struct notifier_block *n);
185
186extern unsigned long get_parent_ip(unsigned long addr); 178extern unsigned long get_parent_ip(unsigned long addr);
187 179
188extern void dump_cpu_task(int cpu); 180extern void dump_cpu_task(int cpu);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 06793b598f44..66e374d62f64 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -773,6 +773,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
773 773
774struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags, 774struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
775 int node); 775 int node);
776struct sk_buff *__build_skb(void *data, unsigned int frag_size);
776struct sk_buff *build_skb(void *data, unsigned int frag_size); 777struct sk_buff *build_skb(void *data, unsigned int frag_size);
777static inline struct sk_buff *alloc_skb(unsigned int size, 778static inline struct sk_buff *alloc_skb(unsigned int size,
778 gfp_t priority) 779 gfp_t priority)
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 358a337af598..fe5623c9af71 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -491,6 +491,7 @@ static inline speed_t tty_get_baud_rate(struct tty_struct *tty)
491 491
492extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old); 492extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old);
493extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b); 493extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b);
494extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt);
494 495
495extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); 496extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *);
496extern void tty_ldisc_deref(struct tty_ldisc *); 497extern void tty_ldisc_deref(struct tty_ldisc *);
diff --git a/include/net/bonding.h b/include/net/bonding.h
index fda6feeb6c1f..78ed135e9dea 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -30,13 +30,6 @@
30#include <net/bond_alb.h> 30#include <net/bond_alb.h>
31#include <net/bond_options.h> 31#include <net/bond_options.h>
32 32
33#define DRV_VERSION "3.7.1"
34#define DRV_RELDATE "April 27, 2011"
35#define DRV_NAME "bonding"
36#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
37
38#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
39
40#define BOND_MAX_ARP_TARGETS 16 33#define BOND_MAX_ARP_TARGETS 16
41 34
42#define BOND_DEFAULT_MIIMON 100 35#define BOND_DEFAULT_MIIMON 100
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 7b5887cd1172..48a815823587 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -279,12 +279,6 @@ static inline void inet_csk_reqsk_queue_add(struct sock *sk,
279void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, 279void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
280 unsigned long timeout); 280 unsigned long timeout);
281 281
282static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
283 struct request_sock *req)
284{
285 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
286}
287
288static inline void inet_csk_reqsk_queue_added(struct sock *sk, 282static inline void inet_csk_reqsk_queue_added(struct sock *sk,
289 const unsigned long timeout) 283 const unsigned long timeout)
290{ 284{
@@ -306,19 +300,7 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
306 return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue); 300 return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
307} 301}
308 302
309static inline void inet_csk_reqsk_queue_unlink(struct sock *sk, 303void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
310 struct request_sock *req)
311{
312 reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req);
313}
314
315static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
316 struct request_sock *req)
317{
318 inet_csk_reqsk_queue_unlink(sk, req);
319 inet_csk_reqsk_queue_removed(sk, req);
320 reqsk_put(req);
321}
322 304
323void inet_csk_destroy_sock(struct sock *sk); 305void inet_csk_destroy_sock(struct sock *sk);
324void inet_csk_prepare_forced_close(struct sock *sk); 306void inet_csk_prepare_forced_close(struct sock *sk);
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index fe41f3ceb008..9f4265ce8892 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -212,24 +212,6 @@ static inline int reqsk_queue_empty(struct request_sock_queue *queue)
212 return queue->rskq_accept_head == NULL; 212 return queue->rskq_accept_head == NULL;
213} 213}
214 214
215static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
216 struct request_sock *req)
217{
218 struct listen_sock *lopt = queue->listen_opt;
219 struct request_sock **prev;
220
221 spin_lock(&queue->syn_wait_lock);
222
223 prev = &lopt->syn_table[req->rsk_hash];
224 while (*prev != req)
225 prev = &(*prev)->dl_next;
226 *prev = req->dl_next;
227
228 spin_unlock(&queue->syn_wait_lock);
229 if (del_timer(&req->rsk_timer))
230 reqsk_put(req);
231}
232
233static inline void reqsk_queue_add(struct request_sock_queue *queue, 215static inline void reqsk_queue_add(struct request_sock_queue *queue,
234 struct request_sock *req, 216 struct request_sock *req,
235 struct sock *parent, 217 struct sock *parent,
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index 183eaab7c380..96e3f56519e7 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -36,5 +36,6 @@
36 for sequential scan */ 36 for sequential scan */
37#define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */ 37#define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */
38#define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */ 38#define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */
39#define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */
39 40
40#endif 41#endif
diff --git a/include/sound/designware_i2s.h b/include/sound/designware_i2s.h
index 26f406e0f673..3a8fca9409a7 100644
--- a/include/sound/designware_i2s.h
+++ b/include/sound/designware_i2s.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (ST) 2012 Rajeev Kumar (rajeev-dlh.kumar@st.com) 2 * Copyright (ST) 2012 Rajeev Kumar (rajeevkumar.linux@gmail.com)
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index 0de95ccb92cf..5bd134651f5e 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -41,7 +41,8 @@
41 41
42#define EMUPAGESIZE 4096 42#define EMUPAGESIZE 4096
43#define MAXREQVOICES 8 43#define MAXREQVOICES 8
44#define MAXPAGES 8192 44#define MAXPAGES0 4096 /* 32 bit mode */
45#define MAXPAGES1 8192 /* 31 bit mode */
45#define RESERVED 0 46#define RESERVED 0
46#define NUM_MIDI 16 47#define NUM_MIDI 16
47#define NUM_G 64 /* use all channels */ 48#define NUM_G 64 /* use all channels */
@@ -50,8 +51,7 @@
50 51
51/* FIXME? - according to the OSS driver the EMU10K1 needs a 29 bit DMA mask */ 52/* FIXME? - according to the OSS driver the EMU10K1 needs a 29 bit DMA mask */
52#define EMU10K1_DMA_MASK 0x7fffffffUL /* 31bit */ 53#define EMU10K1_DMA_MASK 0x7fffffffUL /* 31bit */
53#define AUDIGY_DMA_MASK 0x7fffffffUL /* 31bit FIXME - 32 should work? */ 54#define AUDIGY_DMA_MASK 0xffffffffUL /* 32bit mode */
54 /* See ALSA bug #1276 - rlrevell */
55 55
56#define TMEMSIZE 256*1024 56#define TMEMSIZE 256*1024
57#define TMEMSIZEREG 4 57#define TMEMSIZEREG 4
@@ -466,8 +466,11 @@
466 466
467#define MAPB 0x0d /* Cache map B */ 467#define MAPB 0x0d /* Cache map B */
468 468
469#define MAP_PTE_MASK 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */ 469#define MAP_PTE_MASK0 0xfffff000 /* The 20 MSBs of the PTE indexed by the PTI */
470#define MAP_PTI_MASK 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */ 470#define MAP_PTI_MASK0 0x00000fff /* The 12 bit index to one of the 4096 PTE dwords */
471
472#define MAP_PTE_MASK1 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */
473#define MAP_PTI_MASK1 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */
471 474
472/* 0x0e, 0x0f: Not used */ 475/* 0x0e, 0x0f: Not used */
473 476
@@ -1704,6 +1707,7 @@ struct snd_emu10k1 {
1704 unsigned short model; /* subsystem id */ 1707 unsigned short model; /* subsystem id */
1705 unsigned int card_type; /* EMU10K1_CARD_* */ 1708 unsigned int card_type; /* EMU10K1_CARD_* */
1706 unsigned int ecard_ctrl; /* ecard control bits */ 1709 unsigned int ecard_ctrl; /* ecard control bits */
1710 unsigned int address_mode; /* address mode */
1707 unsigned long dma_mask; /* PCI DMA mask */ 1711 unsigned long dma_mask; /* PCI DMA mask */
1708 unsigned int delay_pcm_irq; /* in samples */ 1712 unsigned int delay_pcm_irq; /* in samples */
1709 int max_cache_pages; /* max memory size / PAGE_SIZE */ 1713 int max_cache_pages; /* max memory size / PAGE_SIZE */
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 0bc83647d3fa..1065095c6973 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -287,7 +287,7 @@ struct device;
287 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\ 287 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\
288 .tlv.p = (tlv_array), \ 288 .tlv.p = (tlv_array), \
289 .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \ 289 .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
290 .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) } 290 .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) }
291#define SOC_DAPM_SINGLE_TLV_VIRT(xname, max, tlv_array) \ 291#define SOC_DAPM_SINGLE_TLV_VIRT(xname, max, tlv_array) \
292 SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0, tlv_array) 292 SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0, tlv_array)
293#define SOC_DAPM_ENUM(xname, xenum) \ 293#define SOC_DAPM_ENUM(xname, xenum) \
diff --git a/include/sound/soc.h b/include/sound/soc.h
index fcb312b3f258..f6226914acfe 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -387,8 +387,20 @@ int snd_soc_codec_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
387int snd_soc_register_card(struct snd_soc_card *card); 387int snd_soc_register_card(struct snd_soc_card *card);
388int snd_soc_unregister_card(struct snd_soc_card *card); 388int snd_soc_unregister_card(struct snd_soc_card *card);
389int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card); 389int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card);
390#ifdef CONFIG_PM_SLEEP
390int snd_soc_suspend(struct device *dev); 391int snd_soc_suspend(struct device *dev);
391int snd_soc_resume(struct device *dev); 392int snd_soc_resume(struct device *dev);
393#else
394static inline int snd_soc_suspend(struct device *dev)
395{
396 return 0;
397}
398
399static inline int snd_soc_resume(struct device *dev)
400{
401 return 0;
402}
403#endif
392int snd_soc_poweroff(struct device *dev); 404int snd_soc_poweroff(struct device *dev);
393int snd_soc_register_platform(struct device *dev, 405int snd_soc_register_platform(struct device *dev,
394 const struct snd_soc_platform_driver *platform_drv); 406 const struct snd_soc_platform_driver *platform_drv);
diff --git a/include/sound/spear_dma.h b/include/sound/spear_dma.h
index 65aca51fe255..e290de4e7e82 100644
--- a/include/sound/spear_dma.h
+++ b/include/sound/spear_dma.h
@@ -1,7 +1,7 @@
1/* 1/*
2* linux/spear_dma.h 2* linux/spear_dma.h
3* 3*
4* Copyright (ST) 2012 Rajeev Kumar (rajeev-dlh.kumar@st.com) 4* Copyright (ST) 2012 Rajeev Kumar (rajeevkumar.linux@gmail.com)
5* 5*
6* This program is free software; you can redistribute it and/or modify 6* This program is free software; you can redistribute it and/or modify
7* it under the terms of the GNU General Public License as published by 7* it under the terms of the GNU General Public License as published by
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index a3318f31e8e7..915980ac68df 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -155,7 +155,7 @@ static inline unsigned vring_size(unsigned int num, unsigned long align)
155} 155}
156 156
157/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */ 157/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
158/* Assuming a given event_idx value from the other size, if 158/* Assuming a given event_idx value from the other side, if
159 * we have just incremented index from old to new_idx, 159 * we have just incremented index from old to new_idx,
160 * should we trigger an event? */ 160 * should we trigger an event? */
161static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old) 161static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
diff --git a/kernel/Makefile b/kernel/Makefile
index 0f8f8b0bc1bf..60c302cfb4d3 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -197,9 +197,9 @@ x509.genkey:
197 @echo >>x509.genkey "x509_extensions = myexts" 197 @echo >>x509.genkey "x509_extensions = myexts"
198 @echo >>x509.genkey 198 @echo >>x509.genkey
199 @echo >>x509.genkey "[ req_distinguished_name ]" 199 @echo >>x509.genkey "[ req_distinguished_name ]"
200 @echo >>x509.genkey "O = Magrathea" 200 @echo >>x509.genkey "#O = Unspecified company"
201 @echo >>x509.genkey "CN = Glacier signing key" 201 @echo >>x509.genkey "CN = Build time autogenerated kernel key"
202 @echo >>x509.genkey "emailAddress = slartibartfast@magrathea.h2g2" 202 @echo >>x509.genkey "#emailAddress = unspecified.user@unspecified.company"
203 @echo >>x509.genkey 203 @echo >>x509.genkey
204 @echo >>x509.genkey "[ myexts ]" 204 @echo >>x509.genkey "[ myexts ]"
205 @echo >>x509.genkey "basicConstraints=critical,CA:FALSE" 205 @echo >>x509.genkey "basicConstraints=critical,CA:FALSE"
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 4139a0f8b558..54f0e7fcd0e2 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -357,8 +357,8 @@ select_insn:
357 ALU64_MOD_X: 357 ALU64_MOD_X:
358 if (unlikely(SRC == 0)) 358 if (unlikely(SRC == 0))
359 return 0; 359 return 0;
360 tmp = DST; 360 div64_u64_rem(DST, SRC, &tmp);
361 DST = do_div(tmp, SRC); 361 DST = tmp;
362 CONT; 362 CONT;
363 ALU_MOD_X: 363 ALU_MOD_X:
364 if (unlikely(SRC == 0)) 364 if (unlikely(SRC == 0))
@@ -367,8 +367,8 @@ select_insn:
367 DST = do_div(tmp, (u32) SRC); 367 DST = do_div(tmp, (u32) SRC);
368 CONT; 368 CONT;
369 ALU64_MOD_K: 369 ALU64_MOD_K:
370 tmp = DST; 370 div64_u64_rem(DST, IMM, &tmp);
371 DST = do_div(tmp, IMM); 371 DST = tmp;
372 CONT; 372 CONT;
373 ALU_MOD_K: 373 ALU_MOD_K:
374 tmp = (u32) DST; 374 tmp = (u32) DST;
@@ -377,7 +377,7 @@ select_insn:
377 ALU64_DIV_X: 377 ALU64_DIV_X:
378 if (unlikely(SRC == 0)) 378 if (unlikely(SRC == 0))
379 return 0; 379 return 0;
380 do_div(DST, SRC); 380 DST = div64_u64(DST, SRC);
381 CONT; 381 CONT;
382 ALU_DIV_X: 382 ALU_DIV_X:
383 if (unlikely(SRC == 0)) 383 if (unlikely(SRC == 0))
@@ -387,7 +387,7 @@ select_insn:
387 DST = (u32) tmp; 387 DST = (u32) tmp;
388 CONT; 388 CONT;
389 ALU64_DIV_K: 389 ALU64_DIV_K:
390 do_div(DST, IMM); 390 DST = div64_u64(DST, IMM);
391 CONT; 391 CONT;
392 ALU_DIV_K: 392 ALU_DIV_K:
393 tmp = (u32) DST; 393 tmp = (u32) DST;
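
The BPF interpreter changes matter because do_div() only accepts a 32-bit divisor, so the 64-bit ALU64 divide and modulo ops effectively used just the low 32 bits of SRC/IMM; div64_u64() and div64_u64_rem() perform the full 64-by-64 division. The userspace sketch below illustrates the difference; old_div() is an emulation of do_div()'s contract as read from the macro's interface, not the kernel macro itself.

#include <inttypes.h>
#include <stdio.h>

static uint64_t old_div(uint64_t dst, uint64_t src)
{
        /* do_div(DST, SRC) divides by only the low 32 bits of SRC */
        return dst / (uint32_t)src;
}

static uint64_t new_div(uint64_t dst, uint64_t src)
{
        return dst / src;               /* what div64_u64() computes */
}

int main(void)
{
        uint64_t dst = 1ULL << 40, src = (1ULL << 32) + 2;

        printf("old: %" PRIu64 "  new: %" PRIu64 "\n",
               old_div(dst, src), new_div(dst, src));
        return 0;
}
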
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 38c25b1f2fd5..7a36fdcca5bf 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -707,7 +707,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
707 do { 707 do {
708 unsigned long pfn, epfn, addr, eaddr; 708 unsigned long pfn, epfn, addr, eaddr;
709 709
710 pages = kimage_alloc_pages(GFP_KERNEL, order); 710 pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
711 if (!pages) 711 if (!pages)
712 break; 712 break;
713 pfn = page_to_pfn(pages); 713 pfn = page_to_pfn(pages);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f9123a82cbb6..fe22f7510bce 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1016,13 +1016,6 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
1016 rq_clock_skip_update(rq, true); 1016 rq_clock_skip_update(rq, true);
1017} 1017}
1018 1018
1019static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
1020
1021void register_task_migration_notifier(struct notifier_block *n)
1022{
1023 atomic_notifier_chain_register(&task_migration_notifier, n);
1024}
1025
1026#ifdef CONFIG_SMP 1019#ifdef CONFIG_SMP
1027void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 1020void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
1028{ 1021{
@@ -1053,18 +1046,10 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
1053 trace_sched_migrate_task(p, new_cpu); 1046 trace_sched_migrate_task(p, new_cpu);
1054 1047
1055 if (task_cpu(p) != new_cpu) { 1048 if (task_cpu(p) != new_cpu) {
1056 struct task_migration_notifier tmn;
1057
1058 if (p->sched_class->migrate_task_rq) 1049 if (p->sched_class->migrate_task_rq)
1059 p->sched_class->migrate_task_rq(p, new_cpu); 1050 p->sched_class->migrate_task_rq(p, new_cpu);
1060 p->se.nr_migrations++; 1051 p->se.nr_migrations++;
1061 perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0); 1052 perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
1062
1063 tmn.task = p;
1064 tmn.from_cpu = task_cpu(p);
1065 tmn.to_cpu = new_cpu;
1066
1067 atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
1068 } 1053 }
1069 1054
1070 __set_task_cpu(p, new_cpu); 1055 __set_task_cpu(p, new_cpu);
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index deef1caa94c6..fefcb1fa5160 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -81,7 +81,6 @@ static void cpuidle_idle_call(void)
81 struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); 81 struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
82 struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); 82 struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
83 int next_state, entered_state; 83 int next_state, entered_state;
84 unsigned int broadcast;
85 bool reflect; 84 bool reflect;
86 85
87 /* 86 /*
@@ -150,17 +149,6 @@ static void cpuidle_idle_call(void)
150 goto exit_idle; 149 goto exit_idle;
151 } 150 }
152 151
153 broadcast = drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP;
154
155 /*
156 * Tell the time framework to switch to a broadcast timer
157 * because our local timer will be shutdown. If a local timer
158 * is used from another cpu as a broadcast timer, this call may
159 * fail if it is not available
160 */
161 if (broadcast && tick_broadcast_enter())
162 goto use_default;
163
164 /* Take note of the planned idle state. */ 152 /* Take note of the planned idle state. */
165 idle_set_state(this_rq(), &drv->states[next_state]); 153 idle_set_state(this_rq(), &drv->states[next_state]);
166 154
@@ -174,8 +162,8 @@ static void cpuidle_idle_call(void)
174 /* The cpu is no longer idle or about to enter idle. */ 162 /* The cpu is no longer idle or about to enter idle. */
175 idle_set_state(this_rq(), NULL); 163 idle_set_state(this_rq(), NULL);
176 164
177 if (broadcast) 165 if (entered_state == -EBUSY)
178 tick_broadcast_exit(); 166 goto use_default;
179 167
180 /* 168 /*
181 * Give the governor an opportunity to reflect on the outcome 169 * Give the governor an opportunity to reflect on the outcome
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 4898442b837f..b28df4019ade 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -405,13 +405,18 @@ int rhashtable_insert_rehash(struct rhashtable *ht)
405 405
406 if (rht_grow_above_75(ht, tbl)) 406 if (rht_grow_above_75(ht, tbl))
407 size *= 2; 407 size *= 2;
408 /* More than two rehashes (not resizes) detected. */ 408 /* Do not schedule more than one rehash */
409 else if (WARN_ON(old_tbl != tbl && old_tbl->size == size)) 409 else if (old_tbl != tbl)
410 return -EBUSY; 410 return -EBUSY;
411 411
412 new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC); 412 new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
413 if (new_tbl == NULL) 413 if (new_tbl == NULL) {
414 /* Schedule async resize/rehash to try allocation
 415 * in non-atomic context.
416 */
417 schedule_work(&ht->run_work);
414 return -ENOMEM; 418 return -ENOMEM;
419 }
415 420
416 err = rhashtable_rehash_attach(ht, tbl, new_tbl); 421 err = rhashtable_rehash_attach(ht, tbl, new_tbl);
417 if (err) { 422 if (err) {
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 409608960899..e29ad70b3000 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -170,7 +170,7 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
170 struct br_port_msg *bpm; 170 struct br_port_msg *bpm;
171 struct nlattr *nest, *nest2; 171 struct nlattr *nest, *nest2;
172 172
173 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI); 173 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
174 if (!nlh) 174 if (!nlh)
175 return -EMSGSIZE; 175 return -EMSGSIZE;
176 176
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 0e4ddb81610d..4b5c236998ff 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -394,7 +394,7 @@ errout:
394 * Dump information about all ports, in response to GETLINK 394 * Dump information about all ports, in response to GETLINK
395 */ 395 */
396int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, 396int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
397 struct net_device *dev, u32 filter_mask) 397 struct net_device *dev, u32 filter_mask, int nlflags)
398{ 398{
399 struct net_bridge_port *port = br_port_get_rtnl(dev); 399 struct net_bridge_port *port = br_port_get_rtnl(dev);
400 400
@@ -402,7 +402,7 @@ int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
402 !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) 402 !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
403 return 0; 403 return 0;
404 404
405 return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI, 405 return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
406 filter_mask, dev); 406 filter_mask, dev);
407} 407}
408 408
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 6ca0251cb478..3362c29400f1 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -828,7 +828,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port);
828int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags); 828int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags);
829int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags); 829int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags);
830int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, 830int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev,
831 u32 filter_mask); 831 u32 filter_mask, int nlflags);
832 832
833#ifdef CONFIG_SYSFS 833#ifdef CONFIG_SYSFS
834/* br_sysfs_if.c */ 834/* br_sysfs_if.c */
diff --git a/net/core/dev.c b/net/core/dev.c
index 1796cef55ab5..c7ba0388f1be 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3079,7 +3079,7 @@ static struct rps_dev_flow *
3079set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 3079set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3080 struct rps_dev_flow *rflow, u16 next_cpu) 3080 struct rps_dev_flow *rflow, u16 next_cpu)
3081{ 3081{
3082 if (next_cpu != RPS_NO_CPU) { 3082 if (next_cpu < nr_cpu_ids) {
3083#ifdef CONFIG_RFS_ACCEL 3083#ifdef CONFIG_RFS_ACCEL
3084 struct netdev_rx_queue *rxqueue; 3084 struct netdev_rx_queue *rxqueue;
3085 struct rps_dev_flow_table *flow_table; 3085 struct rps_dev_flow_table *flow_table;
@@ -3184,7 +3184,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3184 * If the desired CPU (where last recvmsg was done) is 3184 * If the desired CPU (where last recvmsg was done) is
3185 * different from current CPU (one in the rx-queue flow 3185 * different from current CPU (one in the rx-queue flow
3186 * table entry), switch if one of the following holds: 3186 * table entry), switch if one of the following holds:
3187 * - Current CPU is unset (equal to RPS_NO_CPU). 3187 * - Current CPU is unset (>= nr_cpu_ids).
3188 * - Current CPU is offline. 3188 * - Current CPU is offline.
3189 * - The current CPU's queue tail has advanced beyond the 3189 * - The current CPU's queue tail has advanced beyond the
3190 * last packet that was enqueued using this table entry. 3190 * last packet that was enqueued using this table entry.
@@ -3192,14 +3192,14 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3192 * have been dequeued, thus preserving in order delivery. 3192 * have been dequeued, thus preserving in order delivery.
3193 */ 3193 */
3194 if (unlikely(tcpu != next_cpu) && 3194 if (unlikely(tcpu != next_cpu) &&
3195 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || 3195 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
3196 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - 3196 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
3197 rflow->last_qtail)) >= 0)) { 3197 rflow->last_qtail)) >= 0)) {
3198 tcpu = next_cpu; 3198 tcpu = next_cpu;
3199 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); 3199 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
3200 } 3200 }
3201 3201
3202 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { 3202 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
3203 *rflowp = rflow; 3203 *rflowp = rflow;
3204 cpu = tcpu; 3204 cpu = tcpu;
3205 goto done; 3205 goto done;
@@ -3240,14 +3240,14 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3240 struct rps_dev_flow_table *flow_table; 3240 struct rps_dev_flow_table *flow_table;
3241 struct rps_dev_flow *rflow; 3241 struct rps_dev_flow *rflow;
3242 bool expire = true; 3242 bool expire = true;
3243 int cpu; 3243 unsigned int cpu;
3244 3244
3245 rcu_read_lock(); 3245 rcu_read_lock();
3246 flow_table = rcu_dereference(rxqueue->rps_flow_table); 3246 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3247 if (flow_table && flow_id <= flow_table->mask) { 3247 if (flow_table && flow_id <= flow_table->mask) {
3248 rflow = &flow_table->flows[flow_id]; 3248 rflow = &flow_table->flows[flow_id];
3249 cpu = ACCESS_ONCE(rflow->cpu); 3249 cpu = ACCESS_ONCE(rflow->cpu);
3250 if (rflow->filter == filter_id && cpu != RPS_NO_CPU && 3250 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
3251 ((int)(per_cpu(softnet_data, cpu).input_queue_head - 3251 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3252 rflow->last_qtail) < 3252 rflow->last_qtail) <
3253 (int)(10 * flow_table->mask))) 3253 (int)(10 * flow_table->mask)))
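
Aside on the dev.c hunks above: retiring the RPS_NO_CPU sentinel means "no CPU recorded" is simply any value that is not a valid CPU index, so a single unsigned comparison against nr_cpu_ids covers both the never-set and the bogus case, which is also why rps_may_expire_flow() switches its cpu variable to unsigned int. A tiny standalone sketch of that comparison, with a made-up nr_cpu_ids:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPU_IDS	8U		/* stand-in for nr_cpu_ids */
#define NO_CPU		UINT16_MAX	/* "unset" is just out of range */

static bool cpu_is_set(uint16_t cpu)
{
	/* One unsigned compare covers "never set" and "garbage value". */
	return cpu < NR_CPU_IDS;
}

int main(void)
{
	printf("cpu 3:  %s\n", cpu_is_set(3) ? "valid" : "unset");
	printf("0xffff: %s\n", cpu_is_set(NO_CPU) ? "valid" : "unset");
	return 0;
}
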
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 358d52a38533..666e0928ba40 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2854,7 +2854,7 @@ static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
2854 2854
2855int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 2855int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
2856 struct net_device *dev, u16 mode, 2856 struct net_device *dev, u16 mode,
2857 u32 flags, u32 mask) 2857 u32 flags, u32 mask, int nlflags)
2858{ 2858{
2859 struct nlmsghdr *nlh; 2859 struct nlmsghdr *nlh;
2860 struct ifinfomsg *ifm; 2860 struct ifinfomsg *ifm;
@@ -2863,7 +2863,7 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
2863 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; 2863 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
2864 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 2864 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
2865 2865
2866 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), NLM_F_MULTI); 2866 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
2867 if (nlh == NULL) 2867 if (nlh == NULL)
2868 return -EMSGSIZE; 2868 return -EMSGSIZE;
2869 2869
@@ -2969,7 +2969,8 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
2969 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { 2969 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
2970 if (idx >= cb->args[0] && 2970 if (idx >= cb->args[0] &&
2971 br_dev->netdev_ops->ndo_bridge_getlink( 2971 br_dev->netdev_ops->ndo_bridge_getlink(
2972 skb, portid, seq, dev, filter_mask) < 0) 2972 skb, portid, seq, dev, filter_mask,
2973 NLM_F_MULTI) < 0)
2973 break; 2974 break;
2974 idx++; 2975 idx++;
2975 } 2976 }
@@ -2977,7 +2978,8 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
2977 if (ops->ndo_bridge_getlink) { 2978 if (ops->ndo_bridge_getlink) {
2978 if (idx >= cb->args[0] && 2979 if (idx >= cb->args[0] &&
2979 ops->ndo_bridge_getlink(skb, portid, seq, dev, 2980 ops->ndo_bridge_getlink(skb, portid, seq, dev,
2980 filter_mask) < 0) 2981 filter_mask,
2982 NLM_F_MULTI) < 0)
2981 break; 2983 break;
2982 idx++; 2984 idx++;
2983 } 2985 }
@@ -3018,7 +3020,7 @@ static int rtnl_bridge_notify(struct net_device *dev)
3018 goto errout; 3020 goto errout;
3019 } 3021 }
3020 3022
3021 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0); 3023 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
3022 if (err < 0) 3024 if (err < 0)
3023 goto errout; 3025 goto errout;
3024 3026
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d1967dab9cc6..3cfff2a3d651 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -280,13 +280,14 @@ nodata:
280EXPORT_SYMBOL(__alloc_skb); 280EXPORT_SYMBOL(__alloc_skb);
281 281
282/** 282/**
283 * build_skb - build a network buffer 283 * __build_skb - build a network buffer
284 * @data: data buffer provided by caller 284 * @data: data buffer provided by caller
285 * @frag_size: size of fragment, or 0 if head was kmalloced 285 * @frag_size: size of data, or 0 if head was kmalloced
286 * 286 *
287 * Allocate a new &sk_buff. Caller provides space holding head and 287 * Allocate a new &sk_buff. Caller provides space holding head and
288 * skb_shared_info. @data must have been allocated by kmalloc() only if 288 * skb_shared_info. @data must have been allocated by kmalloc() only if
289 * @frag_size is 0, otherwise data should come from the page allocator. 289 * @frag_size is 0, otherwise data should come from the page allocator
290 * or vmalloc()
290 * The return is the new skb buffer. 291 * The return is the new skb buffer.
291 * On a failure the return is %NULL, and @data is not freed. 292 * On a failure the return is %NULL, and @data is not freed.
292 * Notes : 293 * Notes :
@@ -297,7 +298,7 @@ EXPORT_SYMBOL(__alloc_skb);
297 * before giving packet to stack. 298 * before giving packet to stack.
298 * RX rings only contains data buffers, not full skbs. 299 * RX rings only contains data buffers, not full skbs.
299 */ 300 */
300struct sk_buff *build_skb(void *data, unsigned int frag_size) 301struct sk_buff *__build_skb(void *data, unsigned int frag_size)
301{ 302{
302 struct skb_shared_info *shinfo; 303 struct skb_shared_info *shinfo;
303 struct sk_buff *skb; 304 struct sk_buff *skb;
@@ -311,7 +312,6 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
311 312
312 memset(skb, 0, offsetof(struct sk_buff, tail)); 313 memset(skb, 0, offsetof(struct sk_buff, tail));
313 skb->truesize = SKB_TRUESIZE(size); 314 skb->truesize = SKB_TRUESIZE(size);
314 skb->head_frag = frag_size != 0;
315 atomic_set(&skb->users, 1); 315 atomic_set(&skb->users, 1);
316 skb->head = data; 316 skb->head = data;
317 skb->data = data; 317 skb->data = data;
@@ -328,6 +328,23 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
328 328
329 return skb; 329 return skb;
330} 330}
331
332/* build_skb() is wrapper over __build_skb(), that specifically
333 * takes care of skb->head and skb->pfmemalloc
334 * This means that if @frag_size is not zero, then @data must be backed
335 * by a page fragment, not kmalloc() or vmalloc()
336 */
337struct sk_buff *build_skb(void *data, unsigned int frag_size)
338{
339 struct sk_buff *skb = __build_skb(data, frag_size);
340
341 if (skb && frag_size) {
342 skb->head_frag = 1;
343 if (virt_to_head_page(data)->pfmemalloc)
344 skb->pfmemalloc = 1;
345 }
346 return skb;
347}
331EXPORT_SYMBOL(build_skb); 348EXPORT_SYMBOL(build_skb);
332 349
333struct netdev_alloc_cache { 350struct netdev_alloc_cache {
@@ -348,7 +365,8 @@ static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
348 gfp_t gfp = gfp_mask; 365 gfp_t gfp = gfp_mask;
349 366
350 if (order) { 367 if (order) {
351 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY; 368 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
369 __GFP_NOMEMALLOC;
352 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); 370 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
353 nc->frag.size = PAGE_SIZE << (page ? order : 0); 371 nc->frag.size = PAGE_SIZE << (page ? order : 0);
354 } 372 }
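
Aside on the skbuff.c hunks above: the constructor is split so that the core no longer infers head_frag from frag_size; only the wrapper, which knows the head really is a page fragment, sets head_frag and propagates pfmemalloc from the backing page. A simplified sketch of that split with illustrative types (not the kernel's struct sk_buff):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
	void *head;
	size_t size;
	bool head_frag;		/* head backed by a page fragment */
	bool pfmemalloc;	/* head came from emergency reserves */
};

/* Core constructor: makes no assumption about where 'data' came from. */
static struct buf *core_build(void *data, size_t size)
{
	struct buf *b = calloc(1, sizeof(*b));

	if (!b)
		return NULL;
	b->head = data;
	b->size = size;
	return b;
}

/* Wrapper for page-fragment heads: only here do the flags get set. */
static struct buf *frag_build(void *data, size_t size, bool from_reserves)
{
	struct buf *b = core_build(data, size);

	if (b && size) {
		b->head_frag = true;
		b->pfmemalloc = from_reserves;
	}
	return b;
}

int main(void)
{
	char frag[256];
	struct buf *b = frag_build(frag, sizeof(frag), false);

	if (b)
		printf("head_frag=%d pfmemalloc=%d\n", b->head_frag, b->pfmemalloc);
	free(b);
	return 0;
}

The af_netlink.c hunk further down relies on the same split: its vmalloc()ed buffer goes through the core constructor directly, so it no longer has to clear head_frag by hand.
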
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 2b4f21d34df6..ccf4c5629b3c 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -453,7 +453,8 @@ static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
453 iph->saddr, iph->daddr); 453 iph->saddr, iph->daddr);
454 if (req) { 454 if (req) {
455 nsk = dccp_check_req(sk, skb, req); 455 nsk = dccp_check_req(sk, skb, req);
456 reqsk_put(req); 456 if (!nsk)
457 reqsk_put(req);
457 return nsk; 458 return nsk;
458 } 459 }
459 nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo, 460 nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo,
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 9d0551092c6c..5165571f397a 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -301,7 +301,8 @@ static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
301 &iph->daddr, inet6_iif(skb)); 301 &iph->daddr, inet6_iif(skb));
302 if (req) { 302 if (req) {
303 nsk = dccp_check_req(sk, skb, req); 303 nsk = dccp_check_req(sk, skb, req);
304 reqsk_put(req); 304 if (!nsk)
305 reqsk_put(req);
305 return nsk; 306 return nsk;
306 } 307 }
307 nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo, 308 nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 5f566663e47f..30addee2dd03 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -186,8 +186,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
186 if (child == NULL) 186 if (child == NULL)
187 goto listen_overflow; 187 goto listen_overflow;
188 188
189 inet_csk_reqsk_queue_unlink(sk, req); 189 inet_csk_reqsk_queue_drop(sk, req);
190 inet_csk_reqsk_queue_removed(sk, req);
191 inet_csk_reqsk_queue_add(sk, req, child); 190 inet_csk_reqsk_queue_add(sk, req, child);
192out: 191out:
193 return child; 192 return child;
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 079a224471e7..e6f6cc3a1bcf 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -633,7 +633,7 @@ static int dsa_of_probe(struct device *dev)
633 if (cd->sw_addr > PHY_MAX_ADDR) 633 if (cd->sw_addr > PHY_MAX_ADDR)
634 continue; 634 continue;
635 635
636 if (!of_property_read_u32(np, "eeprom-length", &eeprom_len)) 636 if (!of_property_read_u32(child, "eeprom-length", &eeprom_len))
637 cd->eeprom_len = eeprom_len; 637 cd->eeprom_len = eeprom_len;
638 638
639 for_each_available_child_of_node(child, port) { 639 for_each_available_child_of_node(child, port) {
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 5c3dd6267ed3..8976ca423a07 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -564,6 +564,40 @@ int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
564} 564}
565EXPORT_SYMBOL(inet_rtx_syn_ack); 565EXPORT_SYMBOL(inet_rtx_syn_ack);
566 566
567/* return true if req was found in the syn_table[] */
568static bool reqsk_queue_unlink(struct request_sock_queue *queue,
569 struct request_sock *req)
570{
571 struct listen_sock *lopt = queue->listen_opt;
572 struct request_sock **prev;
573 bool found = false;
574
575 spin_lock(&queue->syn_wait_lock);
576
577 for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
578 prev = &(*prev)->dl_next) {
579 if (*prev == req) {
580 *prev = req->dl_next;
581 found = true;
582 break;
583 }
584 }
585
586 spin_unlock(&queue->syn_wait_lock);
587 if (del_timer(&req->rsk_timer))
588 reqsk_put(req);
589 return found;
590}
591
592void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
593{
594 if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) {
595 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
596 reqsk_put(req);
597 }
598}
599EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
600
567static void reqsk_timer_handler(unsigned long data) 601static void reqsk_timer_handler(unsigned long data)
568{ 602{
569 struct request_sock *req = (struct request_sock *)data; 603 struct request_sock *req = (struct request_sock *)data;
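
Aside on reqsk_queue_unlink() above: it walks the hash bucket through a pointer to the link pointer (prev = &(*prev)->dl_next), so removing the first entry needs no special case. A standalone sketch of that idiom in plain C, with made-up node types:

#include <stdbool.h>
#include <stdio.h>

struct node {
	int val;
	struct node *next;
};

/* Unlink one matching node; returns true if it was found.
 * 'prev' always addresses the pointer that currently points at *prev,
 * so deleting the head works exactly like deleting any other node.
 */
static bool list_unlink(struct node **head, struct node *victim)
{
	struct node **prev;

	for (prev = head; *prev != NULL; prev = &(*prev)->next) {
		if (*prev == victim) {
			*prev = victim->next;
			return true;
		}
	}
	return false;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *head = &a;

	printf("unlinked b: %d\n", list_unlink(&head, &b));
	printf("head->next->val = %d\n", head->next->val);	/* prints 3 */
	return 0;
}
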
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index a93f260cf24c..05ff44b758df 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -158,6 +158,7 @@ void ping_unhash(struct sock *sk)
158 if (sk_hashed(sk)) { 158 if (sk_hashed(sk)) {
159 write_lock_bh(&ping_table.lock); 159 write_lock_bh(&ping_table.lock);
160 hlist_nulls_del(&sk->sk_nulls_node); 160 hlist_nulls_del(&sk->sk_nulls_node);
161 sk_nulls_node_init(&sk->sk_nulls_node);
161 sock_put(sk); 162 sock_put(sk);
162 isk->inet_num = 0; 163 isk->inet_num = 0;
163 isk->inet_sport = 0; 164 isk->inet_sport = 0;
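
Aside on the ping.c hunk above: after hlist_nulls_del() the node still points into the old chain, so unhashing the same socket a second time could touch freed memory; sk_nulls_node_init() resets the node so the socket simply looks unhashed. A simplified sketch of the same idea on a pprev-style list (illustrative types, not the kernel's hlist_nulls):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *next;
	struct node **pprev;	/* NULL means "not hashed" */
};

static bool node_hashed(const struct node *n)
{
	return n->pprev != NULL;
}

static void node_del_init(struct node *n)
{
	if (node_hashed(n)) {
		*n->pprev = n->next;
		if (n->next)
			n->next->pprev = n->pprev;
	}
	n->next = NULL;
	n->pprev = NULL;	/* a second unhash is now a no-op */
}

int main(void)
{
	struct node a = { NULL, NULL };
	struct node *head = &a;

	a.pprev = &head;
	node_del_init(&a);
	node_del_init(&a);	/* safe: already marked unhashed */
	printf("hashed? %d\n", node_hashed(&a));
	return 0;
}
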
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a78540f28276..bff62fc87b8e 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -962,10 +962,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
962 if (dst_metric_locked(dst, RTAX_MTU)) 962 if (dst_metric_locked(dst, RTAX_MTU))
963 return; 963 return;
964 964
965 if (dst->dev->mtu < mtu) 965 if (ipv4_mtu(dst) < mtu)
966 return;
967
968 if (rt->rt_pmtu && rt->rt_pmtu < mtu)
969 return; 966 return;
970 967
971 if (mtu < ip_rt_min_pmtu) 968 if (mtu < ip_rt_min_pmtu)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3571f2be4470..fc1c658ec6c1 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1348,7 +1348,8 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1348 req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr); 1348 req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
1349 if (req) { 1349 if (req) {
1350 nsk = tcp_check_req(sk, skb, req, false); 1350 nsk = tcp_check_req(sk, skb, req, false);
1351 reqsk_put(req); 1351 if (!nsk)
1352 reqsk_put(req);
1352 return nsk; 1353 return nsk;
1353 } 1354 }
1354 1355
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 63d6311b5365..e5d7649136fc 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -755,10 +755,11 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
755 if (!child) 755 if (!child)
756 goto listen_overflow; 756 goto listen_overflow;
757 757
758 inet_csk_reqsk_queue_unlink(sk, req); 758 inet_csk_reqsk_queue_drop(sk, req);
759 inet_csk_reqsk_queue_removed(sk, req);
760
761 inet_csk_reqsk_queue_add(sk, req, child); 759 inet_csk_reqsk_queue_add(sk, req, child);
760 /* Warning: caller must not call reqsk_put(req);
761 * child stole last reference on it.
762 */
762 return child; 763 return child;
763 764
764listen_overflow: 765listen_overflow:
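
Aside on the minisocks hunks above: when tcp_check_req()/dccp_check_req() hand back a child, the child has taken over the caller's reference on the request socket, which is what the new "caller must not call reqsk_put()" comment records and why the ipv4/ipv6 callers now drop the reference only when no child was returned. A small sketch of that ownership-transfer rule with made-up types:

#include <stdio.h>
#include <stdlib.h>

struct req {
	int refcnt;
};

struct child {
	struct req *owned_req;		/* on success the ref lives here */
};

static void req_put(struct req *r)
{
	if (--r->refcnt == 0) {
		puts("req freed");
		free(r);
	}
}

/* Returns NULL on failure; on success the child keeps the caller's
 * reference, so the caller must not drop it again.
 */
static struct child *check_req(struct req *r, int succeed)
{
	struct child *c = succeed ? malloc(sizeof(*c)) : NULL;

	if (c)
		c->owned_req = r;	/* ownership transferred, no extra get */
	return c;
}

int main(void)
{
	struct req *r = malloc(sizeof(*r));
	struct child *c;

	r->refcnt = 1;			/* caller's reference */
	c = check_req(r, 1);
	if (!c) {
		req_put(r);		/* failure path: caller still owns it */
		return 0;
	}
	req_put(c->owned_req);		/* the new owner drops it later */
	free(c);
	return 0;
}
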
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8c8d7e06b72f..a369e8a70b2c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2812,39 +2812,65 @@ begin_fwd:
2812 } 2812 }
2813} 2813}
2814 2814
2815/* Send a fin. The caller locks the socket for us. This cannot be 2815/* We allow to exceed memory limits for FIN packets to expedite
2816 * allowed to fail queueing a FIN frame under any circumstances. 2816 * connection tear down and (memory) recovery.
2817 * Otherwise tcp_send_fin() could be tempted to either delay FIN
2818 * or even be forced to close flow without any FIN.
2819 */
2820static void sk_forced_wmem_schedule(struct sock *sk, int size)
2821{
2822 int amt, status;
2823
2824 if (size <= sk->sk_forward_alloc)
2825 return;
2826 amt = sk_mem_pages(size);
2827 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
2828 sk_memory_allocated_add(sk, amt, &status);
2829}
2830
2831/* Send a FIN. The caller locks the socket for us.
2832 * We should try to send a FIN packet really hard, but eventually give up.
2817 */ 2833 */
2818void tcp_send_fin(struct sock *sk) 2834void tcp_send_fin(struct sock *sk)
2819{ 2835{
2836 struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
2820 struct tcp_sock *tp = tcp_sk(sk); 2837 struct tcp_sock *tp = tcp_sk(sk);
2821 struct sk_buff *skb = tcp_write_queue_tail(sk);
2822 int mss_now;
2823 2838
2824 /* Optimization, tack on the FIN if we have a queue of 2839 /* Optimization, tack on the FIN if we have one skb in write queue and
2825 * unsent frames. But be careful about outgoing SACKS 2840 * this skb was not yet sent, or we are under memory pressure.
2826 * and IP options. 2841 * Note: in the latter case, FIN packet will be sent after a timeout,
2842 * as TCP stack thinks it has already been transmitted.
2827 */ 2843 */
2828 mss_now = tcp_current_mss(sk); 2844 if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
2829 2845coalesce:
2830 if (tcp_send_head(sk)) { 2846 TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
2831 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN; 2847 TCP_SKB_CB(tskb)->end_seq++;
2832 TCP_SKB_CB(skb)->end_seq++;
2833 tp->write_seq++; 2848 tp->write_seq++;
2849 if (!tcp_send_head(sk)) {
2850 /* This means tskb was already sent.
2851 * Pretend we included the FIN on previous transmit.
2852 * We need to set tp->snd_nxt to the value it would have
2853 * if FIN had been sent. This is because retransmit path
2854 * does not change tp->snd_nxt.
2855 */
2856 tp->snd_nxt++;
2857 return;
2858 }
2834 } else { 2859 } else {
2835 /* Socket is locked, keep trying until memory is available. */ 2860 skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
2836 for (;;) { 2861 if (unlikely(!skb)) {
2837 skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); 2862 if (tskb)
2838 if (skb) 2863 goto coalesce;
2839 break; 2864 return;
2840 yield();
2841 } 2865 }
2866 skb_reserve(skb, MAX_TCP_HEADER);
2867 sk_forced_wmem_schedule(sk, skb->truesize);
2842 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 2868 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
2843 tcp_init_nondata_skb(skb, tp->write_seq, 2869 tcp_init_nondata_skb(skb, tp->write_seq,
2844 TCPHDR_ACK | TCPHDR_FIN); 2870 TCPHDR_ACK | TCPHDR_FIN);
2845 tcp_queue_skb(sk, skb); 2871 tcp_queue_skb(sk, skb);
2846 } 2872 }
2847 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); 2873 __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
2848} 2874}
2849 2875
2850/* We get here when a process closes a file descriptor (either due to 2876/* We get here when a process closes a file descriptor (either due to
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index b5e6cc1d4a73..a38d3ac0f18f 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1246,7 +1246,6 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
1246static int ip6gre_tunnel_init(struct net_device *dev) 1246static int ip6gre_tunnel_init(struct net_device *dev)
1247{ 1247{
1248 struct ip6_tnl *tunnel; 1248 struct ip6_tnl *tunnel;
1249 int i;
1250 1249
1251 tunnel = netdev_priv(dev); 1250 tunnel = netdev_priv(dev);
1252 1251
@@ -1260,16 +1259,10 @@ static int ip6gre_tunnel_init(struct net_device *dev)
1260 if (ipv6_addr_any(&tunnel->parms.raddr)) 1259 if (ipv6_addr_any(&tunnel->parms.raddr))
1261 dev->header_ops = &ip6gre_header_ops; 1260 dev->header_ops = &ip6gre_header_ops;
1262 1261
1263 dev->tstats = alloc_percpu(struct pcpu_sw_netstats); 1262 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1264 if (!dev->tstats) 1263 if (!dev->tstats)
1265 return -ENOMEM; 1264 return -ENOMEM;
1266 1265
1267 for_each_possible_cpu(i) {
1268 struct pcpu_sw_netstats *ip6gre_tunnel_stats;
1269 ip6gre_tunnel_stats = per_cpu_ptr(dev->tstats, i);
1270 u64_stats_init(&ip6gre_tunnel_stats->syncp);
1271 }
1272
1273 return 0; 1266 return 0;
1274} 1267}
1275 1268
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index ad51df85aa00..b6575d665568 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -946,7 +946,8 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
946 &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb)); 946 &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
947 if (req) { 947 if (req) {
948 nsk = tcp_check_req(sk, skb, req, false); 948 nsk = tcp_check_req(sk, skb, req, false);
949 reqsk_put(req); 949 if (!nsk)
950 reqsk_put(req);
950 return nsk; 951 return nsk;
951 } 952 }
952 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo, 953 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index db8a2ea6d4de..954810c76a86 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -53,6 +53,11 @@ static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
53 return rt; 53 return rt;
54} 54}
55 55
56static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev)
57{
58 return rcu_dereference_rtnl(dev->mpls_ptr);
59}
60
56static bool mpls_output_possible(const struct net_device *dev) 61static bool mpls_output_possible(const struct net_device *dev)
57{ 62{
58 return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev); 63 return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
@@ -136,6 +141,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
136 struct mpls_route *rt; 141 struct mpls_route *rt;
137 struct mpls_entry_decoded dec; 142 struct mpls_entry_decoded dec;
138 struct net_device *out_dev; 143 struct net_device *out_dev;
144 struct mpls_dev *mdev;
139 unsigned int hh_len; 145 unsigned int hh_len;
140 unsigned int new_header_size; 146 unsigned int new_header_size;
141 unsigned int mtu; 147 unsigned int mtu;
@@ -143,6 +149,10 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
143 149
144 /* Careful this entire function runs inside of an rcu critical section */ 150 /* Careful this entire function runs inside of an rcu critical section */
145 151
152 mdev = mpls_dev_get(dev);
153 if (!mdev || !mdev->input_enabled)
154 goto drop;
155
146 if (skb->pkt_type != PACKET_HOST) 156 if (skb->pkt_type != PACKET_HOST)
147 goto drop; 157 goto drop;
148 158
@@ -352,9 +362,9 @@ static int mpls_route_add(struct mpls_route_config *cfg)
352 if (!dev) 362 if (!dev)
353 goto errout; 363 goto errout;
354 364
355 /* For now just support ethernet devices */ 365 /* Ensure this is a supported device */
356 err = -EINVAL; 366 err = -EINVAL;
357 if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK)) 367 if (!mpls_dev_get(dev))
358 goto errout; 368 goto errout;
359 369
360 err = -EINVAL; 370 err = -EINVAL;
@@ -428,10 +438,89 @@ errout:
428 return err; 438 return err;
429} 439}
430 440
441#define MPLS_PERDEV_SYSCTL_OFFSET(field) \
442 (&((struct mpls_dev *)0)->field)
443
444static const struct ctl_table mpls_dev_table[] = {
445 {
446 .procname = "input",
447 .maxlen = sizeof(int),
448 .mode = 0644,
449 .proc_handler = proc_dointvec,
450 .data = MPLS_PERDEV_SYSCTL_OFFSET(input_enabled),
451 },
452 { }
453};
454
455static int mpls_dev_sysctl_register(struct net_device *dev,
456 struct mpls_dev *mdev)
457{
458 char path[sizeof("net/mpls/conf/") + IFNAMSIZ];
459 struct ctl_table *table;
460 int i;
461
462 table = kmemdup(&mpls_dev_table, sizeof(mpls_dev_table), GFP_KERNEL);
463 if (!table)
464 goto out;
465
466 /* Table data contains only offsets relative to the base of
467 * the mdev at this point, so make them absolute.
468 */
469 for (i = 0; i < ARRAY_SIZE(mpls_dev_table); i++)
470 table[i].data = (char *)mdev + (uintptr_t)table[i].data;
471
472 snprintf(path, sizeof(path), "net/mpls/conf/%s", dev->name);
473
474 mdev->sysctl = register_net_sysctl(dev_net(dev), path, table);
475 if (!mdev->sysctl)
476 goto free;
477
478 return 0;
479
480free:
481 kfree(table);
482out:
483 return -ENOBUFS;
484}
485
486static void mpls_dev_sysctl_unregister(struct mpls_dev *mdev)
487{
488 struct ctl_table *table;
489
490 table = mdev->sysctl->ctl_table_arg;
491 unregister_net_sysctl_table(mdev->sysctl);
492 kfree(table);
493}
494
495static struct mpls_dev *mpls_add_dev(struct net_device *dev)
496{
497 struct mpls_dev *mdev;
498 int err = -ENOMEM;
499
500 ASSERT_RTNL();
501
502 mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
503 if (!mdev)
504 return ERR_PTR(err);
505
506 err = mpls_dev_sysctl_register(dev, mdev);
507 if (err)
508 goto free;
509
510 rcu_assign_pointer(dev->mpls_ptr, mdev);
511
512 return mdev;
513
514free:
515 kfree(mdev);
516 return ERR_PTR(err);
517}
518
431static void mpls_ifdown(struct net_device *dev) 519static void mpls_ifdown(struct net_device *dev)
432{ 520{
433 struct mpls_route __rcu **platform_label; 521 struct mpls_route __rcu **platform_label;
434 struct net *net = dev_net(dev); 522 struct net *net = dev_net(dev);
523 struct mpls_dev *mdev;
435 unsigned index; 524 unsigned index;
436 525
437 platform_label = rtnl_dereference(net->mpls.platform_label); 526 platform_label = rtnl_dereference(net->mpls.platform_label);
@@ -443,14 +532,35 @@ static void mpls_ifdown(struct net_device *dev)
443 continue; 532 continue;
444 rt->rt_dev = NULL; 533 rt->rt_dev = NULL;
445 } 534 }
535
536 mdev = mpls_dev_get(dev);
537 if (!mdev)
538 return;
539
540 mpls_dev_sysctl_unregister(mdev);
541
542 RCU_INIT_POINTER(dev->mpls_ptr, NULL);
543
544 kfree(mdev);
446} 545}
447 546
448static int mpls_dev_notify(struct notifier_block *this, unsigned long event, 547static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
449 void *ptr) 548 void *ptr)
450{ 549{
451 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 550 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
551 struct mpls_dev *mdev;
452 552
453 switch(event) { 553 switch(event) {
554 case NETDEV_REGISTER:
555 /* For now just support ethernet devices */
556 if ((dev->type == ARPHRD_ETHER) ||
557 (dev->type == ARPHRD_LOOPBACK)) {
558 mdev = mpls_add_dev(dev);
559 if (IS_ERR(mdev))
560 return notifier_from_errno(PTR_ERR(mdev));
561 }
562 break;
563
454 case NETDEV_UNREGISTER: 564 case NETDEV_UNREGISTER:
455 mpls_ifdown(dev); 565 mpls_ifdown(dev);
456 break; 566 break;
@@ -536,6 +646,15 @@ int nla_get_labels(const struct nlattr *nla,
536 if ((dec.bos != bos) || dec.ttl || dec.tc) 646 if ((dec.bos != bos) || dec.ttl || dec.tc)
537 return -EINVAL; 647 return -EINVAL;
538 648
649 switch (dec.label) {
650 case LABEL_IMPLICIT_NULL:
651 /* RFC3032: This is a label that an LSR may
652 * assign and distribute, but which never
653 * actually appears in the encapsulation.
654 */
655 return -EINVAL;
656 }
657
539 label[i] = dec.label; 658 label[i] = dec.label;
540 } 659 }
541 *labels = nla_labels; 660 *labels = nla_labels;
@@ -912,7 +1031,7 @@ static int mpls_platform_labels(struct ctl_table *table, int write,
912 return ret; 1031 return ret;
913} 1032}
914 1033
915static struct ctl_table mpls_table[] = { 1034static const struct ctl_table mpls_table[] = {
916 { 1035 {
917 .procname = "platform_labels", 1036 .procname = "platform_labels",
918 .data = NULL, 1037 .data = NULL,
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index fb6de92052c4..693877d69606 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -22,6 +22,12 @@ struct mpls_entry_decoded {
22 u8 bos; 22 u8 bos;
23}; 23};
24 24
25struct mpls_dev {
26 int input_enabled;
27
28 struct ctl_table_header *sysctl;
29};
30
25struct sk_buff; 31struct sk_buff;
26 32
27static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb) 33static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb)
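
Aside on the MPLS per-device sysctl registration above: the shared template table stores member offsets into struct mpls_dev (via MPLS_PERDEV_SYSCTL_OFFSET), and mpls_dev_sysctl_register() copies the template and rewrites each .data entry into an absolute pointer by adding the mdev base. A standalone sketch of that fixup using offsetof() and invented names:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct mydev {
	int input_enabled;
	int other_knob;
};

struct ctl_entry {
	const char *name;
	void *data;	/* template: an offset; registered copy: a pointer */
};

/* Shared template: .data holds offsets, not pointers. */
static const struct ctl_entry template[] = {
	{ "input", (void *)offsetof(struct mydev, input_enabled) },
	{ "other", (void *)offsetof(struct mydev, other_knob) },
};

static void register_for_dev(struct mydev *dev, struct ctl_entry *table)
{
	size_t i;

	memcpy(table, template, sizeof(template));
	/* Turn the relative offsets into absolute pointers. */
	for (i = 0; i < sizeof(template) / sizeof(template[0]); i++)
		table[i].data = (char *)dev + (uintptr_t)table[i].data;
}

int main(void)
{
	struct mydev dev = { .input_enabled = 0, .other_knob = 7 };
	struct ctl_entry table[2];

	register_for_dev(&dev, table);
	*(int *)table[0].data = 1;		/* like "echo 1 > .../input" */
	printf("input_enabled = %d\n", dev.input_enabled);
	return 0;
}

The kernel macro spells the offset with a null-based pointer; offsetof() is the portable userspace way to express the same thing.
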
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 78af83bc9c8e..ad9d11fb29fd 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -4340,7 +4340,6 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
4340 case NFT_CONTINUE: 4340 case NFT_CONTINUE:
4341 case NFT_BREAK: 4341 case NFT_BREAK:
4342 case NFT_RETURN: 4342 case NFT_RETURN:
4343 desc->len = sizeof(data->verdict);
4344 break; 4343 break;
4345 case NFT_JUMP: 4344 case NFT_JUMP:
4346 case NFT_GOTO: 4345 case NFT_GOTO:
@@ -4355,10 +4354,10 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
4355 4354
4356 chain->use++; 4355 chain->use++;
4357 data->verdict.chain = chain; 4356 data->verdict.chain = chain;
4358 desc->len = sizeof(data);
4359 break; 4357 break;
4360 } 4358 }
4361 4359
4360 desc->len = sizeof(data->verdict);
4362 desc->type = NFT_DATA_VERDICT; 4361 desc->type = NFT_DATA_VERDICT;
4363 return 0; 4362 return 0;
4364} 4363}
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
index 57d3e1af5630..0522fc9bfb0a 100644
--- a/net/netfilter/nft_reject.c
+++ b/net/netfilter/nft_reject.c
@@ -63,6 +63,8 @@ int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
63 if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code)) 63 if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
64 goto nla_put_failure; 64 goto nla_put_failure;
65 break; 65 break;
66 default:
67 break;
66 } 68 }
67 69
68 return 0; 70 return 0;
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
index 62cabee42fbe..635dbba93d01 100644
--- a/net/netfilter/nft_reject_inet.c
+++ b/net/netfilter/nft_reject_inet.c
@@ -108,6 +108,8 @@ static int nft_reject_inet_dump(struct sk_buff *skb,
108 if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code)) 108 if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
109 goto nla_put_failure; 109 goto nla_put_failure;
110 break; 110 break;
111 default:
112 break;
111 } 113 }
112 114
113 return 0; 115 return 0;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 19909d0786a2..ec4adbdcb9b4 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1629,13 +1629,11 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
1629 if (data == NULL) 1629 if (data == NULL)
1630 return NULL; 1630 return NULL;
1631 1631
1632 skb = build_skb(data, size); 1632 skb = __build_skb(data, size);
1633 if (skb == NULL) 1633 if (skb == NULL)
1634 vfree(data); 1634 vfree(data);
1635 else { 1635 else
1636 skb->head_frag = 0;
1637 skb->destructor = netlink_skb_destructor; 1636 skb->destructor = netlink_skb_destructor;
1638 }
1639 1637
1640 return skb; 1638 return skb;
1641} 1639}
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 8e472518f9f6..295d14bd6c67 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -63,7 +63,6 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
63 skb->mark = c->mark; 63 skb->mark = c->mark;
64 /* using overlimits stats to count how many packets marked */ 64 /* using overlimits stats to count how many packets marked */
65 ca->tcf_qstats.overlimits++; 65 ca->tcf_qstats.overlimits++;
66 nf_ct_put(c);
67 goto out; 66 goto out;
68 } 67 }
69 68
@@ -82,7 +81,6 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
82 nf_ct_put(c); 81 nf_ct_put(c);
83 82
84out: 83out:
85 skb->nfct = NULL;
86 spin_unlock(&ca->tcf_lock); 84 spin_unlock(&ca->tcf_lock);
87 return ca->tcf_action; 85 return ca->tcf_action;
88} 86}
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 3613e72e858e..70e3dacbf84a 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -591,14 +591,14 @@ void tipc_bearer_stop(struct net *net)
591 591
592/* Caller should hold rtnl_lock to protect the bearer */ 592/* Caller should hold rtnl_lock to protect the bearer */
593static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg, 593static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg,
594 struct tipc_bearer *bearer) 594 struct tipc_bearer *bearer, int nlflags)
595{ 595{
596 void *hdr; 596 void *hdr;
597 struct nlattr *attrs; 597 struct nlattr *attrs;
598 struct nlattr *prop; 598 struct nlattr *prop;
599 599
600 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, 600 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
601 NLM_F_MULTI, TIPC_NL_BEARER_GET); 601 nlflags, TIPC_NL_BEARER_GET);
602 if (!hdr) 602 if (!hdr)
603 return -EMSGSIZE; 603 return -EMSGSIZE;
604 604
@@ -657,7 +657,7 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb)
657 if (!bearer) 657 if (!bearer)
658 continue; 658 continue;
659 659
660 err = __tipc_nl_add_bearer(&msg, bearer); 660 err = __tipc_nl_add_bearer(&msg, bearer, NLM_F_MULTI);
661 if (err) 661 if (err)
662 break; 662 break;
663 } 663 }
@@ -705,7 +705,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
705 goto err_out; 705 goto err_out;
706 } 706 }
707 707
708 err = __tipc_nl_add_bearer(&msg, bearer); 708 err = __tipc_nl_add_bearer(&msg, bearer, 0);
709 if (err) 709 if (err)
710 goto err_out; 710 goto err_out;
711 rtnl_unlock(); 711 rtnl_unlock();
@@ -857,14 +857,14 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
857} 857}
858 858
859static int __tipc_nl_add_media(struct tipc_nl_msg *msg, 859static int __tipc_nl_add_media(struct tipc_nl_msg *msg,
860 struct tipc_media *media) 860 struct tipc_media *media, int nlflags)
861{ 861{
862 void *hdr; 862 void *hdr;
863 struct nlattr *attrs; 863 struct nlattr *attrs;
864 struct nlattr *prop; 864 struct nlattr *prop;
865 865
866 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, 866 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
867 NLM_F_MULTI, TIPC_NL_MEDIA_GET); 867 nlflags, TIPC_NL_MEDIA_GET);
868 if (!hdr) 868 if (!hdr)
869 return -EMSGSIZE; 869 return -EMSGSIZE;
870 870
@@ -916,7 +916,8 @@ int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb)
916 916
917 rtnl_lock(); 917 rtnl_lock();
918 for (; media_info_array[i] != NULL; i++) { 918 for (; media_info_array[i] != NULL; i++) {
919 err = __tipc_nl_add_media(&msg, media_info_array[i]); 919 err = __tipc_nl_add_media(&msg, media_info_array[i],
920 NLM_F_MULTI);
920 if (err) 921 if (err)
921 break; 922 break;
922 } 923 }
@@ -963,7 +964,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
963 goto err_out; 964 goto err_out;
964 } 965 }
965 966
966 err = __tipc_nl_add_media(&msg, media); 967 err = __tipc_nl_add_media(&msg, media, 0);
967 if (err) 968 if (err)
968 goto err_out; 969 goto err_out;
969 rtnl_unlock(); 970 rtnl_unlock();
diff --git a/net/tipc/link.c b/net/tipc/link.c
index a6b30df6ec02..43a515dc97b0 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1145,11 +1145,8 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
1145 } 1145 }
1146 /* Synchronize with parallel link if applicable */ 1146 /* Synchronize with parallel link if applicable */
1147 if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) { 1147 if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
1148 link_handle_out_of_seq_msg(l_ptr, skb); 1148 if (!link_synch(l_ptr))
1149 if (link_synch(l_ptr)) 1149 goto unlock;
1150 link_retrieve_defq(l_ptr, &head);
1151 skb = NULL;
1152 goto unlock;
1153 } 1150 }
1154 l_ptr->next_in_no++; 1151 l_ptr->next_in_no++;
1155 if (unlikely(!skb_queue_empty(&l_ptr->deferdq))) 1152 if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
@@ -2013,7 +2010,7 @@ msg_full:
2013 2010
2014/* Caller should hold appropriate locks to protect the link */ 2011/* Caller should hold appropriate locks to protect the link */
2015static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, 2012static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
2016 struct tipc_link *link) 2013 struct tipc_link *link, int nlflags)
2017{ 2014{
2018 int err; 2015 int err;
2019 void *hdr; 2016 void *hdr;
@@ -2022,7 +2019,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
2022 struct tipc_net *tn = net_generic(net, tipc_net_id); 2019 struct tipc_net *tn = net_generic(net, tipc_net_id);
2023 2020
2024 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, 2021 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2025 NLM_F_MULTI, TIPC_NL_LINK_GET); 2022 nlflags, TIPC_NL_LINK_GET);
2026 if (!hdr) 2023 if (!hdr)
2027 return -EMSGSIZE; 2024 return -EMSGSIZE;
2028 2025
@@ -2095,7 +2092,7 @@ static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
2095 if (!node->links[i]) 2092 if (!node->links[i])
2096 continue; 2093 continue;
2097 2094
2098 err = __tipc_nl_add_link(net, msg, node->links[i]); 2095 err = __tipc_nl_add_link(net, msg, node->links[i], NLM_F_MULTI);
2099 if (err) 2096 if (err)
2100 return err; 2097 return err;
2101 } 2098 }
@@ -2143,7 +2140,6 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
2143 err = __tipc_nl_add_node_links(net, &msg, node, 2140 err = __tipc_nl_add_node_links(net, &msg, node,
2144 &prev_link); 2141 &prev_link);
2145 tipc_node_unlock(node); 2142 tipc_node_unlock(node);
2146 tipc_node_put(node);
2147 if (err) 2143 if (err)
2148 goto out; 2144 goto out;
2149 2145
@@ -2210,7 +2206,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
2210 goto err_out; 2206 goto err_out;
2211 } 2207 }
2212 2208
2213 err = __tipc_nl_add_link(net, &msg, link); 2209 err = __tipc_nl_add_link(net, &msg, link, 0);
2214 if (err) 2210 if (err)
2215 goto err_out; 2211 goto err_out;
2216 2212
diff --git a/net/tipc/server.c b/net/tipc/server.c
index ab6183cdb121..77ff03ed1e18 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -102,7 +102,7 @@ static void tipc_conn_kref_release(struct kref *kref)
102 } 102 }
103 saddr->scope = -TIPC_NODE_SCOPE; 103 saddr->scope = -TIPC_NODE_SCOPE;
104 kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr)); 104 kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
105 sk_release_kernel(sk); 105 sock_release(sock);
106 con->sock = NULL; 106 con->sock = NULL;
107 } 107 }
108 108
@@ -321,12 +321,9 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
321 struct socket *sock = NULL; 321 struct socket *sock = NULL;
322 int ret; 322 int ret;
323 323
324 ret = sock_create_kern(AF_TIPC, SOCK_SEQPACKET, 0, &sock); 324 ret = __sock_create(s->net, AF_TIPC, SOCK_SEQPACKET, 0, &sock, 1);
325 if (ret < 0) 325 if (ret < 0)
326 return NULL; 326 return NULL;
327
328 sk_change_net(sock->sk, s->net);
329
330 ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE, 327 ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
331 (char *)&s->imp, sizeof(s->imp)); 328 (char *)&s->imp, sizeof(s->imp));
332 if (ret < 0) 329 if (ret < 0)
@@ -376,7 +373,7 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
376 373
377create_err: 374create_err:
378 kernel_sock_shutdown(sock, SHUT_RDWR); 375 kernel_sock_shutdown(sock, SHUT_RDWR);
379 sk_release_kernel(sock->sk); 376 sock_release(sock);
380 return NULL; 377 return NULL;
381} 378}
382 379
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index ee90d74d7516..9074b5cede38 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1764,13 +1764,14 @@ static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
1764int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) 1764int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
1765{ 1765{
1766 u32 dnode, dport = 0; 1766 u32 dnode, dport = 0;
1767 int err = -TIPC_ERR_NO_PORT; 1767 int err;
1768 struct sk_buff *skb; 1768 struct sk_buff *skb;
1769 struct tipc_sock *tsk; 1769 struct tipc_sock *tsk;
1770 struct tipc_net *tn; 1770 struct tipc_net *tn;
1771 struct sock *sk; 1771 struct sock *sk;
1772 1772
1773 while (skb_queue_len(inputq)) { 1773 while (skb_queue_len(inputq)) {
1774 err = -TIPC_ERR_NO_PORT;
1774 skb = NULL; 1775 skb = NULL;
1775 dport = tipc_skb_peek_port(inputq, dport); 1776 dport = tipc_skb_peek_port(inputq, dport);
1776 tsk = tipc_sk_lookup(net, dport); 1777 tsk = tipc_sk_lookup(net, dport);
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 99f7012b23b9..a73a226f2d33 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -95,39 +95,36 @@ static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
95 95
96unsigned int unix_tot_inflight; 96unsigned int unix_tot_inflight;
97 97
98
99struct sock *unix_get_socket(struct file *filp) 98struct sock *unix_get_socket(struct file *filp)
100{ 99{
101 struct sock *u_sock = NULL; 100 struct sock *u_sock = NULL;
102 struct inode *inode = file_inode(filp); 101 struct inode *inode = file_inode(filp);
103 102
104 /* 103 /* Socket ? */
105 * Socket ?
106 */
107 if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { 104 if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
108 struct socket *sock = SOCKET_I(inode); 105 struct socket *sock = SOCKET_I(inode);
109 struct sock *s = sock->sk; 106 struct sock *s = sock->sk;
110 107
111 /* 108 /* PF_UNIX ? */
112 * PF_UNIX ?
113 */
114 if (s && sock->ops && sock->ops->family == PF_UNIX) 109 if (s && sock->ops && sock->ops->family == PF_UNIX)
115 u_sock = s; 110 u_sock = s;
116 } 111 }
117 return u_sock; 112 return u_sock;
118} 113}
119 114
120/* 115/* Keep the number of times in flight count for the file
121 * Keep the number of times in flight count for the file 116 * descriptor if it is for an AF_UNIX socket.
122 * descriptor if it is for an AF_UNIX socket.
123 */ 117 */
124 118
125void unix_inflight(struct file *fp) 119void unix_inflight(struct file *fp)
126{ 120{
127 struct sock *s = unix_get_socket(fp); 121 struct sock *s = unix_get_socket(fp);
122
128 if (s) { 123 if (s) {
129 struct unix_sock *u = unix_sk(s); 124 struct unix_sock *u = unix_sk(s);
125
130 spin_lock(&unix_gc_lock); 126 spin_lock(&unix_gc_lock);
127
131 if (atomic_long_inc_return(&u->inflight) == 1) { 128 if (atomic_long_inc_return(&u->inflight) == 1) {
132 BUG_ON(!list_empty(&u->link)); 129 BUG_ON(!list_empty(&u->link));
133 list_add_tail(&u->link, &gc_inflight_list); 130 list_add_tail(&u->link, &gc_inflight_list);
@@ -142,10 +139,13 @@ void unix_inflight(struct file *fp)
142void unix_notinflight(struct file *fp) 139void unix_notinflight(struct file *fp)
143{ 140{
144 struct sock *s = unix_get_socket(fp); 141 struct sock *s = unix_get_socket(fp);
142
145 if (s) { 143 if (s) {
146 struct unix_sock *u = unix_sk(s); 144 struct unix_sock *u = unix_sk(s);
145
147 spin_lock(&unix_gc_lock); 146 spin_lock(&unix_gc_lock);
148 BUG_ON(list_empty(&u->link)); 147 BUG_ON(list_empty(&u->link));
148
149 if (atomic_long_dec_and_test(&u->inflight)) 149 if (atomic_long_dec_and_test(&u->inflight))
150 list_del_init(&u->link); 150 list_del_init(&u->link);
151 unix_tot_inflight--; 151 unix_tot_inflight--;
@@ -161,32 +161,27 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
161 161
162 spin_lock(&x->sk_receive_queue.lock); 162 spin_lock(&x->sk_receive_queue.lock);
163 skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { 163 skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
164 /* 164 /* Do we have file descriptors ? */
165 * Do we have file descriptors ?
166 */
167 if (UNIXCB(skb).fp) { 165 if (UNIXCB(skb).fp) {
168 bool hit = false; 166 bool hit = false;
169 /* 167 /* Process the descriptors of this socket */
170 * Process the descriptors of this socket
171 */
172 int nfd = UNIXCB(skb).fp->count; 168 int nfd = UNIXCB(skb).fp->count;
173 struct file **fp = UNIXCB(skb).fp->fp; 169 struct file **fp = UNIXCB(skb).fp->fp;
170
174 while (nfd--) { 171 while (nfd--) {
175 /* 172 /* Get the socket the fd matches if it indeed does so */
176 * Get the socket the fd matches
177 * if it indeed does so
178 */
179 struct sock *sk = unix_get_socket(*fp++); 173 struct sock *sk = unix_get_socket(*fp++);
174
180 if (sk) { 175 if (sk) {
181 struct unix_sock *u = unix_sk(sk); 176 struct unix_sock *u = unix_sk(sk);
182 177
183 /* 178 /* Ignore non-candidates, they could
184 * Ignore non-candidates, they could
185 * have been added to the queues after 179 * have been added to the queues after
186 * starting the garbage collection 180 * starting the garbage collection
187 */ 181 */
188 if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) { 182 if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
189 hit = true; 183 hit = true;
184
190 func(u); 185 func(u);
191 } 186 }
192 } 187 }
@@ -203,24 +198,22 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
203static void scan_children(struct sock *x, void (*func)(struct unix_sock *), 198static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
204 struct sk_buff_head *hitlist) 199 struct sk_buff_head *hitlist)
205{ 200{
206 if (x->sk_state != TCP_LISTEN) 201 if (x->sk_state != TCP_LISTEN) {
207 scan_inflight(x, func, hitlist); 202 scan_inflight(x, func, hitlist);
208 else { 203 } else {
209 struct sk_buff *skb; 204 struct sk_buff *skb;
210 struct sk_buff *next; 205 struct sk_buff *next;
211 struct unix_sock *u; 206 struct unix_sock *u;
212 LIST_HEAD(embryos); 207 LIST_HEAD(embryos);
213 208
214 /* 209 /* For a listening socket collect the queued embryos
215 * For a listening socket collect the queued embryos
216 * and perform a scan on them as well. 210 * and perform a scan on them as well.
217 */ 211 */
218 spin_lock(&x->sk_receive_queue.lock); 212 spin_lock(&x->sk_receive_queue.lock);
219 skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { 213 skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
220 u = unix_sk(skb->sk); 214 u = unix_sk(skb->sk);
221 215
222 /* 216 /* An embryo cannot be in-flight, so it's safe
223 * An embryo cannot be in-flight, so it's safe
224 * to use the list link. 217 * to use the list link.
225 */ 218 */
226 BUG_ON(!list_empty(&u->link)); 219 BUG_ON(!list_empty(&u->link));
@@ -249,8 +242,7 @@ static void inc_inflight(struct unix_sock *usk)
249static void inc_inflight_move_tail(struct unix_sock *u) 242static void inc_inflight_move_tail(struct unix_sock *u)
250{ 243{
251 atomic_long_inc(&u->inflight); 244 atomic_long_inc(&u->inflight);
252 /* 245 /* If this still might be part of a cycle, move it to the end
253 * If this still might be part of a cycle, move it to the end
254 * of the list, so that it's checked even if it was already 246 * of the list, so that it's checked even if it was already
255 * passed over 247 * passed over
256 */ 248 */
@@ -263,8 +255,7 @@ static bool gc_in_progress;
263 255
264void wait_for_unix_gc(void) 256void wait_for_unix_gc(void)
265{ 257{
266 /* 258 /* If number of inflight sockets is insane,
267 * If number of inflight sockets is insane,
268 * force a garbage collect right now. 259 * force a garbage collect right now.
269 */ 260 */
270 if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress) 261 if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
@@ -288,8 +279,7 @@ void unix_gc(void)
288 goto out; 279 goto out;
289 280
290 gc_in_progress = true; 281 gc_in_progress = true;
291 /* 282 /* First, select candidates for garbage collection. Only
292 * First, select candidates for garbage collection. Only
293 * in-flight sockets are considered, and from those only ones 283 * in-flight sockets are considered, and from those only ones
294 * which don't have any external reference. 284 * which don't have any external reference.
295 * 285 *
@@ -320,15 +310,13 @@ void unix_gc(void)
320 } 310 }
321 } 311 }
322 312
323 /* 313 /* Now remove all internal in-flight reference to children of
324 * Now remove all internal in-flight reference to children of
325 * the candidates. 314 * the candidates.
326 */ 315 */
327 list_for_each_entry(u, &gc_candidates, link) 316 list_for_each_entry(u, &gc_candidates, link)
328 scan_children(&u->sk, dec_inflight, NULL); 317 scan_children(&u->sk, dec_inflight, NULL);
329 318
330 /* 319 /* Restore the references for children of all candidates,
331 * Restore the references for children of all candidates,
332 * which have remaining references. Do this recursively, so 320 * which have remaining references. Do this recursively, so
333 * only those remain, which form cyclic references. 321 * only those remain, which form cyclic references.
334 * 322 *
@@ -350,8 +338,7 @@ void unix_gc(void)
350 } 338 }
351 list_del(&cursor); 339 list_del(&cursor);
352 340
353 /* 341 /* not_cycle_list contains those sockets which do not make up a
354 * not_cycle_list contains those sockets which do not make up a
355 * cycle. Restore these to the inflight list. 342 * cycle. Restore these to the inflight list.
356 */ 343 */
357 while (!list_empty(&not_cycle_list)) { 344 while (!list_empty(&not_cycle_list)) {
@@ -360,8 +347,7 @@ void unix_gc(void)
360 list_move_tail(&u->link, &gc_inflight_list); 347 list_move_tail(&u->link, &gc_inflight_list);
361 } 348 }
362 349
363 /* 350 /* Now gc_candidates contains only garbage. Restore original
364 * Now gc_candidates contains only garbage. Restore original
365 * inflight counters for these as well, and remove the skbuffs 351 * inflight counters for these as well, and remove the skbuffs
366 * which are creating the cycle(s). 352 * which are creating the cycle(s).
367 */ 353 */
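
The comment blocks in this garbage.c hunk spell out the collection pass: take the in-flight sockets with no external references as candidates, drop the references the candidates hold on each other, then hand references back to anything still reachable from outside, and whatever is left forms unreachable cycles. A toy user-space model of that arithmetic, with hypothetical names and a fixed three-node graph rather than the kernel's gc_inflight_list handling, might look like this:

#include <stdbool.h>
#include <stdio.h>

enum { NNODES = 3 };

struct toy_sock {
	long refs;       /* total references to the socket          */
	long inflight;   /* references held only by queued SCM fds  */
	bool candidate;
};

static struct toy_sock s[NNODES];
/* edge[i][j]: socket i carries a file descriptor for socket j */
static bool edge[NNODES][NNODES];

int main(void)
{
	/* 0 and 1 hold each other's fds in their queues (a pure cycle);
	 * 2 is held open by user space and carries nothing in flight. */
	s[0] = (struct toy_sock){ .refs = 1, .inflight = 1 };
	s[1] = (struct toy_sock){ .refs = 1, .inflight = 1 };
	s[2] = (struct toy_sock){ .refs = 1, .inflight = 0 };
	edge[0][1] = edge[1][0] = true;

	/* 1: candidates are sockets whose every reference is in flight */
	for (int i = 0; i < NNODES; i++)
		s[i].candidate = (s[i].inflight == s[i].refs);

	/* 2: drop the internal references candidates hold on candidates */
	for (int i = 0; i < NNODES; i++)
		if (s[i].candidate)
			for (int j = 0; j < NNODES; j++)
				if (edge[i][j] && s[j].candidate)
					s[j].inflight--;

	/* 3: a candidate with references left is reachable after all;
	 * give the references back to its children and drop it, repeating
	 * until the set stops shrinking (the kernel does this with a
	 * cursor and inc_inflight_move_tail) */
	for (bool changed = true; changed; ) {
		changed = false;
		for (int i = 0; i < NNODES; i++) {
			if (!s[i].candidate || s[i].inflight == 0)
				continue;
			s[i].candidate = false;
			changed = true;
			for (int j = 0; j < NNODES; j++)
				if (edge[i][j] && s[j].candidate)
					s[j].inflight++;
		}
	}

	/* whatever is still a candidate forms an unreachable cycle */
	for (int i = 0; i < NNODES; i++)
		printf("socket %d: %s\n", i,
		       s[i].candidate ? "garbage" : "alive");
	return 0;
}

Running it reports sockets 0 and 1 as garbage (they only reference each other), while socket 2, still held by user space, survives.
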
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
index 37d0220a094c..db7a2e5e4a14 100644
--- a/sound/pci/emu10k1/emu10k1.c
+++ b/sound/pci/emu10k1/emu10k1.c
@@ -183,8 +183,10 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci,
183 } 183 }
184#endif 184#endif
185 185
186 strcpy(card->driver, emu->card_capabilities->driver); 186 strlcpy(card->driver, emu->card_capabilities->driver,
187 strcpy(card->shortname, emu->card_capabilities->name); 187 sizeof(card->driver));
188 strlcpy(card->shortname, emu->card_capabilities->name,
189 sizeof(card->shortname));
188 snprintf(card->longname, sizeof(card->longname), 190 snprintf(card->longname, sizeof(card->longname),
189 "%s (rev.%d, serial:0x%x) at 0x%lx, irq %i", 191 "%s (rev.%d, serial:0x%x) at 0x%lx, irq %i",
190 card->shortname, emu->revision, emu->serial, emu->port, emu->irq); 192 card->shortname, emu->revision, emu->serial, emu->port, emu->irq);
diff --git a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c
index 874cd76c7b7f..d2c7ea3a7610 100644
--- a/sound/pci/emu10k1/emu10k1_callback.c
+++ b/sound/pci/emu10k1/emu10k1_callback.c
@@ -415,7 +415,7 @@ start_voice(struct snd_emux_voice *vp)
415 snd_emu10k1_ptr_write(hw, Z2, ch, 0); 415 snd_emu10k1_ptr_write(hw, Z2, ch, 0);
416 416
417 /* invalidate maps */ 417 /* invalidate maps */
418 temp = (hw->silent_page.addr << 1) | MAP_PTI_MASK; 418 temp = (hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
419 snd_emu10k1_ptr_write(hw, MAPA, ch, temp); 419 snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
420 snd_emu10k1_ptr_write(hw, MAPB, ch, temp); 420 snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
421#if 0 421#if 0
@@ -436,7 +436,7 @@ start_voice(struct snd_emux_voice *vp)
436 snd_emu10k1_ptr_write(hw, CDF, ch, sample); 436 snd_emu10k1_ptr_write(hw, CDF, ch, sample);
437 437
438 /* invalidate maps */ 438 /* invalidate maps */
439 temp = ((unsigned int)hw->silent_page.addr << 1) | MAP_PTI_MASK; 439 temp = ((unsigned int)hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
440 snd_emu10k1_ptr_write(hw, MAPA, ch, temp); 440 snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
441 snd_emu10k1_ptr_write(hw, MAPB, ch, temp); 441 snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
442 442
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index 54079f5d5673..a4548147c621 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -282,7 +282,7 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume)
282 snd_emu10k1_ptr_write(emu, TCB, 0, 0); /* taken from original driver */ 282 snd_emu10k1_ptr_write(emu, TCB, 0, 0); /* taken from original driver */
283 snd_emu10k1_ptr_write(emu, TCBS, 0, 4); /* taken from original driver */ 283 snd_emu10k1_ptr_write(emu, TCBS, 0, 4); /* taken from original driver */
284 284
285 silent_page = (emu->silent_page.addr << 1) | MAP_PTI_MASK; 285 silent_page = (emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
286 for (ch = 0; ch < NUM_G; ch++) { 286 for (ch = 0; ch < NUM_G; ch++) {
287 snd_emu10k1_ptr_write(emu, MAPA, ch, silent_page); 287 snd_emu10k1_ptr_write(emu, MAPA, ch, silent_page);
288 snd_emu10k1_ptr_write(emu, MAPB, ch, silent_page); 288 snd_emu10k1_ptr_write(emu, MAPB, ch, silent_page);
@@ -348,6 +348,11 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume)
348 outl(reg | A_IOCFG_GPOUT0, emu->port + A_IOCFG); 348 outl(reg | A_IOCFG_GPOUT0, emu->port + A_IOCFG);
349 } 349 }
350 350
351 if (emu->address_mode == 0) {
352 /* use 16M in 4G */
353 outl(inl(emu->port + HCFG) | HCFG_EXPANDED_MEM, emu->port + HCFG);
354 }
355
351 return 0; 356 return 0;
352} 357}
353 358
@@ -1446,7 +1451,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
1446 * 1451 *
1447 */ 1452 */
1448 {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x20011102, 1453 {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x20011102,
1449 .driver = "Audigy2", .name = "SB Audigy 2 ZS Notebook [SB0530]", 1454 .driver = "Audigy2", .name = "Audigy 2 ZS Notebook [SB0530]",
1450 .id = "Audigy2", 1455 .id = "Audigy2",
1451 .emu10k2_chip = 1, 1456 .emu10k2_chip = 1,
1452 .ca0108_chip = 1, 1457 .ca0108_chip = 1,
@@ -1596,7 +1601,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
1596 .adc_1361t = 1, /* 24 bit capture instead of 16bit */ 1601 .adc_1361t = 1, /* 24 bit capture instead of 16bit */
1597 .ac97_chip = 1} , 1602 .ac97_chip = 1} ,
1598 {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10051102, 1603 {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10051102,
1599 .driver = "Audigy2", .name = "SB Audigy 2 Platinum EX [SB0280]", 1604 .driver = "Audigy2", .name = "Audigy 2 Platinum EX [SB0280]",
1600 .id = "Audigy2", 1605 .id = "Audigy2",
1601 .emu10k2_chip = 1, 1606 .emu10k2_chip = 1,
1602 .ca0102_chip = 1, 1607 .ca0102_chip = 1,
@@ -1902,8 +1907,10 @@ int snd_emu10k1_create(struct snd_card *card,
1902 1907
1903 is_audigy = emu->audigy = c->emu10k2_chip; 1908 is_audigy = emu->audigy = c->emu10k2_chip;
1904 1909
1910 /* set addressing mode */
1911 emu->address_mode = is_audigy ? 0 : 1;
1905 /* set the DMA transfer mask */ 1912 /* set the DMA transfer mask */
1906 emu->dma_mask = is_audigy ? AUDIGY_DMA_MASK : EMU10K1_DMA_MASK; 1913 emu->dma_mask = emu->address_mode ? EMU10K1_DMA_MASK : AUDIGY_DMA_MASK;
1907 if (pci_set_dma_mask(pci, emu->dma_mask) < 0 || 1914 if (pci_set_dma_mask(pci, emu->dma_mask) < 0 ||
1908 pci_set_consistent_dma_mask(pci, emu->dma_mask) < 0) { 1915 pci_set_consistent_dma_mask(pci, emu->dma_mask) < 0) {
1909 dev_err(card->dev, 1916 dev_err(card->dev,
@@ -1928,7 +1935,7 @@ int snd_emu10k1_create(struct snd_card *card,
1928 1935
1929 emu->max_cache_pages = max_cache_bytes >> PAGE_SHIFT; 1936 emu->max_cache_pages = max_cache_bytes >> PAGE_SHIFT;
1930 if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci), 1937 if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
1931 32 * 1024, &emu->ptb_pages) < 0) { 1938 (emu->address_mode ? 32 : 16) * 1024, &emu->ptb_pages) < 0) {
1932 err = -ENOMEM; 1939 err = -ENOMEM;
1933 goto error; 1940 goto error;
1934 } 1941 }
@@ -2027,8 +2034,8 @@ int snd_emu10k1_create(struct snd_card *card,
2027 2034
2028 /* Clear silent pages and set up pointers */ 2035 /* Clear silent pages and set up pointers */
2029 memset(emu->silent_page.area, 0, PAGE_SIZE); 2036 memset(emu->silent_page.area, 0, PAGE_SIZE);
2030 silent_page = emu->silent_page.addr << 1; 2037 silent_page = emu->silent_page.addr << emu->address_mode;
2031 for (idx = 0; idx < MAXPAGES; idx++) 2038 for (idx = 0; idx < (emu->address_mode ? MAXPAGES1 : MAXPAGES0); idx++)
2032 ((u32 *)emu->ptb_pages.area)[idx] = cpu_to_le32(silent_page | idx); 2039 ((u32 *)emu->ptb_pages.area)[idx] = cpu_to_le32(silent_page | idx);
2033 2040
2034 /* set up voice indices */ 2041 /* set up voice indices */
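
The emu10k1 hunks above parameterize the page-table math on emu->address_mode: mode 1 keeps the legacy behavior (DMA address shifted left by one, EMU10K1_DMA_MASK, the larger page table), while mode 0 is the new Audigy path (no shift, AUDIGY_DMA_MASK, half the page-table allocation plus the HCFG_EXPANDED_MEM bit). A small user-space sketch of just that selection logic; the TOY_* constants are stand-ins, not values copied from the emu10k1 headers:

#include <stdint.h>
#include <stdio.h>

#define TOY_MAXPAGES0   4096u         /* illustrative stand-in for MAXPAGES0 */
#define TOY_MAXPAGES1   8192u         /* illustrative stand-in for MAXPAGES1 */
#define TOY_DMA_MASK0   0xffffffffu   /* mode 0: full 32-bit addressing */
#define TOY_DMA_MASK1   0x7fffffffu   /* mode 1: legacy 31-bit addressing */

/* mirrors __set_ptb_entry(): the address is shifted by address_mode and the
 * page index lives in the low bits */
static uint32_t ptb_entry(uint64_t addr, unsigned int page, int address_mode)
{
	return (uint32_t)((addr << address_mode) | page);
}

int main(void)
{
	for (int mode = 0; mode <= 1; mode++) {
		printf("mode %d: maxpages=%u dma_mask=%#x entry(0x1000,5)=%#x\n",
		       mode,
		       mode ? TOY_MAXPAGES1 : TOY_MAXPAGES0,
		       mode ? TOY_DMA_MASK1 : TOY_DMA_MASK0,
		       (unsigned int)ptb_entry(0x1000, 5, mode));
	}
	return 0;
}
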
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index 0dc07385af0e..14a305bd8a98 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -380,7 +380,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
380 snd_emu10k1_ptr_write(emu, Z1, voice, 0); 380 snd_emu10k1_ptr_write(emu, Z1, voice, 0);
381 snd_emu10k1_ptr_write(emu, Z2, voice, 0); 381 snd_emu10k1_ptr_write(emu, Z2, voice, 0);
382 /* invalidate maps */ 382 /* invalidate maps */
383 silent_page = ((unsigned int)emu->silent_page.addr << 1) | MAP_PTI_MASK; 383 silent_page = ((unsigned int)emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
384 snd_emu10k1_ptr_write(emu, MAPA, voice, silent_page); 384 snd_emu10k1_ptr_write(emu, MAPA, voice, silent_page);
385 snd_emu10k1_ptr_write(emu, MAPB, voice, silent_page); 385 snd_emu10k1_ptr_write(emu, MAPB, voice, silent_page);
386 /* modulation envelope */ 386 /* modulation envelope */
diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
index c68e6dd2fa67..4f1f69be1865 100644
--- a/sound/pci/emu10k1/memory.c
+++ b/sound/pci/emu10k1/memory.c
@@ -34,10 +34,11 @@
34 * aligned pages in others 34 * aligned pages in others
35 */ 35 */
36#define __set_ptb_entry(emu,page,addr) \ 36#define __set_ptb_entry(emu,page,addr) \
37 (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page))) 37 (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
38 38
39#define UNIT_PAGES (PAGE_SIZE / EMUPAGESIZE) 39#define UNIT_PAGES (PAGE_SIZE / EMUPAGESIZE)
40#define MAX_ALIGN_PAGES (MAXPAGES / UNIT_PAGES) 40#define MAX_ALIGN_PAGES0 (MAXPAGES0 / UNIT_PAGES)
41#define MAX_ALIGN_PAGES1 (MAXPAGES1 / UNIT_PAGES)
41/* get aligned page from offset address */ 42/* get aligned page from offset address */
42#define get_aligned_page(offset) ((offset) >> PAGE_SHIFT) 43#define get_aligned_page(offset) ((offset) >> PAGE_SHIFT)
43/* get offset address from aligned page */ 44/* get offset address from aligned page */
@@ -124,7 +125,7 @@ static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct lis
124 } 125 }
125 page = blk->mapped_page + blk->pages; 126 page = blk->mapped_page + blk->pages;
126 } 127 }
127 size = MAX_ALIGN_PAGES - page; 128 size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
128 if (size >= max_size) { 129 if (size >= max_size) {
129 *nextp = pos; 130 *nextp = pos;
130 return page; 131 return page;
@@ -181,7 +182,7 @@ static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
181 q = get_emu10k1_memblk(p, mapped_link); 182 q = get_emu10k1_memblk(p, mapped_link);
182 end_page = q->mapped_page; 183 end_page = q->mapped_page;
183 } else 184 } else
184 end_page = MAX_ALIGN_PAGES; 185 end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
185 186
186 /* remove links */ 187 /* remove links */
187 list_del(&blk->mapped_link); 188 list_del(&blk->mapped_link);
@@ -307,7 +308,7 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
307 if (snd_BUG_ON(!emu)) 308 if (snd_BUG_ON(!emu))
308 return NULL; 309 return NULL;
309 if (snd_BUG_ON(runtime->dma_bytes <= 0 || 310 if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
310 runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE)) 311 runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
311 return NULL; 312 return NULL;
312 hdr = emu->memhdr; 313 hdr = emu->memhdr;
313 if (snd_BUG_ON(!hdr)) 314 if (snd_BUG_ON(!hdr))
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 873ed1bce12b..b49feff0a319 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -873,14 +873,15 @@ struct hda_pcm *snd_hda_codec_pcm_new(struct hda_codec *codec,
873 struct hda_pcm *pcm; 873 struct hda_pcm *pcm;
874 va_list args; 874 va_list args;
875 875
876 va_start(args, fmt);
877 pcm = kzalloc(sizeof(*pcm), GFP_KERNEL); 876 pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
878 if (!pcm) 877 if (!pcm)
879 return NULL; 878 return NULL;
880 879
881 pcm->codec = codec; 880 pcm->codec = codec;
882 kref_init(&pcm->kref); 881 kref_init(&pcm->kref);
882 va_start(args, fmt);
883 pcm->name = kvasprintf(GFP_KERNEL, fmt, args); 883 pcm->name = kvasprintf(GFP_KERNEL, fmt, args);
884 va_end(args);
884 if (!pcm->name) { 885 if (!pcm->name) {
885 kfree(pcm); 886 kfree(pcm);
886 return NULL; 887 return NULL;
@@ -2082,6 +2083,16 @@ static struct snd_kcontrol_new vmaster_mute_mode = {
2082 .put = vmaster_mute_mode_put, 2083 .put = vmaster_mute_mode_put,
2083}; 2084};
2084 2085
2086/* meta hook to call each driver's vmaster hook */
2087static void vmaster_hook(void *private_data, int enabled)
2088{
2089 struct hda_vmaster_mute_hook *hook = private_data;
2090
2091 if (hook->mute_mode != HDA_VMUTE_FOLLOW_MASTER)
2092 enabled = hook->mute_mode;
2093 hook->hook(hook->codec, enabled);
2094}
2095
2085/** 2096/**
2086 * snd_hda_add_vmaster_hook - Add a vmaster hook for mute-LED 2097 * snd_hda_add_vmaster_hook - Add a vmaster hook for mute-LED
2087 * @codec: the HDA codec 2098 * @codec: the HDA codec
@@ -2100,9 +2111,9 @@ int snd_hda_add_vmaster_hook(struct hda_codec *codec,
2100 2111
2101 if (!hook->hook || !hook->sw_kctl) 2112 if (!hook->hook || !hook->sw_kctl)
2102 return 0; 2113 return 0;
2103 snd_ctl_add_vmaster_hook(hook->sw_kctl, hook->hook, codec);
2104 hook->codec = codec; 2114 hook->codec = codec;
2105 hook->mute_mode = HDA_VMUTE_FOLLOW_MASTER; 2115 hook->mute_mode = HDA_VMUTE_FOLLOW_MASTER;
2116 snd_ctl_add_vmaster_hook(hook->sw_kctl, vmaster_hook, hook);
2106 if (!expose_enum_ctl) 2117 if (!expose_enum_ctl)
2107 return 0; 2118 return 0;
2108 kctl = snd_ctl_new1(&vmaster_mute_mode, hook); 2119 kctl = snd_ctl_new1(&vmaster_mute_mode, hook);
@@ -2128,14 +2139,7 @@ void snd_hda_sync_vmaster_hook(struct hda_vmaster_mute_hook *hook)
2128 */ 2139 */
2129 if (hook->codec->bus->shutdown) 2140 if (hook->codec->bus->shutdown)
2130 return; 2141 return;
2131 switch (hook->mute_mode) { 2142 snd_ctl_sync_vmaster_hook(hook->sw_kctl);
2132 case HDA_VMUTE_FOLLOW_MASTER:
2133 snd_ctl_sync_vmaster_hook(hook->sw_kctl);
2134 break;
2135 default:
2136 hook->hook(hook->codec, hook->mute_mode);
2137 break;
2138 }
2139} 2143}
2140EXPORT_SYMBOL_GPL(snd_hda_sync_vmaster_hook); 2144EXPORT_SYMBOL_GPL(snd_hda_sync_vmaster_hook);
2141 2145
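
Two separate fixes land in this hda_codec.c diff: snd_hda_codec_pcm_new() now calls va_start() right before its single kvasprintf() consumer and adds the missing va_end(), and vmaster hook registration is routed through the new vmaster_hook() wrapper so the mute-mode policy lives in one place. The varargs half of that, shown as a standalone hypothetical helper using plain libc calls rather than the ALSA API:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *fmt_name(const char *fmt, ...)
{
	char buf[128];
	va_list args;

	/* keep va_start()/va_end() tight around the one consumer of the
	 * list; doing failable work in between (as the old code did with
	 * the allocation) invites leaks and misuse on error paths */
	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	return strdup(buf);
}

int main(void)
{
	char *name = fmt_name("%s %d", "Analog", 1);

	if (name) {
		puts(name);
		free(name);
	}
	return 0;
}
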
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 3d2597b7037b..788f969b1a68 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3259,7 +3259,8 @@ static int create_input_ctls(struct hda_codec *codec)
3259 val = PIN_IN; 3259 val = PIN_IN;
3260 if (cfg->inputs[i].type == AUTO_PIN_MIC) 3260 if (cfg->inputs[i].type == AUTO_PIN_MIC)
3261 val |= snd_hda_get_default_vref(codec, pin); 3261 val |= snd_hda_get_default_vref(codec, pin);
3262 if (pin != spec->hp_mic_pin) 3262 if (pin != spec->hp_mic_pin &&
3263 !snd_hda_codec_get_pin_target(codec, pin))
3263 set_pin_target(codec, pin, val, false); 3264 set_pin_target(codec, pin, val, false);
3264 3265
3265 if (mixer) { 3266 if (mixer) {
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 06199e4e930f..e2afd53cc14c 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4190,11 +4190,18 @@ static void alc_shutup_dell_xps13(struct hda_codec *codec)
4190static void alc_fixup_dell_xps13(struct hda_codec *codec, 4190static void alc_fixup_dell_xps13(struct hda_codec *codec,
4191 const struct hda_fixup *fix, int action) 4191 const struct hda_fixup *fix, int action)
4192{ 4192{
4193 if (action == HDA_FIXUP_ACT_PROBE) { 4193 struct alc_spec *spec = codec->spec;
4194 struct alc_spec *spec = codec->spec; 4194 struct hda_input_mux *imux = &spec->gen.input_mux;
4195 struct hda_input_mux *imux = &spec->gen.input_mux; 4195 int i;
4196 int i;
4197 4196
4197 switch (action) {
4198 case HDA_FIXUP_ACT_PRE_PROBE:
4199 /* mic pin 0x19 must be initialized with Vref Hi-Z, otherwise
4200 * it causes a click noise at start up
4201 */
4202 snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
4203 break;
4204 case HDA_FIXUP_ACT_PROBE:
4198 spec->shutup = alc_shutup_dell_xps13; 4205 spec->shutup = alc_shutup_dell_xps13;
4199 4206
4200 /* Make the internal mic the default input source. */ 4207 /* Make the internal mic the default input source. */
@@ -4204,6 +4211,7 @@ static void alc_fixup_dell_xps13(struct hda_codec *codec,
4204 break; 4211 break;
4205 } 4212 }
4206 } 4213 }
4214 break;
4207 } 4215 }
4208} 4216}
4209 4217
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
index 0a4ad5feb82e..d51703e30523 100644
--- a/sound/pci/hda/thinkpad_helper.c
+++ b/sound/pci/hda/thinkpad_helper.c
@@ -72,6 +72,7 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
72 if (led_set_func(TPACPI_LED_MUTE, false) >= 0) { 72 if (led_set_func(TPACPI_LED_MUTE, false) >= 0) {
73 old_vmaster_hook = spec->vmaster_mute.hook; 73 old_vmaster_hook = spec->vmaster_mute.hook;
74 spec->vmaster_mute.hook = update_tpacpi_mute_led; 74 spec->vmaster_mute.hook = update_tpacpi_mute_led;
75 spec->vmaster_mute_enum = 1;
75 removefunc = false; 76 removefunc = false;
76 } 77 }
77 if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) { 78 if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 69528ae5410c..be4d741c45ba 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -18,6 +18,7 @@
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/spi/spi.h> 19#include <linux/spi/spi.h>
20#include <linux/gpio.h> 20#include <linux/gpio.h>
21#include <linux/acpi.h>
21#include <sound/core.h> 22#include <sound/core.h>
22#include <sound/pcm.h> 23#include <sound/pcm.h>
23#include <sound/pcm_params.h> 24#include <sound/pcm_params.h>
@@ -2656,6 +2657,15 @@ static const struct i2c_device_id rt5645_i2c_id[] = {
2656}; 2657};
2657MODULE_DEVICE_TABLE(i2c, rt5645_i2c_id); 2658MODULE_DEVICE_TABLE(i2c, rt5645_i2c_id);
2658 2659
2660#ifdef CONFIG_ACPI
2661static struct acpi_device_id rt5645_acpi_match[] = {
2662 { "10EC5645", 0 },
2663 { "10EC5650", 0 },
2664 {},
2665};
2666MODULE_DEVICE_TABLE(acpi, rt5645_acpi_match);
2667#endif
2668
2659static int rt5645_i2c_probe(struct i2c_client *i2c, 2669static int rt5645_i2c_probe(struct i2c_client *i2c,
2660 const struct i2c_device_id *id) 2670 const struct i2c_device_id *id)
2661{ 2671{
@@ -2770,7 +2780,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
2770 2780
2771 case RT5645_DMIC_DATA_GPIO12: 2781 case RT5645_DMIC_DATA_GPIO12:
2772 regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1, 2782 regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1,
2773 RT5645_DMIC_1_DP_MASK, RT5645_DMIC_2_DP_GPIO12); 2783 RT5645_DMIC_2_DP_MASK, RT5645_DMIC_2_DP_GPIO12);
2774 regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1, 2784 regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
2775 RT5645_GP12_PIN_MASK, 2785 RT5645_GP12_PIN_MASK,
2776 RT5645_GP12_PIN_DMIC2_SDA); 2786 RT5645_GP12_PIN_DMIC2_SDA);
@@ -2872,6 +2882,7 @@ static struct i2c_driver rt5645_i2c_driver = {
2872 .driver = { 2882 .driver = {
2873 .name = "rt5645", 2883 .name = "rt5645",
2874 .owner = THIS_MODULE, 2884 .owner = THIS_MODULE,
2885 .acpi_match_table = ACPI_PTR(rt5645_acpi_match),
2875 }, 2886 },
2876 .probe = rt5645_i2c_probe, 2887 .probe = rt5645_i2c_probe,
2877 .remove = rt5645_i2c_remove, 2888 .remove = rt5645_i2c_remove,
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index af182586712d..169aa471ffbd 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -62,6 +62,9 @@ static const struct reg_default init_list[] = {
62 {RT5677_PR_BASE + 0x1e, 0x0000}, 62 {RT5677_PR_BASE + 0x1e, 0x0000},
63 {RT5677_PR_BASE + 0x12, 0x0eaa}, 63 {RT5677_PR_BASE + 0x12, 0x0eaa},
64 {RT5677_PR_BASE + 0x14, 0x018a}, 64 {RT5677_PR_BASE + 0x14, 0x018a},
65 {RT5677_PR_BASE + 0x15, 0x0490},
66 {RT5677_PR_BASE + 0x38, 0x0f71},
67 {RT5677_PR_BASE + 0x39, 0x0f71},
65}; 68};
66#define RT5677_INIT_REG_LEN ARRAY_SIZE(init_list) 69#define RT5677_INIT_REG_LEN ARRAY_SIZE(init_list)
67 70
@@ -914,7 +917,7 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w,
914{ 917{
915 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); 918 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
916 struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); 919 struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
917 int idx = rl6231_calc_dmic_clk(rt5677->sysclk); 920 int idx = rl6231_calc_dmic_clk(rt5677->lrck[RT5677_AIF1] << 8);
918 921
919 if (idx < 0) 922 if (idx < 0)
920 dev_err(codec->dev, "Failed to set DMIC clock\n"); 923 dev_err(codec->dev, "Failed to set DMIC clock\n");
diff --git a/sound/soc/codecs/tfa9879.c b/sound/soc/codecs/tfa9879.c
index 16f1b71edb55..aab0af681e8c 100644
--- a/sound/soc/codecs/tfa9879.c
+++ b/sound/soc/codecs/tfa9879.c
@@ -280,8 +280,8 @@ static int tfa9879_i2c_probe(struct i2c_client *i2c,
280 int i; 280 int i;
281 281
282 tfa9879 = devm_kzalloc(&i2c->dev, sizeof(*tfa9879), GFP_KERNEL); 282 tfa9879 = devm_kzalloc(&i2c->dev, sizeof(*tfa9879), GFP_KERNEL);
283 if (IS_ERR(tfa9879)) 283 if (!tfa9879)
284 return PTR_ERR(tfa9879); 284 return -ENOMEM;
285 285
286 i2c_set_clientdata(i2c, tfa9879); 286 i2c_set_clientdata(i2c, tfa9879);
287 287
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index e8bb8eef1d16..0d48804218b1 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -1357,7 +1357,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
1357 } 1357 }
1358 1358
1359 ssi_private->irq = platform_get_irq(pdev, 0); 1359 ssi_private->irq = platform_get_irq(pdev, 0);
1360 if (!ssi_private->irq) { 1360 if (ssi_private->irq < 0) {
1361 dev_err(&pdev->dev, "no irq for node %s\n", pdev->name); 1361 dev_err(&pdev->dev, "no irq for node %s\n", pdev->name);
1362 return ssi_private->irq; 1362 return ssi_private->irq;
1363 } 1363 }
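
The fsl_ssi.c change corrects a common probe mistake: platform_get_irq() reports failure as a negative errno, not 0, so the old if (!ssi_private->irq) test could never trigger. A minimal sketch of the intended pattern, written as a hypothetical helper rather than the fsl_ssi code:

#include <linux/platform_device.h>

/* hypothetical helper: fetch the first IRQ or propagate the errno */
static int example_get_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0) {                  /* 0 is not the failure value */
		dev_err(&pdev->dev, "no irq for node %s\n", pdev->name);
		return irq;             /* negative errno, e.g. -ENXIO */
	}

	return irq;
}
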
diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
index cd9aee9871a3..3853ec2ddbc7 100644
--- a/sound/soc/intel/Makefile
+++ b/sound/soc/intel/Makefile
@@ -4,7 +4,7 @@ obj-$(CONFIG_SND_SOC_INTEL_SST) += common/
4# Platform Support 4# Platform Support
5obj-$(CONFIG_SND_SOC_INTEL_HASWELL) += haswell/ 5obj-$(CONFIG_SND_SOC_INTEL_HASWELL) += haswell/
6obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += baytrail/ 6obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += baytrail/
7obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += atom/ 7obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += atom/
8 8
9# Machine support 9# Machine support
10obj-$(CONFIG_SND_SOC_INTEL_SST) += boards/ 10obj-$(CONFIG_SND_SOC_INTEL_SST) += boards/
diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
index 1efb33b36303..a839dbfa5218 100644
--- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
@@ -759,7 +759,6 @@ fw_err:
759dsp_new_err: 759dsp_new_err:
760 sst_ipc_fini(ipc); 760 sst_ipc_fini(ipc);
761ipc_init_err: 761ipc_init_err:
762 kfree(byt);
763 762
764 return err; 763 return err;
765} 764}
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index 344a1e9bbce5..324eceb07b25 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -2201,7 +2201,6 @@ dma_err:
2201dsp_new_err: 2201dsp_new_err:
2202 sst_ipc_fini(ipc); 2202 sst_ipc_fini(ipc);
2203ipc_init_err: 2203ipc_init_err:
2204 kfree(hsw);
2205 return ret; 2204 return ret;
2206} 2205}
2207EXPORT_SYMBOL_GPL(sst_hsw_dsp_init); 2206EXPORT_SYMBOL_GPL(sst_hsw_dsp_init);
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index 6698d058de29..dc790abaa331 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -194,7 +194,7 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
194 int cmd, struct snd_soc_dai *dai) 194 int cmd, struct snd_soc_dai *dai)
195{ 195{
196 struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai); 196 struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
197 int ret; 197 int ret = -EINVAL;
198 198
199 switch (cmd) { 199 switch (cmd) {
200 case SNDRV_PCM_TRIGGER_START: 200 case SNDRV_PCM_TRIGGER_START:
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
index 326d3c3804e3..5bf723689692 100644
--- a/sound/soc/samsung/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -461,8 +461,8 @@ static int s3c24xx_iis_dev_probe(struct platform_device *pdev)
461 return -ENOENT; 461 return -ENOENT;
462 } 462 }
463 s3c24xx_i2s.regs = devm_ioremap_resource(&pdev->dev, res); 463 s3c24xx_i2s.regs = devm_ioremap_resource(&pdev->dev, res);
464 if (s3c24xx_i2s.regs == NULL) 464 if (IS_ERR(s3c24xx_i2s.regs))
465 return -ENXIO; 465 return PTR_ERR(s3c24xx_i2s.regs);
466 466
467 s3c24xx_i2s_pcm_stereo_out.dma_addr = res->start + S3C2410_IISFIFO; 467 s3c24xx_i2s_pcm_stereo_out.dma_addr = res->start + S3C2410_IISFIFO;
468 s3c24xx_i2s_pcm_stereo_in.dma_addr = res->start + S3C2410_IISFIFO; 468 s3c24xx_i2s_pcm_stereo_in.dma_addr = res->start + S3C2410_IISFIFO;
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index ac3756f6af60..144308f15fb3 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -156,6 +156,7 @@ static int rsnd_dmaen_init(struct rsnd_priv *priv, struct rsnd_dma *dma, int id,
156 (void *)id); 156 (void *)id);
157 } 157 }
158 if (IS_ERR_OR_NULL(dmaen->chan)) { 158 if (IS_ERR_OR_NULL(dmaen->chan)) {
159 dmaen->chan = NULL;
159 dev_err(dev, "can't get dma channel\n"); 160 dev_err(dev, "can't get dma channel\n");
160 goto rsnd_dma_channel_err; 161 goto rsnd_dma_channel_err;
161 } 162 }
diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
index ab37add269ae..82e350e9501c 100644
--- a/sound/synth/emux/emux_oss.c
+++ b/sound/synth/emux/emux_oss.c
@@ -118,12 +118,8 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
118 if (snd_BUG_ON(!arg || !emu)) 118 if (snd_BUG_ON(!arg || !emu))
119 return -ENXIO; 119 return -ENXIO;
120 120
121 mutex_lock(&emu->register_mutex); 121 if (!snd_emux_inc_count(emu))
122
123 if (!snd_emux_inc_count(emu)) {
124 mutex_unlock(&emu->register_mutex);
125 return -EFAULT; 122 return -EFAULT;
126 }
127 123
128 memset(&callback, 0, sizeof(callback)); 124 memset(&callback, 0, sizeof(callback));
129 callback.owner = THIS_MODULE; 125 callback.owner = THIS_MODULE;
@@ -135,7 +131,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
135 if (p == NULL) { 131 if (p == NULL) {
136 snd_printk(KERN_ERR "can't create port\n"); 132 snd_printk(KERN_ERR "can't create port\n");
137 snd_emux_dec_count(emu); 133 snd_emux_dec_count(emu);
138 mutex_unlock(&emu->register_mutex);
139 return -ENOMEM; 134 return -ENOMEM;
140 } 135 }
141 136
@@ -148,8 +143,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
148 reset_port_mode(p, arg->seq_mode); 143 reset_port_mode(p, arg->seq_mode);
149 144
150 snd_emux_reset_port(p); 145 snd_emux_reset_port(p);
151
152 mutex_unlock(&emu->register_mutex);
153 return 0; 146 return 0;
154} 147}
155 148
@@ -195,13 +188,11 @@ snd_emux_close_seq_oss(struct snd_seq_oss_arg *arg)
195 if (snd_BUG_ON(!emu)) 188 if (snd_BUG_ON(!emu))
196 return -ENXIO; 189 return -ENXIO;
197 190
198 mutex_lock(&emu->register_mutex);
199 snd_emux_sounds_off_all(p); 191 snd_emux_sounds_off_all(p);
200 snd_soundfont_close_check(emu->sflist, SF_CLIENT_NO(p->chset.port)); 192 snd_soundfont_close_check(emu->sflist, SF_CLIENT_NO(p->chset.port));
201 snd_seq_event_port_detach(p->chset.client, p->chset.port); 193 snd_seq_event_port_detach(p->chset.client, p->chset.port);
202 snd_emux_dec_count(emu); 194 snd_emux_dec_count(emu);
203 195
204 mutex_unlock(&emu->register_mutex);
205 return 0; 196 return 0;
206} 197}
207 198
diff --git a/sound/synth/emux/emux_seq.c b/sound/synth/emux/emux_seq.c
index 7778b8e19782..a0209204ae48 100644
--- a/sound/synth/emux/emux_seq.c
+++ b/sound/synth/emux/emux_seq.c
@@ -124,12 +124,10 @@ snd_emux_detach_seq(struct snd_emux *emu)
124 if (emu->voices) 124 if (emu->voices)
125 snd_emux_terminate_all(emu); 125 snd_emux_terminate_all(emu);
126 126
127 mutex_lock(&emu->register_mutex);
128 if (emu->client >= 0) { 127 if (emu->client >= 0) {
129 snd_seq_delete_kernel_client(emu->client); 128 snd_seq_delete_kernel_client(emu->client);
130 emu->client = -1; 129 emu->client = -1;
131 } 130 }
132 mutex_unlock(&emu->register_mutex);
133} 131}
134 132
135 133
@@ -269,8 +267,8 @@ snd_emux_event_input(struct snd_seq_event *ev, int direct, void *private_data,
269/* 267/*
270 * increment usage count 268 * increment usage count
271 */ 269 */
272int 270static int
273snd_emux_inc_count(struct snd_emux *emu) 271__snd_emux_inc_count(struct snd_emux *emu)
274{ 272{
275 emu->used++; 273 emu->used++;
276 if (!try_module_get(emu->ops.owner)) 274 if (!try_module_get(emu->ops.owner))
@@ -284,12 +282,21 @@ snd_emux_inc_count(struct snd_emux *emu)
284 return 1; 282 return 1;
285} 283}
286 284
285int snd_emux_inc_count(struct snd_emux *emu)
286{
287 int ret;
288
289 mutex_lock(&emu->register_mutex);
290 ret = __snd_emux_inc_count(emu);
291 mutex_unlock(&emu->register_mutex);
292 return ret;
293}
287 294
288/* 295/*
289 * decrease usage count 296 * decrease usage count
290 */ 297 */
291void 298static void
292snd_emux_dec_count(struct snd_emux *emu) 299__snd_emux_dec_count(struct snd_emux *emu)
293{ 300{
294 module_put(emu->card->module); 301 module_put(emu->card->module);
295 emu->used--; 302 emu->used--;
@@ -298,6 +305,12 @@ snd_emux_dec_count(struct snd_emux *emu)
298 module_put(emu->ops.owner); 305 module_put(emu->ops.owner);
299} 306}
300 307
308void snd_emux_dec_count(struct snd_emux *emu)
309{
310 mutex_lock(&emu->register_mutex);
311 __snd_emux_dec_count(emu);
312 mutex_unlock(&emu->register_mutex);
313}
301 314
302/* 315/*
303 * Routine that is called upon a first use of a particular port 316 * Routine that is called upon a first use of a particular port
@@ -317,7 +330,7 @@ snd_emux_use(void *private_data, struct snd_seq_port_subscribe *info)
317 330
318 mutex_lock(&emu->register_mutex); 331 mutex_lock(&emu->register_mutex);
319 snd_emux_init_port(p); 332 snd_emux_init_port(p);
320 snd_emux_inc_count(emu); 333 __snd_emux_inc_count(emu);
321 mutex_unlock(&emu->register_mutex); 334 mutex_unlock(&emu->register_mutex);
322 return 0; 335 return 0;
323} 336}
@@ -340,7 +353,7 @@ snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *info)
340 353
341 mutex_lock(&emu->register_mutex); 354 mutex_lock(&emu->register_mutex);
342 snd_emux_sounds_off_all(p); 355 snd_emux_sounds_off_all(p);
343 snd_emux_dec_count(emu); 356 __snd_emux_dec_count(emu);
344 mutex_unlock(&emu->register_mutex); 357 mutex_unlock(&emu->register_mutex);
345 return 0; 358 return 0;
346} 359}
diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile
index 5a161175bbd4..a9099d9f8f39 100644
--- a/tools/testing/selftests/powerpc/pmu/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/Makefile
@@ -26,7 +26,7 @@ override define EMIT_TESTS
26 $(MAKE) -s -C ebb emit_tests 26 $(MAKE) -s -C ebb emit_tests
27endef 27endef
28 28
29DEFAULT_INSTALL := $(INSTALL_RULE) 29DEFAULT_INSTALL_RULE := $(INSTALL_RULE)
30override define INSTALL_RULE 30override define INSTALL_RULE
31 $(DEFAULT_INSTALL_RULE) 31 $(DEFAULT_INSTALL_RULE)
32 $(MAKE) -C ebb install 32 $(MAKE) -C ebb install
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index 1b616fa79e93..6bff955e1d55 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -1,4 +1,4 @@
1TEST_PROGS := tm-resched-dscr tm-syscall 1TEST_PROGS := tm-resched-dscr
2 2
3all: $(TEST_PROGS) 3all: $(TEST_PROGS)
4 4