author	Paolo Bonzini <pbonzini@redhat.com>	2015-11-24 13:34:40 -0500
committer	Paolo Bonzini <pbonzini@redhat.com>	2015-11-24 13:34:40 -0500
commit	8bd142c01648cdb33e9bcafa0448ba2c20ed814c (patch)
tree	9197c60d3f9d4036f38f281a183e94750ceea1d7
parent	d792abacaf1a1a8dfea353fab699b97fa6251c2a (diff)
parent	fbb4574ce9a37e15a9872860bf202f2be5bdf6c4 (diff)
Merge tag 'kvm-arm-for-v4.4-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-master
KVM/ARM Fixes for v4.4-rc3. Includes some timer fixes, properly unmapping PTEs, an errata fix, and two tweaks to the EL2 panic code.
-rw-r--r-- Documentation/IPMI.txt | 7
-rw-r--r-- Documentation/i2c/busses/i2c-i801 | 1
-rw-r--r-- Documentation/kernel-parameters.txt | 3
-rw-r--r-- MAINTAINERS | 40
-rw-r--r-- Makefile | 2
-rw-r--r-- arch/arm/boot/dts/imx27.dtsi | 16
-rw-r--r-- arch/arm/kvm/arm.c | 7
-rw-r--r-- arch/arm/kvm/mmu.c | 15
-rw-r--r-- arch/arm/net/bpf_jit_32.c | 2
-rw-r--r-- arch/arm64/Kconfig | 21
-rw-r--r-- arch/arm64/crypto/aes-ce-cipher.c | 2
-rw-r--r-- arch/arm64/include/asm/barrier.h | 16
-rw-r--r-- arch/arm64/include/asm/compat.h | 3
-rw-r--r-- arch/arm64/include/asm/cpufeature.h | 3
-rw-r--r-- arch/arm64/include/asm/dma-mapping.h | 13
-rw-r--r-- arch/arm64/include/asm/kvm_emulate.h | 8
-rw-r--r-- arch/arm64/include/asm/mmu_context.h | 2
-rw-r--r-- arch/arm64/include/asm/pgtable.h | 1
-rw-r--r-- arch/arm64/kernel/cpu_errata.c | 9
-rw-r--r-- arch/arm64/kernel/cpuinfo.c | 5
-rw-r--r-- arch/arm64/kernel/efi.c | 14
-rw-r--r-- arch/arm64/kernel/suspend.c | 10
-rw-r--r-- arch/arm64/kvm/hyp.S | 14
-rw-r--r-- arch/arm64/kvm/inject_fault.c | 2
-rw-r--r-- arch/arm64/mm/dma-mapping.c | 35
-rw-r--r-- arch/arm64/mm/mmu.c | 14
-rw-r--r-- arch/arm64/net/bpf_jit_comp.c | 48
-rw-r--r-- arch/mips/ath79/setup.c | 7
-rw-r--r-- arch/mips/boot/dts/qca/ar9132.dtsi | 2
-rw-r--r-- arch/mips/include/asm/page.h | 3
-rw-r--r-- arch/parisc/Kconfig | 3
-rw-r--r-- arch/parisc/include/asm/hugetlb.h | 85
-rw-r--r-- arch/parisc/include/asm/page.h | 13
-rw-r--r-- arch/parisc/include/asm/pgalloc.h | 2
-rw-r--r-- arch/parisc/include/asm/pgtable.h | 26
-rw-r--r-- arch/parisc/include/asm/processor.h | 27
-rw-r--r-- arch/parisc/include/uapi/asm/mman.h | 10
-rw-r--r-- arch/parisc/kernel/asm-offsets.c | 8
-rw-r--r-- arch/parisc/kernel/entry.S | 56
-rw-r--r-- arch/parisc/kernel/head.S | 4
-rw-r--r-- arch/parisc/kernel/setup.c | 14
-rw-r--r-- arch/parisc/kernel/syscall.S | 4
-rw-r--r-- arch/parisc/kernel/traps.c | 35
-rw-r--r-- arch/parisc/kernel/vmlinux.lds.S | 9
-rw-r--r-- arch/parisc/mm/Makefile | 1
-rw-r--r-- arch/parisc/mm/hugetlbpage.c | 161
-rw-r--r-- arch/parisc/mm/init.c | 40
-rw-r--r-- arch/powerpc/include/asm/systbl.h | 1
-rw-r--r-- arch/powerpc/include/asm/unistd.h | 2
-rw-r--r-- arch/powerpc/include/uapi/asm/unistd.h | 1
-rw-r--r-- arch/s390/include/asm/cio.h | 1
-rw-r--r-- arch/s390/include/asm/elf.h | 13
-rw-r--r-- arch/s390/include/asm/ipl.h | 3
-rw-r--r-- arch/s390/include/asm/pci_dma.h | 4
-rw-r--r-- arch/s390/include/asm/trace/diag.h | 6
-rw-r--r-- arch/s390/include/uapi/asm/unistd.h | 19
-rw-r--r-- arch/s390/kernel/compat_wrapper.c | 1
-rw-r--r-- arch/s390/kernel/diag.c | 4
-rw-r--r-- arch/s390/kernel/head.S | 95
-rw-r--r-- arch/s390/kernel/ipl.c | 65
-rw-r--r-- arch/s390/kernel/process.c | 6
-rw-r--r-- arch/s390/kernel/sclp.c | 2
-rw-r--r-- arch/s390/kernel/setup.c | 3
-rw-r--r-- arch/s390/kernel/syscalls.S | 1
-rw-r--r-- arch/s390/kernel/trace.c | 6
-rw-r--r-- arch/s390/mm/init.c | 30
-rw-r--r-- arch/s390/mm/mmap.c | 60
-rw-r--r-- arch/s390/pci/pci_dma.c | 84
-rw-r--r-- arch/x86/include/asm/msr-index.h | 3
-rw-r--r-- arch/x86/kernel/cpu/common.c | 3
-rw-r--r-- arch/x86/kernel/fpu/signal.c | 11
-rw-r--r-- arch/x86/kernel/fpu/xstate.c | 1
-rw-r--r-- arch/x86/kernel/mcount_64.S | 6
-rw-r--r-- arch/x86/mm/mpx.c | 47
-rw-r--r-- block/blk.h | 2
-rw-r--r-- drivers/acpi/cppc_acpi.c | 2
-rw-r--r-- drivers/acpi/ec.c | 2
-rw-r--r-- drivers/acpi/sbshc.c | 48
-rw-r--r-- drivers/base/power/wakeirq.c | 6
-rw-r--r-- drivers/char/ipmi/ipmi_si_intf.c | 82
-rw-r--r-- drivers/char/ipmi/ipmi_watchdog.c | 8
-rw-r--r-- drivers/clocksource/Kconfig | 1
-rw-r--r-- drivers/clocksource/fsl_ftm_timer.c | 4
-rw-r--r-- drivers/cpufreq/Kconfig.arm | 1
-rw-r--r-- drivers/cpufreq/Kconfig.x86 | 1
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 316
-rw-r--r-- drivers/crypto/qat/qat_common/adf_ctl_drv.c | 2
-rw-r--r-- drivers/dma/at_hdmac.c | 20
-rw-r--r-- drivers/dma/at_hdmac_regs.h | 6
-rw-r--r-- drivers/dma/at_xdmac.c | 20
-rw-r--r-- drivers/dma/edma.c | 4
-rw-r--r-- drivers/dma/imx-sdma.c | 2
-rw-r--r-- drivers/dma/sh/usb-dmac.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 120
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 177
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 101
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 94
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 138
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 302
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 9
-rw-r--r-- drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | 24
-rw-r--r-- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 24
-rw-r--r-- drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 6
-rw-r--r-- drivers/gpu/drm/amd/scheduler/sched_fence.c | 10
-rw-r--r-- drivers/gpu/drm/drm_atomic.c | 61
-rw-r--r-- drivers/gpu/drm/drm_atomic_helper.c | 29
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 51
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_params.c | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 31
-rw-r--r-- drivers/gpu/drm/i915/intel_ddi.c | 75
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 37
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 10
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_cursor.c | 11
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 15
-rw-r--r-- drivers/gpu/drm/radeon/radeon_pm.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/si_dpm.c | 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_crtc.c | 9
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.c | 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hvs.c | 8
-rw-r--r-- drivers/gpu/drm/vc4/vc4_plane.c | 18
-rw-r--r-- drivers/hid/wacom_wac.c | 5
-rw-r--r-- drivers/hwmon/Kconfig | 2
-rw-r--r-- drivers/hwmon/applesmc.c | 2
-rw-r--r-- drivers/hwmon/scpi-hwmon.c | 21
-rw-r--r-- drivers/i2c/busses/Kconfig | 1
-rw-r--r-- drivers/i2c/busses/i2c-i801.c | 6
-rw-r--r-- drivers/i2c/busses/i2c-imx.c | 1
-rw-r--r-- drivers/i2c/busses/i2c-xiic.c | 4
-rw-r--r-- drivers/i2c/i2c-core.c | 2
-rw-r--r-- drivers/iio/adc/ad7793.c | 2
-rw-r--r-- drivers/iio/adc/vf610_adc.c | 22
-rw-r--r-- drivers/iio/adc/xilinx-xadc-core.c | 1
-rw-r--r-- drivers/iio/dac/ad5064.c | 91
-rw-r--r-- drivers/iio/humidity/si7020.c | 8
-rw-r--r-- drivers/iommu/s390-iommu.c | 23
-rw-r--r-- drivers/irqchip/irq-gic-common.c | 13
-rw-r--r-- drivers/irqchip/irq-gic.c | 38
-rw-r--r-- drivers/media/pci/cx23885/cx23885-core.c | 4
-rw-r--r-- drivers/media/pci/cx25821/cx25821-core.c | 3
-rw-r--r-- drivers/media/pci/cx88/cx88-alsa.c | 4
-rw-r--r-- drivers/media/pci/cx88/cx88-mpeg.c | 3
-rw-r--r-- drivers/media/pci/cx88/cx88-video.c | 4
-rw-r--r-- drivers/media/pci/netup_unidvb/netup_unidvb_core.c | 2
-rw-r--r-- drivers/media/pci/saa7134/saa7134-core.c | 4
-rw-r--r-- drivers/media/pci/saa7164/saa7164-core.c | 4
-rw-r--r-- drivers/media/pci/tw68/tw68-core.c | 4
-rw-r--r-- drivers/mmc/card/block.c | 11
-rw-r--r-- drivers/mmc/core/mmc.c | 93
-rw-r--r-- drivers/mmc/host/Kconfig | 1
-rw-r--r-- drivers/mmc/host/mtk-sd.c | 2
-rw-r--r-- drivers/mmc/host/pxamci.c | 2
-rw-r--r-- drivers/mtd/nand/jz4740_nand.c | 1
-rw-r--r-- drivers/mtd/nand/nand_base.c | 2
-rw-r--r-- drivers/net/dsa/mv88e6060.c | 114
-rw-r--r-- drivers/net/dsa/mv88e6060.h | 111
-rw-r--r-- drivers/net/ethernet/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/Makefile | 1
-rw-r--r-- drivers/net/ethernet/amd/pcnet32.c | 5
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 10
-rw-r--r-- drivers/net/ethernet/dlink/Kconfig | 5
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.c | 55
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.h | 15
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 19
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 2
-rw-r--r-- drivers/net/ethernet/icplus/Kconfig | 13
-rw-r--r-- drivers/net/ethernet/icplus/Makefile | 5
-rw-r--r-- drivers/net/ethernet/icplus/ipg.c | 2300
-rw-r--r-- drivers/net/ethernet/icplus/ipg.h | 748
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 39
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 50
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 76
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 6
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 8
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 17
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 10
-rw-r--r-- drivers/net/ethernet/via/via-velocity.c | 24
-rw-r--r-- drivers/net/fjes/fjes_hw.c | 2
-rw-r--r-- drivers/net/ipvlan/ipvlan_core.c | 14
-rw-r--r-- drivers/net/macvlan.c | 2
-rw-r--r-- drivers/net/phy/at803x.c | 4
-rw-r--r-- drivers/net/phy/marvell.c | 16
-rw-r--r-- drivers/net/phy/phy.c | 3
-rw-r--r-- drivers/net/phy/vitesse.c | 16
-rw-r--r-- drivers/net/usb/cdc_ether.c | 5
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 7
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_int.h | 4
-rw-r--r-- drivers/pci/probe.c | 4
-rw-r--r-- drivers/s390/cio/chsc.c | 37
-rw-r--r-- drivers/s390/cio/chsc.h | 15
-rw-r--r-- drivers/s390/cio/cio.c | 14
-rw-r--r-- drivers/s390/cio/css.c | 5
-rw-r--r-- drivers/s390/crypto/Makefile | 7
-rw-r--r-- drivers/s390/crypto/ap_bus.c | 6
-rw-r--r-- drivers/s390/crypto/zcrypt_api.c | 10
-rw-r--r-- drivers/s390/crypto/zcrypt_api.h | 1
-rw-r--r-- drivers/s390/crypto/zcrypt_msgtype50.c | 1
-rw-r--r-- drivers/s390/crypto/zcrypt_msgtype6.c | 3
-rw-r--r-- drivers/sh/pm_runtime.c | 2
-rw-r--r-- drivers/staging/iio/Kconfig | 3
-rw-r--r-- drivers/staging/iio/adc/lpc32xx_adc.c | 4
-rw-r--r-- drivers/staging/wilc1000/coreconfigurator.c | 48
-rw-r--r-- drivers/tty/n_tty.c | 2
-rw-r--r-- drivers/tty/serial/8250/8250_fsl.c | 1
-rw-r--r-- drivers/tty/serial/8250/Kconfig | 1
-rw-r--r-- drivers/tty/serial/Kconfig | 2
-rw-r--r-- drivers/tty/serial/bcm63xx_uart.c | 2
-rw-r--r-- drivers/tty/serial/etraxfs-uart.c | 2
-rw-r--r-- drivers/tty/tty_audit.c | 2
-rw-r--r-- drivers/tty/tty_io.c | 4
-rw-r--r-- drivers/tty/tty_ioctl.c | 4
-rw-r--r-- drivers/tty/tty_ldisc.c | 2
-rw-r--r-- drivers/usb/chipidea/ci_hdrc_imx.c | 142
-rw-r--r-- drivers/usb/chipidea/debug.c | 2
-rw-r--r-- drivers/usb/chipidea/udc.c | 17
-rw-r--r-- drivers/usb/chipidea/usbmisc_imx.c | 10
-rw-r--r-- drivers/usb/class/usblp.c | 2
-rw-r--r-- drivers/usb/core/Kconfig | 3
-rw-r--r-- drivers/usb/dwc2/hcd.c | 9
-rw-r--r-- drivers/usb/dwc2/platform.c | 3
-rw-r--r-- drivers/usb/dwc3/dwc3-pci.c | 4
-rw-r--r-- drivers/usb/dwc3/gadget.c | 24
-rw-r--r-- drivers/usb/gadget/function/f_loopback.c | 2
-rw-r--r-- drivers/usb/gadget/udc/atmel_usba_udc.c | 2
-rw-r--r-- drivers/usb/host/xhci-hub.c | 15
-rw-r--r-- drivers/usb/host/xhci-ring.c | 32
-rw-r--r-- drivers/usb/host/xhci.c | 10
-rw-r--r-- drivers/usb/musb/musb_core.c | 12
-rw-r--r-- drivers/usb/musb/musb_host.c | 22
-rw-r--r-- drivers/usb/phy/Kconfig | 4
-rw-r--r-- drivers/usb/phy/phy-mxs-usb.c | 7
-rw-r--r-- drivers/usb/phy/phy-omap-otg.c | 2
-rw-r--r-- drivers/usb/serial/option.c | 11
-rw-r--r-- drivers/usb/serial/qcserial.c | 94
-rw-r--r-- drivers/usb/serial/ti_usb_3410_5052.c | 2
-rw-r--r-- drivers/usb/serial/ti_usb_3410_5052.h | 4
-rw-r--r-- fs/Kconfig | 6
-rw-r--r-- fs/block_dev.c | 18
-rw-r--r-- fs/cachefiles/rdwr.c | 2
-rw-r--r-- fs/configfs/dir.c | 110
-rw-r--r-- fs/dax.c | 4
-rw-r--r-- fs/ext2/super.c | 2
-rw-r--r-- fs/ext4/super.c | 6
-rw-r--r-- fs/fat/dir.c | 16
-rw-r--r-- fs/hugetlbfs/inode.c | 65
-rw-r--r-- fs/ncpfs/ioctl.c | 2
-rw-r--r-- fs/ocfs2/namei.c | 2
-rw-r--r-- include/drm/drm_atomic.h | 3
-rw-r--r-- include/kvm/arm_vgic.h | 2
-rw-r--r-- include/linux/blkdev.h | 2
-rw-r--r-- include/linux/configfs.h | 10
-rw-r--r-- include/linux/gfp.h | 2
-rw-r--r-- include/linux/marvell_phy.h | 1
-rw-r--r-- include/linux/mlx5/mlx5_ifc.h | 24
-rw-r--r-- include/linux/netdevice.h | 32
-rw-r--r-- include/linux/netfilter/ipset/ip_set.h | 2
-rw-r--r-- include/linux/netfilter_ingress.h | 13
-rw-r--r-- include/linux/of_dma.h | 2
-rw-r--r-- include/linux/signal.h | 1
-rw-r--r-- include/linux/slab.h | 45
-rw-r--r-- include/linux/tty.h | 6
-rw-r--r-- include/net/ip6_fib.h | 3
-rw-r--r-- include/net/ip6_tunnel.h | 3
-rw-r--r-- include/net/ip_tunnels.h | 3
-rw-r--r-- include/net/netfilter/nf_tables.h | 16
-rw-r--r-- include/net/sock.h | 25
-rw-r--r-- include/net/switchdev.h | 2
-rw-r--r-- kernel/livepatch/core.c | 6
-rw-r--r-- kernel/panic.c | 5
-rw-r--r-- kernel/signal.c | 2
-rw-r--r-- mm/huge_memory.c | 4
-rw-r--r-- mm/kasan/kasan.c | 2
-rw-r--r-- mm/memory.c | 8
-rw-r--r-- mm/page-writeback.c | 4
-rw-r--r-- mm/slab.c | 2
-rw-r--r-- mm/slab.h | 2
-rw-r--r-- mm/slab_common.c | 6
-rw-r--r-- mm/slob.c | 2
-rw-r--r-- mm/slub.c | 304
-rw-r--r-- mm/vmalloc.c | 5
-rw-r--r-- net/8021q/vlan_core.c | 4
-rw-r--r-- net/bridge/br_stp.c | 2
-rw-r--r-- net/bridge/br_stp_if.c | 2
-rw-r--r-- net/core/dev.c | 18
-rw-r--r-- net/core/neighbour.c | 2
-rw-r--r-- net/core/rtnetlink.c | 274
-rw-r--r-- net/core/skbuff.c | 3
-rw-r--r-- net/ipv4/inet_connection_sock.c | 4
-rw-r--r-- net/ipv4/netfilter/nf_nat_pptp.c | 2
-rw-r--r-- net/ipv4/raw.c | 8
-rw-r--r-- net/ipv4/tcp.c | 21
-rw-r--r-- net/ipv4/tcp_diag.c | 2
-rw-r--r-- net/ipv4/tcp_ipv4.c | 14
-rw-r--r-- net/ipv6/mcast.c | 2
-rw-r--r-- net/ipv6/route.c | 22
-rw-r--r-- net/ipv6/tcp_ipv6.c | 19
-rw-r--r-- net/netfilter/Kconfig | 6
-rw-r--r-- net/netfilter/ipset/ip_set_bitmap_gen.h | 17
-rw-r--r-- net/netfilter/ipset/ip_set_bitmap_ip.c | 14
-rw-r--r-- net/netfilter/ipset/ip_set_bitmap_ipmac.c | 64
-rw-r--r-- net/netfilter/ipset/ip_set_bitmap_port.c | 18
-rw-r--r-- net/netfilter/ipset/ip_set_core.c | 14
-rw-r--r-- net/netfilter/ipset/ip_set_hash_gen.h | 26
-rw-r--r-- net/netfilter/ipset/ip_set_list_set.c | 5
-rw-r--r-- net/netfilter/ipvs/ip_vs_core.c | 16
-rw-r--r-- net/netfilter/nfnetlink_log.c | 2
-rw-r--r-- net/netfilter/nft_counter.c | 49
-rw-r--r-- net/netfilter/nft_dynset.c | 5
-rw-r--r-- net/packet/af_packet.c | 92
-rw-r--r-- net/sctp/auth.c | 4
-rw-r--r-- net/unix/af_unix.c | 24
-rw-r--r-- samples/bpf/Makefile | 7
-rwxr-xr-x scripts/kernel-doc | 2
-rw-r--r-- tools/Makefile | 11
-rw-r--r-- tools/net/Makefile | 7
-rw-r--r-- tools/perf/builtin-inject.c | 1
-rw-r--r-- tools/perf/builtin-report.c | 6
-rw-r--r-- tools/perf/ui/browsers/hists.c | 7
-rw-r--r-- tools/perf/util/build-id.c | 1
-rw-r--r-- tools/perf/util/dso.c | 17
-rw-r--r-- tools/perf/util/dso.h | 1
-rw-r--r-- tools/perf/util/machine.c | 1
-rw-r--r-- tools/perf/util/probe-finder.c | 24
-rw-r--r-- tools/perf/util/symbol.c | 34
-rw-r--r-- tools/perf/util/symbol.h | 1
-rw-r--r-- tools/power/x86/turbostat/turbostat.c | 8
-rw-r--r-- tools/vm/page-types.c | 1
-rw-r--r-- virt/kvm/arm/arch_timer.c | 28
-rw-r--r-- virt/kvm/arm/vgic.c | 50
345 files changed, 4249 insertions, 5556 deletions
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index 31d1d658827f..c0d8788e75d3 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -587,7 +587,7 @@ used to control it:
 
   modprobe ipmi_watchdog timeout=<t> pretimeout=<t> action=<action type>
       preaction=<preaction type> preop=<preop type> start_now=x
-      nowayout=x ifnum_to_use=n
+      nowayout=x ifnum_to_use=n panic_wdt_timeout=<t>
 
 ifnum_to_use specifies which interface the watchdog timer should use.
 The default is -1, which means to pick the first one registered.
@@ -597,7 +597,9 @@ is the amount of seconds before the reset that the pre-timeout panic will
 occur (if pretimeout is zero, then pretimeout will not be enabled). Note
 that the pretimeout is the time before the final timeout. So if the
 timeout is 50 seconds and the pretimeout is 10 seconds, then the pretimeout
-will occur in 40 second (10 seconds before the timeout).
+will occur in 40 second (10 seconds before the timeout). The panic_wdt_timeout
+is the value of timeout which is set on kernel panic, in order to let actions
+such as kdump to occur during panic.
 
 The action may be "reset", "power_cycle", or "power_off", and
 specifies what to do when the timer times out, and defaults to
@@ -634,6 +636,7 @@ for configuring the watchdog:
 	ipmi_watchdog.preop=<preop type>
 	ipmi_watchdog.start_now=x
 	ipmi_watchdog.nowayout=x
+	ipmi_watchdog.panic_wdt_timeout=<t>
 
 The options are the same as the module parameter options.
 
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index 6a4b1af724f8..1bba38dd2637 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -32,6 +32,7 @@ Supported adapters:
   * Intel Sunrise Point-LP (PCH)
   * Intel DNV (SOC)
   * Intel Broxton (SOC)
+  * Intel Lewisburg (PCH)
    Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f8aae632f02f..742f69d18fc8 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1583,9 +1583,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 		hwp_only
 			Only load intel_pstate on systems which support
 			hardware P state control (HWP) if available.
-		no_acpi
-			Don't use ACPI processor performance control objects
-			_PSS and _PPC specified limits.
 
 	intremap=	[X86-64, Intel-IOMMU]
 			on	enable Interrupt Remapping (default)
diff --git a/MAINTAINERS b/MAINTAINERS
index e9caa4b28828..050d0e77a2cf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2449,7 +2449,9 @@ F: drivers/firmware/broadcom/*
 
 BROADCOM STB NAND FLASH DRIVER
 M:	Brian Norris <computersforpeace@gmail.com>
+M:	Kamal Dasu <kdasu.kdev@gmail.com>
 L:	linux-mtd@lists.infradead.org
+L:	bcm-kernel-feedback-list@broadcom.com
 S:	Maintained
 F:	drivers/mtd/nand/brcmnand/
 
@@ -2546,7 +2548,7 @@ F: arch/c6x/
 
 CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS
 M:	David Howells <dhowells@redhat.com>
-L:	linux-cachefs@redhat.com
+L:	linux-cachefs@redhat.com (moderated for non-subscribers)
 S:	Supported
 F:	Documentation/filesystems/caching/cachefiles.txt
 F:	fs/cachefiles/
@@ -2929,10 +2931,9 @@ S: Maintained
 F:	drivers/platform/x86/compal-laptop.c
 
 CONEXANT ACCESSRUNNER USB DRIVER
-M:	Simon Arlott <cxacru@fire.lp0.eu>
 L:	accessrunner-general@lists.sourceforge.net
 W:	http://accessrunner.sourceforge.net/
-S:	Maintained
+S:	Orphan
 F:	drivers/usb/atm/cxacru.c
 
 CONFIGFS
@@ -4409,6 +4410,7 @@ K: fmc_d.*register
 
 FPGA MANAGER FRAMEWORK
 M:	Alan Tull <atull@opensource.altera.com>
+R:	Moritz Fischer <moritz.fischer@ettus.com>
 S:	Maintained
 F:	drivers/fpga/
 F:	include/linux/fpga/fpga-mgr.h
@@ -4559,7 +4561,7 @@ F: include/linux/frontswap.h
 
 FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS
 M:	David Howells <dhowells@redhat.com>
-L:	linux-cachefs@redhat.com
+L:	linux-cachefs@redhat.com (moderated for non-subscribers)
 S:	Supported
 F:	Documentation/filesystems/caching/
 F:	fs/fscache/
@@ -5711,13 +5713,6 @@ M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
 S:	Maintained
 F:	net/ipv4/netfilter/ipt_MASQUERADE.c
 
-IP1000A 10/100/1000 GIGABIT ETHERNET DRIVER
-M:	Francois Romieu <romieu@fr.zoreil.com>
-M:	Sorbica Shieh <sorbica@icplus.com.tw>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/ethernet/icplus/ipg.*
-
 IPATH DRIVER
 M:	Mike Marciniszyn <infinipath@intel.com>
 L:	linux-rdma@vger.kernel.org
@@ -6923,13 +6918,21 @@ F: drivers/scsi/megaraid.*
 F:	drivers/scsi/megaraid/
 
 MELLANOX ETHERNET DRIVER (mlx4_en)
-M:	Amir Vadai <amirv@mellanox.com>
+M:	Eugenia Emantayev <eugenia@mellanox.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 W:	http://www.mellanox.com
 Q:	http://patchwork.ozlabs.org/project/netdev/list/
 F:	drivers/net/ethernet/mellanox/mlx4/en_*
 
+MELLANOX ETHERNET DRIVER (mlx5e)
+M:	Saeed Mahameed <saeedm@mellanox.com>
+L:	netdev@vger.kernel.org
+S:	Supported
+W:	http://www.mellanox.com
+Q:	http://patchwork.ozlabs.org/project/netdev/list/
+F:	drivers/net/ethernet/mellanox/mlx5/core/en_*
+
 MELLANOX ETHERNET SWITCH DRIVERS
 M:	Jiri Pirko <jiri@mellanox.com>
 M:	Ido Schimmel <idosch@mellanox.com>
@@ -7901,6 +7904,18 @@ S: Maintained
 F:	net/openvswitch/
 F:	include/uapi/linux/openvswitch.h
 
+OPERATING PERFORMANCE POINTS (OPP)
+M:	Viresh Kumar <vireshk@kernel.org>
+M:	Nishanth Menon <nm@ti.com>
+M:	Stephen Boyd <sboyd@codeaurora.org>
+L:	linux-pm@vger.kernel.org
+S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git
+F:	drivers/base/power/opp/
+F:	include/linux/pm_opp.h
+F:	Documentation/power/opp.txt
+F:	Documentation/devicetree/bindings/opp/
+
 OPL4 DRIVER
 M:	Clemens Ladisch <clemens@ladisch.de>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -9314,7 +9329,6 @@ F: drivers/i2c/busses/i2c-designware-*
 F:	include/linux/platform_data/i2c-designware.h
 
 SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
-M:	Seungwon Jeon <tgih.jun@samsung.com>
 M:	Jaehoon Chung <jh80.chung@samsung.com>
 L:	linux-mmc@vger.kernel.org
 S:	Maintained
diff --git a/Makefile b/Makefile
index 3a0234f50f36..2ffdf9d6f339 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index feb9d34b239c..f818ea483aeb 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -486,7 +486,10 @@
 				compatible = "fsl,imx27-usb";
 				reg = <0x10024000 0x200>;
 				interrupts = <56>;
-				clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
+				clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
+					 <&clks IMX27_CLK_USB_AHB_GATE>,
+					 <&clks IMX27_CLK_USB_DIV>;
+				clock-names = "ipg", "ahb", "per";
 				fsl,usbmisc = <&usbmisc 0>;
 				status = "disabled";
 			};
@@ -495,7 +498,10 @@
 				compatible = "fsl,imx27-usb";
 				reg = <0x10024200 0x200>;
 				interrupts = <54>;
-				clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
+				clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
+					 <&clks IMX27_CLK_USB_AHB_GATE>,
+					 <&clks IMX27_CLK_USB_DIV>;
+				clock-names = "ipg", "ahb", "per";
 				fsl,usbmisc = <&usbmisc 1>;
 				dr_mode = "host";
 				status = "disabled";
@@ -505,7 +511,10 @@
 				compatible = "fsl,imx27-usb";
 				reg = <0x10024400 0x200>;
 				interrupts = <55>;
-				clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
+				clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
+					 <&clks IMX27_CLK_USB_AHB_GATE>,
+					 <&clks IMX27_CLK_USB_DIV>;
+				clock-names = "ipg", "ahb", "per";
 				fsl,usbmisc = <&usbmisc 2>;
 				dr_mode = "host";
 				status = "disabled";
@@ -515,7 +524,6 @@
 				#index-cells = <1>;
 				compatible = "fsl,imx27-usbmisc";
 				reg = <0x10024600 0x200>;
-				clocks = <&clks IMX27_CLK_USB_AHB_GATE>;
 			};
 
 			sahara2: sahara@10025000 {
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index eab83b2435b8..e06fd299de08 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -564,17 +564,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 			vcpu_sleep(vcpu);
 
 		/*
-		 * Disarming the background timer must be done in a
-		 * preemptible context, as this call may sleep.
-		 */
-		kvm_timer_flush_hwstate(vcpu);
-
-		/*
 		 * Preparing the interrupts to be injected also
 		 * involves poking the GIC, which must be done in a
 		 * non-preemptible context.
 		 */
 		preempt_disable();
+		kvm_timer_flush_hwstate(vcpu);
 		kvm_vgic_flush_hwstate(vcpu);
 
 		local_irq_disable();
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 6984342da13d..7dace909d5cf 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud)
 	__kvm_flush_dcache_pud(pud);
 }
 
+static bool kvm_is_device_pfn(unsigned long pfn)
+{
+	return !pfn_valid(pfn);
+}
+
 /**
  * stage2_dissolve_pmd() - clear and flush huge PMD entry
  * @kvm:	pointer to kvm structure.
@@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 			kvm_tlb_flush_vmid_ipa(kvm, addr);
 
 			/* No need to invalidate the cache for device mappings */
-			if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+			if (!kvm_is_device_pfn(__phys_to_pfn(addr)))
 				kvm_flush_dcache_pte(old_pte);
 
 			put_page(virt_to_page(pte));
@@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if (!pte_none(*pte) &&
-		    (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+		if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr)))
 			kvm_flush_dcache_pte(*pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
@@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_dabt_iswrite(vcpu);
 }
 
-static bool kvm_is_device_pfn(unsigned long pfn)
-{
-	return !pfn_valid(pfn);
-}
-
 /**
  * stage2_wp_ptes - write protect PMD range
  * @pmd:	pointer to pmd entry
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 2f4b14cfddb4..591f9db3bf40 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1061,7 +1061,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
 	}
 	build_epilogue(&ctx);
 
-	flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));
+	flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
 
 #if __LINUX_ARM_ARCH__ < 7
 	if (ctx.imm_count)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9ac16a482ff1..e55848c1edf4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -316,6 +316,27 @@ config ARM64_ERRATUM_832075
 
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_834220
+	bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault"
+	depends on KVM
+	default y
+	help
+	  This option adds an alternative code sequence to work around ARM
+	  erratum 834220 on Cortex-A57 parts up to r1p2.
+
+	  Affected Cortex-A57 parts might report a Stage 2 translation
+	  fault as the result of a Stage 1 fault for load crossing a
+	  page boundary when there is a permission or device memory
+	  alignment fault at Stage 1 and a translation fault at Stage 2.
+
+	  The workaround is to verify that the Stage 1 translation
+	  doesn't generate a fault before handling the Stage 2 fault.
+	  Please note that this does not necessarily enable the workaround,
+	  as it depends on the alternative framework, which will only patch
+	  the kernel if an affected CPU is detected.
+
+	  If unsure, say Y.
+
 config ARM64_ERRATUM_845719
 	bool "Cortex-A53: 845719: a load might read incorrect data"
 	depends on COMPAT
diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c
index ce47792a983d..f7bd9bf0bbb3 100644
--- a/arch/arm64/crypto/aes-ce-cipher.c
+++ b/arch/arm64/crypto/aes-ce-cipher.c
@@ -237,7 +237,7 @@ EXPORT_SYMBOL(ce_aes_setkey);
 static struct crypto_alg aes_alg = {
 	.cra_name		= "aes",
 	.cra_driver_name	= "aes-ce",
-	.cra_priority		= 300,
+	.cra_priority		= 250,
 	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 624f9679f4b0..9622eb48f894 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -64,27 +64,31 @@ do { \
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1;						\
+	union { typeof(*p) __val; char __c[1]; } __u;			\
 	compiletime_assert_atomic_type(*p);				\
 	switch (sizeof(*p)) {						\
 	case 1:								\
 		asm volatile ("ldarb %w0, %1"				\
-			: "=r" (___p1) : "Q" (*p) : "memory");		\
+			: "=r" (*(__u8 *)__u.__c)			\
+			: "Q" (*p) : "memory");				\
 		break;							\
 	case 2:								\
 		asm volatile ("ldarh %w0, %1"				\
-			: "=r" (___p1) : "Q" (*p) : "memory");		\
+			: "=r" (*(__u16 *)__u.__c)			\
+			: "Q" (*p) : "memory");				\
 		break;							\
 	case 4:								\
 		asm volatile ("ldar %w0, %1"				\
-			: "=r" (___p1) : "Q" (*p) : "memory");		\
+			: "=r" (*(__u32 *)__u.__c)			\
+			: "Q" (*p) : "memory");				\
 		break;							\
 	case 8:								\
 		asm volatile ("ldar %0, %1"				\
-			: "=r" (___p1) : "Q" (*p) : "memory");		\
+			: "=r" (*(__u64 *)__u.__c)			\
+			: "Q" (*p) : "memory");				\
 		break;							\
 	}								\
-	___p1;								\
+	__u.__val;							\
 })
 
 #define read_barrier_depends() do { } while(0)
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 7fbed6919b54..eb8432bb82b8 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -23,7 +23,6 @@
  */
 #include <linux/types.h>
 #include <linux/sched.h>
-#include <linux/ptrace.h>
 
 #define COMPAT_USER_HZ	100
 #ifdef __AARCH64EB__
@@ -234,7 +233,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 	return (u32)(unsigned long)uptr;
 }
 
-#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs()))
+#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
 
 static inline void __user *arch_compat_alloc_user_space(long len)
 {
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 11d5bb0fdd54..52722ee73dba 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -29,8 +29,9 @@
 #define ARM64_HAS_PAN				4
 #define ARM64_HAS_LSE_ATOMICS			5
 #define ARM64_WORKAROUND_CAVIUM_23154		6
+#define ARM64_WORKAROUND_834220			7
 
-#define ARM64_NCAPS				7
+#define ARM64_NCAPS				8
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 54d0ead41afc..61e08f360e31 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -18,7 +18,6 @@
 
 #ifdef __KERNEL__
 
-#include <linux/acpi.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 
@@ -26,22 +25,16 @@
 #include <asm/xen/hypervisor.h>
 
 #define DMA_ERROR_CODE	(~(dma_addr_t)0)
-extern struct dma_map_ops *dma_ops;
 extern struct dma_map_ops dummy_dma_ops;
 
 static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
-	if (unlikely(!dev))
-		return dma_ops;
-	else if (dev->archdata.dma_ops)
+	if (dev && dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
-	else if (acpi_disabled)
-		return dma_ops;
 
 	/*
-	 * When ACPI is enabled, if arch_set_dma_ops is not called,
-	 * we will disable device DMA capability by setting it
-	 * to dummy_dma_ops.
+	 * We expect no ISA devices, and all other DMA masters are expected to
+	 * have someone call arch_setup_dma_ops at device creation time.
 	 */
 	return &dummy_dma_ops;
 }
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 17e92f05b1fe..3ca894ecf699 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -99,11 +99,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
 }
 
+/*
+ * vcpu_reg should always be passed a register number coming from a
+ * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
+ * with banked registers.
+ */
 static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
 {
-	if (vcpu_mode_is_32bit(vcpu))
-		return vcpu_reg32(vcpu, reg_num);
-
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
 }
 
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index c0e87898ba96..24165784b803 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -101,7 +101,7 @@ static inline void cpu_set_default_tcr_t0sz(void)
 #define destroy_context(mm)		do { } while(0)
 void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
 
-#define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
+#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })
 
 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 9819a9426b69..7e074f93f383 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -81,6 +81,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 
 #define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+#define PAGE_KERNEL_ROX		__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
 
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 24926f2504f7..feb6b4efa641 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -75,6 +75,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		(1 << MIDR_VARIANT_SHIFT) | 2),
 	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_834220
+	{
+	/* Cortex-A57 r0p0 - r1p2 */
+		.desc = "ARM erratum 834220",
+		.capability = ARM64_WORKAROUND_834220,
+		MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
+			   (1 << MIDR_VARIANT_SHIFT) | 2),
+	},
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_845719
 	{
 	/* Cortex-A53 r0p[01234] */
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 706679d0a0b4..212ae6361d8b 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -30,6 +30,7 @@
 #include <linux/seq_file.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <linux/delay.h>
 
 /*
  * In case the boot CPU is hotpluggable, we record its initial state and
@@ -112,6 +113,10 @@ static int c_show(struct seq_file *m, void *v)
 	 */
 	seq_printf(m, "processor\t: %d\n", i);
 
+	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+		   loops_per_jiffy / (500000UL/HZ),
+		   loops_per_jiffy / (5000UL/HZ) % 100);
+
 	/*
 	 * Dump out the common processor features in a single line.
 	 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index de46b50f4cdf..fc5508e0df57 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -224,6 +224,8 @@ static bool __init efi_virtmap_init(void)
 {
 	efi_memory_desc_t *md;
 
+	init_new_context(NULL, &efi_mm);
+
 	for_each_efi_memory_desc(&memmap, md) {
 		u64 paddr, npages, size;
 		pgprot_t prot;
@@ -254,7 +256,8 @@ static bool __init efi_virtmap_init(void)
 		else
 			prot = PAGE_KERNEL;
 
-		create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
+		create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size,
+				   __pgprot(pgprot_val(prot) | PTE_NG));
 	}
 	return true;
 }
@@ -329,14 +332,7 @@ core_initcall(arm64_dmi_init);
 
 static void efi_set_pgd(struct mm_struct *mm)
 {
-	if (mm == &init_mm)
-		cpu_set_reserved_ttbr0();
-	else
-		cpu_switch_mm(mm->pgd, mm);
-
-	local_flush_tlb_all();
-	if (icache_is_aivivt())
-		__local_flush_icache_all();
+	switch_mm(NULL, mm, NULL);
 }
 
 void efi_virtmap_load(void)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index fce95e17cf7f..1095aa483a1c 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,3 +1,4 @@
+#include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
@@ -71,6 +72,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	local_dbg_save(flags);
 
 	/*
+	 * Function graph tracer state gets incosistent when the kernel
+	 * calls functions that never return (aka suspend finishers) hence
+	 * disable graph tracing during their execution.
+	 */
+	pause_graph_tracing();
+
+	/*
 	 * mm context saved on the stack, it will be restored when
 	 * the cpu comes out of reset through the identity mapped
 	 * page tables, so that the thread address space is properly
@@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 		hw_breakpoint_restore(NULL);
 	}
 
+	unpause_graph_tracing();
+
 	/*
 	 * Restore pstate flags. OS lock and mdscr have been already
 	 * restored, so from this point onwards, debugging is fully
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 1599701ef044..86c289832272 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -864,6 +864,10 @@ ENTRY(__kvm_flush_vm_context)
 ENDPROC(__kvm_flush_vm_context)
 
 __kvm_hyp_panic:
+	// Stash PAR_EL1 before corrupting it in __restore_sysregs
+	mrs	x0, par_el1
+	push	x0, xzr
+
 	// Guess the context by looking at VTTBR:
 	// If zero, then we're already a host.
 	// Otherwise restore a minimal host context before panicing.
@@ -898,7 +902,7 @@ __kvm_hyp_panic:
 	mrs	x3, esr_el2
 	mrs	x4, far_el2
 	mrs	x5, hpfar_el2
-	mrs	x6, par_el1
+	pop	x6, xzr		// active context PAR_EL1
 	mrs	x7, tpidr_el2
 
 	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
@@ -914,7 +918,7 @@ __kvm_hyp_panic:
 ENDPROC(__kvm_hyp_panic)
 
 __hyp_panic_str:
-	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
+	.ascii	"HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0"
 
 	.align	2
 
@@ -1015,9 +1019,15 @@ el1_trap:
 	b.ne	1f			// Not an abort we care about
 
 	/* This is an abort. Check for permission fault */
+alternative_if_not ARM64_WORKAROUND_834220
 	and	x2, x1, #ESR_ELx_FSC_TYPE
 	cmp	x2, #FSC_PERM
 	b.ne	1f			// Not a permission fault
+alternative_else
+	nop				// Use the permission fault path to
+	nop				// check for a valid S1 translation,
+	nop				// regardless of the ESR value.
+alternative_endif
 
 	/*
 	 * Check for Stage-1 page table walk, which is guaranteed
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 85c57158dcd9..648112e90ed5 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 
 	/* Note: These now point to the banked copies */
 	*vcpu_spsr(vcpu) = new_spsr_value;
-	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
 
 	/* Branch to exception vector */
 	if (sctlr & (1 << 13))
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 131a199114b4..7963aa4b5d28 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -18,6 +18,7 @@
  */
 
 #include <linux/gfp.h>
+#include <linux/acpi.h>
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/genalloc.h>
@@ -28,9 +29,6 @@
 
 #include <asm/cacheflush.h>
 
-struct dma_map_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
 static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
 				 bool coherent)
 {
@@ -515,13 +513,7 @@ EXPORT_SYMBOL(dummy_dma_ops);
 
 static int __init arm64_dma_init(void)
 {
-	int ret;
-
-	dma_ops = &swiotlb_dma_ops;
-
-	ret = atomic_pool_init();
-
-	return ret;
+	return atomic_pool_init();
 }
 arch_initcall(arm64_dma_init);
 
@@ -552,10 +544,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 {
 	bool coherent = is_device_dma_coherent(dev);
 	int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
+	size_t iosize = size;
 	void *addr;
 
 	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
 		return NULL;
+
+	size = PAGE_ALIGN(size);
+
 	/*
 	 * Some drivers rely on this, and we probably don't want the
 	 * possibility of stale kernel data being read by devices anyway.
@@ -566,7 +562,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 		struct page **pages;
 		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
 
-		pages = iommu_dma_alloc(dev, size, gfp, ioprot, handle,
+		pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle,
 					flush_page);
 		if (!pages)
 			return NULL;
@@ -574,7 +570,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
 					      __builtin_return_address(0));
 		if (!addr)
-			iommu_dma_free(dev, pages, size, handle);
+			iommu_dma_free(dev, pages, iosize, handle);
 	} else {
 		struct page *page;
 		/*
@@ -591,7 +587,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 		if (!addr)
 			return NULL;
 
-		*handle = iommu_dma_map_page(dev, page, 0, size, ioprot);
+		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
 		if (iommu_dma_mapping_error(dev, *handle)) {
 			if (coherent)
 				__free_pages(page, get_order(size));
@@ -606,6 +602,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 			       dma_addr_t handle, struct dma_attrs *attrs)
 {
+	size_t iosize = size;
+
+	size = PAGE_ALIGN(size);
 	/*
 	 * @cpu_addr will be one of 3 things depending on how it was allocated:
 	 * - A remapped array of pages from iommu_dma_alloc(), for all
@@ -617,17 +616,17 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	 * Hence how dodgy the below logic looks...
 	 */
 	if (__in_atomic_pool(cpu_addr, size)) {
-		iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+		iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
 		__free_from_pool(cpu_addr, size);
 	} else if (is_vmalloc_addr(cpu_addr)){
 		struct vm_struct *area = find_vm_area(cpu_addr);
 
 		if (WARN_ON(!area || !area->pages))
 			return;
-		iommu_dma_free(dev, area->pages, size, &handle);
+		iommu_dma_free(dev, area->pages, iosize, &handle);
 		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
 	} else {
-		iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+		iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
 		__free_pages(virt_to_page(cpu_addr), get_order(size));
 	}
 }
@@ -984,8 +983,8 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			struct iommu_ops *iommu, bool coherent)
 {
-	if (!acpi_disabled && !dev->archdata.dma_ops)
-		dev->archdata.dma_ops = dma_ops;
+	if (!dev->archdata.dma_ops)
+		dev->archdata.dma_ops = &swiotlb_dma_ops;
 
 	dev->archdata.dma_coherent = coherent;
 	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e3f563c81c48..abb66f84d4ac 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -362,8 +362,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
 	 * for now. This will get more fine grained later once all memory
 	 * is mapped
 	 */
-	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
-	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+	unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
+	unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
 
 	if (end < kernel_x_start) {
 		create_mapping(start, __phys_to_virt(start),
@@ -451,18 +451,18 @@ static void __init fixup_executable(void)
 {
 #ifdef CONFIG_DEBUG_RODATA
 	/* now that we are actually fully mapped, make the start/end more fine grained */
-	if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
+	if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
 		unsigned long aligned_start = round_down(__pa(_stext),
-							 SECTION_SIZE);
+							 SWAPPER_BLOCK_SIZE);
 
 		create_mapping(aligned_start, __phys_to_virt(aligned_start),
 			       __pa(_stext) - aligned_start,
 			       PAGE_KERNEL);
 	}
 
-	if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
+	if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
 		unsigned long aligned_end = round_up(__pa(__init_end),
-						     SECTION_SIZE);
+						     SWAPPER_BLOCK_SIZE);
 		create_mapping(__pa(__init_end), (unsigned long)__init_end,
 			       aligned_end - __pa(__init_end),
 			       PAGE_KERNEL);
@@ -475,7 +475,7 @@ void mark_rodata_ro(void)
475{ 475{
476 create_mapping_late(__pa(_stext), (unsigned long)_stext, 476 create_mapping_late(__pa(_stext), (unsigned long)_stext,
477 (unsigned long)_etext - (unsigned long)_stext, 477 (unsigned long)_etext - (unsigned long)_stext,
478 PAGE_KERNEL_EXEC | PTE_RDONLY); 478 PAGE_KERNEL_ROX);
479 479
480} 480}
481#endif 481#endif
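    [Note: the mapping bounds above are now rounded to SWAPPER_BLOCK_SIZE rather than
    SECTION_SIZE. A small sketch of the round_down/round_up arithmetic; the block size and
    addresses are assumptions for illustration only.]

    #include <stdio.h>

    #define ROUND_DOWN(x, a)  ((x) & ~((a) - 1))              /* a must be a power of two */
    #define ROUND_UP(x, a)    (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned long block = 2UL << 20;     /* hypothetical 2MB swapper block */
            unsigned long stext = 0x40281000UL;  /* hypothetical __pa(_stext) */
            unsigned long iend  = 0x40f63000UL;  /* hypothetical __pa(__init_end) */

            /* prints: x-range: 0x40200000 - 0x41000000 */
            printf("x-range: %#lx - %#lx\n",
                   ROUND_DOWN(stext, block), ROUND_UP(iend, block));
            return 0;
    }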
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index cf3c7d4a1b58..d6a53ef2350b 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -50,7 +50,7 @@ static const int bpf2a64[] = {
50 [BPF_REG_8] = A64_R(21), 50 [BPF_REG_8] = A64_R(21),
51 [BPF_REG_9] = A64_R(22), 51 [BPF_REG_9] = A64_R(22),
52 /* read-only frame pointer to access stack */ 52 /* read-only frame pointer to access stack */
53 [BPF_REG_FP] = A64_FP, 53 [BPF_REG_FP] = A64_R(25),
54 /* temporary register for internal BPF JIT */ 54 /* temporary register for internal BPF JIT */
55 [TMP_REG_1] = A64_R(23), 55 [TMP_REG_1] = A64_R(23),
56 [TMP_REG_2] = A64_R(24), 56 [TMP_REG_2] = A64_R(24),
@@ -155,18 +155,49 @@ static void build_prologue(struct jit_ctx *ctx)
155 stack_size += 4; /* extra for skb_copy_bits buffer */ 155 stack_size += 4; /* extra for skb_copy_bits buffer */
156 stack_size = STACK_ALIGN(stack_size); 156 stack_size = STACK_ALIGN(stack_size);
157 157
158 /*
159 * BPF prog stack layout
160 *
161 * high
162 * original A64_SP => 0:+-----+ BPF prologue
163 * |FP/LR|
164 * current A64_FP => -16:+-----+
165 * | ... | callee saved registers
166 * +-----+
167 * | | x25/x26
168 * BPF fp register => -80:+-----+
169 * | |
170 * | ... | BPF prog stack
171 * | |
172 * | |
173 * current A64_SP => +-----+
174 * | |
175 * | ... | Function call stack
176 * | |
177 * +-----+
178 * low
179 *
180 */
181
 182 /* Save FP and LR registers to stay aligned with the ARM64 AAPCS */
183 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
184 emit(A64_MOV(1, A64_FP, A64_SP), ctx);
185
158 /* Save callee-saved register */ 186 /* Save callee-saved register */
159 emit(A64_PUSH(r6, r7, A64_SP), ctx); 187 emit(A64_PUSH(r6, r7, A64_SP), ctx);
160 emit(A64_PUSH(r8, r9, A64_SP), ctx); 188 emit(A64_PUSH(r8, r9, A64_SP), ctx);
161 if (ctx->tmp_used) 189 if (ctx->tmp_used)
162 emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx); 190 emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);
163 191
 164 /* Set up BPF stack */ 192 /* Save fp (x25) and x26. SP requires 16-byte alignment */
165 emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx); 193 emit(A64_PUSH(fp, A64_R(26), A64_SP), ctx);
166 194
167 /* Set up frame pointer */ 195 /* Set up BPF prog stack base register (x25) */
168 emit(A64_MOV(1, fp, A64_SP), ctx); 196 emit(A64_MOV(1, fp, A64_SP), ctx);
169 197
198 /* Set up function call stack */
199 emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);
200
170 /* Clear registers A and X */ 201 /* Clear registers A and X */
171 emit_a64_mov_i64(ra, 0, ctx); 202 emit_a64_mov_i64(ra, 0, ctx);
172 emit_a64_mov_i64(rx, 0, ctx); 203 emit_a64_mov_i64(rx, 0, ctx);
@@ -190,14 +221,17 @@ static void build_epilogue(struct jit_ctx *ctx)
190 /* We're done with BPF stack */ 221 /* We're done with BPF stack */
191 emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx); 222 emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx);
192 223
 224 /* Restore fp (x25) and x26 */
225 emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
226
193 /* Restore callee-saved register */ 227 /* Restore callee-saved register */
194 if (ctx->tmp_used) 228 if (ctx->tmp_used)
195 emit(A64_POP(tmp1, tmp2, A64_SP), ctx); 229 emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
196 emit(A64_POP(r8, r9, A64_SP), ctx); 230 emit(A64_POP(r8, r9, A64_SP), ctx);
197 emit(A64_POP(r6, r7, A64_SP), ctx); 231 emit(A64_POP(r6, r7, A64_SP), ctx);
198 232
199 /* Restore frame pointer */ 233 /* Restore FP/LR registers */
200 emit(A64_MOV(1, fp, A64_SP), ctx); 234 emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
201 235
202 /* Set return value */ 236 /* Set return value */
203 emit(A64_MOV(1, A64_R(0), r0), ctx); 237 emit(A64_MOV(1, A64_R(0), r0), ctx);
@@ -758,7 +792,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
758 if (bpf_jit_enable > 1) 792 if (bpf_jit_enable > 1)
759 bpf_jit_dump(prog->len, image_size, 2, ctx.image); 793 bpf_jit_dump(prog->len, image_size, 2, ctx.image);
760 794
761 bpf_flush_icache(ctx.image, ctx.image + ctx.idx); 795 bpf_flush_icache(header, ctx.image + ctx.idx);
762 796
763 set_memory_ro((unsigned long)header, header->pages); 797 set_memory_ro((unsigned long)header, header->pages);
764 prog->bpf_func = (void *)ctx.image; 798 prog->bpf_func = (void *)ctx.image;
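    [Note: the new prologue pushes FP/LR, the callee-saved pairs, the (conditional)
    temporaries, and finally x25/x26; the epilogue must pop each pair in exactly the
    reverse order for the values to land back in the right registers. A toy stack model of
    that symmetry; the conditional tmp push is treated as unconditional for simplicity.]

    #include <assert.h>
    #include <stdio.h>

    enum { FP_LR, R6_R7, R8_R9, TMP, FP25_R26, NSLOTS };

    int main(void)
    {
            int stack[NSLOTS], sp = 0;

            /* prologue: FP/LR, callee-saved pairs, then x25/x26 */
            int push_order[] = { FP_LR, R6_R7, R8_R9, TMP, FP25_R26 };
            for (int i = 0; i < NSLOTS; i++)
                    stack[sp++] = push_order[i];

            /* epilogue: the mirror-image pop order from build_epilogue() */
            int pop_order[] = { FP25_R26, TMP, R8_R9, R6_R7, FP_LR };
            for (int i = 0; i < NSLOTS; i++)
                    assert(stack[--sp] == pop_order[i]);

            puts("prologue/epilogue are symmetric");
            return 0;
    }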
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 1ba21204ebe0..8755d618e116 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -216,9 +216,9 @@ void __init plat_mem_setup(void)
216 AR71XX_RESET_SIZE); 216 AR71XX_RESET_SIZE);
217 ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE, 217 ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE,
218 AR71XX_PLL_SIZE); 218 AR71XX_PLL_SIZE);
219 ath79_detect_sys_type();
219 ath79_ddr_ctrl_init(); 220 ath79_ddr_ctrl_init();
220 221
221 ath79_detect_sys_type();
222 if (mips_machtype != ATH79_MACH_GENERIC_OF) 222 if (mips_machtype != ATH79_MACH_GENERIC_OF)
223 detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX); 223 detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
224 224
@@ -281,3 +281,8 @@ MIPS_MACHINE(ATH79_MACH_GENERIC,
281 "Generic", 281 "Generic",
282 "Generic AR71XX/AR724X/AR913X based board", 282 "Generic AR71XX/AR724X/AR913X based board",
283 ath79_generic_init); 283 ath79_generic_init);
284
285MIPS_MACHINE(ATH79_MACH_GENERIC_OF,
286 "DTB",
287 "Generic AR71XX/AR724X/AR913X based board (DT)",
288 NULL);
diff --git a/arch/mips/boot/dts/qca/ar9132.dtsi b/arch/mips/boot/dts/qca/ar9132.dtsi
index fb7734eadbf0..13d0439496a9 100644
--- a/arch/mips/boot/dts/qca/ar9132.dtsi
+++ b/arch/mips/boot/dts/qca/ar9132.dtsi
@@ -107,7 +107,7 @@
107 miscintc: interrupt-controller@18060010 { 107 miscintc: interrupt-controller@18060010 {
108 compatible = "qca,ar9132-misc-intc", 108 compatible = "qca,ar9132-misc-intc",
109 "qca,ar7100-misc-intc"; 109 "qca,ar7100-misc-intc";
110 reg = <0x18060010 0x4>; 110 reg = <0x18060010 0x8>;
111 111
112 interrupt-parent = <&cpuintc>; 112 interrupt-parent = <&cpuintc>;
113 interrupts = <6>; 113 interrupts = <6>;
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index ad1fccdb8d13..2046c0230224 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -200,8 +200,9 @@ static inline int pfn_valid(unsigned long pfn)
200{ 200{
201 /* avoid <linux/mm.h> include hell */ 201 /* avoid <linux/mm.h> include hell */
202 extern unsigned long max_mapnr; 202 extern unsigned long max_mapnr;
203 unsigned long pfn_offset = ARCH_PFN_OFFSET;
203 204
204 return pfn >= ARCH_PFN_OFFSET && pfn < max_mapnr; 205 return pfn >= pfn_offset && pfn < max_mapnr;
205} 206}
206 207
207#elif defined(CONFIG_SPARSEMEM) 208#elif defined(CONFIG_SPARSEMEM)
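    [Note: routing ARCH_PFN_OFFSET through a local variable keeps the same range check
    but, presumably, silences the "comparison is always true" warning compilers emit when
    the macro expands to 0 and pfn is unsigned. A standalone sketch; the motivation and
    the offset value are assumptions.]

    #include <stdbool.h>
    #include <stdio.h>

    #define ARCH_PFN_OFFSET 0UL     /* typical flat-memory configuration */

    static bool pfn_valid(unsigned long pfn, unsigned long max_mapnr)
    {
            unsigned long pfn_offset = ARCH_PFN_OFFSET;

            /* no literal-zero comparison, so no -Wtype-limits warning */
            return pfn >= pfn_offset && pfn < max_mapnr;
    }

    int main(void)
    {
            printf("%d %d\n", pfn_valid(10, 100), pfn_valid(200, 100));  /* 1 0 */
            return 0;
    }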
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index c36546959e86..729f89163bc3 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -108,6 +108,9 @@ config PGTABLE_LEVELS
108 default 3 if 64BIT && PARISC_PAGE_SIZE_4KB 108 default 3 if 64BIT && PARISC_PAGE_SIZE_4KB
109 default 2 109 default 2
110 110
111config SYS_SUPPORTS_HUGETLBFS
112 def_bool y if PA20
113
111source "init/Kconfig" 114source "init/Kconfig"
112 115
113source "kernel/Kconfig.freezer" 116source "kernel/Kconfig.freezer"
diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h
new file mode 100644
index 000000000000..7d56a9ccb752
--- /dev/null
+++ b/arch/parisc/include/asm/hugetlb.h
@@ -0,0 +1,85 @@
1#ifndef _ASM_PARISC64_HUGETLB_H
2#define _ASM_PARISC64_HUGETLB_H
3
4#include <asm/page.h>
5#include <asm-generic/hugetlb.h>
6
7
8void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
9 pte_t *ptep, pte_t pte);
10
11pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
12 pte_t *ptep);
13
14static inline int is_hugepage_only_range(struct mm_struct *mm,
15 unsigned long addr,
16 unsigned long len) {
17 return 0;
18}
19
20/*
21 * If the arch doesn't supply something else, assume that hugepage
22 * size aligned regions are ok without further preparation.
23 */
24static inline int prepare_hugepage_range(struct file *file,
25 unsigned long addr, unsigned long len)
26{
27 if (len & ~HPAGE_MASK)
28 return -EINVAL;
29 if (addr & ~HPAGE_MASK)
30 return -EINVAL;
31 return 0;
32}
33
34static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
35 unsigned long addr, unsigned long end,
36 unsigned long floor,
37 unsigned long ceiling)
38{
39 free_pgd_range(tlb, addr, end, floor, ceiling);
40}
41
42static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
43 unsigned long addr, pte_t *ptep)
44{
45}
46
47static inline int huge_pte_none(pte_t pte)
48{
49 return pte_none(pte);
50}
51
52static inline pte_t huge_pte_wrprotect(pte_t pte)
53{
54 return pte_wrprotect(pte);
55}
56
57static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
58 unsigned long addr, pte_t *ptep)
59{
60 pte_t old_pte = *ptep;
61 set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
62}
63
64static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
65 unsigned long addr, pte_t *ptep,
66 pte_t pte, int dirty)
67{
68 int changed = !pte_same(*ptep, pte);
69 if (changed) {
70 set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
71 flush_tlb_page(vma, addr);
72 }
73 return changed;
74}
75
76static inline pte_t huge_ptep_get(pte_t *ptep)
77{
78 return *ptep;
79}
80
81static inline void arch_clear_hugepage_flags(struct page *page)
82{
83}
84
85#endif /* _ASM_PARISC64_HUGETLB_H */
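    [Note: huge_ptep_set_access_flags() above follows the usual write-only-on-change
    pattern: update the PTE and flush the TLB only when the new value actually differs. A
    minimal standalone model of that idea, not parisc code.]

    #include <stdbool.h>
    #include <stdio.h>

    static bool update_if_changed(unsigned long *slot, unsigned long val)
    {
            bool changed = (*slot != val);

            if (changed)
                    *slot = val;    /* a real kernel would also flush the TLB here */
            return changed;         /* caller can skip flushes when false */
    }

    int main(void)
    {
            unsigned long pte = 0x1000;

            printf("%d\n", update_if_changed(&pte, 0x1001));  /* 1: written */
            printf("%d\n", update_if_changed(&pte, 0x1001));  /* 0: no-op   */
            return 0;
    }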
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index 60d5d174dfe4..80e742a1c162 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -145,11 +145,22 @@ extern int npmem_ranges;
145#endif /* CONFIG_DISCONTIGMEM */ 145#endif /* CONFIG_DISCONTIGMEM */
146 146
147#ifdef CONFIG_HUGETLB_PAGE 147#ifdef CONFIG_HUGETLB_PAGE
148#define HPAGE_SHIFT 22 /* 4MB (is this fixed?) */ 148#define HPAGE_SHIFT PMD_SHIFT /* fixed for transparent huge pages */
149#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) 149#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
150#define HPAGE_MASK (~(HPAGE_SIZE - 1)) 150#define HPAGE_MASK (~(HPAGE_SIZE - 1))
151#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 151#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
152
153#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
154# define REAL_HPAGE_SHIFT 20 /* 20 = 1MB */
155# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_1M
156#elif !defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
157# define REAL_HPAGE_SHIFT 22 /* 22 = 4MB */
158# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4M
159#else
160# define REAL_HPAGE_SHIFT 24 /* 24 = 16MB */
161# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16M
152#endif 162#endif
163#endif /* CONFIG_HUGETLB_PAGE */
153 164
154#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 165#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
155 166
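    [Note: worked numbers for the 64-bit/4KB branch above, assuming PMD_SHIFT is 21 in
    this configuration: a Linux huge page spans 2 MB virtually while the hardware uses
    1 MB pages, so each huge page is backed by two real TLB entries. A quick check:]

    #include <stdio.h>

    #define HPAGE_SHIFT      21     /* assumed PMD_SHIFT on 64-bit/4KB */
    #define REAL_HPAGE_SHIFT 20     /* 1 MB hardware page size */

    int main(void)
    {
            /* prints: huge page: 2048 KB, hw page: 1024 KB, hw pages per huge page: 2 */
            printf("huge page: %lu KB, hw page: %lu KB, hw pages per huge page: %d\n",
                   (1UL << HPAGE_SHIFT) / 1024,
                   (1UL << REAL_HPAGE_SHIFT) / 1024,
                   1 << (HPAGE_SHIFT - REAL_HPAGE_SHIFT));
            return 0;
    }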
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index 3edbb9fc91b4..f2fd327dce2e 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -35,7 +35,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
35 PxD_FLAG_VALID | 35 PxD_FLAG_VALID |
36 PxD_FLAG_ATTACHED) 36 PxD_FLAG_ATTACHED)
37 + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)); 37 + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
38 /* The first pmd entry also is marked with _PAGE_GATEWAY as 38 /* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
39 * a signal that this pmd may not be freed */ 39 * a signal that this pmd may not be freed */
40 __pgd_val_set(*pgd, PxD_FLAG_ATTACHED); 40 __pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
41#endif 41#endif
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index f93c4a4e6580..d8534f95915a 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -83,7 +83,11 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
83 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e)) 83 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
84 84
85/* This is the size of the initially mapped kernel memory */ 85/* This is the size of the initially mapped kernel memory */
86#define KERNEL_INITIAL_ORDER 24 /* 0 to 1<<24 = 16MB */ 86#ifdef CONFIG_64BIT
87#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
88#else
89#define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */
90#endif
87#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER) 91#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
88 92
89#if CONFIG_PGTABLE_LEVELS == 3 93#if CONFIG_PGTABLE_LEVELS == 3
@@ -167,7 +171,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
167#define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */ 171#define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */
168#define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */ 172#define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */
169#define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */ 173#define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */
170/* bit 21 was formerly the FLUSH bit but is now unused */ 174#define _PAGE_HPAGE_BIT 21 /* (0x400) Software: Huge Page */
171#define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */ 175#define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */
172 176
173/* N.B. The bits are defined in terms of a 32 bit word above, so the */ 177/* N.B. The bits are defined in terms of a 32 bit word above, so the */
@@ -194,6 +198,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
194#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT)) 198#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
195#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT)) 199#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
196#define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT)) 200#define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT))
201#define _PAGE_HUGE (1 << xlate_pabit(_PAGE_HPAGE_BIT))
197#define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT)) 202#define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT))
198 203
199#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED) 204#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
@@ -217,7 +222,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
217#define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT)) 222#define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT))
218#define PxD_FLAG_MASK (0xf) 223#define PxD_FLAG_MASK (0xf)
219#define PxD_FLAG_SHIFT (4) 224#define PxD_FLAG_SHIFT (4)
220#define PxD_VALUE_SHIFT (8) /* (PAGE_SHIFT-PxD_FLAG_SHIFT) */ 225#define PxD_VALUE_SHIFT (PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
221 226
222#ifndef __ASSEMBLY__ 227#ifndef __ASSEMBLY__
223 228
@@ -363,6 +368,18 @@ static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return
363static inline pte_t pte_mkspecial(pte_t pte) { return pte; } 368static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
364 369
365/* 370/*
371 * Huge pte definitions.
372 */
373#ifdef CONFIG_HUGETLB_PAGE
374#define pte_huge(pte) (pte_val(pte) & _PAGE_HUGE)
375#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_HUGE))
376#else
377#define pte_huge(pte) (0)
378#define pte_mkhuge(pte) (pte)
379#endif
380
381
382/*
366 * Conversion functions: convert a page and protection to a page entry, 383 * Conversion functions: convert a page and protection to a page entry,
367 * and a page entry and page directory to the page they refer to. 384 * and a page entry and page directory to the page they refer to.
368 */ 385 */
@@ -410,8 +427,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
410/* Find an entry in the second-level page table.. */ 427/* Find an entry in the second-level page table.. */
411 428
412#if CONFIG_PGTABLE_LEVELS == 3 429#if CONFIG_PGTABLE_LEVELS == 3
430#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
413#define pmd_offset(dir,address) \ 431#define pmd_offset(dir,address) \
414((pmd_t *) pgd_page_vaddr(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1))) 432((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address))
415#else 433#else
416#define pmd_offset(dir,addr) ((pmd_t *) dir) 434#define pmd_offset(dir,addr) ((pmd_t *) dir)
417#endif 435#endif
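    [Note: the pmd_index() helper factored out above just extracts the PMD-level index
    bits from a virtual address. A standalone sketch with illustrative shift and count
    values, not parisc's exact configuration:]

    #include <stdio.h>

    #define PMD_SHIFT    21
    #define PTRS_PER_PMD 512

    #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

    int main(void)
    {
            unsigned long addr = 0x7f1234567000UL;

            /* shift off the page/PTE bits, mask down to the table width */
            printf("pmd index of %#lx = %lu\n", addr, pmd_index(addr));
            return 0;
    }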
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 54adb60c0a42..7e759ecb1343 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -192,33 +192,6 @@ void show_trace(struct task_struct *task, unsigned long *stack);
192 */ 192 */
193typedef unsigned int elf_caddr_t; 193typedef unsigned int elf_caddr_t;
194 194
195#define start_thread_som(regs, new_pc, new_sp) do { \
196 unsigned long *sp = (unsigned long *)new_sp; \
197 __u32 spaceid = (__u32)current->mm->context; \
198 unsigned long pc = (unsigned long)new_pc; \
199 /* offset pc for priv. level */ \
200 pc |= 3; \
201 \
202 regs->iasq[0] = spaceid; \
203 regs->iasq[1] = spaceid; \
204 regs->iaoq[0] = pc; \
205 regs->iaoq[1] = pc + 4; \
206 regs->sr[2] = LINUX_GATEWAY_SPACE; \
207 regs->sr[3] = 0xffff; \
208 regs->sr[4] = spaceid; \
209 regs->sr[5] = spaceid; \
210 regs->sr[6] = spaceid; \
211 regs->sr[7] = spaceid; \
212 regs->gr[ 0] = USER_PSW; \
213 regs->gr[30] = ((new_sp)+63)&~63; \
214 regs->gr[31] = pc; \
215 \
216 get_user(regs->gr[26],&sp[0]); \
217 get_user(regs->gr[25],&sp[-1]); \
218 get_user(regs->gr[24],&sp[-2]); \
219 get_user(regs->gr[23],&sp[-3]); \
220} while(0)
221
222/* The ELF abi wants things done a "wee bit" differently than 195/* The ELF abi wants things done a "wee bit" differently than
223 * som does. Supporting this behavior here avoids 196 * som does. Supporting this behavior here avoids
224 * having our own version of create_elf_tables. 197 * having our own version of create_elf_tables.
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index ecc3ae1ca28e..dd4d1876a020 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -49,16 +49,6 @@
49#define MADV_DONTFORK 10 /* don't inherit across fork */ 49#define MADV_DONTFORK 10 /* don't inherit across fork */
50#define MADV_DOFORK 11 /* do inherit across fork */ 50#define MADV_DOFORK 11 /* do inherit across fork */
51 51
52/* The range 12-64 is reserved for page size specification. */
53#define MADV_4K_PAGES 12 /* Use 4K pages */
54#define MADV_16K_PAGES 14 /* Use 16K pages */
55#define MADV_64K_PAGES 16 /* Use 64K pages */
56#define MADV_256K_PAGES 18 /* Use 256K pages */
57#define MADV_1M_PAGES 20 /* Use 1 Megabyte pages */
58#define MADV_4M_PAGES 22 /* Use 4 Megabyte pages */
59#define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */
60#define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */
61
62#define MADV_MERGEABLE 65 /* KSM may merge identical pages */ 52#define MADV_MERGEABLE 65 /* KSM may merge identical pages */
63#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */ 53#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */
64 54
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index 59001cea13f9..d2f62570a7b1 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -290,6 +290,14 @@ int main(void)
290 DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT); 290 DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
291 DEFINE(ASM_PT_INITIAL, PT_INITIAL); 291 DEFINE(ASM_PT_INITIAL, PT_INITIAL);
292 BLANK(); 292 BLANK();
293 /* HUGEPAGE_SIZE is only used in vmlinux.lds.S to align kernel text
294 * and kernel data on physical huge pages */
295#ifdef CONFIG_HUGETLB_PAGE
296 DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT);
297#else
298 DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
299#endif
300 BLANK();
293 DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip)); 301 DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
294 DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space)); 302 DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
295 DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr)); 303 DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
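    [Note: these DEFINE() lines work via the usual asm-offsets trick: the file is only
    ever compiled to assembly (never assembled or run), and a build script turns the
    emitted markers into #defines that assembly files and vmlinux.lds.S can use. A rough
    sketch of the mechanism; the exact kernel macro differs slightly.]

    /* Usage: cc -S asm-offsets-sketch.c -o - | grep '\->' */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " : : "i" (val))

    int main(void)
    {
            DEFINE(HUGEPAGE_SIZE, 1UL << 20);   /* emits: -> HUGEPAGE_SIZE $1048576 */
            return 0;
    }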
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index c5ef4081b01d..623496c11756 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -502,21 +502,38 @@
502 STREG \pte,0(\ptp) 502 STREG \pte,0(\ptp)
503 .endm 503 .endm
504 504
505 /* We have (depending on the page size):
506 * - 38 to 52-bit Physical Page Number
507 * - 12 to 26-bit page offset
508 */
505 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE) 509 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
506 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */ 510 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
507 #define PAGE_ADD_SHIFT (PAGE_SHIFT-12) 511 #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
512 #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
508 513
509 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ 514 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
510 .macro convert_for_tlb_insert20 pte 515 .macro convert_for_tlb_insert20 pte,tmp
516#ifdef CONFIG_HUGETLB_PAGE
517 copy \pte,\tmp
518 extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
519 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
520
521 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
522 (63-58)+PAGE_ADD_SHIFT,\pte
523 extrd,u,*= \tmp,_PAGE_HPAGE_BIT+32,1,%r0
524 depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
525 (63-58)+PAGE_ADD_HUGE_SHIFT,\pte
526#else /* Huge pages disabled */
511 extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\ 527 extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
512 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte 528 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
513 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\ 529 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
514 (63-58)+PAGE_ADD_SHIFT,\pte 530 (63-58)+PAGE_ADD_SHIFT,\pte
531#endif
515 .endm 532 .endm
516 533
517 /* Convert the pte and prot to tlb insertion values. How 534 /* Convert the pte and prot to tlb insertion values. How
518 * this happens is quite subtle, read below */ 535 * this happens is quite subtle, read below */
519 .macro make_insert_tlb spc,pte,prot 536 .macro make_insert_tlb spc,pte,prot,tmp
520 space_to_prot \spc \prot /* create prot id from space */ 537 space_to_prot \spc \prot /* create prot id from space */
521 /* The following is the real subtlety. This is depositing 538 /* The following is the real subtlety. This is depositing
522 * T <-> _PAGE_REFTRAP 539 * T <-> _PAGE_REFTRAP
@@ -553,7 +570,7 @@
553 depdi 1,12,1,\prot 570 depdi 1,12,1,\prot
554 571
555 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ 572 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
556 convert_for_tlb_insert20 \pte 573 convert_for_tlb_insert20 \pte \tmp
557 .endm 574 .endm
558 575
559 /* Identical macro to make_insert_tlb above, except it 576 /* Identical macro to make_insert_tlb above, except it
@@ -646,17 +663,12 @@
646 663
647 664
648 /* 665 /*
 649 * Align fault_vector_20 on 4K boundary so that both 666 * Fault vectors are architecturally required to be aligned on a 2K
650 * fault_vector_11 and fault_vector_20 are on the 667 * boundary
651 * same page. This is only necessary as long as we
652 * write protect the kernel text, which we may stop
653 * doing once we use large page translations to cover
654 * the static part of the kernel address space.
655 */ 668 */
656 669
657 .text 670 .text
658 671 .align 2048
659 .align 4096
660 672
661ENTRY(fault_vector_20) 673ENTRY(fault_vector_20)
662 /* First vector is invalid (0) */ 674 /* First vector is invalid (0) */
@@ -1147,7 +1159,7 @@ dtlb_miss_20w:
1147 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w 1159 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
1148 update_accessed ptp,pte,t0,t1 1160 update_accessed ptp,pte,t0,t1
1149 1161
1150 make_insert_tlb spc,pte,prot 1162 make_insert_tlb spc,pte,prot,t1
1151 1163
1152 idtlbt pte,prot 1164 idtlbt pte,prot
1153 1165
@@ -1173,7 +1185,7 @@ nadtlb_miss_20w:
1173 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w 1185 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
1174 update_accessed ptp,pte,t0,t1 1186 update_accessed ptp,pte,t0,t1
1175 1187
1176 make_insert_tlb spc,pte,prot 1188 make_insert_tlb spc,pte,prot,t1
1177 1189
1178 idtlbt pte,prot 1190 idtlbt pte,prot
1179 1191
@@ -1267,7 +1279,7 @@ dtlb_miss_20:
1267 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20 1279 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
1268 update_accessed ptp,pte,t0,t1 1280 update_accessed ptp,pte,t0,t1
1269 1281
1270 make_insert_tlb spc,pte,prot 1282 make_insert_tlb spc,pte,prot,t1
1271 1283
1272 f_extend pte,t1 1284 f_extend pte,t1
1273 1285
@@ -1295,7 +1307,7 @@ nadtlb_miss_20:
1295 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20 1307 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
1296 update_accessed ptp,pte,t0,t1 1308 update_accessed ptp,pte,t0,t1
1297 1309
1298 make_insert_tlb spc,pte,prot 1310 make_insert_tlb spc,pte,prot,t1
1299 1311
1300 f_extend pte,t1 1312 f_extend pte,t1
1301 1313
@@ -1404,7 +1416,7 @@ itlb_miss_20w:
1404 tlb_lock spc,ptp,pte,t0,t1,itlb_fault 1416 tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1405 update_accessed ptp,pte,t0,t1 1417 update_accessed ptp,pte,t0,t1
1406 1418
1407 make_insert_tlb spc,pte,prot 1419 make_insert_tlb spc,pte,prot,t1
1408 1420
1409 iitlbt pte,prot 1421 iitlbt pte,prot
1410 1422
@@ -1428,7 +1440,7 @@ naitlb_miss_20w:
1428 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w 1440 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
1429 update_accessed ptp,pte,t0,t1 1441 update_accessed ptp,pte,t0,t1
1430 1442
1431 make_insert_tlb spc,pte,prot 1443 make_insert_tlb spc,pte,prot,t1
1432 1444
1433 iitlbt pte,prot 1445 iitlbt pte,prot
1434 1446
@@ -1514,7 +1526,7 @@ itlb_miss_20:
1514 tlb_lock spc,ptp,pte,t0,t1,itlb_fault 1526 tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1515 update_accessed ptp,pte,t0,t1 1527 update_accessed ptp,pte,t0,t1
1516 1528
1517 make_insert_tlb spc,pte,prot 1529 make_insert_tlb spc,pte,prot,t1
1518 1530
1519 f_extend pte,t1 1531 f_extend pte,t1
1520 1532
@@ -1534,7 +1546,7 @@ naitlb_miss_20:
1534 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20 1546 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
1535 update_accessed ptp,pte,t0,t1 1547 update_accessed ptp,pte,t0,t1
1536 1548
1537 make_insert_tlb spc,pte,prot 1549 make_insert_tlb spc,pte,prot,t1
1538 1550
1539 f_extend pte,t1 1551 f_extend pte,t1
1540 1552
@@ -1566,7 +1578,7 @@ dbit_trap_20w:
1566 tlb_lock spc,ptp,pte,t0,t1,dbit_fault 1578 tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1567 update_dirty ptp,pte,t1 1579 update_dirty ptp,pte,t1
1568 1580
1569 make_insert_tlb spc,pte,prot 1581 make_insert_tlb spc,pte,prot,t1
1570 1582
1571 idtlbt pte,prot 1583 idtlbt pte,prot
1572 1584
@@ -1610,7 +1622,7 @@ dbit_trap_20:
1610 tlb_lock spc,ptp,pte,t0,t1,dbit_fault 1622 tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1611 update_dirty ptp,pte,t1 1623 update_dirty ptp,pte,t1
1612 1624
1613 make_insert_tlb spc,pte,prot 1625 make_insert_tlb spc,pte,prot,t1
1614 1626
1615 f_extend pte,t1 1627 f_extend pte,t1
1616 1628
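    [Note: in C terms, the new convert_for_tlb_insert20 amounts to: keep the PFN, then
    deposit either the default or the huge-page size encoding depending on the software
    huge-page bit. A heavily simplified model; the bit positions and encoding values here
    are placeholders, not the real PA-RISC ones.]

    #include <stdio.h>

    #define PAGE_HUGE    (1UL << 21)  /* stand-in for the _PAGE_HPAGE software bit */
    #define ENC_DEFAULT  0UL          /* stand-in for the 4KB size encoding */
    #define ENC_HUGE     2UL          /* stand-in for the huge-page encoding */

    static unsigned long tlb_insert_value(unsigned long pte)
    {
            unsigned long enc = (pte & PAGE_HUGE) ? ENC_HUGE : ENC_DEFAULT;

            /* drop the protection bits, keep the PFN, deposit the encoding */
            return (pte & ~0xfffUL) | enc;
    }

    int main(void)
    {
            printf("%#lx\n", tlb_insert_value(0x40000000UL | PAGE_HUGE));
            printf("%#lx\n", tlb_insert_value(0x40000000UL));
            return 0;
    }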
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index e7d64527aff9..75aa0db9f69e 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -69,7 +69,7 @@ $bss_loop:
69 stw,ma %arg2,4(%r1) 69 stw,ma %arg2,4(%r1)
70 stw,ma %arg3,4(%r1) 70 stw,ma %arg3,4(%r1)
71 71
72 /* Initialize startup VM. Just map first 8/16 MB of memory */ 72 /* Initialize startup VM. Just map first 16/32 MB of memory */
73 load32 PA(swapper_pg_dir),%r4 73 load32 PA(swapper_pg_dir),%r4
74 mtctl %r4,%cr24 /* Initialize kernel root pointer */ 74 mtctl %r4,%cr24 /* Initialize kernel root pointer */
75 mtctl %r4,%cr25 /* Initialize user root pointer */ 75 mtctl %r4,%cr25 /* Initialize user root pointer */
@@ -107,7 +107,7 @@ $bss_loop:
107 /* Now initialize the PTEs themselves. We use RWX for 107 /* Now initialize the PTEs themselves. We use RWX for
108 * everything ... it will get remapped correctly later */ 108 * everything ... it will get remapped correctly later */
109 ldo 0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */ 109 ldo 0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
110 ldi (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */ 110 load32 (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
111 load32 PA(pg0),%r1 111 load32 PA(pg0),%r1
112 112
113$pgt_fill_loop: 113$pgt_fill_loop:
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 72a3c658ad7b..f7ea626e29c9 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -130,7 +130,16 @@ void __init setup_arch(char **cmdline_p)
130 printk(KERN_INFO "The 32-bit Kernel has started...\n"); 130 printk(KERN_INFO "The 32-bit Kernel has started...\n");
131#endif 131#endif
132 132
133 printk(KERN_INFO "Default page size is %dKB.\n", (int)(PAGE_SIZE / 1024)); 133 printk(KERN_INFO "Kernel default page size is %d KB. Huge pages ",
134 (int)(PAGE_SIZE / 1024));
135#ifdef CONFIG_HUGETLB_PAGE
136 printk(KERN_CONT "enabled with %d MB physical and %d MB virtual size",
137 1 << (REAL_HPAGE_SHIFT - 20), 1 << (HPAGE_SHIFT - 20));
138#else
139 printk(KERN_CONT "disabled");
140#endif
141 printk(KERN_CONT ".\n");
142
134 143
135 pdc_console_init(); 144 pdc_console_init();
136 145
@@ -377,6 +386,7 @@ arch_initcall(parisc_init);
377void start_parisc(void) 386void start_parisc(void)
378{ 387{
379 extern void start_kernel(void); 388 extern void start_kernel(void);
389 extern void early_trap_init(void);
380 390
381 int ret, cpunum; 391 int ret, cpunum;
382 struct pdc_coproc_cfg coproc_cfg; 392 struct pdc_coproc_cfg coproc_cfg;
@@ -397,6 +407,8 @@ void start_parisc(void)
397 panic("must have an fpu to boot linux"); 407 panic("must have an fpu to boot linux");
398 } 408 }
399 409
410 early_trap_init(); /* initialize checksum of fault_vector */
411
400 start_kernel(); 412 start_kernel();
401 // not reached 413 // not reached
402} 414}
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 0b8d26d3ba43..3fbd7252a4b2 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -369,7 +369,7 @@ tracesys_exit:
369 ldo -16(%r30),%r29 /* Reference param save area */ 369 ldo -16(%r30),%r29 /* Reference param save area */
370#endif 370#endif
371 ldo TASK_REGS(%r1),%r26 371 ldo TASK_REGS(%r1),%r26
372 bl do_syscall_trace_exit,%r2 372 BL do_syscall_trace_exit,%r2
373 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ 373 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */
374 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ 374 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
375 LDREG TI_TASK(%r1), %r1 375 LDREG TI_TASK(%r1), %r1
@@ -390,7 +390,7 @@ tracesys_sigexit:
390#ifdef CONFIG_64BIT 390#ifdef CONFIG_64BIT
391 ldo -16(%r30),%r29 /* Reference param save area */ 391 ldo -16(%r30),%r29 /* Reference param save area */
392#endif 392#endif
393 bl do_syscall_trace_exit,%r2 393 BL do_syscall_trace_exit,%r2
394 ldo TASK_REGS(%r1),%r26 394 ldo TASK_REGS(%r1),%r26
395 395
396 ldil L%syscall_exit_rfi,%r1 396 ldil L%syscall_exit_rfi,%r1
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index b99b39f1da02..553b09855cfd 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -807,7 +807,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
807} 807}
808 808
809 809
810int __init check_ivt(void *iva) 810void __init initialize_ivt(const void *iva)
811{ 811{
812 extern u32 os_hpmc_size; 812 extern u32 os_hpmc_size;
813 extern const u32 os_hpmc[]; 813 extern const u32 os_hpmc[];
@@ -818,8 +818,8 @@ int __init check_ivt(void *iva)
818 u32 *hpmcp; 818 u32 *hpmcp;
819 u32 length; 819 u32 length;
820 820
821 if (strcmp((char *)iva, "cows can fly")) 821 if (strcmp((const char *)iva, "cows can fly"))
822 return -1; 822 panic("IVT invalid");
823 823
824 ivap = (u32 *)iva; 824 ivap = (u32 *)iva;
825 825
@@ -839,28 +839,23 @@ int __init check_ivt(void *iva)
839 check += ivap[i]; 839 check += ivap[i];
840 840
841 ivap[5] = -check; 841 ivap[5] = -check;
842
843 return 0;
844} 842}
845 843
846#ifndef CONFIG_64BIT
847extern const void fault_vector_11;
848#endif
849extern const void fault_vector_20;
850 844
851void __init trap_init(void) 845/* early_trap_init() is called before we set up kernel mappings and
846 * write-protect the kernel */
847void __init early_trap_init(void)
852{ 848{
853 void *iva; 849 extern const void fault_vector_20;
854 850
855 if (boot_cpu_data.cpu_type >= pcxu) 851#ifndef CONFIG_64BIT
856 iva = (void *) &fault_vector_20; 852 extern const void fault_vector_11;
857 else 853 initialize_ivt(&fault_vector_11);
858#ifdef CONFIG_64BIT
859 panic("Can't boot 64-bit OS on PA1.1 processor!");
860#else
861 iva = (void *) &fault_vector_11;
862#endif 854#endif
863 855
864 if (check_ivt(iva)) 856 initialize_ivt(&fault_vector_20);
865 panic("IVT invalid"); 857}
858
859void __init trap_init(void)
860{
866} 861}
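    [Note: the checksum logic above stores the negated running sum in a reserved vector
    slot so that the whole table sums to zero modulo 2^32, which firmware can later verify
    by re-summing. A toy model with 8 words and slot 5 reserved (initially zero):]

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int ivt[8] = { 0xdeadbeef, 0x12345678, 0, 0, 0, 0, 1, 2 };
            unsigned int check = 0;
            int i;

            for (i = 0; i < 8; i++)
                    check += ivt[i];
            ivt[5] = -check;        /* slot 5 holds the compensating value */

            check = 0;
            for (i = 0; i < 8; i++)
                    check += ivt[i];
            assert(check == 0);     /* wraps to zero modulo 2^32 */
            puts("checksum ok");
            return 0;
    }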
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 0dacc5ca555a..308f29081d46 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -60,7 +60,7 @@ SECTIONS
60 EXIT_DATA 60 EXIT_DATA
61 } 61 }
62 PERCPU_SECTION(8) 62 PERCPU_SECTION(8)
63 . = ALIGN(PAGE_SIZE); 63 . = ALIGN(HUGEPAGE_SIZE);
64 __init_end = .; 64 __init_end = .;
65 /* freed after init ends here */ 65 /* freed after init ends here */
66 66
@@ -116,7 +116,7 @@ SECTIONS
116 * that we can properly leave these 116 * that we can properly leave these
117 * as writable 117 * as writable
118 */ 118 */
119 . = ALIGN(PAGE_SIZE); 119 . = ALIGN(HUGEPAGE_SIZE);
120 data_start = .; 120 data_start = .;
121 121
122 EXCEPTION_TABLE(8) 122 EXCEPTION_TABLE(8)
@@ -135,8 +135,11 @@ SECTIONS
135 _edata = .; 135 _edata = .;
136 136
137 /* BSS */ 137 /* BSS */
138 BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8) 138 BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE)
139
140 /* bootmap is allocated in setup_bootmem() directly behind bss. */
139 141
142 . = ALIGN(HUGEPAGE_SIZE);
140 _end = . ; 143 _end = . ;
141 144
142 STABS_DEBUG 145 STABS_DEBUG
diff --git a/arch/parisc/mm/Makefile b/arch/parisc/mm/Makefile
index 758ceefb373a..134393de69d2 100644
--- a/arch/parisc/mm/Makefile
+++ b/arch/parisc/mm/Makefile
@@ -3,3 +3,4 @@
3# 3#
4 4
5obj-y := init.o fault.o ioremap.o 5obj-y := init.o fault.o ioremap.o
6obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
new file mode 100644
index 000000000000..f6fdc77a72bd
--- /dev/null
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -0,0 +1,161 @@
1/*
2 * PARISC64 Huge TLB page support.
3 *
4 * This parisc implementation is heavily based on the SPARC and x86 code.
5 *
6 * Copyright (C) 2015 Helge Deller <deller@gmx.de>
7 */
8
9#include <linux/fs.h>
10#include <linux/mm.h>
11#include <linux/hugetlb.h>
12#include <linux/pagemap.h>
13#include <linux/sysctl.h>
14
15#include <asm/mman.h>
16#include <asm/pgalloc.h>
17#include <asm/tlb.h>
18#include <asm/tlbflush.h>
19#include <asm/cacheflush.h>
20#include <asm/mmu_context.h>
21
22
23unsigned long
24hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25 unsigned long len, unsigned long pgoff, unsigned long flags)
26{
27 struct hstate *h = hstate_file(file);
28
29 if (len & ~huge_page_mask(h))
30 return -EINVAL;
31 if (len > TASK_SIZE)
32 return -ENOMEM;
33
34 if (flags & MAP_FIXED)
35 if (prepare_hugepage_range(file, addr, len))
36 return -EINVAL;
37
38 if (addr)
39 addr = ALIGN(addr, huge_page_size(h));
40
41 /* we need to make sure the colouring is OK */
42 return arch_get_unmapped_area(file, addr, len, pgoff, flags);
43}
44
45
46pte_t *huge_pte_alloc(struct mm_struct *mm,
47 unsigned long addr, unsigned long sz)
48{
49 pgd_t *pgd;
50 pud_t *pud;
51 pmd_t *pmd;
52 pte_t *pte = NULL;
53
54 /* We must align the address, because our caller will run
55 * set_huge_pte_at() on whatever we return, which writes out
56 * all of the sub-ptes for the hugepage range. So we have
57 * to give it the first such sub-pte.
58 */
59 addr &= HPAGE_MASK;
60
61 pgd = pgd_offset(mm, addr);
62 pud = pud_alloc(mm, pgd, addr);
63 if (pud) {
64 pmd = pmd_alloc(mm, pud, addr);
65 if (pmd)
66 pte = pte_alloc_map(mm, NULL, pmd, addr);
67 }
68 return pte;
69}
70
71pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
72{
73 pgd_t *pgd;
74 pud_t *pud;
75 pmd_t *pmd;
76 pte_t *pte = NULL;
77
78 addr &= HPAGE_MASK;
79
80 pgd = pgd_offset(mm, addr);
81 if (!pgd_none(*pgd)) {
82 pud = pud_offset(pgd, addr);
83 if (!pud_none(*pud)) {
84 pmd = pmd_offset(pud, addr);
85 if (!pmd_none(*pmd))
86 pte = pte_offset_map(pmd, addr);
87 }
88 }
89 return pte;
90}
91
92/* Purge data and instruction TLB entries. Must be called holding
93 * the pa_tlb_lock. The TLB purge instructions are slow on SMP
94 * machines since the purge must be broadcast to all CPUs.
95 */
96static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
97{
98 int i;
99
100 /* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
101 * Linux standard huge pages (e.g. 2 MB) */
102 BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);
103
104 addr &= HPAGE_MASK;
105 addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;
106
107 for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
108 mtsp(mm->context, 1);
109 pdtlb(addr);
110 if (unlikely(split_tlb))
111 pitlb(addr);
112 addr += (1UL << REAL_HPAGE_SHIFT);
113 }
114}
115
116void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
117 pte_t *ptep, pte_t entry)
118{
119 unsigned long addr_start;
120 int i;
121
122 addr &= HPAGE_MASK;
123 addr_start = addr;
124
125 for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
126 /* Directly write pte entry. We could call set_pte_at(mm, addr, ptep, entry)
127 * instead, but then we get double locking on pa_tlb_lock. */
128 *ptep = entry;
129 ptep++;
130
131 /* Drop the PAGE_SIZE/non-huge tlb entry */
132 purge_tlb_entries(mm, addr);
133
134 addr += PAGE_SIZE;
135 pte_val(entry) += PAGE_SIZE;
136 }
137
138 purge_tlb_entries_huge(mm, addr_start);
139}
140
141
142pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
143 pte_t *ptep)
144{
145 pte_t entry;
146
147 entry = *ptep;
148 set_huge_pte_at(mm, addr, ptep, __pte(0));
149
150 return entry;
151}
152
153int pmd_huge(pmd_t pmd)
154{
155 return 0;
156}
157
158int pud_huge(pud_t pud)
159{
160 return 0;
161}
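    [Note: from userspace, the paths above are reached through hugetlbfs or MAP_HUGETLB
    mappings. A minimal sketch, assuming a 2 MB huge page size and that huge pages have
    been reserved, for example via /proc/sys/vm/nr_hugepages:]

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 2 * 1024 * 1024;   /* one huge page; size is arch-dependent */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

            if (p == MAP_FAILED) {
                    perror("mmap");         /* e.g. no huge pages reserved */
                    return 1;
            }
            memset(p, 0, len);              /* faults the huge page in */
            munmap(p, len);
            return 0;
    }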
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index c5fec4890fdf..1b366c477687 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -409,15 +409,11 @@ static void __init map_pages(unsigned long start_vaddr,
409 unsigned long vaddr; 409 unsigned long vaddr;
410 unsigned long ro_start; 410 unsigned long ro_start;
411 unsigned long ro_end; 411 unsigned long ro_end;
412 unsigned long fv_addr; 412 unsigned long kernel_end;
413 unsigned long gw_addr;
414 extern const unsigned long fault_vector_20;
415 extern void * const linux_gateway_page;
416 413
417 ro_start = __pa((unsigned long)_text); 414 ro_start = __pa((unsigned long)_text);
418 ro_end = __pa((unsigned long)&data_start); 415 ro_end = __pa((unsigned long)&data_start);
419 fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK; 416 kernel_end = __pa((unsigned long)&_end);
420 gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
421 417
422 end_paddr = start_paddr + size; 418 end_paddr = start_paddr + size;
423 419
@@ -475,24 +471,25 @@ static void __init map_pages(unsigned long start_vaddr,
475 for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) { 471 for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
476 pte_t pte; 472 pte_t pte;
477 473
478 /*
479 * Map the fault vector writable so we can
480 * write the HPMC checksum.
481 */
482 if (force) 474 if (force)
483 pte = __mk_pte(address, pgprot); 475 pte = __mk_pte(address, pgprot);
484 else if (parisc_text_address(vaddr) && 476 else if (parisc_text_address(vaddr)) {
485 address != fv_addr)
486 pte = __mk_pte(address, PAGE_KERNEL_EXEC); 477 pte = __mk_pte(address, PAGE_KERNEL_EXEC);
478 if (address >= ro_start && address < kernel_end)
479 pte = pte_mkhuge(pte);
480 }
487 else 481 else
488#if defined(CONFIG_PARISC_PAGE_SIZE_4KB) 482#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
489 if (address >= ro_start && address < ro_end 483 if (address >= ro_start && address < ro_end) {
490 && address != fv_addr 484 pte = __mk_pte(address, PAGE_KERNEL_EXEC);
491 && address != gw_addr) 485 pte = pte_mkhuge(pte);
492 pte = __mk_pte(address, PAGE_KERNEL_RO); 486 } else
493 else
494#endif 487#endif
488 {
495 pte = __mk_pte(address, pgprot); 489 pte = __mk_pte(address, pgprot);
490 if (address >= ro_start && address < kernel_end)
491 pte = pte_mkhuge(pte);
492 }
496 493
497 if (address >= end_paddr) { 494 if (address >= end_paddr) {
498 if (force) 495 if (force)
@@ -536,15 +533,12 @@ void free_initmem(void)
536 533
537 /* force the kernel to see the new TLB entries */ 534 /* force the kernel to see the new TLB entries */
538 __flush_tlb_range(0, init_begin, init_end); 535 __flush_tlb_range(0, init_begin, init_end);
539 /* Attempt to catch anyone trying to execute code here 536
540 * by filling the page with BRK insns.
541 */
542 memset((void *)init_begin, 0x00, init_end - init_begin);
543 /* finally dump all the instructions which were cached, since the 537 /* finally dump all the instructions which were cached, since the
 544 * pages are no longer executable */ 538 * pages are no longer executable */
545 flush_icache_range(init_begin, init_end); 539 flush_icache_range(init_begin, init_end);
546 540
547 free_initmem_default(-1); 541 free_initmem_default(POISON_FREE_INITMEM);
548 542
 549 /* set up a new LED state on systems shipped with an LED state panel */ 543 /* set up a new LED state on systems shipped with an LED state panel */
550 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); 544 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
@@ -728,8 +722,8 @@ static void __init pagetable_init(void)
728 unsigned long size; 722 unsigned long size;
729 723
730 start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT; 724 start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
731 end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
732 size = pmem_ranges[range].pages << PAGE_SHIFT; 725 size = pmem_ranges[range].pages << PAGE_SHIFT;
726 end_paddr = start_paddr + size;
733 727
734 map_pages((unsigned long)__va(start_paddr), start_paddr, 728 map_pages((unsigned long)__va(start_paddr), start_paddr,
735 size, PAGE_KERNEL, 0); 729 size, PAGE_KERNEL, 0);
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index c9e26cb264f4..f2b0b1b0c72a 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -382,3 +382,4 @@ COMPAT_SYS(shmat)
382SYSCALL(shmdt) 382SYSCALL(shmdt)
383SYSCALL(shmget) 383SYSCALL(shmget)
384COMPAT_SYS(shmctl) 384COMPAT_SYS(shmctl)
385SYSCALL(mlock2)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 6d8f8023ac27..4b6b8ace18e0 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
12#include <uapi/asm/unistd.h> 12#include <uapi/asm/unistd.h>
13 13
14 14
15#define __NR_syscalls 378 15#define __NR_syscalls 379
16 16
17#define __NR__exit __NR_exit 17#define __NR__exit __NR_exit
18#define NR_syscalls __NR_syscalls 18#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 81579e93c659..1effea5193d6 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -400,5 +400,6 @@
400#define __NR_shmdt 375 400#define __NR_shmdt 375
401#define __NR_shmget 376 401#define __NR_shmget 376
402#define __NR_shmctl 377 402#define __NR_shmctl 377
403#define __NR_mlock2 378
403 404
404#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ 405#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
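    [Note: with the table entry wired up, userspace can reach the new syscall via
    syscall(2); glibc had no wrapper at this point. A hedged usage sketch: SYS_mlock2
    needs headers new enough to define it, and MLOCK_ONFAULT (0x01, from the generic
    uapi) locks pages only as they fault in.]

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef MLOCK_ONFAULT
    #define MLOCK_ONFAULT 0x01      /* uapi/asm-generic/mman-common.h */
    #endif

    int main(void)
    {
            size_t len = 4096;
            void *buf = aligned_alloc(4096, len);

            /* lock-on-fault: pages are pinned only once touched */
            if (syscall(SYS_mlock2, buf, len, MLOCK_ONFAULT) != 0)
                    perror("mlock2");
            free(buf);
            return 0;
    }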
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 0c5d8ee657f0..d1e7b0a0feeb 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -312,6 +312,7 @@ extern void css_schedule_reprobe(void);
312extern void reipl_ccw_dev(struct ccw_dev_id *id); 312extern void reipl_ccw_dev(struct ccw_dev_id *id);
313 313
314struct cio_iplinfo { 314struct cio_iplinfo {
315 u8 ssid;
315 u16 devno; 316 u16 devno;
316 int is_qdio; 317 int is_qdio;
317}; 318};
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 3ad48f22de78..bab6739a1154 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -206,9 +206,16 @@ do { \
206} while (0) 206} while (0)
207#endif /* CONFIG_COMPAT */ 207#endif /* CONFIG_COMPAT */
208 208
209extern unsigned long mmap_rnd_mask; 209/*
210 210 * Cache aliasing on the latest machines calls for a mapping granularity
211#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask) 211 * of 512KB. For 64-bit processes use a 512KB alignment and a randomization
212 * of up to 1GB. For 31-bit processes the virtual address space is limited,
213 * use no alignment and limit the randomization to 8MB.
214 */
215#define BRK_RND_MASK (is_32bit_task() ? 0x7ffUL : 0x3ffffUL)
216#define MMAP_RND_MASK (is_32bit_task() ? 0x7ffUL : 0x3ff80UL)
217#define MMAP_ALIGN_MASK (is_32bit_task() ? 0 : 0x7fUL)
218#define STACK_RND_MASK MMAP_RND_MASK
212 219
213#define ARCH_DLINFO \ 220#define ARCH_DLINFO \
214do { \ 221do { \
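    [Note: the masks above are in pages. A quick check of the numbers the new comment
    claims, assuming 4 KB pages: clearing the low 7 page bits gives 512 KB alignment, and
    0x3ff80 pages of randomization span roughly 1 GB.]

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            unsigned long mmap_rnd = 0x3ff80UL;  /* 64-bit MMAP_RND_MASK */
            unsigned long align    = 0x7fUL;     /* 64-bit MMAP_ALIGN_MASK */

            /* prints: alignment: 512 KB, then max offset: 1023 MB (about 1 GB) */
            printf("alignment: %lu KB\n", ((align + 1) << PAGE_SHIFT) / 1024);
            printf("max offset: %lu MB\n", (mmap_rnd << PAGE_SHIFT) >> 20);
            return 0;
    }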
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 39ae6a359747..86634e71b69f 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -64,7 +64,8 @@ struct ipl_block_fcp {
64 64
65struct ipl_block_ccw { 65struct ipl_block_ccw {
66 u8 reserved1[84]; 66 u8 reserved1[84];
67 u8 reserved2[2]; 67 u16 reserved2 : 13;
68 u8 ssid : 3;
68 u16 devno; 69 u16 devno;
69 u8 vm_flags; 70 u8 vm_flags;
70 u8 reserved3[3]; 71 u8 reserved3[3];
diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h
index 7a7abf1a5537..1aac41e83ea1 100644
--- a/arch/s390/include/asm/pci_dma.h
+++ b/arch/s390/include/asm/pci_dma.h
@@ -195,5 +195,7 @@ void zpci_dma_exit_device(struct zpci_dev *);
195void dma_free_seg_table(unsigned long); 195void dma_free_seg_table(unsigned long);
196unsigned long *dma_alloc_cpu_table(void); 196unsigned long *dma_alloc_cpu_table(void);
197void dma_cleanup_tables(unsigned long *); 197void dma_cleanup_tables(unsigned long *);
198void dma_update_cpu_trans(unsigned long *, void *, dma_addr_t, int); 198unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr);
199void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags);
200
199#endif 201#endif
diff --git a/arch/s390/include/asm/trace/diag.h b/arch/s390/include/asm/trace/diag.h
index 776f307960cc..cc6cfe7889da 100644
--- a/arch/s390/include/asm/trace/diag.h
+++ b/arch/s390/include/asm/trace/diag.h
@@ -19,7 +19,7 @@
19#define TRACE_INCLUDE_PATH asm/trace 19#define TRACE_INCLUDE_PATH asm/trace
20#define TRACE_INCLUDE_FILE diag 20#define TRACE_INCLUDE_FILE diag
21 21
22TRACE_EVENT(diagnose, 22TRACE_EVENT(s390_diagnose,
23 TP_PROTO(unsigned short nr), 23 TP_PROTO(unsigned short nr),
24 TP_ARGS(nr), 24 TP_ARGS(nr),
25 TP_STRUCT__entry( 25 TP_STRUCT__entry(
@@ -32,9 +32,9 @@ TRACE_EVENT(diagnose,
32); 32);
33 33
34#ifdef CONFIG_TRACEPOINTS 34#ifdef CONFIG_TRACEPOINTS
35void trace_diagnose_norecursion(int diag_nr); 35void trace_s390_diagnose_norecursion(int diag_nr);
36#else 36#else
37static inline void trace_diagnose_norecursion(int diag_nr) { } 37static inline void trace_s390_diagnose_norecursion(int diag_nr) { }
38#endif 38#endif
39 39
40#endif /* _TRACE_S390_DIAG_H */ 40#endif /* _TRACE_S390_DIAG_H */
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index a848adba1504..34ec202472c6 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -192,14 +192,14 @@
192#define __NR_set_tid_address 252 192#define __NR_set_tid_address 252
193#define __NR_fadvise64 253 193#define __NR_fadvise64 253
194#define __NR_timer_create 254 194#define __NR_timer_create 254
195#define __NR_timer_settime (__NR_timer_create+1) 195#define __NR_timer_settime 255
196#define __NR_timer_gettime (__NR_timer_create+2) 196#define __NR_timer_gettime 256
197#define __NR_timer_getoverrun (__NR_timer_create+3) 197#define __NR_timer_getoverrun 257
198#define __NR_timer_delete (__NR_timer_create+4) 198#define __NR_timer_delete 258
199#define __NR_clock_settime (__NR_timer_create+5) 199#define __NR_clock_settime 259
200#define __NR_clock_gettime (__NR_timer_create+6) 200#define __NR_clock_gettime 260
201#define __NR_clock_getres (__NR_timer_create+7) 201#define __NR_clock_getres 261
202#define __NR_clock_nanosleep (__NR_timer_create+8) 202#define __NR_clock_nanosleep 262
203/* Number 263 is reserved for vserver */ 203/* Number 263 is reserved for vserver */
204#define __NR_statfs64 265 204#define __NR_statfs64 265
205#define __NR_fstatfs64 266 205#define __NR_fstatfs64 266
@@ -309,7 +309,8 @@
309#define __NR_recvfrom 371 309#define __NR_recvfrom 371
310#define __NR_recvmsg 372 310#define __NR_recvmsg 372
311#define __NR_shutdown 373 311#define __NR_shutdown 373
312#define NR_syscalls 374 312#define __NR_mlock2 374
313#define NR_syscalls 375
313 314
314/* 315/*
315 * There are some system calls that are not present on 64 bit, some 316 * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index 09f194052df3..fac4eeddef91 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -176,3 +176,4 @@ COMPAT_SYSCALL_WRAP4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
176COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len); 176COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
177COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len); 177COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
178COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len); 178COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
179COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index f98766ede4e1..48b37b8357e6 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -121,14 +121,14 @@ device_initcall(show_diag_stat_init);
121void diag_stat_inc(enum diag_stat_enum nr) 121void diag_stat_inc(enum diag_stat_enum nr)
122{ 122{
123 this_cpu_inc(diag_stat.counter[nr]); 123 this_cpu_inc(diag_stat.counter[nr]);
124 trace_diagnose(diag_map[nr].code); 124 trace_s390_diagnose(diag_map[nr].code);
125} 125}
126EXPORT_SYMBOL(diag_stat_inc); 126EXPORT_SYMBOL(diag_stat_inc);
127 127
128void diag_stat_inc_norecursion(enum diag_stat_enum nr) 128void diag_stat_inc_norecursion(enum diag_stat_enum nr)
129{ 129{
130 this_cpu_inc(diag_stat.counter[nr]); 130 this_cpu_inc(diag_stat.counter[nr]);
131 trace_diagnose_norecursion(diag_map[nr].code); 131 trace_s390_diagnose_norecursion(diag_map[nr].code);
132} 132}
133EXPORT_SYMBOL(diag_stat_inc_norecursion); 133EXPORT_SYMBOL(diag_stat_inc_norecursion);
134 134
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 1255c6c5353e..301ee9c70688 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -26,6 +26,7 @@
26#include <asm/asm-offsets.h> 26#include <asm/asm-offsets.h>
27#include <asm/thread_info.h> 27#include <asm/thread_info.h>
28#include <asm/page.h> 28#include <asm/page.h>
29#include <asm/ptrace.h>
29 30
30#define ARCH_OFFSET 4 31#define ARCH_OFFSET 4
31 32
@@ -59,19 +60,6 @@ __HEAD
59 .long 0x020006e0,0x20000050 60 .long 0x020006e0,0x20000050
60 61
61 .org 0x200 62 .org 0x200
62#
63# subroutine to set architecture mode
64#
65.Lsetmode:
66 mvi __LC_AR_MODE_ID,1 # set esame flag
67 slr %r0,%r0 # set cpuid to zero
68 lhi %r1,2 # mode 2 = esame (dump)
69 sigp %r1,%r0,0x12 # switch to esame mode
70 bras %r13,0f
71 .fill 16,4,0x0
720: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
73 sam31 # switch to 31 bit addressing mode
74 br %r14
75 63
76# 64#
77# subroutine to wait for end I/O 65# subroutine to wait for end I/O
@@ -159,7 +147,14 @@ __HEAD
159 .long 0x02200050,0x00000000 147 .long 0x02200050,0x00000000
160 148
161iplstart: 149iplstart:
162 bas %r14,.Lsetmode # Immediately switch to 64 bit mode 150 mvi __LC_AR_MODE_ID,1 # set esame flag
151 slr %r0,%r0 # set cpuid to zero
152 lhi %r1,2 # mode 2 = esame (dump)
153 sigp %r1,%r0,0x12 # switch to esame mode
154 bras %r13,0f
155 .fill 16,4,0x0
1560: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
157 sam31 # switch to 31 bit addressing mode
163 lh %r1,0xb8 # test if subchannel number 158 lh %r1,0xb8 # test if subchannel number
164 bct %r1,.Lnoload # is valid 159 bct %r1,.Lnoload # is valid
165 l %r1,0xb8 # load ipl subchannel number 160 l %r1,0xb8 # load ipl subchannel number
@@ -269,71 +264,6 @@ iplstart:
269.Lcpuid:.fill 8,1,0 264.Lcpuid:.fill 8,1,0
270 265
271# 266#
272# SALIPL loader support. Based on a patch by Rob van der Heij.
273# This entry point is called directly from the SALIPL loader and
274# doesn't need a builtin ipl record.
275#
276 .org 0x800
277ENTRY(start)
278 stm %r0,%r15,0x07b0 # store registers
279 bas %r14,.Lsetmode # Immediately switch to 64 bit mode
280 basr %r12,%r0
281.base:
282 l %r11,.parm
283 l %r8,.cmd # pointer to command buffer
284
285 ltr %r9,%r9 # do we have SALIPL parameters?
286 bp .sk8x8
287
288 mvc 0(64,%r8),0x00b0 # copy saved registers
289 xc 64(240-64,%r8),0(%r8) # remainder of buffer
290 tr 0(64,%r8),.lowcase
291 b .gotr
292.sk8x8:
293 mvc 0(240,%r8),0(%r9) # copy iplparms into buffer
294.gotr:
295 slr %r0,%r0
296 st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11)
297 st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11)
298 j startup # continue with startup
299.cmd: .long COMMAND_LINE # address of command line buffer
300.parm: .long PARMAREA
301.lowcase:
302 .byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07
303 .byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
304 .byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17
305 .byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f
306 .byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27
307 .byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f
308 .byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37
309 .byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f
310 .byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47
311 .byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f
312 .byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57
313 .byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f
314 .byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67
315 .byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f
316 .byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77
317 .byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f
318
319 .byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87
320 .byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f
321 .byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97
322 .byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f
323 .byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7
324 .byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf
325 .byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7
326 .byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf
327 .byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87 # .abcdefg
328 .byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf # hi
329 .byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 # .jklmnop
330 .byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf # qr
331 .byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 # ..stuvwx
332 .byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef # yz
333 .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
334 .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
335
336#
337# startup-code at 0x10000, running in absolute addressing mode 267# startup-code at 0x10000, running in absolute addressing mode
338# this is called either by the ipl loader or directly by PSW restart 268# this is called either by the ipl loader or directly by PSW restart
339# or linload or SALIPL 269# or linload or SALIPL
@@ -364,7 +294,7 @@ ENTRY(startup_kdump)
364 bras %r13,0f 294 bras %r13,0f
365 .fill 16,4,0x0 295 .fill 16,4,0x0
3660: lmh %r0,%r15,0(%r13) # clear high-order half of gprs 2960: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
367 sam31 # switch to 31 bit addressing mode 297 sam64 # switch to 64 bit addressing mode
368 basr %r13,0 # get base 298 basr %r13,0 # get base
369.LPG0: 299.LPG0:
370 xc 0x200(256),0x200 # partially clear lowcore 300 xc 0x200(256),0x200 # partially clear lowcore
@@ -395,7 +325,7 @@ ENTRY(startup_kdump)
395 jnz 1b 325 jnz 1b
396 j 4f 326 j 4f
3972: l %r15,.Lstack-.LPG0(%r13) 3272: l %r15,.Lstack-.LPG0(%r13)
398 ahi %r15,-96 328 ahi %r15,-STACK_FRAME_OVERHEAD
399 la %r2,.Lals_string-.LPG0(%r13) 329 la %r2,.Lals_string-.LPG0(%r13)
400 l %r3,.Lsclp_print-.LPG0(%r13) 330 l %r3,.Lsclp_print-.LPG0(%r13)
401 basr %r14,%r3 331 basr %r14,%r3
@@ -429,8 +359,7 @@ ENTRY(startup_kdump)
429 .long 1, 0xc0000000 359 .long 1, 0xc0000000
430#endif 360#endif
4314: 3614:
432 /* Continue with 64bit startup code in head64.S */ 362 /* Continue with startup code in head64.S */
433 sam64 # switch to 64 bit mode
434 jg startup_continue 363 jg startup_continue
435 364
436 .align 8 365 .align 8
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index f6d8acd7e136..b1f0a90f933b 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -121,6 +121,7 @@ static char *dump_type_str(enum dump_type type)
121 * Must be in data section since the bss section 121 * Must be in data section since the bss section
122 * is not cleared when these are accessed. 122 * is not cleared when these are accessed.
123 */ 123 */
124static u8 ipl_ssid __attribute__((__section__(".data"))) = 0;
124static u16 ipl_devno __attribute__((__section__(".data"))) = 0; 125static u16 ipl_devno __attribute__((__section__(".data"))) = 0;
125u32 ipl_flags __attribute__((__section__(".data"))) = 0; 126u32 ipl_flags __attribute__((__section__(".data"))) = 0;
126 127
@@ -197,6 +198,33 @@ static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \
197 return snprintf(page, PAGE_SIZE, _format, ##args); \ 198 return snprintf(page, PAGE_SIZE, _format, ##args); \
198} 199}
199 200
201#define IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk) \
202static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
203 struct kobj_attribute *attr, \
204 const char *buf, size_t len) \
205{ \
206 unsigned long long ssid, devno; \
207 \
208 if (sscanf(buf, "0.%llx.%llx\n", &ssid, &devno) != 2) \
209 return -EINVAL; \
210 \
211 if (ssid > __MAX_SSID || devno > __MAX_SUBCHANNEL) \
212 return -EINVAL; \
213 \
214 _ipl_blk.ssid = ssid; \
215 _ipl_blk.devno = devno; \
216 return len; \
217}
218
219#define DEFINE_IPL_CCW_ATTR_RW(_prefix, _name, _ipl_blk) \
220IPL_ATTR_SHOW_FN(_prefix, _name, "0.%x.%04x\n", \
221 _ipl_blk.ssid, _ipl_blk.devno); \
222IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk); \
223static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
224 __ATTR(_name, (S_IRUGO | S_IWUSR), \
225 sys_##_prefix##_##_name##_show, \
226 sys_##_prefix##_##_name##_store) \
227
200#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \ 228#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \
201IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value) \ 229IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value) \
202static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ 230static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
@@ -395,7 +423,7 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj,
395 423
396 switch (ipl_info.type) { 424 switch (ipl_info.type) {
397 case IPL_TYPE_CCW: 425 case IPL_TYPE_CCW:
398 return sprintf(page, "0.0.%04x\n", ipl_devno); 426 return sprintf(page, "0.%x.%04x\n", ipl_ssid, ipl_devno);
399 case IPL_TYPE_FCP: 427 case IPL_TYPE_FCP:
400 case IPL_TYPE_FCP_DUMP: 428 case IPL_TYPE_FCP_DUMP:
401 return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno); 429 return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno);
@@ -687,21 +715,14 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
687 struct bin_attribute *attr, 715 struct bin_attribute *attr,
688 char *buf, loff_t off, size_t count) 716 char *buf, loff_t off, size_t count)
689{ 717{
718 size_t scpdata_len = count;
690 size_t padding; 719 size_t padding;
691 size_t scpdata_len;
692
693 if (off < 0)
694 return -EINVAL;
695 720
696 if (off >= DIAG308_SCPDATA_SIZE)
697 return -ENOSPC;
698 721
699 if (count > DIAG308_SCPDATA_SIZE - off) 722 if (off)
700 count = DIAG308_SCPDATA_SIZE - off; 723 return -EINVAL;
701
702 memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf + off, count);
703 scpdata_len = off + count;
704 724
725 memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf, count);
705 if (scpdata_len % 8) { 726 if (scpdata_len % 8) {
706 padding = 8 - (scpdata_len % 8); 727 padding = 8 - (scpdata_len % 8);
707 memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len, 728 memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len,
@@ -717,7 +738,7 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
717} 738}
718static struct bin_attribute sys_reipl_fcp_scp_data_attr = 739static struct bin_attribute sys_reipl_fcp_scp_data_attr =
719 __BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_fcp_scpdata_read, 740 __BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_fcp_scpdata_read,
720 reipl_fcp_scpdata_write, PAGE_SIZE); 741 reipl_fcp_scpdata_write, DIAG308_SCPDATA_SIZE);
721 742
722static struct bin_attribute *reipl_fcp_bin_attrs[] = { 743static struct bin_attribute *reipl_fcp_bin_attrs[] = {
723 &sys_reipl_fcp_scp_data_attr, 744 &sys_reipl_fcp_scp_data_attr,
@@ -814,9 +835,7 @@ static struct attribute_group reipl_fcp_attr_group = {
814}; 835};
815 836
816/* CCW reipl device attributes */ 837/* CCW reipl device attributes */
817 838DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ipl_info.ccw);
818DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
819 reipl_block_ccw->ipl_info.ccw.devno);
820 839
821/* NSS wrapper */ 840/* NSS wrapper */
822static ssize_t reipl_nss_loadparm_show(struct kobject *kobj, 841static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
@@ -1056,8 +1075,8 @@ static void __reipl_run(void *unused)
1056 1075
1057 switch (reipl_method) { 1076 switch (reipl_method) {
1058 case REIPL_METHOD_CCW_CIO: 1077 case REIPL_METHOD_CCW_CIO:
1078 devid.ssid = reipl_block_ccw->ipl_info.ccw.ssid;
1059 devid.devno = reipl_block_ccw->ipl_info.ccw.devno; 1079 devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
1060 devid.ssid = 0;
1061 reipl_ccw_dev(&devid); 1080 reipl_ccw_dev(&devid);
1062 break; 1081 break;
1063 case REIPL_METHOD_CCW_VM: 1082 case REIPL_METHOD_CCW_VM:
@@ -1192,6 +1211,7 @@ static int __init reipl_ccw_init(void)
1192 1211
1193 reipl_block_ccw_init(reipl_block_ccw); 1212 reipl_block_ccw_init(reipl_block_ccw);
1194 if (ipl_info.type == IPL_TYPE_CCW) { 1213 if (ipl_info.type == IPL_TYPE_CCW) {
1214 reipl_block_ccw->ipl_info.ccw.ssid = ipl_ssid;
1195 reipl_block_ccw->ipl_info.ccw.devno = ipl_devno; 1215 reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
1196 reipl_block_ccw_fill_parms(reipl_block_ccw); 1216 reipl_block_ccw_fill_parms(reipl_block_ccw);
1197 } 1217 }
@@ -1336,9 +1356,7 @@ static struct attribute_group dump_fcp_attr_group = {
1336}; 1356};
1337 1357
1338/* CCW dump device attributes */ 1358/* CCW dump device attributes */
1339 1359DEFINE_IPL_CCW_ATTR_RW(dump_ccw, device, dump_block_ccw->ipl_info.ccw);
1340DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
1341 dump_block_ccw->ipl_info.ccw.devno);
1342 1360
1343static struct attribute *dump_ccw_attrs[] = { 1361static struct attribute *dump_ccw_attrs[] = {
1344 &sys_dump_ccw_device_attr.attr, 1362 &sys_dump_ccw_device_attr.attr,
@@ -1418,8 +1436,8 @@ static void __dump_run(void *unused)
1418 1436
1419 switch (dump_method) { 1437 switch (dump_method) {
1420 case DUMP_METHOD_CCW_CIO: 1438 case DUMP_METHOD_CCW_CIO:
1439 devid.ssid = dump_block_ccw->ipl_info.ccw.ssid;
1421 devid.devno = dump_block_ccw->ipl_info.ccw.devno; 1440 devid.devno = dump_block_ccw->ipl_info.ccw.devno;
1422 devid.ssid = 0;
1423 reipl_ccw_dev(&devid); 1441 reipl_ccw_dev(&devid);
1424 break; 1442 break;
1425 case DUMP_METHOD_CCW_VM: 1443 case DUMP_METHOD_CCW_VM:
@@ -1939,14 +1957,14 @@ void __init setup_ipl(void)
1939 ipl_info.type = get_ipl_type(); 1957 ipl_info.type = get_ipl_type();
1940 switch (ipl_info.type) { 1958 switch (ipl_info.type) {
1941 case IPL_TYPE_CCW: 1959 case IPL_TYPE_CCW:
1960 ipl_info.data.ccw.dev_id.ssid = ipl_ssid;
1942 ipl_info.data.ccw.dev_id.devno = ipl_devno; 1961 ipl_info.data.ccw.dev_id.devno = ipl_devno;
1943 ipl_info.data.ccw.dev_id.ssid = 0;
1944 break; 1962 break;
1945 case IPL_TYPE_FCP: 1963 case IPL_TYPE_FCP:
1946 case IPL_TYPE_FCP_DUMP: 1964 case IPL_TYPE_FCP_DUMP:
1965 ipl_info.data.fcp.dev_id.ssid = 0;
1947 ipl_info.data.fcp.dev_id.devno = 1966 ipl_info.data.fcp.dev_id.devno =
1948 IPL_PARMBLOCK_START->ipl_info.fcp.devno; 1967 IPL_PARMBLOCK_START->ipl_info.fcp.devno;
1949 ipl_info.data.fcp.dev_id.ssid = 0;
1950 ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn; 1968 ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
1951 ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun; 1969 ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
1952 break; 1970 break;
@@ -1978,6 +1996,7 @@ void __init ipl_save_parameters(void)
1978 if (cio_get_iplinfo(&iplinfo)) 1996 if (cio_get_iplinfo(&iplinfo))
1979 return; 1997 return;
1980 1998
1999 ipl_ssid = iplinfo.ssid;
1981 ipl_devno = iplinfo.devno; 2000 ipl_devno = iplinfo.devno;
1982 ipl_flags |= IPL_DEVNO_VALID; 2001 ipl_flags |= IPL_DEVNO_VALID;
1983 if (!iplinfo.is_qdio) 2002 if (!iplinfo.is_qdio)
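The new DEFINE_IPL_CCW_ATTR_RW store path above accepts a full ccw bus id of the form 0.<ssid>.<devno> instead of hard-coding subchannel set 0. A minimal user-space sketch of the same parse-and-validate logic follows; the limits are illustrative stand-ins for the kernel's __MAX_SSID and __MAX_SUBCHANNEL from the s390 cio headers:

#include <stdio.h>

/* Illustrative limits; the kernel takes these from asm/cio.h. */
#define MAX_SSID	3	/* ssid is a 2-bit field */
#define MAX_SUBCHANNEL	0xffff	/* devno is a 16-bit field */

/* Parse "0.<ssid>.<devno>", as the new sysfs store function does. */
static int parse_ccw_busid(const char *buf, unsigned long long *ssid,
			   unsigned long long *devno)
{
	if (sscanf(buf, "0.%llx.%llx", ssid, devno) != 2)
		return -1;
	if (*ssid > MAX_SSID || *devno > MAX_SUBCHANNEL)
		return -1;
	return 0;
}

int main(void)
{
	unsigned long long ssid, devno;

	if (parse_ccw_busid("0.1.5a21\n", &ssid, &devno) == 0)
		printf("ssid=%llx devno=%04llx\n", ssid, devno);
	return 0;
}

In practice this means a reipl device on a nonzero subchannel set can now be configured with something like echo 0.1.5a21 > /sys/firmware/reipl/ccw/device.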
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 688a3aad9c79..114ee8b96f17 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -243,11 +243,7 @@ unsigned long arch_align_stack(unsigned long sp)
243 243
244static inline unsigned long brk_rnd(void) 244static inline unsigned long brk_rnd(void)
245{ 245{
246 /* 8MB for 32bit, 1GB for 64bit */ 246 return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
247 if (is_32bit_task())
248 return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
249 else
250 return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
251} 247}
252 248
253unsigned long arch_randomize_brk(struct mm_struct *mm) 249unsigned long arch_randomize_brk(struct mm_struct *mm)
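brk_rnd() now applies one compile-time mask instead of branching on the task's personality. A sketch of the arithmetic, assuming BRK_RND_MASK is the 0x3ffffUL value the old 64-bit branch used (the real constant lives in the updated asm headers) and using rand() as a stand-in for get_random_int():

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT   12
#define BRK_RND_MASK 0x3ffffUL	/* assumed value */

static unsigned long brk_rnd(void)
{
	return ((unsigned long)rand() & BRK_RND_MASK) << PAGE_SHIFT;
}

int main(void)
{
	/* (0x3ffff + 1) << 12 bytes == 1 GiB of possible brk offsets */
	printf("max offset: %lu MiB\n",
	       ((BRK_RND_MASK + 1) << PAGE_SHIFT) >> 20);
	printf("sample offset: %#lx\n", brk_rnd());
	return 0;
}

This matches the "1GB for 64bit" behavior of the removed comment; only the 31-bit special case is gone.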
diff --git a/arch/s390/kernel/sclp.c b/arch/s390/kernel/sclp.c
index fa0bdff1d413..9fe7781a45cd 100644
--- a/arch/s390/kernel/sclp.c
+++ b/arch/s390/kernel/sclp.c
@@ -21,7 +21,7 @@ static void _sclp_wait_int(void)
21 __ctl_load(cr0_new, 0, 0); 21 __ctl_load(cr0_new, 0, 0);
22 22
23 psw_ext_save = S390_lowcore.external_new_psw; 23 psw_ext_save = S390_lowcore.external_new_psw;
24 psw_mask = __extract_psw() & (PSW_MASK_EA | PSW_MASK_BA); 24 psw_mask = __extract_psw();
25 S390_lowcore.external_new_psw.mask = psw_mask; 25 S390_lowcore.external_new_psw.mask = psw_mask;
26 psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT; 26 psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT;
27 S390_lowcore.ext_int_code = 0; 27 S390_lowcore.ext_int_code = 0;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index ce0cbd6ba7ca..c837bcacf218 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -764,9 +764,6 @@ static int __init setup_hwcaps(void)
764 get_cpu_id(&cpu_id); 764 get_cpu_id(&cpu_id);
765 add_device_randomness(&cpu_id, sizeof(cpu_id)); 765 add_device_randomness(&cpu_id, sizeof(cpu_id));
766 switch (cpu_id.machine) { 766 switch (cpu_id.machine) {
767 case 0x9672:
768 strcpy(elf_platform, "g5");
769 break;
770 case 0x2064: 767 case 0x2064:
771 case 0x2066: 768 case 0x2066:
772 default: /* Use "z900" as default for 64 bit kernels. */ 769 default: /* Use "z900" as default for 64 bit kernels. */
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 8c56929c8d82..5378c3ea1b98 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -382,3 +382,4 @@ SYSCALL(sys_sendmsg,compat_sys_sendmsg) /* 370 */
382SYSCALL(sys_recvfrom,compat_sys_recvfrom) 382SYSCALL(sys_recvfrom,compat_sys_recvfrom)
383SYSCALL(sys_recvmsg,compat_sys_recvmsg) 383SYSCALL(sys_recvmsg,compat_sys_recvmsg)
384SYSCALL(sys_shutdown,sys_shutdown) 384SYSCALL(sys_shutdown,sys_shutdown)
385SYSCALL(sys_mlock2,compat_sys_mlock2)
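With the compat wrapper wired into the table, mlock2() becomes reachable from both 64-bit and 31-bit user space. A hedged usage sketch via syscall(2), assuming headers new enough to define SYS_mlock2 and the MLOCK_ONFAULT uapi flag:

#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef MLOCK_ONFAULT
#define MLOCK_ONFAULT 0x01	/* uapi value for mlock2() */
#endif

int main(void)
{
	static char buf[4096];

	/* Lock pages as they are faulted in, not all up front. */
	if (syscall(SYS_mlock2, buf, sizeof(buf), MLOCK_ONFAULT) != 0) {
		perror("mlock2");
		return 1;
	}
	memset(buf, 0, sizeof(buf));	/* pages become resident and locked here */
	return 0;
}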
diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c
index 73239bb576c4..21a5df99552b 100644
--- a/arch/s390/kernel/trace.c
+++ b/arch/s390/kernel/trace.c
@@ -9,11 +9,11 @@
9#define CREATE_TRACE_POINTS 9#define CREATE_TRACE_POINTS
10#include <asm/trace/diag.h> 10#include <asm/trace/diag.h>
11 11
12EXPORT_TRACEPOINT_SYMBOL(diagnose); 12EXPORT_TRACEPOINT_SYMBOL(s390_diagnose);
13 13
14static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth); 14static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth);
15 15
16void trace_diagnose_norecursion(int diag_nr) 16void trace_s390_diagnose_norecursion(int diag_nr)
17{ 17{
18 unsigned long flags; 18 unsigned long flags;
19 unsigned int *depth; 19 unsigned int *depth;
@@ -22,7 +22,7 @@ void trace_diagnose_norecursion(int diag_nr)
22 depth = this_cpu_ptr(&diagnose_trace_depth); 22 depth = this_cpu_ptr(&diagnose_trace_depth);
23 if (*depth == 0) { 23 if (*depth == 0) {
24 (*depth)++; 24 (*depth)++;
25 trace_diagnose(diag_nr); 25 trace_s390_diagnose(diag_nr);
26 (*depth)--; 26 (*depth)--;
27 } 27 }
28 local_irq_restore(flags); 28 local_irq_restore(flags);
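The renamed trace_s390_diagnose_norecursion() keeps its per-CPU depth counter so the tracepoint cannot re-enter itself. The same guard pattern in plain C, with a thread-local counter standing in for the per-CPU variable and the irq-save bracketing elided:

#include <stdio.h>

static _Thread_local unsigned int trace_depth;

static void do_trace(int nr) { printf("diag %x\n", nr); }

/* Fire the tracepoint only if we are not already inside it. */
static void trace_norecursion(int nr)
{
	if (trace_depth == 0) {
		trace_depth++;
		do_trace(nr);
		trace_depth--;
	}
}

int main(void)
{
	trace_norecursion(0x204);
	return 0;
}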
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index c3c07d3505ba..c722400c7697 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -48,37 +48,13 @@ EXPORT_SYMBOL(zero_page_mask);
48 48
49static void __init setup_zero_pages(void) 49static void __init setup_zero_pages(void)
50{ 50{
51 struct cpuid cpu_id;
52 unsigned int order; 51 unsigned int order;
53 struct page *page; 52 struct page *page;
54 int i; 53 int i;
55 54
56 get_cpu_id(&cpu_id); 55 /* Latest machines require a mapping granularity of 512KB */
57 switch (cpu_id.machine) { 56 order = 7;
58 case 0x9672: /* g5 */ 57
59 case 0x2064: /* z900 */
60 case 0x2066: /* z900 */
61 case 0x2084: /* z990 */
62 case 0x2086: /* z990 */
63 case 0x2094: /* z9-109 */
64 case 0x2096: /* z9-109 */
65 order = 0;
66 break;
67 case 0x2097: /* z10 */
68 case 0x2098: /* z10 */
69 case 0x2817: /* z196 */
70 case 0x2818: /* z196 */
71 order = 2;
72 break;
73 case 0x2827: /* zEC12 */
74 case 0x2828: /* zEC12 */
75 order = 5;
76 break;
77 case 0x2964: /* z13 */
78 default:
79 order = 7;
80 break;
81 }
82 /* Limit number of empty zero pages for small memory sizes */ 58 /* Limit number of empty zero pages for small memory sizes */
83 while (order > 2 && (totalram_pages >> 10) < (1UL << order)) 59 while (order > 2 && (totalram_pages >> 10) < (1UL << order))
84 order--; 60 order--;
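With the machine-type switch gone, setup_zero_pages() always starts from order 7, i.e. 2^7 pages or 512 KiB of empty zero pages, and the existing loop still trims that on small systems. A worked sketch of the sizing logic, with totalram_pages passed in rather than read from the kernel symbol:

#include <stdio.h>

#define PAGE_SHIFT 12

/* Mirrors the new sizing logic in setup_zero_pages(). */
static unsigned int zero_page_order(unsigned long totalram_pages)
{
	unsigned int order = 7;	/* 512KB mapping granularity */

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
		order--;
	return order;
}

int main(void)
{
	unsigned long pages_1g = 1UL << (30 - PAGE_SHIFT);

	printf("1 GiB machine -> order %u (%lu KiB)\n",
	       zero_page_order(pages_1g),
	       (1UL << zero_page_order(pages_1g)) * 4UL);
	return 0;
}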
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 6e552af08c76..ea01477b4aa6 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -31,9 +31,6 @@
31#include <linux/security.h> 31#include <linux/security.h>
32#include <asm/pgalloc.h> 32#include <asm/pgalloc.h>
33 33
34unsigned long mmap_rnd_mask;
35static unsigned long mmap_align_mask;
36
37static unsigned long stack_maxrandom_size(void) 34static unsigned long stack_maxrandom_size(void)
38{ 35{
39 if (!(current->flags & PF_RANDOMIZE)) 36 if (!(current->flags & PF_RANDOMIZE))
@@ -62,10 +59,7 @@ static inline int mmap_is_legacy(void)
62 59
63unsigned long arch_mmap_rnd(void) 60unsigned long arch_mmap_rnd(void)
64{ 61{
65 if (is_32bit_task()) 62 return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
66 return (get_random_int() & 0x7ff) << PAGE_SHIFT;
67 else
68 return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
69} 63}
70 64
71static unsigned long mmap_base_legacy(unsigned long rnd) 65static unsigned long mmap_base_legacy(unsigned long rnd)
@@ -92,7 +86,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
92 struct mm_struct *mm = current->mm; 86 struct mm_struct *mm = current->mm;
93 struct vm_area_struct *vma; 87 struct vm_area_struct *vma;
94 struct vm_unmapped_area_info info; 88 struct vm_unmapped_area_info info;
95 int do_color_align;
96 89
97 if (len > TASK_SIZE - mmap_min_addr) 90 if (len > TASK_SIZE - mmap_min_addr)
98 return -ENOMEM; 91 return -ENOMEM;
@@ -108,15 +101,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
108 return addr; 101 return addr;
109 } 102 }
110 103
111 do_color_align = 0;
112 if (filp || (flags & MAP_SHARED))
113 do_color_align = !is_32bit_task();
114
115 info.flags = 0; 104 info.flags = 0;
116 info.length = len; 105 info.length = len;
117 info.low_limit = mm->mmap_base; 106 info.low_limit = mm->mmap_base;
118 info.high_limit = TASK_SIZE; 107 info.high_limit = TASK_SIZE;
119 info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0; 108 if (filp || (flags & MAP_SHARED))
109 info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
110 else
111 info.align_mask = 0;
120 info.align_offset = pgoff << PAGE_SHIFT; 112 info.align_offset = pgoff << PAGE_SHIFT;
121 return vm_unmapped_area(&info); 113 return vm_unmapped_area(&info);
122} 114}
@@ -130,7 +122,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
130 struct mm_struct *mm = current->mm; 122 struct mm_struct *mm = current->mm;
131 unsigned long addr = addr0; 123 unsigned long addr = addr0;
132 struct vm_unmapped_area_info info; 124 struct vm_unmapped_area_info info;
133 int do_color_align;
134 125
135 /* requested length too big for entire address space */ 126 /* requested length too big for entire address space */
136 if (len > TASK_SIZE - mmap_min_addr) 127 if (len > TASK_SIZE - mmap_min_addr)
@@ -148,15 +139,14 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
148 return addr; 139 return addr;
149 } 140 }
150 141
151 do_color_align = 0;
152 if (filp || (flags & MAP_SHARED))
153 do_color_align = !is_32bit_task();
154
155 info.flags = VM_UNMAPPED_AREA_TOPDOWN; 142 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
156 info.length = len; 143 info.length = len;
157 info.low_limit = max(PAGE_SIZE, mmap_min_addr); 144 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
158 info.high_limit = mm->mmap_base; 145 info.high_limit = mm->mmap_base;
159 info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0; 146 if (filp || (flags & MAP_SHARED))
147 info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
148 else
149 info.align_mask = 0;
160 info.align_offset = pgoff << PAGE_SHIFT; 150 info.align_offset = pgoff << PAGE_SHIFT;
161 addr = vm_unmapped_area(&info); 151 addr = vm_unmapped_area(&info);
162 152
@@ -254,35 +244,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
254 mm->get_unmapped_area = s390_get_unmapped_area_topdown; 244 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
255 } 245 }
256} 246}
257
258static int __init setup_mmap_rnd(void)
259{
260 struct cpuid cpu_id;
261
262 get_cpu_id(&cpu_id);
263 switch (cpu_id.machine) {
264 case 0x9672:
265 case 0x2064:
266 case 0x2066:
267 case 0x2084:
268 case 0x2086:
269 case 0x2094:
270 case 0x2096:
271 case 0x2097:
272 case 0x2098:
273 case 0x2817:
274 case 0x2818:
275 case 0x2827:
276 case 0x2828:
277 mmap_rnd_mask = 0x7ffUL;
278 mmap_align_mask = 0UL;
279 break;
280 case 0x2964: /* z13 */
281 default:
282 mmap_rnd_mask = 0x3ff80UL;
283 mmap_align_mask = 0x7fUL;
284 break;
285 }
286 return 0;
287}
288early_initcall(setup_mmap_rnd);
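The removed setup_mmap_rnd() initcall is replaced by compile-time MMAP_RND_MASK and MMAP_ALIGN_MASK constants in the asm headers. A sketch of how the two compose, assuming the z13 values from the deleted table (0x3ff80 for randomness, 0x7f pages for cache-color alignment) and rand() in place of get_random_int():

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT    12
#define MMAP_RND_MASK 0x3ff80UL	/* assumed; matches the old z13 entry */

static unsigned long arch_mmap_rnd(void)
{
	/* Bits 0-6 of the mask are clear, so the offset is 512KB aligned. */
	return ((unsigned long)rand() & MMAP_RND_MASK) << PAGE_SHIFT;
}

int main(void)
{
	unsigned long rnd = arch_mmap_rnd();

	printf("offset %#lx, 512KB aligned: %s\n", rnd,
	       rnd % (512 * 1024) == 0 ? "yes" : "no");
	return 0;
}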
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 37d10f74425a..d348f2c09a1e 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -33,7 +33,7 @@ unsigned long *dma_alloc_cpu_table(void)
33 return NULL; 33 return NULL;
34 34
35 for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++) 35 for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
36 *entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED; 36 *entry = ZPCI_TABLE_INVALID;
37 return table; 37 return table;
38} 38}
39 39
@@ -51,7 +51,7 @@ static unsigned long *dma_alloc_page_table(void)
51 return NULL; 51 return NULL;
52 52
53 for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++) 53 for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
54 *entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED; 54 *entry = ZPCI_PTE_INVALID;
55 return table; 55 return table;
56} 56}
57 57
@@ -95,7 +95,7 @@ static unsigned long *dma_get_page_table_origin(unsigned long *entry)
95 return pto; 95 return pto;
96} 96}
97 97
98static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr) 98unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
99{ 99{
100 unsigned long *sto, *pto; 100 unsigned long *sto, *pto;
101 unsigned int rtx, sx, px; 101 unsigned int rtx, sx, px;
@@ -114,20 +114,10 @@ static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr
114 return &pto[px]; 114 return &pto[px];
115} 115}
116 116
117void dma_update_cpu_trans(unsigned long *dma_table, void *page_addr, 117void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
118 dma_addr_t dma_addr, int flags)
119{ 118{
120 unsigned long *entry;
121
122 entry = dma_walk_cpu_trans(dma_table, dma_addr);
123 if (!entry) {
124 WARN_ON_ONCE(1);
125 return;
126 }
127
128 if (flags & ZPCI_PTE_INVALID) { 119 if (flags & ZPCI_PTE_INVALID) {
129 invalidate_pt_entry(entry); 120 invalidate_pt_entry(entry);
130 return;
131 } else { 121 } else {
132 set_pt_pfaa(entry, page_addr); 122 set_pt_pfaa(entry, page_addr);
133 validate_pt_entry(entry); 123 validate_pt_entry(entry);
@@ -146,18 +136,25 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
146 u8 *page_addr = (u8 *) (pa & PAGE_MASK); 136 u8 *page_addr = (u8 *) (pa & PAGE_MASK);
147 dma_addr_t start_dma_addr = dma_addr; 137 dma_addr_t start_dma_addr = dma_addr;
148 unsigned long irq_flags; 138 unsigned long irq_flags;
139 unsigned long *entry;
149 int i, rc = 0; 140 int i, rc = 0;
150 141
151 if (!nr_pages) 142 if (!nr_pages)
152 return -EINVAL; 143 return -EINVAL;
153 144
154 spin_lock_irqsave(&zdev->dma_table_lock, irq_flags); 145 spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
155 if (!zdev->dma_table) 146 if (!zdev->dma_table) {
147 rc = -EINVAL;
156 goto no_refresh; 148 goto no_refresh;
149 }
157 150
158 for (i = 0; i < nr_pages; i++) { 151 for (i = 0; i < nr_pages; i++) {
159 dma_update_cpu_trans(zdev->dma_table, page_addr, dma_addr, 152 entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
160 flags); 153 if (!entry) {
154 rc = -ENOMEM;
155 goto undo_cpu_trans;
156 }
157 dma_update_cpu_trans(entry, page_addr, flags);
161 page_addr += PAGE_SIZE; 158 page_addr += PAGE_SIZE;
162 dma_addr += PAGE_SIZE; 159 dma_addr += PAGE_SIZE;
163 } 160 }
@@ -176,6 +173,18 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
176 173
177 rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr, 174 rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
178 nr_pages * PAGE_SIZE); 175 nr_pages * PAGE_SIZE);
176undo_cpu_trans:
177 if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
178 flags = ZPCI_PTE_INVALID;
179 while (i-- > 0) {
180 page_addr -= PAGE_SIZE;
181 dma_addr -= PAGE_SIZE;
182 entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
183 if (!entry)
184 break;
185 dma_update_cpu_trans(entry, page_addr, flags);
186 }
187 }
179 188
180no_refresh: 189no_refresh:
181 spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags); 190 spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
@@ -260,6 +269,16 @@ out:
260 spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags); 269 spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
261} 270}
262 271
272static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
273{
274 struct {
275 unsigned long rc;
276 unsigned long addr;
277 } __packed data = {rc, addr};
278
279 zpci_err_hex(&data, sizeof(data));
280}
281
263static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, 282static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
264 unsigned long offset, size_t size, 283 unsigned long offset, size_t size,
265 enum dma_data_direction direction, 284 enum dma_data_direction direction,
@@ -270,33 +289,40 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
270 unsigned long pa = page_to_phys(page) + offset; 289 unsigned long pa = page_to_phys(page) + offset;
271 int flags = ZPCI_PTE_VALID; 290 int flags = ZPCI_PTE_VALID;
272 dma_addr_t dma_addr; 291 dma_addr_t dma_addr;
292 int ret;
273 293
274 /* This rounds up number of pages based on size and offset */ 294 /* This rounds up number of pages based on size and offset */
275 nr_pages = iommu_num_pages(pa, size, PAGE_SIZE); 295 nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
276 iommu_page_index = dma_alloc_iommu(zdev, nr_pages); 296 iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
277 if (iommu_page_index == -1) 297 if (iommu_page_index == -1) {
298 ret = -ENOSPC;
278 goto out_err; 299 goto out_err;
300 }
279 301
280 /* Use rounded up size */ 302 /* Use rounded up size */
281 size = nr_pages * PAGE_SIZE; 303 size = nr_pages * PAGE_SIZE;
282 304
283 dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE; 305 dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
284 if (dma_addr + size > zdev->end_dma) 306 if (dma_addr + size > zdev->end_dma) {
307 ret = -ERANGE;
285 goto out_free; 308 goto out_free;
309 }
286 310
287 if (direction == DMA_NONE || direction == DMA_TO_DEVICE) 311 if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
288 flags |= ZPCI_TABLE_PROTECTED; 312 flags |= ZPCI_TABLE_PROTECTED;
289 313
290 if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) { 314 ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
291 atomic64_add(nr_pages, &zdev->mapped_pages); 315 if (ret)
292 return dma_addr + (offset & ~PAGE_MASK); 316 goto out_free;
293 } 317
318 atomic64_add(nr_pages, &zdev->mapped_pages);
319 return dma_addr + (offset & ~PAGE_MASK);
294 320
295out_free: 321out_free:
296 dma_free_iommu(zdev, iommu_page_index, nr_pages); 322 dma_free_iommu(zdev, iommu_page_index, nr_pages);
297out_err: 323out_err:
298 zpci_err("map error:\n"); 324 zpci_err("map error:\n");
299 zpci_err_hex(&pa, sizeof(pa)); 325 zpci_err_dma(ret, pa);
300 return DMA_ERROR_CODE; 326 return DMA_ERROR_CODE;
301} 327}
302 328
@@ -306,14 +332,16 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
306{ 332{
307 struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); 333 struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
308 unsigned long iommu_page_index; 334 unsigned long iommu_page_index;
309 int npages; 335 int npages, ret;
310 336
311 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); 337 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
312 dma_addr = dma_addr & PAGE_MASK; 338 dma_addr = dma_addr & PAGE_MASK;
313 if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE, 339 ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
314 ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) { 340 ZPCI_PTE_INVALID);
341 if (ret) {
315 zpci_err("unmap error:\n"); 342 zpci_err("unmap error:\n");
316 zpci_err_hex(&dma_addr, sizeof(dma_addr)); 343 zpci_err_dma(ret, dma_addr);
344 return;
317 } 345 }
318 346
319 atomic64_add(npages, &zdev->unmapped_pages); 347 atomic64_add(npages, &zdev->unmapped_pages);
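dma_update_trans() now unwinds partially installed translation entries when a table walk or the RPCIT refresh fails, instead of leaving the range half mapped. A minimal user-space sketch of the same install/rollback shape over a toy table; the names are illustrative, not the zPCI API:

#include <stdio.h>

#define NR_SLOTS 8

static int table[NR_SLOTS];	/* toy stand-in for the DMA translation table */

static int map_range(int first, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (first + i >= NR_SLOTS)	/* like dma_walk_cpu_trans() failing */
			goto undo;
		table[first + i] = 1;		/* like validating a PT entry */
	}
	return 0;
undo:
	/* Invalidate exactly the entries installed so far, in reverse. */
	while (i-- > 0)
		table[first + i] = 0;
	return -1;
}

int main(void)
{
	printf("full map: %d\n", map_range(2, 4));
	printf("partial map rolled back: %d\n", map_range(6, 4));
	return 0;
}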
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 9f3905697f12..690b4027e17c 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -35,7 +35,7 @@
35#define MSR_IA32_PERFCTR0 0x000000c1 35#define MSR_IA32_PERFCTR0 0x000000c1
36#define MSR_IA32_PERFCTR1 0x000000c2 36#define MSR_IA32_PERFCTR1 0x000000c2
37#define MSR_FSB_FREQ 0x000000cd 37#define MSR_FSB_FREQ 0x000000cd
38#define MSR_NHM_PLATFORM_INFO 0x000000ce 38#define MSR_PLATFORM_INFO 0x000000ce
39 39
40#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 40#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
41#define NHM_C3_AUTO_DEMOTE (1UL << 25) 41#define NHM_C3_AUTO_DEMOTE (1UL << 25)
@@ -44,7 +44,6 @@
44#define SNB_C1_AUTO_UNDEMOTE (1UL << 27) 44#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
45#define SNB_C3_AUTO_UNDEMOTE (1UL << 28) 45#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
46 46
47#define MSR_PLATFORM_INFO 0x000000ce
48#define MSR_MTRRcap 0x000000fe 47#define MSR_MTRRcap 0x000000fe
49#define MSR_IA32_BBL_CR_CTL 0x00000119 48#define MSR_IA32_BBL_CR_CTL 0x00000119
50#define MSR_IA32_BBL_CR_CTL3 0x0000011e 49#define MSR_IA32_BBL_CR_CTL3 0x0000011e
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4ddd780aeac9..c2b7522cbf35 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -273,10 +273,9 @@ __setup("nosmap", setup_disable_smap);
273 273
274static __always_inline void setup_smap(struct cpuinfo_x86 *c) 274static __always_inline void setup_smap(struct cpuinfo_x86 *c)
275{ 275{
276 unsigned long eflags; 276 unsigned long eflags = native_save_fl();
277 277
278 /* This should have been cleared long ago */ 278 /* This should have been cleared long ago */
279 raw_local_save_flags(eflags);
280 BUG_ON(eflags & X86_EFLAGS_AC); 279 BUG_ON(eflags & X86_EFLAGS_AC);
281 280
282 if (cpu_has(c, X86_FEATURE_SMAP)) { 281 if (cpu_has(c, X86_FEATURE_SMAP)) {
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index ef29b742cea7..31c6a60505e6 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -385,20 +385,19 @@ fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
385 */ 385 */
386void fpu__init_prepare_fx_sw_frame(void) 386void fpu__init_prepare_fx_sw_frame(void)
387{ 387{
388 int fsave_header_size = sizeof(struct fregs_state);
389 int size = xstate_size + FP_XSTATE_MAGIC2_SIZE; 388 int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;
390 389
391 if (config_enabled(CONFIG_X86_32))
392 size += fsave_header_size;
393
394 fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1; 390 fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
395 fx_sw_reserved.extended_size = size; 391 fx_sw_reserved.extended_size = size;
396 fx_sw_reserved.xfeatures = xfeatures_mask; 392 fx_sw_reserved.xfeatures = xfeatures_mask;
397 fx_sw_reserved.xstate_size = xstate_size; 393 fx_sw_reserved.xstate_size = xstate_size;
398 394
399 if (config_enabled(CONFIG_IA32_EMULATION)) { 395 if (config_enabled(CONFIG_IA32_EMULATION) ||
396 config_enabled(CONFIG_X86_32)) {
397 int fsave_header_size = sizeof(struct fregs_state);
398
400 fx_sw_reserved_ia32 = fx_sw_reserved; 399 fx_sw_reserved_ia32 = fx_sw_reserved;
401 fx_sw_reserved_ia32.extended_size += fsave_header_size; 400 fx_sw_reserved_ia32.extended_size = size + fsave_header_size;
402 } 401 }
403} 402}
404 403
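The rewritten fpu__init_prepare_fx_sw_frame() computes one common frame size and derives the ia32 variant from it, so CONFIG_X86_32 no longer inflates the 64-bit size as well. Worked numbers with illustrative sizes (the real ones depend on the CPU's xfeatures):

#include <stdio.h>

int main(void)
{
	int xstate_size = 832;		/* illustrative: FXSAVE + header + YMM */
	int magic2_size = 4;		/* FP_XSTATE_MAGIC2_SIZE */
	int fsave_header_size = 112;	/* roughly sizeof(struct fregs_state) */

	int size = xstate_size + magic2_size;

	printf("extended_size      = %d\n", size);
	/* ia32 and X86_32 frames also carry the legacy fsave header. */
	printf("ia32 extended_size = %d\n", size + fsave_header_size);
	return 0;
}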
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 6454f2731b56..70fc312221fc 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -694,7 +694,6 @@ void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature)
694 if (!boot_cpu_has(X86_FEATURE_XSAVE)) 694 if (!boot_cpu_has(X86_FEATURE_XSAVE))
695 return NULL; 695 return NULL;
696 696
697 xsave = &current->thread.fpu.state.xsave;
698 /* 697 /*
699 * We should not ever be requesting features that we 698 * We should not ever be requesting features that we
700 * have not enabled. Remember that pcntxt_mask is 699 * have not enabled. Remember that pcntxt_mask is
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index 94ea120fa21f..87e1762e2bca 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -278,6 +278,12 @@ trace:
278 /* save_mcount_regs fills in first two parameters */ 278 /* save_mcount_regs fills in first two parameters */
279 save_mcount_regs 279 save_mcount_regs
280 280
281 /*
282 * When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not
283 * set (see include/asm/ftrace.h and include/linux/ftrace.h). Only the
284 * ip and parent ip are used and the list function is called when
285 * function tracing is enabled.
286 */
281 call *ftrace_trace_function 287 call *ftrace_trace_function
282 288
283 restore_mcount_regs 289 restore_mcount_regs
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index b0ae85f90f10..1202d5ca2fb5 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -586,6 +586,29 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
586} 586}
587 587
588/* 588/*
589 * We only want to do a 4-byte get_user() on 32-bit. Otherwise,
590 * we might run off the end of the bounds table if we are on
591 * a 64-bit kernel and try to get 8 bytes.
592 */
593int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
594 long __user *bd_entry_ptr)
595{
596 u32 bd_entry_32;
597 int ret;
598
599 if (is_64bit_mm(mm))
600 return get_user(*bd_entry_ret, bd_entry_ptr);
601
602 /*
603 * Note that get_user() uses the type of the *pointer* to
604 * establish the size of the get, not the destination.
605 */
606 ret = get_user(bd_entry_32, (u32 __user *)bd_entry_ptr);
607 *bd_entry_ret = bd_entry_32;
608 return ret;
609}
610
611/*
589 * Get the base of bounds tables pointed by specific bounds 612 * Get the base of bounds tables pointed by specific bounds
590 * directory entry. 613 * directory entry.
591 */ 614 */
@@ -605,7 +628,7 @@ static int get_bt_addr(struct mm_struct *mm,
605 int need_write = 0; 628 int need_write = 0;
606 629
607 pagefault_disable(); 630 pagefault_disable();
608 ret = get_user(bd_entry, bd_entry_ptr); 631 ret = get_user_bd_entry(mm, &bd_entry, bd_entry_ptr);
609 pagefault_enable(); 632 pagefault_enable();
610 if (!ret) 633 if (!ret)
611 break; 634 break;
@@ -700,11 +723,23 @@ static unsigned long mpx_get_bt_entry_offset_bytes(struct mm_struct *mm,
700 */ 723 */
701static inline unsigned long bd_entry_virt_space(struct mm_struct *mm) 724static inline unsigned long bd_entry_virt_space(struct mm_struct *mm)
702{ 725{
703 unsigned long long virt_space = (1ULL << boot_cpu_data.x86_virt_bits); 726 unsigned long long virt_space;
704 if (is_64bit_mm(mm)) 727 unsigned long long GB = (1ULL << 30);
705 return virt_space / MPX_BD_NR_ENTRIES_64; 728
706 else 729 /*
707 return virt_space / MPX_BD_NR_ENTRIES_32; 730 * This covers 32-bit emulation as well as 32-bit kernels
731 * running on 64-bit hardware.
732 */
733 if (!is_64bit_mm(mm))
734 return (4ULL * GB) / MPX_BD_NR_ENTRIES_32;
735
736 /*
737 * 'x86_virt_bits' returns what the hardware is capable
738 * of, and returns the full >32-bit address space when
739 * running 32-bit kernels on 64-bit hardware.
740 */
741 virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
742 return virt_space / MPX_BD_NR_ENTRIES_64;
708} 743}
709 744
710/* 745/*
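The reworked bd_entry_virt_space() pins the 32-bit case to a fixed 4 GiB address space rather than trusting x86_virt_bits, which reports the hardware's full width even for 32-bit kernels. Worked arithmetic, assuming the usual MPX directory entry counts (illustrative here; the kernel takes them from its MPX headers):

#include <stdio.h>

#define GB (1ULL << 30)
/* Assumed MPX bounds-directory entry counts. */
#define MPX_BD_NR_ENTRIES_32 (1ULL << 20)
#define MPX_BD_NR_ENTRIES_64 (1ULL << 28)

int main(void)
{
	/* 32-bit mm: fixed 4 GiB space, so 4 KiB of address space per entry. */
	printf("32-bit: %llu bytes/entry\n", 4 * GB / MPX_BD_NR_ENTRIES_32);
	/* 64-bit mm: hardware virtual bits, e.g. 48 on current CPUs. */
	printf("64-bit: %llu bytes/entry\n",
	       (1ULL << 48) / MPX_BD_NR_ENTRIES_64);
	return 0;
}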
diff --git a/block/blk.h b/block/blk.h
index da722eb786df..c43926d3d74d 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -72,8 +72,6 @@ void blk_dequeue_request(struct request *rq);
72void __blk_queue_free_tags(struct request_queue *q); 72void __blk_queue_free_tags(struct request_queue *q);
73bool __blk_end_bidi_request(struct request *rq, int error, 73bool __blk_end_bidi_request(struct request *rq, int error,
74 unsigned int nr_bytes, unsigned int bidi_bytes); 74 unsigned int nr_bytes, unsigned int bidi_bytes);
75int blk_queue_enter(struct request_queue *q, gfp_t gfp);
76void blk_queue_exit(struct request_queue *q);
77void blk_freeze_queue(struct request_queue *q); 75void blk_freeze_queue(struct request_queue *q);
78 76
79static inline void blk_queue_enter_live(struct request_queue *q) 77static inline void blk_queue_enter_live(struct request_queue *q)
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 3c083d2cc434..6730f965b379 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -304,7 +304,7 @@ EXPORT_SYMBOL_GPL(acpi_get_psd_map);
304 304
305static int register_pcc_channel(int pcc_subspace_idx) 305static int register_pcc_channel(int pcc_subspace_idx)
306{ 306{
307 struct acpi_pcct_subspace *cppc_ss; 307 struct acpi_pcct_hw_reduced *cppc_ss;
308 unsigned int len; 308 unsigned int len;
309 309
310 if (pcc_subspace_idx >= 0) { 310 if (pcc_subspace_idx >= 0) {
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index f61a7c834540..b420fb46669d 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1103,7 +1103,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
1103 } 1103 }
1104 1104
1105err_exit: 1105err_exit:
1106 if (result && q) 1106 if (result)
1107 acpi_ec_delete_query(q); 1107 acpi_ec_delete_query(q);
1108 if (data) 1108 if (data)
1109 *data = value; 1109 *data = value;
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index bf034f8b7c1a..2fa8304171e0 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -14,7 +14,6 @@
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/dmi.h>
18#include "sbshc.h" 17#include "sbshc.h"
19 18
20#define PREFIX "ACPI: " 19#define PREFIX "ACPI: "
@@ -30,6 +29,7 @@ struct acpi_smb_hc {
30 u8 query_bit; 29 u8 query_bit;
31 smbus_alarm_callback callback; 30 smbus_alarm_callback callback;
32 void *context; 31 void *context;
32 bool done;
33}; 33};
34 34
35static int acpi_smbus_hc_add(struct acpi_device *device); 35static int acpi_smbus_hc_add(struct acpi_device *device);
@@ -88,8 +88,6 @@ enum acpi_smb_offset {
88 ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */ 88 ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */
89}; 89};
90 90
91static bool macbook;
92
93static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data) 91static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
94{ 92{
95 return ec_read(hc->offset + address, data); 93 return ec_read(hc->offset + address, data);
@@ -100,27 +98,11 @@ static inline int smb_hc_write(struct acpi_smb_hc *hc, u8 address, u8 data)
100 return ec_write(hc->offset + address, data); 98 return ec_write(hc->offset + address, data);
101} 99}
102 100
103static inline int smb_check_done(struct acpi_smb_hc *hc)
104{
105 union acpi_smb_status status = {.raw = 0};
106 smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw);
107 return status.fields.done && (status.fields.status == SMBUS_OK);
108}
109
110static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout) 101static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout)
111{ 102{
112 if (wait_event_timeout(hc->wait, smb_check_done(hc), 103 if (wait_event_timeout(hc->wait, hc->done, msecs_to_jiffies(timeout)))
113 msecs_to_jiffies(timeout)))
114 return 0; 104 return 0;
115 /* 105 return -ETIME;
116 * After the timeout happens, OS will try to check the status of SMbus.
117 * If the status is what OS expected, it will be regarded as the bogus
118 * timeout.
119 */
120 if (smb_check_done(hc))
121 return 0;
122 else
123 return -ETIME;
124} 106}
125 107
126static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol, 108static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
@@ -135,8 +117,7 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
135 } 117 }
136 118
137 mutex_lock(&hc->lock); 119 mutex_lock(&hc->lock);
138 if (macbook) 120 hc->done = false;
139 udelay(5);
140 if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp)) 121 if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
141 goto end; 122 goto end;
142 if (temp) { 123 if (temp) {
@@ -235,8 +216,10 @@ static int smbus_alarm(void *context)
235 if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw)) 216 if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw))
236 return 0; 217 return 0;
237 /* Check if it is only a completion notify */ 218 /* Check if it is only a completion notify */
238 if (status.fields.done) 219 if (status.fields.done && status.fields.status == SMBUS_OK) {
220 hc->done = true;
239 wake_up(&hc->wait); 221 wake_up(&hc->wait);
222 }
240 if (!status.fields.alarm) 223 if (!status.fields.alarm)
241 return 0; 224 return 0;
242 mutex_lock(&hc->lock); 225 mutex_lock(&hc->lock);
@@ -262,29 +245,12 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
262 acpi_handle handle, acpi_ec_query_func func, 245 acpi_handle handle, acpi_ec_query_func func,
263 void *data); 246 void *data);
264 247
265static int macbook_dmi_match(const struct dmi_system_id *d)
266{
267 pr_debug("Detected MacBook, enabling workaround\n");
268 macbook = true;
269 return 0;
270}
271
272static struct dmi_system_id acpi_smbus_dmi_table[] = {
273 { macbook_dmi_match, "Apple MacBook", {
274 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
275 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") },
276 },
277 { },
278};
279
280static int acpi_smbus_hc_add(struct acpi_device *device) 248static int acpi_smbus_hc_add(struct acpi_device *device)
281{ 249{
282 int status; 250 int status;
283 unsigned long long val; 251 unsigned long long val;
284 struct acpi_smb_hc *hc; 252 struct acpi_smb_hc *hc;
285 253
286 dmi_check_system(acpi_smbus_dmi_table);
287
288 if (!device) 254 if (!device)
289 return -EINVAL; 255 return -EINVAL;
290 256
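The driver now latches completion in hc->done from the alarm handler and waits on that flag, instead of re-reading EC status after a wakeup or timeout. The same producer/consumer shape in plain pthreads, with the timeout handling elided (the kernel side uses wait_event_timeout()):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool done;

/* Alarm-handler side: record completion, then wake the waiter. */
static void *alarm_handler(void *arg)
{
	pthread_mutex_lock(&lock);
	done = true;			/* like hc->done = true */
	pthread_cond_signal(&cond);	/* like wake_up(&hc->wait) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	done = false;			/* like hc->done = false before the transaction */
	pthread_create(&t, NULL, alarm_handler, NULL);

	pthread_mutex_lock(&lock);
	while (!done)			/* like wait_event_timeout(hc->wait, hc->done, ...) */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	puts("transaction complete");
	return 0;
}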
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index eb6e67451dec..0d77cd6fd8d1 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -68,6 +68,9 @@ int dev_pm_set_wake_irq(struct device *dev, int irq)
68 struct wake_irq *wirq; 68 struct wake_irq *wirq;
69 int err; 69 int err;
70 70
71 if (irq < 0)
72 return -EINVAL;
73
71 wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); 74 wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
72 if (!wirq) 75 if (!wirq)
73 return -ENOMEM; 76 return -ENOMEM;
@@ -167,6 +170,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
167 struct wake_irq *wirq; 170 struct wake_irq *wirq;
168 int err; 171 int err;
169 172
173 if (irq < 0)
174 return -EINVAL;
175
170 wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); 176 wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
171 if (!wirq) 177 if (!wirq)
172 return -ENOMEM; 178 return -ENOMEM;
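The new irq < 0 checks matter because helpers such as platform_get_irq() return a negative errno on failure, and callers sometimes pass that straight through. A toy demonstration of the hazard the guard closes; the two functions are stand-ins, not the kernel API:

#include <stdio.h>

/* Stand-in for platform_get_irq(): negative errno on failure. */
static int get_irq(int ok) { return ok ? 17 : -6 /* -ENXIO */; }

/* Stand-in for dev_pm_set_wake_irq() with the new guard. */
static int set_wake_irq(int irq)
{
	if (irq < 0)
		return -22;	/* -EINVAL, instead of registering -ENXIO as an IRQ */
	printf("wake irq %d registered\n", irq);
	return 0;
}

int main(void)
{
	printf("good path: %d\n", set_wake_irq(get_irq(1)));
	printf("error now rejected: %d\n", set_wake_irq(get_irq(0)));
	return 0;
}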
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 654f6f36a071..55fe9020459f 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -412,18 +412,42 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
412 return rv; 412 return rv;
413} 413}
414 414
415static void start_check_enables(struct smi_info *smi_info) 415static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
416{
417 smi_info->last_timeout_jiffies = jiffies;
418 mod_timer(&smi_info->si_timer, new_val);
419 smi_info->timer_running = true;
420}
421
422/*
423 * Start a new message and (re)start the timer and thread.
424 */
425static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
426 unsigned int size)
427{
428 smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
429
430 if (smi_info->thread)
431 wake_up_process(smi_info->thread);
432
433 smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
434}
435
436static void start_check_enables(struct smi_info *smi_info, bool start_timer)
416{ 437{
417 unsigned char msg[2]; 438 unsigned char msg[2];
418 439
419 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 440 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
420 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; 441 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
421 442
422 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); 443 if (start_timer)
444 start_new_msg(smi_info, msg, 2);
445 else
446 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
423 smi_info->si_state = SI_CHECKING_ENABLES; 447 smi_info->si_state = SI_CHECKING_ENABLES;
424} 448}
425 449
426static void start_clear_flags(struct smi_info *smi_info) 450static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
427{ 451{
428 unsigned char msg[3]; 452 unsigned char msg[3];
429 453
@@ -432,7 +456,10 @@ static void start_clear_flags(struct smi_info *smi_info)
432 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; 456 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
433 msg[2] = WDT_PRE_TIMEOUT_INT; 457 msg[2] = WDT_PRE_TIMEOUT_INT;
434 458
435 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); 459 if (start_timer)
460 start_new_msg(smi_info, msg, 3);
461 else
462 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
436 smi_info->si_state = SI_CLEARING_FLAGS; 463 smi_info->si_state = SI_CLEARING_FLAGS;
437} 464}
438 465
@@ -442,10 +469,8 @@ static void start_getting_msg_queue(struct smi_info *smi_info)
442 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; 469 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
443 smi_info->curr_msg->data_size = 2; 470 smi_info->curr_msg->data_size = 2;
444 471
445 smi_info->handlers->start_transaction( 472 start_new_msg(smi_info, smi_info->curr_msg->data,
446 smi_info->si_sm, 473 smi_info->curr_msg->data_size);
447 smi_info->curr_msg->data,
448 smi_info->curr_msg->data_size);
449 smi_info->si_state = SI_GETTING_MESSAGES; 474 smi_info->si_state = SI_GETTING_MESSAGES;
450} 475}
451 476
@@ -455,20 +480,11 @@ static void start_getting_events(struct smi_info *smi_info)
455 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; 480 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
456 smi_info->curr_msg->data_size = 2; 481 smi_info->curr_msg->data_size = 2;
457 482
458 smi_info->handlers->start_transaction( 483 start_new_msg(smi_info, smi_info->curr_msg->data,
459 smi_info->si_sm, 484 smi_info->curr_msg->data_size);
460 smi_info->curr_msg->data,
461 smi_info->curr_msg->data_size);
462 smi_info->si_state = SI_GETTING_EVENTS; 485 smi_info->si_state = SI_GETTING_EVENTS;
463} 486}
464 487
465static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
466{
467 smi_info->last_timeout_jiffies = jiffies;
468 mod_timer(&smi_info->si_timer, new_val);
469 smi_info->timer_running = true;
470}
471
472/* 488/*
473 * When we have a situation where we run out of memory and cannot 489 * When we have a situation where we run out of memory and cannot
474 * allocate messages, we just leave them in the BMC and run the system 490 * allocate messages, we just leave them in the BMC and run the system
@@ -478,11 +494,11 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
478 * Note that we cannot just use disable_irq(), since the interrupt may 494 * Note that we cannot just use disable_irq(), since the interrupt may
479 * be shared. 495 * be shared.
480 */ 496 */
481static inline bool disable_si_irq(struct smi_info *smi_info) 497static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
482{ 498{
483 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { 499 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
484 smi_info->interrupt_disabled = true; 500 smi_info->interrupt_disabled = true;
485 start_check_enables(smi_info); 501 start_check_enables(smi_info, start_timer);
486 return true; 502 return true;
487 } 503 }
488 return false; 504 return false;
@@ -492,7 +508,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
492{ 508{
493 if ((smi_info->irq) && (smi_info->interrupt_disabled)) { 509 if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
494 smi_info->interrupt_disabled = false; 510 smi_info->interrupt_disabled = false;
495 start_check_enables(smi_info); 511 start_check_enables(smi_info, true);
496 return true; 512 return true;
497 } 513 }
498 return false; 514 return false;
@@ -510,7 +526,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
510 526
511 msg = ipmi_alloc_smi_msg(); 527 msg = ipmi_alloc_smi_msg();
512 if (!msg) { 528 if (!msg) {
513 if (!disable_si_irq(smi_info)) 529 if (!disable_si_irq(smi_info, true))
514 smi_info->si_state = SI_NORMAL; 530 smi_info->si_state = SI_NORMAL;
515 } else if (enable_si_irq(smi_info)) { 531 } else if (enable_si_irq(smi_info)) {
516 ipmi_free_smi_msg(msg); 532 ipmi_free_smi_msg(msg);
@@ -526,7 +542,7 @@ static void handle_flags(struct smi_info *smi_info)
526 /* Watchdog pre-timeout */ 542 /* Watchdog pre-timeout */
527 smi_inc_stat(smi_info, watchdog_pretimeouts); 543 smi_inc_stat(smi_info, watchdog_pretimeouts);
528 544
529 start_clear_flags(smi_info); 545 start_clear_flags(smi_info, true);
530 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; 546 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
531 if (smi_info->intf) 547 if (smi_info->intf)
532 ipmi_smi_watchdog_pretimeout(smi_info->intf); 548 ipmi_smi_watchdog_pretimeout(smi_info->intf);
@@ -879,8 +895,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
879 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 895 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
880 msg[1] = IPMI_GET_MSG_FLAGS_CMD; 896 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
881 897
882 smi_info->handlers->start_transaction( 898 start_new_msg(smi_info, msg, 2);
883 smi_info->si_sm, msg, 2);
884 smi_info->si_state = SI_GETTING_FLAGS; 899 smi_info->si_state = SI_GETTING_FLAGS;
885 goto restart; 900 goto restart;
886 } 901 }
@@ -910,7 +925,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
910 * disable and messages disabled. 925 * disable and messages disabled.
911 */ 926 */
912 if (smi_info->supports_event_msg_buff || smi_info->irq) { 927 if (smi_info->supports_event_msg_buff || smi_info->irq) {
913 start_check_enables(smi_info); 928 start_check_enables(smi_info, true);
914 } else { 929 } else {
915 smi_info->curr_msg = alloc_msg_handle_irq(smi_info); 930 smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
916 if (!smi_info->curr_msg) 931 if (!smi_info->curr_msg)
@@ -920,6 +935,13 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
920 } 935 }
921 goto restart; 936 goto restart;
922 } 937 }
938
939 if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
940 /* Ok, if it fails, the timer will just go off. */
941 if (del_timer(&smi_info->si_timer))
942 smi_info->timer_running = false;
943 }
944
923 out: 945 out:
924 return si_sm_result; 946 return si_sm_result;
925} 947}
@@ -2560,6 +2582,7 @@ static const struct of_device_id of_ipmi_match[] = {
2560 .data = (void *)(unsigned long) SI_BT }, 2582 .data = (void *)(unsigned long) SI_BT },
2561 {}, 2583 {},
2562}; 2584};
2585MODULE_DEVICE_TABLE(of, of_ipmi_match);
2563 2586
2564static int of_ipmi_probe(struct platform_device *dev) 2587static int of_ipmi_probe(struct platform_device *dev)
2565{ 2588{
@@ -2646,7 +2669,6 @@ static int of_ipmi_probe(struct platform_device *dev)
2646 } 2669 }
2647 return 0; 2670 return 0;
2648} 2671}
2649MODULE_DEVICE_TABLE(of, of_ipmi_match);
2650#else 2672#else
2651#define of_ipmi_match NULL 2673#define of_ipmi_match NULL
2652static int of_ipmi_probe(struct platform_device *dev) 2674static int of_ipmi_probe(struct platform_device *dev)
@@ -3613,7 +3635,7 @@ static int try_smi_init(struct smi_info *new_smi)
3613 * Start clearing the flags before we enable interrupts or the 3635 * Start clearing the flags before we enable interrupts or the
3614 * timer to avoid racing with the timer. 3636 * timer to avoid racing with the timer.
3615 */ 3637 */
3616 start_clear_flags(new_smi); 3638 start_clear_flags(new_smi, false);
3617 3639
3618 /* 3640 /*
3619 * IRQ is defined to be set when non-zero. req_events will 3641 * IRQ is defined to be set when non-zero. req_events will
@@ -3908,7 +3930,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
3908 poll(to_clean); 3930 poll(to_clean);
3909 schedule_timeout_uninterruptible(1); 3931 schedule_timeout_uninterruptible(1);
3910 } 3932 }
3911 disable_si_irq(to_clean); 3933 disable_si_irq(to_clean, false);
3912 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { 3934 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3913 poll(to_clean); 3935 poll(to_clean);
3914 schedule_timeout_uninterruptible(1); 3936 schedule_timeout_uninterruptible(1);
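start_new_msg() folds the repeated "arm the timer, wake the kthread, start the transaction" sequence into one helper, so a transaction can no longer be started with a dead timer. A toy sketch of the consolidation; the three steps are stand-ins for the driver calls:

#include <stdio.h>

static void arm_timer(void)   { puts("timer (re)armed"); }
static void wake_thread(void) { puts("kthread woken"); }
static void start_transaction(const unsigned char *msg, unsigned int size)
{
	printf("transaction started, %u bytes\n", size);
}

/* One entry point instead of several copies of the same sequence. */
static void start_new_msg(const unsigned char *msg, unsigned int size)
{
	arm_timer();
	wake_thread();
	start_transaction(msg, size);
}

int main(void)
{
	unsigned char msg[2] = { 0x06 << 2, 0x31 };	/* illustrative cmd bytes */

	start_new_msg(msg, sizeof(msg));
	return 0;
}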
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 0ac3bd1a5497..096f0cef4da1 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -153,6 +153,9 @@ static int timeout = 10;
153/* The pre-timeout is disabled by default. */ 153/* The pre-timeout is disabled by default. */
154static int pretimeout; 154static int pretimeout;
155 155
156/* Default timeout to set on panic */
157static int panic_wdt_timeout = 255;
158
156/* Default action is to reset the board on a timeout. */ 159/* Default action is to reset the board on a timeout. */
157static unsigned char action_val = WDOG_TIMEOUT_RESET; 160static unsigned char action_val = WDOG_TIMEOUT_RESET;
158 161
@@ -293,6 +296,9 @@ MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
293module_param(pretimeout, timeout, 0644); 296module_param(pretimeout, timeout, 0644);
294MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds."); 297MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
295 298
299module_param(panic_wdt_timeout, timeout, 0644);
300MODULE_PARM_DESC(panic_wdt_timeout, "Timeout value on kernel panic in seconds.");
301
296module_param_cb(action, &param_ops_str, action_op, 0644); 302module_param_cb(action, &param_ops_str, action_op, 0644);
297MODULE_PARM_DESC(action, "Timeout action. One of: " 303MODULE_PARM_DESC(action, "Timeout action. One of: "
298 "reset, none, power_cycle, power_off."); 304 "reset, none, power_cycle, power_off.");
@@ -1189,7 +1195,7 @@ static int wdog_panic_handler(struct notifier_block *this,
1189 /* Make sure we do this only once. */ 1195 /* Make sure we do this only once. */
1190 panic_event_handled = 1; 1196 panic_event_handled = 1;
1191 1197
1192 timeout = 255; 1198 timeout = panic_wdt_timeout;
1193 pretimeout = 0; 1199 pretimeout = 0;
1194 panic_halt_ipmi_set_timeout(); 1200 panic_halt_ipmi_set_timeout();
1195 } 1201 }
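Since panic_wdt_timeout is declared with mode 0644, the panic timeout can be tuned without rebuilding: for example via ipmi_watchdog.panic_wdt_timeout=120 on the kernel command line, or at runtime through /sys/module/ipmi_watchdog/parameters/panic_wdt_timeout. The 255-second default preserves the value that was previously hard-coded in the panic handler.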
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 71cfdf7c9708..2eb5f0efae90 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -1,4 +1,5 @@
1menu "Clock Source drivers" 1menu "Clock Source drivers"
2 depends on !ARCH_USES_GETTIMEOFFSET
2 3
3config CLKSRC_OF 4config CLKSRC_OF
4 bool 5 bool
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
index 10202f1fdfd7..517e1c7624d4 100644
--- a/drivers/clocksource/fsl_ftm_timer.c
+++ b/drivers/clocksource/fsl_ftm_timer.c
@@ -203,7 +203,7 @@ static int __init ftm_clockevent_init(unsigned long freq, int irq)
203 int err; 203 int err;
204 204
205 ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN); 205 ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN);
206 ftm_writel(~0UL, priv->clkevt_base + FTM_MOD); 206 ftm_writel(~0u, priv->clkevt_base + FTM_MOD);
207 207
208 ftm_reset_counter(priv->clkevt_base); 208 ftm_reset_counter(priv->clkevt_base);
209 209
@@ -230,7 +230,7 @@ static int __init ftm_clocksource_init(unsigned long freq)
230 int err; 230 int err;
231 231
232 ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN); 232 ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN);
233 ftm_writel(~0UL, priv->clksrc_base + FTM_MOD); 233 ftm_writel(~0u, priv->clksrc_base + FTM_MOD);
234 234
235 ftm_reset_counter(priv->clksrc_base); 235 ftm_reset_counter(priv->clksrc_base);
236 236
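The ~0UL to ~0u change is about integer width: on a 64-bit build ~0UL is a 64-bit all-ones value, while the FTM_MOD register is 32 bits wide, so ~0u states the intended 0xffffffff directly. A two-line demonstration:

#include <stdio.h>

int main(void)
{
	printf("~0UL = %#lx\n", ~0UL);	/* 0xffffffffffffffff on LP64 */
	printf("~0u  = %#x\n", ~0u);	/* 0xffffffff everywhere */
	return 0;
}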
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 1582c1c016b0..8014c2307332 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -84,6 +84,7 @@ config ARM_KIRKWOOD_CPUFREQ
84config ARM_MT8173_CPUFREQ 84config ARM_MT8173_CPUFREQ
85 bool "Mediatek MT8173 CPUFreq support" 85 bool "Mediatek MT8173 CPUFreq support"
86 depends on ARCH_MEDIATEK && REGULATOR 86 depends on ARCH_MEDIATEK && REGULATOR
87 depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST)
87 depends on !CPU_THERMAL || THERMAL=y 88 depends on !CPU_THERMAL || THERMAL=y
88 select PM_OPP 89 select PM_OPP
89 help 90 help
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index adbd1de1cea5..c59bdcb83217 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -5,7 +5,6 @@
5config X86_INTEL_PSTATE 5config X86_INTEL_PSTATE
6 bool "Intel P state control" 6 bool "Intel P state control"
7 depends on X86 7 depends on X86
8 select ACPI_PROCESSOR if ACPI
9 help 8 help
10 This driver provides a P state for Intel core processors. 9 This driver provides a P state for Intel core processors.
11 The driver implements an internal governor and will become 10 The driver implements an internal governor and will become
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 2e31d097def6..001a532e342e 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -34,14 +34,10 @@
34#include <asm/cpu_device_id.h> 34#include <asm/cpu_device_id.h>
35#include <asm/cpufeature.h> 35#include <asm/cpufeature.h>
36 36
37#if IS_ENABLED(CONFIG_ACPI) 37#define ATOM_RATIOS 0x66a
38#include <acpi/processor.h> 38#define ATOM_VIDS 0x66b
39#endif 39#define ATOM_TURBO_RATIOS 0x66c
40 40#define ATOM_TURBO_VIDS 0x66d
41#define BYT_RATIOS 0x66a
42#define BYT_VIDS 0x66b
43#define BYT_TURBO_RATIOS 0x66c
44#define BYT_TURBO_VIDS 0x66d
45 41
46#define FRAC_BITS 8 42#define FRAC_BITS 8
47#define int_tofp(X) ((int64_t)(X) << FRAC_BITS) 43#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
@@ -117,9 +113,6 @@ struct cpudata {
117 u64 prev_mperf; 113 u64 prev_mperf;
118 u64 prev_tsc; 114 u64 prev_tsc;
119 struct sample sample; 115 struct sample sample;
120#if IS_ENABLED(CONFIG_ACPI)
121 struct acpi_processor_performance acpi_perf_data;
122#endif
123}; 116};
124 117
125static struct cpudata **all_cpu_data; 118static struct cpudata **all_cpu_data;
@@ -150,7 +143,6 @@ struct cpu_defaults {
150static struct pstate_adjust_policy pid_params; 143static struct pstate_adjust_policy pid_params;
151static struct pstate_funcs pstate_funcs; 144static struct pstate_funcs pstate_funcs;
152static int hwp_active; 145static int hwp_active;
153static int no_acpi_perf;
154 146
155struct perf_limits { 147struct perf_limits {
156 int no_turbo; 148 int no_turbo;
@@ -163,8 +155,6 @@ struct perf_limits {
163 int max_sysfs_pct; 155 int max_sysfs_pct;
164 int min_policy_pct; 156 int min_policy_pct;
165 int min_sysfs_pct; 157 int min_sysfs_pct;
166 int max_perf_ctl;
167 int min_perf_ctl;
168}; 158};
169 159
170static struct perf_limits performance_limits = { 160static struct perf_limits performance_limits = {
@@ -191,8 +181,6 @@ static struct perf_limits powersave_limits = {
191 .max_sysfs_pct = 100, 181 .max_sysfs_pct = 100,
192 .min_policy_pct = 0, 182 .min_policy_pct = 0,
193 .min_sysfs_pct = 0, 183 .min_sysfs_pct = 0,
194 .max_perf_ctl = 0,
195 .min_perf_ctl = 0,
196}; 184};
197 185
198#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE 186#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
@@ -201,153 +189,6 @@ static struct perf_limits *limits = &performance_limits;
201static struct perf_limits *limits = &powersave_limits; 189static struct perf_limits *limits = &powersave_limits;
202#endif 190#endif
203 191
204#if IS_ENABLED(CONFIG_ACPI)
205/*
206 * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and
207 * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and
208 * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state
209 * ratio, out of it only high 8 bits are used. For example 0x1700 is setting
210 * target ratio 0x17. The _PSS control value stores in a format which can be
211 * directly written to PERF_CTL MSR. But in intel_pstate driver this shift
212 * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()).
213 * This function converts the _PSS control value to intel pstate driver format
214 * for comparison and assignment.
215 */
216static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
217{
218 return cpu->acpi_perf_data.states[index].control >> 8;
219}
220
221static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
222{
223 struct cpudata *cpu;
224 int ret;
225 bool turbo_absent = false;
226 int max_pstate_index;
227 int min_pss_ctl, max_pss_ctl, turbo_pss_ctl;
228 int i;
229
230 cpu = all_cpu_data[policy->cpu];
231
232 pr_debug("intel_pstate: default limits 0x%x 0x%x 0x%x\n",
233 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
234 cpu->pstate.turbo_pstate);
235
236 if (!cpu->acpi_perf_data.shared_cpu_map &&
237 zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map,
238 GFP_KERNEL, cpu_to_node(policy->cpu))) {
239 return -ENOMEM;
240 }
241
242 ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
243 policy->cpu);
244 if (ret)
245 return ret;
246
247 /*
248 * Check if the control value in _PSS is for PERF_CTL MSR, which should
249 * guarantee that the states returned by it map to the states in our
250 * list directly.
251 */
252 if (cpu->acpi_perf_data.control_register.space_id !=
253 ACPI_ADR_SPACE_FIXED_HARDWARE)
254 return -EIO;
255
256 pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu);
257 for (i = 0; i < cpu->acpi_perf_data.state_count; i++)
258 pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n",
259 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
260 (u32) cpu->acpi_perf_data.states[i].core_frequency,
261 (u32) cpu->acpi_perf_data.states[i].power,
262 (u32) cpu->acpi_perf_data.states[i].control);
263
264 /*
265 * If there is only one entry _PSS, simply ignore _PSS and continue as
266 * usual without taking _PSS into account
267 */
268 if (cpu->acpi_perf_data.state_count < 2)
269 return 0;
270
271 turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
272 min_pss_ctl = convert_to_native_pstate_format(cpu,
273 cpu->acpi_perf_data.state_count - 1);
274 /* Check if there is a turbo freq in _PSS */
275 if (turbo_pss_ctl <= cpu->pstate.max_pstate &&
276 turbo_pss_ctl > cpu->pstate.min_pstate) {
277 pr_debug("intel_pstate: no turbo range exists in _PSS\n");
278 limits->no_turbo = limits->turbo_disabled = 1;
279 cpu->pstate.turbo_pstate = cpu->pstate.max_pstate;
280 turbo_absent = true;
281 }
282
283 /* Check if the max non turbo p state < Intel P state max */
284 max_pstate_index = turbo_absent ? 0 : 1;
285 max_pss_ctl = convert_to_native_pstate_format(cpu, max_pstate_index);
286 if (max_pss_ctl < cpu->pstate.max_pstate &&
287 max_pss_ctl > cpu->pstate.min_pstate)
288 cpu->pstate.max_pstate = max_pss_ctl;
289
290 /* check If min perf > Intel P State min */
291 if (min_pss_ctl > cpu->pstate.min_pstate &&
292 min_pss_ctl < cpu->pstate.max_pstate) {
293 cpu->pstate.min_pstate = min_pss_ctl;
294 policy->cpuinfo.min_freq = min_pss_ctl * cpu->pstate.scaling;
295 }
296
297 if (turbo_absent)
298 policy->cpuinfo.max_freq = cpu->pstate.max_pstate *
299 cpu->pstate.scaling;
300 else {
301 policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate *
302 cpu->pstate.scaling;
303 /*
304 * The _PSS table doesn't contain whole turbo frequency range.
305 * This just contains +1 MHz above the max non turbo frequency,
306 * with control value corresponding to max turbo ratio. But
307 * when cpufreq set policy is called, it will call with this
308 * max frequency, which will cause a reduced performance as
309 * this driver uses real max turbo frequency as the max
310 frequency. So correct this frequency in _PSS table to
311 * correct max turbo frequency based on the turbo ratio.
312 * Also need to convert to MHz as _PSS freq is in MHz.
313 */
314 cpu->acpi_perf_data.states[0].core_frequency =
315 turbo_pss_ctl * 100;
316 }
317
318 pr_debug("intel_pstate: Updated limits using _PSS 0x%x 0x%x 0x%x\n",
319 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
320 cpu->pstate.turbo_pstate);
321 pr_debug("intel_pstate: policy max_freq=%d kHz min_freq = %d kHz\n",
322 policy->cpuinfo.max_freq, policy->cpuinfo.min_freq);
323
324 return 0;
325}
326
327static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
328{
329 struct cpudata *cpu;
330
331 if (!no_acpi_perf)
332 return 0;
333
334 cpu = all_cpu_data[policy->cpu];
335 acpi_processor_unregister_performance(policy->cpu);
336 return 0;
337}
338
339#else
340static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
341{
342 return 0;
343}
344
345static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
346{
347 return 0;
348}
349#endif
350
351static inline void pid_reset(struct _pid *pid, int setpoint, int busy, 192static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
352 int deadband, int integral) { 193 int deadband, int integral) {
353 pid->setpoint = setpoint; 194 pid->setpoint = setpoint;
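This removal (together with the ACPI_PROCESSOR select dropped from Kconfig.x86 above and the perf_ctl fields removed from struct perf_limits) reverts intel_pstate's short-lived _PSS integration: the driver no longer registers ACPI performance data or clamps P states to _PSS control values, and the matching "no_acpi" early parameter and intel_pstate_cpu_exit() callback disappear further down. For reference, the removed conversion relied on the PERF_CTL layout, in which the target ratio occupies bits 15:8; an illustrative sketch:

    /* illustrative only: _PSS stores a PERF_CTL-formatted control value, so
     * shifting it right by 8 recovers the bare ratio, e.g. 0x1700 -> 0x17 */
    static int pss_control_to_ratio(u64 control)
    {
            return (control >> 8) & 0xff;
    }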
@@ -687,31 +528,31 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
687 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); 528 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
688} 529}
689 530
690static int byt_get_min_pstate(void) 531static int atom_get_min_pstate(void)
691{ 532{
692 u64 value; 533 u64 value;
693 534
694 rdmsrl(BYT_RATIOS, value); 535 rdmsrl(ATOM_RATIOS, value);
695 return (value >> 8) & 0x7F; 536 return (value >> 8) & 0x7F;
696} 537}
697 538
698static int byt_get_max_pstate(void) 539static int atom_get_max_pstate(void)
699{ 540{
700 u64 value; 541 u64 value;
701 542
702 rdmsrl(BYT_RATIOS, value); 543 rdmsrl(ATOM_RATIOS, value);
703 return (value >> 16) & 0x7F; 544 return (value >> 16) & 0x7F;
704} 545}
705 546
706static int byt_get_turbo_pstate(void) 547static int atom_get_turbo_pstate(void)
707{ 548{
708 u64 value; 549 u64 value;
709 550
710 rdmsrl(BYT_TURBO_RATIOS, value); 551 rdmsrl(ATOM_TURBO_RATIOS, value);
711 return value & 0x7F; 552 return value & 0x7F;
712} 553}
713 554
714static void byt_set_pstate(struct cpudata *cpudata, int pstate) 555static void atom_set_pstate(struct cpudata *cpudata, int pstate)
715{ 556{
716 u64 val; 557 u64 val;
717 int32_t vid_fp; 558 int32_t vid_fp;
@@ -736,27 +577,42 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
736 wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); 577 wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
737} 578}
738 579
739#define BYT_BCLK_FREQS 5 580static int silvermont_get_scaling(void)
740static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
741
742static int byt_get_scaling(void)
743{ 581{
744 u64 value; 582 u64 value;
745 int i; 583 int i;
584 /* Defined in Table 35-6 from SDM (Sept 2015) */
585 static int silvermont_freq_table[] = {
586 83300, 100000, 133300, 116700, 80000};
746 587
747 rdmsrl(MSR_FSB_FREQ, value); 588 rdmsrl(MSR_FSB_FREQ, value);
748 i = value & 0x3; 589 i = value & 0x7;
590 WARN_ON(i > 4);
749 591
750 BUG_ON(i > BYT_BCLK_FREQS); 592 return silvermont_freq_table[i];
593}
751 594
752 return byt_freq_table[i] * 100; 595static int airmont_get_scaling(void)
596{
597 u64 value;
598 int i;
599 /* Defined in Table 35-10 from SDM (Sept 2015) */
600 static int airmont_freq_table[] = {
601 83300, 100000, 133300, 116700, 80000,
602 93300, 90000, 88900, 87500};
603
604 rdmsrl(MSR_FSB_FREQ, value);
605 i = value & 0xF;
606 WARN_ON(i > 8);
607
608 return airmont_freq_table[i];
753} 609}
754 610
755static void byt_get_vid(struct cpudata *cpudata) 611static void atom_get_vid(struct cpudata *cpudata)
756{ 612{
757 u64 value; 613 u64 value;
758 614
759 rdmsrl(BYT_VIDS, value); 615 rdmsrl(ATOM_VIDS, value);
760 cpudata->vid.min = int_tofp((value >> 8) & 0x7f); 616 cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
761 cpudata->vid.max = int_tofp((value >> 16) & 0x7f); 617 cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
762 cpudata->vid.ratio = div_fp( 618 cpudata->vid.ratio = div_fp(
@@ -764,7 +620,7 @@ static void byt_get_vid(struct cpudata *cpudata)
764 int_tofp(cpudata->pstate.max_pstate - 620 int_tofp(cpudata->pstate.max_pstate -
765 cpudata->pstate.min_pstate)); 621 cpudata->pstate.min_pstate));
766 622
767 rdmsrl(BYT_TURBO_VIDS, value); 623 rdmsrl(ATOM_TURBO_VIDS, value);
768 cpudata->vid.turbo = value & 0x7f; 624 cpudata->vid.turbo = value & 0x7f;
769} 625}
770 626
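The byt_* -> atom_* rename reflects that these MSRs are common to the Atom cores, while the single scaling helper splits in two because Silvermont and Airmont decode MSR_FSB_FREQ differently: a 3-bit field (mask 0x7) with five defined values versus a 4-bit field (mask 0xF) with nine, per the SDM tables cited in the comments; the old code masked with 0x3 and truncated the field. The new tables also store the bus clock directly in kHz, replacing the old "entry * 100" arithmetic; for example:

    i = value & 0x7;                    /* Silvermont: was (value & 0x3) */
    return silvermont_freq_table[2];    /* field value 2 -> 133300 kHz (133.3 MHz) */

One caveat worth noting: the WARN_ON() calls flag an out-of-range field but, unlike the BUG_ON() they replace, do not stop the subsequent table read.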
@@ -885,7 +741,7 @@ static struct cpu_defaults core_params = {
885 }, 741 },
886}; 742};
887 743
888static struct cpu_defaults byt_params = { 744static struct cpu_defaults silvermont_params = {
889 .pid_policy = { 745 .pid_policy = {
890 .sample_rate_ms = 10, 746 .sample_rate_ms = 10,
891 .deadband = 0, 747 .deadband = 0,
@@ -895,13 +751,33 @@ static struct cpu_defaults byt_params = {
895 .i_gain_pct = 4, 751 .i_gain_pct = 4,
896 }, 752 },
897 .funcs = { 753 .funcs = {
898 .get_max = byt_get_max_pstate, 754 .get_max = atom_get_max_pstate,
899 .get_max_physical = byt_get_max_pstate, 755 .get_max_physical = atom_get_max_pstate,
900 .get_min = byt_get_min_pstate, 756 .get_min = atom_get_min_pstate,
901 .get_turbo = byt_get_turbo_pstate, 757 .get_turbo = atom_get_turbo_pstate,
902 .set = byt_set_pstate, 758 .set = atom_set_pstate,
903 .get_scaling = byt_get_scaling, 759 .get_scaling = silvermont_get_scaling,
904 .get_vid = byt_get_vid, 760 .get_vid = atom_get_vid,
761 },
762};
763
764static struct cpu_defaults airmont_params = {
765 .pid_policy = {
766 .sample_rate_ms = 10,
767 .deadband = 0,
768 .setpoint = 60,
769 .p_gain_pct = 14,
770 .d_gain_pct = 0,
771 .i_gain_pct = 4,
772 },
773 .funcs = {
774 .get_max = atom_get_max_pstate,
775 .get_max_physical = atom_get_max_pstate,
776 .get_min = atom_get_min_pstate,
777 .get_turbo = atom_get_turbo_pstate,
778 .set = atom_set_pstate,
779 .get_scaling = airmont_get_scaling,
780 .get_vid = atom_get_vid,
905 }, 781 },
906}; 782};
907 783
@@ -938,23 +814,12 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
938 * policy, or by cpu specific default values determined through 814 * policy, or by cpu specific default values determined through
939 * experimentation. 815 * experimentation.
940 */ 816 */
941 if (limits->max_perf_ctl && limits->max_sysfs_pct >= 817 max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf));
942 limits->max_policy_pct) { 818 *max = clamp_t(int, max_perf_adj,
943 *max = limits->max_perf_ctl; 819 cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
944 } else {
945 max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf),
946 limits->max_perf));
947 *max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate,
948 cpu->pstate.turbo_pstate);
949 }
950 820
951 if (limits->min_perf_ctl) { 821 min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf));
952 *min = limits->min_perf_ctl; 822 *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
953 } else {
954 min_perf = fp_toint(mul_fp(int_tofp(max_perf),
955 limits->min_perf));
956 *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
957 }
958} 823}
959 824
960static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force) 825static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
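With the perf_ctl overrides gone, intel_pstate_get_min_max() is back to pure percentage scaling in 8.8 fixed point (FRAC_BITS = 8). A worked example of the retained path:

    /* turbo pstate 36, max_perf_pct 50:
     * limits->max_perf = div_fp(int_tofp(50), int_tofp(100)) = 128  (0.5 in 8.8)
     * max_perf_adj     = fp_toint(mul_fp(int_tofp(36), 128))  = 18  (50% of 36)
     * *max             = clamp_t(int, 18, min_pstate, turbo_pstate)
     */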
@@ -1153,7 +1018,7 @@ static void intel_pstate_timer_func(unsigned long __data)
1153static const struct x86_cpu_id intel_pstate_cpu_ids[] = { 1018static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
1154 ICPU(0x2a, core_params), 1019 ICPU(0x2a, core_params),
1155 ICPU(0x2d, core_params), 1020 ICPU(0x2d, core_params),
1156 ICPU(0x37, byt_params), 1021 ICPU(0x37, silvermont_params),
1157 ICPU(0x3a, core_params), 1022 ICPU(0x3a, core_params),
1158 ICPU(0x3c, core_params), 1023 ICPU(0x3c, core_params),
1159 ICPU(0x3d, core_params), 1024 ICPU(0x3d, core_params),
@@ -1162,7 +1027,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
1162 ICPU(0x45, core_params), 1027 ICPU(0x45, core_params),
1163 ICPU(0x46, core_params), 1028 ICPU(0x46, core_params),
1164 ICPU(0x47, core_params), 1029 ICPU(0x47, core_params),
1165 ICPU(0x4c, byt_params), 1030 ICPU(0x4c, airmont_params),
1166 ICPU(0x4e, core_params), 1031 ICPU(0x4e, core_params),
1167 ICPU(0x4f, core_params), 1032 ICPU(0x4f, core_params),
1168 ICPU(0x5e, core_params), 1033 ICPU(0x5e, core_params),
@@ -1229,12 +1094,6 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
1229 1094
1230static int intel_pstate_set_policy(struct cpufreq_policy *policy) 1095static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1231{ 1096{
1232#if IS_ENABLED(CONFIG_ACPI)
1233 struct cpudata *cpu;
1234 int i;
1235#endif
1236 pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__,
1237 policy->cpuinfo.max_freq, policy->max);
1238 if (!policy->cpuinfo.max_freq) 1097 if (!policy->cpuinfo.max_freq)
1239 return -ENODEV; 1098 return -ENODEV;
1240 1099
@@ -1270,23 +1129,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1270 limits->max_perf = div_fp(int_tofp(limits->max_perf_pct), 1129 limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
1271 int_tofp(100)); 1130 int_tofp(100));
1272 1131
1273#if IS_ENABLED(CONFIG_ACPI)
1274 cpu = all_cpu_data[policy->cpu];
1275 for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
1276 int control;
1277
1278 control = convert_to_native_pstate_format(cpu, i);
1279 if (control * cpu->pstate.scaling == policy->max)
1280 limits->max_perf_ctl = control;
1281 if (control * cpu->pstate.scaling == policy->min)
1282 limits->min_perf_ctl = control;
1283 }
1284
1285 pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n",
1286 policy->cpuinfo.max_freq, policy->max, limits->min_perf_ctl,
1287 limits->max_perf_ctl);
1288#endif
1289
1290 if (hwp_active) 1132 if (hwp_active)
1291 intel_pstate_hwp_set(); 1133 intel_pstate_hwp_set();
1292 1134
@@ -1341,30 +1183,18 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
1341 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; 1183 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
1342 policy->cpuinfo.max_freq = 1184 policy->cpuinfo.max_freq =
1343 cpu->pstate.turbo_pstate * cpu->pstate.scaling; 1185 cpu->pstate.turbo_pstate * cpu->pstate.scaling;
1344 if (!no_acpi_perf)
1345 intel_pstate_init_perf_limits(policy);
1346 /*
1347 * If there is no acpi perf data or error, we ignore and use Intel P
1348 * state calculated limits, So this is not fatal error.
1349 */
1350 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; 1186 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
1351 cpumask_set_cpu(policy->cpu, policy->cpus); 1187 cpumask_set_cpu(policy->cpu, policy->cpus);
1352 1188
1353 return 0; 1189 return 0;
1354} 1190}
1355 1191
1356static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
1357{
1358 return intel_pstate_exit_perf_limits(policy);
1359}
1360
1361static struct cpufreq_driver intel_pstate_driver = { 1192static struct cpufreq_driver intel_pstate_driver = {
1362 .flags = CPUFREQ_CONST_LOOPS, 1193 .flags = CPUFREQ_CONST_LOOPS,
1363 .verify = intel_pstate_verify_policy, 1194 .verify = intel_pstate_verify_policy,
1364 .setpolicy = intel_pstate_set_policy, 1195 .setpolicy = intel_pstate_set_policy,
1365 .get = intel_pstate_get, 1196 .get = intel_pstate_get,
1366 .init = intel_pstate_cpu_init, 1197 .init = intel_pstate_cpu_init,
1367 .exit = intel_pstate_cpu_exit,
1368 .stop_cpu = intel_pstate_stop_cpu, 1198 .stop_cpu = intel_pstate_stop_cpu,
1369 .name = "intel_pstate", 1199 .name = "intel_pstate",
1370}; 1200};
@@ -1406,6 +1236,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
1406} 1236}
1407 1237
1408#if IS_ENABLED(CONFIG_ACPI) 1238#if IS_ENABLED(CONFIG_ACPI)
1239#include <acpi/processor.h>
1409 1240
1410static bool intel_pstate_no_acpi_pss(void) 1241static bool intel_pstate_no_acpi_pss(void)
1411{ 1242{
@@ -1601,9 +1432,6 @@ static int __init intel_pstate_setup(char *str)
1601 force_load = 1; 1432 force_load = 1;
1602 if (!strcmp(str, "hwp_only")) 1433 if (!strcmp(str, "hwp_only"))
1603 hwp_only = 1; 1434 hwp_only = 1;
1604 if (!strcmp(str, "no_acpi"))
1605 no_acpi_perf = 1;
1606
1607 return 0; 1435 return 0;
1608} 1436}
1609early_param("intel_pstate", intel_pstate_setup); 1437early_param("intel_pstate", intel_pstate_setup);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 03856ad280b9..473d36d91644 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -198,7 +198,7 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
198 goto out_err; 198 goto out_err;
199 } 199 }
200 200
201 params_head = section_head->params; 201 params_head = section.params;
202 202
203 while (params_head) { 203 while (params_head) {
204 if (copy_from_user(&key_val, (void __user *)params_head, 204 if (copy_from_user(&key_val, (void __user *)params_head,
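The one-liner above fixes a direct dereference of a user-space pointer: section_head is a __user address (it is what copy_from_user() reads from), so reading section_head->params touched user memory from kernel context. `section` is presumably the kernel-side copy of that structure filled by an earlier copy_from_user() in this function, not shown here; the assumed pattern:

    struct adf_user_cfg_section section;    /* kernel copy (assumed name/type) */

    if (copy_from_user(&section, (void __user *)section_head, sizeof(section)))
            goto out_err;
    params_head = section.params;   /* read the kernel copy, not the user pointer */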
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 4e55239c7a30..53d22eb73b56 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -729,8 +729,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
729 return NULL; 729 return NULL;
730 730
731 dev_info(chan2dev(chan), 731 dev_info(chan2dev(chan),
732 "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n", 732 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
733 __func__, xt->src_start, xt->dst_start, xt->numf, 733 __func__, &xt->src_start, &xt->dst_start, xt->numf,
734 xt->frame_size, flags); 734 xt->frame_size, flags);
735 735
736 /* 736 /*
@@ -824,8 +824,8 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
824 u32 ctrla; 824 u32 ctrla;
825 u32 ctrlb; 825 u32 ctrlb;
826 826
827 dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n", 827 dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
828 dest, src, len, flags); 828 &dest, &src, len, flags);
829 829
830 if (unlikely(!len)) { 830 if (unlikely(!len)) {
831 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); 831 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
@@ -938,8 +938,8 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
938 void __iomem *vaddr; 938 void __iomem *vaddr;
939 dma_addr_t paddr; 939 dma_addr_t paddr;
940 940
941 dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__, 941 dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
942 dest, value, len, flags); 942 &dest, value, len, flags);
943 943
944 if (unlikely(!len)) { 944 if (unlikely(!len)) {
945 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); 945 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
@@ -1022,8 +1022,8 @@ atc_prep_dma_memset_sg(struct dma_chan *chan,
1022 dma_addr_t dest = sg_dma_address(sg); 1022 dma_addr_t dest = sg_dma_address(sg);
1023 size_t len = sg_dma_len(sg); 1023 size_t len = sg_dma_len(sg);
1024 1024
1025 dev_vdbg(chan2dev(chan), "%s: d0x%08x, l0x%zx\n", 1025 dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
1026 __func__, dest, len); 1026 __func__, &dest, len);
1027 1027
1028 if (!is_dma_fill_aligned(chan->device, dest, 0, len)) { 1028 if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
1029 dev_err(chan2dev(chan), "%s: buffer is not aligned\n", 1029 dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
@@ -1439,9 +1439,9 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1439 unsigned int periods = buf_len / period_len; 1439 unsigned int periods = buf_len / period_len;
1440 unsigned int i; 1440 unsigned int i;
1441 1441
1442 dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", 1442 dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
1443 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", 1443 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
1444 buf_addr, 1444 &buf_addr,
1445 periods, buf_len, period_len); 1445 periods, buf_len, period_len);
1446 1446
1447 if (unlikely(!atslave || !buf_len || !period_len)) { 1447 if (unlikely(!atslave || !buf_len || !period_len)) {
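This hunk, and the at_hdmac_regs.h and at_xdmac.c hunks below, switch debug prints from 0x%08x to %pad, the printk specifier for dma_addr_t. Since dma_addr_t can be 64 bits wide even on a 32-bit kernel (e.g. with LPAE), printing it through %x both truncates the value and triggers format warnings; %pad instead takes a pointer to the value and prints it at full width:

    dma_addr_t dest = sg_dma_address(sg);

    /* note the &: %pad consumes a dma_addr_t *, not the value itself */
    dev_dbg(dev, "dest=%pad\n", &dest);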
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index d1cfc8c876f9..7f58f06157f6 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -385,9 +385,9 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
385static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) 385static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
386{ 386{
387 dev_crit(chan2dev(&atchan->chan_common), 387 dev_crit(chan2dev(&atchan->chan_common),
388 " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", 388 " desc: s%pad d%pad ctrl0x%x:0x%x l0x%pad\n",
389 lli->saddr, lli->daddr, 389 &lli->saddr, &lli->daddr,
390 lli->ctrla, lli->ctrlb, lli->dscr); 390 lli->ctrla, lli->ctrlb, &lli->dscr);
391} 391}
392 392
393 393
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b5e132d4bae5..7f039de143f0 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -920,8 +920,8 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
920 desc->lld.mbr_cfg = chan_cc; 920 desc->lld.mbr_cfg = chan_cc;
921 921
922 dev_dbg(chan2dev(chan), 922 dev_dbg(chan2dev(chan),
923 "%s: lld: mbr_sa=0x%08x, mbr_da=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", 923 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
924 __func__, desc->lld.mbr_sa, desc->lld.mbr_da, 924 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
925 desc->lld.mbr_ubc, desc->lld.mbr_cfg); 925 desc->lld.mbr_ubc, desc->lld.mbr_cfg);
926 926
927 /* Chain lld. */ 927 /* Chain lld. */
@@ -953,8 +953,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
953 if ((xt->numf > 1) && (xt->frame_size > 1)) 953 if ((xt->numf > 1) && (xt->frame_size > 1))
954 return NULL; 954 return NULL;
955 955
956 dev_dbg(chan2dev(chan), "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n", 956 dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
957 __func__, xt->src_start, xt->dst_start, xt->numf, 957 __func__, &xt->src_start, &xt->dst_start, xt->numf,
958 xt->frame_size, flags); 958 xt->frame_size, flags);
959 959
960 src_addr = xt->src_start; 960 src_addr = xt->src_start;
@@ -1179,8 +1179,8 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
1179 desc->lld.mbr_cfg = chan_cc; 1179 desc->lld.mbr_cfg = chan_cc;
1180 1180
1181 dev_dbg(chan2dev(chan), 1181 dev_dbg(chan2dev(chan),
1182 "%s: lld: mbr_da=0x%08x, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", 1182 "%s: lld: mbr_da=%pad, mbr_ds=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
1183 __func__, desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc, 1183 __func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc,
1184 desc->lld.mbr_cfg); 1184 desc->lld.mbr_cfg);
1185 1185
1186 return desc; 1186 return desc;
@@ -1193,8 +1193,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
1193 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); 1193 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1194 struct at_xdmac_desc *desc; 1194 struct at_xdmac_desc *desc;
1195 1195
1196 dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n", 1196 dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
1197 __func__, dest, len, value, flags); 1197 __func__, &dest, len, value, flags);
1198 1198
1199 if (unlikely(!len)) 1199 if (unlikely(!len))
1200 return NULL; 1200 return NULL;
@@ -1229,8 +1229,8 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
1229 1229
1230 /* Prepare descriptors. */ 1230 /* Prepare descriptors. */
1231 for_each_sg(sgl, sg, sg_len, i) { 1231 for_each_sg(sgl, sg, sg_len, i) {
1232 dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n", 1232 dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
1233 __func__, sg_dma_address(sg), sg_dma_len(sg), 1233 __func__, &sg_dma_address(sg), sg_dma_len(sg),
1234 value, flags); 1234 value, flags);
1235 desc = at_xdmac_memset_create_desc(chan, atchan, 1235 desc = at_xdmac_memset_create_desc(chan, atchan,
1236 sg_dma_address(sg), 1236 sg_dma_address(sg),
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 6b03e4e84e6b..0675e268d577 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -107,7 +107,7 @@
107 107
108/* CCCFG register */ 108/* CCCFG register */
109#define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */ 109#define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */
110#define GET_NUM_QDMACH(x) (x & 0x70 >> 4) /* bits 4-6 */ 110#define GET_NUM_QDMACH(x) ((x & 0x70) >> 4) /* bits 4-6 */
111#define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */ 111#define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */
112#define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */ 112#define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */
113#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ 113#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */
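The GET_NUM_QDMACH() change is an operator-precedence fix: >> binds tighter than &, so the old macro never isolated bits 4-6. A minimal demonstration:

    #define OLD_GET_NUM_QDMACH(x)  (x & 0x70 >> 4)    /* parses as x & (0x70 >> 4), i.e. x & 0x7 */
    #define NEW_GET_NUM_QDMACH(x)  ((x & 0x70) >> 4)  /* bits 4-6, as the comment intends */

    /* for x = 0x20: old -> 0x20 & 0x7 = 0 (wrong); new -> (0x20 & 0x70) >> 4 = 2 */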
@@ -1565,7 +1565,7 @@ static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
1565 struct platform_device *tc_pdev; 1565 struct platform_device *tc_pdev;
1566 int ret; 1566 int ret;
1567 1567
1568 if (!tc) 1568 if (!IS_ENABLED(CONFIG_OF) || !tc)
1569 return; 1569 return;
1570 1570
1571 tc_pdev = of_find_device_by_node(tc->node); 1571 tc_pdev = of_find_device_by_node(tc->node);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 7058d58ba588..0f6fd42f55ca 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1462,7 +1462,7 @@ err_firmware:
1462 1462
1463#define EVENT_REMAP_CELLS 3 1463#define EVENT_REMAP_CELLS 3
1464 1464
1465static int __init sdma_event_remap(struct sdma_engine *sdma) 1465static int sdma_event_remap(struct sdma_engine *sdma)
1466{ 1466{
1467 struct device_node *np = sdma->dev->of_node; 1467 struct device_node *np = sdma->dev->of_node;
1468 struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0); 1468 struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
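Dropping __init from sdma_event_remap() is a section fix: the function is called from the probe path, and probe can run after kernel init memory has been discarded (deferred probe, late device binding, module load), at which point a call into .init.text would fault. As a rule of thumb:

    /* __init  -> only safe for code that runs before free_initmem()
     * probe() -> may run at any time, so every helper it calls must
     *            live in regular .text, not .init.text
     */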
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index ebd8a5f398b0..f1bcc2a163b3 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -679,8 +679,11 @@ static int usb_dmac_runtime_suspend(struct device *dev)
679 struct usb_dmac *dmac = dev_get_drvdata(dev); 679 struct usb_dmac *dmac = dev_get_drvdata(dev);
680 int i; 680 int i;
681 681
682 for (i = 0; i < dmac->n_channels; ++i) 682 for (i = 0; i < dmac->n_channels; ++i) {
683 if (!dmac->channels[i].iomem)
684 break;
683 usb_dmac_chan_halt(&dmac->channels[i]); 685 usb_dmac_chan_halt(&dmac->channels[i]);
686 }
684 687
685 return 0; 688 return 0;
686} 689}
@@ -799,11 +802,10 @@ static int usb_dmac_probe(struct platform_device *pdev)
799 ret = pm_runtime_get_sync(&pdev->dev); 802 ret = pm_runtime_get_sync(&pdev->dev);
800 if (ret < 0) { 803 if (ret < 0) {
801 dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); 804 dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
802 return ret; 805 goto error_pm;
803 } 806 }
804 807
805 ret = usb_dmac_init(dmac); 808 ret = usb_dmac_init(dmac);
806 pm_runtime_put(&pdev->dev);
807 809
808 if (ret) { 810 if (ret) {
809 dev_err(&pdev->dev, "failed to reset device\n"); 811 dev_err(&pdev->dev, "failed to reset device\n");
@@ -851,10 +853,13 @@ static int usb_dmac_probe(struct platform_device *pdev)
851 if (ret < 0) 853 if (ret < 0)
852 goto error; 854 goto error;
853 855
856 pm_runtime_put(&pdev->dev);
854 return 0; 857 return 0;
855 858
856error: 859error:
857 of_dma_controller_free(pdev->dev.of_node); 860 of_dma_controller_free(pdev->dev.of_node);
861 pm_runtime_put(&pdev->dev);
862error_pm:
858 pm_runtime_disable(&pdev->dev); 863 pm_runtime_disable(&pdev->dev);
859 return ret; 864 return ret;
860} 865}
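The probe reordering keeps the device runtime-resumed across channel setup and DMA controller registration, dropping the reference only once everything has succeeded, and gives the error paths matching pm_runtime_put()/pm_runtime_disable() calls. Together with the iomem check added to usb_dmac_runtime_suspend() above, this avoids halting channels that were never initialized when probe fails part-way. The resulting control flow, sketched:

    ret = pm_runtime_get_sync(&pdev->dev);
    if (ret < 0)
            goto error_pm;          /* nothing initialized yet, just disable PM */

    /* ... usb_dmac_init(), channel setup, controller registration ... */

    pm_runtime_put(&pdev->dev);     /* success: balance the get */
    return 0;

    error:
            of_dma_controller_free(pdev->dev.of_node);
            pm_runtime_put(&pdev->dev);
    error_pm:
            pm_runtime_disable(&pdev->dev);
            return ret;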
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 615ce6d464fb..306f75700bf8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -389,7 +389,6 @@ struct amdgpu_clock {
389 * Fences. 389 * Fences.
390 */ 390 */
391struct amdgpu_fence_driver { 391struct amdgpu_fence_driver {
392 struct amdgpu_ring *ring;
393 uint64_t gpu_addr; 392 uint64_t gpu_addr;
394 volatile uint32_t *cpu_addr; 393 volatile uint32_t *cpu_addr;
395 /* sync_seq is protected by ring emission lock */ 394 /* sync_seq is protected by ring emission lock */
@@ -398,7 +397,7 @@ struct amdgpu_fence_driver {
398 bool initialized; 397 bool initialized;
399 struct amdgpu_irq_src *irq_src; 398 struct amdgpu_irq_src *irq_src;
400 unsigned irq_type; 399 unsigned irq_type;
401 struct delayed_work lockup_work; 400 struct timer_list fallback_timer;
402 wait_queue_head_t fence_queue; 401 wait_queue_head_t fence_queue;
403}; 402};
404 403
@@ -917,8 +916,8 @@ struct amdgpu_ring {
917#define AMDGPU_VM_FAULT_STOP_ALWAYS 2 916#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
918 917
919struct amdgpu_vm_pt { 918struct amdgpu_vm_pt {
920 struct amdgpu_bo *bo; 919 struct amdgpu_bo *bo;
921 uint64_t addr; 920 uint64_t addr;
922}; 921};
923 922
924struct amdgpu_vm_id { 923struct amdgpu_vm_id {
@@ -926,8 +925,6 @@ struct amdgpu_vm_id {
926 uint64_t pd_gpu_addr; 925 uint64_t pd_gpu_addr;
927 /* last flushed PD/PT update */ 926 /* last flushed PD/PT update */
928 struct fence *flushed_updates; 927 struct fence *flushed_updates;
929 /* last use of vmid */
930 struct fence *last_id_use;
931}; 928};
932 929
933struct amdgpu_vm { 930struct amdgpu_vm {
@@ -957,24 +954,70 @@ struct amdgpu_vm {
957 954
958 /* for id and flush management per ring */ 955 /* for id and flush management per ring */
959 struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS]; 956 struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS];
957 /* for interval tree */
958 spinlock_t it_lock;
960}; 959};
961 960
962struct amdgpu_vm_manager { 961struct amdgpu_vm_manager {
963 struct fence *active[AMDGPU_NUM_VM]; 962 struct {
964 uint32_t max_pfn; 963 struct fence *active;
964 atomic_long_t owner;
965 } ids[AMDGPU_NUM_VM];
966
967 uint32_t max_pfn;
965 /* number of VMIDs */ 968 /* number of VMIDs */
966 unsigned nvm; 969 unsigned nvm;
967 /* vram base address for page table entry */ 970 /* vram base address for page table entry */
968 u64 vram_base_offset; 971 u64 vram_base_offset;
969 /* is vm enabled? */ 972 /* is vm enabled? */
970 bool enabled; 973 bool enabled;
971 /* for hw to save the PD addr on suspend/resume */
972 uint32_t saved_table_addr[AMDGPU_NUM_VM];
973 /* vm pte handling */ 974 /* vm pte handling */
974 const struct amdgpu_vm_pte_funcs *vm_pte_funcs; 975 const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
975 struct amdgpu_ring *vm_pte_funcs_ring; 976 struct amdgpu_ring *vm_pte_funcs_ring;
976}; 977};
977 978
979void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
980int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
981void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
982struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
983 struct amdgpu_vm *vm,
984 struct list_head *head);
985int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
986 struct amdgpu_sync *sync);
987void amdgpu_vm_flush(struct amdgpu_ring *ring,
988 struct amdgpu_vm *vm,
989 struct fence *updates);
990void amdgpu_vm_fence(struct amdgpu_device *adev,
991 struct amdgpu_vm *vm,
992 struct fence *fence);
993uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
994int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
995 struct amdgpu_vm *vm);
996int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
997 struct amdgpu_vm *vm);
998int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
999 struct amdgpu_sync *sync);
1000int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1001 struct amdgpu_bo_va *bo_va,
1002 struct ttm_mem_reg *mem);
1003void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
1004 struct amdgpu_bo *bo);
1005struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
1006 struct amdgpu_bo *bo);
1007struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1008 struct amdgpu_vm *vm,
1009 struct amdgpu_bo *bo);
1010int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1011 struct amdgpu_bo_va *bo_va,
1012 uint64_t addr, uint64_t offset,
1013 uint64_t size, uint32_t flags);
1014int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1015 struct amdgpu_bo_va *bo_va,
1016 uint64_t addr);
1017void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1018 struct amdgpu_bo_va *bo_va);
1019int amdgpu_vm_free_job(struct amdgpu_job *job);
1020
978/* 1021/*
979 * context related structures 1022 * context related structures
980 */ 1023 */
@@ -1211,6 +1254,7 @@ struct amdgpu_cs_parser {
1211 /* relocations */ 1254 /* relocations */
1212 struct amdgpu_bo_list_entry *vm_bos; 1255 struct amdgpu_bo_list_entry *vm_bos;
1213 struct list_head validated; 1256 struct list_head validated;
1257 struct fence *fence;
1214 1258
1215 struct amdgpu_ib *ibs; 1259 struct amdgpu_ib *ibs;
1216 uint32_t num_ibs; 1260 uint32_t num_ibs;
@@ -1226,7 +1270,7 @@ struct amdgpu_job {
1226 struct amdgpu_device *adev; 1270 struct amdgpu_device *adev;
1227 struct amdgpu_ib *ibs; 1271 struct amdgpu_ib *ibs;
1228 uint32_t num_ibs; 1272 uint32_t num_ibs;
1229 struct mutex job_lock; 1273 void *owner;
1230 struct amdgpu_user_fence uf; 1274 struct amdgpu_user_fence uf;
1231 int (*free_job)(struct amdgpu_job *job); 1275 int (*free_job)(struct amdgpu_job *job);
1232}; 1276};
@@ -2257,11 +2301,6 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
2257bool amdgpu_card_posted(struct amdgpu_device *adev); 2301bool amdgpu_card_posted(struct amdgpu_device *adev);
2258void amdgpu_update_display_priority(struct amdgpu_device *adev); 2302void amdgpu_update_display_priority(struct amdgpu_device *adev);
2259bool amdgpu_boot_test_post_card(struct amdgpu_device *adev); 2303bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
2260struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
2261 struct drm_file *filp,
2262 struct amdgpu_ctx *ctx,
2263 struct amdgpu_ib *ibs,
2264 uint32_t num_ibs);
2265 2304
2266int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); 2305int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
2267int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, 2306int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
@@ -2319,49 +2358,6 @@ long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
2319 unsigned long arg); 2358 unsigned long arg);
2320 2359
2321/* 2360/*
2322 * vm
2323 */
2324int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
2325void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
2326struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
2327 struct amdgpu_vm *vm,
2328 struct list_head *head);
2329int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
2330 struct amdgpu_sync *sync);
2331void amdgpu_vm_flush(struct amdgpu_ring *ring,
2332 struct amdgpu_vm *vm,
2333 struct fence *updates);
2334void amdgpu_vm_fence(struct amdgpu_device *adev,
2335 struct amdgpu_vm *vm,
2336 struct amdgpu_fence *fence);
2337uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
2338int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
2339 struct amdgpu_vm *vm);
2340int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
2341 struct amdgpu_vm *vm);
2342int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
2343 struct amdgpu_vm *vm, struct amdgpu_sync *sync);
2344int amdgpu_vm_bo_update(struct amdgpu_device *adev,
2345 struct amdgpu_bo_va *bo_va,
2346 struct ttm_mem_reg *mem);
2347void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2348 struct amdgpu_bo *bo);
2349struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
2350 struct amdgpu_bo *bo);
2351struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2352 struct amdgpu_vm *vm,
2353 struct amdgpu_bo *bo);
2354int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2355 struct amdgpu_bo_va *bo_va,
2356 uint64_t addr, uint64_t offset,
2357 uint64_t size, uint32_t flags);
2358int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2359 struct amdgpu_bo_va *bo_va,
2360 uint64_t addr);
2361void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2362 struct amdgpu_bo_va *bo_va);
2363int amdgpu_vm_free_job(struct amdgpu_job *job);
2364/*
2365 * functions used by amdgpu_encoder.c 2361 * functions used by amdgpu_encoder.c
2366 */ 2362 */
2367struct amdgpu_afmt_acr { 2363struct amdgpu_afmt_acr {
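Summarizing the amdgpu.h side: the per-ring lockup delayed work becomes a fallback timer, the VM function prototypes simply move from the bottom of the header up next to the VM structures (amdgpu_vm_fence() now takes a plain struct fence * rather than struct amdgpu_fence *), and the VM manager tracks an owner per VMID, presumably so a client that still owns an ID can reuse it without a full flush:

    struct {
            struct fence  *active;  /* last fence to use this VMID */
            atomic_long_t  owner;   /* presumed: client identity for reuse checks */
    } ids[AMDGPU_NUM_VM];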
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index dfc4d02c7a38..3afcf0237c25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -127,30 +127,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
127 return 0; 127 return 0;
128} 128}
129 129
130struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
131 struct drm_file *filp,
132 struct amdgpu_ctx *ctx,
133 struct amdgpu_ib *ibs,
134 uint32_t num_ibs)
135{
136 struct amdgpu_cs_parser *parser;
137 int i;
138
139 parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
140 if (!parser)
141 return NULL;
142
143 parser->adev = adev;
144 parser->filp = filp;
145 parser->ctx = ctx;
146 parser->ibs = ibs;
147 parser->num_ibs = num_ibs;
148 for (i = 0; i < num_ibs; i++)
149 ibs[i].ctx = ctx;
150
151 return parser;
152}
153
154int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) 130int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
155{ 131{
156 union drm_amdgpu_cs *cs = data; 132 union drm_amdgpu_cs *cs = data;
@@ -463,8 +439,18 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
463 return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; 439 return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
464} 440}
465 441
466static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff) 442/**
443 * cs_parser_fini() - clean parser states
444 * @parser: parser structure holding parsing context.
445 * @error: error number
446 *
447 * If error is set then unvalidate buffer, otherwise just free memory
448 * used by parsing context.
449 **/
450static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
467{ 451{
452 unsigned i;
453
468 if (!error) { 454 if (!error) {
469 /* Sort the buffer list from the smallest to largest buffer, 455 /* Sort the buffer list from the smallest to largest buffer,
470 * which affects the order of buffers in the LRU list. 456 * which affects the order of buffers in the LRU list.
@@ -479,17 +465,14 @@ static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int err
479 list_sort(NULL, &parser->validated, cmp_size_smaller_first); 465 list_sort(NULL, &parser->validated, cmp_size_smaller_first);
480 466
481 ttm_eu_fence_buffer_objects(&parser->ticket, 467 ttm_eu_fence_buffer_objects(&parser->ticket,
482 &parser->validated, 468 &parser->validated,
483 &parser->ibs[parser->num_ibs-1].fence->base); 469 parser->fence);
484 } else if (backoff) { 470 } else if (backoff) {
485 ttm_eu_backoff_reservation(&parser->ticket, 471 ttm_eu_backoff_reservation(&parser->ticket,
486 &parser->validated); 472 &parser->validated);
487 } 473 }
488} 474 fence_put(parser->fence);
489 475
490static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
491{
492 unsigned i;
493 if (parser->ctx) 476 if (parser->ctx)
494 amdgpu_ctx_put(parser->ctx); 477 amdgpu_ctx_put(parser->ctx);
495 if (parser->bo_list) 478 if (parser->bo_list)
@@ -499,31 +482,12 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
499 for (i = 0; i < parser->nchunks; i++) 482 for (i = 0; i < parser->nchunks; i++)
500 drm_free_large(parser->chunks[i].kdata); 483 drm_free_large(parser->chunks[i].kdata);
501 kfree(parser->chunks); 484 kfree(parser->chunks);
502 if (!amdgpu_enable_scheduler) 485 if (parser->ibs)
503 { 486 for (i = 0; i < parser->num_ibs; i++)
504 if (parser->ibs) 487 amdgpu_ib_free(parser->adev, &parser->ibs[i]);
505 for (i = 0; i < parser->num_ibs; i++) 488 kfree(parser->ibs);
506 amdgpu_ib_free(parser->adev, &parser->ibs[i]); 489 if (parser->uf.bo)
507 kfree(parser->ibs); 490 drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
508 if (parser->uf.bo)
509 drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
510 }
511
512 kfree(parser);
513}
514
515/**
516 * cs_parser_fini() - clean parser states
517 * @parser: parser structure holding parsing context.
518 * @error: error number
519 *
520 * If error is set then unvalidate buffer, otherwise just free memory
521 * used by parsing context.
522 **/
523static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
524{
525 amdgpu_cs_parser_fini_early(parser, error, backoff);
526 amdgpu_cs_parser_fini_late(parser);
527} 491}
528 492
529static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, 493static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@@ -610,15 +574,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
610 } 574 }
611 575
612 r = amdgpu_bo_vm_update_pte(parser, vm); 576 r = amdgpu_bo_vm_update_pte(parser, vm);
613 if (r) { 577 if (!r)
614 goto out; 578 amdgpu_cs_sync_rings(parser);
615 }
616 amdgpu_cs_sync_rings(parser);
617 if (!amdgpu_enable_scheduler)
618 r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
619 parser->filp);
620 579
621out:
622 return r; 580 return r;
623} 581}
624 582
@@ -828,36 +786,36 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
828 union drm_amdgpu_cs *cs = data; 786 union drm_amdgpu_cs *cs = data;
829 struct amdgpu_fpriv *fpriv = filp->driver_priv; 787 struct amdgpu_fpriv *fpriv = filp->driver_priv;
830 struct amdgpu_vm *vm = &fpriv->vm; 788 struct amdgpu_vm *vm = &fpriv->vm;
831 struct amdgpu_cs_parser *parser; 789 struct amdgpu_cs_parser parser = {};
832 bool reserved_buffers = false; 790 bool reserved_buffers = false;
833 int i, r; 791 int i, r;
834 792
835 if (!adev->accel_working) 793 if (!adev->accel_working)
836 return -EBUSY; 794 return -EBUSY;
837 795
838 parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0); 796 parser.adev = adev;
839 if (!parser) 797 parser.filp = filp;
840 return -ENOMEM; 798
841 r = amdgpu_cs_parser_init(parser, data); 799 r = amdgpu_cs_parser_init(&parser, data);
842 if (r) { 800 if (r) {
843 DRM_ERROR("Failed to initialize parser !\n"); 801 DRM_ERROR("Failed to initialize parser !\n");
844 amdgpu_cs_parser_fini(parser, r, false); 802 amdgpu_cs_parser_fini(&parser, r, false);
845 r = amdgpu_cs_handle_lockup(adev, r); 803 r = amdgpu_cs_handle_lockup(adev, r);
846 return r; 804 return r;
847 } 805 }
848 mutex_lock(&vm->mutex); 806 mutex_lock(&vm->mutex);
849 r = amdgpu_cs_parser_relocs(parser); 807 r = amdgpu_cs_parser_relocs(&parser);
850 if (r == -ENOMEM) 808 if (r == -ENOMEM)
851 DRM_ERROR("Not enough memory for command submission!\n"); 809 DRM_ERROR("Not enough memory for command submission!\n");
852 else if (r && r != -ERESTARTSYS) 810 else if (r && r != -ERESTARTSYS)
853 DRM_ERROR("Failed to process the buffer list %d!\n", r); 811 DRM_ERROR("Failed to process the buffer list %d!\n", r);
854 else if (!r) { 812 else if (!r) {
855 reserved_buffers = true; 813 reserved_buffers = true;
856 r = amdgpu_cs_ib_fill(adev, parser); 814 r = amdgpu_cs_ib_fill(adev, &parser);
857 } 815 }
858 816
859 if (!r) { 817 if (!r) {
860 r = amdgpu_cs_dependencies(adev, parser); 818 r = amdgpu_cs_dependencies(adev, &parser);
861 if (r) 819 if (r)
862 DRM_ERROR("Failed in the dependencies handling %d!\n", r); 820 DRM_ERROR("Failed in the dependencies handling %d!\n", r);
863 } 821 }
@@ -865,62 +823,71 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
865 if (r) 823 if (r)
866 goto out; 824 goto out;
867 825
868 for (i = 0; i < parser->num_ibs; i++) 826 for (i = 0; i < parser.num_ibs; i++)
869 trace_amdgpu_cs(parser, i); 827 trace_amdgpu_cs(&parser, i);
870 828
871 r = amdgpu_cs_ib_vm_chunk(adev, parser); 829 r = amdgpu_cs_ib_vm_chunk(adev, &parser);
872 if (r) 830 if (r)
873 goto out; 831 goto out;
874 832
875 if (amdgpu_enable_scheduler && parser->num_ibs) { 833 if (amdgpu_enable_scheduler && parser.num_ibs) {
834 struct amdgpu_ring * ring = parser.ibs->ring;
835 struct amd_sched_fence *fence;
876 struct amdgpu_job *job; 836 struct amdgpu_job *job;
877 struct amdgpu_ring * ring = parser->ibs->ring; 837
878 job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); 838 job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
879 if (!job) { 839 if (!job) {
880 r = -ENOMEM; 840 r = -ENOMEM;
881 goto out; 841 goto out;
882 } 842 }
843
883 job->base.sched = &ring->sched; 844 job->base.sched = &ring->sched;
884 job->base.s_entity = &parser->ctx->rings[ring->idx].entity; 845 job->base.s_entity = &parser.ctx->rings[ring->idx].entity;
885 job->adev = parser->adev; 846 job->adev = parser.adev;
886 job->ibs = parser->ibs; 847 job->owner = parser.filp;
887 job->num_ibs = parser->num_ibs; 848 job->free_job = amdgpu_cs_free_job;
888 job->base.owner = parser->filp; 849
889 mutex_init(&job->job_lock); 850 job->ibs = parser.ibs;
851 job->num_ibs = parser.num_ibs;
852 parser.ibs = NULL;
853 parser.num_ibs = 0;
854
890 if (job->ibs[job->num_ibs - 1].user) { 855 if (job->ibs[job->num_ibs - 1].user) {
891 memcpy(&job->uf, &parser->uf, 856 job->uf = parser.uf;
892 sizeof(struct amdgpu_user_fence));
893 job->ibs[job->num_ibs - 1].user = &job->uf; 857 job->ibs[job->num_ibs - 1].user = &job->uf;
858 parser.uf.bo = NULL;
894 } 859 }
895 860
896 job->free_job = amdgpu_cs_free_job; 861 fence = amd_sched_fence_create(job->base.s_entity,
897 mutex_lock(&job->job_lock); 862 parser.filp);
898 r = amd_sched_entity_push_job(&job->base); 863 if (!fence) {
899 if (r) { 864 r = -ENOMEM;
900 mutex_unlock(&job->job_lock);
901 amdgpu_cs_free_job(job); 865 amdgpu_cs_free_job(job);
902 kfree(job); 866 kfree(job);
903 goto out; 867 goto out;
904 } 868 }
905 cs->out.handle = 869 job->base.s_fence = fence;
906 amdgpu_ctx_add_fence(parser->ctx, ring, 870 parser.fence = fence_get(&fence->base);
907 &job->base.s_fence->base);
908 parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle;
909 871
910 list_sort(NULL, &parser->validated, cmp_size_smaller_first); 872 cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring,
911 ttm_eu_fence_buffer_objects(&parser->ticket, 873 &fence->base);
912 &parser->validated, 874 job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
913 &job->base.s_fence->base);
914 875
915 mutex_unlock(&job->job_lock); 876 trace_amdgpu_cs_ioctl(job);
916 amdgpu_cs_parser_fini_late(parser); 877 amd_sched_entity_push_job(&job->base);
917 mutex_unlock(&vm->mutex); 878
918 return 0; 879 } else {
880 struct amdgpu_fence *fence;
881
882 r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs,
883 parser.filp);
884 fence = parser.ibs[parser.num_ibs - 1].fence;
885 parser.fence = fence_get(&fence->base);
886 cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence;
919 } 887 }
920 888
921 cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
922out: 889out:
923 amdgpu_cs_parser_fini(parser, r, reserved_buffers); 890 amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
924 mutex_unlock(&vm->mutex); 891 mutex_unlock(&vm->mutex);
925 r = amdgpu_cs_handle_lockup(adev, r); 892 r = amdgpu_cs_handle_lockup(adev, r);
926 return r; 893 return r;
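The command-submission rework allocates the parser on the ioctl stack instead of kzalloc()/kfree(), folds the early/late fini split back into a single documented amdgpu_cs_parser_fini(), and introduces parser.fence as the one reference used to fence the validated buffers, taken before the job is pushed to the scheduler. Ownership of the IBs moves to the job (parser.ibs = NULL, parser.num_ibs = 0) so cleanup cannot free them underneath the scheduler. The reference rule, in short:

    parser.fence = fence_get(&fence->base);         /* parser's own reference */
    /* ... submit ... */
    /* in fini: fence the BOs with it, then drop it */
    ttm_eu_fence_buffer_objects(&parser->ticket, &parser->validated,
                                parser->fence);
    fence_put(parser->fence);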
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 257d72205bb5..3671f9f220bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -47,6 +47,9 @@
47 * that the relevant GPU caches have been flushed. 47 * that the relevant GPU caches have been flushed.
48 */ 48 */
49 49
50static struct kmem_cache *amdgpu_fence_slab;
51static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);
52
50/** 53/**
51 * amdgpu_fence_write - write a fence value 54 * amdgpu_fence_write - write a fence value
52 * 55 *
@@ -85,24 +88,6 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
85} 88}
86 89
87/** 90/**
88 * amdgpu_fence_schedule_check - schedule lockup check
89 *
90 * @ring: pointer to struct amdgpu_ring
91 *
92 * Queues a delayed work item to check for lockups.
93 */
94static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
95{
96 /*
97 * Do not reset the timer here with mod_delayed_work,
98 * this can livelock in an interaction with TTM delayed destroy.
99 */
100 queue_delayed_work(system_power_efficient_wq,
101 &ring->fence_drv.lockup_work,
102 AMDGPU_FENCE_JIFFIES_TIMEOUT);
103}
104
105/**
106 * amdgpu_fence_emit - emit a fence on the requested ring 91 * amdgpu_fence_emit - emit a fence on the requested ring
107 * 92 *
108 * @ring: ring the fence is associated with 93 * @ring: ring the fence is associated with
@@ -118,7 +103,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
118 struct amdgpu_device *adev = ring->adev; 103 struct amdgpu_device *adev = ring->adev;
119 104
120 /* we are protected by the ring emission mutex */ 105 /* we are protected by the ring emission mutex */
121 *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL); 106 *fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
122 if ((*fence) == NULL) { 107 if ((*fence) == NULL) {
123 return -ENOMEM; 108 return -ENOMEM;
124 } 109 }
@@ -132,11 +117,23 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
132 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, 117 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
133 (*fence)->seq, 118 (*fence)->seq,
134 AMDGPU_FENCE_FLAG_INT); 119 AMDGPU_FENCE_FLAG_INT);
135 trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
136 return 0; 120 return 0;
137} 121}
138 122
139/** 123/**
124 * amdgpu_fence_schedule_fallback - schedule fallback check
125 *
126 * @ring: pointer to struct amdgpu_ring
127 *
128 * Start a timer as fallback to our interrupts.
129 */
130static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
131{
132 mod_timer(&ring->fence_drv.fallback_timer,
133 jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
134}
135
136/**
140 * amdgpu_fence_activity - check for fence activity 137 * amdgpu_fence_activity - check for fence activity
141 * 138 *
142 * @ring: pointer to struct amdgpu_ring 139 * @ring: pointer to struct amdgpu_ring
@@ -202,45 +199,38 @@ static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
202 } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq); 199 } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
203 200
204 if (seq < last_emitted) 201 if (seq < last_emitted)
205 amdgpu_fence_schedule_check(ring); 202 amdgpu_fence_schedule_fallback(ring);
206 203
207 return wake; 204 return wake;
208} 205}
209 206
210/** 207/**
211 * amdgpu_fence_check_lockup - check for hardware lockup 208 * amdgpu_fence_process - process a fence
212 * 209 *
213 * @work: delayed work item 210 * @adev: amdgpu_device pointer
211 * @ring: ring index the fence is associated with
214 * 212 *
215 * Checks for fence activity and if there is none probe 213 * Checks the current fence value and wakes the fence queue
216 * the hardware if a lockup occurred. 214 * if the sequence number has increased (all asics).
217 */ 215 */
218static void amdgpu_fence_check_lockup(struct work_struct *work) 216void amdgpu_fence_process(struct amdgpu_ring *ring)
219{ 217{
220 struct amdgpu_fence_driver *fence_drv;
221 struct amdgpu_ring *ring;
222
223 fence_drv = container_of(work, struct amdgpu_fence_driver,
224 lockup_work.work);
225 ring = fence_drv->ring;
226
227 if (amdgpu_fence_activity(ring)) 218 if (amdgpu_fence_activity(ring))
228 wake_up_all(&ring->fence_drv.fence_queue); 219 wake_up_all(&ring->fence_drv.fence_queue);
229} 220}
230 221
231/** 222/**
232 * amdgpu_fence_process - process a fence 223 * amdgpu_fence_fallback - fallback for hardware interrupts
233 * 224 *
234 * @adev: amdgpu_device pointer 225 * @work: delayed work item
235 * @ring: ring index the fence is associated with
236 * 226 *
237 * Checks the current fence value and wakes the fence queue 227 * Checks for fence activity.
238 * if the sequence number has increased (all asics).
239 */ 228 */
240void amdgpu_fence_process(struct amdgpu_ring *ring) 229static void amdgpu_fence_fallback(unsigned long arg)
241{ 230{
242 if (amdgpu_fence_activity(ring)) 231 struct amdgpu_ring *ring = (void *)arg;
243 wake_up_all(&ring->fence_drv.fence_queue); 232
233 amdgpu_fence_process(ring);
244} 234}
245 235
246/** 236/**
@@ -290,7 +280,7 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
290 if (atomic64_read(&ring->fence_drv.last_seq) >= seq) 280 if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
291 return 0; 281 return 0;
292 282
293 amdgpu_fence_schedule_check(ring); 283 amdgpu_fence_schedule_fallback(ring);
294 wait_event(ring->fence_drv.fence_queue, ( 284 wait_event(ring->fence_drv.fence_queue, (
295 (signaled = amdgpu_fence_seq_signaled(ring, seq)))); 285 (signaled = amdgpu_fence_seq_signaled(ring, seq))));
296 286
@@ -491,9 +481,8 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
491 atomic64_set(&ring->fence_drv.last_seq, 0); 481 atomic64_set(&ring->fence_drv.last_seq, 0);
492 ring->fence_drv.initialized = false; 482 ring->fence_drv.initialized = false;
493 483
494 INIT_DELAYED_WORK(&ring->fence_drv.lockup_work, 484 setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
495 amdgpu_fence_check_lockup); 485 (unsigned long)ring);
496 ring->fence_drv.ring = ring;
497 486
498 init_waitqueue_head(&ring->fence_drv.fence_queue); 487 init_waitqueue_head(&ring->fence_drv.fence_queue);
499 488
@@ -536,6 +525,13 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
536 */ 525 */
537int amdgpu_fence_driver_init(struct amdgpu_device *adev) 526int amdgpu_fence_driver_init(struct amdgpu_device *adev)
538{ 527{
528 if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
529 amdgpu_fence_slab = kmem_cache_create(
530 "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
531 SLAB_HWCACHE_ALIGN, NULL);
532 if (!amdgpu_fence_slab)
533 return -ENOMEM;
534 }
539 if (amdgpu_debugfs_fence_init(adev)) 535 if (amdgpu_debugfs_fence_init(adev))
540 dev_err(adev->dev, "fence debugfs file creation failed\n"); 536 dev_err(adev->dev, "fence debugfs file creation failed\n");
541 537
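amdgpu_fence_driver_init() now allocates fences from a shared, lazily created kmem_cache, refcounted so several devices can use one slab. A sketch of the idiom with hypothetical names (unlike the hunk above, this sketch also drops the reference again when creation fails):

#include <linux/slab.h>
#include <linux/atomic.h>

struct my_obj { int payload; };

static struct kmem_cache *my_slab;
static atomic_t my_slab_ref = ATOMIC_INIT(0);

static int my_slab_get(void)
{
	/* the first caller creates the cache, later callers
	 * just take a reference */
	if (atomic_inc_return(&my_slab_ref) == 1) {
		my_slab = kmem_cache_create("my_obj",
					    sizeof(struct my_obj), 0,
					    SLAB_HWCACHE_ALIGN, NULL);
		if (!my_slab) {
			atomic_dec(&my_slab_ref);
			return -ENOMEM;
		}
	}
	return 0;
}

static void my_slab_put(void)
{
	/* the last caller destroys the cache */
	if (atomic_dec_and_test(&my_slab_ref))
		kmem_cache_destroy(my_slab);
}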
@@ -554,9 +550,12 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
554{ 550{
555 int i, r; 551 int i, r;
556 552
553 if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
554 kmem_cache_destroy(amdgpu_fence_slab);
557 mutex_lock(&adev->ring_lock); 555 mutex_lock(&adev->ring_lock);
558 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 556 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
559 struct amdgpu_ring *ring = adev->rings[i]; 557 struct amdgpu_ring *ring = adev->rings[i];
558
560 if (!ring || !ring->fence_drv.initialized) 559 if (!ring || !ring->fence_drv.initialized)
561 continue; 560 continue;
562 r = amdgpu_fence_wait_empty(ring); 561 r = amdgpu_fence_wait_empty(ring);
@@ -568,6 +567,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
568 amdgpu_irq_put(adev, ring->fence_drv.irq_src, 567 amdgpu_irq_put(adev, ring->fence_drv.irq_src,
569 ring->fence_drv.irq_type); 568 ring->fence_drv.irq_type);
570 amd_sched_fini(&ring->sched); 569 amd_sched_fini(&ring->sched);
570 del_timer_sync(&ring->fence_drv.fallback_timer);
571 ring->fence_drv.initialized = false; 571 ring->fence_drv.initialized = false;
572 } 572 }
573 mutex_unlock(&adev->ring_lock); 573 mutex_unlock(&adev->ring_lock);
@@ -751,18 +751,25 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
751 fence->fence_wake.func = amdgpu_fence_check_signaled; 751 fence->fence_wake.func = amdgpu_fence_check_signaled;
752 __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake); 752 __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
753 fence_get(f); 753 fence_get(f);
754 amdgpu_fence_schedule_check(ring); 754 if (!timer_pending(&ring->fence_drv.fallback_timer))
755 amdgpu_fence_schedule_fallback(ring);
755 FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); 756 FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
756 return true; 757 return true;
757} 758}
758 759
760static void amdgpu_fence_release(struct fence *f)
761{
762 struct amdgpu_fence *fence = to_amdgpu_fence(f);
763 kmem_cache_free(amdgpu_fence_slab, fence);
764}
765
759const struct fence_ops amdgpu_fence_ops = { 766const struct fence_ops amdgpu_fence_ops = {
760 .get_driver_name = amdgpu_fence_get_driver_name, 767 .get_driver_name = amdgpu_fence_get_driver_name,
761 .get_timeline_name = amdgpu_fence_get_timeline_name, 768 .get_timeline_name = amdgpu_fence_get_timeline_name,
762 .enable_signaling = amdgpu_fence_enable_signaling, 769 .enable_signaling = amdgpu_fence_enable_signaling,
763 .signaled = amdgpu_fence_is_signaled, 770 .signaled = amdgpu_fence_is_signaled,
764 .wait = fence_default_wait, 771 .wait = fence_default_wait,
765 .release = NULL, 772 .release = amdgpu_fence_release,
766}; 773};
767 774
768/* 775/*
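Because fences now come from that slab, the driver can no longer rely on the default release path (a NULL .release falls back to freeing the object with kfree()), hence the new amdgpu_fence_release(). A sketch of a matching fence_ops against the 4.4-era struct fence API; my_fence_slab is the hypothetical cache from the previous sketch:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fence.h>	/* struct fence, before the dma_fence rename */

extern struct kmem_cache *my_fence_slab;

struct my_fence {
	struct fence base;
	/* driver-private data ... */
};

static void my_fence_release(struct fence *f)
{
	struct my_fence *fence = container_of(f, struct my_fence, base);

	/* slab objects must go back to their cache, not to kfree() */
	kmem_cache_free(my_fence_slab, fence);
}

static const struct fence_ops my_fence_ops = {
	/* .get_driver_name, .enable_signaling, ... as usual */
	.wait = fence_default_wait,
	.release = my_fence_release,
};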
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 087332858853..00c5b580f56c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -483,6 +483,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
483 if (domain == AMDGPU_GEM_DOMAIN_CPU) 483 if (domain == AMDGPU_GEM_DOMAIN_CPU)
484 goto error_unreserve; 484 goto error_unreserve;
485 } 485 }
486 r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
487 if (r)
488 goto error_unreserve;
486 489
487 r = amdgpu_vm_clear_freed(adev, bo_va->vm); 490 r = amdgpu_vm_clear_freed(adev, bo_va->vm);
488 if (r) 491 if (r)
@@ -512,6 +515,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
512 struct amdgpu_fpriv *fpriv = filp->driver_priv; 515 struct amdgpu_fpriv *fpriv = filp->driver_priv;
513 struct amdgpu_bo *rbo; 516 struct amdgpu_bo *rbo;
514 struct amdgpu_bo_va *bo_va; 517 struct amdgpu_bo_va *bo_va;
518 struct ttm_validate_buffer tv, tv_pd;
519 struct ww_acquire_ctx ticket;
520 struct list_head list, duplicates;
515 uint32_t invalid_flags, va_flags = 0; 521 uint32_t invalid_flags, va_flags = 0;
516 int r = 0; 522 int r = 0;
517 523
@@ -549,7 +555,18 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
549 return -ENOENT; 555 return -ENOENT;
550 mutex_lock(&fpriv->vm.mutex); 556 mutex_lock(&fpriv->vm.mutex);
551 rbo = gem_to_amdgpu_bo(gobj); 557 rbo = gem_to_amdgpu_bo(gobj);
552 r = amdgpu_bo_reserve(rbo, false); 558 INIT_LIST_HEAD(&list);
559 INIT_LIST_HEAD(&duplicates);
560 tv.bo = &rbo->tbo;
561 tv.shared = true;
562 list_add(&tv.head, &list);
563
564 if (args->operation == AMDGPU_VA_OP_MAP) {
565 tv_pd.bo = &fpriv->vm.page_directory->tbo;
566 tv_pd.shared = true;
567 list_add(&tv_pd.head, &list);
568 }
569 r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
553 if (r) { 570 if (r) {
554 mutex_unlock(&fpriv->vm.mutex); 571 mutex_unlock(&fpriv->vm.mutex);
555 drm_gem_object_unreference_unlocked(gobj); 572 drm_gem_object_unreference_unlocked(gobj);
@@ -558,7 +575,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
558 575
559 bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo); 576 bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
560 if (!bo_va) { 577 if (!bo_va) {
561 amdgpu_bo_unreserve(rbo); 578 ttm_eu_backoff_reservation(&ticket, &list);
579 drm_gem_object_unreference_unlocked(gobj);
562 mutex_unlock(&fpriv->vm.mutex); 580 mutex_unlock(&fpriv->vm.mutex);
563 return -ENOENT; 581 return -ENOENT;
564 } 582 }
@@ -581,7 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
581 default: 599 default:
582 break; 600 break;
583 } 601 }
584 602 ttm_eu_backoff_reservation(&ticket, &list);
585 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) 603 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
586 amdgpu_gem_va_update_vm(adev, bo_va, args->operation); 604 amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
587 mutex_unlock(&fpriv->vm.mutex); 605 mutex_unlock(&fpriv->vm.mutex);
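The ioctl used to reserve just the one BO with amdgpu_bo_reserve(); it now reserves the BO and, for map operations, the VM page directory in one deadlock-free step through TTM's execbuf utilities, so amdgpu_vm_update_page_directory() can run against a reserved PD. A sketch of the reserve/backoff pattern with hypothetical names:

#include <linux/list.h>
#include <drm/ttm/ttm_execbuf_util.h>

static int my_reserve_pair(struct ttm_buffer_object *bo,
			   struct ttm_buffer_object *pd)
{
	struct ttm_validate_buffer tv, tv_pd;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = bo;
	tv.shared = true;	/* only a shared fence slot is needed */
	list_add(&tv.head, &list);

	tv_pd.bo = pd;
	tv_pd.shared = true;
	list_add(&tv_pd.head, &list);

	/* locks every BO on the list, resolving lock order via ww_mutex */
	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		return r;

	/* ... work on the reserved buffers ... */

	ttm_eu_backoff_reservation(&ticket, &list);
	return 0;
}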
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index e65987743871..9e25edafa721 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -62,7 +62,7 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
62 int r; 62 int r;
63 63
64 if (size) { 64 if (size) {
65 r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo, 65 r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
66 &ib->sa_bo, size, 256); 66 &ib->sa_bo, size, 256);
67 if (r) { 67 if (r) {
68 dev_err(adev->dev, "failed to get a new IB (%d)\n", r); 68 dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
@@ -216,7 +216,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
216 } 216 }
217 217
218 if (ib->vm) 218 if (ib->vm)
219 amdgpu_vm_fence(adev, ib->vm, ib->fence); 219 amdgpu_vm_fence(adev, ib->vm, &ib->fence->base);
220 220
221 amdgpu_ring_unlock_commit(ring); 221 amdgpu_ring_unlock_commit(ring);
222 return 0; 222 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 3c2ff4567798..ea756e77b023 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -189,10 +189,9 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
189 struct amdgpu_sa_manager *sa_manager); 189 struct amdgpu_sa_manager *sa_manager);
190int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev, 190int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
191 struct amdgpu_sa_manager *sa_manager); 191 struct amdgpu_sa_manager *sa_manager);
192int amdgpu_sa_bo_new(struct amdgpu_device *adev, 192int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
193 struct amdgpu_sa_manager *sa_manager, 193 struct amdgpu_sa_bo **sa_bo,
194 struct amdgpu_sa_bo **sa_bo, 194 unsigned size, unsigned align);
195 unsigned size, unsigned align);
196void amdgpu_sa_bo_free(struct amdgpu_device *adev, 195void amdgpu_sa_bo_free(struct amdgpu_device *adev,
197 struct amdgpu_sa_bo **sa_bo, 196 struct amdgpu_sa_bo **sa_bo,
198 struct fence *fence); 197 struct fence *fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 0212b31dc194..8b88edb0434b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -311,8 +311,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
311 return false; 311 return false;
312} 312}
313 313
314int amdgpu_sa_bo_new(struct amdgpu_device *adev, 314int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
315 struct amdgpu_sa_manager *sa_manager,
316 struct amdgpu_sa_bo **sa_bo, 315 struct amdgpu_sa_bo **sa_bo,
317 unsigned size, unsigned align) 316 unsigned size, unsigned align)
318{ 317{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index dcf4a8aca680..438c05254695 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -26,6 +26,7 @@
26#include <linux/sched.h> 26#include <linux/sched.h>
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include "amdgpu.h" 28#include "amdgpu.h"
29#include "amdgpu_trace.h"
29 30
30static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job) 31static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
31{ 32{
@@ -44,11 +45,8 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
44 return NULL; 45 return NULL;
45 } 46 }
46 job = to_amdgpu_job(sched_job); 47 job = to_amdgpu_job(sched_job);
47 mutex_lock(&job->job_lock); 48 trace_amdgpu_sched_run_job(job);
48 r = amdgpu_ib_schedule(job->adev, 49 r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner);
49 job->num_ibs,
50 job->ibs,
51 job->base.owner);
52 if (r) { 50 if (r) {
53 DRM_ERROR("Error scheduling IBs (%d)\n", r); 51 DRM_ERROR("Error scheduling IBs (%d)\n", r);
54 goto err; 52 goto err;
@@ -61,8 +59,6 @@ err:
61 if (job->free_job) 59 if (job->free_job)
62 job->free_job(job); 60 job->free_job(job);
63 61
64 mutex_unlock(&job->job_lock);
65 fence_put(&job->base.s_fence->base);
66 kfree(job); 62 kfree(job);
67 return fence ? &fence->base : NULL; 63 return fence ? &fence->base : NULL;
68} 64}
@@ -88,21 +84,19 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
88 return -ENOMEM; 84 return -ENOMEM;
89 job->base.sched = &ring->sched; 85 job->base.sched = &ring->sched;
90 job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; 86 job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
87 job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
88 if (!job->base.s_fence) {
89 kfree(job);
90 return -ENOMEM;
91 }
92 *f = fence_get(&job->base.s_fence->base);
93
91 job->adev = adev; 94 job->adev = adev;
92 job->ibs = ibs; 95 job->ibs = ibs;
93 job->num_ibs = num_ibs; 96 job->num_ibs = num_ibs;
94 job->base.owner = owner; 97 job->owner = owner;
95 mutex_init(&job->job_lock);
96 job->free_job = free_job; 98 job->free_job = free_job;
97 mutex_lock(&job->job_lock); 99 amd_sched_entity_push_job(&job->base);
98 r = amd_sched_entity_push_job(&job->base);
99 if (r) {
100 mutex_unlock(&job->job_lock);
101 kfree(job);
102 return r;
103 }
104 *f = fence_get(&job->base.s_fence->base);
105 mutex_unlock(&job->job_lock);
106 } else { 100 } else {
107 r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner); 101 r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
108 if (r) 102 if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
index ff3ca52ec6fe..1caaf201b708 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
@@ -40,7 +40,7 @@ int amdgpu_semaphore_create(struct amdgpu_device *adev,
40 if (*semaphore == NULL) { 40 if (*semaphore == NULL) {
41 return -ENOMEM; 41 return -ENOMEM;
42 } 42 }
43 r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo, 43 r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
44 &(*semaphore)->sa_bo, 8, 8); 44 &(*semaphore)->sa_bo, 8, 8);
45 if (r) { 45 if (r) {
46 kfree(*semaphore); 46 kfree(*semaphore);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a6697fd05217..dd005c336c97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -302,8 +302,14 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
302 return -EINVAL; 302 return -EINVAL;
303 } 303 }
304 304
305 if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores || 305 if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
306 (count >= AMDGPU_NUM_SYNCS)) { 306 r = fence_wait(&fence->base, true);
307 if (r)
308 return r;
309 continue;
310 }
311
312 if (count >= AMDGPU_NUM_SYNCS) {
307 /* not enough room, wait manually */ 313 /* not enough room, wait manually */
308 r = fence_wait(&fence->base, false); 314 r = fence_wait(&fence->base, false);
309 if (r) 315 if (r)
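With the scheduler enabled (or semaphores disabled) the sync code above now waits on the CPU instead of emitting a hardware semaphore, and it does so interruptibly. A one-function sketch of the distinction:

#include <linux/fence.h>

/* 'true' makes the wait interruptible: a pending signal aborts
 * with -ERESTARTSYS instead of blocking the task indefinitely,
 * while 'false' (as in the not-enough-room path below) cannot
 * be interrupted */
static long my_cpu_sync(struct fence *f)
{
	return fence_wait(f, true);
}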
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 76ecbaf72a2e..8f9834ab1bd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -48,6 +48,57 @@ TRACE_EVENT(amdgpu_cs,
48 __entry->fences) 48 __entry->fences)
49); 49);
50 50
51TRACE_EVENT(amdgpu_cs_ioctl,
52 TP_PROTO(struct amdgpu_job *job),
53 TP_ARGS(job),
54 TP_STRUCT__entry(
55 __field(struct amdgpu_device *, adev)
56 __field(struct amd_sched_job *, sched_job)
57 __field(struct amdgpu_ib *, ib)
58 __field(struct fence *, fence)
59 __field(char *, ring_name)
60 __field(u32, num_ibs)
61 ),
62
63 TP_fast_assign(
64 __entry->adev = job->adev;
65 __entry->sched_job = &job->base;
66 __entry->ib = job->ibs;
67 __entry->fence = &job->base.s_fence->base;
68 __entry->ring_name = job->ibs[0].ring->name;
69 __entry->num_ibs = job->num_ibs;
70 ),
71 TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
72 __entry->adev, __entry->sched_job, __entry->ib,
73 __entry->fence, __entry->ring_name, __entry->num_ibs)
74);
75
76TRACE_EVENT(amdgpu_sched_run_job,
77 TP_PROTO(struct amdgpu_job *job),
78 TP_ARGS(job),
79 TP_STRUCT__entry(
80 __field(struct amdgpu_device *, adev)
81 __field(struct amd_sched_job *, sched_job)
82 __field(struct amdgpu_ib *, ib)
83 __field(struct fence *, fence)
84 __field(char *, ring_name)
85 __field(u32, num_ibs)
86 ),
87
88 TP_fast_assign(
89 __entry->adev = job->adev;
90 __entry->sched_job = &job->base;
91 __entry->ib = job->ibs;
92 __entry->fence = &job->base.s_fence->base;
93 __entry->ring_name = job->ibs[0].ring->name;
94 __entry->num_ibs = job->num_ibs;
95 ),
96 TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
97 __entry->adev, __entry->sched_job, __entry->ib,
98 __entry->fence, __entry->ring_name, __entry->num_ibs)
99);
100
101
51TRACE_EVENT(amdgpu_vm_grab_id, 102TRACE_EVENT(amdgpu_vm_grab_id,
52 TP_PROTO(unsigned vmid, int ring), 103 TP_PROTO(unsigned vmid, int ring),
53 TP_ARGS(vmid, ring), 104 TP_ARGS(vmid, ring),
@@ -196,49 +247,6 @@ TRACE_EVENT(amdgpu_bo_list_set,
196 TP_printk("list=%p, bo=%p", __entry->list, __entry->bo) 247 TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
197); 248);
198 249
199DECLARE_EVENT_CLASS(amdgpu_fence_request,
200
201 TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
202
203 TP_ARGS(dev, ring, seqno),
204
205 TP_STRUCT__entry(
206 __field(u32, dev)
207 __field(int, ring)
208 __field(u32, seqno)
209 ),
210
211 TP_fast_assign(
212 __entry->dev = dev->primary->index;
213 __entry->ring = ring;
214 __entry->seqno = seqno;
215 ),
216
217 TP_printk("dev=%u, ring=%d, seqno=%u",
218 __entry->dev, __entry->ring, __entry->seqno)
219);
220
221DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_emit,
222
223 TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
224
225 TP_ARGS(dev, ring, seqno)
226);
227
228DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_begin,
229
230 TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
231
232 TP_ARGS(dev, ring, seqno)
233);
234
235DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_end,
236
237 TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
238
239 TP_ARGS(dev, ring, seqno)
240);
241
242DECLARE_EVENT_CLASS(amdgpu_semaphore_request, 250DECLARE_EVENT_CLASS(amdgpu_semaphore_request,
243 251
244 TP_PROTO(int ring, struct amdgpu_semaphore *sem), 252 TP_PROTO(int ring, struct amdgpu_semaphore *sem),
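The two new TRACE_EVENTs above replace the old amdgpu_fence_request class with job-centric events that log the scheduler job and its fences. For reference, a TRACE_EVENT has four parts: prototype, stored fields, assignment, and format string. A stripped-down sketch with a hypothetical my_job type (the usual TRACE_SYSTEM/CREATE_TRACE_POINTS header boilerplate is omitted):

#include <linux/tracepoint.h>

struct my_job { u32 num_ibs; };

TRACE_EVENT(my_run_job,
	TP_PROTO(struct my_job *job),
	TP_ARGS(job),
	TP_STRUCT__entry(
		__field(struct my_job *, job)
		__field(u32, num_ibs)
	),
	TP_fast_assign(
		__entry->job = job;
		__entry->num_ibs = job->num_ibs;
	),
	TP_printk("job=%p, num_ibs=%u", __entry->job, __entry->num_ibs)
);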
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 81bb8e9fc26d..d4bac5f49939 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1073,10 +1073,10 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
1073 ret = drm_mm_dump_table(m, mm); 1073 ret = drm_mm_dump_table(m, mm);
1074 spin_unlock(&glob->lru_lock); 1074 spin_unlock(&glob->lru_lock);
1075 if (ttm_pl == TTM_PL_VRAM) 1075 if (ttm_pl == TTM_PL_VRAM)
1076 seq_printf(m, "man size:%llu pages, ram usage:%luMB, vis usage:%luMB\n", 1076 seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
1077 adev->mman.bdev.man[ttm_pl].size, 1077 adev->mman.bdev.man[ttm_pl].size,
1078 atomic64_read(&adev->vram_usage) >> 20, 1078 (u64)atomic64_read(&adev->vram_usage) >> 20,
1079 atomic64_read(&adev->vram_vis_usage) >> 20); 1079 (u64)atomic64_read(&adev->vram_vis_usage) >> 20);
1080 return ret; 1080 return ret;
1081} 1081}
1082 1082
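The seq_printf fix above works around atomic64_read() returning long on some architectures and long long on others: casting to u64 and printing with %llu is portable everywhere. A sketch:

#include <linux/atomic.h>
#include <linux/printk.h>

static atomic64_t my_usage = ATOMIC64_INIT(0);

static void my_dump_usage(void)
{
	/* the cast keeps the format string correct regardless of
	 * how the architecture defines atomic64_t's counter */
	pr_info("usage: %lluMB\n", (u64)atomic64_read(&my_usage) >> 20);
}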
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 633a32a48560..159ce54bbd8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -143,10 +143,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
143 unsigned i; 143 unsigned i;
144 144
145 /* check if the id is still valid */ 145 /* check if the id is still valid */
146 if (vm_id->id && vm_id->last_id_use && 146 if (vm_id->id) {
147 vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) { 147 unsigned id = vm_id->id;
148 trace_amdgpu_vm_grab_id(vm_id->id, ring->idx); 148 long owner;
149 return 0; 149
150 owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
151 if (owner == (long)vm) {
152 trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
153 return 0;
154 }
150 } 155 }
151 156
152 /* we definitely need to flush */ 157 /* we definitely need to flush */
@@ -154,7 +159,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
154 159
155 /* skip over VMID 0, since it is the system VM */ 160 /* skip over VMID 0, since it is the system VM */
156 for (i = 1; i < adev->vm_manager.nvm; ++i) { 161 for (i = 1; i < adev->vm_manager.nvm; ++i) {
157 struct fence *fence = adev->vm_manager.active[i]; 162 struct fence *fence = adev->vm_manager.ids[i].active;
158 struct amdgpu_ring *fring; 163 struct amdgpu_ring *fring;
159 164
160 if (fence == NULL) { 165 if (fence == NULL) {
@@ -176,7 +181,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
176 if (choices[i]) { 181 if (choices[i]) {
177 struct fence *fence; 182 struct fence *fence;
178 183
179 fence = adev->vm_manager.active[choices[i]]; 184 fence = adev->vm_manager.ids[choices[i]].active;
180 vm_id->id = choices[i]; 185 vm_id->id = choices[i];
181 186
182 trace_amdgpu_vm_grab_id(choices[i], ring->idx); 187 trace_amdgpu_vm_grab_id(choices[i], ring->idx);
@@ -207,24 +212,21 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
207 uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); 212 uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
208 struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; 213 struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
209 struct fence *flushed_updates = vm_id->flushed_updates; 214 struct fence *flushed_updates = vm_id->flushed_updates;
210 bool is_earlier = false; 215 bool is_later;
211
212 if (flushed_updates && updates) {
213 BUG_ON(flushed_updates->context != updates->context);
214 is_earlier = (updates->seqno - flushed_updates->seqno <=
215 INT_MAX) ? true : false;
216 }
217 216
218 if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates || 217 if (!flushed_updates)
219 is_earlier) { 218 is_later = true;
219 else if (!updates)
220 is_later = false;
221 else
222 is_later = fence_is_later(updates, flushed_updates);
220 223
224 if (pd_addr != vm_id->pd_gpu_addr || is_later) {
221 trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id); 225 trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
222 if (is_earlier) { 226 if (is_later) {
223 vm_id->flushed_updates = fence_get(updates); 227 vm_id->flushed_updates = fence_get(updates);
224 fence_put(flushed_updates); 228 fence_put(flushed_updates);
225 } 229 }
226 if (!flushed_updates)
227 vm_id->flushed_updates = fence_get(updates);
228 vm_id->pd_gpu_addr = pd_addr; 230 vm_id->pd_gpu_addr = pd_addr;
229 amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr); 231 amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
230 } 232 }
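The open-coded wrap-around comparison of fence seqnos is replaced here by fence_is_later(), which implements the same signed-distance trick for two fences on one timeline. A sketch of the resulting three-way decision; the names are hypothetical:

#include <linux/fence.h>

static bool my_needs_flush(struct fence *updates,
			   struct fence *flushed_updates)
{
	if (!flushed_updates)
		return true;	/* nothing has been flushed yet */
	if (!updates)
		return false;	/* nothing new to flush */

	/* valid only if both fences share a context (timeline) */
	return fence_is_later(updates, flushed_updates);
}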
@@ -244,16 +246,14 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
244 */ 246 */
245void amdgpu_vm_fence(struct amdgpu_device *adev, 247void amdgpu_vm_fence(struct amdgpu_device *adev,
246 struct amdgpu_vm *vm, 248 struct amdgpu_vm *vm,
247 struct amdgpu_fence *fence) 249 struct fence *fence)
248{ 250{
249 unsigned ridx = fence->ring->idx; 251 struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
250 unsigned vm_id = vm->ids[ridx].id; 252 unsigned vm_id = vm->ids[ring->idx].id;
251
252 fence_put(adev->vm_manager.active[vm_id]);
253 adev->vm_manager.active[vm_id] = fence_get(&fence->base);
254 253
255 fence_put(vm->ids[ridx].last_id_use); 254 fence_put(adev->vm_manager.ids[vm_id].active);
256 vm->ids[ridx].last_id_use = fence_get(&fence->base); 255 adev->vm_manager.ids[vm_id].active = fence_get(fence);
256 atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
257} 257}
258 258
259/** 259/**
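VM ID ownership is now recorded as an atomic_long_t holding the owning vm pointer, which lets amdgpu_vm_grab_id() test ownership locklessly and lets teardown release the ID only if it still belongs to this VM. A sketch of the three operations with hypothetical names:

#include <linux/atomic.h>

struct my_vmid {
	atomic_long_t owner;	/* (long)vm pointer, 0 means free */
};

static bool my_vmid_is_ours(struct my_vmid *id, void *vm)
{
	return atomic_long_read(&id->owner) == (long)vm;
}

static void my_vmid_take(struct my_vmid *id, void *vm)
{
	atomic_long_set(&id->owner, (long)vm);
}

static void my_vmid_drop(struct my_vmid *id, void *vm)
{
	/* release only if we still own the ID; another VM may
	 * have grabbed it in the meantime */
	atomic_long_cmpxchg(&id->owner, (long)vm, 0);
}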
@@ -332,6 +332,8 @@ int amdgpu_vm_free_job(struct amdgpu_job *job)
332 * 332 *
333 * @adev: amdgpu_device pointer 333 * @adev: amdgpu_device pointer
334 * @bo: bo to clear 334 * @bo: bo to clear
335 *
 336 * The bo must be reserved before calling this function.
335 */ 337 */
336static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, 338static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
337 struct amdgpu_bo *bo) 339 struct amdgpu_bo *bo)
@@ -343,24 +345,20 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
343 uint64_t addr; 345 uint64_t addr;
344 int r; 346 int r;
345 347
346 r = amdgpu_bo_reserve(bo, false);
347 if (r)
348 return r;
349
350 r = reservation_object_reserve_shared(bo->tbo.resv); 348 r = reservation_object_reserve_shared(bo->tbo.resv);
351 if (r) 349 if (r)
352 return r; 350 return r;
353 351
354 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 352 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
355 if (r) 353 if (r)
356 goto error_unreserve; 354 goto error;
357 355
358 addr = amdgpu_bo_gpu_offset(bo); 356 addr = amdgpu_bo_gpu_offset(bo);
359 entries = amdgpu_bo_size(bo) / 8; 357 entries = amdgpu_bo_size(bo) / 8;
360 358
361 ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); 359 ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
362 if (!ib) 360 if (!ib)
363 goto error_unreserve; 361 goto error;
364 362
365 r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib); 363 r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
366 if (r) 364 if (r)
@@ -378,16 +376,14 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
378 if (!r) 376 if (!r)
379 amdgpu_bo_fence(bo, fence, true); 377 amdgpu_bo_fence(bo, fence, true);
380 fence_put(fence); 378 fence_put(fence);
381 if (amdgpu_enable_scheduler) { 379 if (amdgpu_enable_scheduler)
382 amdgpu_bo_unreserve(bo);
383 return 0; 380 return 0;
384 } 381
385error_free: 382error_free:
386 amdgpu_ib_free(adev, ib); 383 amdgpu_ib_free(adev, ib);
387 kfree(ib); 384 kfree(ib);
388 385
389error_unreserve: 386error:
390 amdgpu_bo_unreserve(bo);
391 return r; 387 return r;
392} 388}
393 389
@@ -989,7 +985,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
989 * Add a mapping of the BO at the specified addr into the VM. 985 * Add a mapping of the BO at the specified addr into the VM.
990 * Returns 0 for success, error for failure. 986 * Returns 0 for success, error for failure.
991 * 987 *
992 * Object has to be reserved and gets unreserved by this function! 988 * Object has to be reserved and unreserved outside!
993 */ 989 */
994int amdgpu_vm_bo_map(struct amdgpu_device *adev, 990int amdgpu_vm_bo_map(struct amdgpu_device *adev,
995 struct amdgpu_bo_va *bo_va, 991 struct amdgpu_bo_va *bo_va,
@@ -1005,30 +1001,27 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1005 1001
1006 /* validate the parameters */ 1002 /* validate the parameters */
1007 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || 1003 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1008 size == 0 || size & AMDGPU_GPU_PAGE_MASK) { 1004 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1009 amdgpu_bo_unreserve(bo_va->bo);
1010 return -EINVAL; 1005 return -EINVAL;
1011 }
1012 1006
1013 /* make sure object fit at this offset */ 1007 /* make sure object fit at this offset */
1014 eaddr = saddr + size; 1008 eaddr = saddr + size;
1015 if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) { 1009 if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
1016 amdgpu_bo_unreserve(bo_va->bo);
1017 return -EINVAL; 1010 return -EINVAL;
1018 }
1019 1011
1020 last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; 1012 last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
1021 if (last_pfn > adev->vm_manager.max_pfn) { 1013 if (last_pfn > adev->vm_manager.max_pfn) {
1022 dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n", 1014 dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
1023 last_pfn, adev->vm_manager.max_pfn); 1015 last_pfn, adev->vm_manager.max_pfn);
1024 amdgpu_bo_unreserve(bo_va->bo);
1025 return -EINVAL; 1016 return -EINVAL;
1026 } 1017 }
1027 1018
1028 saddr /= AMDGPU_GPU_PAGE_SIZE; 1019 saddr /= AMDGPU_GPU_PAGE_SIZE;
1029 eaddr /= AMDGPU_GPU_PAGE_SIZE; 1020 eaddr /= AMDGPU_GPU_PAGE_SIZE;
1030 1021
1022 spin_lock(&vm->it_lock);
1031 it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1); 1023 it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
1024 spin_unlock(&vm->it_lock);
1032 if (it) { 1025 if (it) {
1033 struct amdgpu_bo_va_mapping *tmp; 1026 struct amdgpu_bo_va_mapping *tmp;
1034 tmp = container_of(it, struct amdgpu_bo_va_mapping, it); 1027 tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
@@ -1036,14 +1029,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1036 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " 1029 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1037 "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr, 1030 "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
1038 tmp->it.start, tmp->it.last + 1); 1031 tmp->it.start, tmp->it.last + 1);
1039 amdgpu_bo_unreserve(bo_va->bo);
1040 r = -EINVAL; 1032 r = -EINVAL;
1041 goto error; 1033 goto error;
1042 } 1034 }
1043 1035
1044 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); 1036 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1045 if (!mapping) { 1037 if (!mapping) {
1046 amdgpu_bo_unreserve(bo_va->bo);
1047 r = -ENOMEM; 1038 r = -ENOMEM;
1048 goto error; 1039 goto error;
1049 } 1040 }
@@ -1055,7 +1046,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1055 mapping->flags = flags; 1046 mapping->flags = flags;
1056 1047
1057 list_add(&mapping->list, &bo_va->invalids); 1048 list_add(&mapping->list, &bo_va->invalids);
1049 spin_lock(&vm->it_lock);
1058 interval_tree_insert(&mapping->it, &vm->va); 1050 interval_tree_insert(&mapping->it, &vm->va);
1051 spin_unlock(&vm->it_lock);
1059 trace_amdgpu_vm_bo_map(bo_va, mapping); 1052 trace_amdgpu_vm_bo_map(bo_va, mapping);
1060 1053
1061 /* Make sure the page tables are allocated */ 1054 /* Make sure the page tables are allocated */
@@ -1067,8 +1060,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1067 if (eaddr > vm->max_pde_used) 1060 if (eaddr > vm->max_pde_used)
1068 vm->max_pde_used = eaddr; 1061 vm->max_pde_used = eaddr;
1069 1062
1070 amdgpu_bo_unreserve(bo_va->bo);
1071
1072 /* walk over the address space and allocate the page tables */ 1063 /* walk over the address space and allocate the page tables */
1073 for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { 1064 for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
1074 struct reservation_object *resv = vm->page_directory->tbo.resv; 1065 struct reservation_object *resv = vm->page_directory->tbo.resv;
@@ -1077,13 +1068,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1077 if (vm->page_tables[pt_idx].bo) 1068 if (vm->page_tables[pt_idx].bo)
1078 continue; 1069 continue;
1079 1070
1080 ww_mutex_lock(&resv->lock, NULL);
1081 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, 1071 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
1082 AMDGPU_GPU_PAGE_SIZE, true, 1072 AMDGPU_GPU_PAGE_SIZE, true,
1083 AMDGPU_GEM_DOMAIN_VRAM, 1073 AMDGPU_GEM_DOMAIN_VRAM,
1084 AMDGPU_GEM_CREATE_NO_CPU_ACCESS, 1074 AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
1085 NULL, resv, &pt); 1075 NULL, resv, &pt);
1086 ww_mutex_unlock(&resv->lock);
1087 if (r) 1076 if (r)
1088 goto error_free; 1077 goto error_free;
1089 1078
@@ -1101,7 +1090,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1101 1090
1102error_free: 1091error_free:
1103 list_del(&mapping->list); 1092 list_del(&mapping->list);
1093 spin_lock(&vm->it_lock);
1104 interval_tree_remove(&mapping->it, &vm->va); 1094 interval_tree_remove(&mapping->it, &vm->va);
1095 spin_unlock(&vm->it_lock);
1105 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1096 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1106 kfree(mapping); 1097 kfree(mapping);
1107 1098
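Every mutation of the vm->va interval tree is now bracketed by the new vm->it_lock spinlock, since lookups from the CS path can race with the map/unmap ioctls. The locking rule, as a sketch:

#include <linux/spinlock.h>
#include <linux/interval_tree.h>

struct my_vm {
	struct rb_root va;	/* interval tree of mappings */
	spinlock_t it_lock;	/* protects va */
};

static void my_insert(struct my_vm *vm, struct interval_tree_node *it)
{
	spin_lock(&vm->it_lock);
	interval_tree_insert(it, &vm->va);
	spin_unlock(&vm->it_lock);
}

static void my_remove(struct my_vm *vm, struct interval_tree_node *it)
{
	spin_lock(&vm->it_lock);
	interval_tree_remove(it, &vm->va);
	spin_unlock(&vm->it_lock);
}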
@@ -1119,7 +1110,7 @@ error:
1119 * Remove a mapping of the BO at the specified addr from the VM. 1110 * Remove a mapping of the BO at the specified addr from the VM.
1120 * Returns 0 for success, error for failure. 1111 * Returns 0 for success, error for failure.
1121 * 1112 *
1122 * Object has to be reserved and gets unreserved by this function! 1113 * Object has to be reserved and unreserved outside!
1123 */ 1114 */
1124int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, 1115int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1125 struct amdgpu_bo_va *bo_va, 1116 struct amdgpu_bo_va *bo_va,
@@ -1144,21 +1135,20 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1144 break; 1135 break;
1145 } 1136 }
1146 1137
1147 if (&mapping->list == &bo_va->invalids) { 1138 if (&mapping->list == &bo_va->invalids)
1148 amdgpu_bo_unreserve(bo_va->bo);
1149 return -ENOENT; 1139 return -ENOENT;
1150 }
1151 } 1140 }
1152 1141
1153 list_del(&mapping->list); 1142 list_del(&mapping->list);
1143 spin_lock(&vm->it_lock);
1154 interval_tree_remove(&mapping->it, &vm->va); 1144 interval_tree_remove(&mapping->it, &vm->va);
1145 spin_unlock(&vm->it_lock);
1155 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1146 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1156 1147
1157 if (valid) 1148 if (valid)
1158 list_add(&mapping->list, &vm->freed); 1149 list_add(&mapping->list, &vm->freed);
1159 else 1150 else
1160 kfree(mapping); 1151 kfree(mapping);
1161 amdgpu_bo_unreserve(bo_va->bo);
1162 1152
1163 return 0; 1153 return 0;
1164} 1154}
@@ -1187,13 +1177,17 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1187 1177
1188 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { 1178 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1189 list_del(&mapping->list); 1179 list_del(&mapping->list);
1180 spin_lock(&vm->it_lock);
1190 interval_tree_remove(&mapping->it, &vm->va); 1181 interval_tree_remove(&mapping->it, &vm->va);
1182 spin_unlock(&vm->it_lock);
1191 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1183 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1192 list_add(&mapping->list, &vm->freed); 1184 list_add(&mapping->list, &vm->freed);
1193 } 1185 }
1194 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { 1186 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1195 list_del(&mapping->list); 1187 list_del(&mapping->list);
1188 spin_lock(&vm->it_lock);
1196 interval_tree_remove(&mapping->it, &vm->va); 1189 interval_tree_remove(&mapping->it, &vm->va);
1190 spin_unlock(&vm->it_lock);
1197 kfree(mapping); 1191 kfree(mapping);
1198 } 1192 }
1199 1193
@@ -1241,7 +1235,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1241 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 1235 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1242 vm->ids[i].id = 0; 1236 vm->ids[i].id = 0;
1243 vm->ids[i].flushed_updates = NULL; 1237 vm->ids[i].flushed_updates = NULL;
1244 vm->ids[i].last_id_use = NULL;
1245 } 1238 }
1246 mutex_init(&vm->mutex); 1239 mutex_init(&vm->mutex);
1247 vm->va = RB_ROOT; 1240 vm->va = RB_ROOT;
@@ -1249,7 +1242,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1249 INIT_LIST_HEAD(&vm->invalidated); 1242 INIT_LIST_HEAD(&vm->invalidated);
1250 INIT_LIST_HEAD(&vm->cleared); 1243 INIT_LIST_HEAD(&vm->cleared);
1251 INIT_LIST_HEAD(&vm->freed); 1244 INIT_LIST_HEAD(&vm->freed);
1252 1245 spin_lock_init(&vm->it_lock);
1253 pd_size = amdgpu_vm_directory_size(adev); 1246 pd_size = amdgpu_vm_directory_size(adev);
1254 pd_entries = amdgpu_vm_num_pdes(adev); 1247 pd_entries = amdgpu_vm_num_pdes(adev);
1255 1248
@@ -1269,8 +1262,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1269 NULL, NULL, &vm->page_directory); 1262 NULL, NULL, &vm->page_directory);
1270 if (r) 1263 if (r)
1271 return r; 1264 return r;
1272 1265 r = amdgpu_bo_reserve(vm->page_directory, false);
1266 if (r) {
1267 amdgpu_bo_unref(&vm->page_directory);
1268 vm->page_directory = NULL;
1269 return r;
1270 }
1273 r = amdgpu_vm_clear_bo(adev, vm->page_directory); 1271 r = amdgpu_vm_clear_bo(adev, vm->page_directory);
1272 amdgpu_bo_unreserve(vm->page_directory);
1274 if (r) { 1273 if (r) {
1275 amdgpu_bo_unref(&vm->page_directory); 1274 amdgpu_bo_unref(&vm->page_directory);
1276 vm->page_directory = NULL; 1275 vm->page_directory = NULL;
@@ -1313,11 +1312,28 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1313 1312
1314 amdgpu_bo_unref(&vm->page_directory); 1313 amdgpu_bo_unref(&vm->page_directory);
1315 fence_put(vm->page_directory_fence); 1314 fence_put(vm->page_directory_fence);
1316
1317 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 1315 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1316 unsigned id = vm->ids[i].id;
1317
1318 atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
1319 (long)vm, 0);
1318 fence_put(vm->ids[i].flushed_updates); 1320 fence_put(vm->ids[i].flushed_updates);
1319 fence_put(vm->ids[i].last_id_use);
1320 } 1321 }
1321 1322
1322 mutex_destroy(&vm->mutex); 1323 mutex_destroy(&vm->mutex);
1323} 1324}
1325
1326/**
1327 * amdgpu_vm_manager_fini - cleanup VM manager
1328 *
1329 * @adev: amdgpu_device pointer
1330 *
1331 * Cleanup the VM manager and free resources.
1332 */
1333void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
1334{
1335 unsigned i;
1336
1337 for (i = 0; i < AMDGPU_NUM_VM; ++i)
1338 fence_put(adev->vm_manager.ids[i].active);
1339}
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index a1a35a5df8e7..57a2e347f04d 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -6569,12 +6569,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
6569 switch (state) { 6569 switch (state) {
6570 case AMDGPU_IRQ_STATE_DISABLE: 6570 case AMDGPU_IRQ_STATE_DISABLE:
6571 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); 6571 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6572 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 6572 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6573 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); 6573 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6574 break; 6574 break;
6575 case AMDGPU_IRQ_STATE_ENABLE: 6575 case AMDGPU_IRQ_STATE_ENABLE:
6576 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); 6576 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6577 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 6577 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6578 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); 6578 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6579 break; 6579 break;
6580 default: 6580 default:
@@ -6586,12 +6586,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
6586 switch (state) { 6586 switch (state) {
6587 case AMDGPU_IRQ_STATE_DISABLE: 6587 case AMDGPU_IRQ_STATE_DISABLE:
6588 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); 6588 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6589 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 6589 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6590 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); 6590 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6591 break; 6591 break;
6592 case AMDGPU_IRQ_STATE_ENABLE: 6592 case AMDGPU_IRQ_STATE_ENABLE:
6593 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); 6593 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6594 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 6594 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6595 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); 6595 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6596 break; 6596 break;
6597 default: 6597 default:
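The ci_dpm fix above corrects inverted mask polarity: the THERM_INT*_MASK bits suppress the thermal interrupt, so the DISABLE case must set them and the ENABLE case must clear them, the opposite of what the old code did. The underlying read-modify-write idiom, with hypothetical MMIO helpers and bit layout:

#include <linux/types.h>

/* hypothetical register accessors and layout */
u32 my_rreg(u32 reg);
void my_wreg(u32 reg, u32 val);

#define MY_THERMAL_INT		0x100
#define MY_THERM_INTH_MASK	(1u << 24)

static void my_set_therm_irq(bool enable)
{
	u32 v = my_rreg(MY_THERMAL_INT);

	if (enable)
		v &= ~MY_THERM_INTH_MASK;	/* unmask: let it fire */
	else
		v |= MY_THERM_INTH_MASK;	/* mask: suppress it */

	my_wreg(MY_THERMAL_INT, v);
}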
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 6776cf756d40..e1dcab98e249 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -268,7 +268,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
268 mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, 268 mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
269 mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100, 269 mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
270 mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100, 270 mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
271 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
272 mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, 271 mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
273 mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, 272 mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
274 mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, 273 mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
@@ -296,10 +295,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
296 mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200, 295 mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
297 mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, 296 mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
298 mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c, 297 mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
299 mmPCIE_INDEX, 0xffffffff, 0x0140001c,
300 mmPCIE_DATA, 0x000f0000, 0x00000000,
301 mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
302 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
303 mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, 298 mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
304}; 299};
305 300
@@ -1000,7 +995,7 @@ static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
1000 adev->gfx.config.max_cu_per_sh = 16; 995 adev->gfx.config.max_cu_per_sh = 16;
1001 adev->gfx.config.max_sh_per_se = 1; 996 adev->gfx.config.max_sh_per_se = 1;
1002 adev->gfx.config.max_backends_per_se = 4; 997 adev->gfx.config.max_backends_per_se = 4;
1003 adev->gfx.config.max_texture_channel_caches = 8; 998 adev->gfx.config.max_texture_channel_caches = 16;
1004 adev->gfx.config.max_gprs = 256; 999 adev->gfx.config.max_gprs = 256;
1005 adev->gfx.config.max_gs_threads = 32; 1000 adev->gfx.config.max_gs_threads = 32;
1006 adev->gfx.config.max_hw_contexts = 8; 1001 adev->gfx.config.max_hw_contexts = 8;
@@ -1613,6 +1608,296 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
1613 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); 1608 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
1614 } 1609 }
1615 case CHIP_FIJI: 1610 case CHIP_FIJI:
1611 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
1612 switch (reg_offset) {
1613 case 0:
1614 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1615 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1616 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1617 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1618 break;
1619 case 1:
1620 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1621 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1622 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1623 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1624 break;
1625 case 2:
1626 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1627 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1628 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1629 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1630 break;
1631 case 3:
1632 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1633 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1634 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1635 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1636 break;
1637 case 4:
1638 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1639 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1640 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1641 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1642 break;
1643 case 5:
1644 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1645 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1646 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1647 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1648 break;
1649 case 6:
1650 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1651 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1652 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1653 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1654 break;
1655 case 7:
1656 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1657 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1658 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1659 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1660 break;
1661 case 8:
1662 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1663 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
1664 break;
1665 case 9:
1666 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1667 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1668 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1669 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1670 break;
1671 case 10:
1672 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1673 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1674 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1675 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1676 break;
1677 case 11:
1678 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1679 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1680 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1681 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1682 break;
1683 case 12:
1684 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1685 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1686 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1687 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1688 break;
1689 case 13:
1690 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1691 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1692 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1693 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1694 break;
1695 case 14:
1696 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1697 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1698 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1699 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1700 break;
1701 case 15:
1702 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1703 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1704 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1705 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1706 break;
1707 case 16:
1708 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1709 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1710 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1711 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1712 break;
1713 case 17:
1714 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1715 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1716 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1717 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1718 break;
1719 case 18:
1720 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1721 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1722 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1723 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1724 break;
1725 case 19:
1726 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1727 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1728 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1729 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1730 break;
1731 case 20:
1732 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1733 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1734 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1735 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1736 break;
1737 case 21:
1738 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1739 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1740 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1741 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1742 break;
1743 case 22:
1744 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1745 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1746 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1747 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1748 break;
1749 case 23:
1750 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1751 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1752 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1753 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1754 break;
1755 case 24:
1756 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1757 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1758 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1759 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1760 break;
1761 case 25:
1762 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1763 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1764 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1765 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1766 break;
1767 case 26:
1768 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1769 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1770 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1771 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1772 break;
1773 case 27:
1774 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1775 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1776 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1777 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1778 break;
1779 case 28:
1780 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1781 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1782 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1783 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1784 break;
1785 case 29:
1786 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1787 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1788 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1789 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1790 break;
1791 case 30:
1792 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1793 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1794 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1795 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1796 break;
1797 default:
1798 gb_tile_moden = 0;
1799 break;
1800 }
1801 adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
1802 WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
1803 }
1804 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
1805 switch (reg_offset) {
1806 case 0:
1807 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1808 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1809 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1810 NUM_BANKS(ADDR_SURF_8_BANK));
1811 break;
1812 case 1:
1813 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1814 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1815 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1816 NUM_BANKS(ADDR_SURF_8_BANK));
1817 break;
1818 case 2:
1819 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1820 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1821 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1822 NUM_BANKS(ADDR_SURF_8_BANK));
1823 break;
1824 case 3:
1825 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1826 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1827 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1828 NUM_BANKS(ADDR_SURF_8_BANK));
1829 break;
1830 case 4:
1831 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1832 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1833 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1834 NUM_BANKS(ADDR_SURF_8_BANK));
1835 break;
1836 case 5:
1837 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1838 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1839 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1840 NUM_BANKS(ADDR_SURF_8_BANK));
1841 break;
1842 case 6:
1843 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1844 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1845 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1846 NUM_BANKS(ADDR_SURF_8_BANK));
1847 break;
1848 case 8:
1849 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1850 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1851 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1852 NUM_BANKS(ADDR_SURF_8_BANK));
1853 break;
1854 case 9:
1855 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1856 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1857 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1858 NUM_BANKS(ADDR_SURF_8_BANK));
1859 break;
1860 case 10:
1861 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1862 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1863 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1864 NUM_BANKS(ADDR_SURF_8_BANK));
1865 break;
1866 case 11:
1867 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1868 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1869 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1870 NUM_BANKS(ADDR_SURF_8_BANK));
1871 break;
1872 case 12:
1873 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1874 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1875 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1876 NUM_BANKS(ADDR_SURF_8_BANK));
1877 break;
1878 case 13:
1879 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1880 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1881 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1882 NUM_BANKS(ADDR_SURF_8_BANK));
1883 break;
1884 case 14:
1885 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1886 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1887 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1888 NUM_BANKS(ADDR_SURF_4_BANK));
1889 break;
1890 case 7:
1891 /* unused idx */
1892 continue;
1893 default:
1894 gb_tile_moden = 0;
1895 break;
1896 }
1897 adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
1898 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
1899 }
1900 break;
1616 case CHIP_TONGA: 1901 case CHIP_TONGA:
1617 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { 1902 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
1618 switch (reg_offset) { 1903 switch (reg_offset) {
@@ -2971,10 +3256,13 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
2971 amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); 3256 amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
2972 switch (adev->asic_type) { 3257 switch (adev->asic_type) {
2973 case CHIP_TONGA: 3258 case CHIP_TONGA:
2974 case CHIP_FIJI:
2975 amdgpu_ring_write(ring, 0x16000012); 3259 amdgpu_ring_write(ring, 0x16000012);
2976 amdgpu_ring_write(ring, 0x0000002A); 3260 amdgpu_ring_write(ring, 0x0000002A);
2977 break; 3261 break;
3262 case CHIP_FIJI:
3263 amdgpu_ring_write(ring, 0x3a00161a);
3264 amdgpu_ring_write(ring, 0x0000002e);
3265 break;
2978 case CHIP_TOPAZ: 3266 case CHIP_TOPAZ:
2979 case CHIP_CARRIZO: 3267 case CHIP_CARRIZO:
2980 amdgpu_ring_write(ring, 0x00000002); 3268 amdgpu_ring_write(ring, 0x00000002);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 85bbcdc73fff..7427d8cd4c43 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -40,7 +40,7 @@
40static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); 40static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
41static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); 41static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
42 42
43MODULE_FIRMWARE("radeon/boniare_mc.bin"); 43MODULE_FIRMWARE("radeon/bonaire_mc.bin");
44MODULE_FIRMWARE("radeon/hawaii_mc.bin"); 44MODULE_FIRMWARE("radeon/hawaii_mc.bin");
45 45
46/** 46/**
@@ -501,6 +501,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
501 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); 501 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
502 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); 502 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
503 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); 503 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
504 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
504 WREG32(mmVM_L2_CNTL, tmp); 505 WREG32(mmVM_L2_CNTL, tmp);
505 tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); 506 tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
506 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); 507 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
@@ -960,12 +961,10 @@ static int gmc_v7_0_sw_init(void *handle)
960 961
961static int gmc_v7_0_sw_fini(void *handle) 962static int gmc_v7_0_sw_fini(void *handle)
962{ 963{
963 int i;
964 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 964 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
965 965
966 if (adev->vm_manager.enabled) { 966 if (adev->vm_manager.enabled) {
967 for (i = 0; i < AMDGPU_NUM_VM; ++i) 967 amdgpu_vm_manager_fini(adev);
968 fence_put(adev->vm_manager.active[i]);
969 gmc_v7_0_vm_fini(adev); 968 gmc_v7_0_vm_fini(adev);
970 adev->vm_manager.enabled = false; 969 adev->vm_manager.enabled = false;
971 } 970 }
@@ -1010,12 +1009,10 @@ static int gmc_v7_0_hw_fini(void *handle)
1010 1009
1011static int gmc_v7_0_suspend(void *handle) 1010static int gmc_v7_0_suspend(void *handle)
1012{ 1011{
1013 int i;
1014 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1012 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1015 1013
1016 if (adev->vm_manager.enabled) { 1014 if (adev->vm_manager.enabled) {
1017 for (i = 0; i < AMDGPU_NUM_VM; ++i) 1015 amdgpu_vm_manager_fini(adev);
1018 fence_put(adev->vm_manager.active[i]);
1019 gmc_v7_0_vm_fini(adev); 1016 gmc_v7_0_vm_fini(adev);
1020 adev->vm_manager.enabled = false; 1017 adev->vm_manager.enabled = false;
1021 } 1018 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1bcc4e74e3b4..cb0e50ebb528 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -629,6 +629,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
 	WREG32(mmVM_L2_CNTL, tmp);
 	tmp = RREG32(mmVM_L2_CNTL2);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
@@ -979,12 +980,10 @@ static int gmc_v8_0_sw_init(void *handle)
 
 static int gmc_v8_0_sw_fini(void *handle)
 {
-	int i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev->vm_manager.enabled) {
-		for (i = 0; i < AMDGPU_NUM_VM; ++i)
-			fence_put(adev->vm_manager.active[i]);
+		amdgpu_vm_manager_fini(adev);
 		gmc_v8_0_vm_fini(adev);
 		adev->vm_manager.enabled = false;
 	}
@@ -1031,12 +1030,10 @@ static int gmc_v8_0_hw_fini(void *handle)
 
 static int gmc_v8_0_suspend(void *handle)
 {
-	int i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev->vm_manager.enabled) {
-		for (i = 0; i < AMDGPU_NUM_VM; ++i)
-			fence_put(adev->vm_manager.active[i]);
+		amdgpu_vm_manager_fini(adev);
 		gmc_v8_0_vm_fini(adev);
 		adev->vm_manager.enabled = false;
 	}
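Both gmc hunks above make the same substitution: the open-coded fence_put() loop over AMDGPU_NUM_VM moves behind a single amdgpu_vm_manager_fini() call, so the teardown logic lives in one place instead of four. A minimal sketch of what such a helper would centralize, inferred purely from the loop being removed here (the real body lives in amdgpu_vm.c, which is not part of this diff):

    /* Sketch only, reconstructed from the removed loop above; not the
     * actual amdgpu_vm.c implementation. Releasing every per-VM fence
     * in one helper keeps the IP-specific sw_fini/suspend paths from
     * duplicating, and eventually diverging on, the same cleanup.
     */
    void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
    {
        unsigned i;

        for (i = 0; i < AMDGPU_NUM_VM; ++i)
            fence_put(adev->vm_manager.active[i]);
    }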
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index 144f50acc971..c89dc777768f 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -16,6 +16,8 @@ TRACE_EVENT(amd_sched_job,
 	    TP_ARGS(sched_job),
 	    TP_STRUCT__entry(
 			     __field(struct amd_sched_entity *, entity)
+			     __field(struct amd_sched_job *, sched_job)
+			     __field(struct fence *, fence)
 			     __field(const char *, name)
 			     __field(u32, job_count)
 			     __field(int, hw_job_count)
@@ -23,16 +25,32 @@ TRACE_EVENT(amd_sched_job,
 
 	    TP_fast_assign(
 			   __entry->entity = sched_job->s_entity;
+			   __entry->sched_job = sched_job;
+			   __entry->fence = &sched_job->s_fence->base;
 			   __entry->name = sched_job->sched->name;
 			   __entry->job_count = kfifo_len(
 				   &sched_job->s_entity->job_queue) / sizeof(sched_job);
 			   __entry->hw_job_count = atomic_read(
 				   &sched_job->sched->hw_rq_count);
 			   ),
-	    TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d",
-		      __entry->entity, __entry->name, __entry->job_count,
-		      __entry->hw_job_count)
+	    TP_printk("entity=%p, sched job=%p, fence=%p, ring=%s, job count:%u, hw job count:%d",
+		      __entry->entity, __entry->sched_job, __entry->fence, __entry->name,
+		      __entry->job_count, __entry->hw_job_count)
 );
+
+TRACE_EVENT(amd_sched_process_job,
+	    TP_PROTO(struct amd_sched_fence *fence),
+	    TP_ARGS(fence),
+	    TP_STRUCT__entry(
+		    __field(struct fence *, fence)
+		    ),
+
+	    TP_fast_assign(
+		    __entry->fence = &fence->base;
+		    ),
+	    TP_printk("fence=%p signaled", __entry->fence)
+);
+
 #endif
 
 /* This part must be outside protection */
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 89619a5a4289..ea30d6ad4c13 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -34,6 +34,9 @@ static struct amd_sched_job *
 amd_sched_entity_pop_job(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
 
+struct kmem_cache *sched_fence_slab;
+atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
+
 /* Initialize a given run queue struct */
 static void amd_sched_rq_init(struct amd_sched_rq *rq)
 {
@@ -273,22 +276,13 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
  *
  * Returns 0 for success, negative error code otherwise.
  */
-int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
+void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 {
 	struct amd_sched_entity *entity = sched_job->s_entity;
-	struct amd_sched_fence *fence = amd_sched_fence_create(
-		entity, sched_job->owner);
-
-	if (!fence)
-		return -ENOMEM;
-
-	fence_get(&fence->base);
-	sched_job->s_fence = fence;
 
 	wait_event(entity->sched->job_scheduled,
 		   amd_sched_entity_in(sched_job));
 	trace_amd_sched_job(sched_job);
-	return 0;
 }
 
 /**
@@ -343,6 +337,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 		list_del_init(&s_fence->list);
 		spin_unlock_irqrestore(&sched->fence_list_lock, flags);
 	}
+	trace_amd_sched_process_job(s_fence);
 	fence_put(&s_fence->base);
 	wake_up_interruptible(&sched->wake_up_worker);
 }
@@ -450,6 +445,13 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
 	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
 	atomic_set(&sched->hw_rq_count, 0);
+	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
+		sched_fence_slab = kmem_cache_create(
+			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+			SLAB_HWCACHE_ALIGN, NULL);
+		if (!sched_fence_slab)
+			return -ENOMEM;
+	}
 
 	/* Each scheduler will run on a seperate kernel thread */
 	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -470,4 +472,6 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
 {
 	if (sched->thread)
 		kthread_stop(sched->thread);
+	if (atomic_dec_and_test(&sched_fence_slab_ref))
+		kmem_cache_destroy(sched_fence_slab);
 }
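The init/fini hunks above implement a reference-counted singleton for the fence slab: the first scheduler instance to come up creates the kmem_cache, the last one down destroys it, and atomic_inc_return()/atomic_dec_and_test() decide "first" and "last" without a lock. The same pattern in isolation, as a sketch using hypothetical names:

    /* Sketch of the shared-cache lifetime pattern used above; "cache"
     * and "cache_ref" are illustrative names, not kernel symbols.
     */
    static struct kmem_cache *cache;
    static atomic_t cache_ref = ATOMIC_INIT(0);

    static int cache_get(void)
    {
        /* returns 1 only for the caller that took the count 0 -> 1 */
        if (atomic_inc_return(&cache_ref) == 1) {
            cache = kmem_cache_create("example_cache", 64, 0,
                                      SLAB_HWCACHE_ALIGN, NULL);
            if (!cache)
                return -ENOMEM;
        }
        return 0;
    }

    static void cache_put(void)
    {
        /* true only for the caller that took the count 1 -> 0 */
        if (atomic_dec_and_test(&cache_ref))
            kmem_cache_destroy(cache);
    }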
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 929e9aced041..939692b14f4b 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -30,6 +30,9 @@
 struct amd_gpu_scheduler;
 struct amd_sched_rq;
 
+extern struct kmem_cache *sched_fence_slab;
+extern atomic_t sched_fence_slab_ref;
+
 /**
  * A scheduler entity is a wrapper around a job queue or a group
  * of other entities. Entities take turns emitting jobs from their
@@ -76,7 +79,6 @@ struct amd_sched_job {
 	struct amd_gpu_scheduler *sched;
 	struct amd_sched_entity *s_entity;
 	struct amd_sched_fence *s_fence;
-	void *owner;
 };
 
 extern const struct fence_ops amd_sched_fence_ops;
@@ -128,7 +130,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 			  uint32_t jobs);
 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			   struct amd_sched_entity *entity);
-int amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
 
 struct amd_sched_fence *amd_sched_fence_create(
 	struct amd_sched_entity *s_entity, void *owner);
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index d802638094f4..8d2130b9ff05 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -32,7 +32,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
 	struct amd_sched_fence *fence = NULL;
 	unsigned seq;
 
-	fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL);
+	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
 	if (fence == NULL)
 		return NULL;
 	fence->owner = owner;
@@ -71,11 +71,17 @@ static bool amd_sched_fence_enable_signaling(struct fence *f)
 	return true;
 }
 
+static void amd_sched_fence_release(struct fence *f)
+{
+	struct amd_sched_fence *fence = to_amd_sched_fence(f);
+	kmem_cache_free(sched_fence_slab, fence);
+}
+
 const struct fence_ops amd_sched_fence_ops = {
 	.get_driver_name = amd_sched_fence_get_driver_name,
 	.get_timeline_name = amd_sched_fence_get_timeline_name,
 	.enable_signaling = amd_sched_fence_enable_signaling,
 	.signaled = NULL,
 	.wait = fence_default_wait,
-	.release = NULL,
+	.release = amd_sched_fence_release,
 };
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 7bb3845d9974..aeee083c7f95 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1432,6 +1432,45 @@ static int atomic_set_prop(struct drm_atomic_state *state,
 	return ret;
 }
 
+/**
+ * drm_atomic_update_old_fb -- Unset old_fb pointers and set plane->fb pointers.
+ *
+ * @dev: drm device to check.
+ * @plane_mask: plane mask for planes that were updated.
+ * @ret: return value, can be -EDEADLK for a retry.
+ *
+ * Before doing an update plane->old_fb is set to plane->fb,
+ * but before dropping the locks old_fb needs to be set to NULL
+ * and plane->fb updated. This is a common operation for each
+ * atomic update, so this call is split off as a helper.
+ */
+void drm_atomic_clean_old_fb(struct drm_device *dev,
+			     unsigned plane_mask,
+			     int ret)
+{
+	struct drm_plane *plane;
+
+	/* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
+	 * locks (ie. while it is still safe to deref plane->state). We
+	 * need to do this here because the driver entry points cannot
+	 * distinguish between legacy and atomic ioctls.
+	 */
+	drm_for_each_plane_mask(plane, dev, plane_mask) {
+		if (ret == 0) {
+			struct drm_framebuffer *new_fb = plane->state->fb;
+			if (new_fb)
+				drm_framebuffer_reference(new_fb);
+			plane->fb = new_fb;
+			plane->crtc = plane->state->crtc;
+
+			if (plane->old_fb)
+				drm_framebuffer_unreference(plane->old_fb);
+		}
+		plane->old_fb = NULL;
+	}
+}
+EXPORT_SYMBOL(drm_atomic_clean_old_fb);
+
 int drm_mode_atomic_ioctl(struct drm_device *dev,
 			  void *data, struct drm_file *file_priv)
 {
@@ -1446,7 +1485,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 	struct drm_plane *plane;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
-	unsigned plane_mask = 0;
+	unsigned plane_mask;
 	int ret = 0;
 	unsigned int i, j;
 
@@ -1486,6 +1525,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 	state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
 
 retry:
+	plane_mask = 0;
 	copied_objs = 0;
 	copied_props = 0;
 
@@ -1576,24 +1616,7 @@ retry:
 	}
 
 out:
-	/* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
-	 * locks (ie. while it is still safe to deref plane->state). We
-	 * need to do this here because the driver entry points cannot
-	 * distinguish between legacy and atomic ioctls.
-	 */
-	drm_for_each_plane_mask(plane, dev, plane_mask) {
-		if (ret == 0) {
-			struct drm_framebuffer *new_fb = plane->state->fb;
-			if (new_fb)
-				drm_framebuffer_reference(new_fb);
-			plane->fb = new_fb;
-			plane->crtc = plane->state->crtc;
-
-			if (plane->old_fb)
-				drm_framebuffer_unreference(plane->old_fb);
-		}
-		plane->old_fb = NULL;
-	}
+	drm_atomic_clean_old_fb(dev, plane_mask, ret);
 
 	if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
 		/*
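drm_atomic_clean_old_fb() exists so every legacy entry point can share one tail: on success it commits the legacy plane->fb/plane->crtc pointers, on any failure it only clears plane->old_fb, and a -EDEADLK then retries with fresh state. A sketch of the calling convention the fb_helper conversions below follow (a single-plane, hypothetical wrapper, not code from this series):

    /* Sketch: the shape shared by drm_mode_atomic_ioctl() above and the
     * fb_helper paths below. plane_mask must be rebuilt on every pass,
     * because a -EDEADLK backoff drops the locks and restarts, and stale
     * bits would make the helper touch planes this attempt never staged.
     */
    static int example_legacy_update(struct drm_device *dev,
                                     struct drm_atomic_state *state,
                                     struct drm_plane *plane)
    {
        unsigned plane_mask;
        int ret;

    retry:
        plane_mask = 0;
        plane->old_fb = plane->fb;
        plane_mask |= 1 << drm_plane_index(plane);

        ret = drm_atomic_commit(state);

        drm_atomic_clean_old_fb(dev, plane_mask, ret);

        if (ret == -EDEADLK) {
            drm_atomic_state_clear(state);
            drm_modeset_backoff(state->acquire_ctx);
            goto retry;
        }
        return ret;
    }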
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 0c6f62168776..e5aec45bf985 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -210,6 +210,14 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
 		return -EINVAL;
 	}
 
+	if (!drm_encoder_crtc_ok(new_encoder, connector_state->crtc)) {
+		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d]\n",
+				 new_encoder->base.id,
+				 new_encoder->name,
+				 connector_state->crtc->base.id);
+		return -EINVAL;
+	}
+
 	if (new_encoder == connector_state->best_encoder) {
 		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
 				 connector->base.id,
@@ -1553,6 +1561,9 @@ retry:
 		goto fail;
 	}
 
+	if (plane_state->crtc && (plane == plane->crtc->cursor))
+		plane_state->state->legacy_cursor_update = true;
+
 	ret = __drm_atomic_helper_disable_plane(plane, plane_state);
 	if (ret != 0)
 		goto fail;
@@ -1605,9 +1616,6 @@ int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
 	plane_state->src_h = 0;
 	plane_state->src_w = 0;
 
-	if (plane->crtc && (plane == plane->crtc->cursor))
-		plane_state->state->legacy_cursor_update = true;
-
 	return 0;
 }
 
@@ -1741,6 +1749,7 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
 	struct drm_crtc_state *crtc_state;
 	struct drm_plane_state *primary_state;
 	struct drm_crtc *crtc = set->crtc;
+	int hdisplay, vdisplay;
 	int ret;
 
 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
@@ -1783,19 +1792,21 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
 	if (ret != 0)
 		return ret;
 
+	drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay);
+
 	drm_atomic_set_fb_for_plane(primary_state, set->fb);
 	primary_state->crtc_x = 0;
 	primary_state->crtc_y = 0;
-	primary_state->crtc_h = set->mode->vdisplay;
-	primary_state->crtc_w = set->mode->hdisplay;
+	primary_state->crtc_h = vdisplay;
+	primary_state->crtc_w = hdisplay;
 	primary_state->src_x = set->x << 16;
 	primary_state->src_y = set->y << 16;
 	if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
-		primary_state->src_h = set->mode->hdisplay << 16;
-		primary_state->src_w = set->mode->vdisplay << 16;
+		primary_state->src_h = hdisplay << 16;
+		primary_state->src_w = vdisplay << 16;
 	} else {
-		primary_state->src_h = set->mode->vdisplay << 16;
-		primary_state->src_w = set->mode->hdisplay << 16;
+		primary_state->src_h = vdisplay << 16;
+		primary_state->src_w = hdisplay << 16;
 	}
 
 commit:
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index e673c13c7391..69cbab5e5c81 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -342,6 +342,7 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper)
 	struct drm_plane *plane;
 	struct drm_atomic_state *state;
 	int i, ret;
+	unsigned plane_mask;
 
 	state = drm_atomic_state_alloc(dev);
 	if (!state)
@@ -349,11 +350,10 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper)
 
 	state->acquire_ctx = dev->mode_config.acquire_ctx;
 retry:
+	plane_mask = 0;
 	drm_for_each_plane(plane, dev) {
 		struct drm_plane_state *plane_state;
 
-		plane->old_fb = plane->fb;
-
 		plane_state = drm_atomic_get_plane_state(state, plane);
 		if (IS_ERR(plane_state)) {
 			ret = PTR_ERR(plane_state);
@@ -362,6 +362,9 @@ retry:
 
 		plane_state->rotation = BIT(DRM_ROTATE_0);
 
+		plane->old_fb = plane->fb;
+		plane_mask |= 1 << drm_plane_index(plane);
+
 		/* disable non-primary: */
 		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
 			continue;
@@ -382,19 +385,7 @@ retry:
 	ret = drm_atomic_commit(state);
 
 fail:
-	drm_for_each_plane(plane, dev) {
-		if (ret == 0) {
-			struct drm_framebuffer *new_fb = plane->state->fb;
-			if (new_fb)
-				drm_framebuffer_reference(new_fb);
-			plane->fb = new_fb;
-			plane->crtc = plane->state->crtc;
-
-			if (plane->old_fb)
-				drm_framebuffer_unreference(plane->old_fb);
-		}
-		plane->old_fb = NULL;
-	}
+	drm_atomic_clean_old_fb(dev, plane_mask, ret);
 
 	if (ret == -EDEADLK)
 		goto backoff;
@@ -1236,7 +1227,9 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
 	struct drm_fb_helper *fb_helper = info->par;
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_atomic_state *state;
+	struct drm_plane *plane;
 	int i, ret;
+	unsigned plane_mask;
 
 	state = drm_atomic_state_alloc(dev);
 	if (!state)
@@ -1244,19 +1237,22 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
 
 	state->acquire_ctx = dev->mode_config.acquire_ctx;
 retry:
+	plane_mask = 0;
 	for(i = 0; i < fb_helper->crtc_count; i++) {
 		struct drm_mode_set *mode_set;
 
 		mode_set = &fb_helper->crtc_info[i].mode_set;
 
-		mode_set->crtc->primary->old_fb = mode_set->crtc->primary->fb;
-
 		mode_set->x = var->xoffset;
 		mode_set->y = var->yoffset;
 
 		ret = __drm_atomic_helper_set_config(mode_set, state);
 		if (ret != 0)
 			goto fail;
+
+		plane = mode_set->crtc->primary;
+		plane_mask |= drm_plane_index(plane);
+		plane->old_fb = plane->fb;
 	}
 
 	ret = drm_atomic_commit(state);
@@ -1268,26 +1264,7 @@ retry:
 
 
 fail:
-	for(i = 0; i < fb_helper->crtc_count; i++) {
-		struct drm_mode_set *mode_set;
-		struct drm_plane *plane;
-
-		mode_set = &fb_helper->crtc_info[i].mode_set;
-		plane = mode_set->crtc->primary;
-
-		if (ret == 0) {
-			struct drm_framebuffer *new_fb = plane->state->fb;
-
-			if (new_fb)
-				drm_framebuffer_reference(new_fb);
-			plane->fb = new_fb;
-			plane->crtc = plane->state->crtc;
-
-			if (plane->old_fb)
-				drm_framebuffer_unreference(plane->old_fb);
-		}
-		plane->old_fb = NULL;
-	}
+	drm_atomic_clean_old_fb(dev, plane_mask, ret);
 
 	if (ret == -EDEADLK)
 		goto backoff;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8afda459a26e..95bb27de774f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -351,6 +351,8 @@ enum intel_dpll_id {
 	/* hsw/bdw */
 	DPLL_ID_WRPLL1 = 0,
 	DPLL_ID_WRPLL2 = 1,
+	DPLL_ID_SPLL = 2,
+
 	/* skl */
 	DPLL_ID_SKL_DPLL1 = 0,
 	DPLL_ID_SKL_DPLL2 = 1,
@@ -367,6 +369,7 @@ struct intel_dpll_hw_state {
 
 	/* hsw, bdw */
 	uint32_t wrpll;
+	uint32_t spll;
 
 	/* skl */
 	/*
@@ -2648,6 +2651,7 @@ struct i915_params {
 	int enable_cmd_parser;
 	/* leave bools at the end to not create holes */
 	bool enable_hangcheck;
+	bool fastboot;
 	bool prefault_disable;
 	bool load_detect_test;
 	bool reset;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5cf4a1998273..91bb1fc27420 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3809,6 +3809,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *file)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_caching *args = data;
 	struct drm_i915_gem_object *obj;
 	enum i915_cache_level level;
@@ -3837,9 +3838,11 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
+	intel_runtime_pm_get(dev_priv);
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
-		return ret;
+		goto rpm_put;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (&obj->base == NULL) {
@@ -3852,6 +3855,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
+rpm_put:
+	intel_runtime_pm_put(dev_priv);
+
 	return ret;
 }
 
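The set_caching change is a get/put bracketing fix: intel_runtime_pm_get() is taken before the ioctl can touch hardware, so every early exit after that point has to funnel through the matching put, which is why the mutex failure becomes a goto instead of a direct return. The shape, as a sketch:

    /* Sketch of the bracketing applied above: once the runtime-PM
     * reference is taken, no path may return without reaching rpm_put,
     * or the device stays pinned awake until a later balanced put.
     */
    static int example_ioctl(struct drm_device *dev)
    {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        intel_runtime_pm_get(dev_priv);

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
            goto rpm_put;    /* not "return ret;" */

        /* ... work that may touch hardware ... */

        mutex_unlock(&dev->struct_mutex);
    rpm_put:
        intel_runtime_pm_put(dev_priv);
        return ret;
    }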
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 96bb23865eac..4be13a5eb932 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -40,6 +40,7 @@ struct i915_params i915 __read_mostly = {
 	.preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
 	.disable_power_well = -1,
 	.enable_ips = 1,
+	.fastboot = 0,
 	.prefault_disable = 0,
 	.load_detect_test = 0,
 	.reset = true,
@@ -133,6 +134,10 @@ MODULE_PARM_DESC(disable_power_well,
 module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600);
 MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
 
+module_param_named(fastboot, i915.fastboot, bool, 0600);
+MODULE_PARM_DESC(fastboot,
+	"Try to skip unnecessary mode sets at boot time (default: false)");
+
 module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600);
 MODULE_PARM_DESC(prefault_disable,
 	"Disable page prefaulting for pread/pwrite/reloc (default:false). "
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index b84aaa0bb48a..6a2c76e367a5 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -138,18 +138,6 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
 	pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
 }
 
-static void hsw_crt_pre_enable(struct intel_encoder *encoder)
-{
-	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n");
-	I915_WRITE(SPLL_CTL,
-		   SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC);
-	POSTING_READ(SPLL_CTL);
-	udelay(20);
-}
-
 /* Note: The caller is required to filter out dpms modes not supported by the
  * platform. */
 static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -216,19 +204,6 @@ static void pch_post_disable_crt(struct intel_encoder *encoder)
 	intel_disable_crt(encoder);
 }
 
-static void hsw_crt_post_disable(struct intel_encoder *encoder)
-{
-	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t val;
-
-	DRM_DEBUG_KMS("Disabling SPLL\n");
-	val = I915_READ(SPLL_CTL);
-	WARN_ON(!(val & SPLL_PLL_ENABLE));
-	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
-	POSTING_READ(SPLL_CTL);
-}
-
 static void intel_enable_crt(struct intel_encoder *encoder)
 {
 	struct intel_crt *crt = intel_encoder_to_crt(encoder);
@@ -280,6 +255,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
 	if (HAS_DDI(dev)) {
 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
 		pipe_config->port_clock = 135000 * 2;
+
+		pipe_config->dpll_hw_state.wrpll = 0;
+		pipe_config->dpll_hw_state.spll =
+			SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
 	}
 
 	return true;
@@ -860,8 +839,6 @@ void intel_crt_init(struct drm_device *dev)
 	if (HAS_DDI(dev)) {
 		crt->base.get_config = hsw_crt_get_config;
 		crt->base.get_hw_state = intel_ddi_get_hw_state;
-		crt->base.pre_enable = hsw_crt_pre_enable;
-		crt->base.post_disable = hsw_crt_post_disable;
 	} else {
 		crt->base.get_config = intel_crt_get_config;
 		crt->base.get_hw_state = intel_crt_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b25e99a432fb..a6752a61d99f 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1286,6 +1286,18 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
 		}
 
 		crtc_state->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
+	} else if (crtc_state->ddi_pll_sel == PORT_CLK_SEL_SPLL) {
+		struct drm_atomic_state *state = crtc_state->base.state;
+		struct intel_shared_dpll_config *spll =
+			&intel_atomic_get_shared_dpll_state(state)[DPLL_ID_SPLL];
+
+		if (spll->crtc_mask &&
+		    WARN_ON(spll->hw_state.spll != crtc_state->dpll_hw_state.spll))
+			return false;
+
+		crtc_state->shared_dpll = DPLL_ID_SPLL;
+		spll->hw_state.spll = crtc_state->dpll_hw_state.spll;
+		spll->crtc_mask |= 1 << intel_crtc->pipe;
 	}
 
 	return true;
@@ -2437,7 +2449,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
 	}
 }
 
-static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
+static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
 			       struct intel_shared_dpll *pll)
 {
 	I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
@@ -2445,9 +2457,17 @@ static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
 	udelay(20);
 }
 
-static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
+static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
 				struct intel_shared_dpll *pll)
 {
+	I915_WRITE(SPLL_CTL, pll->config.hw_state.spll);
+	POSTING_READ(SPLL_CTL);
+	udelay(20);
+}
+
+static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
+				  struct intel_shared_dpll *pll)
+{
 	uint32_t val;
 
 	val = I915_READ(WRPLL_CTL(pll->id));
@@ -2455,9 +2475,19 @@ static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
 	POSTING_READ(WRPLL_CTL(pll->id));
 }
 
-static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
-				     struct intel_shared_dpll *pll,
-				     struct intel_dpll_hw_state *hw_state)
+static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
+				 struct intel_shared_dpll *pll)
+{
+	uint32_t val;
+
+	val = I915_READ(SPLL_CTL);
+	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+	POSTING_READ(SPLL_CTL);
+}
+
+static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
+				       struct intel_shared_dpll *pll,
+				       struct intel_dpll_hw_state *hw_state)
 {
 	uint32_t val;
 
@@ -2470,25 +2500,50 @@ static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 	return val & WRPLL_PLL_ENABLE;
 }
 
+static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
+				      struct intel_shared_dpll *pll,
+				      struct intel_dpll_hw_state *hw_state)
+{
+	uint32_t val;
+
+	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+		return false;
+
+	val = I915_READ(SPLL_CTL);
+	hw_state->spll = val;
+
+	return val & SPLL_PLL_ENABLE;
+}
+
+
 static const char * const hsw_ddi_pll_names[] = {
 	"WRPLL 1",
 	"WRPLL 2",
+	"SPLL"
 };
 
 static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
 {
 	int i;
 
-	dev_priv->num_shared_dpll = 2;
+	dev_priv->num_shared_dpll = 3;
 
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+	for (i = 0; i < 2; i++) {
 		dev_priv->shared_dplls[i].id = i;
 		dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
-		dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable;
-		dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable;
+		dev_priv->shared_dplls[i].disable = hsw_ddi_wrpll_disable;
+		dev_priv->shared_dplls[i].enable = hsw_ddi_wrpll_enable;
 		dev_priv->shared_dplls[i].get_hw_state =
-			hsw_ddi_pll_get_hw_state;
+			hsw_ddi_wrpll_get_hw_state;
 	}
+
+	/* SPLL is special, but needs to be initialized anyway.. */
+	dev_priv->shared_dplls[i].id = i;
+	dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
+	dev_priv->shared_dplls[i].disable = hsw_ddi_spll_disable;
+	dev_priv->shared_dplls[i].enable = hsw_ddi_spll_enable;
+	dev_priv->shared_dplls[i].get_hw_state = hsw_ddi_spll_get_hw_state;
+
 }
 
 static const char * const skl_ddi_pll_names[] = {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f62ffc04c21d..71860f8680f9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2646,11 +2646,13 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 		return;
 
 valid_fb:
-	plane_state->src_x = plane_state->src_y = 0;
+	plane_state->src_x = 0;
+	plane_state->src_y = 0;
 	plane_state->src_w = fb->width << 16;
 	plane_state->src_h = fb->height << 16;
 
-	plane_state->crtc_x = plane_state->src_y = 0;
+	plane_state->crtc_x = 0;
+	plane_state->crtc_y = 0;
 	plane_state->crtc_w = fb->width;
 	plane_state->crtc_h = fb->height;
 
@@ -4237,6 +4239,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 	struct intel_shared_dpll *pll;
 	struct intel_shared_dpll_config *shared_dpll;
 	enum intel_dpll_id i;
+	int max = dev_priv->num_shared_dpll;
 
 	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
 
@@ -4271,9 +4274,11 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 		WARN_ON(shared_dpll[i].crtc_mask);
 
 		goto found;
-	}
+	} else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
+		/* Do not consider SPLL */
+		max = 2;
 
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+	for (i = 0; i < max; i++) {
 		pll = &dev_priv->shared_dplls[i];
 
 		/* Only want to check enabled timings first */
@@ -9723,6 +9728,8 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
 	case PORT_CLK_SEL_WRPLL2:
 		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
 		break;
+	case PORT_CLK_SEL_SPLL:
+		pipe_config->shared_dpll = DPLL_ID_SPLL;
 	}
 }
 
@@ -12003,9 +12010,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 			      pipe_config->dpll_hw_state.cfgcr1,
 			      pipe_config->dpll_hw_state.cfgcr2);
 	} else if (HAS_DDI(dev)) {
-		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x\n",
+		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
 			      pipe_config->ddi_pll_sel,
-			      pipe_config->dpll_hw_state.wrpll);
+			      pipe_config->dpll_hw_state.wrpll,
+			      pipe_config->dpll_hw_state.spll);
 	} else {
 		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
 			      "fp0: 0x%x, fp1: 0x%x\n",
@@ -12528,6 +12536,7 @@ intel_pipe_config_compare(struct drm_device *dev,
 	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
 	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
 	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
+	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
 	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
@@ -13032,6 +13041,9 @@ static int intel_atomic_check(struct drm_device *dev,
 		struct intel_crtc_state *pipe_config =
 			to_intel_crtc_state(crtc_state);
 
+		memset(&to_intel_crtc(crtc)->atomic, 0,
+		       sizeof(struct intel_crtc_atomic_commit));
+
 		/* Catch I915_MODE_FLAG_INHERITED */
 		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
 			crtc_state->mode_changed = true;
@@ -13056,7 +13068,8 @@ static int intel_atomic_check(struct drm_device *dev,
 		if (ret)
 			return ret;
 
-		if (intel_pipe_config_compare(state->dev,
+		if (i915.fastboot &&
+		    intel_pipe_config_compare(state->dev,
 					to_intel_crtc_state(crtc->state),
 					pipe_config, true)) {
 			crtc_state->mode_changed = false;
@@ -14364,16 +14377,17 @@ static int intel_framebuffer_init(struct drm_device *dev,
 static struct drm_framebuffer *
 intel_user_framebuffer_create(struct drm_device *dev,
 			      struct drm_file *filp,
-			      struct drm_mode_fb_cmd2 *mode_cmd)
+			      struct drm_mode_fb_cmd2 *user_mode_cmd)
 {
 	struct drm_i915_gem_object *obj;
+	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
-						mode_cmd->handles[0]));
+						mode_cmd.handles[0]));
 	if (&obj->base == NULL)
 		return ERR_PTR(-ENOENT);
 
-	return intel_framebuffer_create(dev, mode_cmd, obj);
+	return intel_framebuffer_create(dev, &mode_cmd, obj);
 }
 
 #ifndef CONFIG_DRM_FBDEV_EMULATION
@@ -14705,6 +14719,9 @@ static struct intel_quirk intel_quirks[] = {
 	/* Apple Macbook 2,1 (Core 2 T7400) */
 	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
 
+	/* Apple Macbook 4,1 */
+	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
+
 	/* Toshiba CB35 Chromebook (Celeron 2955U) */
 	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d52a15df6917..071a76b9ac52 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4449,7 +4449,7 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
 	POSTING_READ(GEN6_RPNSWREQ);
 
 	dev_priv->rps.cur_freq = val;
-	trace_intel_gpu_freq_change(val * 50);
+	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
 }
 
 static void valleyview_set_rps(struct drm_device *dev, u8 val)
@@ -7255,7 +7255,8 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
 	if (IS_GEN9(dev_priv->dev))
-		return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER;
+		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
+					 GEN9_FREQ_SCALER);
 	else if (IS_CHERRYVIEW(dev_priv->dev))
 		return chv_gpu_freq(dev_priv, val);
 	else if (IS_VALLEYVIEW(dev_priv->dev))
@@ -7267,13 +7268,14 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
 	if (IS_GEN9(dev_priv->dev))
-		return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER;
+		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
+					 GT_FREQUENCY_MULTIPLIER);
 	else if (IS_CHERRYVIEW(dev_priv->dev))
 		return chv_freq_opcode(dev_priv, val);
 	else if (IS_VALLEYVIEW(dev_priv->dev))
 		return byt_freq_opcode(dev_priv, val);
 	else
-		return val / GT_FREQUENCY_MULTIPLIER;
+		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
 }
 
 struct request_boost {
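The intel_pm.c hunks trade C's truncating integer division for DIV_ROUND_CLOSEST(). On GEN9 the conversion factor is GT_FREQUENCY_MULTIPLIER / GEN9_FREQ_SCALER (50/3 in this era's headers), so truncation can under-report by almost a full step. A worked example with a simplified macro (the simplification assumes non-negative operands; the real macro in linux/kernel.h also handles negative values):

    /* Round-to-nearest division for x >= 0, d > 0.
     *   truncating:        (7 * 50) / 3                  == 116
     *   round-to-nearest:  DIV_ROUND_CLOSEST_(7 * 50, 3) == 117  (350/3 = 116.67)
     */
    #define DIV_ROUND_CLOSEST_(x, d)  (((x) + (d) / 2) / (d))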
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 4f2068fe5d88..a7bf6a90eae5 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -70,6 +70,11 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
 	BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev);
 	BUG_ON(pixels_current == pixels_prev);
 
+	if (!handle || !file_priv) {
+		mga_hide_cursor(mdev);
+		return 0;
+	}
+
 	obj = drm_gem_object_lookup(dev, file_priv, handle);
 	if (!obj)
 		return -ENOENT;
@@ -88,12 +93,6 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
 		goto out_unreserve1;
 	}
 
-	if (!handle) {
-		mga_hide_cursor(mdev);
-		ret = 0;
-		goto out1;
-	}
-
 	/* Move cursor buffers into VRAM if they aren't already */
 	if (!pixels_1->pin_count) {
 		ret = mgag200_bo_pin(pixels_1, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d3024883b844..84d45633d28c 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -221,11 +221,17 @@ int radeon_bo_create(struct radeon_device *rdev,
 	if (!(rdev->flags & RADEON_IS_PCIE))
 		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
 
+	/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
+	 * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
+	 */
+	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
+		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+
 #ifdef CONFIG_X86_32
 	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
 	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
 	 */
-	bo->flags &= ~RADEON_GEM_GTT_WC;
+	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
 #elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
 	/* Don't try to enable write-combining when it can't work, or things
 	 * may be slow
@@ -235,9 +241,10 @@ int radeon_bo_create(struct radeon_device *rdev,
 #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
 	 thanks to write-combining
 
-	DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
-		      "better performance thanks to write-combining\n");
-	bo->flags &= ~RADEON_GEM_GTT_WC;
+	if (bo->flags & RADEON_GEM_GTT_WC)
+		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
+			      "better performance thanks to write-combining\n");
+	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
 #endif
 
 	radeon_ttm_placement_from_domain(bo, domain);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 6d80dde23400..f4f03dcc1530 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1542,8 +1542,7 @@ int radeon_pm_late_init(struct radeon_device *rdev)
 		ret = device_create_file(rdev->dev, &dev_attr_power_method);
 		if (ret)
 			DRM_ERROR("failed to create device file for power method\n");
-		if (!ret)
-			rdev->pm.sysfs_initialized = true;
+		rdev->pm.sysfs_initialized = true;
 	}
 
 	mutex_lock(&rdev->pm.mutex);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index e72bf46042e0..a82b891ae1fe 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2927,7 +2927,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
-	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
 	{ 0, 0, 0, 0 },
 };
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 7a9f4768591e..265064c62d49 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -168,7 +168,7 @@ static int vc4_get_clock_select(struct drm_crtc *crtc)
 	struct drm_connector *connector;
 
 	drm_for_each_connector(connector, crtc->dev) {
-		if (connector && connector->state->crtc == crtc) {
+		if (connector->state->crtc == crtc) {
 			struct drm_encoder *encoder = connector->encoder;
 			struct vc4_encoder *vc4_encoder =
 				to_vc4_encoder(encoder);
@@ -401,7 +401,8 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
 		dlist_next++;
 
 	HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
-		  (u32 *)vc4_crtc->dlist - (u32 *)vc4->hvs->dlist);
+		  (u32 __iomem *)vc4_crtc->dlist -
+		  (u32 __iomem *)vc4->hvs->dlist);
 
 	/* Make the next display list start after ours. */
 	vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist);
@@ -591,14 +592,14 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
 	 * that will take too much.
 	 */
 	primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY);
-	if (!primary_plane) {
+	if (IS_ERR(primary_plane)) {
 		dev_err(dev, "failed to construct primary plane\n");
 		ret = PTR_ERR(primary_plane);
 		goto err;
 	}
 
 	cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
-	if (!cursor_plane) {
+	if (IS_ERR(cursor_plane)) {
 		dev_err(dev, "failed to construct cursor plane\n");
 		ret = PTR_ERR(cursor_plane);
 		goto err_primary;
 	}
 
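The vc4_crtc_bind() fix matters because vc4_plane_init() reports failure with an ERR_PTR()-encoded pointer, never NULL: `!primary_plane` is false for such a value, so the old check waved a poisoned pointer through and PTR_ERR() was never reached. The convention, sketched with a hypothetical constructor:

    /* Sketch: constructors returning ERR_PTR(-E...) must be checked with
     * IS_ERR(), not against NULL; an ERR_PTR value is a non-NULL pointer
     * in the top error range, so "!ptr" never fires. "struct thing" and
     * make_thing() are illustrative only.
     */
    #include <linux/err.h>

    struct thing *make_thing(void);  /* returns ERR_PTR() on failure */

    static int use_thing(void)
    {
        struct thing *t = make_thing();

        if (IS_ERR(t))
            return PTR_ERR(t);  /* recover the negative errno */

        /* ... t is a valid pointer here ... */
        return 0;
    }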
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 6e730605edcc..d5db9e0f3b73 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -259,7 +259,6 @@ static struct platform_driver vc4_platform_driver = {
 	.remove = vc4_platform_drm_remove,
 	.driver = {
 		.name = "vc4-drm",
-		.owner = THIS_MODULE,
 		.of_match_table = vc4_of_match,
 	},
 };
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index ab1673f672a4..8098c5b21ba4 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -75,10 +75,10 @@ void vc4_hvs_dump_state(struct drm_device *dev)
 	for (i = 0; i < 64; i += 4) {
 		DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n",
 			 i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D",
-			 ((uint32_t *)vc4->hvs->dlist)[i + 0],
-			 ((uint32_t *)vc4->hvs->dlist)[i + 1],
-			 ((uint32_t *)vc4->hvs->dlist)[i + 2],
-			 ((uint32_t *)vc4->hvs->dlist)[i + 3]);
+			 readl((u32 __iomem *)vc4->hvs->dlist + i + 0),
+			 readl((u32 __iomem *)vc4->hvs->dlist + i + 1),
+			 readl((u32 __iomem *)vc4->hvs->dlist + i + 2),
+			 readl((u32 __iomem *)vc4->hvs->dlist + i + 3));
 	}
 }
 
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index cdd8b10c0147..887f3caad0be 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -70,7 +70,7 @@ static bool plane_enabled(struct drm_plane_state *state)
 	return state->fb && state->crtc;
 }
 
-struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
+static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
 {
 	struct vc4_plane_state *vc4_state;
 
@@ -97,8 +97,8 @@ struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
 	return &vc4_state->base;
 }
 
-void vc4_plane_destroy_state(struct drm_plane *plane,
+static void vc4_plane_destroy_state(struct drm_plane *plane,
 			     struct drm_plane_state *state)
 {
 	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
 
@@ -108,7 +108,7 @@ void vc4_plane_destroy_state(struct drm_plane *plane,
 }
 
 /* Called during init to allocate the plane's atomic state. */
-void vc4_plane_reset(struct drm_plane *plane)
+static void vc4_plane_reset(struct drm_plane *plane)
 {
 	struct vc4_plane_state *vc4_state;
 
@@ -157,6 +157,16 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 	int crtc_w = state->crtc_w;
 	int crtc_h = state->crtc_h;
 
+	if (state->crtc_w << 16 != state->src_w ||
+	    state->crtc_h << 16 != state->src_h) {
+		/* We don't support scaling yet, which involves
+		 * allocating the LBM memory for scaling temporary
+		 * storage, and putting filter kernels in the HVS
+		 * context.
+		 */
+		return -EINVAL;
+	}
+
 	if (crtc_x < 0) {
 		offset += drm_format_plane_cpp(fb->pixel_format, 0) * -crtc_x;
 		crtc_w += crtc_x;
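The new vc4_plane_mode_set() guard works because atomic plane state keeps the source rectangle in 16.16 fixed point while the CRTC rectangle is in whole pixels; shifting crtc_w/crtc_h left by 16 puts both in the same units, and any mismatch means the commit asked for scaling that this hardware path cannot do yet. Sketched with hypothetical names:

    /* Sketch: drm_plane_state src_* fields are 16.16 fixed point,
     * crtc_* fields are integer pixels. Equality after the shift
     * means a 1:1 (unscaled) blit.
     */
    static bool is_unscaled(u32 src_w_16_16, u32 src_h_16_16,
                            u32 crtc_w, u32 crtc_h)
    {
        return src_w_16_16 == crtc_w << 16 &&
               src_h_16_16 == crtc_h << 16;
    }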
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 8b29949507d1..01a4f05c1642 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2481,7 +2481,7 @@ void wacom_setup_device_quirks(struct wacom *wacom)
 	if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
 		if (features->touch_max)
 			features->device_type |= WACOM_DEVICETYPE_TOUCH;
-		if (features->type >= INTUOSHT || features->type <= BAMBOO_PT)
+		if (features->type >= INTUOSHT && features->type <= BAMBOO_PT)
 			features->device_type |= WACOM_DEVICETYPE_PAD;
 
 		features->x_max = 4096;
@@ -3213,7 +3213,8 @@ static const struct wacom_features wacom_features_0x32F =
 	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
 static const struct wacom_features wacom_features_0x336 =
 	{ "Wacom DTU1141", 23472, 13203, 1023, 0,
-	  DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
+	  DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
 static const struct wacom_features wacom_features_0x57 =
 	{ "Wacom DTK2241", 95640, 54060, 2047, 63,
 	  DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
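
The one-character wacom fix above is worth dwelling on: given an enum in which INTUOSHT precedes BAMBOO_PT, the old test "type >= INTUOSHT || type <= BAMBOO_PT" is true for every possible value, so the PAD capability was set on devices outside the Bamboo range. A minimal, self-contained sketch of the two predicates; the enum values below are illustrative, not the driver's real ones:

    #include <stdio.h>

    /* Illustrative device-type ordering; the real driver defines these in
     * wacom_wac.h, with INTUOSHT..BAMBOO_PT forming a contiguous range. */
    enum { CINTIQ = 10, INTUOSHT = 20, BAMBOO_PT = 25, REMOTE = 30 };

    static int in_range_buggy(int type)
    {
        /* Tautology: every value is >= INTUOSHT or <= BAMBOO_PT. */
        return type >= INTUOSHT || type <= BAMBOO_PT;
    }

    static int in_range_fixed(int type)
    {
        return type >= INTUOSHT && type <= BAMBOO_PT;
    }

    int main(void)
    {
        int probes[] = { CINTIQ, INTUOSHT, BAMBOO_PT, REMOTE };
        for (unsigned i = 0; i < sizeof(probes) / sizeof(probes[0]); i++)
            printf("type %2d: buggy=%d fixed=%d\n", probes[i],
                   in_range_buggy(probes[i]), in_range_fixed(probes[i]));
        return 0;
    }

Running this prints buggy=1 for all four probes, while the fixed form accepts only the two in-range types.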
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 842b0043ad94..8f59f057cdf4 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -324,6 +324,7 @@ config SENSORS_APPLESMC
 config SENSORS_ARM_SCPI
 	tristate "ARM SCPI Sensors"
 	depends on ARM_SCPI_PROTOCOL
+	depends on THERMAL || !THERMAL_OF
 	help
 	  This driver provides support for temperature, voltage, current
 	  and power sensors available on ARM Ltd's SCP based platforms. The
@@ -1471,6 +1472,7 @@ config SENSORS_INA209
 config SENSORS_INA2XX
 	tristate "Texas Instruments INA219 and compatibles"
 	depends on I2C
+	select REGMAP_I2C
 	help
 	  If you say yes here you get support for INA219, INA220, INA226,
 	  INA230, and INA231 power monitor chips.
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 1f5e956941b1..0af7fd311979 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -537,7 +537,7 @@ static int applesmc_init_index(struct applesmc_registers *s)
 static int applesmc_init_smcreg_try(void)
 {
 	struct applesmc_registers *s = &smcreg;
-	bool left_light_sensor, right_light_sensor;
+	bool left_light_sensor = 0, right_light_sensor = 0;
 	unsigned int count;
 	u8 tmp[1];
 	int ret;
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 2c1241bbf9af..7e20567bc369 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -117,7 +117,7 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
 	struct scpi_ops *scpi_ops;
 	struct device *hwdev, *dev = &pdev->dev;
 	struct scpi_sensors *scpi_sensors;
-	int ret;
+	int ret, idx;
 
 	scpi_ops = get_scpi_ops();
 	if (!scpi_ops)
@@ -146,8 +146,8 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
 
 	scpi_sensors->scpi_ops = scpi_ops;
 
-	for (i = 0; i < nr_sensors; i++) {
-		struct sensor_data *sensor = &scpi_sensors->data[i];
+	for (i = 0, idx = 0; i < nr_sensors; i++) {
+		struct sensor_data *sensor = &scpi_sensors->data[idx];
 
 		ret = scpi_ops->sensor_get_info(i, &sensor->info);
 		if (ret)
@@ -183,7 +183,7 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
 			num_power++;
 			break;
 		default:
-			break;
+			continue;
 		}
 
 		sensor->dev_attr_input.attr.mode = S_IRUGO;
@@ -194,11 +194,12 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
 		sensor->dev_attr_label.show = scpi_show_label;
 		sensor->dev_attr_label.attr.name = sensor->label;
 
-		scpi_sensors->attrs[i << 1] = &sensor->dev_attr_input.attr;
-		scpi_sensors->attrs[(i << 1) + 1] = &sensor->dev_attr_label.attr;
+		scpi_sensors->attrs[idx << 1] = &sensor->dev_attr_input.attr;
+		scpi_sensors->attrs[(idx << 1) + 1] = &sensor->dev_attr_label.attr;
 
-		sysfs_attr_init(scpi_sensors->attrs[i << 1]);
-		sysfs_attr_init(scpi_sensors->attrs[(i << 1) + 1]);
+		sysfs_attr_init(scpi_sensors->attrs[idx << 1]);
+		sysfs_attr_init(scpi_sensors->attrs[(idx << 1) + 1]);
+		idx++;
 	}
 
 	scpi_sensors->group.attrs = scpi_sensors->attrs;
@@ -236,8 +237,8 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
 
 		zone->sensor_id = i;
 		zone->scpi_sensors = scpi_sensors;
-		zone->tzd = thermal_zone_of_sensor_register(dev, i, zone,
-							    &scpi_sensor_ops);
+		zone->tzd = thermal_zone_of_sensor_register(dev,
+				sensor->info.sensor_id, zone, &scpi_sensor_ops);
 		/*
 		 * The call to thermal_zone_of_sensor_register returns
 		 * an error for sensors that are not associated with
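
A pattern note on the scpi-hwmon hunks above: the probe loop now keeps two cursors, i over all firmware-reported sensors and idx over the ones actually registered, so the continue taken for unknown sensor classes no longer leaves NULL holes in the attribute array. An illustrative userspace reduction of the same idiom; the array, the -1 sentinel, and the values are made up for the demo:

    #include <stdio.h>

    #define N 8

    int main(void)
    {
        int readings[N] = { 3, -1, 7, -1, -1, 4, 9, -1 }; /* -1 = skip */
        int packed[N];
        int i, idx;

        /* Separate read and write cursors: i scans every slot, idx only
         * advances for entries we keep, so packed[] stays dense. */
        for (i = 0, idx = 0; i < N; i++) {
            if (readings[i] < 0)
                continue;
            packed[idx++] = readings[i];
        }

        for (i = 0; i < idx; i++)
            printf("packed[%d] = %d\n", i, packed[i]);
        return 0;
    }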
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index e24c2b680b47..7b0aa82ea38b 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -126,6 +126,7 @@ config I2C_I801
 	    Sunrise Point-LP (PCH)
 	    DNV (SOC)
 	    Broxton (SOC)
+	    Lewisburg (PCH)
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index c306751ceadb..f62d69799a9c 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -62,6 +62,8 @@
  * Sunrise Point-LP (PCH)	0x9d23	32	hard	yes	yes	yes
  * DNV (SOC)			0x19df	32	hard	yes	yes	yes
  * Broxton (SOC)		0x5ad4	32	hard	yes	yes	yes
+ * Lewisburg (PCH)		0xa1a3	32	hard	yes	yes	yes
+ * Lewisburg Supersku (PCH)	0xa223	32	hard	yes	yes	yes
  *
  * Features supported by this driver:
  * Software PEC				no
@@ -206,6 +208,8 @@
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS	0x9d23
 #define PCI_DEVICE_ID_INTEL_DNV_SMBUS			0x19df
 #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS		0x5ad4
+#define PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS		0xa1a3
+#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS	0xa223
 
 struct i801_mux_config {
 	char *gpio_chip;
@@ -869,6 +873,8 @@ static const struct pci_device_id i801_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS) },
 	{ 0, }
 };
 
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 1e4d99da4164..9bb0b056b25f 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -50,6 +50,7 @@
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
 #include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/platform_data/i2c-imx.h>
 #include <linux/platform_device.h>
 #include <linux/sched.h>
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index e23a7b068c60..0b20449e48cf 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -662,8 +662,10 @@ static void __xiic_start_xfer(struct xiic_i2c *i2c)
 
 static void xiic_start_xfer(struct xiic_i2c *i2c)
 {
-
+	spin_lock(&i2c->lock);
+	xiic_reinit(i2c);
 	__xiic_start_xfer(i2c);
+	spin_unlock(&i2c->lock);
 }
 
 static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
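
The xiic change above makes controller reinitialisation and transfer start a single critical section under i2c->lock, so no other context can slip in between the two steps. A rough userspace analogue using a pthread mutex; the kernel code uses a spinlock, and reinit()/__start_xfer() here are stand-ins, not the driver's functions (build with cc -std=c99 -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int controller_state;

    static void reinit(void)       { controller_state = 0; }
    static void __start_xfer(void) { controller_state++; }

    /* Mirrors the fixed xiic_start_xfer(): hold the lock so reinit and
     * the actual start appear atomic to every other thread. */
    static void start_xfer(void)
    {
        pthread_mutex_lock(&lock);
        reinit();
        __start_xfer();
        pthread_mutex_unlock(&lock);
    }

    static void *worker(void *arg)
    {
        (void)arg;
        for (int i = 0; i < 100000; i++)
            start_xfer();
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;
        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, worker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("final state: %d\n", controller_state);
        return 0;
    }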
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 040af5cc8143..ba8eb087f224 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -715,7 +715,7 @@ static int i2c_device_probe(struct device *dev)
 	if (wakeirq > 0 && wakeirq != client->irq)
 		status = dev_pm_set_dedicated_wake_irq(dev, wakeirq);
 	else if (client->irq > 0)
-		status = dev_pm_set_wake_irq(dev, wakeirq);
+		status = dev_pm_set_wake_irq(dev, client->irq);
 	else
 		status = 0;
 
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index eea0c79111e7..4d960d3b93c0 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -101,7 +101,7 @@
 #define AD7795_CH_AIN1M_AIN1M	8 /* AIN1(-) - AIN1(-) */
 
 /* ID Register Bit Designations (AD7793_REG_ID) */
-#define AD7785_ID		0xB
+#define AD7785_ID		0x3
 #define AD7792_ID		0xA
 #define AD7793_ID		0xB
 #define AD7794_ID		0xF
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 599cde3d03a1..b10f629cc44b 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -106,6 +106,13 @@
 
 #define DEFAULT_SAMPLE_TIME	1000
 
+/* V at 25°C of 696 mV */
+#define VF610_VTEMP25_3V0	950
+/* V at 25°C of 699 mV */
+#define VF610_VTEMP25_3V3	867
+/* Typical sensor slope coefficient at all temperatures */
+#define VF610_TEMP_SLOPE_COEFF	1840
+
 enum clk_sel {
 	VF610_ADCIOC_BUSCLK_SET,
 	VF610_ADCIOC_ALTCLK_SET,
@@ -197,6 +204,8 @@ static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
 		adc_feature->clk_div = 8;
 	}
 
+	adck_rate = ipg_rate / adc_feature->clk_div;
+
 	/*
 	 * Determine the long sample time adder value to be used based
 	 * on the default minimum sample time provided.
@@ -221,7 +230,6 @@ static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
 	 * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
 	 * LSTAdder(Long Sample Time): 3, 5, 7, 9, 13, 17, 21, 25 ADCK cycles
 	 */
-	adck_rate = ipg_rate / info->adc_feature.clk_div;
 	for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++)
 		info->sample_freq_avail[i] =
 			adck_rate / (6 + vf610_hw_avgs[i] *
@@ -663,11 +671,13 @@ static int vf610_read_raw(struct iio_dev *indio_dev,
 		break;
 	case IIO_TEMP:
 		/*
 		 * Calculate in degree Celsius times 1000
-		 * Using sensor slope of 1.84 mV/°C and
-		 * V at 25°C of 696 mV
+		 * Using the typical sensor slope of 1.84 mV/°C
+		 * and VREFH_ADC at 3.3V, V at 25°C of 699 mV
 		 */
-		*val = 25000 - ((int)info->value - 864) * 1000000 / 1840;
+		*val = 25000 - ((int)info->value - VF610_VTEMP25_3V3) *
+				1000000 / VF610_TEMP_SLOPE_COEFF;
+
 		break;
 	default:
 		mutex_unlock(&indio_dev->mlock);
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 0370624a35db..02e636a1c49a 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -841,6 +841,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
 	case XADC_REG_VCCINT:
 	case XADC_REG_VCCAUX:
 	case XADC_REG_VREFP:
+	case XADC_REG_VREFN:
 	case XADC_REG_VCCBRAM:
 	case XADC_REG_VCCPINT:
 	case XADC_REG_VCCPAUX:
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index 9e4d2c18b554..81ca0081a019 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -113,12 +113,16 @@ enum ad5064_type {
 	ID_AD5065,
 	ID_AD5628_1,
 	ID_AD5628_2,
+	ID_AD5629_1,
+	ID_AD5629_2,
 	ID_AD5648_1,
 	ID_AD5648_2,
 	ID_AD5666_1,
 	ID_AD5666_2,
 	ID_AD5668_1,
 	ID_AD5668_2,
+	ID_AD5669_1,
+	ID_AD5669_2,
 };
 
 static int ad5064_write(struct ad5064_state *st, unsigned int cmd,
@@ -291,7 +295,7 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
 	{ },
 };
 
-#define AD5064_CHANNEL(chan, addr, bits) {			\
+#define AD5064_CHANNEL(chan, addr, bits, _shift) {		\
 	.type = IIO_VOLTAGE,					\
 	.indexed = 1,						\
 	.output = 1,						\
@@ -303,36 +307,39 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
 		.sign = 'u',					\
 		.realbits = (bits),				\
 		.storagebits = 16,				\
-		.shift = 20 - bits,				\
+		.shift = (_shift),				\
 	},							\
 	.ext_info = ad5064_ext_info,				\
 }
 
-#define DECLARE_AD5064_CHANNELS(name, bits) \
+#define DECLARE_AD5064_CHANNELS(name, bits, shift) \
 const struct iio_chan_spec name[] = { \
-	AD5064_CHANNEL(0, 0, bits), \
-	AD5064_CHANNEL(1, 1, bits), \
-	AD5064_CHANNEL(2, 2, bits), \
-	AD5064_CHANNEL(3, 3, bits), \
-	AD5064_CHANNEL(4, 4, bits), \
-	AD5064_CHANNEL(5, 5, bits), \
-	AD5064_CHANNEL(6, 6, bits), \
-	AD5064_CHANNEL(7, 7, bits), \
+	AD5064_CHANNEL(0, 0, bits, shift), \
+	AD5064_CHANNEL(1, 1, bits, shift), \
+	AD5064_CHANNEL(2, 2, bits, shift), \
+	AD5064_CHANNEL(3, 3, bits, shift), \
+	AD5064_CHANNEL(4, 4, bits, shift), \
+	AD5064_CHANNEL(5, 5, bits, shift), \
+	AD5064_CHANNEL(6, 6, bits, shift), \
+	AD5064_CHANNEL(7, 7, bits, shift), \
 }
 
-#define DECLARE_AD5065_CHANNELS(name, bits) \
+#define DECLARE_AD5065_CHANNELS(name, bits, shift) \
 const struct iio_chan_spec name[] = { \
-	AD5064_CHANNEL(0, 0, bits), \
-	AD5064_CHANNEL(1, 3, bits), \
+	AD5064_CHANNEL(0, 0, bits, shift), \
+	AD5064_CHANNEL(1, 3, bits, shift), \
 }
 
-static DECLARE_AD5064_CHANNELS(ad5024_channels, 12);
-static DECLARE_AD5064_CHANNELS(ad5044_channels, 14);
-static DECLARE_AD5064_CHANNELS(ad5064_channels, 16);
+static DECLARE_AD5064_CHANNELS(ad5024_channels, 12, 8);
+static DECLARE_AD5064_CHANNELS(ad5044_channels, 14, 6);
+static DECLARE_AD5064_CHANNELS(ad5064_channels, 16, 4);
 
-static DECLARE_AD5065_CHANNELS(ad5025_channels, 12);
-static DECLARE_AD5065_CHANNELS(ad5045_channels, 14);
-static DECLARE_AD5065_CHANNELS(ad5065_channels, 16);
+static DECLARE_AD5065_CHANNELS(ad5025_channels, 12, 8);
+static DECLARE_AD5065_CHANNELS(ad5045_channels, 14, 6);
+static DECLARE_AD5065_CHANNELS(ad5065_channels, 16, 4);
+
+static DECLARE_AD5064_CHANNELS(ad5629_channels, 12, 4);
+static DECLARE_AD5064_CHANNELS(ad5669_channels, 16, 0);
 
 static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
 	[ID_AD5024] = {
@@ -382,6 +389,18 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
 		.channels = ad5024_channels,
 		.num_channels = 8,
 	},
+	[ID_AD5629_1] = {
+		.shared_vref = true,
+		.internal_vref = 2500000,
+		.channels = ad5629_channels,
+		.num_channels = 8,
+	},
+	[ID_AD5629_2] = {
+		.shared_vref = true,
+		.internal_vref = 5000000,
+		.channels = ad5629_channels,
+		.num_channels = 8,
+	},
 	[ID_AD5648_1] = {
 		.shared_vref = true,
 		.internal_vref = 2500000,
@@ -418,6 +437,18 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
 		.channels = ad5064_channels,
 		.num_channels = 8,
 	},
+	[ID_AD5669_1] = {
+		.shared_vref = true,
+		.internal_vref = 2500000,
+		.channels = ad5669_channels,
+		.num_channels = 8,
+	},
+	[ID_AD5669_2] = {
+		.shared_vref = true,
+		.internal_vref = 5000000,
+		.channels = ad5669_channels,
+		.num_channels = 8,
+	},
 };
 
 static inline unsigned int ad5064_num_vref(struct ad5064_state *st)
@@ -597,10 +628,16 @@ static int ad5064_i2c_write(struct ad5064_state *st, unsigned int cmd,
 	unsigned int addr, unsigned int val)
 {
 	struct i2c_client *i2c = to_i2c_client(st->dev);
+	int ret;
 
 	st->data.i2c[0] = (cmd << 4) | addr;
 	put_unaligned_be16(val, &st->data.i2c[1]);
-	return i2c_master_send(i2c, st->data.i2c, 3);
+
+	ret = i2c_master_send(i2c, st->data.i2c, 3);
+	if (ret < 0)
+		return ret;
+
+	return 0;
 }
 
 static int ad5064_i2c_probe(struct i2c_client *i2c,
@@ -616,12 +653,12 @@ static int ad5064_i2c_remove(struct i2c_client *i2c)
 }
 
 static const struct i2c_device_id ad5064_i2c_ids[] = {
-	{"ad5629-1", ID_AD5628_1},
-	{"ad5629-2", ID_AD5628_2},
-	{"ad5629-3", ID_AD5628_2}, /* similar enough to ad5629-2 */
-	{"ad5669-1", ID_AD5668_1},
-	{"ad5669-2", ID_AD5668_2},
-	{"ad5669-3", ID_AD5668_2}, /* similar enough to ad5669-2 */
+	{"ad5629-1", ID_AD5629_1},
+	{"ad5629-2", ID_AD5629_2},
+	{"ad5629-3", ID_AD5629_2}, /* similar enough to ad5629-2 */
+	{"ad5669-1", ID_AD5669_1},
+	{"ad5669-2", ID_AD5669_2},
+	{"ad5669-3", ID_AD5669_2}, /* similar enough to ad5669-2 */
 	{}
 };
 MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids);
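
The ad5064_i2c_write() change above normalises a return convention: i2c_master_send() returns the number of bytes written on success (or a negative errno), but the ad5064 core treats any nonzero return as failure. A small standalone sketch of that normalisation; send_bytes() is a stand-in for the I2C call, not a real API:

    #include <stdio.h>

    /* Stand-in with i2c_master_send() semantics:
     * bytes written on success, negative errno on failure. */
    static int send_bytes(const unsigned char *buf, int len)
    {
        (void)buf;
        return len; /* pretend the full buffer went out */
    }

    /* Callers treat nonzero as failure, so map the positive byte
     * count to 0 and pass negative error codes straight through. */
    static int dac_write(const unsigned char *buf, int len)
    {
        int ret = send_bytes(buf, len);

        if (ret < 0)
            return ret;
        return 0;
    }

    int main(void)
    {
        unsigned char cmd[3] = { 0x30, 0x12, 0x34 };
        printf("dac_write() = %d\n", dac_write(cmd, (int)sizeof(cmd)));
        return 0;
    }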
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
index 12128d1ca570..71991b5c0658 100644
--- a/drivers/iio/humidity/si7020.c
+++ b/drivers/iio/humidity/si7020.c
@@ -50,10 +50,10 @@ static int si7020_read_raw(struct iio_dev *indio_dev,
 
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
-		ret = i2c_smbus_read_word_data(*client,
-					       chan->type == IIO_TEMP ?
-					       SI7020CMD_TEMP_HOLD :
-					       SI7020CMD_RH_HOLD);
+		ret = i2c_smbus_read_word_swapped(*client,
+						  chan->type == IIO_TEMP ?
+						  SI7020CMD_TEMP_HOLD :
+						  SI7020CMD_RH_HOLD);
 		if (ret < 0)
 			return ret;
 		*val = ret >> 2;
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index cbe198cb3699..471ee36b9c6e 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -216,6 +216,7 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
 	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
 	dma_addr_t start_dma_addr = dma_addr;
 	unsigned long irq_flags, nr_pages, i;
+	unsigned long *entry;
 	int rc = 0;
 
 	if (dma_addr < s390_domain->domain.geometry.aperture_start ||
@@ -228,8 +229,12 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
 
 	spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags);
 	for (i = 0; i < nr_pages; i++) {
-		dma_update_cpu_trans(s390_domain->dma_table, page_addr,
-				     dma_addr, flags);
+		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
+		if (!entry) {
+			rc = -ENOMEM;
+			goto undo_cpu_trans;
+		}
+		dma_update_cpu_trans(entry, page_addr, flags);
 		page_addr += PAGE_SIZE;
 		dma_addr += PAGE_SIZE;
 	}
@@ -242,6 +247,20 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
 			break;
 	}
 	spin_unlock(&s390_domain->list_lock);
+
+undo_cpu_trans:
+	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
+		flags = ZPCI_PTE_INVALID;
+		while (i-- > 0) {
+			page_addr -= PAGE_SIZE;
+			dma_addr -= PAGE_SIZE;
+			entry = dma_walk_cpu_trans(s390_domain->dma_table,
+						   dma_addr);
+			if (!entry)
+				break;
+			dma_update_cpu_trans(entry, page_addr, flags);
+		}
+	}
 	spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags);
 
 	return rc;
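
The new undo_cpu_trans path above follows the usual partial-failure rule: if mapping page i fails, walk back over pages 0..i-1 and invalidate them so the DMA translation table is never left half-populated. A toy C model of the unwind; the table, map_one() and the forced failure are invented for illustration:

    #include <stdio.h>

    #define NR_PAGES 5

    static int table[NR_PAGES];

    static int map_one(int i)
    {
        if (i == 3)
            return -1; /* simulate an allocation failure on page 3 */
        table[i] = 1;
        return 0;
    }

    static int map_range(void)
    {
        int i, rc = 0;

        for (i = 0; i < NR_PAGES; i++) {
            rc = map_one(i);
            if (rc)
                break;
        }

        /* Unwind: walk backwards over what was already mapped. */
        if (rc) {
            while (i-- > 0)
                table[i] = 0;
        }
        return rc;
    }

    int main(void)
    {
        int rc = map_range();
        printf("map_range() = %d, table:", rc);
        for (int i = 0; i < NR_PAGES; i++)
            printf(" %d", table[i]);
        printf("\n");
        return 0;
    }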
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index 44a077f3a4a2..f174ce0ca361 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -84,12 +84,15 @@ void __init gic_dist_config(void __iomem *base, int gic_irqs,
 		writel_relaxed(GICD_INT_DEF_PRI_X4, base + GIC_DIST_PRI + i);
 
 	/*
-	 * Disable all interrupts.  Leave the PPI and SGIs alone
-	 * as they are enabled by redistributor registers.
+	 * Deactivate and disable all SPIs. Leave the PPI and SGIs
+	 * alone as they are in the redistributor registers on GICv3.
 	 */
-	for (i = 32; i < gic_irqs; i += 32)
+	for (i = 32; i < gic_irqs; i += 32) {
 		writel_relaxed(GICD_INT_EN_CLR_X32,
-			       base + GIC_DIST_ENABLE_CLEAR + i / 8);
+			       base + GIC_DIST_ACTIVE_CLEAR + i / 8);
+		writel_relaxed(GICD_INT_EN_CLR_X32,
+			       base + GIC_DIST_ENABLE_CLEAR + i / 8);
+	}
 
 	if (sync_access)
 		sync_access();
@@ -102,7 +105,9 @@ void gic_cpu_config(void __iomem *base, void (*sync_access)(void))
 	/*
 	 * Deal with the banked PPI and SGI interrupts - disable all
 	 * PPI interrupts, ensure all SGI interrupts are enabled.
+	 * Make sure everything is deactivated.
 	 */
+	writel_relaxed(GICD_INT_EN_CLR_X32, base + GIC_DIST_ACTIVE_CLEAR);
 	writel_relaxed(GICD_INT_EN_CLR_PPI, base + GIC_DIST_ENABLE_CLEAR);
 	writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET);
 
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 515c823c1c95..abf2ffaed392 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -73,9 +73,11 @@ struct gic_chip_data {
 	union gic_base cpu_base;
 #ifdef CONFIG_CPU_PM
 	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
+	u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
 	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
 	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
 	u32 __percpu *saved_ppi_enable;
+	u32 __percpu *saved_ppi_active;
 	u32 __percpu *saved_ppi_conf;
 #endif
 	struct irq_domain *domain;
@@ -566,6 +568,10 @@ static void gic_dist_save(unsigned int gic_nr)
 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
 		gic_data[gic_nr].saved_spi_enable[i] =
 			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+		gic_data[gic_nr].saved_spi_active[i] =
+			readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
 }
 
 /*
@@ -604,9 +610,19 @@ static void gic_dist_restore(unsigned int gic_nr)
 		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
 			dist_base + GIC_DIST_TARGET + i * 4);
 
-	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
+		writel_relaxed(GICD_INT_EN_CLR_X32,
+			dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
 		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
 			dist_base + GIC_DIST_ENABLE_SET + i * 4);
+	}
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
+		writel_relaxed(GICD_INT_EN_CLR_X32,
+			dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
+		writel_relaxed(gic_data[gic_nr].saved_spi_active[i],
+			dist_base + GIC_DIST_ACTIVE_SET + i * 4);
+	}
 
 	writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
 }
@@ -631,6 +647,10 @@ static void gic_cpu_save(unsigned int gic_nr)
 	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
 		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
 
+	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
+	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
+
 	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
 	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
 		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
@@ -654,8 +674,18 @@ static void gic_cpu_restore(unsigned int gic_nr)
 		return;
 
 	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
-	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
+		writel_relaxed(GICD_INT_EN_CLR_X32,
+			dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
 		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
+	}
+
+	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
+	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
+		writel_relaxed(GICD_INT_EN_CLR_X32,
+			dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
+		writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
+	}
 
 	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
 	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
@@ -710,6 +740,10 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
 		sizeof(u32));
 	BUG_ON(!gic->saved_ppi_enable);
 
+	gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
+		sizeof(u32));
+	BUG_ON(!gic->saved_ppi_active);
+
 	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
 		sizeof(u32));
 	BUG_ON(!gic->saved_ppi_conf);
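
A note on why the GIC restore paths above now hit the *_CLEAR registers before writing the saved snapshot: the GIC enable and active state live in set/clear register pairs where writing a 1 bit sets (or clears) it and writing 0 is ignored, so a snapshot written through the SET register alone can never drop bits that became stale across suspend or firmware hand-off. A toy model of such a register pair; the variable and helpers are invented, real accesses go through writel_relaxed():

    #include <stdio.h>

    /* Fake distributor state with set/clear semantics:
     * writes to SET can only OR bits in, CLEAR knocks them out. */
    static unsigned int hw_enable;

    static void write_set(unsigned int v)   { hw_enable |= v; }
    static void write_clear(unsigned int v) { hw_enable &= ~v; }

    int main(void)
    {
        unsigned int saved = 0x0000000f; /* snapshot taken at suspend */

        hw_enable = 0x000000ff; /* firmware left extra bits set at resume */

        /* Buggy restore: SET alone cannot drop the stale high bits. */
        write_set(saved);
        printf("restore without clear: %#010x\n", hw_enable);

        /* Fixed restore: wipe everything, then write the snapshot. */
        write_clear(0xffffffff);
        write_set(saved);
        printf("restore with clear:    %#010x\n", hw_enable);
        return 0;
    }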
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 35759a91d47d..e8f847226a19 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -1992,9 +1992,9 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
 	       (unsigned long long)pci_resource_start(pci_dev, 0));
 
 	pci_set_master(pci_dev);
-	if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
+	err = pci_set_dma_mask(pci_dev, 0xffffffff);
+	if (err) {
 		printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
-		err = -EIO;
 		goto fail_context;
 	}
 
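
This and the next several media hunks all fix the same inverted test: pci_set_dma_mask() returns 0 on success and a negative errno on failure, so "if (!pci_set_dma_mask(...))" took the error path precisely when the mask was accepted. A compact demonstration with a stand-in that mimics that return convention:

    #include <stdio.h>

    /* Stand-in with pci_set_dma_mask() semantics:
     * 0 on success, negative errno on failure. */
    static int set_dma_mask(unsigned long long mask)
    {
        return mask ? 0 : -5; /* -EIO when the mask is unusable */
    }

    int main(void)
    {
        int err;

        /* Buggy pattern: treats success (0) as the error branch. */
        if (!set_dma_mask(0xffffffffULL))
            printf("buggy check: would bail out on a working mask\n");

        /* Fixed pattern: keep the return value and test it directly. */
        err = set_dma_mask(0xffffffffULL);
        if (err)
            printf("fixed check: error %d\n", err);
        else
            printf("fixed check: mask accepted\n");
        return 0;
    }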
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index dbc695f32760..0042803a9de7 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -1319,7 +1319,8 @@ static int cx25821_initdev(struct pci_dev *pci_dev,
 	       dev->pci_lat, (unsigned long long)dev->base_io_addr);
 
 	pci_set_master(pci_dev);
-	if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
+	err = pci_set_dma_mask(pci_dev, 0xffffffff);
+	if (err) {
 		pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
 		err = -EIO;
 		goto fail_irq;
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index 0ed1b6530374..1b5268f9bb24 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -890,9 +890,9 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
 		return err;
 	}
 
-	if (!pci_set_dma_mask(pci,DMA_BIT_MASK(32))) {
+	err = pci_set_dma_mask(pci,DMA_BIT_MASK(32));
+	if (err) {
 		dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name);
-		err = -EIO;
 		cx88_core_put(core, pci);
 		return err;
 	}
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index 9db7767d1fe0..f34c229f9b37 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -393,7 +393,8 @@ static int cx8802_init_common(struct cx8802_dev *dev)
 	if (pci_enable_device(dev->pci))
 		return -EIO;
 	pci_set_master(dev->pci);
-	if (!pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32))) {
+	err = pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32));
+	if (err) {
 		printk("%s/2: Oops: no 32bit PCI DMA ???\n",dev->core->name);
 		return -EIO;
 	}
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index 0de1ad5a977d..aef9acf351f6 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1314,9 +1314,9 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
 	       dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
 
 	pci_set_master(pci_dev);
-	if (!pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32))) {
+	err = pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32));
+	if (err) {
 		printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name);
-		err = -EIO;
 		goto fail_core;
 	}
 	dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 60b2d462f98d..3fdbd81b5580 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -810,7 +810,7 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
 		"%s(): board vendor 0x%x, revision 0x%x\n",
 		__func__, board_vendor, board_revision);
 	pci_set_master(pci_dev);
-	if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
+	if (pci_set_dma_mask(pci_dev, 0xffffffff) < 0) {
 		dev_err(&pci_dev->dev,
 			"%s(): 32bit PCI DMA is not supported\n", __func__);
 		goto pci_detect_err;
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index e79d63eb774e..f720cea80e28 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -951,9 +951,9 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
 	       pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
 	       dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
 	pci_set_master(pci_dev);
-	if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+	err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+	if (err) {
 		pr_warn("%s: Oops: no 32bit PCI DMA ???\n", dev->name);
-		err = -EIO;
 		goto fail1;
 	}
 
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 8f36b48ef733..8bbd092fbe1d 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -1264,9 +1264,9 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
 
 	pci_set_master(pci_dev);
 	/* TODO */
-	if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
+	err = pci_set_dma_mask(pci_dev, 0xffffffff);
+	if (err) {
 		printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
-		err = -EIO;
 		goto fail_irq;
 	}
 
diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
index 8c5655d351d3..4e77618fbb2b 100644
--- a/drivers/media/pci/tw68/tw68-core.c
+++ b/drivers/media/pci/tw68/tw68-core.c
@@ -257,9 +257,9 @@ static int tw68_initdev(struct pci_dev *pci_dev,
 		dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
 		dev->pci_lat, (u64)pci_resource_start(pci_dev, 0));
 	pci_set_master(pci_dev);
-	if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+	err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+	if (err) {
 		pr_info("%s: Oops: no 32bit PCI DMA ???\n", dev->name);
-		err = -EIO;
 		goto fail1;
 	}
 
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 23b6c8e8701c..d8486168415a 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -65,8 +65,7 @@ MODULE_ALIAS("mmc:block");
 #define MMC_SANITIZE_REQ_TIMEOUT 240000
 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
 
-#define mmc_req_rel_wr(req)	(((req->cmd_flags & REQ_FUA) || \
-				  (req->cmd_flags & REQ_META)) && \
+#define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
 				  (rq_data_dir(req) == WRITE))
 #define PACKED_CMD_VER	0x01
 #define PACKED_CMD_WR	0x02
@@ -1467,13 +1466,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 
 	/*
 	 * Reliable writes are used to implement Forced Unit Access and
-	 * REQ_META accesses, and are supported only on MMCs.
-	 *
-	 * XXX: this really needs a good explanation of why REQ_META
-	 * is treated special.
+	 * are supported only on MMCs.
 	 */
-	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
-			  (req->cmd_flags & REQ_META)) &&
+	bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
 		(rq_data_dir(req) == WRITE) &&
 		(md->flags & MMC_BLK_REL_WR);
 
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c793fda27321..3a9a79ec4343 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1040,9 +1040,24 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
 	return err;
 }
 
+/* Caller must hold re-tuning */
+static int mmc_switch_status(struct mmc_card *card)
+{
+	u32 status;
+	int err;
+
+	err = mmc_send_status(card, &status);
+	if (err)
+		return err;
+
+	return mmc_switch_status_error(card->host, status);
+}
+
 static int mmc_select_hs400(struct mmc_card *card)
 {
 	struct mmc_host *host = card->host;
+	bool send_status = true;
+	unsigned int max_dtr;
 	int err = 0;
 	u8 val;
 
@@ -1053,25 +1068,36 @@ static int mmc_select_hs400(struct mmc_card *card)
 	      host->ios.bus_width == MMC_BUS_WIDTH_8))
 		return 0;
 
-	/*
-	 * Before switching to dual data rate operation for HS400,
-	 * it is required to convert from HS200 mode to HS mode.
-	 */
-	mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
-	mmc_set_bus_speed(card);
+	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+		send_status = false;
 
+	/* Reduce frequency to HS frequency */
+	max_dtr = card->ext_csd.hs_max_dtr;
+	mmc_set_clock(host, max_dtr);
+
+	/* Switch card to HS mode */
 	val = EXT_CSD_TIMING_HS |
 	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
 	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 			   EXT_CSD_HS_TIMING, val,
 			   card->ext_csd.generic_cmd6_time,
-			   true, true, true);
+			   true, send_status, true);
 	if (err) {
 		pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
 			mmc_hostname(host), err);
 		return err;
 	}
 
+	/* Set host controller to HS timing */
+	mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+
+	if (!send_status) {
+		err = mmc_switch_status(card);
+		if (err)
+			goto out_err;
+	}
+
+	/* Switch card to DDR */
 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 			 EXT_CSD_BUS_WIDTH,
 			 EXT_CSD_DDR_BUS_WIDTH_8,
@@ -1082,22 +1108,35 @@ static int mmc_select_hs400(struct mmc_card *card)
 		return err;
 	}
 
+	/* Switch card to HS400 */
 	val = EXT_CSD_TIMING_HS400 |
 	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
 	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 			   EXT_CSD_HS_TIMING, val,
 			   card->ext_csd.generic_cmd6_time,
-			   true, true, true);
+			   true, send_status, true);
 	if (err) {
 		pr_err("%s: switch to hs400 failed, err:%d\n",
 			 mmc_hostname(host), err);
 		return err;
 	}
 
+	/* Set host controller to HS400 timing and frequency */
 	mmc_set_timing(host, MMC_TIMING_MMC_HS400);
 	mmc_set_bus_speed(card);
 
+	if (!send_status) {
+		err = mmc_switch_status(card);
+		if (err)
+			goto out_err;
+	}
+
 	return 0;
+
+out_err:
+	pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
+	       __func__, err);
+	return err;
 }
 
 int mmc_hs200_to_hs400(struct mmc_card *card)
@@ -1105,19 +1144,6 @@ int mmc_hs200_to_hs400(struct mmc_card *card)
 	return mmc_select_hs400(card);
 }
 
-/* Caller must hold re-tuning */
-static int mmc_switch_status(struct mmc_card *card)
-{
-	u32 status;
-	int err;
-
-	err = mmc_send_status(card, &status);
-	if (err)
-		return err;
-
-	return mmc_switch_status_error(card->host, status);
-}
-
 int mmc_hs400_to_hs200(struct mmc_card *card)
 {
 	struct mmc_host *host = card->host;
@@ -1219,6 +1245,8 @@ static void mmc_select_driver_type(struct mmc_card *card)
 static int mmc_select_hs200(struct mmc_card *card)
 {
 	struct mmc_host *host = card->host;
+	bool send_status = true;
+	unsigned int old_timing;
 	int err = -EINVAL;
 	u8 val;
 
@@ -1234,6 +1262,9 @@ static int mmc_select_hs200(struct mmc_card *card)
 
 	mmc_select_driver_type(card);
 
+	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+		send_status = false;
+
 	/*
 	 * Set the bus width(4 or 8) with host's support and
 	 * switch to HS200 mode if bus width is set successfully.
@@ -1245,11 +1276,25 @@ static int mmc_select_hs200(struct mmc_card *card)
 		err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				   EXT_CSD_HS_TIMING, val,
 				   card->ext_csd.generic_cmd6_time,
-				   true, true, true);
-		if (!err)
-			mmc_set_timing(host, MMC_TIMING_MMC_HS200);
+				   true, send_status, true);
+		if (err)
+			goto err;
+		old_timing = host->ios.timing;
+		mmc_set_timing(host, MMC_TIMING_MMC_HS200);
+		if (!send_status) {
+			err = mmc_switch_status(card);
+			/*
+			 * mmc_select_timing() assumes timing has not changed if
+			 * it is a switch error.
+			 */
+			if (err == -EBADMSG)
+				mmc_set_timing(host, old_timing);
+		}
 	}
 err:
+	if (err)
+		pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
+		       __func__, err);
 	return err;
 }
 
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index af71de5fda3b..1dee533634c9 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -473,6 +473,7 @@ config MMC_DAVINCI
 
 config MMC_GOLDFISH
 	tristate "goldfish qemu Multimedia Card Interface support"
+	depends on HAS_DMA
 	depends on GOLDFISH || COMPILE_TEST
 	help
 	  This selects the Goldfish Multimedia card Interface emulation
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 39568cc29a2a..33dfd7e72516 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -1276,7 +1276,7 @@ static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay)
 	int start = 0, len = 0;
 	int start_final = 0, len_final = 0;
 	u8 final_phase = 0xff;
-	struct msdc_delay_phase delay_phase;
+	struct msdc_delay_phase delay_phase = { 0, };
 
 	if (delay == 0) {
 		dev_err(host->dev, "phase error: [map:%x]\n", delay);
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 8cadd74e8407..ce08896b9d69 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -805,7 +805,7 @@ static int pxamci_probe(struct platform_device *pdev)
 		goto out;
 	} else {
 		mmc->caps |= host->pdata->gpio_card_ro_invert ?
-			MMC_CAP2_RO_ACTIVE_HIGH : 0;
+			0 : MMC_CAP2_RO_ACTIVE_HIGH;
 	}
 
 	if (gpio_is_valid(gpio_cd))
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index dc4e8446f1ff..5a99a93ed025 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -25,6 +25,7 @@
 
 #include <linux/gpio.h>
 
+#include <asm/mach-jz4740/gpio.h>
 #include <asm/mach-jz4740/jz4740_nand.h>
 
 #define JZ_REG_NAND_CTRL	0x50
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index cc74142938b0..ece544efccc3 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -3110,7 +3110,7 @@ static void nand_resume(struct mtd_info *mtd)
  */
 static void nand_shutdown(struct mtd_info *mtd)
 {
-	nand_get_device(mtd, FL_SHUTDOWN);
+	nand_get_device(mtd, FL_PM_SUSPENDED);
 }
 
 /* Set default functions */
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 9093577755f6..0527f485c3dc 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -15,9 +15,7 @@
 #include <linux/netdevice.h>
 #include <linux/phy.h>
 #include <net/dsa.h>
-
-#define REG_PORT(p)		(8 + (p))
-#define REG_GLOBAL		0x0f
+#include "mv88e6060.h"
 
 static int reg_read(struct dsa_switch *ds, int addr, int reg)
 {
@@ -67,13 +65,14 @@ static char *mv88e6060_probe(struct device *host_dev, int sw_addr)
 	if (bus == NULL)
 		return NULL;
 
-	ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03);
+	ret = mdiobus_read(bus, sw_addr + REG_PORT(0), PORT_SWITCH_ID);
 	if (ret >= 0) {
-		if (ret == 0x0600)
+		if (ret == PORT_SWITCH_ID_6060)
 			return "Marvell 88E6060 (A0)";
-		if (ret == 0x0601 || ret == 0x0602)
+		if (ret == PORT_SWITCH_ID_6060_R1 ||
+		    ret == PORT_SWITCH_ID_6060_R2)
 			return "Marvell 88E6060 (B0)";
-		if ((ret & 0xfff0) == 0x0600)
+		if ((ret & PORT_SWITCH_ID_6060_MASK) == PORT_SWITCH_ID_6060)
 			return "Marvell 88E6060";
 	}
 
@@ -87,22 +86,26 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
 	unsigned long timeout;
 
 	/* Set all ports to the disabled state. */
-	for (i = 0; i < 6; i++) {
-		ret = REG_READ(REG_PORT(i), 0x04);
-		REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
+	for (i = 0; i < MV88E6060_PORTS; i++) {
+		ret = REG_READ(REG_PORT(i), PORT_CONTROL);
+		REG_WRITE(REG_PORT(i), PORT_CONTROL,
+			  ret & ~PORT_CONTROL_STATE_MASK);
 	}
 
 	/* Wait for transmit queues to drain. */
 	usleep_range(2000, 4000);
 
 	/* Reset the switch. */
-	REG_WRITE(REG_GLOBAL, 0x0a, 0xa130);
+	REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
+		  GLOBAL_ATU_CONTROL_SWRESET |
+		  GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
+		  GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
 
 	/* Wait up to one second for reset to complete. */
 	timeout = jiffies + 1 * HZ;
 	while (time_before(jiffies, timeout)) {
-		ret = REG_READ(REG_GLOBAL, 0x00);
-		if ((ret & 0x8000) == 0x0000)
+		ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
+		if (ret & GLOBAL_STATUS_INIT_READY)
 			break;
 
 		usleep_range(1000, 2000);
@@ -119,13 +122,15 @@ static int mv88e6060_setup_global(struct dsa_switch *ds)
 	 * set the maximum frame size to 1536 bytes, and mask all
 	 * interrupt sources.
 	 */
-	REG_WRITE(REG_GLOBAL, 0x04, 0x0800);
+	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536);
 
 	/* Enable automatic address learning, set the address
 	 * database size to 1024 entries, and set the default aging
 	 * time to 5 minutes.
 	 */
-	REG_WRITE(REG_GLOBAL, 0x0a, 0x2130);
+	REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
+		  GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
+		  GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
 
 	return 0;
 }
@@ -139,25 +144,30 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
 	 * state to Forwarding.  Additionally, if this is the CPU
 	 * port, enable Ingress and Egress Trailer tagging mode.
 	 */
-	REG_WRITE(addr, 0x04, dsa_is_cpu_port(ds, p) ?  0x4103 : 0x0003);
+	REG_WRITE(addr, PORT_CONTROL,
+		  dsa_is_cpu_port(ds, p) ?
+			PORT_CONTROL_TRAILER |
+			PORT_CONTROL_INGRESS_MODE |
+			PORT_CONTROL_STATE_FORWARDING :
+			PORT_CONTROL_STATE_FORWARDING);
 
 	/* Port based VLAN map: give each port its own address
 	 * database, allow the CPU port to talk to each of the 'real'
 	 * ports, and allow each of the 'real' ports to only talk to
 	 * the CPU port.
 	 */
-	REG_WRITE(addr, 0x06,
-		  ((p & 0xf) << 12) |
+	REG_WRITE(addr, PORT_VLAN_MAP,
+		  ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) |
 		  (dsa_is_cpu_port(ds, p) ?
 			ds->phys_port_mask :
-			(1 << ds->dst->cpu_port)));
+			BIT(ds->dst->cpu_port)));
 
 	/* Port Association Vector: when learning source addresses
 	 * of packets, add the address to the address database using
 	 * a port bitmap that has only the bit for this port set and
 	 * the other bits clear.
 	 */
-	REG_WRITE(addr, 0x0b, 1 << p);
+	REG_WRITE(addr, PORT_ASSOC_VECTOR, BIT(p));
 
 	return 0;
 }
@@ -177,7 +187,7 @@ static int mv88e6060_setup(struct dsa_switch *ds)
 	if (ret < 0)
 		return ret;
 
-	for (i = 0; i < 6; i++) {
+	for (i = 0; i < MV88E6060_PORTS; i++) {
 		ret = mv88e6060_setup_port(ds, i);
 		if (ret < 0)
 			return ret;
@@ -188,16 +198,17 @@ static int mv88e6060_setup(struct dsa_switch *ds)
 
 static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr)
 {
-	REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]);
-	REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]);
-	REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]);
+	/* Use the same MAC Address as FD Pause frames for all ports */
+	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 9) | addr[1]);
+	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
+	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
 
 	return 0;
 }
 
 static int mv88e6060_port_to_phy_addr(int port)
 {
-	if (port >= 0 && port <= 5)
+	if (port >= 0 && port < MV88E6060_PORTS)
 		return port;
 	return -1;
 }
@@ -225,54 +236,6 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
 	return reg_write(ds, addr, regnum, val);
 }
 
-static void mv88e6060_poll_link(struct dsa_switch *ds)
-{
-	int i;
-
-	for (i = 0; i < DSA_MAX_PORTS; i++) {
-		struct net_device *dev;
-		int uninitialized_var(port_status);
-		int link;
-		int speed;
-		int duplex;
-		int fc;
-
-		dev = ds->ports[i];
-		if (dev == NULL)
-			continue;
-
-		link = 0;
-		if (dev->flags & IFF_UP) {
-			port_status = reg_read(ds, REG_PORT(i), 0x00);
-			if (port_status < 0)
-				continue;
-
-			link = !!(port_status & 0x1000);
-		}
-
-		if (!link) {
-			if (netif_carrier_ok(dev)) {
-				netdev_info(dev, "link down\n");
-				netif_carrier_off(dev);
-			}
-			continue;
-		}
-
-		speed = (port_status & 0x0100) ? 100 : 10;
-		duplex = (port_status & 0x0200) ? 1 : 0;
-		fc = ((port_status & 0xc000) == 0xc000) ? 1 : 0;
-
-		if (!netif_carrier_ok(dev)) {
-			netdev_info(dev,
-				    "link up, %d Mb/s, %s duplex, flow control %sabled\n",
-				    speed,
-				    duplex ? "full" : "half",
-				    fc ? "en" : "dis");
-			netif_carrier_on(dev);
-		}
-	}
-}
-
 static struct dsa_switch_driver mv88e6060_switch_driver = {
 	.tag_protocol	= DSA_TAG_PROTO_TRAILER,
 	.probe		= mv88e6060_probe,
@@ -280,7 +243,6 @@ static struct dsa_switch_driver mv88e6060_switch_driver = {
 	.set_addr	= mv88e6060_set_addr,
 	.phy_read	= mv88e6060_phy_read,
 	.phy_write	= mv88e6060_phy_write,
-	.poll_link	= mv88e6060_poll_link,
 };
 
 static int __init mv88e6060_init(void)
diff --git a/drivers/net/dsa/mv88e6060.h b/drivers/net/dsa/mv88e6060.h
new file mode 100644
index 000000000000..cc9b2ed4aff4
--- /dev/null
+++ b/drivers/net/dsa/mv88e6060.h
@@ -0,0 +1,111 @@
1/*
2 * drivers/net/dsa/mv88e6060.h - Marvell 88e6060 switch chip support
3 * Copyright (c) 2015 Neil Armstrong
4 *
5 * Based on mv88e6xxx.h
6 * Copyright (c) 2008 Marvell Semiconductor
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __MV88E6060_H
15#define __MV88E6060_H
16
17#define MV88E6060_PORTS 6
18
19#define REG_PORT(p) (0x8 + (p))
20#define PORT_STATUS 0x00
21#define PORT_STATUS_PAUSE_EN BIT(15)
22#define PORT_STATUS_MY_PAUSE BIT(14)
23#define PORT_STATUS_FC (PORT_STATUS_MY_PAUSE | PORT_STATUS_PAUSE_EN)
24#define PORT_STATUS_RESOLVED BIT(13)
25#define PORT_STATUS_LINK BIT(12)
26#define PORT_STATUS_PORTMODE BIT(11)
27#define PORT_STATUS_PHYMODE BIT(10)
28#define PORT_STATUS_DUPLEX BIT(9)
29#define PORT_STATUS_SPEED BIT(8)
30#define PORT_SWITCH_ID 0x03
31#define PORT_SWITCH_ID_6060 0x0600
32#define PORT_SWITCH_ID_6060_MASK 0xfff0
33#define PORT_SWITCH_ID_6060_R1 0x0601
34#define PORT_SWITCH_ID_6060_R2 0x0602
35#define PORT_CONTROL 0x04
36#define PORT_CONTROL_FORCE_FLOW_CTRL BIT(15)
37#define PORT_CONTROL_TRAILER BIT(14)
38#define PORT_CONTROL_HEADER BIT(11)
39#define PORT_CONTROL_INGRESS_MODE BIT(8)
40#define PORT_CONTROL_VLAN_TUNNEL BIT(7)
41#define PORT_CONTROL_STATE_MASK 0x03
42#define PORT_CONTROL_STATE_DISABLED 0x00
43#define PORT_CONTROL_STATE_BLOCKING 0x01
44#define PORT_CONTROL_STATE_LEARNING 0x02
45#define PORT_CONTROL_STATE_FORWARDING 0x03
46#define PORT_VLAN_MAP 0x06
47#define PORT_VLAN_MAP_DBNUM_SHIFT 12
48#define PORT_VLAN_MAP_TABLE_MASK 0x1f
49#define PORT_ASSOC_VECTOR 0x0b
50#define PORT_ASSOC_VECTOR_MONITOR BIT(15)
51#define PORT_ASSOC_VECTOR_PAV_MASK 0x1f
52#define PORT_RX_CNTR 0x10
53#define PORT_TX_CNTR 0x11
54
55#define REG_GLOBAL 0x0f
56#define GLOBAL_STATUS 0x00
57#define GLOBAL_STATUS_SW_MODE_MASK (0x3 << 12)
58#define GLOBAL_STATUS_SW_MODE_0 (0x0 << 12)
59#define GLOBAL_STATUS_SW_MODE_1 (0x1 << 12)
60#define GLOBAL_STATUS_SW_MODE_2 (0x2 << 12)
61#define GLOBAL_STATUS_SW_MODE_3 (0x3 << 12)
62#define GLOBAL_STATUS_INIT_READY BIT(11)
63#define GLOBAL_STATUS_ATU_FULL BIT(3)
64#define GLOBAL_STATUS_ATU_DONE BIT(2)
65#define GLOBAL_STATUS_PHY_INT BIT(1)
66#define GLOBAL_STATUS_EEINT BIT(0)
67#define GLOBAL_MAC_01 0x01
68#define GLOBAL_MAC_01_DIFF_ADDR BIT(8)
69#define GLOBAL_MAC_23 0x02
70#define GLOBAL_MAC_45 0x03
71#define GLOBAL_CONTROL 0x04
72#define GLOBAL_CONTROL_DISCARD_EXCESS BIT(13)
73#define GLOBAL_CONTROL_MAX_FRAME_1536 BIT(10)
74#define GLOBAL_CONTROL_RELOAD_EEPROM BIT(9)
75#define GLOBAL_CONTROL_CTRMODE BIT(8)
76#define GLOBAL_CONTROL_ATU_FULL_EN BIT(3)
77#define GLOBAL_CONTROL_ATU_DONE_EN BIT(2)
78#define GLOBAL_CONTROL_PHYINT_EN BIT(1)
79#define GLOBAL_CONTROL_EEPROM_DONE_EN BIT(0)
80#define GLOBAL_ATU_CONTROL 0x0a
81#define GLOBAL_ATU_CONTROL_SWRESET BIT(15)
82#define GLOBAL_ATU_CONTROL_LEARNDIS BIT(14)
83#define GLOBAL_ATU_CONTROL_ATUSIZE_256 (0x0 << 12)
84#define GLOBAL_ATU_CONTROL_ATUSIZE_512 (0x1 << 12)
85#define GLOBAL_ATU_CONTROL_ATUSIZE_1024 (0x2 << 12)
86#define GLOBAL_ATU_CONTROL_ATE_AGE_SHIFT 4
87#define GLOBAL_ATU_CONTROL_ATE_AGE_MASK (0xff << 4)
88#define GLOBAL_ATU_CONTROL_ATE_AGE_5MIN (0x13 << 4)
89#define GLOBAL_ATU_OP 0x0b
90#define GLOBAL_ATU_OP_BUSY BIT(15)
91#define GLOBAL_ATU_OP_NOP (0 << 12)
92#define GLOBAL_ATU_OP_FLUSH_ALL ((1 << 12) | GLOBAL_ATU_OP_BUSY)
93#define GLOBAL_ATU_OP_FLUSH_UNLOCKED ((2 << 12) | GLOBAL_ATU_OP_BUSY)
94#define GLOBAL_ATU_OP_LOAD_DB ((3 << 12) | GLOBAL_ATU_OP_BUSY)
95#define GLOBAL_ATU_OP_GET_NEXT_DB ((4 << 12) | GLOBAL_ATU_OP_BUSY)
96#define GLOBAL_ATU_OP_FLUSH_DB ((5 << 12) | GLOBAL_ATU_OP_BUSY)
97#define GLOBAL_ATU_OP_FLUSH_UNLOCKED_DB ((6 << 12) | GLOBAL_ATU_OP_BUSY)
98#define GLOBAL_ATU_DATA 0x0c
99#define GLOBAL_ATU_DATA_PORT_VECTOR_MASK 0x3f0
100#define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT 4
101#define GLOBAL_ATU_DATA_STATE_MASK 0x0f
102#define GLOBAL_ATU_DATA_STATE_UNUSED 0x00
103#define GLOBAL_ATU_DATA_STATE_UC_STATIC 0x0e
104#define GLOBAL_ATU_DATA_STATE_UC_LOCKED 0x0f
105#define GLOBAL_ATU_DATA_STATE_MC_STATIC 0x07
106#define GLOBAL_ATU_DATA_STATE_MC_LOCKED 0x0e
107#define GLOBAL_ATU_MAC_01 0x0d
108#define GLOBAL_ATU_MAC_23 0x0e
109#define GLOBAL_ATU_MAC_45 0x0f
110
111#endif
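
One design note on the new header: every 88E6060 port is its own SMI device, so REG_PORT() folds the port number into the SMI address rather than into a register offset. A hedged sketch of the resulting address map, reusing the defines above; the printfs are illustrative only:

/* Sketch: 88E6060 SMI address map as encoded by REG_PORT()/REG_GLOBAL. */
#include <stdio.h>

#define MV88E6060_PORTS	6
#define REG_PORT(p)	(0x8 + (p))
#define REG_GLOBAL	0x0f

int main(void)
{
	int p;

	for (p = 0; p < MV88E6060_PORTS; p++)
		printf("port %d registers at SMI address 0x%02x\n",
		       p, REG_PORT(p));
	printf("global registers at SMI address 0x%02x\n", REG_GLOBAL);
	return 0;
}
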
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 05aa7597dab9..955d06b9cdba 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -78,7 +78,6 @@ source "drivers/net/ethernet/ibm/Kconfig"
78source "drivers/net/ethernet/intel/Kconfig" 78source "drivers/net/ethernet/intel/Kconfig"
79source "drivers/net/ethernet/i825xx/Kconfig" 79source "drivers/net/ethernet/i825xx/Kconfig"
80source "drivers/net/ethernet/xscale/Kconfig" 80source "drivers/net/ethernet/xscale/Kconfig"
81source "drivers/net/ethernet/icplus/Kconfig"
82 81
83config JME 82config JME
84 tristate "JMicron(R) PCI-Express Gigabit Ethernet support" 83 tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index ddfc808110a1..4a2ee98738f0 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -41,7 +41,6 @@ obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
41obj-$(CONFIG_NET_VENDOR_INTEL) += intel/ 41obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
42obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/ 42obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/
43obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/ 43obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/
44obj-$(CONFIG_IP1000) += icplus/
45obj-$(CONFIG_JME) += jme.o 44obj-$(CONFIG_JME) += jme.o
46obj-$(CONFIG_KORINA) += korina.o 45obj-$(CONFIG_KORINA) += korina.o
47obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o 46obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index e2afabf3a465..7ccebae9cb48 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1500,10 +1500,11 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
1500 return -ENODEV; 1500 return -ENODEV;
1501 } 1501 }
1502 1502
-1503	if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
-1504		if (pcnet32_debug & NETIF_MSG_PROBE)
-1505			pr_err("architecture does not support 32bit PCI busmaster DMA\n");
-1506		return -ENODEV;
-1507	}
+1503	err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK);
+1504	if (err) {
+1505		if (pcnet32_debug & NETIF_MSG_PROBE)
+1506			pr_err("architecture does not support 32bit PCI busmaster DMA\n");
+1507		return err;
+1508	}
1508 if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { 1509 if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
1509 if (pcnet32_debug & NETIF_MSG_PROBE) 1510 if (pcnet32_debug & NETIF_MSG_PROBE)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index f1d62d5dbaff..c9b036789184 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13207,7 +13207,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
13207 13207
13208 /* VF with OLD Hypervisor or old PF do not support filtering */ 13208 /* VF with OLD Hypervisor or old PF do not support filtering */
13209 if (IS_PF(bp)) { 13209 if (IS_PF(bp)) {
-13210		if (CHIP_IS_E1x(bp))
+13210		if (chip_is_e1x)
13211 bp->accept_any_vlan = true; 13211 bp->accept_any_vlan = true;
13212 else 13212 else
13213 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 13213 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index f683d97d7614..b89504405b72 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -560,7 +560,7 @@ static int liquidio_resume(struct pci_dev *pdev)
560#endif 560#endif
561 561
562/* For PCI-E Advanced Error Recovery (AER) Interface */ 562/* For PCI-E Advanced Error Recovery (AER) Interface */
-563static struct pci_error_handlers liquidio_err_handler = {
+563static const struct pci_error_handlers liquidio_err_handler = {
564 .error_detected = liquidio_pcie_error_detected, 564 .error_detected = liquidio_pcie_error_detected,
565 .mmio_enabled = liquidio_pcie_mmio_enabled, 565 .mmio_enabled = liquidio_pcie_mmio_enabled,
566 .slot_reset = liquidio_pcie_slot_reset, 566 .slot_reset = liquidio_pcie_slot_reset,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a9377727c11c..7f709cbdcd87 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1583,8 +1583,14 @@ err_disable_device:
1583static void nicvf_remove(struct pci_dev *pdev) 1583static void nicvf_remove(struct pci_dev *pdev)
1584{ 1584{
1585 struct net_device *netdev = pci_get_drvdata(pdev); 1585 struct net_device *netdev = pci_get_drvdata(pdev);
-1586	struct nicvf *nic = netdev_priv(netdev);
-1587	struct net_device *pnetdev = nic->pnicvf->netdev;
+1586	struct nicvf *nic;
+1587	struct net_device *pnetdev;
1588
1589 if (!netdev)
1590 return;
1591
1592 nic = netdev_priv(netdev);
1593 pnetdev = nic->pnicvf->netdev;
1588 1594
1589 /* Check if this Qset is assigned to different VF. 1595 /* Check if this Qset is assigned to different VF.
1590 * If yes, clean primary and all secondary Qsets. 1596 * If yes, clean primary and all secondary Qsets.
diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig
index f6e858d0b9d4..ebdc83247bb6 100644
--- a/drivers/net/ethernet/dlink/Kconfig
+++ b/drivers/net/ethernet/dlink/Kconfig
@@ -17,15 +17,16 @@ config NET_VENDOR_DLINK
17if NET_VENDOR_DLINK 17if NET_VENDOR_DLINK
18 18
19config DL2K 19config DL2K
-20	tristate "DL2000/TC902x-based Gigabit Ethernet support"
+20	tristate "DL2000/TC902x/IP1000A-based Gigabit Ethernet support"
21 depends on PCI 21 depends on PCI
22 select CRC32 22 select CRC32
23 ---help--- 23 ---help---
-24	  This driver supports DL2000/TC902x-based Gigabit ethernet cards,
+24	  This driver supports DL2000/TC902x/IP1000A-based Gigabit ethernet cards,
25 which includes 25 which includes
26 D-Link DGE-550T Gigabit Ethernet Adapter. 26 D-Link DGE-550T Gigabit Ethernet Adapter.
27 D-Link DL2000-based Gigabit Ethernet Adapter. 27 D-Link DL2000-based Gigabit Ethernet Adapter.
28 Sundance/Tamarack TC902x Gigabit Ethernet Adapter. 28 Sundance/Tamarack TC902x Gigabit Ethernet Adapter.
29 ICPlus IP1000A-based cards
29 30
30 To compile this driver as a module, choose M here: the 31 To compile this driver as a module, choose M here: the
31 module will be called dl2k. 32 module will be called dl2k.
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index cf0a5fcdaaaf..ccca4799c27b 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -253,6 +253,19 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
253 if (err) 253 if (err)
254 goto err_out_unmap_rx; 254 goto err_out_unmap_rx;
255 255
256 if (np->chip_id == CHIP_IP1000A &&
257 (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) {
258 /* PHY magic taken from ipg driver, undocumented registers */
259 mii_write(dev, np->phy_addr, 31, 0x0001);
260 mii_write(dev, np->phy_addr, 27, 0x01e0);
261 mii_write(dev, np->phy_addr, 31, 0x0002);
262 mii_write(dev, np->phy_addr, 27, 0xeb8e);
263 mii_write(dev, np->phy_addr, 31, 0x0000);
264 mii_write(dev, np->phy_addr, 30, 0x005e);
265 /* advertise 1000BASE-T half & full duplex, prefer MASTER */
266 mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700);
267 }
268
256 /* Fiber device? */ 269 /* Fiber device? */
257 np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0; 270 np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
258 np->link_status = 0; 271 np->link_status = 0;
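
The "PHY magic" block above alternates writes to register 31 and a second register; on these PHYs register 31 appears to act as a page select, so each pair selects a page and pokes one register inside it. A sketch of that pattern; mii_write_paged() and MII_PAGE_SELECT are hypothetical names used only for illustration:

/* Sketch: paged PHY access as the IP1000A init sequence seems to use.
 * The page-select interpretation of register 31 is an assumption.
 */
#include <stdio.h>

#define MII_PAGE_SELECT	31	/* assumed page-select register */

static void mii_write(int phy, int reg, int val)
{
	printf("phy %d: reg %2d <- 0x%04x\n", phy, reg, val);	/* MDIO stand-in */
}

static void mii_write_paged(int phy, int page, int reg, int val)
{
	mii_write(phy, MII_PAGE_SELECT, page);	/* select page */
	mii_write(phy, reg, val);		/* write within the page */
}

int main(void)
{
	mii_write_paged(1, 0x0001, 27, 0x01e0);
	mii_write_paged(1, 0x0002, 27, 0xeb8e);
	return 0;
}
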
@@ -361,6 +374,11 @@ parse_eeprom (struct net_device *dev)
361 for (i = 0; i < 6; i++) 374 for (i = 0; i < 6; i++)
362 dev->dev_addr[i] = psrom->mac_addr[i]; 375 dev->dev_addr[i] = psrom->mac_addr[i];
363 376
377 if (np->chip_id == CHIP_IP1000A) {
378 np->led_mode = psrom->led_mode;
379 return 0;
380 }
381
364 if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) { 382 if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
365 return 0; 383 return 0;
366 } 384 }
@@ -406,6 +424,28 @@ parse_eeprom (struct net_device *dev)
406 return 0; 424 return 0;
407} 425}
408 426
427static void rio_set_led_mode(struct net_device *dev)
428{
429 struct netdev_private *np = netdev_priv(dev);
430 void __iomem *ioaddr = np->ioaddr;
431 u32 mode;
432
433 if (np->chip_id != CHIP_IP1000A)
434 return;
435
436 mode = dr32(ASICCtrl);
437 mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);
438
439 if (np->led_mode & 0x01)
440 mode |= IPG_AC_LED_MODE;
441 if (np->led_mode & 0x02)
442 mode |= IPG_AC_LED_MODE_BIT_1;
443 if (np->led_mode & 0x08)
444 mode |= IPG_AC_LED_SPEED;
445
446 dw32(ASICCtrl, mode);
447}
448
409static int 449static int
410rio_open (struct net_device *dev) 450rio_open (struct net_device *dev)
411{ 451{
@@ -424,6 +464,8 @@ rio_open (struct net_device *dev)
424 GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset); 464 GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
425 mdelay(10); 465 mdelay(10);
426 466
467 rio_set_led_mode(dev);
468
427 /* DebugCtrl bit 4, 5, 9 must set */ 469 /* DebugCtrl bit 4, 5, 9 must set */
428 dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230); 470 dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);
429 471
@@ -433,9 +475,13 @@ rio_open (struct net_device *dev)
433 475
434 alloc_list (dev); 476 alloc_list (dev);
435 477
-436	/* Get station address */
-437	for (i = 0; i < 6; i++)
-438		dw8(StationAddr0 + i, dev->dev_addr[i]);
+478	/* Set station address */
+479	/* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works
+480	 * too. However, it doesn't work on IP1000A so we use 16-bit access.
+481	 */
+482	for (i = 0; i < 3; i++)
+483		dw16(StationAddr0 + 2 * i,
+484		     cpu_to_le16(((u16 *)dev->dev_addr)[i]));
439 485
440 set_multicast (dev); 486 set_multicast (dev);
441 if (np->coalesce) { 487 if (np->coalesce) {
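
A sketch of the byte grouping done by the new station-address loop: three 16-bit values cover the six address bytes and land at StationAddr0, +2 and +4. The register write and cpu_to_le16() are replaced by user-space stand-ins here:

/* Sketch: grouping a 6-byte station address into three 16-bit writes
 * at consecutive even offsets, as rio_open() now does.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const uint8_t dev_addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint16_t half;
	int i;

	for (i = 0; i < 3; i++) {
		/* like ((u16 *)dev_addr)[i], endianness conversion elided */
		memcpy(&half, dev_addr + 2 * i, sizeof(half));
		printf("StationAddr%d <- 0x%04x\n", 2 * i, half);
	}
	return 0;
}
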
@@ -780,6 +826,7 @@ tx_error (struct net_device *dev, int tx_status)
780 break; 826 break;
781 mdelay (1); 827 mdelay (1);
782 } 828 }
829 rio_set_led_mode(dev);
783 rio_free_tx (dev, 1); 830 rio_free_tx (dev, 1);
784 /* Reset TFDListPtr */ 831 /* Reset TFDListPtr */
785 dw32(TFDListPtr0, np->tx_ring_dma + 832 dw32(TFDListPtr0, np->tx_ring_dma +
@@ -799,6 +846,7 @@ tx_error (struct net_device *dev, int tx_status)
799 break; 846 break;
800 mdelay (1); 847 mdelay (1);
801 } 848 }
849 rio_set_led_mode(dev);
802 /* Let TxStartThresh stay default value */ 850 /* Let TxStartThresh stay default value */
803 } 851 }
804 /* Maximum Collisions */ 852 /* Maximum Collisions */
@@ -965,6 +1013,7 @@ rio_error (struct net_device *dev, int int_status)
965 dev->name, int_status); 1013 dev->name, int_status);
966 dw16(ASICCtrl + 2, GlobalReset | HostReset); 1014 dw16(ASICCtrl + 2, GlobalReset | HostReset);
967 mdelay (500); 1015 mdelay (500);
1016 rio_set_led_mode(dev);
968 } 1017 }
969} 1018}
970 1019
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 23c07b007069..8f4f61262d5c 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -211,6 +211,10 @@ enum ASICCtrl_HiWord_bits {
211 ResetBusy = 0x0400, 211 ResetBusy = 0x0400,
212}; 212};
213 213
214#define IPG_AC_LED_MODE BIT(14)
215#define IPG_AC_LED_SPEED BIT(27)
216#define IPG_AC_LED_MODE_BIT_1 BIT(29)
217
214/* Transmit Frame Control bits */ 218/* Transmit Frame Control bits */
215enum TFC_bits { 219enum TFC_bits {
216 DwordAlign = 0x00000000, 220 DwordAlign = 0x00000000,
@@ -332,7 +336,10 @@ typedef struct t_SROM {
332 u16 asic_ctrl; /* 0x02 */ 336 u16 asic_ctrl; /* 0x02 */
333 u16 sub_vendor_id; /* 0x04 */ 337 u16 sub_vendor_id; /* 0x04 */
334 u16 sub_system_id; /* 0x06 */ 338 u16 sub_system_id; /* 0x06 */
-335	u16 reserved1[12];	/* 0x08-0x1f */
+339	u16 pci_base_1;		/* 0x08 (IP1000A only) */
+340	u16 pci_base_2;		/* 0x0a (IP1000A only) */
+341	u16 led_mode;		/* 0x0c (IP1000A only) */
+342	u16 reserved1[9];	/* 0x0e-0x1f */
336 u8 mac_addr[6]; /* 0x20-0x25 */ 343 u8 mac_addr[6]; /* 0x20-0x25 */
337 u8 reserved2[10]; /* 0x26-0x2f */ 344 u8 reserved2[10]; /* 0x26-0x2f */
338 u8 sib[204]; /* 0x30-0xfb */ 345 u8 sib[204]; /* 0x30-0xfb */
@@ -397,6 +404,7 @@ struct netdev_private {
397 u16 advertising; /* NWay media advertisement */ 404 u16 advertising; /* NWay media advertisement */
398 u16 negotiate; /* Negotiated media */ 405 u16 negotiate; /* Negotiated media */
399 int phy_addr; /* PHY addresses. */ 406 int phy_addr; /* PHY addresses. */
407 u16 led_mode; /* LED mode read from EEPROM (IP1000A only) */
400}; 408};
401 409
402/* The station address location in the EEPROM. */ 410/* The station address location in the EEPROM. */
@@ -407,10 +415,15 @@ struct netdev_private {
407 class_mask of the class are honored during the comparison. 415 class_mask of the class are honored during the comparison.
408 driver_data Data private to the driver. 416 driver_data Data private to the driver.
409*/ 417*/
418#define CHIP_IP1000A 1
410 419
411static const struct pci_device_id rio_pci_tbl[] = { 420static const struct pci_device_id rio_pci_tbl[] = {
412 {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, }, 421 {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, },
413 {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, }, 422 {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, },
423 { PCI_VDEVICE(SUNDANCE, 0x1023), CHIP_IP1000A },
424 { PCI_VDEVICE(SUNDANCE, 0x2021), CHIP_IP1000A },
425 { PCI_VDEVICE(DLINK, 0x9021), CHIP_IP1000A },
426 { PCI_VDEVICE(DLINK, 0x4020), CHIP_IP1000A },
414 { } 427 { }
415}; 428};
416MODULE_DEVICE_TABLE (pci, rio_pci_tbl); 429MODULE_DEVICE_TABLE (pci, rio_pci_tbl);
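
The driver_data field of the new table entries carries CHIP_IP1000A, which rio_probe1() later reads back as np->chip_id. A sketch of how such a match is consumed; the structures are simplified stand-ins, not the kernel definitions:

/* Sketch: driver_data from a pci_device_id match becoming the chip id. */
#include <stdio.h>

#define CHIP_IP1000A 1

struct id { unsigned vendor, device; unsigned long driver_data; };

static const struct id tbl[] = {
	{ 0x1186, 0x4000, 0 },			/* D-Link DGE-550T */
	{ 0x13f0, 0x1023, CHIP_IP1000A },	/* IP1000A-based card */
};

int main(void)
{
	unsigned vendor = 0x13f0, device = 0x1023, i;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		if (tbl[i].vendor == vendor && tbl[i].device == device)
			printf("matched, chip_id = %lu\n", tbl[i].driver_data);
	return 0;
}
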
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index f4cb8e425853..734f655c99c1 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1062,9 +1062,7 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1062static int be_set_rss_hash_opts(struct be_adapter *adapter, 1062static int be_set_rss_hash_opts(struct be_adapter *adapter,
1063 struct ethtool_rxnfc *cmd) 1063 struct ethtool_rxnfc *cmd)
1064{ 1064{
-1065	struct be_rx_obj *rxo;
-1066	int status = 0, i, j;
-1067	u8 rsstable[128];
+1065	int status;
1068 u32 rss_flags = adapter->rss_info.rss_flags; 1066 u32 rss_flags = adapter->rss_info.rss_flags;
1069 1067
1070 if (cmd->data != L3_RSS_FLAGS && 1068 if (cmd->data != L3_RSS_FLAGS &&
@@ -1113,20 +1111,11 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
1113 } 1111 }
1114 1112
1115 if (rss_flags == adapter->rss_info.rss_flags) 1113 if (rss_flags == adapter->rss_info.rss_flags)
-1116		return status;
+1114		return 0;
1117
1118 if (be_multi_rxq(adapter)) {
1119 for (j = 0; j < 128; j += adapter->num_rss_qs) {
1120 for_all_rss_queues(adapter, rxo, i) {
1121 if ((j + i) >= 128)
1122 break;
1123 rsstable[j + i] = rxo->rss_id;
1124 }
1125 }
1126 }
1127 1115
1128 status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable, 1116 status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable,
-1129				   rss_flags, 128, adapter->rss_info.rss_hkey);
+1117				   rss_flags, RSS_INDIR_TABLE_LEN,
+1118				   adapter->rss_info.rss_hkey);
1130 if (!status) 1119 if (!status)
1131 adapter->rss_info.rss_flags = rss_flags; 1120 adapter->rss_info.rss_flags = rss_flags;
1132 1121
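
The deleted loop filled a 128-entry RSS indirection table round-robin with the RSS queue ids; the driver now reuses the table it already keeps in rss_info. A sketch of the cyclic fill the old code performed; the queue count and ids are made-up example values:

/* Sketch: round-robin RSS indirection fill, as in the removed
 * be_set_rss_hash_opts() code. 128 matches the old table length.
 */
#include <stdio.h>

#define TABLE_LEN 128

int main(void)
{
	int rss_id[4] = { 7, 8, 9, 10 };	/* example per-queue RSS ids */
	int num_rss_qs = 4;
	unsigned char rsstable[TABLE_LEN];
	int i, j;

	for (j = 0; j < TABLE_LEN; j += num_rss_qs)
		for (i = 0; i < num_rss_qs && j + i < TABLE_LEN; i++)
			rsstable[j + i] = rss_id[i];

	printf("entry 0 -> id %d, entry 5 -> id %d\n",
	       rsstable[0], rsstable[5]);
	return 0;
}
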
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index eb48a977f8da..b6ad02909d6b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3518,7 +3518,7 @@ static int be_rx_qs_create(struct be_adapter *adapter)
3518 3518
3519 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN); 3519 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3520 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags, 3520 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
-3521			       128, rss_key);
+3521			       RSS_INDIR_TABLE_LEN, rss_key);
3522 if (rc) { 3522 if (rc) {
3523 rss->rss_flags = RSS_ENABLE_NONE; 3523 rss->rss_flags = RSS_ENABLE_NONE;
3524 return rc; 3524 return rc;
diff --git a/drivers/net/ethernet/icplus/Kconfig b/drivers/net/ethernet/icplus/Kconfig
deleted file mode 100644
index 14a66e9d2e26..000000000000
--- a/drivers/net/ethernet/icplus/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
1#
2# IC Plus device configuration
3#
4
5config IP1000
6 tristate "IP1000 Gigabit Ethernet support"
7 depends on PCI
8 select MII
9 ---help---
10 This driver supports IP1000 gigabit Ethernet cards.
11
12 To compile this driver as a module, choose M here: the module
13 will be called ipg. This is recommended.
diff --git a/drivers/net/ethernet/icplus/Makefile b/drivers/net/ethernet/icplus/Makefile
deleted file mode 100644
index 5bc87c1f36aa..000000000000
--- a/drivers/net/ethernet/icplus/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
1#
2# Makefile for the IC Plus device drivers
3#
4
5obj-$(CONFIG_IP1000) += ipg.o
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
deleted file mode 100644
index c3b6af83f070..000000000000
--- a/drivers/net/ethernet/icplus/ipg.c
+++ /dev/null
@@ -1,2300 +0,0 @@
1/*
2 * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter
3 *
4 * Copyright (C) 2003, 2007 IC Plus Corp
5 *
6 * Original Author:
7 *
8 * Craig Rich
9 * Sundance Technology, Inc.
10 * www.sundanceti.com
11 * craig_rich@sundanceti.com
12 *
13 * Current Maintainer:
14 *
15 * Sorbica Shieh.
16 * http://www.icplus.com.tw
17 * sorbica@icplus.com.tw
18 *
19 * Jesse Huang
20 * http://www.icplus.com.tw
21 * jesse@icplus.com.tw
22 */
23
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
26#include <linux/crc32.h>
27#include <linux/ethtool.h>
28#include <linux/interrupt.h>
29#include <linux/gfp.h>
30#include <linux/mii.h>
31#include <linux/mutex.h>
32
33#include <asm/div64.h>
34
35#define IPG_RX_RING_BYTES (sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH)
36#define IPG_TX_RING_BYTES (sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH)
37#define IPG_RESET_MASK \
38 (IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \
39 IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \
40 IPG_AC_AUTO_INIT)
41
42#define ipg_w32(val32, reg) iowrite32((val32), ioaddr + (reg))
43#define ipg_w16(val16, reg) iowrite16((val16), ioaddr + (reg))
44#define ipg_w8(val8, reg) iowrite8((val8), ioaddr + (reg))
45
46#define ipg_r32(reg) ioread32(ioaddr + (reg))
47#define ipg_r16(reg) ioread16(ioaddr + (reg))
48#define ipg_r8(reg) ioread8(ioaddr + (reg))
49
50enum {
51 netdev_io_size = 128
52};
53
54#include "ipg.h"
55#define DRV_NAME "ipg"
56
57MODULE_AUTHOR("IC Plus Corp. 2003");
58MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver");
59MODULE_LICENSE("GPL");
60
61/*
62 * Defaults
63 */
64#define IPG_MAX_RXFRAME_SIZE 0x0600
65#define IPG_RXFRAG_SIZE 0x0600
66#define IPG_RXSUPPORT_SIZE 0x0600
67#define IPG_IS_JUMBO false
68
69/*
70 * Variable record -- index by leading revision/length
71 * Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN
72 */
73static const unsigned short DefaultPhyParam[] = {
74 /* 11/12/03 IP1000A v1-3 rev=0x40 */
75 /*--------------------------------------------------------------------------
76 (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2,
77 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6,
78 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700,
79 --------------------------------------------------------------------------*/
80 /* 12/17/03 IP1000A v1-4 rev=0x40 */
81 (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
82 0x0000,
83 30, 0x005e, 9, 0x0700,
84 /* 01/09/04 IP1000A v1-5 rev=0x41 */
85 (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
86 0x0000,
87 30, 0x005e, 9, 0x0700,
88 0x0000
89};
90
91static const char * const ipg_brand_name[] = {
92 "IC PLUS IP1000 1000/100/10 based NIC",
93 "Sundance Technology ST2021 based NIC",
94 "Tamarack Microelectronics TC9020/9021 based NIC",
95 "D-Link NIC IP1000A"
96};
97
98static const struct pci_device_id ipg_pci_tbl[] = {
99 { PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
100 { PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
101 { PCI_VDEVICE(DLINK, 0x9021), 2 },
102 { PCI_VDEVICE(DLINK, 0x4020), 3 },
103 { 0, }
104};
105
106MODULE_DEVICE_TABLE(pci, ipg_pci_tbl);
107
108static inline void __iomem *ipg_ioaddr(struct net_device *dev)
109{
110 struct ipg_nic_private *sp = netdev_priv(dev);
111 return sp->ioaddr;
112}
113
114#ifdef IPG_DEBUG
115static void ipg_dump_rfdlist(struct net_device *dev)
116{
117 struct ipg_nic_private *sp = netdev_priv(dev);
118 void __iomem *ioaddr = sp->ioaddr;
119 unsigned int i;
120 u32 offset;
121
122 IPG_DEBUG_MSG("_dump_rfdlist\n");
123
124 netdev_info(dev, "rx_current = %02x\n", sp->rx_current);
125 netdev_info(dev, "rx_dirty = %02x\n", sp->rx_dirty);
126 netdev_info(dev, "RFDList start address = %016lx\n",
127 (unsigned long)sp->rxd_map);
128 netdev_info(dev, "RFDListPtr register = %08x%08x\n",
129 ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0));
130
131 for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
132 offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd;
133 netdev_info(dev, "%02x %04x RFDNextPtr = %016lx\n",
134 i, offset, (unsigned long)sp->rxd[i].next_desc);
135 offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd;
136 netdev_info(dev, "%02x %04x RFS = %016lx\n",
137 i, offset, (unsigned long)sp->rxd[i].rfs);
138 offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd;
139 netdev_info(dev, "%02x %04x frag_info = %016lx\n",
140 i, offset, (unsigned long)sp->rxd[i].frag_info);
141 }
142}
143
144static void ipg_dump_tfdlist(struct net_device *dev)
145{
146 struct ipg_nic_private *sp = netdev_priv(dev);
147 void __iomem *ioaddr = sp->ioaddr;
148 unsigned int i;
149 u32 offset;
150
151 IPG_DEBUG_MSG("_dump_tfdlist\n");
152
153 netdev_info(dev, "tx_current = %02x\n", sp->tx_current);
154 netdev_info(dev, "tx_dirty = %02x\n", sp->tx_dirty);
155 netdev_info(dev, "TFDList start address = %016lx\n",
156 (unsigned long) sp->txd_map);
157 netdev_info(dev, "TFDListPtr register = %08x%08x\n",
158 ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0));
159
160 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
161 offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd;
162 netdev_info(dev, "%02x %04x TFDNextPtr = %016lx\n",
163 i, offset, (unsigned long)sp->txd[i].next_desc);
164
165 offset = (u32) &sp->txd[i].tfc - (u32) sp->txd;
166 netdev_info(dev, "%02x %04x TFC = %016lx\n",
167 i, offset, (unsigned long) sp->txd[i].tfc);
168 offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd;
169 netdev_info(dev, "%02x %04x frag_info = %016lx\n",
170 i, offset, (unsigned long) sp->txd[i].frag_info);
171 }
172}
173#endif
174
175static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data)
176{
177 ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL);
178 ndelay(IPG_PC_PHYCTRLWAIT_NS);
179}
180
181static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data)
182{
183 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data);
184 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data);
185}
186
187static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity)
188{
189 phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR;
190
191 ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity);
192}
193
194static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity)
195{
196 ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR |
197 phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL);
198}
199
200static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity)
201{
202 u16 bit_data;
203
204 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity);
205
206 bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1;
207
208 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity);
209
210 return bit_data;
211}
212
213/*
214 * Read a register from the Physical Layer device located
215 * on the IPG NIC, using the IPG PHYCTRL register.
216 */
217static int mdio_read(struct net_device *dev, int phy_id, int phy_reg)
218{
219 void __iomem *ioaddr = ipg_ioaddr(dev);
220 /*
221	 * The GMII management frame structure for a read is as follows:
222 *
223 * |Preamble|st|op|phyad|regad|ta| data |idle|
224 * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z |
225 *
226 * <32 1s> = 32 consecutive logic 1 values
227 * A = bit of Physical Layer device address (MSB first)
228 * R = bit of register address (MSB first)
229 * z = High impedance state
230 * D = bit of read data (MSB first)
231 *
232 * Transmission order is 'Preamble' field first, bits transmitted
233 * left to right (first to last).
234 */
235 struct {
236 u32 field;
237 unsigned int len;
238 } p[] = {
239 { GMII_PREAMBLE, 32 }, /* Preamble */
240 { GMII_ST, 2 }, /* ST */
241 { GMII_READ, 2 }, /* OP */
242 { phy_id, 5 }, /* PHYAD */
243 { phy_reg, 5 }, /* REGAD */
244 { 0x0000, 2 }, /* TA */
245 { 0x0000, 16 }, /* DATA */
246 { 0x0000, 1 } /* IDLE */
247 };
248 unsigned int i, j;
249 u8 polarity, data;
250
251 polarity = ipg_r8(PHY_CTRL);
252 polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);
253
254 /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */
255 for (j = 0; j < 5; j++) {
256 for (i = 0; i < p[j].len; i++) {
257 /* For each variable length field, the MSB must be
258 * transmitted first. Rotate through the field bits,
259 * starting with the MSB, and move each bit into the
260			 * 1st (2^1) bit position (this is the bit position
261 * corresponding to the MgmtData bit of the PhyCtrl
262 * register for the IPG).
263 *
264 * Example: ST = 01;
265 *
266 * First write a '0' to bit 1 of the PhyCtrl
267 * register, then write a '1' to bit 1 of the
268 * PhyCtrl register.
269 *
270 * To do this, right shift the MSB of ST by the value:
271 * [field length - 1 - #ST bits already written]
272 * then left shift this result by 1.
273 */
274 data = (p[j].field >> (p[j].len - 1 - i)) << 1;
275 data &= IPG_PC_MGMTDATA;
276 data |= polarity | IPG_PC_MGMTDIR;
277
278 ipg_drive_phy_ctl_low_high(ioaddr, data);
279 }
280 }
281
282 send_three_state(ioaddr, polarity);
283
284 read_phy_bit(ioaddr, polarity);
285
286 /*
287 * For a read cycle, the bits for the next two fields (TA and
288 * DATA) are driven by the PHY (the IPG reads these bits).
289 */
290 for (i = 0; i < p[6].len; i++) {
291 p[6].field |=
292 (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i));
293 }
294
295 send_three_state(ioaddr, polarity);
296 send_three_state(ioaddr, polarity);
297 send_three_state(ioaddr, polarity);
298 send_end(ioaddr, polarity);
299
300 /* Return the value of the DATA field. */
301 return p[6].field;
302}
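
The field loop above shifts each field so that its most significant remaining bit lands in the MgmtData bit position. A self-contained sketch of that MSB-first serialization, printing bits instead of toggling PHY_CTRL:

/* Sketch: MSB-first serialization of GMII management-frame fields,
 * mirroring the shift used in mdio_read()/mdio_write().
 */
#include <stdio.h>

static void send_field(unsigned int field, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		printf("%u", (field >> (len - 1 - i)) & 1);	/* MSB first */
	printf("\n");
}

int main(void)
{
	send_field(0x1, 2);	/* ST = 01 */
	send_field(0x2, 2);	/* OP = 10 (read) */
	send_field(0x03, 5);	/* PHYAD = 00011 */
	return 0;
}
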
303
304/*
305 * Write to a register from the Physical Layer device located
306 * on the IPG NIC, using the IPG PHYCTRL register.
307 */
308static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
309{
310 void __iomem *ioaddr = ipg_ioaddr(dev);
311 /*
312	 * The GMII management frame structure for a write is as follows:
313 *
314 * |Preamble|st|op|phyad|regad|ta| data |idle|
315 * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z |
316 *
317 * <32 1s> = 32 consecutive logic 1 values
318 * A = bit of Physical Layer device address (MSB first)
319 * R = bit of register address (MSB first)
320 * z = High impedance state
321 * D = bit of write data (MSB first)
322 *
323 * Transmission order is 'Preamble' field first, bits transmitted
324 * left to right (first to last).
325 */
326 struct {
327 u32 field;
328 unsigned int len;
329 } p[] = {
330 { GMII_PREAMBLE, 32 }, /* Preamble */
331 { GMII_ST, 2 }, /* ST */
332 { GMII_WRITE, 2 }, /* OP */
333 { phy_id, 5 }, /* PHYAD */
334 { phy_reg, 5 }, /* REGAD */
335 { 0x0002, 2 }, /* TA */
336 { val & 0xffff, 16 }, /* DATA */
337 { 0x0000, 1 } /* IDLE */
338 };
339 unsigned int i, j;
340 u8 polarity, data;
341
342 polarity = ipg_r8(PHY_CTRL);
343 polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);
344
345 /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */
346 for (j = 0; j < 7; j++) {
347 for (i = 0; i < p[j].len; i++) {
348 /* For each variable length field, the MSB must be
349 * transmitted first. Rotate through the field bits,
350 * starting with the MSB, and move each bit into the
351			 * 1st (2^1) bit position (this is the bit position
352 * corresponding to the MgmtData bit of the PhyCtrl
353 * register for the IPG).
354 *
355 * Example: ST = 01;
356 *
357 * First write a '0' to bit 1 of the PhyCtrl
358 * register, then write a '1' to bit 1 of the
359 * PhyCtrl register.
360 *
361 * To do this, right shift the MSB of ST by the value:
362 * [field length - 1 - #ST bits already written]
363 * then left shift this result by 1.
364 */
365 data = (p[j].field >> (p[j].len - 1 - i)) << 1;
366 data &= IPG_PC_MGMTDATA;
367 data |= polarity | IPG_PC_MGMTDIR;
368
369 ipg_drive_phy_ctl_low_high(ioaddr, data);
370 }
371 }
372
373 /* The last cycle is a tri-state, so read from the PHY. */
374 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
375 ipg_r8(PHY_CTRL);
376 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
377}
378
379static void ipg_set_led_mode(struct net_device *dev)
380{
381 struct ipg_nic_private *sp = netdev_priv(dev);
382 void __iomem *ioaddr = sp->ioaddr;
383 u32 mode;
384
385 mode = ipg_r32(ASIC_CTRL);
386 mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);
387
388 if ((sp->led_mode & 0x03) > 1)
389 mode |= IPG_AC_LED_MODE_BIT_1; /* Write Asic Control Bit 29 */
390
391 if ((sp->led_mode & 0x01) == 1)
392 mode |= IPG_AC_LED_MODE; /* Write Asic Control Bit 14 */
393
394 if ((sp->led_mode & 0x08) == 8)
395 mode |= IPG_AC_LED_SPEED; /* Write Asic Control Bit 27 */
396
397 ipg_w32(mode, ASIC_CTRL);
398}
399
400static void ipg_set_phy_set(struct net_device *dev)
401{
402 struct ipg_nic_private *sp = netdev_priv(dev);
403 void __iomem *ioaddr = sp->ioaddr;
404 int physet;
405
406 physet = ipg_r8(PHY_SET);
407 physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET);
408 physet |= ((sp->led_mode & 0x70) >> 4);
409 ipg_w8(physet, PHY_SET);
410}
411
412static int ipg_reset(struct net_device *dev, u32 resetflags)
413{
414 /* Assert functional resets via the IPG AsicCtrl
415 * register as specified by the 'resetflags' input
416 * parameter.
417 */
418 void __iomem *ioaddr = ipg_ioaddr(dev);
419 unsigned int timeout_count = 0;
420
421 IPG_DEBUG_MSG("_reset\n");
422
423 ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL);
424
425 /* Delay added to account for problem with 10Mbps reset. */
426 mdelay(IPG_AC_RESETWAIT);
427
428 while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) {
429 mdelay(IPG_AC_RESETWAIT);
430 if (++timeout_count > IPG_AC_RESET_TIMEOUT)
431 return -ETIME;
432 }
433 /* Set LED Mode in Asic Control */
434 ipg_set_led_mode(dev);
435
436 /* Set PHYSet Register Value */
437 ipg_set_phy_set(dev);
438 return 0;
439}
440
441/* Find the GMII PHY address. */
442static int ipg_find_phyaddr(struct net_device *dev)
443{
444 unsigned int phyaddr, i;
445
446 for (i = 0; i < 32; i++) {
447 u32 status;
448
449 /* Search for the correct PHY address among 32 possible. */
450 phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32;
451
452 /* 10/22/03 Grace change verify from GMII_PHY_STATUS to
453 GMII_PHY_ID1
454 */
455
456 status = mdio_read(dev, phyaddr, MII_BMSR);
457
458 if ((status != 0xFFFF) && (status != 0))
459 return phyaddr;
460 }
461
462 return 0x1f;
463}
464
465/*
466 * Configure IPG based on result of IEEE 802.3 PHY
467 * auto-negotiation.
468 */
469static int ipg_config_autoneg(struct net_device *dev)
470{
471 struct ipg_nic_private *sp = netdev_priv(dev);
472 void __iomem *ioaddr = sp->ioaddr;
473 unsigned int txflowcontrol;
474 unsigned int rxflowcontrol;
475 unsigned int fullduplex;
476 u32 mac_ctrl_val;
477 u32 asicctrl;
478 u8 phyctrl;
479 const char *speed;
480 const char *duplex;
481 const char *tx_desc;
482 const char *rx_desc;
483
484 IPG_DEBUG_MSG("_config_autoneg\n");
485
486 asicctrl = ipg_r32(ASIC_CTRL);
487 phyctrl = ipg_r8(PHY_CTRL);
488 mac_ctrl_val = ipg_r32(MAC_CTRL);
489
490 /* Set flags for use in resolving auto-negotiation, assuming
491 * non-1000Mbps, half duplex, no flow control.
492 */
493 fullduplex = 0;
494 txflowcontrol = 0;
495 rxflowcontrol = 0;
496
497 /* To accommodate a problem in 10Mbps operation,
498	 * set a global flag if the PHY is running in 10Mbps mode.
499 */
500 sp->tenmbpsmode = 0;
501
502 /* Determine actual speed of operation. */
503 switch (phyctrl & IPG_PC_LINK_SPEED) {
504 case IPG_PC_LINK_SPEED_10MBPS:
505 speed = "10Mbps";
506 sp->tenmbpsmode = 1;
507 break;
508 case IPG_PC_LINK_SPEED_100MBPS:
509 speed = "100Mbps";
510 break;
511 case IPG_PC_LINK_SPEED_1000MBPS:
512 speed = "1000Mbps";
513 break;
514 default:
515 speed = "undefined!";
516 return 0;
517 }
518
519 netdev_info(dev, "Link speed = %s\n", speed);
520 if (sp->tenmbpsmode == 1)
521 netdev_info(dev, "10Mbps operational mode enabled\n");
522
523 if (phyctrl & IPG_PC_DUPLEX_STATUS) {
524 fullduplex = 1;
525 txflowcontrol = 1;
526 rxflowcontrol = 1;
527 }
528
529 /* Configure full duplex, and flow control. */
530 if (fullduplex == 1) {
531
532 /* Configure IPG for full duplex operation. */
533
534 duplex = "full";
535
536 mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD;
537
538 if (txflowcontrol == 1) {
539 tx_desc = "";
540 mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE;
541 } else {
542 tx_desc = "no ";
543 mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE;
544 }
545
546 if (rxflowcontrol == 1) {
547 rx_desc = "";
548 mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE;
549 } else {
550 rx_desc = "no ";
551 mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE;
552 }
553 } else {
554 duplex = "half";
555 tx_desc = "no ";
556 rx_desc = "no ";
557 mac_ctrl_val &= (~IPG_MC_DUPLEX_SELECT_FD &
558 ~IPG_MC_TX_FLOW_CONTROL_ENABLE &
559 ~IPG_MC_RX_FLOW_CONTROL_ENABLE);
560 }
561
562 netdev_info(dev, "setting %s duplex, %sTX, %sRX flow control\n",
563 duplex, tx_desc, rx_desc);
564 ipg_w32(mac_ctrl_val, MAC_CTRL);
565
566 return 0;
567}
568
569/* Determine and configure multicast operation and set
570 * receive mode for IPG.
571 */
572static void ipg_nic_set_multicast_list(struct net_device *dev)
573{
574 void __iomem *ioaddr = ipg_ioaddr(dev);
575 struct netdev_hw_addr *ha;
576 unsigned int hashindex;
577 u32 hashtable[2];
578 u8 receivemode;
579
580 IPG_DEBUG_MSG("_nic_set_multicast_list\n");
581
582 receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST;
583
584 if (dev->flags & IFF_PROMISC) {
585 /* NIC to be configured in promiscuous mode. */
586 receivemode = IPG_RM_RECEIVEALLFRAMES;
587 } else if ((dev->flags & IFF_ALLMULTI) ||
588 ((dev->flags & IFF_MULTICAST) &&
589 (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) {
590 /* NIC to be configured to receive all multicast
591 * frames. */
592 receivemode |= IPG_RM_RECEIVEMULTICAST;
593 } else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
594 /* NIC to be configured to receive selected
595 * multicast addresses. */
596 receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
597 }
598
599	/* Calculate the bits to set for the 64-bit IPG HASHTABLE.
600 * The IPG applies a cyclic-redundancy-check (the same CRC
601 * used to calculate the frame data FCS) to the destination
602	 * address of all incoming multicast frames whose destination
603 * address has the multicast bit set. The least significant
604 * 6 bits of the CRC result are used as an addressing index
605 * into the hash table. If the value of the bit addressed by
606 * this index is a 1, the frame is passed to the host system.
607 */
608
609 /* Clear hashtable. */
610 hashtable[0] = 0x00000000;
611 hashtable[1] = 0x00000000;
612
613 /* Cycle through all multicast addresses to filter. */
614 netdev_for_each_mc_addr(ha, dev) {
615 /* Calculate CRC result for each multicast address. */
616 hashindex = crc32_le(0xffffffff, ha->addr,
617 ETH_ALEN);
618
619 /* Use only the least significant 6 bits. */
620 hashindex = hashindex & 0x3F;
621
622 /* Within "hashtable", set bit number "hashindex"
623 * to a logic 1.
624 */
625 set_bit(hashindex, (void *)hashtable);
626 }
627
628	/* Write the value of the hashtable to the four 16-bit
629 * HASHTABLE IPG registers.
630 */
631 ipg_w32(hashtable[0], HASHTABLE_0);
632 ipg_w32(hashtable[1], HASHTABLE_1);
633
634 ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE);
635
636 IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE));
637}
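
The hash described above can be reproduced in isolation: CRC-32 of the destination address, keep the low 6 bits, set that bit in the 64-bit table. A sketch with a local bitwise CRC; it is written to follow crc32_le()'s usual convention (reflected, polynomial 0xedb88320, no final inversion):

/* Sketch: computing the IPG 64-bit multicast hash index. */
#include <stdint.h>
#include <stdio.h>

static uint32_t crc32_le(uint32_t crc, const uint8_t *p, int len)
{
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t mc_addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t hashtable[2] = { 0, 0 };
	unsigned int hashindex = crc32_le(0xffffffff, mc_addr, 6) & 0x3f;

	hashtable[hashindex / 32] |= 1u << (hashindex % 32);
	printf("hash index %u -> HASHTABLE_%u bit %u\n",
	       hashindex, hashindex / 32, hashindex % 32);
	return 0;
}
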
638
639static int ipg_io_config(struct net_device *dev)
640{
641 struct ipg_nic_private *sp = netdev_priv(dev);
642 void __iomem *ioaddr = ipg_ioaddr(dev);
643 u32 origmacctrl;
644 u32 restoremacctrl;
645
646 IPG_DEBUG_MSG("_io_config\n");
647
648 origmacctrl = ipg_r32(MAC_CTRL);
649
650 restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE;
651
652 /* Based on compilation option, determine if FCS is to be
653 * stripped on receive frames by IPG.
654 */
655 if (!IPG_STRIP_FCS_ON_RX)
656 restoremacctrl |= IPG_MC_RCV_FCS;
657
658 /* Determine if transmitter and/or receiver are
659 * enabled so we may restore MACCTRL correctly.
660 */
661 if (origmacctrl & IPG_MC_TX_ENABLED)
662 restoremacctrl |= IPG_MC_TX_ENABLE;
663
664 if (origmacctrl & IPG_MC_RX_ENABLED)
665 restoremacctrl |= IPG_MC_RX_ENABLE;
666
667 /* Transmitter and receiver must be disabled before setting
668 * IFSSelect.
669 */
670 ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) &
671 IPG_MC_RSVD_MASK, MAC_CTRL);
672
673 /* Now that transmitter and receiver are disabled, write
674 * to IFSSelect.
675 */
676 ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL);
677
678 /* Set RECEIVEMODE register. */
679 ipg_nic_set_multicast_list(dev);
680
681 ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE);
682
683 ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD);
684 ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH);
685 ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH);
686 ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD);
687 ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH);
688 ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH);
689 ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE |
690 IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED |
691 IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT |
692 IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE);
693 ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH);
694 ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH);
695
696 /* IPG multi-frag frame bug workaround.
697	 * Per silicon revision B3 errata.
698 */
699 ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL);
700
701 /* IPG TX poll now bug workaround.
702	 * Per silicon revision B3 errata.
703 */
704 ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL);
705
706 /* IPG RX poll now bug workaround.
707	 * Per silicon revision B3 errata.
708 */
709 ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL);
710
711 /* Now restore MACCTRL to original setting. */
712 ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL);
713
714 /* Disable unused RMON statistics. */
715 ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK);
716
717 /* Disable unused MIB statistics. */
718 ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD |
719 IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES |
720 IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES |
721 IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK |
722 IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS |
723 IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK);
724
725 return 0;
726}
727
728/*
729 * Create a receive buffer within system memory and update
730 * NIC private structure appropriately.
731 */
732static int ipg_get_rxbuff(struct net_device *dev, int entry)
733{
734 struct ipg_nic_private *sp = netdev_priv(dev);
735 struct ipg_rx *rxfd = sp->rxd + entry;
736 struct sk_buff *skb;
737 u64 rxfragsize;
738
739 IPG_DEBUG_MSG("_get_rxbuff\n");
740
741 skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size);
742 if (!skb) {
743 sp->rx_buff[entry] = NULL;
744 return -ENOMEM;
745 }
746
747 /* Save the address of the sk_buff structure. */
748 sp->rx_buff[entry] = skb;
749
750 rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
751 sp->rx_buf_sz, PCI_DMA_FROMDEVICE));
752
753 /* Set the RFD fragment length. */
754 rxfragsize = sp->rxfrag_size;
755 rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN);
756
757 return 0;
758}
759
760static int init_rfdlist(struct net_device *dev)
761{
762 struct ipg_nic_private *sp = netdev_priv(dev);
763 void __iomem *ioaddr = sp->ioaddr;
764 unsigned int i;
765
766 IPG_DEBUG_MSG("_init_rfdlist\n");
767
768 for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
769 struct ipg_rx *rxfd = sp->rxd + i;
770
771 if (sp->rx_buff[i]) {
772 pci_unmap_single(sp->pdev,
773 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
774 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
775 dev_kfree_skb_irq(sp->rx_buff[i]);
776 sp->rx_buff[i] = NULL;
777 }
778
779 /* Clear out the RFS field. */
780 rxfd->rfs = 0x0000000000000000;
781
782 if (ipg_get_rxbuff(dev, i) < 0) {
783 /*
784 * A receive buffer was not ready, break the
785 * RFD list here.
786 */
787 IPG_DEBUG_MSG("Cannot allocate Rx buffer\n");
788
789 /* Just in case we cannot allocate a single RFD.
790 * Should not occur.
791 */
792 if (i == 0) {
793 netdev_err(dev, "No memory available for RFD list\n");
794 return -ENOMEM;
795 }
796 }
797
798 rxfd->next_desc = cpu_to_le64(sp->rxd_map +
799 sizeof(struct ipg_rx)*(i + 1));
800 }
801 sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map);
802
803 sp->rx_current = 0;
804 sp->rx_dirty = 0;
805
806 /* Write the location of the RFDList to the IPG. */
807 ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0);
808 ipg_w32(0x00000000, RFD_LIST_PTR_1);
809
810 return 0;
811}
812
813static void init_tfdlist(struct net_device *dev)
814{
815 struct ipg_nic_private *sp = netdev_priv(dev);
816 void __iomem *ioaddr = sp->ioaddr;
817 unsigned int i;
818
819 IPG_DEBUG_MSG("_init_tfdlist\n");
820
821 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
822 struct ipg_tx *txfd = sp->txd + i;
823
824 txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);
825
826 if (sp->tx_buff[i]) {
827 dev_kfree_skb_irq(sp->tx_buff[i]);
828 sp->tx_buff[i] = NULL;
829 }
830
831 txfd->next_desc = cpu_to_le64(sp->txd_map +
832 sizeof(struct ipg_tx)*(i + 1));
833 }
834 sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map);
835
836 sp->tx_current = 0;
837 sp->tx_dirty = 0;
838
839 /* Write the location of the TFDList to the IPG. */
840 IPG_DDEBUG_MSG("Starting TFDListPtr = %08x\n",
841 (u32) sp->txd_map);
842 ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0);
843 ipg_w32(0x00000000, TFD_LIST_PTR_1);
844
845 sp->reset_current_tfd = 1;
846}
847
848/*
849 * Free all transmit buffers which have already been transferred
850 * via DMA to the IPG.
851 */
852static void ipg_nic_txfree(struct net_device *dev)
853{
854 struct ipg_nic_private *sp = netdev_priv(dev);
855 unsigned int released, pending, dirty;
856
857 IPG_DEBUG_MSG("_nic_txfree\n");
858
859 pending = sp->tx_current - sp->tx_dirty;
860 dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH;
861
862 for (released = 0; released < pending; released++) {
863 struct sk_buff *skb = sp->tx_buff[dirty];
864 struct ipg_tx *txfd = sp->txd + dirty;
865
866 IPG_DEBUG_MSG("TFC = %016lx\n", (unsigned long) txfd->tfc);
867
868 /* Look at each TFD's TFC field beginning
869 * at the last freed TFD up to the current TFD.
870 * If the TFDDone bit is set, free the associated
871 * buffer.
872 */
873 if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE)))
874 break;
875
876 /* Free the transmit buffer. */
877 if (skb) {
878 pci_unmap_single(sp->pdev,
879 le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
880 skb->len, PCI_DMA_TODEVICE);
881
882 dev_kfree_skb_irq(skb);
883
884 sp->tx_buff[dirty] = NULL;
885 }
886 dirty = (dirty + 1) % IPG_TFDLIST_LENGTH;
887 }
888
889 sp->tx_dirty += released;
890
891 if (netif_queue_stopped(dev) &&
892 (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) {
893 netif_wake_queue(dev);
894 }
895}
896
897static void ipg_tx_timeout(struct net_device *dev)
898{
899 struct ipg_nic_private *sp = netdev_priv(dev);
900 void __iomem *ioaddr = sp->ioaddr;
901
902 ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK |
903 IPG_AC_FIFO);
904
905 spin_lock_irq(&sp->lock);
906
907 /* Re-configure after DMA reset. */
908 if (ipg_io_config(dev) < 0)
909 netdev_info(dev, "Error during re-configuration\n");
910
911 init_tfdlist(dev);
912
913 spin_unlock_irq(&sp->lock);
914
915 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK,
916 MAC_CTRL);
917}
918
919/*
920 * For TxComplete interrupts, free all transmit
921 * buffers which have already been transferred via DMA
922 * to the IPG.
923 */
924static void ipg_nic_txcleanup(struct net_device *dev)
925{
926 struct ipg_nic_private *sp = netdev_priv(dev);
927 void __iomem *ioaddr = sp->ioaddr;
928 unsigned int i;
929
930 IPG_DEBUG_MSG("_nic_txcleanup\n");
931
932 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
933 /* Reading the TXSTATUS register clears the
934 * TX_COMPLETE interrupt.
935 */
936 u32 txstatusdword = ipg_r32(TX_STATUS);
937
938 IPG_DEBUG_MSG("TxStatus = %08x\n", txstatusdword);
939
940 /* Check for Transmit errors. Error bits only valid if
941 * TX_COMPLETE bit in the TXSTATUS register is a 1.
942 */
943 if (!(txstatusdword & IPG_TS_TX_COMPLETE))
944 break;
945
946 /* If in 10Mbps mode, indicate transmit is ready. */
947 if (sp->tenmbpsmode) {
948 netif_wake_queue(dev);
949 }
950
951 /* Transmit error, increment stat counters. */
952 if (txstatusdword & IPG_TS_TX_ERROR) {
953 IPG_DEBUG_MSG("Transmit error\n");
954 sp->stats.tx_errors++;
955 }
956
957 /* Late collision, re-enable transmitter. */
958 if (txstatusdword & IPG_TS_LATE_COLLISION) {
959 IPG_DEBUG_MSG("Late collision on transmit\n");
960 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
961 IPG_MC_RSVD_MASK, MAC_CTRL);
962 }
963
964 /* Maximum collisions, re-enable transmitter. */
965 if (txstatusdword & IPG_TS_TX_MAX_COLL) {
966 IPG_DEBUG_MSG("Maximum collisions on transmit\n");
967 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
968 IPG_MC_RSVD_MASK, MAC_CTRL);
969 }
970
971 /* Transmit underrun, reset and re-enable
972 * transmitter.
973 */
974 if (txstatusdword & IPG_TS_TX_UNDERRUN) {
975 IPG_DEBUG_MSG("Transmitter underrun\n");
976 sp->stats.tx_fifo_errors++;
977 ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA |
978 IPG_AC_NETWORK | IPG_AC_FIFO);
979
980 /* Re-configure after DMA reset. */
981 if (ipg_io_config(dev) < 0) {
982 netdev_info(dev, "Error during re-configuration\n");
983 }
984 init_tfdlist(dev);
985
986 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
987 IPG_MC_RSVD_MASK, MAC_CTRL);
988 }
989 }
990
991 ipg_nic_txfree(dev);
992}
993
994/* Provides statistical information about the IPG NIC. */
995static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
996{
997 struct ipg_nic_private *sp = netdev_priv(dev);
998 void __iomem *ioaddr = sp->ioaddr;
999 u16 temp1;
1000 u16 temp2;
1001
1002 IPG_DEBUG_MSG("_nic_get_stats\n");
1003
1004 /* Check to see if the NIC has been initialized via nic_open,
1005 * before trying to read statistic registers.
1006 */
1007 if (!netif_running(dev))
1008 return &sp->stats;
1009
1010 sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
1011 sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK);
1012 sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK);
1013 sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK);
1014 temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS);
1015 sp->stats.rx_errors += temp1;
1016 sp->stats.rx_missed_errors += temp1;
1017 temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) +
1018 ipg_r32(IPG_LATECOLLISIONS);
1019 temp2 = ipg_r16(IPG_CARRIERSENSEERRORS);
1020 sp->stats.collisions += temp1;
1021 sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS);
1022 sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) +
1023 ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2;
1024 sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK);
1025
1026 /* detailed tx_errors */
1027 sp->stats.tx_carrier_errors += temp2;
1028
1029 /* detailed rx_errors */
1030 sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) +
1031 ipg_r16(IPG_FRAMETOOLONGERRORS);
1032 sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS);
1033
1034 /* Unutilized IPG statistic registers. */
1035 ipg_r32(IPG_MCSTFRAMESRCVDOK);
1036
1037 return &sp->stats;
1038}
1039
1040/* Restore used receive buffers. */
1041static int ipg_nic_rxrestore(struct net_device *dev)
1042{
1043 struct ipg_nic_private *sp = netdev_priv(dev);
1044 const unsigned int curr = sp->rx_current;
1045 unsigned int dirty = sp->rx_dirty;
1046
1047 IPG_DEBUG_MSG("_nic_rxrestore\n");
1048
1049 for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) {
1050 unsigned int entry = dirty % IPG_RFDLIST_LENGTH;
1051
1052 /* rx_copybreak may poke hole here and there. */
1053 if (sp->rx_buff[entry])
1054 continue;
1055
1056 /* Generate a new receive buffer to replace the
1057 * current buffer (which will be released by the
1058 * Linux system).
1059 */
1060 if (ipg_get_rxbuff(dev, entry) < 0) {
1061 IPG_DEBUG_MSG("Cannot allocate new Rx buffer\n");
1062
1063 break;
1064 }
1065
1066 /* Reset the RFS field. */
1067 sp->rxd[entry].rfs = 0x0000000000000000;
1068 }
1069 sp->rx_dirty = dirty;
1070
1071 return 0;
1072}
1073
1074/* use jumboindex and jumbosize to track jumbo frame status
1075 * initial status is jumboindex = -1 and jumbosize = 0
1076 * 1. jumboindex = -1 and jumbosize = 0 : the previous jumbo frame is complete
1077 * 2. jumboindex != -1 and jumbosize != 0 : a jumbo frame is being received and is not yet over size
1078 * 3. jumboindex = -1 and jumbosize != 0 : the jumbo frame is over size; the data received
1079 * so far has been dropped and the rest of the current frame must be dropped too
1080 */
1081enum {
1082 NORMAL_PACKET,
1083 ERROR_PACKET
1084};
1085
1086enum {
1087 FRAME_NO_START_NO_END = 0,
1088 FRAME_WITH_START = 1,
1089 FRAME_WITH_END = 10,
1090 FRAME_WITH_START_WITH_END = 11
1091};
1092
1093static void ipg_nic_rx_free_skb(struct net_device *dev)
1094{
1095 struct ipg_nic_private *sp = netdev_priv(dev);
1096 unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
1097
1098 if (sp->rx_buff[entry]) {
1099 struct ipg_rx *rxfd = sp->rxd + entry;
1100
1101 pci_unmap_single(sp->pdev,
1102 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1103 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1104 dev_kfree_skb_irq(sp->rx_buff[entry]);
1105 sp->rx_buff[entry] = NULL;
1106 }
1107}
1108
1109static int ipg_nic_rx_check_frame_type(struct net_device *dev)
1110{
1111 struct ipg_nic_private *sp = netdev_priv(dev);
1112 struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH);
1113 int type = FRAME_NO_START_NO_END;
1114
1115 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART)
1116 type += FRAME_WITH_START;
1117 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND)
1118 type += FRAME_WITH_END;
1119 return type;
1120}
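
The FRAME_* values are chosen so that adding the two flags yields four distinct codes (0, 1, 10, 11). A tiny sketch of the encoding that ipg_nic_rx_check_frame_type() returns:

/* Sketch: start/end flag pair encoded as the FRAME_* enum values. */
#include <stdio.h>

enum {
	FRAME_NO_START_NO_END	  = 0,
	FRAME_WITH_START	  = 1,
	FRAME_WITH_END		  = 10,
	FRAME_WITH_START_WITH_END = 11
};

static int frame_type(int has_start, int has_end)
{
	int type = FRAME_NO_START_NO_END;

	if (has_start)
		type += FRAME_WITH_START;
	if (has_end)
		type += FRAME_WITH_END;
	return type;
}

int main(void)
{
	printf("start+end=%d start=%d end=%d\n",
	       frame_type(1, 1), frame_type(1, 0), frame_type(0, 1));
	return 0;
}
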
1121
1122static int ipg_nic_rx_check_error(struct net_device *dev)
1123{
1124 struct ipg_nic_private *sp = netdev_priv(dev);
1125 unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
1126 struct ipg_rx *rxfd = sp->rxd + entry;
1127
1128 if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
1129 (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
1130 IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
1131 IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) {
1132 IPG_DEBUG_MSG("Rx error, RFS = %016lx\n",
1133 (unsigned long) rxfd->rfs);
1134
1135 /* Increment general receive error statistic. */
1136 sp->stats.rx_errors++;
1137
1138 /* Increment detailed receive error statistics. */
1139 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
1140 IPG_DEBUG_MSG("RX FIFO overrun occurred\n");
1141
1142 sp->stats.rx_fifo_errors++;
1143 }
1144
1145 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
1146 IPG_DEBUG_MSG("RX runt occurred\n");
1147 sp->stats.rx_length_errors++;
1148 }
1149
1150 /* Do nothing for IPG_RFS_RXOVERSIZEDFRAME,
1151		 * error count handled by an IPG statistic register.
1152 */
1153
1154 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
1155 IPG_DEBUG_MSG("RX alignment error occurred\n");
1156 sp->stats.rx_frame_errors++;
1157 }
1158
1159 /* Do nothing for IPG_RFS_RXFCSERROR, error count
1160		 * handled by an IPG statistic register.
1161 */
1162
1163 /* Free the memory associated with the RX
1164 * buffer since it is erroneous and we will
1165 * not pass it to higher layer processes.
1166 */
1167 if (sp->rx_buff[entry]) {
1168 pci_unmap_single(sp->pdev,
1169 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1170 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1171
1172 dev_kfree_skb_irq(sp->rx_buff[entry]);
1173 sp->rx_buff[entry] = NULL;
1174 }
1175 return ERROR_PACKET;
1176 }
1177 return NORMAL_PACKET;
1178}
1179
1180static void ipg_nic_rx_with_start_and_end(struct net_device *dev,
1181 struct ipg_nic_private *sp,
1182 struct ipg_rx *rxfd, unsigned entry)
1183{
1184 struct ipg_jumbo *jumbo = &sp->jumbo;
1185 struct sk_buff *skb;
1186 int framelen;
1187
1188 if (jumbo->found_start) {
1189 dev_kfree_skb_irq(jumbo->skb);
1190 jumbo->found_start = 0;
1191 jumbo->current_size = 0;
1192 jumbo->skb = NULL;
1193 }
1194
1195	/* ERROR_PACKET: found error, NORMAL_PACKET: no error */
1196 if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
1197 return;
1198
1199 skb = sp->rx_buff[entry];
1200 if (!skb)
1201 return;
1202
1203 /* accept this frame and send to upper layer */
1204 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1205 if (framelen > sp->rxfrag_size)
1206 framelen = sp->rxfrag_size;
1207
1208 skb_put(skb, framelen);
1209 skb->protocol = eth_type_trans(skb, dev);
1210 skb_checksum_none_assert(skb);
1211 netif_rx(skb);
1212 sp->rx_buff[entry] = NULL;
1213}
1214
1215static void ipg_nic_rx_with_start(struct net_device *dev,
1216 struct ipg_nic_private *sp,
1217 struct ipg_rx *rxfd, unsigned entry)
1218{
1219 struct ipg_jumbo *jumbo = &sp->jumbo;
1220 struct pci_dev *pdev = sp->pdev;
1221 struct sk_buff *skb;
1222
1223	/* ERROR_PACKET: found error, NORMAL_PACKET: no error */
1224 if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
1225 return;
1226
1227 /* accept this frame and send to upper layer */
1228 skb = sp->rx_buff[entry];
1229 if (!skb)
1230 return;
1231
1232 if (jumbo->found_start)
1233 dev_kfree_skb_irq(jumbo->skb);
1234
1235 pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1236 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1237
1238 skb_put(skb, sp->rxfrag_size);
1239
1240 jumbo->found_start = 1;
1241 jumbo->current_size = sp->rxfrag_size;
1242 jumbo->skb = skb;
1243
1244 sp->rx_buff[entry] = NULL;
1245}
1246
1247static void ipg_nic_rx_with_end(struct net_device *dev,
1248 struct ipg_nic_private *sp,
1249 struct ipg_rx *rxfd, unsigned entry)
1250{
1251 struct ipg_jumbo *jumbo = &sp->jumbo;
1252
1253	/* ERROR_PACKET: found error, NORMAL_PACKET: no error */
1254 if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
1255 struct sk_buff *skb = sp->rx_buff[entry];
1256
1257 if (!skb)
1258 return;
1259
1260 if (jumbo->found_start) {
1261 int framelen, endframelen;
1262
1263 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1264
1265 endframelen = framelen - jumbo->current_size;
1266 if (framelen > sp->rxsupport_size)
1267 dev_kfree_skb_irq(jumbo->skb);
1268 else {
1269 memcpy(skb_put(jumbo->skb, endframelen),
1270 skb->data, endframelen);
1271
1272 jumbo->skb->protocol =
1273 eth_type_trans(jumbo->skb, dev);
1274
1275 skb_checksum_none_assert(jumbo->skb);
1276 netif_rx(jumbo->skb);
1277 }
1278 }
1279
1280 jumbo->found_start = 0;
1281 jumbo->current_size = 0;
1282 jumbo->skb = NULL;
1283
1284 ipg_nic_rx_free_skb(dev);
1285 } else {
1286 dev_kfree_skb_irq(jumbo->skb);
1287 jumbo->found_start = 0;
1288 jumbo->current_size = 0;
1289 jumbo->skb = NULL;
1290 }
1291}
1292
1293static void ipg_nic_rx_no_start_no_end(struct net_device *dev,
1294 struct ipg_nic_private *sp,
1295 struct ipg_rx *rxfd, unsigned entry)
1296{
1297 struct ipg_jumbo *jumbo = &sp->jumbo;
1298
1299	/* ERROR_PACKET: found error, NORMAL_PACKET: no error */
1300 if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
1301 struct sk_buff *skb = sp->rx_buff[entry];
1302
1303 if (skb) {
1304 if (jumbo->found_start) {
1305 jumbo->current_size += sp->rxfrag_size;
1306 if (jumbo->current_size <= sp->rxsupport_size) {
1307 memcpy(skb_put(jumbo->skb,
1308 sp->rxfrag_size),
1309 skb->data, sp->rxfrag_size);
1310 }
1311 }
1312 ipg_nic_rx_free_skb(dev);
1313 }
1314 } else {
1315 dev_kfree_skb_irq(jumbo->skb);
1316 jumbo->found_start = 0;
1317 jumbo->current_size = 0;
1318 jumbo->skb = NULL;
1319 }
1320}
1321
1322static int ipg_nic_rx_jumbo(struct net_device *dev)
1323{
1324 struct ipg_nic_private *sp = netdev_priv(dev);
1325 unsigned int curr = sp->rx_current;
1326 void __iomem *ioaddr = sp->ioaddr;
1327 unsigned int i;
1328
1329	IPG_DEBUG_MSG("_nic_rx_jumbo\n");
1330
1331 for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
1332 unsigned int entry = curr % IPG_RFDLIST_LENGTH;
1333 struct ipg_rx *rxfd = sp->rxd + entry;
1334
1335 if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE)))
1336 break;
1337
1338 switch (ipg_nic_rx_check_frame_type(dev)) {
1339 case FRAME_WITH_START_WITH_END:
1340 ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry);
1341 break;
1342 case FRAME_WITH_START:
1343 ipg_nic_rx_with_start(dev, sp, rxfd, entry);
1344 break;
1345 case FRAME_WITH_END:
1346 ipg_nic_rx_with_end(dev, sp, rxfd, entry);
1347 break;
1348 case FRAME_NO_START_NO_END:
1349 ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry);
1350 break;
1351 }
1352 }
1353
1354 sp->rx_current = curr;
1355
1356 if (i == IPG_MAXRFDPROCESS_COUNT) {
1357 /* There are more RFDs to process, however the
1358 * allocated amount of RFD processing time has
1359 * expired. Assert Interrupt Requested to make
1360 * sure we come back to process the remaining RFDs.
1361 */
1362 ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
1363 }
1364
1365 ipg_nic_rxrestore(dev);
1366
1367 return 0;
1368}
1369
1370static int ipg_nic_rx(struct net_device *dev)
1371{
1372 /* Transfer received Ethernet frames to higher network layers. */
1373 struct ipg_nic_private *sp = netdev_priv(dev);
1374 unsigned int curr = sp->rx_current;
1375 void __iomem *ioaddr = sp->ioaddr;
1376 struct ipg_rx *rxfd;
1377 unsigned int i;
1378
1379 IPG_DEBUG_MSG("_nic_rx\n");
1380
1381#define __RFS_MASK \
1382 cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND)
1383
1384 for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
1385 unsigned int entry = curr % IPG_RFDLIST_LENGTH;
1386 struct sk_buff *skb = sp->rx_buff[entry];
1387 unsigned int framelen;
1388
1389 rxfd = sp->rxd + entry;
1390
1391 if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb)
1392 break;
1393
1394 /* Get received frame length. */
1395 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1396
1397 /* Check for jumbo frame arrival with too small
1398 * RXFRAG_SIZE.
1399 */
1400 if (framelen > sp->rxfrag_size) {
1401 IPG_DEBUG_MSG
1402 ("RFS FrameLen > allocated fragment size\n");
1403
1404 framelen = sp->rxfrag_size;
1405 }
1406
1407 if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
1408 (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
1409 IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
1410 IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) {
1411
1412 IPG_DEBUG_MSG("Rx error, RFS = %016lx\n",
1413 (unsigned long int) rxfd->rfs);
1414
1415 /* Increment general receive error statistic. */
1416 sp->stats.rx_errors++;
1417
1418 /* Increment detailed receive error statistics. */
1419 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
1420 IPG_DEBUG_MSG("RX FIFO overrun occurred\n");
1421 sp->stats.rx_fifo_errors++;
1422 }
1423
1424 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
1425 IPG_DEBUG_MSG("RX runt occurred\n");
1426 sp->stats.rx_length_errors++;
1427 }
1428
1429			/* Do nothing for IPG_RFS_RXOVERSIZEDFRAME,
1430			 * error count handled by an IPG
1431			 * statistic register.
1432			 */
1433
1434 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
1435 IPG_DEBUG_MSG("RX alignment error occurred\n");
1436 sp->stats.rx_frame_errors++;
1437 }
1438
1439			/* Do nothing for IPG_RFS_RXFCSERROR,
1440			 * error count handled by an IPG
1441			 * statistic register.
1442			 */
1443
1444 /* Free the memory associated with the RX
1445 * buffer since it is erroneous and we will
1446 * not pass it to higher layer processes.
1447 */
1448 if (skb) {
1449 __le64 info = rxfd->frag_info;
1450
1451 pci_unmap_single(sp->pdev,
1452 le64_to_cpu(info) & ~IPG_RFI_FRAGLEN,
1453 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1454
1455 dev_kfree_skb_irq(skb);
1456 }
1457 } else {
1458
1459 /* Adjust the new buffer length to accommodate the size
1460 * of the received frame.
1461 */
1462 skb_put(skb, framelen);
1463
1464 /* Set the buffer's protocol field to Ethernet. */
1465 skb->protocol = eth_type_trans(skb, dev);
1466
1467 /* The IPG encountered an error with (or
1468 * there were no) IP/TCP/UDP checksums.
1469 * This may or may not indicate an invalid
1470 * IP/TCP/UDP frame was received. Let the
1471 * upper layer decide.
1472 */
1473 skb_checksum_none_assert(skb);
1474
1475 /* Hand off frame for higher layer processing.
1476 * The function netif_rx() releases the sk_buff
1477 * when processing completes.
1478 */
1479 netif_rx(skb);
1480 }
1481
1482 /* Assure RX buffer is not reused by IPG. */
1483 sp->rx_buff[entry] = NULL;
1484 }
1485
1486 /*
1487 * If there are more RFDs to process and the allocated amount of RFD
1488 * processing time has expired, assert Interrupt Requested to make
1489 * sure we come back to process the remaining RFDs.
1490 */
1491 if (i == IPG_MAXRFDPROCESS_COUNT)
1492 ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
1493
1494#ifdef IPG_DEBUG
1495 /* Check if the RFD list contained no receive frame data. */
1496 if (!i)
1497 sp->EmptyRFDListCount++;
1498#endif
1499 while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) &&
1500 !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) &&
1501 (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) {
1502 unsigned int entry = curr++ % IPG_RFDLIST_LENGTH;
1503
1504 rxfd = sp->rxd + entry;
1505
1506 IPG_DEBUG_MSG("Frame requires multiple RFDs\n");
1507
1508 /* An unexpected event, additional code needed to handle
1509 * properly. So for the time being, just disregard the
1510 * frame.
1511 */
1512
1513 /* Free the memory associated with the RX
1514 * buffer since it is erroneous and we will
1515 * not pass it to higher layer processes.
1516 */
1517 if (sp->rx_buff[entry]) {
1518 pci_unmap_single(sp->pdev,
1519 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1520 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1521 dev_kfree_skb_irq(sp->rx_buff[entry]);
1522 }
1523
1524 /* Assure RX buffer is not reused by IPG. */
1525 sp->rx_buff[entry] = NULL;
1526 }
1527
1528 sp->rx_current = curr;
1529
1530 /* Check to see if there are a minimum number of used
1531	 * RFDs before restoring any (this should improve performance).
1532 */
1533 if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE)
1534 ipg_nic_rxrestore(dev);
1535
1536 return 0;
1537}
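
For reference, the __RFS_MASK test in the loop above accepts a descriptor only when RFDDONE, FRAMESTART and FRAMEEND are all set, i.e. when an entire frame fits in one RFD; anything else falls out of the loop and is discarded by the multi-RFD cleanup that follows. A compact illustration (bit values taken from ipg.h):

#include <assert.h>
#include <stdint.h>

#define RFS_FRAMESTART	(1ULL << 29)
#define RFS_FRAMEEND	(1ULL << 30)
#define RFS_RFDDONE	(1ULL << 31)
#define RFS_MASK	(RFS_RFDDONE | RFS_FRAMESTART | RFS_FRAMEEND)

int main(void)
{
	uint64_t whole   = RFS_RFDDONE | RFS_FRAMESTART | RFS_FRAMEEND;
	uint64_t partial = RFS_RFDDONE | RFS_FRAMESTART;	/* frame spans RFDs */

	assert((whole & RFS_MASK) == RFS_MASK);
	assert((partial & RFS_MASK) != RFS_MASK);
	return 0;
}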
1538
1539static void ipg_reset_after_host_error(struct work_struct *work)
1540{
1541 struct ipg_nic_private *sp =
1542 container_of(work, struct ipg_nic_private, task.work);
1543 struct net_device *dev = sp->dev;
1544
1545 /*
1546 * Acknowledge HostError interrupt by resetting
1547 * IPG DMA and HOST.
1548 */
1549 ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
1550
1551 init_rfdlist(dev);
1552 init_tfdlist(dev);
1553
1554 if (ipg_io_config(dev) < 0) {
1555 netdev_info(dev, "Cannot recover from PCI error\n");
1556 schedule_delayed_work(&sp->task, HZ);
1557 }
1558}
1559
1560static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst)
1561{
1562 struct net_device *dev = dev_inst;
1563 struct ipg_nic_private *sp = netdev_priv(dev);
1564 void __iomem *ioaddr = sp->ioaddr;
1565 unsigned int handled = 0;
1566 u16 status;
1567
1568 IPG_DEBUG_MSG("_interrupt_handler\n");
1569
1570 if (sp->is_jumbo)
1571 ipg_nic_rxrestore(dev);
1572
1573 spin_lock(&sp->lock);
1574
1575 /* Get interrupt source information, and acknowledge
1576 * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly,
1577 * IntRequested, MacControlFrame, LinkEvent) interrupts
1578 * if issued. Also, all IPG interrupts are disabled by
1579 * reading IntStatusAck.
1580 */
1581 status = ipg_r16(INT_STATUS_ACK);
1582
1583 IPG_DEBUG_MSG("IntStatusAck = %04x\n", status);
1584
1585	/* Not our interrupt (shared IRQ), or the device is being removed. */
1586 if (!(status & IPG_IS_RSVD_MASK))
1587 goto out_enable;
1588
1589 handled = 1;
1590
1591 if (unlikely(!netif_running(dev)))
1592 goto out_unlock;
1593
1594 /* If RFDListEnd interrupt, restore all used RFDs. */
1595 if (status & IPG_IS_RFD_LIST_END) {
1596 IPG_DEBUG_MSG("RFDListEnd Interrupt\n");
1597
1598 /* The RFD list end indicates an RFD was encountered
1599 * with a 0 NextPtr, or with an RFDDone bit set to 1
1600		 * (indicating the RFD is not ready for use by the
1601		 * IPG). Try to restore all RFDs.
1602 */
1603 ipg_nic_rxrestore(dev);
1604
1605#ifdef IPG_DEBUG
1606 /* Increment the RFDlistendCount counter. */
1607 sp->RFDlistendCount++;
1608#endif
1609 }
1610
1611 /* If RFDListEnd, RxDMAPriority, RxDMAComplete, or
1612 * IntRequested interrupt, process received frames. */
1613 if ((status & IPG_IS_RX_DMA_PRIORITY) ||
1614 (status & IPG_IS_RFD_LIST_END) ||
1615 (status & IPG_IS_RX_DMA_COMPLETE) ||
1616 (status & IPG_IS_INT_REQUESTED)) {
1617#ifdef IPG_DEBUG
1618 /* Increment the RFD list checked counter if interrupted
1619 * only to check the RFD list. */
1620 if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END |
1621 IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) &
1622 (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE |
1623 IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE |
1624 IPG_IS_UPDATE_STATS)))
1625 sp->RFDListCheckedCount++;
1626#endif
1627
1628 if (sp->is_jumbo)
1629 ipg_nic_rx_jumbo(dev);
1630 else
1631 ipg_nic_rx(dev);
1632 }
1633
1634 /* If TxDMAComplete interrupt, free used TFDs. */
1635 if (status & IPG_IS_TX_DMA_COMPLETE)
1636 ipg_nic_txfree(dev);
1637
1638 /* TxComplete interrupts indicate one of numerous actions.
1639 * Determine what action to take based on TXSTATUS register.
1640 */
1641 if (status & IPG_IS_TX_COMPLETE)
1642 ipg_nic_txcleanup(dev);
1643
1644 /* If UpdateStats interrupt, update Linux Ethernet statistics */
1645 if (status & IPG_IS_UPDATE_STATS)
1646 ipg_nic_get_stats(dev);
1647
1648 /* If HostError interrupt, reset IPG. */
1649 if (status & IPG_IS_HOST_ERROR) {
1650 IPG_DDEBUG_MSG("HostError Interrupt\n");
1651
1652 schedule_delayed_work(&sp->task, 0);
1653 }
1654
1655 /* If LinkEvent interrupt, resolve autonegotiation. */
1656 if (status & IPG_IS_LINK_EVENT) {
1657 if (ipg_config_autoneg(dev) < 0)
1658 netdev_info(dev, "Auto-negotiation error\n");
1659 }
1660
1661 /* If MACCtrlFrame interrupt, do nothing. */
1662 if (status & IPG_IS_MAC_CTRL_FRAME)
1663 IPG_DEBUG_MSG("MACCtrlFrame interrupt\n");
1664
1665 /* If RxComplete interrupt, do nothing. */
1666 if (status & IPG_IS_RX_COMPLETE)
1667 IPG_DEBUG_MSG("RxComplete interrupt\n");
1668
1669 /* If RxEarly interrupt, do nothing. */
1670 if (status & IPG_IS_RX_EARLY)
1671 IPG_DEBUG_MSG("RxEarly interrupt\n");
1672
1673out_enable:
1674 /* Re-enable IPG interrupts. */
1675 ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE |
1676 IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE |
1677 IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE);
1678out_unlock:
1679 spin_unlock(&sp->lock);
1680
1681 return IRQ_RETVAL(handled);
1682}
1683
1684static void ipg_rx_clear(struct ipg_nic_private *sp)
1685{
1686 unsigned int i;
1687
1688 for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
1689 if (sp->rx_buff[i]) {
1690 struct ipg_rx *rxfd = sp->rxd + i;
1691
1692 dev_kfree_skb_irq(sp->rx_buff[i]);
1693 sp->rx_buff[i] = NULL;
1694 pci_unmap_single(sp->pdev,
1695 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1696 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1697 }
1698 }
1699}
1700
1701static void ipg_tx_clear(struct ipg_nic_private *sp)
1702{
1703 unsigned int i;
1704
1705 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
1706 if (sp->tx_buff[i]) {
1707 struct ipg_tx *txfd = sp->txd + i;
1708
1709 pci_unmap_single(sp->pdev,
1710 le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
1711 sp->tx_buff[i]->len, PCI_DMA_TODEVICE);
1712
1713 dev_kfree_skb_irq(sp->tx_buff[i]);
1714
1715 sp->tx_buff[i] = NULL;
1716 }
1717 }
1718}
1719
1720static int ipg_nic_open(struct net_device *dev)
1721{
1722 struct ipg_nic_private *sp = netdev_priv(dev);
1723 void __iomem *ioaddr = sp->ioaddr;
1724 struct pci_dev *pdev = sp->pdev;
1725 int rc;
1726
1727 IPG_DEBUG_MSG("_nic_open\n");
1728
1729 sp->rx_buf_sz = sp->rxsupport_size;
1730
1731 /* Check for interrupt line conflicts, and request interrupt
1732 * line for IPG.
1733 *
1734 * IMPORTANT: Disable IPG interrupts prior to registering
1735 * IRQ.
1736 */
1737 ipg_w16(0x0000, INT_ENABLE);
1738
1739 /* Register the interrupt line to be used by the IPG within
1740 * the Linux system.
1741 */
1742 rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED,
1743 dev->name, dev);
1744 if (rc < 0) {
1745 netdev_info(dev, "Error when requesting interrupt\n");
1746 goto out;
1747 }
1748
1749 dev->irq = pdev->irq;
1750
1751 rc = -ENOMEM;
1752
1753 sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES,
1754 &sp->rxd_map, GFP_KERNEL);
1755 if (!sp->rxd)
1756 goto err_free_irq_0;
1757
1758 sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES,
1759 &sp->txd_map, GFP_KERNEL);
1760 if (!sp->txd)
1761 goto err_free_rx_1;
1762
1763 rc = init_rfdlist(dev);
1764 if (rc < 0) {
1765 netdev_info(dev, "Error during configuration\n");
1766 goto err_free_tx_2;
1767 }
1768
1769 init_tfdlist(dev);
1770
1771 rc = ipg_io_config(dev);
1772 if (rc < 0) {
1773 netdev_info(dev, "Error during configuration\n");
1774 goto err_release_tfdlist_3;
1775 }
1776
1777 /* Resolve autonegotiation. */
1778 if (ipg_config_autoneg(dev) < 0)
1779 netdev_info(dev, "Auto-negotiation error\n");
1780
1781	/* Initialize jumbo frame control variables. */
1782 sp->jumbo.found_start = 0;
1783 sp->jumbo.current_size = 0;
1784 sp->jumbo.skb = NULL;
1785
1786 /* Enable transmit and receive operation of the IPG. */
1787 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) &
1788 IPG_MC_RSVD_MASK, MAC_CTRL);
1789
1790 netif_start_queue(dev);
1791out:
1792 return rc;
1793
1794err_release_tfdlist_3:
1795 ipg_tx_clear(sp);
1796 ipg_rx_clear(sp);
1797err_free_tx_2:
1798 dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
1799err_free_rx_1:
1800 dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
1801err_free_irq_0:
1802 free_irq(pdev->irq, dev);
1803 goto out;
1804}
1805
1806static int ipg_nic_stop(struct net_device *dev)
1807{
1808 struct ipg_nic_private *sp = netdev_priv(dev);
1809 void __iomem *ioaddr = sp->ioaddr;
1810 struct pci_dev *pdev = sp->pdev;
1811
1812 IPG_DEBUG_MSG("_nic_stop\n");
1813
1814 netif_stop_queue(dev);
1815
1816 IPG_DUMPTFDLIST(dev);
1817
1818 do {
1819 (void) ipg_r16(INT_STATUS_ACK);
1820
1821 ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
1822
1823 synchronize_irq(pdev->irq);
1824 } while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK);
1825
1826 ipg_rx_clear(sp);
1827
1828 ipg_tx_clear(sp);
1829
1830 pci_free_consistent(pdev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
1831 pci_free_consistent(pdev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
1832
1833 free_irq(pdev->irq, dev);
1834
1835 return 0;
1836}
1837
1838static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb,
1839 struct net_device *dev)
1840{
1841 struct ipg_nic_private *sp = netdev_priv(dev);
1842 void __iomem *ioaddr = sp->ioaddr;
1843 unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH;
1844 unsigned long flags;
1845 struct ipg_tx *txfd;
1846
1847 IPG_DDEBUG_MSG("_nic_hard_start_xmit\n");
1848
1849 /* If in 10Mbps mode, stop the transmit queue so
1850 * no more transmit frames are accepted.
1851 */
1852 if (sp->tenmbpsmode)
1853 netif_stop_queue(dev);
1854
1855 if (sp->reset_current_tfd) {
1856 sp->reset_current_tfd = 0;
1857 entry = 0;
1858 }
1859
1860 txfd = sp->txd + entry;
1861
1862 sp->tx_buff[entry] = skb;
1863
1864 /* Clear all TFC fields, except TFDDONE. */
1865 txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);
1866
1867 /* Specify the TFC field within the TFD. */
1868 txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED |
1869 (IPG_TFC_FRAMEID & sp->tx_current) |
1870 (IPG_TFC_FRAGCOUNT & (1 << 24)));
1871 /*
1872 * 16--17 (WordAlign) <- 3 (disable),
1873 * 0--15 (FrameId) <- sp->tx_current,
1874 * 24--27 (FragCount) <- 1
1875 */
1876
1877 /* Request TxComplete interrupts at an interval defined
1878 * by the constant IPG_FRAMESBETWEENTXCOMPLETES.
1879 * Request TxComplete interrupt for every frame
1880 * if in 10Mbps mode to accommodate problem with 10Mbps
1881 * processing.
1882 */
1883 if (sp->tenmbpsmode)
1884 txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE);
1885 txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE);
1886 /* Based on compilation option, determine if FCS is to be
1887 * appended to transmit frame by IPG.
1888 */
1889 if (!(IPG_APPEND_FCS_ON_TX))
1890 txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE);
1891
1892 /* Based on compilation option, determine if IP, TCP and/or
1893 * UDP checksums are to be added to transmit frame by IPG.
1894 */
1895 if (IPG_ADD_IPCHECKSUM_ON_TX)
1896 txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE);
1897
1898 if (IPG_ADD_TCPCHECKSUM_ON_TX)
1899 txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE);
1900
1901 if (IPG_ADD_UDPCHECKSUM_ON_TX)
1902 txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE);
1903
1904 /* Based on compilation option, determine if VLAN tag info is to be
1905 * inserted into transmit frame by IPG.
1906 */
1907 if (IPG_INSERT_MANUAL_VLAN_TAG) {
1908 txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT |
1909 ((u64) IPG_MANUAL_VLAN_VID << 32) |
1910 ((u64) IPG_MANUAL_VLAN_CFI << 44) |
1911 ((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45));
1912 }
1913
1914 /* The fragment start location within system memory is defined
1915	 * by the sk_buff structure's data field. The bus address of
1916	 * this location is obtained with pci_map_single(), which also
1917	 * maps the buffer for DMA access by the device.
1918 */
1919 txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
1920 skb->len, PCI_DMA_TODEVICE));
1921
1922 /* The length of the fragment within system memory is defined by
1923 * the sk_buff structure's len field.
1924 */
1925 txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN &
1926 ((u64) (skb->len & 0xffff) << 48));
1927
1928 /* Clear the TFDDone bit last to indicate the TFD is ready
1929 * for transfer to the IPG.
1930 */
1931 txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE);
1932
1933 spin_lock_irqsave(&sp->lock, flags);
1934
1935 sp->tx_current++;
1936
1937 mmiowb();
1938
1939 ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL);
1940
1941 if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH))
1942 netif_stop_queue(dev);
1943
1944 spin_unlock_irqrestore(&sp->lock, flags);
1945
1946 return NETDEV_TX_OK;
1947}
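
The TFC and FragInfo packing in the transmit path above is plain 64-bit field insertion. The sketch below reproduces it in userspace with the masks from ipg.h; pack_tfc() and pack_fraglen() are illustrative helpers, not driver functions:

#include <assert.h>
#include <stdint.h>

#define TFC_FRAMEID		0x000000000000FFFFULL	/* bits 0-15  */
#define TFC_WORDALIGNDISABLED	0x0000000000030000ULL	/* bits 16-17 */
#define TFC_FRAGCOUNT		0x000000000F000000ULL	/* bits 24-27 */
#define TFI_FRAGLEN		0xFFFF000000000000ULL	/* bits 48-63 */

static uint64_t pack_tfc(uint16_t frame_id, uint64_t frag_count)
{
	return TFC_WORDALIGNDISABLED |
	       (TFC_FRAMEID & frame_id) |
	       (TFC_FRAGCOUNT & (frag_count << 24));
}

static uint64_t pack_fraglen(uint64_t frag_info, uint64_t len)
{
	return frag_info | (TFI_FRAGLEN & ((len & 0xffff) << 48));
}

int main(void)
{
	uint64_t tfc = pack_tfc(0x1234, 1);
	uint64_t info = pack_fraglen(0, 60);

	assert((tfc & TFC_FRAMEID) == 0x1234);
	assert(((tfc & TFC_FRAGCOUNT) >> 24) == 1);
	assert(((info & TFI_FRAGLEN) >> 48) == 60);
	return 0;
}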
1948
1949static void ipg_set_phy_default_param(unsigned char rev,
1950 struct net_device *dev, int phy_address)
1951{
1952 unsigned short length;
1953 unsigned char revision;
1954 const unsigned short *phy_param;
1955 unsigned short address, value;
1956
1957 phy_param = &DefaultPhyParam[0];
1958 length = *phy_param & 0x00FF;
1959 revision = (unsigned char)((*phy_param) >> 8);
1960 phy_param++;
1961 while (length != 0) {
1962 if (rev == revision) {
1963 while (length > 1) {
1964 address = *phy_param;
1965 value = *(phy_param + 1);
1966 phy_param += 2;
1967 mdio_write(dev, phy_address, address, value);
1968 length -= 4;
1969 }
1970 break;
1971 } else {
1972 phy_param += length / 2;
1973 length = *phy_param & 0x00FF;
1974 revision = (unsigned char)((*phy_param) >> 8);
1975 phy_param++;
1976 }
1977 }
1978}
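
Judging from the walk above, DefaultPhyParam (defined earlier in this file) groups entries as a header word (revision in the high byte, byte count of the following data in the low byte) followed by (address, value) register pairs, which is why the inner loop consumes two words while subtracting 4 from length. A hedged sketch of parsing such a table; the layout and the sample values are inferred, not taken from the driver:

#include <stdio.h>

/* Illustrative table: revision 0x21, 4 bytes of data = one pair,
 * terminated by a zero-length header. */
static const unsigned short params[] = {
	(0x21 << 8) | 4,	/* header: revision 0x21, 4 data bytes */
	0x001b, 0x0004,		/* one (address, value) pair */
	0x0000,			/* terminator */
};

int main(void)
{
	const unsigned short *p = params;
	unsigned short len = *p & 0x00ff;
	unsigned char rev = *p >> 8;

	p++;
	while (len != 0) {
		if (rev == 0x21) {	/* the chip revision we want */
			while (len > 1) {
				printf("write reg 0x%02x = 0x%04x\n",
				       (unsigned int)p[0], (unsigned int)p[1]);
				p += 2;
				len -= 4;
			}
			break;
		}
		p += len / 2;		/* skip to the next header */
		len = *p & 0x00ff;
		rev = *p >> 8;
		p++;
	}
	return 0;
}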
1979
1980static int read_eeprom(struct net_device *dev, int eep_addr)
1981{
1982 void __iomem *ioaddr = ipg_ioaddr(dev);
1983 unsigned int i;
1984 int ret = 0;
1985 u16 value;
1986
1987 value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff);
1988 ipg_w16(value, EEPROM_CTRL);
1989
1990 for (i = 0; i < 1000; i++) {
1991 u16 data;
1992
1993 mdelay(10);
1994 data = ipg_r16(EEPROM_CTRL);
1995 if (!(data & IPG_EC_EEPROM_BUSY)) {
1996 ret = ipg_r16(EEPROM_DATA);
1997 break;
1998 }
1999 }
2000 return ret;
2001}
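
read_eeprom() above is a bounded busy-poll: issue the read opcode, then repeatedly test the BUSY bit (up to 1000 times, 10 ms apart) before trusting EEPROM_DATA; on timeout it silently returns 0. The same shape in a self-contained sketch, where device_busy() stands in for the register read:

#include <stdio.h>

static int busy_countdown = 3;	/* fake device: ready after 3 polls */

static int device_busy(void)
{
	return busy_countdown-- > 0;
}

/* Bounded poll: at most 'attempts' tries, -1 on timeout. A real
 * driver would sleep or mdelay() between iterations. */
static int poll_ready(int attempts)
{
	int i;

	for (i = 0; i < attempts; i++) {
		if (!device_busy())
			return 0;
	}
	return -1;
}

int main(void)
{
	printf("poll result: %d\n", poll_ready(1000));
	return 0;
}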
2002
2003static void ipg_init_mii(struct net_device *dev)
2004{
2005 struct ipg_nic_private *sp = netdev_priv(dev);
2006 struct mii_if_info *mii_if = &sp->mii_if;
2007 int phyaddr;
2008
2009 mii_if->dev = dev;
2010 mii_if->mdio_read = mdio_read;
2011 mii_if->mdio_write = mdio_write;
2012 mii_if->phy_id_mask = 0x1f;
2013 mii_if->reg_num_mask = 0x1f;
2014
2015 mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev);
2016
2017 if (phyaddr != 0x1f) {
2018 u16 mii_phyctrl, mii_1000cr;
2019
2020 mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000);
2021 mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
2022 GMII_PHY_1000BASETCONTROL_PreferMaster;
2023 mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr);
2024
2025 mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);
2026
2027 /* Set default phyparam */
2028 ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr);
2029
2030 /* Reset PHY */
2031 mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
2032 mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl);
2033
2034 }
2035}
2036
2037static int ipg_hw_init(struct net_device *dev)
2038{
2039 struct ipg_nic_private *sp = netdev_priv(dev);
2040 void __iomem *ioaddr = sp->ioaddr;
2041 unsigned int i;
2042 int rc;
2043
2044 /* Read/Write and Reset EEPROM Value */
2045 /* Read LED Mode Configuration from EEPROM */
2046 sp->led_mode = read_eeprom(dev, 6);
2047
2048 /* Reset all functions within the IPG. Do not assert
2049	 * RST_OUT, as doing so is not compatible with some PHYs.
2050 */
2051 rc = ipg_reset(dev, IPG_RESET_MASK);
2052 if (rc < 0)
2053 goto out;
2054
2055 ipg_init_mii(dev);
2056
2057 /* Read MAC Address from EEPROM */
2058 for (i = 0; i < 3; i++)
2059 sp->station_addr[i] = read_eeprom(dev, 16 + i);
2060
2061 for (i = 0; i < 3; i++)
2062 ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i);
2063
2064 /* Set station address in ethernet_device structure. */
2065 dev->dev_addr[0] = ipg_r16(STATION_ADDRESS_0) & 0x00ff;
2066 dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8;
2067 dev->dev_addr[2] = ipg_r16(STATION_ADDRESS_1) & 0x00ff;
2068 dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8;
2069 dev->dev_addr[4] = ipg_r16(STATION_ADDRESS_2) & 0x00ff;
2070 dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8;
2071out:
2072 return rc;
2073}
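
The station-address handling above reads three little-endian 16-bit registers and splits each into two MAC bytes, low byte first. A standalone equivalent (the register values are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t sa[3] = { 0x2211, 0x4433, 0x6655 };	/* fake registers */
	uint8_t mac[6];
	int i;

	for (i = 0; i < 3; i++) {
		mac[2 * i]     = sa[i] & 0x00ff;	/* low byte first */
		mac[2 * i + 1] = sa[i] >> 8;
	}
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* prints 11:22:33:44:55:66 */
	return 0;
}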
2074
2075static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2076{
2077 struct ipg_nic_private *sp = netdev_priv(dev);
2078 int rc;
2079
2080 mutex_lock(&sp->mii_mutex);
2081 rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL);
2082 mutex_unlock(&sp->mii_mutex);
2083
2084 return rc;
2085}
2086
2087static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu)
2088{
2089 struct ipg_nic_private *sp = netdev_priv(dev);
2090 int err;
2091
2092	/* Function to accommodate changes to the Maximum Transmission Unit
2093	 * (MTU) of the IPG NIC. Cannot use the default handler since
2094	 * it does not allow an MTU > 1500 bytes.
2095 */
2096
2097 IPG_DEBUG_MSG("_nic_change_mtu\n");
2098
2099 /*
2100	 * Check that the new MTU value is between 68 (the IPv4 minimum
2101	 * MTU) and 10240 bytes, which is the largest supported MTU.
2102 */
2103 if (new_mtu < 68 || new_mtu > 10240)
2104 return -EINVAL;
2105
2106 err = ipg_nic_stop(dev);
2107 if (err)
2108 return err;
2109
2110 dev->mtu = new_mtu;
2111
2112 sp->max_rxframe_size = new_mtu;
2113
2114 sp->rxfrag_size = new_mtu;
2115 if (sp->rxfrag_size > 4088)
2116 sp->rxfrag_size = 4088;
2117
2118 sp->rxsupport_size = sp->max_rxframe_size;
2119
2120 if (new_mtu > 0x0600)
2121 sp->is_jumbo = true;
2122 else
2123 sp->is_jumbo = false;
2124
2125 return ipg_nic_open(dev);
2126}
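
To restate the size bookkeeping in ipg_nic_change_mtu(): the per-RFD receive fragment is capped at 4088 bytes, the supported frame size tracks the MTU, and any MTU above 0x600 (1536) bytes switches the driver onto the multi-RFD jumbo path. A sketch with illustrative values:

#include <stdbool.h>
#include <stdio.h>

struct rx_sizes {
	unsigned long rxfrag_size;
	unsigned long rxsupport_size;
	bool is_jumbo;
};

static struct rx_sizes derive_rx_sizes(int mtu)
{
	struct rx_sizes s;

	s.rxfrag_size = mtu > 4088 ? 4088 : mtu;	/* per-RFD cap */
	s.rxsupport_size = mtu;				/* largest frame kept */
	s.is_jumbo = mtu > 0x600;			/* above 1536 bytes */
	return s;
}

int main(void)
{
	struct rx_sizes s = derive_rx_sizes(9000);

	printf("frag=%lu support=%lu jumbo=%d\n",
	       s.rxfrag_size, s.rxsupport_size, (int)s.is_jumbo);
	return 0;
}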
2127
2128static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2129{
2130 struct ipg_nic_private *sp = netdev_priv(dev);
2131 int rc;
2132
2133 mutex_lock(&sp->mii_mutex);
2134 rc = mii_ethtool_gset(&sp->mii_if, cmd);
2135 mutex_unlock(&sp->mii_mutex);
2136
2137 return rc;
2138}
2139
2140static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2141{
2142 struct ipg_nic_private *sp = netdev_priv(dev);
2143 int rc;
2144
2145 mutex_lock(&sp->mii_mutex);
2146 rc = mii_ethtool_sset(&sp->mii_if, cmd);
2147 mutex_unlock(&sp->mii_mutex);
2148
2149 return rc;
2150}
2151
2152static int ipg_nway_reset(struct net_device *dev)
2153{
2154 struct ipg_nic_private *sp = netdev_priv(dev);
2155 int rc;
2156
2157 mutex_lock(&sp->mii_mutex);
2158 rc = mii_nway_restart(&sp->mii_if);
2159 mutex_unlock(&sp->mii_mutex);
2160
2161 return rc;
2162}
2163
2164static const struct ethtool_ops ipg_ethtool_ops = {
2165 .get_settings = ipg_get_settings,
2166 .set_settings = ipg_set_settings,
2167 .nway_reset = ipg_nway_reset,
2168};
2169
2170static void ipg_remove(struct pci_dev *pdev)
2171{
2172 struct net_device *dev = pci_get_drvdata(pdev);
2173 struct ipg_nic_private *sp = netdev_priv(dev);
2174
2175 IPG_DEBUG_MSG("_remove\n");
2176
2177 /* Un-register Ethernet device. */
2178 unregister_netdev(dev);
2179
2180 pci_iounmap(pdev, sp->ioaddr);
2181
2182 pci_release_regions(pdev);
2183
2184 free_netdev(dev);
2185 pci_disable_device(pdev);
2186}
2187
2188static const struct net_device_ops ipg_netdev_ops = {
2189 .ndo_open = ipg_nic_open,
2190 .ndo_stop = ipg_nic_stop,
2191 .ndo_start_xmit = ipg_nic_hard_start_xmit,
2192 .ndo_get_stats = ipg_nic_get_stats,
2193 .ndo_set_rx_mode = ipg_nic_set_multicast_list,
2194 .ndo_do_ioctl = ipg_ioctl,
2195 .ndo_tx_timeout = ipg_tx_timeout,
2196 .ndo_change_mtu = ipg_nic_change_mtu,
2197 .ndo_set_mac_address = eth_mac_addr,
2198 .ndo_validate_addr = eth_validate_addr,
2199};
2200
2201static int ipg_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2202{
2203 unsigned int i = id->driver_data;
2204 struct ipg_nic_private *sp;
2205 struct net_device *dev;
2206 void __iomem *ioaddr;
2207 int rc;
2208
2209 rc = pci_enable_device(pdev);
2210 if (rc < 0)
2211 goto out;
2212
2213 pr_info("%s: %s\n", pci_name(pdev), ipg_brand_name[i]);
2214
2215 pci_set_master(pdev);
2216
2217 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
2218 if (rc < 0) {
2219 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2220 if (rc < 0) {
2221 pr_err("%s: DMA config failed\n", pci_name(pdev));
2222 goto err_disable_0;
2223 }
2224 }
2225
2226 /*
2227 * Initialize net device.
2228 */
2229 dev = alloc_etherdev(sizeof(struct ipg_nic_private));
2230 if (!dev) {
2231 rc = -ENOMEM;
2232 goto err_disable_0;
2233 }
2234
2235 sp = netdev_priv(dev);
2236 spin_lock_init(&sp->lock);
2237 mutex_init(&sp->mii_mutex);
2238
2239 sp->is_jumbo = IPG_IS_JUMBO;
2240 sp->rxfrag_size = IPG_RXFRAG_SIZE;
2241 sp->rxsupport_size = IPG_RXSUPPORT_SIZE;
2242 sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE;
2243
2244 /* Declare IPG NIC functions for Ethernet device methods.
2245 */
2246 dev->netdev_ops = &ipg_netdev_ops;
2247 SET_NETDEV_DEV(dev, &pdev->dev);
2248 dev->ethtool_ops = &ipg_ethtool_ops;
2249
2250 rc = pci_request_regions(pdev, DRV_NAME);
2251 if (rc)
2252 goto err_free_dev_1;
2253
2254 ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
2255 if (!ioaddr) {
2256 pr_err("%s: cannot map MMIO\n", pci_name(pdev));
2257 rc = -EIO;
2258 goto err_release_regions_2;
2259 }
2260
2261 /* Save the pointer to the PCI device information. */
2262 sp->ioaddr = ioaddr;
2263 sp->pdev = pdev;
2264 sp->dev = dev;
2265
2266 INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error);
2267
2268 pci_set_drvdata(pdev, dev);
2269
2270 rc = ipg_hw_init(dev);
2271 if (rc < 0)
2272 goto err_unmap_3;
2273
2274 rc = register_netdev(dev);
2275 if (rc < 0)
2276 goto err_unmap_3;
2277
2278 netdev_info(dev, "Ethernet device registered\n");
2279out:
2280 return rc;
2281
2282err_unmap_3:
2283 pci_iounmap(pdev, ioaddr);
2284err_release_regions_2:
2285 pci_release_regions(pdev);
2286err_free_dev_1:
2287 free_netdev(dev);
2288err_disable_0:
2289 pci_disable_device(pdev);
2290 goto out;
2291}
2292
2293static struct pci_driver ipg_pci_driver = {
2294 .name = IPG_DRIVER_NAME,
2295 .id_table = ipg_pci_tbl,
2296 .probe = ipg_probe,
2297 .remove = ipg_remove,
2298};
2299
2300module_pci_driver(ipg_pci_driver);
diff --git a/drivers/net/ethernet/icplus/ipg.h b/drivers/net/ethernet/icplus/ipg.h
deleted file mode 100644
index de606281f97b..000000000000
--- a/drivers/net/ethernet/icplus/ipg.h
+++ /dev/null
@@ -1,748 +0,0 @@
1/*
2 * Include file for Gigabit Ethernet device driver for Network
3 * Interface Cards (NICs) utilizing the Tamarack Microelectronics
4 * Inc. IPG Gigabit or Triple Speed Ethernet Media Access
5 * Controller.
6 */
7#ifndef __LINUX_IPG_H
8#define __LINUX_IPG_H
9
10#include <linux/module.h>
11
12#include <linux/kernel.h>
13#include <linux/pci.h>
14#include <linux/ioport.h>
15#include <linux/errno.h>
16#include <asm/io.h>
17#include <linux/delay.h>
18#include <linux/types.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/skbuff.h>
22#include <asm/bitops.h>
23
24/*
25 * Constants
26 */
27
28/* GMII based PHY IDs */
29#define NS 0x2000
30#define MARVELL 0x0141
31#define ICPLUS_PHY 0x243
32
33/* NIC Physical Layer Device MII register fields. */
34#define MII_PHY_SELECTOR_IEEE8023 0x0001
35#define MII_PHY_TECHABILITYFIELD 0x1FE0
36
37/* GMII_PHY_1000BASETCONTROL needs to be set to prefer master. */
38#define GMII_PHY_1000BASETCONTROL_PreferMaster 0x0400
39
40/* NIC Physical Layer Device GMII constants. */
41#define GMII_PREAMBLE 0xFFFFFFFF
42#define GMII_ST 0x1
43#define GMII_READ 0x2
44#define GMII_WRITE 0x1
45#define GMII_TA_READ_MASK 0x1
46#define GMII_TA_WRITE 0x2
47
48/* I/O register offsets. */
49enum ipg_regs {
50 DMA_CTRL = 0x00,
51 RX_DMA_STATUS = 0x08, /* Unused + reserved */
52 TFD_LIST_PTR_0 = 0x10,
53 TFD_LIST_PTR_1 = 0x14,
54 TX_DMA_BURST_THRESH = 0x18,
55 TX_DMA_URGENT_THRESH = 0x19,
56 TX_DMA_POLL_PERIOD = 0x1a,
57 RFD_LIST_PTR_0 = 0x1c,
58 RFD_LIST_PTR_1 = 0x20,
59 RX_DMA_BURST_THRESH = 0x24,
60 RX_DMA_URGENT_THRESH = 0x25,
61 RX_DMA_POLL_PERIOD = 0x26,
62 DEBUG_CTRL = 0x2c,
63 ASIC_CTRL = 0x30,
64 FIFO_CTRL = 0x38, /* Unused */
65 FLOW_OFF_THRESH = 0x3c,
66 FLOW_ON_THRESH = 0x3e,
67 EEPROM_DATA = 0x48,
68 EEPROM_CTRL = 0x4a,
69 EXPROM_ADDR = 0x4c, /* Unused */
70 EXPROM_DATA = 0x50, /* Unused */
71 WAKE_EVENT = 0x51, /* Unused */
72 COUNTDOWN = 0x54, /* Unused */
73 INT_STATUS_ACK = 0x5a,
74 INT_ENABLE = 0x5c,
75 INT_STATUS = 0x5e, /* Unused */
76 TX_STATUS = 0x60,
77 MAC_CTRL = 0x6c,
78 VLAN_TAG = 0x70, /* Unused */
79 PHY_SET = 0x75,
80 PHY_CTRL = 0x76,
81 STATION_ADDRESS_0 = 0x78,
82 STATION_ADDRESS_1 = 0x7a,
83 STATION_ADDRESS_2 = 0x7c,
84 MAX_FRAME_SIZE = 0x86,
85 RECEIVE_MODE = 0x88,
86 HASHTABLE_0 = 0x8c,
87 HASHTABLE_1 = 0x90,
88 RMON_STATISTICS_MASK = 0x98,
89 STATISTICS_MASK = 0x9c,
90 RX_JUMBO_FRAMES = 0xbc, /* Unused */
91 TCP_CHECKSUM_ERRORS = 0xc0, /* Unused */
92 IP_CHECKSUM_ERRORS = 0xc2, /* Unused */
93 UDP_CHECKSUM_ERRORS = 0xc4, /* Unused */
94 TX_JUMBO_FRAMES = 0xf4 /* Unused */
95};
96
97/* Ethernet MIB statistic register offsets. */
98#define IPG_OCTETRCVOK 0xA8
99#define IPG_MCSTOCTETRCVDOK 0xAC
100#define IPG_BCSTOCTETRCVOK 0xB0
101#define IPG_FRAMESRCVDOK 0xB4
102#define IPG_MCSTFRAMESRCVDOK 0xB8
103#define IPG_BCSTFRAMESRCVDOK 0xBE
104#define IPG_MACCONTROLFRAMESRCVD 0xC6
105#define IPG_FRAMETOOLONGERRORS 0xC8
106#define IPG_INRANGELENGTHERRORS 0xCA
107#define IPG_FRAMECHECKSEQERRORS 0xCC
108#define IPG_FRAMESLOSTRXERRORS 0xCE
109#define IPG_OCTETXMTOK 0xD0
110#define IPG_MCSTOCTETXMTOK 0xD4
111#define IPG_BCSTOCTETXMTOK 0xD8
112#define IPG_FRAMESXMTDOK 0xDC
113#define IPG_MCSTFRAMESXMTDOK 0xE0
114#define IPG_FRAMESWDEFERREDXMT 0xE4
115#define IPG_LATECOLLISIONS 0xE8
116#define IPG_MULTICOLFRAMES 0xEC
117#define IPG_SINGLECOLFRAMES 0xF0
118#define IPG_BCSTFRAMESXMTDOK 0xF6
119#define IPG_CARRIERSENSEERRORS 0xF8
120#define IPG_MACCONTROLFRAMESXMTDOK 0xFA
121#define IPG_FRAMESABORTXSCOLLS 0xFC
122#define IPG_FRAMESWEXDEFERRAL 0xFE
123
124/* RMON statistic register offsets. */
125#define IPG_ETHERSTATSCOLLISIONS 0x100
126#define IPG_ETHERSTATSOCTETSTRANSMIT 0x104
127#define IPG_ETHERSTATSPKTSTRANSMIT 0x108
128#define IPG_ETHERSTATSPKTS64OCTESTSTRANSMIT 0x10C
129#define IPG_ETHERSTATSPKTS65TO127OCTESTSTRANSMIT 0x110
130#define IPG_ETHERSTATSPKTS128TO255OCTESTSTRANSMIT 0x114
131#define IPG_ETHERSTATSPKTS256TO511OCTESTSTRANSMIT 0x118
132#define IPG_ETHERSTATSPKTS512TO1023OCTESTSTRANSMIT 0x11C
133#define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120
134#define IPG_ETHERSTATSCRCALIGNERRORS 0x124
135#define IPG_ETHERSTATSUNDERSIZEPKTS 0x128
136#define IPG_ETHERSTATSFRAGMENTS 0x12C
137#define IPG_ETHERSTATSJABBERS 0x130
138#define IPG_ETHERSTATSOCTETS 0x134
139#define IPG_ETHERSTATSPKTS 0x138
140#define IPG_ETHERSTATSPKTS64OCTESTS 0x13C
141#define IPG_ETHERSTATSPKTS65TO127OCTESTS 0x140
142#define IPG_ETHERSTATSPKTS128TO255OCTESTS 0x144
143#define IPG_ETHERSTATSPKTS256TO511OCTESTS 0x148
144#define IPG_ETHERSTATSPKTS512TO1023OCTESTS 0x14C
145#define IPG_ETHERSTATSPKTS1024TO1518OCTESTS 0x150
146
147/* RMON statistic register equivalents. */
148#define IPG_ETHERSTATSMULTICASTPKTSTRANSMIT 0xE0
149#define IPG_ETHERSTATSBROADCASTPKTSTRANSMIT 0xF6
150#define IPG_ETHERSTATSMULTICASTPKTS 0xB8
151#define IPG_ETHERSTATSBROADCASTPKTS 0xBE
152#define IPG_ETHERSTATSOVERSIZEPKTS 0xC8
153#define IPG_ETHERSTATSDROPEVENTS 0xCE
154
155/* Serial EEPROM offsets */
156#define IPG_EEPROM_CONFIGPARAM 0x00
157#define IPG_EEPROM_ASICCTRL 0x01
158#define IPG_EEPROM_SUBSYSTEMVENDORID 0x02
159#define IPG_EEPROM_SUBSYSTEMID 0x03
160#define IPG_EEPROM_STATIONADDRESS0 0x10
161#define IPG_EEPROM_STATIONADDRESS1 0x11
162#define IPG_EEPROM_STATIONADDRESS2 0x12
163
164/* Register & data structure bit masks */
165
166/* PCI register masks. */
167
168/* IOBaseAddress */
169#define IPG_PIB_RSVD_MASK 0xFFFFFE01
170#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00
171#define IPG_PIB_IOBASEADDRIND 0x00000001
172
173/* MemBaseAddress */
174#define IPG_PMB_RSVD_MASK 0xFFFFFE07
175#define IPG_PMB_MEMBASEADDRIND 0x00000001
176#define IPG_PMB_MEMMAPTYPE 0x00000006
177#define IPG_PMB_MEMMAPTYPE0 0x00000002
178#define IPG_PMB_MEMMAPTYPE1 0x00000004
179#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00
180
181/* ConfigStatus */
182#define IPG_CS_RSVD_MASK 0xFFB0
183#define IPG_CS_CAPABILITIES 0x0010
184#define IPG_CS_66MHZCAPABLE 0x0020
185#define IPG_CS_FASTBACK2BACK 0x0080
186#define IPG_CS_DATAPARITYREPORTED 0x0100
187#define IPG_CS_DEVSELTIMING 0x0600
188#define IPG_CS_SIGNALEDTARGETABORT 0x0800
189#define IPG_CS_RECEIVEDTARGETABORT 0x1000
190#define IPG_CS_RECEIVEDMASTERABORT 0x2000
191#define IPG_CS_SIGNALEDSYSTEMERROR 0x4000
192#define IPG_CS_DETECTEDPARITYERROR 0x8000
193
194/* TFD data structure masks. */
195
196/* TFDList, TFC */
197#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFFULL
198#define IPG_TFC_FRAMEID 0x000000000000FFFFULL
199#define IPG_TFC_WORDALIGN 0x0000000000030000ULL
200#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000ULL
201#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000ULL
202#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000ULL
203#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000ULL
204#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000ULL
205#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000ULL
206#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000ULL
207#define IPG_TFC_TXINDICATE 0x0000000000400000ULL
208#define IPG_TFC_TXDMAINDICATE 0x0000000000800000ULL
209#define IPG_TFC_FRAGCOUNT 0x000000000F000000ULL
210#define IPG_TFC_VLANTAGINSERT 0x0000000010000000ULL
211#define IPG_TFC_TFDDONE 0x0000000080000000ULL
212#define IPG_TFC_VID 0x00000FFF00000000ULL
213#define IPG_TFC_CFI 0x0000100000000000ULL
214#define IPG_TFC_USERPRIORITY 0x0000E00000000000ULL
215
216/* TFDList, FragInfo */
217#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL
218#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFFULL
219#define IPG_TFI_FRAGLEN 0xFFFF000000000000ULL
220
221/* RFD data structure masks. */
222
223/* RFDList, RFS */
224#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFFULL
225#define IPG_RFS_RXFRAMELEN 0x000000000000FFFFULL
226#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000ULL
227#define IPG_RFS_RXRUNTFRAME 0x0000000000020000ULL
228#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000ULL
229#define IPG_RFS_RXFCSERROR 0x0000000000080000ULL
230#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000ULL
231#define IPG_RFS_RXLENGTHERROR 0x0000000000200000ULL
232#define IPG_RFS_VLANDETECTED 0x0000000000400000ULL
233#define IPG_RFS_TCPDETECTED 0x0000000000800000ULL
234#define IPG_RFS_TCPERROR 0x0000000001000000ULL
235#define IPG_RFS_UDPDETECTED 0x0000000002000000ULL
236#define IPG_RFS_UDPERROR 0x0000000004000000ULL
237#define IPG_RFS_IPDETECTED 0x0000000008000000ULL
238#define IPG_RFS_IPERROR 0x0000000010000000ULL
239#define IPG_RFS_FRAMESTART 0x0000000020000000ULL
240#define IPG_RFS_FRAMEEND 0x0000000040000000ULL
241#define IPG_RFS_RFDDONE 0x0000000080000000ULL
242#define IPG_RFS_TCI 0x0000FFFF00000000ULL
243
244/* RFDList, FragInfo */
245#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL
246#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFFULL
247#define IPG_RFI_FRAGLEN 0xFFFF000000000000ULL
248
249/* I/O Register masks. */
250
251/* RMON Statistics Mask */
252#define IPG_RZ_ALL 0x0FFFFFFF
253
254/* Statistics Mask */
255#define IPG_SM_ALL 0x0FFFFFFF
256#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001
257#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002
258#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004
259#define IPG_SM_RXJUMBOFRAMES 0x00000008
260#define IPG_SM_TCPCHECKSUMERRORS 0x00000010
261#define IPG_SM_IPCHECKSUMERRORS 0x00000020
262#define IPG_SM_UDPCHECKSUMERRORS 0x00000040
263#define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080
264#define IPG_SM_FRAMESTOOLONGERRORS 0x00000100
265#define IPG_SM_INRANGELENGTHERRORS 0x00000200
266#define IPG_SM_FRAMECHECKSEQERRORS 0x00000400
267#define IPG_SM_FRAMESLOSTRXERRORS 0x00000800
268#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000
269#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000
270#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000
271#define IPG_SM_FRAMESWDEFERREDXMT 0x00008000
272#define IPG_SM_LATECOLLISIONS 0x00010000
273#define IPG_SM_MULTICOLFRAMES 0x00020000
274#define IPG_SM_SINGLECOLFRAMES 0x00040000
275#define IPG_SM_TXJUMBOFRAMES 0x00080000
276#define IPG_SM_CARRIERSENSEERRORS 0x00100000
277#define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000
278#define IPG_SM_FRAMESABORTXSCOLLS 0x00400000
279#define IPG_SM_FRAMESWEXDEFERAL 0x00800000
280
281/* Countdown */
282#define IPG_CD_RSVD_MASK 0x0700FFFF
283#define IPG_CD_COUNT 0x0000FFFF
284#define IPG_CD_COUNTDOWNSPEED 0x01000000
285#define IPG_CD_COUNTDOWNMODE 0x02000000
286#define IPG_CD_COUNTINTENABLED 0x04000000
287
288/* TxDMABurstThresh */
289#define IPG_TB_RSVD_MASK 0xFF
290
291/* TxDMAUrgentThresh */
292#define IPG_TU_RSVD_MASK 0xFF
293
294/* TxDMAPollPeriod */
295#define IPG_TP_RSVD_MASK 0xFF
296
297/* RxDMAUrgentThresh */
298#define IPG_RU_RSVD_MASK 0xFF
299
300/* RxDMAPollPeriod */
301#define IPG_RP_RSVD_MASK 0xFF
302
303/* ReceiveMode */
304#define IPG_RM_RSVD_MASK 0x3F
305#define IPG_RM_RECEIVEUNICAST 0x01
306#define IPG_RM_RECEIVEMULTICAST 0x02
307#define IPG_RM_RECEIVEBROADCAST 0x04
308#define IPG_RM_RECEIVEALLFRAMES 0x08
309#define IPG_RM_RECEIVEMULTICASTHASH 0x10
310#define IPG_RM_RECEIVEIPMULTICAST 0x20
311
312/* PhySet */
313#define IPG_PS_MEM_LENB9B 0x01
314#define IPG_PS_MEM_LEN9 0x02
315#define IPG_PS_NON_COMPDET 0x04
316
317/* PhyCtrl */
318#define IPG_PC_RSVD_MASK 0xFF
319#define IPG_PC_MGMTCLK_LO 0x00
320#define IPG_PC_MGMTCLK_HI 0x01
321#define IPG_PC_MGMTCLK 0x01
322#define IPG_PC_MGMTDATA 0x02
323#define IPG_PC_MGMTDIR 0x04
324#define IPG_PC_DUPLEX_POLARITY 0x08
325#define IPG_PC_DUPLEX_STATUS 0x10
326#define IPG_PC_LINK_POLARITY 0x20
327#define IPG_PC_LINK_SPEED 0xC0
328#define IPG_PC_LINK_SPEED_10MBPS 0x40
329#define IPG_PC_LINK_SPEED_100MBPS 0x80
330#define IPG_PC_LINK_SPEED_1000MBPS 0xC0
331
332/* DMACtrl */
333#define IPG_DC_RSVD_MASK 0xC07D9818
334#define IPG_DC_RX_DMA_COMPLETE 0x00000008
335#define IPG_DC_RX_DMA_POLL_NOW 0x00000010
336#define IPG_DC_TX_DMA_COMPLETE 0x00000800
337#define IPG_DC_TX_DMA_POLL_NOW 0x00001000
338#define IPG_DC_TX_DMA_IN_PROG 0x00008000
339#define IPG_DC_RX_EARLY_DISABLE 0x00010000
340#define IPG_DC_MWI_DISABLE 0x00040000
341#define IPG_DC_TX_WRITE_BACK_DISABLE 0x00080000
342#define IPG_DC_TX_BURST_LIMIT 0x00700000
343#define IPG_DC_TARGET_ABORT 0x40000000
344#define IPG_DC_MASTER_ABORT 0x80000000
345
346/* ASICCtrl */
347#define IPG_AC_RSVD_MASK 0x07FFEFF2
348#define IPG_AC_EXP_ROM_SIZE 0x00000002
349#define IPG_AC_PHY_SPEED10 0x00000010
350#define IPG_AC_PHY_SPEED100 0x00000020
351#define IPG_AC_PHY_SPEED1000 0x00000040
352#define IPG_AC_PHY_MEDIA 0x00000080
353#define IPG_AC_FORCED_CFG 0x00000700
354#define IPG_AC_D3RESETDISABLE 0x00000800
355#define IPG_AC_SPEED_UP_MODE 0x00002000
356#define IPG_AC_LED_MODE 0x00004000
357#define IPG_AC_RST_OUT_POLARITY 0x00008000
358#define IPG_AC_GLOBAL_RESET 0x00010000
359#define IPG_AC_RX_RESET 0x00020000
360#define IPG_AC_TX_RESET 0x00040000
361#define IPG_AC_DMA 0x00080000
362#define IPG_AC_FIFO 0x00100000
363#define IPG_AC_NETWORK 0x00200000
364#define IPG_AC_HOST 0x00400000
365#define IPG_AC_AUTO_INIT 0x00800000
366#define IPG_AC_RST_OUT 0x01000000
367#define IPG_AC_INT_REQUEST 0x02000000
368#define IPG_AC_RESET_BUSY 0x04000000
369#define IPG_AC_LED_SPEED 0x08000000
370#define IPG_AC_LED_MODE_BIT_1 0x20000000
371
372/* EepromCtrl */
373#define IPG_EC_RSVD_MASK 0x83FF
374#define IPG_EC_EEPROM_ADDR 0x00FF
375#define IPG_EC_EEPROM_OPCODE 0x0300
376#define IPG_EC_EEPROM_SUBCOMMAD 0x0000
377#define IPG_EC_EEPROM_WRITEOPCODE 0x0100
378#define IPG_EC_EEPROM_READOPCODE 0x0200
379#define IPG_EC_EEPROM_ERASEOPCODE 0x0300
380#define IPG_EC_EEPROM_BUSY 0x8000
381
382/* FIFOCtrl */
383#define IPG_FC_RSVD_MASK 0xC001
384#define IPG_FC_RAM_TEST_MODE 0x0001
385#define IPG_FC_TRANSMITTING 0x4000
386#define IPG_FC_RECEIVING 0x8000
387
388/* TxStatus */
389#define IPG_TS_RSVD_MASK 0xFFFF00DD
390#define IPG_TS_TX_ERROR 0x00000001
391#define IPG_TS_LATE_COLLISION 0x00000004
392#define IPG_TS_TX_MAX_COLL 0x00000008
393#define IPG_TS_TX_UNDERRUN 0x00000010
394#define IPG_TS_TX_IND_REQD 0x00000040
395#define IPG_TS_TX_COMPLETE 0x00000080
396#define IPG_TS_TX_FRAMEID 0xFFFF0000
397
398/* WakeEvent */
399#define IPG_WE_WAKE_PKT_ENABLE 0x01
400#define IPG_WE_MAGIC_PKT_ENABLE 0x02
401#define IPG_WE_LINK_EVT_ENABLE 0x04
402#define IPG_WE_WAKE_POLARITY 0x08
403#define IPG_WE_WAKE_PKT_EVT 0x10
404#define IPG_WE_MAGIC_PKT_EVT 0x20
405#define IPG_WE_LINK_EVT 0x40
406#define IPG_WE_WOL_ENABLE 0x80
407
408/* IntEnable */
409#define IPG_IE_RSVD_MASK 0x1FFE
410#define IPG_IE_HOST_ERROR 0x0002
411#define IPG_IE_TX_COMPLETE 0x0004
412#define IPG_IE_MAC_CTRL_FRAME 0x0008
413#define IPG_IE_RX_COMPLETE 0x0010
414#define IPG_IE_RX_EARLY 0x0020
415#define IPG_IE_INT_REQUESTED 0x0040
416#define IPG_IE_UPDATE_STATS 0x0080
417#define IPG_IE_LINK_EVENT 0x0100
418#define IPG_IE_TX_DMA_COMPLETE 0x0200
419#define IPG_IE_RX_DMA_COMPLETE 0x0400
420#define IPG_IE_RFD_LIST_END 0x0800
421#define IPG_IE_RX_DMA_PRIORITY 0x1000
422
423/* IntStatus */
424#define IPG_IS_RSVD_MASK 0x1FFF
425#define IPG_IS_INTERRUPT_STATUS 0x0001
426#define IPG_IS_HOST_ERROR 0x0002
427#define IPG_IS_TX_COMPLETE 0x0004
428#define IPG_IS_MAC_CTRL_FRAME 0x0008
429#define IPG_IS_RX_COMPLETE 0x0010
430#define IPG_IS_RX_EARLY 0x0020
431#define IPG_IS_INT_REQUESTED 0x0040
432#define IPG_IS_UPDATE_STATS 0x0080
433#define IPG_IS_LINK_EVENT 0x0100
434#define IPG_IS_TX_DMA_COMPLETE 0x0200
435#define IPG_IS_RX_DMA_COMPLETE 0x0400
436#define IPG_IS_RFD_LIST_END 0x0800
437#define IPG_IS_RX_DMA_PRIORITY 0x1000
438
439/* MACCtrl */
440#define IPG_MC_RSVD_MASK 0x7FE33FA3
441#define IPG_MC_IFS_SELECT 0x00000003
442#define IPG_MC_IFS_4352BIT 0x00000003
443#define IPG_MC_IFS_1792BIT 0x00000002
444#define IPG_MC_IFS_1024BIT 0x00000001
445#define IPG_MC_IFS_96BIT 0x00000000
446#define IPG_MC_DUPLEX_SELECT 0x00000020
447#define IPG_MC_DUPLEX_SELECT_FD 0x00000020
448#define IPG_MC_DUPLEX_SELECT_HD 0x00000000
449#define IPG_MC_TX_FLOW_CONTROL_ENABLE 0x00000080
450#define IPG_MC_RX_FLOW_CONTROL_ENABLE 0x00000100
451#define IPG_MC_RCV_FCS 0x00000200
452#define IPG_MC_FIFO_LOOPBACK 0x00000400
453#define IPG_MC_MAC_LOOPBACK 0x00000800
454#define IPG_MC_AUTO_VLAN_TAGGING 0x00001000
455#define IPG_MC_AUTO_VLAN_UNTAGGING 0x00002000
456#define IPG_MC_COLLISION_DETECT 0x00010000
457#define IPG_MC_CARRIER_SENSE 0x00020000
458#define IPG_MC_STATISTICS_ENABLE 0x00200000
459#define IPG_MC_STATISTICS_DISABLE 0x00400000
460#define IPG_MC_STATISTICS_ENABLED 0x00800000
461#define IPG_MC_TX_ENABLE 0x01000000
462#define IPG_MC_TX_DISABLE 0x02000000
463#define IPG_MC_TX_ENABLED 0x04000000
464#define IPG_MC_RX_ENABLE 0x08000000
465#define IPG_MC_RX_DISABLE 0x10000000
466#define IPG_MC_RX_ENABLED 0x20000000
467#define IPG_MC_PAUSED 0x40000000
468
469/*
470 * Tune
471 */
472
473/* Assign IPG_APPEND_FCS_ON_TX > 0 for auto FCS append on TX. */
474#define IPG_APPEND_FCS_ON_TX 1
475
476/* Assign IPG_STRIP_FCS_ON_RX > 0 for auto FCS strip on RX. */
477#define IPG_STRIP_FCS_ON_RX 1
478
479/* Assign IPG_DROP_ON_RX_ETH_ERRORS > 0 to drop RX frames with
480 * Ethernet errors.
481 */
482#define IPG_DROP_ON_RX_ETH_ERRORS 1
483
484/* Assign IPG_INSERT_MANUAL_VLAN_TAG > 0 to insert VLAN tags manually
485 * (via TFC).
486 */
487#define IPG_INSERT_MANUAL_VLAN_TAG 0
488
489/* Assign IPG_ADD_IPCHECKSUM_ON_TX > 0 for auto IP checksum on TX. */
490#define IPG_ADD_IPCHECKSUM_ON_TX 0
491
492/* Assign IPG_ADD_TCPCHECKSUM_ON_TX > 0 for auto TCP checksum on TX.
493 * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER.
494 */
495#define IPG_ADD_TCPCHECKSUM_ON_TX 0
496
497/* Assign IPG_ADD_UDPCHECKSUM_ON_TX > 0 for auto UDP checksum on TX.
498 * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER.
499 */
500#define IPG_ADD_UDPCHECKSUM_ON_TX 0
501
502/* If inserting VLAN tags manually, assign the IPG_MANUAL_VLAN_xx
503 * constants as desired.
504 */
505#define IPG_MANUAL_VLAN_VID 0xABC
506#define IPG_MANUAL_VLAN_CFI 0x1
507#define IPG_MANUAL_VLAN_USERPRIORITY 0x5
508
509#define IPG_IO_REG_RANGE 0xFF
510#define IPG_MEM_REG_RANGE 0x154
511#define IPG_DRIVER_NAME "Sundance Technology IPG Triple-Speed Ethernet"
512#define IPG_NIC_PHY_ADDRESS 0x01
513#define IPG_DMALIST_ALIGN_PAD 0x07
514#define IPG_MULTICAST_HASHTABLE_SIZE 0x40
515
516/* Number of milliseconds to wait after issuing a software reset.
517 * 0x05 <= IPG_AC_RESETWAIT to account for proper 10Mbps operation.
518 */
519#define IPG_AC_RESETWAIT 0x05
520
521/* Number of IPG_AC_RESETWAIT time periods before declaring timeout. */
522#define IPG_AC_RESET_TIMEOUT 0x0A
523
524/* Minimum number of nanoseconds used to toggle MDC clock during
525 * MII/GMII register access.
526 */
527#define IPG_PC_PHYCTRLWAIT_NS 200
528
529#define IPG_TFDLIST_LENGTH 0x100
530
531/* Number of frames between TxDMAComplete interrupts.
532 * 0 < IPG_FRAMESBETWEENTXDMACOMPLETES <= IPG_TFDLIST_LENGTH
533 */
534#define IPG_FRAMESBETWEENTXDMACOMPLETES 0x1
535
536#define IPG_RFDLIST_LENGTH 0x100
537
538/* Maximum number of RFDs to process per interrupt.
539 * 1 < IPG_MAXRFDPROCESS_COUNT < IPG_RFDLIST_LENGTH
540 */
541#define IPG_MAXRFDPROCESS_COUNT 0x80
542
543/* Minimum margin between the last freed RFD and the current RFD.
544 * 1 < IPG_MINUSEDRFDSTOFREE < IPG_RFDLIST_LENGTH
545 */
546#define IPG_MINUSEDRFDSTOFREE 0x80
547
548/* Specify the jumbo frame maximum size in units of 0x600 bytes
549 * (the rx buffer size that one RFD can carry).
550 */
551#define MAX_JUMBOSIZE 0x8 /* max is 12K */
552
553/* Key register values loaded at driver start up. */
554
555/* TXDMAPollPeriod is specified in 320ns increments.
556 *
557 * Value Time
558 * ---------------------
559 * 0x00-0x01 320ns
560 * 0x03 ~1us
561 * 0x1F ~10us
562 * 0xFF ~82us
563 */
564#define IPG_TXDMAPOLLPERIOD_VALUE 0x26
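
The table rows follow directly from the 320 ns unit: 0x1F is 31 * 320 ns, roughly 10 us, and 0xFF is 255 * 320 ns, roughly 82 us, so the 0x26 chosen here polls about every 12 us. A one-off check:

#include <stdio.h>

int main(void)
{
	unsigned int vals[] = { 0x03, 0x1f, 0x26, 0xff };
	unsigned int i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		printf("0x%02x -> %u ns\n", vals[i], vals[i] * 320);
	return 0;
}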
565
566/* TxDMAUrgentThresh specifies the minimum amount of
567 * data in the transmit FIFO before asserting an
568 * urgent transmit DMA request.
569 *
570 * Value Min TxFIFO occupied space before urgent TX request
571 * ---------------------------------------------------------------
572 * 0x00-0x04 128 bytes (1024 bits)
573 * 0x27 1248 bytes (~10000 bits)
574 * 0x30 1536 bytes (12288 bits)
575 * 0xFF 8192 bytes (65536 bits)
576 */
577#define IPG_TXDMAURGENTTHRESH_VALUE 0x04
578
579/* TxDMABurstThresh specifies the minimum amount of
580 * free space in the transmit FIFO before asserting an
581 * transmit DMA request.
582 *
583 * Value Min TxFIFO free space before TX request
584 * ----------------------------------------------------
585 * 0x00-0x08 256 bytes
586 * 0x30 1536 bytes
587 * 0xFF 8192 bytes
588 */
589#define IPG_TXDMABURSTTHRESH_VALUE 0x30
590
591/* RXDMAPollPeriod is specified in 320ns increments.
592 *
593 * Value Time
594 * ---------------------
595 * 0x00-0x01 320ns
596 * 0x03 ~1us
597 * 0x1F ~10us
598 * 0xFF ~82us
599 */
600#define IPG_RXDMAPOLLPERIOD_VALUE 0x01
601
602/* RxDMAUrgentThresh specifies the minimum amount of
603 * free space within the receive FIFO before asserting
604 * an urgent receive DMA request.
605 *
606 * Value Min RxFIFO free space before urgent RX request
607 * ---------------------------------------------------------------
608 * 0x00-0x04 128 bytes (1024 bits)
609 * 0x27 1248 bytes (~10000 bits)
610 * 0x30 1536 bytes (12288 bits)
611 * 0xFF 8192 bytes (65536 bits)
612 */
613#define IPG_RXDMAURGENTTHRESH_VALUE 0x30
614
615/* RxDMABurstThresh specifies the minimum amount of
616 * occupied space within the receive FIFO before asserting
617 * a receive DMA request.
618 *
619 * Value Min RxFIFO occupied space before RX request
620 * ----------------------------------------------------
621 * 0x00-0x08 256 bytes
622 * 0x30 1536 bytes
623 * 0xFF 8192 bytes
624 */
625#define IPG_RXDMABURSTTHRESH_VALUE 0x30
626
627/* FlowOnThresh specifies the maximum amount of occupied
628 * space in the receive FIFO before a PAUSE frame with
629 * maximum pause time is transmitted.
630 *
631 * Value Max RxFIFO occupied space before PAUSE
632 * ---------------------------------------------------
633 * 0x0000 0 bytes
634 * 0x0740 29,696 bytes
635 * 0x07FF 32,752 bytes
636 */
637#define IPG_FLOWONTHRESH_VALUE 0x0740
638
639/* FlowOffThresh specifies the minimum amount of occupied
640 * space in the receive FIFO before a PAUSE frame with
641 * zero pause time is transmitted.
642 *
643 * Value Min RxFIFO occupied space before PAUSE release
644 * ---------------------------------------------------
645 * 0x0000 0 bytes
646 * 0x00BF 3056 bytes
647 * 0x07FF 32,752 bytes
648 */
649#define IPG_FLOWOFFTHRESH_VALUE 0x00BF
650
651/*
652 * Miscellaneous macros.
653 */
654
655/* Macros for printing debug statements. */
656#ifdef IPG_DEBUG
657# define IPG_DEBUG_MSG(fmt, args...) \
658do { \
659 if (0) \
660 printk(KERN_DEBUG "IPG: " fmt, ##args); \
661} while (0)
662# define IPG_DDEBUG_MSG(fmt, args...) \
663 printk(KERN_DEBUG "IPG: " fmt, ##args)
664# define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args)
665# define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args)
666#else
667# define IPG_DEBUG_MSG(fmt, args...) \
668do { \
669 if (0) \
670 printk(KERN_DEBUG "IPG: " fmt, ##args); \
671} while (0)
672# define IPG_DDEBUG_MSG(fmt, args...) \
673do { \
674 if (0) \
675 printk(KERN_DEBUG "IPG: " fmt, ##args); \
676} while (0)
677# define IPG_DUMPRFDLIST(args)
678# define IPG_DUMPTFDLIST(args)
679#endif
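
Both variants of IPG_DEBUG_MSG above rely on the same idiom: wrapping printk in "if (0)" keeps the format string and arguments type-checked in every build while the call itself is compiled away. A userspace sketch of the idiom, using the same GNU named-variadic style as the header:

#include <stdio.h>

/* Arguments stay type-checked, but no code is emitted for the call. */
#define DEBUG_MSG(fmt, args...) \
do { \
	if (0) \
		printf("IPG: " fmt, ##args); \
} while (0)

int main(void)
{
	DEBUG_MSG("entry %d\n", 42);	/* no output, no overhead */
	printf("done\n");
	return 0;
}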
680
681/*
682 * End miscellaneous macros.
683 */
684
685/* Transmit Frame Descriptor. The IPG supports 15 fragments;
686 * however, this driver uses only a single fragment. Note, each
687 * TFD field is 64 bits wide.
688 */
689struct ipg_tx {
690 __le64 next_desc;
691 __le64 tfc;
692 __le64 frag_info;
693};
694
695/* Receive Frame Descriptor. Note, each RFD field is 64 bits wide.
696 */
697struct ipg_rx {
698 __le64 next_desc;
699 __le64 rfs;
700 __le64 frag_info;
701};
702
703struct ipg_jumbo {
704 int found_start;
705 int current_size;
706 struct sk_buff *skb;
707};
708
709/* Structure of IPG NIC specific data. */
710struct ipg_nic_private {
711 void __iomem *ioaddr;
712 struct ipg_tx *txd;
713 struct ipg_rx *rxd;
714 dma_addr_t txd_map;
715 dma_addr_t rxd_map;
716 struct sk_buff *tx_buff[IPG_TFDLIST_LENGTH];
717 struct sk_buff *rx_buff[IPG_RFDLIST_LENGTH];
718 unsigned int tx_current;
719 unsigned int tx_dirty;
720 unsigned int rx_current;
721 unsigned int rx_dirty;
722 bool is_jumbo;
723 struct ipg_jumbo jumbo;
724 unsigned long rxfrag_size;
725 unsigned long rxsupport_size;
726 unsigned long max_rxframe_size;
727 unsigned int rx_buf_sz;
728 struct pci_dev *pdev;
729 struct net_device *dev;
730 struct net_device_stats stats;
731 spinlock_t lock;
732 int tenmbpsmode;
733
734 u16 led_mode;
735 u16 station_addr[3]; /* Station Address in EEPROM Reg 0x10..0x12 */
736
737 struct mutex mii_mutex;
738 struct mii_if_info mii_if;
739 int reset_current_tfd;
740#ifdef IPG_DEBUG
741 int RFDlistendCount;
742 int RFDListCheckedCount;
743 int EmptyRFDListCount;
744#endif
745 struct delayed_work task;
746};
747
748#endif /* __LINUX_IPG_H */
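
The do { if (0) printk(...); } while (0) form used by the debug macros above
is a deliberate idiom: the if (0) arm is dead code, so the message costs
nothing at runtime, but the compiler still type-checks the format string
against its arguments, and the do/while (0) wrapper keeps the macro safe as a
single statement under an un-braced if. A minimal userspace sketch of the
idiom (names are illustrative, not part of the driver):

	#include <stdio.h>

	/* The if (0) arm is never taken, so nothing is printed, but the
	 * format string is still checked against the arguments, and
	 * do/while (0) makes the macro act as one statement. */
	#define DEBUG_MSG(fmt, args...)				\
	do {							\
		if (0)						\
			printf("IPG: " fmt, ##args);		\
	} while (0)

	int main(void)
	{
		int errors = 3;

		DEBUG_MSG("errors = %d\n", errors);	/* compiles to nothing */
		return 0;
	}
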
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 85f1b1e7e505..31c491e02e69 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -892,9 +892,10 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
892 dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn; 892 dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
893 dev->caps.port_mask[i] = dev->caps.port_type[i]; 893 dev->caps.port_mask[i] = dev->caps.port_type[i];
894 dev->caps.phys_port_id[i] = func_cap.phys_port_id; 894 dev->caps.phys_port_id[i] = func_cap.phys_port_id;
895 if (mlx4_get_slave_pkey_gid_tbl_len(dev, i, 895 err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
896 &dev->caps.gid_table_len[i], 896 &dev->caps.gid_table_len[i],
897 &dev->caps.pkey_table_len[i])) 897 &dev->caps.pkey_table_len[i]);
898 if (err)
898 goto err_mem; 899 goto err_mem;
899 } 900 }
900 901
@@ -906,6 +907,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
906 dev->caps.uar_page_size * dev->caps.num_uars, 907 dev->caps.uar_page_size * dev->caps.num_uars,
907 (unsigned long long) 908 (unsigned long long)
908 pci_resource_len(dev->persist->pdev, 2)); 909 pci_resource_len(dev->persist->pdev, 2));
910 err = -ENOMEM;
909 goto err_mem; 911 goto err_mem;
910 } 912 }
911 913
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 9813d34f3e5b..6fec3e993d02 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -4952,26 +4952,41 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4952 struct res_counter *counter; 4952 struct res_counter *counter;
4953 struct res_counter *tmp; 4953 struct res_counter *tmp;
4954 int err; 4954 int err;
4955 int index; 4955 int *counters_arr = NULL;
4956 int i, j;
4956 4957
4957 err = move_all_busy(dev, slave, RES_COUNTER); 4958 err = move_all_busy(dev, slave, RES_COUNTER);
4958 if (err) 4959 if (err)
4959 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n", 4960 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4960 slave); 4961 slave);
4961 4962
4962 spin_lock_irq(mlx4_tlock(dev)); 4963 counters_arr = kmalloc_array(dev->caps.max_counters,
4963 list_for_each_entry_safe(counter, tmp, counter_list, com.list) { 4964 sizeof(*counters_arr), GFP_KERNEL);
4964 if (counter->com.owner == slave) { 4965 if (!counters_arr)
4965 index = counter->com.res_id; 4966 return;
4966 rb_erase(&counter->com.node, 4967
4967 &tracker->res_tree[RES_COUNTER]); 4968 do {
4968 list_del(&counter->com.list); 4969 i = 0;
4969 kfree(counter); 4970 j = 0;
4970 __mlx4_counter_free(dev, index); 4971 spin_lock_irq(mlx4_tlock(dev));
4972 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4973 if (counter->com.owner == slave) {
4974 counters_arr[i++] = counter->com.res_id;
4975 rb_erase(&counter->com.node,
4976 &tracker->res_tree[RES_COUNTER]);
4977 list_del(&counter->com.list);
4978 kfree(counter);
4979 }
4980 }
4981 spin_unlock_irq(mlx4_tlock(dev));
4982
4983 while (j < i) {
4984 __mlx4_counter_free(dev, counters_arr[j++]);
4971 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); 4985 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
4972 } 4986 }
4973 } 4987 } while (i);
4974 spin_unlock_irq(mlx4_tlock(dev)); 4988
4989 kfree(counters_arr);
4975} 4990}
4976 4991
4977static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave) 4992static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
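
The rework above is driven by a locking constraint: __mlx4_counter_free()
must not be called while holding the spinlock that protects the counter list.
The loop is therefore split into two phases, unlink entries and record their
ids under the lock, then release the ids with the lock dropped, and the whole
thing repeats until a pass collects nothing. A compact userspace model of the
same two-phase drain, with a pthread mutex standing in for the spinlock and
stand-in names throughout:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct counter { int id; struct counter *next; };

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct counter *counter_list;

	/* Stand-in for __mlx4_counter_free(): must not run under list_lock. */
	static void release_counter(int id) { printf("freed %d\n", id); }

	static void rem_all_counters(int max_counters)
	{
		int *ids = malloc(max_counters * sizeof(*ids));
		int i, j;

		if (!ids)
			return;
		do {
			i = 0;
			j = 0;
			pthread_mutex_lock(&list_lock);
			while (counter_list && i < max_counters) {	/* phase 1 */
				struct counter *c = counter_list;

				ids[i++] = c->id;	/* unlink and record */
				counter_list = c->next;
				free(c);
			}
			pthread_mutex_unlock(&list_lock);

			while (j < i)				/* phase 2 */
				release_counter(ids[j++]);	/* lock not held */
		} while (i);		/* rescan until a pass finds nothing */
		free(ids);
	}

	int main(void)
	{
		for (int id = 0; id < 3; id++) {
			struct counter *c = malloc(sizeof(*c));

			c->id = id;
			c->next = counter_list;
			counter_list = c;
		}
		rem_all_counters(2);	/* small array: forces a second pass */
		return 0;
	}
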
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index f2ae62dd8c09..22e72bf1ae48 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -334,9 +334,15 @@ struct mlx5e_tx_skb_cb {
334 334
335#define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb) 335#define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb)
336 336
337enum mlx5e_dma_map_type {
338 MLX5E_DMA_MAP_SINGLE,
339 MLX5E_DMA_MAP_PAGE
340};
341
337struct mlx5e_sq_dma { 342struct mlx5e_sq_dma {
338 dma_addr_t addr; 343 dma_addr_t addr;
339 u32 size; 344 u32 size;
345 enum mlx5e_dma_map_type type;
340}; 346};
341 347
342enum { 348enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5fc4d2d78cdf..1e52db32c73d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1332,6 +1332,42 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
1332 return err; 1332 return err;
1333} 1333}
1334 1334
1335static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
1336 u32 tirn)
1337{
1338 void *in;
1339 int inlen;
1340 int err;
1341
1342 inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
1343 in = mlx5_vzalloc(inlen);
1344 if (!in)
1345 return -ENOMEM;
1346
1347 MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
1348
1349 err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
1350
1351 kvfree(in);
1352
1353 return err;
1354}
1355
1356static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
1357{
1358 int err;
1359 int i;
1360
1361 for (i = 0; i < MLX5E_NUM_TT; i++) {
1362 err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
1363 priv->tirn[i]);
1364 if (err)
1365 return err;
1366 }
1367
1368 return 0;
1369}
1370
1335static int mlx5e_set_dev_port_mtu(struct net_device *netdev) 1371static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
1336{ 1372{
1337 struct mlx5e_priv *priv = netdev_priv(netdev); 1373 struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -1376,6 +1412,13 @@ int mlx5e_open_locked(struct net_device *netdev)
1376 goto err_clear_state_opened_flag; 1412 goto err_clear_state_opened_flag;
1377 } 1413 }
1378 1414
1415 err = mlx5e_refresh_tirs_self_loopback_enable(priv);
1416 if (err) {
1417 netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
1418 __func__, err);
1419 goto err_close_channels;
1420 }
1421
1379 mlx5e_update_carrier(priv); 1422 mlx5e_update_carrier(priv);
1380 mlx5e_redirect_rqts(priv); 1423 mlx5e_redirect_rqts(priv);
1381 1424
@@ -1383,6 +1426,8 @@ int mlx5e_open_locked(struct net_device *netdev)
1383 1426
1384 return 0; 1427 return 0;
1385 1428
1429err_close_channels:
1430 mlx5e_close_channels(priv);
1386err_clear_state_opened_flag: 1431err_clear_state_opened_flag:
1387 clear_bit(MLX5E_STATE_OPENED, &priv->state); 1432 clear_bit(MLX5E_STATE_OPENED, &priv->state);
1388 return err; 1433 return err;
@@ -1856,6 +1901,8 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
1856 1901
1857 mlx5_query_port_max_mtu(mdev, &max_mtu, 1); 1902 mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
1858 1903
1904 max_mtu = MLX5E_HW2SW_MTU(max_mtu);
1905
1859 if (new_mtu > max_mtu) { 1906 if (new_mtu > max_mtu) {
1860 netdev_err(netdev, 1907 netdev_err(netdev,
1861 "%s: Bad MTU (%d) > (%d) Max\n", 1908 "%s: Bad MTU (%d) > (%d) Max\n",
@@ -1909,6 +1956,9 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
1909 "Not creating net device, some required device capabilities are missing\n"); 1956 "Not creating net device, some required device capabilities are missing\n");
1910 return -ENOTSUPP; 1957 return -ENOTSUPP;
1911 } 1958 }
1959 if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
1960 mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
1961
1912 return 0; 1962 return 0;
1913} 1963}
1914 1964
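
The new err_close_channels label follows the usual goto-ladder discipline: a
failure point added after the channels are opened gets a label that tears the
channels down and then falls through to the existing cleanup, so unwinding
always runs in reverse order of setup. (The MTU hunk in the same file is a
units fix: the hardware maximum must pass through MLX5E_HW2SW_MTU() before it
is compared with the requested software MTU.) A minimal sketch of the ladder
with stub steps, not the mlx5 API:

	#include <errno.h>
	#include <stdio.h>

	static int channels_open;

	static int  open_channels(void)  { channels_open = 1; return 0; }
	static void close_channels(void) { channels_open = 0; }
	static int  refresh_tirs(void)   { return -EIO; }	/* stub: fails */
	static void clear_opened_flag(void) { }

	static int open_locked(void)
	{
		int err;

		err = open_channels();
		if (err)
			goto err_clear_state_opened_flag;

		err = refresh_tirs();
		if (err)
			goto err_close_channels;	/* later step, higher label */

		return 0;

	err_close_channels:
		close_channels();		/* undo the newest step first */
	err_clear_state_opened_flag:
		clear_opened_flag();
		return err;
	}

	int main(void)
	{
		printf("open_locked() = %d, channels_open = %d\n",
		       open_locked(), channels_open);
		return 0;
	}
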
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index cd8f85a251d7..1341b1d3c421 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -61,39 +61,47 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
61 } 61 }
62} 62}
63 63
64static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr, 64static inline void mlx5e_tx_dma_unmap(struct device *pdev,
65 u32 *size) 65 struct mlx5e_sq_dma *dma)
66{ 66{
67 sq->dma_fifo_pc--; 67 switch (dma->type) {
68 *addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr; 68 case MLX5E_DMA_MAP_SINGLE:
69 *size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size; 69 dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
70} 70 break;
71 71 case MLX5E_DMA_MAP_PAGE:
72static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb) 72 dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
73{ 73 break;
74 dma_addr_t addr; 74 default:
75 u32 size; 75 WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
76 int i;
77
78 for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
79 mlx5e_dma_pop_last_pushed(sq, &addr, &size);
80 dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
81 } 76 }
82} 77}
83 78
84static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr, 79static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
85 u32 size) 80 dma_addr_t addr,
81 u32 size,
82 enum mlx5e_dma_map_type map_type)
86{ 83{
87 sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr; 84 sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
88 sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size; 85 sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
86 sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type;
89 sq->dma_fifo_pc++; 87 sq->dma_fifo_pc++;
90} 88}
91 89
92static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr, 90static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
93 u32 *size)
94{ 91{
95 *addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr; 92 return &sq->dma_fifo[i & sq->dma_fifo_mask];
96 *size = sq->dma_fifo[i & sq->dma_fifo_mask].size; 93}
94
95static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
96{
97 int i;
98
99 for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
100 struct mlx5e_sq_dma *last_pushed_dma =
101 mlx5e_dma_get(sq, --sq->dma_fifo_pc);
102
103 mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
104 }
97} 105}
98 106
99u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, 107u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
@@ -118,8 +126,15 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
118 */ 126 */
119#define MLX5E_MIN_INLINE ETH_HLEN 127#define MLX5E_MIN_INLINE ETH_HLEN
120 128
121 if (bf && (skb_headlen(skb) <= sq->max_inline)) 129 if (bf) {
122 return skb_headlen(skb); 130 u16 ihs = skb_headlen(skb);
131
132 if (skb_vlan_tag_present(skb))
133 ihs += VLAN_HLEN;
134
135 if (ihs <= sq->max_inline)
136 return skb_headlen(skb);
137 }
123 138
124 return MLX5E_MIN_INLINE; 139 return MLX5E_MIN_INLINE;
125} 140}
@@ -218,7 +233,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
218 dseg->lkey = sq->mkey_be; 233 dseg->lkey = sq->mkey_be;
219 dseg->byte_count = cpu_to_be32(headlen); 234 dseg->byte_count = cpu_to_be32(headlen);
220 235
221 mlx5e_dma_push(sq, dma_addr, headlen); 236 mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
222 MLX5E_TX_SKB_CB(skb)->num_dma++; 237 MLX5E_TX_SKB_CB(skb)->num_dma++;
223 238
224 dseg++; 239 dseg++;
@@ -237,7 +252,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
237 dseg->lkey = sq->mkey_be; 252 dseg->lkey = sq->mkey_be;
238 dseg->byte_count = cpu_to_be32(fsz); 253 dseg->byte_count = cpu_to_be32(fsz);
239 254
240 mlx5e_dma_push(sq, dma_addr, fsz); 255 mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
241 MLX5E_TX_SKB_CB(skb)->num_dma++; 256 MLX5E_TX_SKB_CB(skb)->num_dma++;
242 257
243 dseg++; 258 dseg++;
@@ -353,13 +368,10 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
353 } 368 }
354 369
355 for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) { 370 for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
356 dma_addr_t addr; 371 struct mlx5e_sq_dma *dma =
357 u32 size; 372 mlx5e_dma_get(sq, dma_fifo_cc++);
358 373
359 mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size); 374 mlx5e_tx_dma_unmap(sq->pdev, dma);
360 dma_fifo_cc++;
361 dma_unmap_single(sq->pdev, addr, size,
362 DMA_TO_DEVICE);
363 } 375 }
364 376
365 npkts++; 377 npkts++;
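
Two ideas are combined in this rewrite. First, every fifo entry now records
how its address was mapped, so mlx5e_tx_dma_unmap() can call
dma_unmap_single() or dma_unmap_page() to match; mixing the two up is a
correctness bug on IOMMU systems. Second, all indexing goes through
free-running counters masked by a power-of-two size: fifo[counter & mask]
wraps automatically, --pc pops the most recently pushed entry on the error
path, and cc++ consumes the oldest on completion. A runnable sketch of the
counter scheme (sizes and names are illustrative):

	#include <assert.h>
	#include <stdio.h>

	#define FIFO_SIZE 8			/* must be a power of two */
	#define FIFO_MASK (FIFO_SIZE - 1)

	static int fifo[FIFO_SIZE];
	static unsigned int pc, cc;		/* producer / consumer counters */

	static void push(int v)          { fifo[pc++ & FIFO_MASK] = v; }
	static int  pop_oldest(void)     { return fifo[cc++ & FIFO_MASK]; }
	static int  pop_last_pushed(void){ return fifo[--pc & FIFO_MASK]; }

	int main(void)
	{
		push(1); push(2); push(3);
		assert(pop_last_pushed() == 3);	/* error path: newest first */
		assert(pop_oldest() == 1);	/* completion path: oldest first */
		printf("ok\n");
		return 0;
	}
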
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index b4f21232019a..79ef799f88ab 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7429,15 +7429,15 @@ process_pkt:
7429 7429
7430 rtl8169_rx_vlan_tag(desc, skb); 7430 rtl8169_rx_vlan_tag(desc, skb);
7431 7431
7432 if (skb->pkt_type == PACKET_MULTICAST)
7433 dev->stats.multicast++;
7434
7432 napi_gro_receive(&tp->napi, skb); 7435 napi_gro_receive(&tp->napi, skb);
7433 7436
7434 u64_stats_update_begin(&tp->rx_stats.syncp); 7437 u64_stats_update_begin(&tp->rx_stats.syncp);
7435 tp->rx_stats.packets++; 7438 tp->rx_stats.packets++;
7436 tp->rx_stats.bytes += pkt_size; 7439 tp->rx_stats.bytes += pkt_size;
7437 u64_stats_update_end(&tp->rx_stats.syncp); 7440 u64_stats_update_end(&tp->rx_stats.syncp);
7438
7439 if (skb->pkt_type == PACKET_MULTICAST)
7440 dev->stats.multicast++;
7441 } 7441 }
7442release_descriptor: 7442release_descriptor:
7443 desc->opts2 = 0; 7443 desc->opts2 = 0;
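
The r8169 hunk is purely an ordering fix: napi_gro_receive() passes the skb
to the stack, which may free it, so skb->pkt_type must be read before the
call, not after. Reduced to its essence with stand-in types:

	#include <stdio.h>
	#include <stdlib.h>

	struct pkt { int type; };

	static unsigned long multicast;

	/* Stand-in for napi_gro_receive(): consumes and may free the packet. */
	static void hand_off(struct pkt *p) { free(p); }

	static void receive(struct pkt *p)
	{
		if (p->type == 1)	/* read fields before the handoff ... */
			multicast++;
		hand_off(p);		/* ... p must not be touched after this */
	}

	int main(void)
	{
		struct pkt *p = malloc(sizeof(*p));

		if (!p)
			return 1;
		p->type = 1;
		receive(p);
		printf("multicast = %lu\n", multicast);
		return 0;
	}
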
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index aa7b2083cb53..ee8d1ec61fab 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -408,8 +408,6 @@ static int ravb_dmac_init(struct net_device *ndev)
408 /* Interrupt enable: */ 408 /* Interrupt enable: */
409 /* Frame receive */ 409 /* Frame receive */
410 ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0); 410 ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
411 /* Receive FIFO full warning */
412 ravb_write(ndev, RIC1_RFWE, RIC1);
413 /* Receive FIFO full error, descriptor empty */ 411 /* Receive FIFO full error, descriptor empty */
414 ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2); 412 ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
415 /* Frame transmitted, timestamp FIFO updated */ 413 /* Frame transmitted, timestamp FIFO updated */
@@ -733,8 +731,10 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
733 ((tis & tic) & BIT(q))) { 731 ((tis & tic) & BIT(q))) {
734 if (napi_schedule_prep(&priv->napi[q])) { 732 if (napi_schedule_prep(&priv->napi[q])) {
735 /* Mask RX and TX interrupts */ 733 /* Mask RX and TX interrupts */
736 ravb_write(ndev, ric0 & ~BIT(q), RIC0); 734 ric0 &= ~BIT(q);
737 ravb_write(ndev, tic & ~BIT(q), TIC); 735 tic &= ~BIT(q);
736 ravb_write(ndev, ric0, RIC0);
737 ravb_write(ndev, tic, TIC);
738 __napi_schedule(&priv->napi[q]); 738 __napi_schedule(&priv->napi[q]);
739 } else { 739 } else {
740 netdev_warn(ndev, 740 netdev_warn(ndev,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index d288f1c928de..a3c42a376741 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -3422,7 +3422,7 @@ out:
3422 * with our request for slot reset the mmio_enabled callback will never be 3422 * with our request for slot reset the mmio_enabled callback will never be
3423 * called, and the link_reset callback is not used by AER or EEH mechanisms. 3423 * called, and the link_reset callback is not used by AER or EEH mechanisms.
3424 */ 3424 */
3425static struct pci_error_handlers efx_err_handlers = { 3425static const struct pci_error_handlers efx_err_handlers = {
3426 .error_detected = efx_io_error_detected, 3426 .error_detected = efx_io_error_detected,
3427 .slot_reset = efx_io_slot_reset, 3427 .slot_reset = efx_io_slot_reset,
3428 .resume = efx_io_resume, 3428 .resume = efx_io_resume,
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index c860c9007e49..219a99b7a631 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -809,22 +809,17 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
809 809
810static int smsc911x_phy_reset(struct smsc911x_data *pdata) 810static int smsc911x_phy_reset(struct smsc911x_data *pdata)
811{ 811{
812 struct phy_device *phy_dev = pdata->phy_dev;
813 unsigned int temp; 812 unsigned int temp;
814 unsigned int i = 100000; 813 unsigned int i = 100000;
815 814
816 BUG_ON(!phy_dev); 815 temp = smsc911x_reg_read(pdata, PMT_CTRL);
817 BUG_ON(!phy_dev->bus); 816 smsc911x_reg_write(pdata, PMT_CTRL, temp | PMT_CTRL_PHY_RST_);
818
819 SMSC_TRACE(pdata, hw, "Performing PHY BCR Reset");
820 smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, BMCR_RESET);
821 do { 817 do {
822 msleep(1); 818 msleep(1);
823 temp = smsc911x_mii_read(phy_dev->bus, phy_dev->addr, 819 temp = smsc911x_reg_read(pdata, PMT_CTRL);
824 MII_BMCR); 820 } while ((i--) && (temp & PMT_CTRL_PHY_RST_));
825 } while ((i--) && (temp & BMCR_RESET));
826 821
827 if (temp & BMCR_RESET) { 822 if (unlikely(temp & PMT_CTRL_PHY_RST_)) {
828 SMSC_WARN(pdata, hw, "PHY reset failed to complete"); 823 SMSC_WARN(pdata, hw, "PHY reset failed to complete");
829 return -EIO; 824 return -EIO;
830 } 825 }
@@ -2296,7 +2291,7 @@ static int smsc911x_init(struct net_device *dev)
2296 } 2291 }
2297 2292
2298 /* Reset the LAN911x */ 2293 /* Reset the LAN911x */
2299 if (smsc911x_soft_reset(pdata)) 2294 if (smsc911x_phy_reset(pdata) || smsc911x_soft_reset(pdata))
2300 return -ENODEV; 2295 return -ENODEV;
2301 2296
2302 dev->flags |= IFF_MULTICAST; 2297 dev->flags |= IFF_MULTICAST;
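
The replacement resets the PHY through the chip's own PMT_CTRL register
rather than an MDIO write, then polls the self-clearing bit with a bounded
budget: sleep, re-read, decrement, and finally test the bit once more to
classify success or timeout. A userspace sketch of that loop shape, with a
stub register that clears after a few reads:

	#include <errno.h>
	#include <stdio.h>
	#include <unistd.h>

	#define PHY_RST 0x1u

	static unsigned int pmt_ctrl = PHY_RST;

	static unsigned int reg_read(void)
	{
		static int reads;

		if (++reads >= 3)
			pmt_ctrl &= ~PHY_RST;	/* "hardware" clears the bit */
		return pmt_ctrl;
	}

	static int phy_reset(void)
	{
		unsigned int temp;
		unsigned int i = 100000;

		pmt_ctrl |= PHY_RST;		/* kick off the reset */
		do {
			usleep(1000);		/* msleep(1) in the driver */
			temp = reg_read();
		} while ((i--) && (temp & PHY_RST));

		if (temp & PHY_RST) {		/* budget spent, bit still set */
			fprintf(stderr, "PHY reset failed to complete\n");
			return -EIO;
		}
		return 0;
	}

	int main(void)
	{
		printf("phy_reset() = %d\n", phy_reset());
		return 0;
	}
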
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 9d89bdbf029f..82de68b1a452 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -337,11 +337,11 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
337 QSGMII_PHY_RX_SIGNAL_DETECT_EN | 337 QSGMII_PHY_RX_SIGNAL_DETECT_EN |
338 QSGMII_PHY_TX_DRIVER_EN | 338 QSGMII_PHY_TX_DRIVER_EN |
339 QSGMII_PHY_QSGMII_EN | 339 QSGMII_PHY_QSGMII_EN |
340 0x4 << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET | 340 0x4ul << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
341 0x3 << QSGMII_PHY_RX_DC_BIAS_OFFSET | 341 0x3ul << QSGMII_PHY_RX_DC_BIAS_OFFSET |
342 0x1 << QSGMII_PHY_RX_INPUT_EQU_OFFSET | 342 0x1ul << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
343 0x2 << QSGMII_PHY_CDR_PI_SLEW_OFFSET | 343 0x2ul << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
344 0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET); 344 0xCul << QSGMII_PHY_TX_DRV_AMP_OFFSET);
345 } 345 }
346 346
347 plat_dat->has_gmac = true; 347 plat_dat->has_gmac = true;
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ae68afd50a15..f38696ceee74 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -345,13 +345,6 @@ VELOCITY_PARAM(flow_control, "Enable flow control ability");
345*/ 345*/
346VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode"); 346VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
347 347
348#define VAL_PKT_LEN_DEF 0
349/* ValPktLen[] is used for setting the checksum offload ability of NIC.
350 0: Receive frame with invalid layer 2 length (Default)
351 1: Drop frame with invalid layer 2 length
352*/
353VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame");
354
355#define WOL_OPT_DEF 0 348#define WOL_OPT_DEF 0
356#define WOL_OPT_MIN 0 349#define WOL_OPT_MIN 0
357#define WOL_OPT_MAX 7 350#define WOL_OPT_MAX 7
@@ -494,7 +487,6 @@ static void velocity_get_options(struct velocity_opt *opts, int index,
494 487
495 velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname); 488 velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
496 velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname); 489 velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
497 velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
498 velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname); 490 velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
499 velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname); 491 velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
500 opts->numrx = (opts->numrx & ~3); 492 opts->numrx = (opts->numrx & ~3);
@@ -2055,8 +2047,9 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2055 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; 2047 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
2056 struct sk_buff *skb; 2048 struct sk_buff *skb;
2057 2049
2058 if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { 2050 if (unlikely(rd->rdesc0.RSR & (RSR_STP | RSR_EDP | RSR_RL))) {
2059 VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name); 2051 if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP))
2052 VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name);
2060 stats->rx_length_errors++; 2053 stats->rx_length_errors++;
2061 return -EINVAL; 2054 return -EINVAL;
2062 } 2055 }
@@ -2069,17 +2062,6 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2069 dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma, 2062 dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
2070 vptr->rx.buf_sz, DMA_FROM_DEVICE); 2063 vptr->rx.buf_sz, DMA_FROM_DEVICE);
2071 2064
2072 /*
2073 * Drop frame not meeting IEEE 802.3
2074 */
2075
2076 if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
2077 if (rd->rdesc0.RSR & RSR_RL) {
2078 stats->rx_length_errors++;
2079 return -EINVAL;
2080 }
2081 }
2082
2083 velocity_rx_csum(rd, skb); 2065 velocity_rx_csum(rd, skb);
2084 2066
2085 if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) { 2067 if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index bb8b5304d851..b103adb8d62e 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -599,7 +599,7 @@ int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
599 FJES_CMD_REQ_RES_CODE_BUSY) && 599 FJES_CMD_REQ_RES_CODE_BUSY) &&
600 (timeout > 0)) { 600 (timeout > 0)) {
601 msleep(200 + hw->my_epid * 20); 601 msleep(200 + hw->my_epid * 20);
602 timeout -= (200 + hw->my_epid * 20); 602 timeout -= (200 + hw->my_epid * 20);
603 603
604 res_buf->unshare_buffer.length = 0; 604 res_buf->unshare_buffer.length = 0;
605 res_buf->unshare_buffer.code = 0; 605 res_buf->unshare_buffer.code = 0;
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index d50887e3df6d..8c48bb2a94ea 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -254,7 +254,7 @@ acct:
254 } 254 }
255} 255}
256 256
257static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb, 257static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
258 bool local) 258 bool local)
259{ 259{
260 struct ipvl_dev *ipvlan = addr->master; 260 struct ipvl_dev *ipvlan = addr->master;
@@ -262,6 +262,7 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb,
262 unsigned int len; 262 unsigned int len;
263 rx_handler_result_t ret = RX_HANDLER_CONSUMED; 263 rx_handler_result_t ret = RX_HANDLER_CONSUMED;
264 bool success = false; 264 bool success = false;
265 struct sk_buff *skb = *pskb;
265 266
266 len = skb->len + ETH_HLEN; 267 len = skb->len + ETH_HLEN;
267 if (unlikely(!(dev->flags & IFF_UP))) { 268 if (unlikely(!(dev->flags & IFF_UP))) {
@@ -273,6 +274,7 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb,
273 if (!skb) 274 if (!skb)
274 goto out; 275 goto out;
275 276
277 *pskb = skb;
276 skb->dev = dev; 278 skb->dev = dev;
277 skb->pkt_type = PACKET_HOST; 279 skb->pkt_type = PACKET_HOST;
278 280
@@ -486,7 +488,7 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
486 488
487 addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true); 489 addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
488 if (addr) 490 if (addr)
489 return ipvlan_rcv_frame(addr, skb, true); 491 return ipvlan_rcv_frame(addr, &skb, true);
490 492
491out: 493out:
492 skb->dev = ipvlan->phy_dev; 494 skb->dev = ipvlan->phy_dev;
@@ -506,7 +508,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
506 if (lyr3h) { 508 if (lyr3h) {
507 addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true); 509 addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
508 if (addr) 510 if (addr)
509 return ipvlan_rcv_frame(addr, skb, true); 511 return ipvlan_rcv_frame(addr, &skb, true);
510 } 512 }
511 skb = skb_share_check(skb, GFP_ATOMIC); 513 skb = skb_share_check(skb, GFP_ATOMIC);
512 if (!skb) 514 if (!skb)
@@ -589,7 +591,7 @@ static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
589 591
590 addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); 592 addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
591 if (addr) 593 if (addr)
592 ret = ipvlan_rcv_frame(addr, skb, false); 594 ret = ipvlan_rcv_frame(addr, pskb, false);
593 595
594out: 596out:
595 return ret; 597 return ret;
@@ -626,7 +628,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
626 628
627 addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); 629 addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
628 if (addr) 630 if (addr)
629 ret = ipvlan_rcv_frame(addr, skb, false); 631 ret = ipvlan_rcv_frame(addr, pskb, false);
630 } 632 }
631 633
632 return ret; 634 return ret;
@@ -651,5 +653,5 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
651 WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n", 653 WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
652 port->mode); 654 port->mode);
653 kfree_skb(skb); 655 kfree_skb(skb);
654 return NET_RX_DROP; 656 return RX_HANDLER_CONSUMED;
655} 657}
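
Every ipvlan hunk serves a single fix: helpers reached from
ipvlan_rcv_frame() can replace the skb (skb_share_check() and friends return
a new buffer), so the function now takes struct sk_buff **pskb and writes the
fresh pointer back, preventing ipvlan_handle_frame() from continuing to use
freed memory; the macvlan hunks that follow apply the same *pskb update after
ip_check_defrag(). The last hunk is distinct: an rx_handler must return an
rx_handler_result_t, so RX_HANDLER_CONSUMED replaces the bogus NET_RX_DROP.
A self-contained sketch of the double-pointer contract, with plain buffers
standing in for skbs:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct buf { char data[64]; };

	/* Stand-in for skb_share_check(): may free the old buffer and
	 * return a fresh one, so the old pointer is dead afterwards. */
	static struct buf *maybe_replace(struct buf *b)
	{
		struct buf *copy = malloc(sizeof(*copy));

		if (!copy) {
			free(b);
			return NULL;
		}
		memcpy(copy, b, sizeof(*copy));
		free(b);
		return copy;
	}

	static int rcv_frame(struct buf **pb)
	{
		struct buf *b = maybe_replace(*pb);

		if (!b)
			return -1;
		*pb = b;	/* write the fresh pointer back to the caller */
		b->data[0] = 1;
		return 0;
	}

	int main(void)
	{
		struct buf *b = calloc(1, sizeof(*b));

		if (!b || rcv_frame(&b))  /* pass &b so replacement propagates */
			return 1;
		printf("data[0] = %d\n", b->data[0]);
		free(b);
		return 0;
	}
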
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 86f6c6292c27..06c8bfeaccd6 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -415,6 +415,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
415 skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN); 415 skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN);
416 if (!skb) 416 if (!skb)
417 return RX_HANDLER_CONSUMED; 417 return RX_HANDLER_CONSUMED;
418 *pskb = skb;
418 eth = eth_hdr(skb); 419 eth = eth_hdr(skb);
419 macvlan_forward_source(skb, port, eth->h_source); 420 macvlan_forward_source(skb, port, eth->h_source);
420 src = macvlan_hash_lookup(port, eth->h_source); 421 src = macvlan_hash_lookup(port, eth->h_source);
@@ -456,6 +457,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
456 goto out; 457 goto out;
457 } 458 }
458 459
460 *pskb = skb;
459 skb->dev = dev; 461 skb->dev = dev;
460 skb->pkt_type = PACKET_HOST; 462 skb->pkt_type = PACKET_HOST;
461 463
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index fabf11d32d27..2d020a3ec0b5 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -308,6 +308,8 @@ static struct phy_driver at803x_driver[] = {
308 .flags = PHY_HAS_INTERRUPT, 308 .flags = PHY_HAS_INTERRUPT,
309 .config_aneg = genphy_config_aneg, 309 .config_aneg = genphy_config_aneg,
310 .read_status = genphy_read_status, 310 .read_status = genphy_read_status,
311 .ack_interrupt = at803x_ack_interrupt,
312 .config_intr = at803x_config_intr,
311 .driver = { 313 .driver = {
312 .owner = THIS_MODULE, 314 .owner = THIS_MODULE,
313 }, 315 },
@@ -327,6 +329,8 @@ static struct phy_driver at803x_driver[] = {
327 .flags = PHY_HAS_INTERRUPT, 329 .flags = PHY_HAS_INTERRUPT,
328 .config_aneg = genphy_config_aneg, 330 .config_aneg = genphy_config_aneg,
329 .read_status = genphy_read_status, 331 .read_status = genphy_read_status,
332 .ack_interrupt = at803x_ack_interrupt,
333 .config_intr = at803x_config_intr,
330 .driver = { 334 .driver = {
331 .owner = THIS_MODULE, 335 .owner = THIS_MODULE,
332 }, 336 },
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 5de8d5827536..0240552b50f3 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1154,6 +1154,21 @@ static struct phy_driver marvell_drivers[] = {
1154 .driver = { .owner = THIS_MODULE }, 1154 .driver = { .owner = THIS_MODULE },
1155 }, 1155 },
1156 { 1156 {
1157 .phy_id = MARVELL_PHY_ID_88E1540,
1158 .phy_id_mask = MARVELL_PHY_ID_MASK,
1159 .name = "Marvell 88E1540",
1160 .features = PHY_GBIT_FEATURES,
1161 .flags = PHY_HAS_INTERRUPT,
1162 .config_aneg = &m88e1510_config_aneg,
1163 .read_status = &marvell_read_status,
1164 .ack_interrupt = &marvell_ack_interrupt,
1165 .config_intr = &marvell_config_intr,
1166 .did_interrupt = &m88e1121_did_interrupt,
1167 .resume = &genphy_resume,
1168 .suspend = &genphy_suspend,
1169 .driver = { .owner = THIS_MODULE },
1170 },
1171 {
1157 .phy_id = MARVELL_PHY_ID_88E3016, 1172 .phy_id = MARVELL_PHY_ID_88E3016,
1158 .phy_id_mask = MARVELL_PHY_ID_MASK, 1173 .phy_id_mask = MARVELL_PHY_ID_MASK,
1159 .name = "Marvell 88E3016", 1174 .name = "Marvell 88E3016",
@@ -1186,6 +1201,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
1186 { MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK }, 1201 { MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK },
1187 { MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK }, 1202 { MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK },
1188 { MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK }, 1203 { MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK },
1204 { MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
1189 { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, 1205 { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
1190 { } 1206 { }
1191}; 1207};
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index adb48abafc87..48ce6ef400fe 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -863,6 +863,9 @@ void phy_state_machine(struct work_struct *work)
863 needs_aneg = true; 863 needs_aneg = true;
864 break; 864 break;
865 case PHY_NOLINK: 865 case PHY_NOLINK:
866 if (phy_interrupt_is_valid(phydev))
867 break;
868
866 err = phy_read_status(phydev); 869 err = phy_read_status(phydev);
867 if (err) 870 if (err)
868 break; 871 break;
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 76cad712ddb2..dd295dbaa074 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -66,6 +66,7 @@
66#define PHY_ID_VSC8244 0x000fc6c0 66#define PHY_ID_VSC8244 0x000fc6c0
67#define PHY_ID_VSC8514 0x00070670 67#define PHY_ID_VSC8514 0x00070670
68#define PHY_ID_VSC8574 0x000704a0 68#define PHY_ID_VSC8574 0x000704a0
69#define PHY_ID_VSC8601 0x00070420
69#define PHY_ID_VSC8662 0x00070660 70#define PHY_ID_VSC8662 0x00070660
70#define PHY_ID_VSC8221 0x000fc550 71#define PHY_ID_VSC8221 0x000fc550
71#define PHY_ID_VSC8211 0x000fc4b0 72#define PHY_ID_VSC8211 0x000fc4b0
@@ -133,7 +134,8 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
133 (phydev->drv->phy_id == PHY_ID_VSC8234 || 134 (phydev->drv->phy_id == PHY_ID_VSC8234 ||
134 phydev->drv->phy_id == PHY_ID_VSC8244 || 135 phydev->drv->phy_id == PHY_ID_VSC8244 ||
135 phydev->drv->phy_id == PHY_ID_VSC8514 || 136 phydev->drv->phy_id == PHY_ID_VSC8514 ||
136 phydev->drv->phy_id == PHY_ID_VSC8574) ? 137 phydev->drv->phy_id == PHY_ID_VSC8574 ||
138 phydev->drv->phy_id == PHY_ID_VSC8601) ?
137 MII_VSC8244_IMASK_MASK : 139 MII_VSC8244_IMASK_MASK :
138 MII_VSC8221_IMASK_MASK); 140 MII_VSC8221_IMASK_MASK);
139 else { 141 else {
@@ -272,6 +274,18 @@ static struct phy_driver vsc82xx_driver[] = {
272 .config_intr = &vsc82xx_config_intr, 274 .config_intr = &vsc82xx_config_intr,
273 .driver = { .owner = THIS_MODULE,}, 275 .driver = { .owner = THIS_MODULE,},
274}, { 276}, {
277 .phy_id = PHY_ID_VSC8601,
278 .name = "Vitesse VSC8601",
279 .phy_id_mask = 0x000ffff0,
280 .features = PHY_GBIT_FEATURES,
281 .flags = PHY_HAS_INTERRUPT,
282 .config_init = &genphy_config_init,
283 .config_aneg = &genphy_config_aneg,
284 .read_status = &genphy_read_status,
285 .ack_interrupt = &vsc824x_ack_interrupt,
286 .config_intr = &vsc82xx_config_intr,
287 .driver = { .owner = THIS_MODULE,},
288}, {
275 .phy_id = PHY_ID_VSC8662, 289 .phy_id = PHY_ID_VSC8662,
276 .name = "Vitesse VSC8662", 290 .name = "Vitesse VSC8662",
277 .phy_id_mask = 0x000ffff0, 291 .phy_id_mask = 0x000ffff0,
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index c78d3cb1b464..3da70bf9936a 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -696,6 +696,11 @@ static const struct usb_device_id products[] = {
696 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), 696 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
697 .driver_info = (kernel_ulong_t) &wwan_info, 697 .driver_info = (kernel_ulong_t) &wwan_info,
698}, { 698}, {
699 /* Dell DW5580 modules */
700 USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x81ba, USB_CLASS_COMM,
701 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
702 .driver_info = (kernel_ulong_t)&wwan_info,
703}, {
699 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, 704 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
700 USB_CDC_PROTO_NONE), 705 USB_CDC_PROTO_NONE),
701 .driver_info = (unsigned long) &cdc_info, 706 .driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 46f4caddccbe..899ea4288197 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2157,12 +2157,13 @@ vmxnet3_set_mc(struct net_device *netdev)
2157 if (!netdev_mc_empty(netdev)) { 2157 if (!netdev_mc_empty(netdev)) {
2158 new_table = vmxnet3_copy_mc(netdev); 2158 new_table = vmxnet3_copy_mc(netdev);
2159 if (new_table) { 2159 if (new_table) {
2160 rxConf->mfTableLen = cpu_to_le16( 2160 size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
2161 netdev_mc_count(netdev) * ETH_ALEN); 2161
2162 rxConf->mfTableLen = cpu_to_le16(sz);
2162 new_table_pa = dma_map_single( 2163 new_table_pa = dma_map_single(
2163 &adapter->pdev->dev, 2164 &adapter->pdev->dev,
2164 new_table, 2165 new_table,
2165 rxConf->mfTableLen, 2166 sz,
2166 PCI_DMA_TODEVICE); 2167 PCI_DMA_TODEVICE);
2167 } 2168 }
2168 2169
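
The vmxnet3 change is an endianness fix: mfTableLen is stored little-endian
for the device, so reading it back as the size argument of dma_map_single()
hands a byte-swapped length to the mapping code on big-endian hosts. Keeping
the length in a host-order local and converting exactly once for the
device-visible field avoids the round trip. A sketch with a stub conversion
helper, not the kernel's:

	#include <stdint.h>
	#include <stdio.h>

	/* Stub modeling the kernel's cpu_to_le16(). */
	static uint16_t cpu_to_le16(uint16_t v)
	{
	#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		return (uint16_t)((v << 8) | (v >> 8));
	#else
		return v;
	#endif
	}

	struct rx_conf { uint16_t mf_table_len; /* little-endian on the wire */ };

	int main(void)
	{
		struct rx_conf conf;
		size_t sz = 25 * 6;		/* mc count * ETH_ALEN */

		conf.mf_table_len = cpu_to_le16((uint16_t)sz);
		printf("map %zu bytes\n", sz);	/* use sz, not the le16 field */
		return 0;
	}
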
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 3f859a55c035..4c58c83dc225 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.4.3.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.4.4.0-k"
73 73
74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
75#define VMXNET3_DRIVER_VERSION_NUM 0x01040300 75#define VMXNET3_DRIVER_VERSION_NUM 0x01040400
76 76
77#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e735c728e3b3..edb1984201e9 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1685,8 +1685,8 @@ static void pci_dma_configure(struct pci_dev *dev)
1685{ 1685{
1686 struct device *bridge = pci_get_host_bridge_device(dev); 1686 struct device *bridge = pci_get_host_bridge_device(dev);
1687 1687
1688 if (IS_ENABLED(CONFIG_OF) && dev->dev.of_node) { 1688 if (IS_ENABLED(CONFIG_OF) &&
1689 if (bridge->parent) 1689 bridge->parent && bridge->parent->of_node) {
1690 of_dma_configure(&dev->dev, bridge->parent->of_node); 1690 of_dma_configure(&dev->dev, bridge->parent->of_node);
1691 } else if (has_acpi_companion(bridge)) { 1691 } else if (has_acpi_companion(bridge)) {
1692 struct acpi_device *adev = to_acpi_device_node(bridge->fwnode); 1692 struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 548a18916a31..a831d18596a5 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1080,28 +1080,10 @@ void __init chsc_init_cleanup(void)
1080 free_page((unsigned long)sei_page); 1080 free_page((unsigned long)sei_page);
1081} 1081}
1082 1082
1083int chsc_enable_facility(int operation_code) 1083int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code)
1084{ 1084{
1085 unsigned long flags;
1086 int ret; 1085 int ret;
1087 struct {
1088 struct chsc_header request;
1089 u8 reserved1:4;
1090 u8 format:4;
1091 u8 reserved2;
1092 u16 operation_code;
1093 u32 reserved3;
1094 u32 reserved4;
1095 u32 operation_data_area[252];
1096 struct chsc_header response;
1097 u32 reserved5:4;
1098 u32 format2:4;
1099 u32 reserved6:24;
1100 } __attribute__ ((packed)) *sda_area;
1101 1086
1102 spin_lock_irqsave(&chsc_page_lock, flags);
1103 memset(chsc_page, 0, PAGE_SIZE);
1104 sda_area = chsc_page;
1105 sda_area->request.length = 0x0400; 1087 sda_area->request.length = 0x0400;
1106 sda_area->request.code = 0x0031; 1088 sda_area->request.code = 0x0031;
1107 sda_area->operation_code = operation_code; 1089 sda_area->operation_code = operation_code;
@@ -1119,10 +1101,25 @@ int chsc_enable_facility(int operation_code)
1119 default: 1101 default:
1120 ret = chsc_error_from_response(sda_area->response.code); 1102 ret = chsc_error_from_response(sda_area->response.code);
1121 } 1103 }
1104out:
1105 return ret;
1106}
1107
1108int chsc_enable_facility(int operation_code)
1109{
1110 struct chsc_sda_area *sda_area;
1111 unsigned long flags;
1112 int ret;
1113
1114 spin_lock_irqsave(&chsc_page_lock, flags);
1115 memset(chsc_page, 0, PAGE_SIZE);
1116 sda_area = chsc_page;
1117
1118 ret = __chsc_enable_facility(sda_area, operation_code);
1122 if (ret != 0) 1119 if (ret != 0)
1123 CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", 1120 CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
1124 operation_code, sda_area->response.code); 1121 operation_code, sda_area->response.code);
1125out: 1122
1126 spin_unlock_irqrestore(&chsc_page_lock, flags); 1123 spin_unlock_irqrestore(&chsc_page_lock, flags);
1127 return ret; 1124 return ret;
1128} 1125}
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 76c9b50700b2..0de134c3a204 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -115,6 +115,20 @@ struct chsc_scpd {
115 u8 data[PAGE_SIZE - 20]; 115 u8 data[PAGE_SIZE - 20];
116} __attribute__ ((packed)); 116} __attribute__ ((packed));
117 117
118struct chsc_sda_area {
119 struct chsc_header request;
120 u8 :4;
121 u8 format:4;
122 u8 :8;
123 u16 operation_code;
124 u32 :32;
125 u32 :32;
126 u32 operation_data_area[252];
127 struct chsc_header response;
128 u32 :4;
129 u32 format2:4;
130 u32 :24;
131} __packed __aligned(PAGE_SIZE);
118 132
119extern int chsc_get_ssd_info(struct subchannel_id schid, 133extern int chsc_get_ssd_info(struct subchannel_id schid,
120 struct chsc_ssd_info *ssd); 134 struct chsc_ssd_info *ssd);
@@ -122,6 +136,7 @@ extern int chsc_determine_css_characteristics(void);
122extern int chsc_init(void); 136extern int chsc_init(void);
123extern void chsc_init_cleanup(void); 137extern void chsc_init_cleanup(void);
124 138
139int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code);
125extern int chsc_enable_facility(int); 140extern int chsc_enable_facility(int);
126struct channel_subsystem; 141struct channel_subsystem;
127extern int chsc_secm(struct channel_subsystem *, int); 142extern int chsc_secm(struct channel_subsystem *, int);
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index b5620e818d6b..690b8547e828 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -925,18 +925,32 @@ void reipl_ccw_dev(struct ccw_dev_id *devid)
925 925
926int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) 926int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
927{ 927{
928 static struct chsc_sda_area sda_area __initdata;
928 struct subchannel_id schid; 929 struct subchannel_id schid;
929 struct schib schib; 930 struct schib schib;
930 931
931 schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; 932 schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
932 if (!schid.one) 933 if (!schid.one)
933 return -ENODEV; 934 return -ENODEV;
935
936 if (schid.ssid) {
937 /*
938 * Firmware should have already enabled MSS but whoever started
939 * the kernel might have initiated a channel subsystem reset.
940 * Ensure that MSS is enabled.
941 */
942 memset(&sda_area, 0, sizeof(sda_area));
943 if (__chsc_enable_facility(&sda_area, CHSC_SDA_OC_MSS))
944 return -ENODEV;
945 }
934 if (stsch_err(schid, &schib)) 946 if (stsch_err(schid, &schib))
935 return -ENODEV; 947 return -ENODEV;
936 if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) 948 if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
937 return -ENODEV; 949 return -ENODEV;
938 if (!schib.pmcw.dnv) 950 if (!schib.pmcw.dnv)
939 return -ENODEV; 951 return -ENODEV;
952
953 iplinfo->ssid = schid.ssid;
940 iplinfo->devno = schib.pmcw.dev; 954 iplinfo->devno = schib.pmcw.dev;
941 iplinfo->is_qdio = schib.pmcw.qf; 955 iplinfo->is_qdio = schib.pmcw.qf;
942 return 0; 956 return 0;
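
The chsc and cio changes form one thread: cio_get_iplinfo() can run after a
channel-subsystem reset, early enough that the shared chsc_page path is
unavailable, so the body of chsc_enable_facility() is split out as
__chsc_enable_facility(), which operates on any caller-supplied struct
chsc_sda_area. The boot code passes a static __initdata buffer, while the
public wrapper keeps the old contract of shared page plus lock. A minimal
model of the split, with a pthread mutex playing the spinlock:

	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>

	struct sda_area { unsigned short opcode; char payload[252]; };

	static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct sda_area shared_area;	/* the "chsc_page" */

	/* Core operation: no locking, works on any buffer the caller owns. */
	static int __enable_facility(struct sda_area *sda, int opcode)
	{
		sda->opcode = (unsigned short)opcode;
		return 0;			/* issue the request here */
	}

	/* Public wrapper: keeps the old contract (shared buffer under lock). */
	static int enable_facility(int opcode)
	{
		int ret;

		pthread_mutex_lock(&page_lock);
		memset(&shared_area, 0, sizeof(shared_area));
		ret = __enable_facility(&shared_area, opcode);
		pthread_mutex_unlock(&page_lock);
		return ret;
	}

	int main(void)
	{
		static struct sda_area early_area; /* early boot: own buffer */

		printf("early: %d, normal: %d\n",
		       __enable_facility(&early_area, 0x31),
		       enable_facility(0x31));
		return 0;
	}
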
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 2ee3053bdc12..489e703dc82d 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -702,17 +702,12 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
702 css->global_pgid.pgid_high.ext_cssid.version = 0x80; 702 css->global_pgid.pgid_high.ext_cssid.version = 0x80;
703 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; 703 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
704 } else { 704 } else {
705#ifdef CONFIG_SMP
706 css->global_pgid.pgid_high.cpu_addr = stap(); 705 css->global_pgid.pgid_high.cpu_addr = stap();
707#else
708 css->global_pgid.pgid_high.cpu_addr = 0;
709#endif
710 } 706 }
711 get_cpu_id(&cpu_id); 707 get_cpu_id(&cpu_id);
712 css->global_pgid.cpu_id = cpu_id.ident; 708 css->global_pgid.cpu_id = cpu_id.ident;
713 css->global_pgid.cpu_model = cpu_id.machine; 709 css->global_pgid.cpu_model = cpu_id.machine;
714 css->global_pgid.tod_high = tod_high; 710 css->global_pgid.tod_high = tod_high;
715
716} 711}
717 712
718static void 713static void
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 57f710b3c8a4..b8ab18676e69 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -3,6 +3,9 @@
3# 3#
4 4
5ap-objs := ap_bus.o 5ap-objs := ap_bus.o
6obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcixcc.o 6# zcrypt_api depends on ap
7obj-$(CONFIG_ZCRYPT) += zcrypt_cex2a.o zcrypt_cex4.o 7obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o
8# msgtype* depend on zcrypt_api
8obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o 9obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o
10# adapter drivers depend on ap, zcrypt_api and msgtype*
11obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 9cb3dfbcaddb..61f768518a34 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -74,6 +74,7 @@ static struct device *ap_root_device = NULL;
74static struct ap_config_info *ap_configuration; 74static struct ap_config_info *ap_configuration;
75static DEFINE_SPINLOCK(ap_device_list_lock); 75static DEFINE_SPINLOCK(ap_device_list_lock);
76static LIST_HEAD(ap_device_list); 76static LIST_HEAD(ap_device_list);
77static bool initialised;
77 78
78/* 79/*
79 * Workqueue timer for bus rescan. 80 * Workqueue timer for bus rescan.
@@ -1384,6 +1385,9 @@ int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
1384{ 1385{
1385 struct device_driver *drv = &ap_drv->driver; 1386 struct device_driver *drv = &ap_drv->driver;
1386 1387
1388 if (!initialised)
1389 return -ENODEV;
1390
1387 drv->bus = &ap_bus_type; 1391 drv->bus = &ap_bus_type;
1388 drv->probe = ap_device_probe; 1392 drv->probe = ap_device_probe;
1389 drv->remove = ap_device_remove; 1393 drv->remove = ap_device_remove;
@@ -1808,6 +1812,7 @@ int __init ap_module_init(void)
1808 goto out_pm; 1812 goto out_pm;
1809 1813
1810 queue_work(system_long_wq, &ap_scan_work); 1814 queue_work(system_long_wq, &ap_scan_work);
1815 initialised = true;
1811 1816
1812 return 0; 1817 return 0;
1813 1818
@@ -1837,6 +1842,7 @@ void ap_module_exit(void)
1837{ 1842{
1838 int i; 1843 int i;
1839 1844
1845 initialised = false;
1840 ap_reset_domain(); 1846 ap_reset_domain();
1841 ap_poll_thread_stop(); 1847 ap_poll_thread_stop();
1842 del_timer_sync(&ap_config_timer); 1848 del_timer_sync(&ap_config_timer);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index a9603ebbc1f8..9f8fa42c062c 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -317,11 +317,9 @@ EXPORT_SYMBOL(zcrypt_device_unregister);
317 317
318void zcrypt_msgtype_register(struct zcrypt_ops *zops) 318void zcrypt_msgtype_register(struct zcrypt_ops *zops)
319{ 319{
320 if (zops->owner) { 320 spin_lock_bh(&zcrypt_ops_list_lock);
321 spin_lock_bh(&zcrypt_ops_list_lock); 321 list_add_tail(&zops->list, &zcrypt_ops_list);
322 list_add_tail(&zops->list, &zcrypt_ops_list); 322 spin_unlock_bh(&zcrypt_ops_list_lock);
323 spin_unlock_bh(&zcrypt_ops_list_lock);
324 }
325} 323}
326EXPORT_SYMBOL(zcrypt_msgtype_register); 324EXPORT_SYMBOL(zcrypt_msgtype_register);
327 325
@@ -342,7 +340,7 @@ struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant)
342 spin_lock_bh(&zcrypt_ops_list_lock); 340 spin_lock_bh(&zcrypt_ops_list_lock);
343 list_for_each_entry(zops, &zcrypt_ops_list, list) { 341 list_for_each_entry(zops, &zcrypt_ops_list, list) {
344 if ((zops->variant == variant) && 342 if ((zops->variant == variant) &&
345 (!strncmp(zops->owner->name, name, MODULE_NAME_LEN))) { 343 (!strncmp(zops->name, name, sizeof(zops->name)))) {
346 found = 1; 344 found = 1;
347 break; 345 break;
348 } 346 }
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 750876891931..38618f05ad92 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -96,6 +96,7 @@ struct zcrypt_ops {
96 struct list_head list; /* zcrypt ops list. */ 96 struct list_head list; /* zcrypt ops list. */
97 struct module *owner; 97 struct module *owner;
98 int variant; 98 int variant;
99 char name[128];
99}; 100};
100 101
101struct zcrypt_device { 102struct zcrypt_device {
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 71ceee9137a8..74edf2934e7c 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -513,6 +513,7 @@ static struct zcrypt_ops zcrypt_msgtype50_ops = {
513 .rsa_modexpo = zcrypt_cex2a_modexpo, 513 .rsa_modexpo = zcrypt_cex2a_modexpo,
514 .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt, 514 .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
515 .owner = THIS_MODULE, 515 .owner = THIS_MODULE,
516 .name = MSGTYPE50_NAME,
516 .variant = MSGTYPE50_VARIANT_DEFAULT, 517 .variant = MSGTYPE50_VARIANT_DEFAULT,
517}; 518};
518 519
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 74762214193b..9a2dd472c1cc 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -1119,6 +1119,7 @@ static long zcrypt_msgtype6_rng(struct zcrypt_device *zdev,
1119 */ 1119 */
1120static struct zcrypt_ops zcrypt_msgtype6_norng_ops = { 1120static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
1121 .owner = THIS_MODULE, 1121 .owner = THIS_MODULE,
1122 .name = MSGTYPE06_NAME,
1122 .variant = MSGTYPE06_VARIANT_NORNG, 1123 .variant = MSGTYPE06_VARIANT_NORNG,
1123 .rsa_modexpo = zcrypt_msgtype6_modexpo, 1124 .rsa_modexpo = zcrypt_msgtype6_modexpo,
1124 .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt, 1125 .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
@@ -1127,6 +1128,7 @@ static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
1127 1128
1128static struct zcrypt_ops zcrypt_msgtype6_ops = { 1129static struct zcrypt_ops zcrypt_msgtype6_ops = {
1129 .owner = THIS_MODULE, 1130 .owner = THIS_MODULE,
1131 .name = MSGTYPE06_NAME,
1130 .variant = MSGTYPE06_VARIANT_DEFAULT, 1132 .variant = MSGTYPE06_VARIANT_DEFAULT,
1131 .rsa_modexpo = zcrypt_msgtype6_modexpo, 1133 .rsa_modexpo = zcrypt_msgtype6_modexpo,
1132 .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt, 1134 .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
@@ -1136,6 +1138,7 @@ static struct zcrypt_ops zcrypt_msgtype6_ops = {
1136 1138
1137static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = { 1139static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = {
1138 .owner = THIS_MODULE, 1140 .owner = THIS_MODULE,
1141 .name = MSGTYPE06_NAME,
1139 .variant = MSGTYPE06_VARIANT_EP11, 1142 .variant = MSGTYPE06_VARIANT_EP11,
1140 .rsa_modexpo = NULL, 1143 .rsa_modexpo = NULL,
1141 .rsa_modexpo_crt = NULL, 1144 .rsa_modexpo_crt = NULL,
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index 25abd4eb7d10..91a003011acf 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -34,7 +34,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
34 34
35static int __init sh_pm_runtime_init(void) 35static int __init sh_pm_runtime_init(void)
36{ 36{
37 if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { 37 if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
38 if (!of_find_compatible_node(NULL, NULL, 38 if (!of_find_compatible_node(NULL, NULL,
39 "renesas,cpg-mstp-clocks")) 39 "renesas,cpg-mstp-clocks"))
40 return 0; 40 return 0;
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index 6d5b38d69578..9d7f0004d2d7 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -18,7 +18,8 @@ source "drivers/staging/iio/resolver/Kconfig"
18source "drivers/staging/iio/trigger/Kconfig" 18source "drivers/staging/iio/trigger/Kconfig"
19 19
20config IIO_DUMMY_EVGEN 20config IIO_DUMMY_EVGEN
21 tristate 21 tristate
22 select IRQ_WORK
22 23
23config IIO_SIMPLE_DUMMY 24config IIO_SIMPLE_DUMMY
24 tristate "An example driver with no hardware requirements" 25 tristate "An example driver with no hardware requirements"
diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c
index d11c54b72186..b51f237cd817 100644
--- a/drivers/staging/iio/adc/lpc32xx_adc.c
+++ b/drivers/staging/iio/adc/lpc32xx_adc.c
@@ -76,7 +76,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev,
76 76
77 if (mask == IIO_CHAN_INFO_RAW) { 77 if (mask == IIO_CHAN_INFO_RAW) {
78 mutex_lock(&indio_dev->mlock); 78 mutex_lock(&indio_dev->mlock);
79 clk_enable(info->clk); 79 clk_prepare_enable(info->clk);
80 /* Measurement setup */ 80 /* Measurement setup */
81 __raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm, 81 __raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm,
82 LPC32XX_ADC_SELECT(info->adc_base)); 82 LPC32XX_ADC_SELECT(info->adc_base));
@@ -84,7 +84,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev,
84 __raw_writel(AD_PDN_CTRL | AD_STROBE, 84 __raw_writel(AD_PDN_CTRL | AD_STROBE,
85 LPC32XX_ADC_CTRL(info->adc_base)); 85 LPC32XX_ADC_CTRL(info->adc_base));
86 wait_for_completion(&info->completion); /* set by ISR */ 86 wait_for_completion(&info->completion); /* set by ISR */
87 clk_disable(info->clk); 87 clk_disable_unprepare(info->clk);
88 *val = info->value; 88 *val = info->value;
89 mutex_unlock(&indio_dev->mlock); 89 mutex_unlock(&indio_dev->mlock);
90 90
diff --git a/drivers/staging/wilc1000/coreconfigurator.c b/drivers/staging/wilc1000/coreconfigurator.c
index e10c6ffa698a..9568bdb6319b 100644
--- a/drivers/staging/wilc1000/coreconfigurator.c
+++ b/drivers/staging/wilc1000/coreconfigurator.c
@@ -13,12 +13,8 @@
13#include "wilc_wlan.h" 13#include "wilc_wlan.h"
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/etherdevice.h>
17#define TAG_PARAM_OFFSET (MAC_HDR_LEN + TIME_STAMP_LEN + \ 16#define TAG_PARAM_OFFSET (MAC_HDR_LEN + TIME_STAMP_LEN + \
18 BEACON_INTERVAL_LEN + CAP_INFO_LEN) 17 BEACON_INTERVAL_LEN + CAP_INFO_LEN)
19#define ADDR1 4
20#define ADDR2 10
21#define ADDR3 16
22 18
23/* Basic Frame Type Codes (2-bit) */ 19/* Basic Frame Type Codes (2-bit) */
24enum basic_frame_type { 20enum basic_frame_type {
@@ -175,32 +171,38 @@ static inline u8 get_from_ds(u8 *header)
175 return ((header[1] & 0x02) >> 1); 171 return ((header[1] & 0x02) >> 1);
176} 172}
177 173
174/* This function extracts the MAC Address in 'address1' field of the MAC */
175/* header and updates the MAC Address in the allocated 'addr' variable. */
176static inline void get_address1(u8 *pu8msa, u8 *addr)
177{
178 memcpy(addr, pu8msa + 4, 6);
179}
180
181/* This function extracts the MAC Address in 'address2' field of the MAC */
182/* header and updates the MAC Address in the allocated 'addr' variable. */
183static inline void get_address2(u8 *pu8msa, u8 *addr)
184{
185 memcpy(addr, pu8msa + 10, 6);
186}
187
188/* This function extracts the MAC Address in 'address3' field of the MAC */
189/* header and updates the MAC Address in the allocated 'addr' variable. */
190static inline void get_address3(u8 *pu8msa, u8 *addr)
191{
192 memcpy(addr, pu8msa + 16, 6);
193}
194
178/* This function extracts the BSSID from the incoming WLAN packet based on */ 195/* This function extracts the BSSID from the incoming WLAN packet based on */
179/* the 'from ds' bit, and updates the MAC Address in the allocated 'data' */ 196/* the 'from ds' bit, and updates the MAC Address in the allocated 'addr' */
180/* variable. */ 197/* variable. */
181static inline void get_BSSID(u8 *data, u8 *bssid) 198static inline void get_BSSID(u8 *data, u8 *bssid)
182{ 199{
183 if (get_from_ds(data) == 1) 200 if (get_from_ds(data) == 1)
184 /* 201 get_address2(data, bssid);
185 * Extract the MAC Address in 'address2' field of the MAC
186 * header and update the MAC Address in the allocated 'data'
187 * variable.
188 */
189 ether_addr_copy(data, bssid + ADDR2);
190 else if (get_to_ds(data) == 1) 202 else if (get_to_ds(data) == 1)
191 /* 203 get_address1(data, bssid);
192 * Extract the MAC Address in 'address1' field of the MAC
193 * header and update the MAC Address in the allocated 'data'
194 * variable.
195 */
196 ether_addr_copy(data, bssid + ADDR1);
197 else 204 else
198 /* 205 get_address3(data, bssid);
199 * Extract the MAC Address in 'address3' field of the MAC
200 * header and update the MAC Address in the allocated 'data'
201 * variable.
202 */
203 ether_addr_copy(data, bssid + ADDR3);
204} 206}
205 207
206/* This function extracts the SSID from a beacon/probe response frame */ 208/* This function extracts the SSID from a beacon/probe response frame */
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 13844261cd5f..ed776149261e 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -169,7 +169,7 @@ static inline int tty_copy_to_user(struct tty_struct *tty,
169{ 169{
170 struct n_tty_data *ldata = tty->disc_data; 170 struct n_tty_data *ldata = tty->disc_data;
171 171
172 tty_audit_add_data(tty, to, n, ldata->icanon); 172 tty_audit_add_data(tty, from, n, ldata->icanon);
173 return copy_to_user(to, from, n); 173 return copy_to_user(to, from, n);
174} 174}
175 175
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index c0533a57ec53..910bfee5a88b 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -60,3 +60,4 @@ int fsl8250_handle_irq(struct uart_port *port)
60 spin_unlock_irqrestore(&up->port.lock, flags); 60 spin_unlock_irqrestore(&up->port.lock, flags);
61 return 1; 61 return 1;
62} 62}
63EXPORT_SYMBOL_GPL(fsl8250_handle_irq);
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index e6f5e12a2d83..6412f1455beb 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -373,6 +373,7 @@ config SERIAL_8250_MID
373 depends on SERIAL_8250 && PCI 373 depends on SERIAL_8250 && PCI
374 select HSU_DMA if SERIAL_8250_DMA 374 select HSU_DMA if SERIAL_8250_DMA
375 select HSU_DMA_PCI if X86_INTEL_MID 375 select HSU_DMA_PCI if X86_INTEL_MID
376 select RATIONAL
376 help 377 help
377 Selecting this option will enable handling of the extra features 378 Selecting this option will enable handling of the extra features
378 present on the UART found on Intel Medfield SOC and various other 379 present on the UART found on Intel Medfield SOC and various other
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 1aec4404062d..f38beb28e7ae 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1539,7 +1539,6 @@ config SERIAL_FSL_LPUART
1539 tristate "Freescale lpuart serial port support" 1539 tristate "Freescale lpuart serial port support"
1540 depends on HAS_DMA 1540 depends on HAS_DMA
1541 select SERIAL_CORE 1541 select SERIAL_CORE
1542 select SERIAL_EARLYCON
1543 help 1542 help
1544 Support for the on-chip lpuart on some Freescale SOCs. 1543 Support for the on-chip lpuart on some Freescale SOCs.
1545 1544
@@ -1547,6 +1546,7 @@ config SERIAL_FSL_LPUART_CONSOLE
1547 bool "Console on Freescale lpuart serial port" 1546 bool "Console on Freescale lpuart serial port"
1548 depends on SERIAL_FSL_LPUART=y 1547 depends on SERIAL_FSL_LPUART=y
1549 select SERIAL_CORE_CONSOLE 1548 select SERIAL_CORE_CONSOLE
1549 select SERIAL_EARLYCON
1550 help 1550 help
1551 If you have enabled the lpuart serial port on the Freescale SoCs, 1551 If you have enabled the lpuart serial port on the Freescale SoCs,
1552 you can make it the console by answering Y to this option. 1552 you can make it the console by answering Y to this option.
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 681e0f3d5e0e..a1c0a89d9c7f 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -474,7 +474,7 @@ static int bcm_uart_startup(struct uart_port *port)
474 474
475 /* register irq and enable rx interrupts */ 475 /* register irq and enable rx interrupts */
476 ret = request_irq(port->irq, bcm_uart_interrupt, 0, 476 ret = request_irq(port->irq, bcm_uart_interrupt, 0,
477 bcm_uart_type(port), port); 477 dev_name(port->dev), port);
478 if (ret) 478 if (ret)
479 return ret; 479 return ret;
480 bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG); 480 bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG);
diff --git a/drivers/tty/serial/etraxfs-uart.c b/drivers/tty/serial/etraxfs-uart.c
index 6813e316e9ff..2f80bc7e44fb 100644
--- a/drivers/tty/serial/etraxfs-uart.c
+++ b/drivers/tty/serial/etraxfs-uart.c
@@ -894,7 +894,7 @@ static int etraxfs_uart_probe(struct platform_device *pdev)
894 up->regi_ser = of_iomap(np, 0); 894 up->regi_ser = of_iomap(np, 0);
895 up->port.dev = &pdev->dev; 895 up->port.dev = &pdev->dev;
896 896
897 up->gpios = mctrl_gpio_init(&pdev->dev, 0); 897 up->gpios = mctrl_gpio_init_noauto(&pdev->dev, 0);
898 if (IS_ERR(up->gpios)) 898 if (IS_ERR(up->gpios))
899 return PTR_ERR(up->gpios); 899 return PTR_ERR(up->gpios);
900 900
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index 90ca082935f6..3d245cd3d8e6 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -265,7 +265,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
265 * 265 *
266 * Audit @data of @size from @tty, if necessary. 266 * Audit @data of @size from @tty, if necessary.
267 */ 267 */
268void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, 268void tty_audit_add_data(struct tty_struct *tty, const void *data,
269 size_t size, unsigned icanon) 269 size_t size, unsigned icanon)
270{ 270{
271 struct tty_audit_buf *buf; 271 struct tty_audit_buf *buf;
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 0c41dbcb90b8..bcc8e1e8bb72 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1282,18 +1282,22 @@ int tty_send_xchar(struct tty_struct *tty, char ch)
1282 int was_stopped = tty->stopped; 1282 int was_stopped = tty->stopped;
1283 1283
1284 if (tty->ops->send_xchar) { 1284 if (tty->ops->send_xchar) {
1285 down_read(&tty->termios_rwsem);
1285 tty->ops->send_xchar(tty, ch); 1286 tty->ops->send_xchar(tty, ch);
1287 up_read(&tty->termios_rwsem);
1286 return 0; 1288 return 0;
1287 } 1289 }
1288 1290
1289 if (tty_write_lock(tty, 0) < 0) 1291 if (tty_write_lock(tty, 0) < 0)
1290 return -ERESTARTSYS; 1292 return -ERESTARTSYS;
1291 1293
1294 down_read(&tty->termios_rwsem);
1292 if (was_stopped) 1295 if (was_stopped)
1293 start_tty(tty); 1296 start_tty(tty);
1294 tty->ops->write(tty, &ch, 1); 1297 tty->ops->write(tty, &ch, 1);
1295 if (was_stopped) 1298 if (was_stopped)
1296 stop_tty(tty); 1299 stop_tty(tty);
1300 up_read(&tty->termios_rwsem);
1297 tty_write_unlock(tty); 1301 tty_write_unlock(tty);
1298 return 0; 1302 return 0;
1299} 1303}
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 9c5aebfe7053..1445dd39aa62 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -1147,16 +1147,12 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
1147 spin_unlock_irq(&tty->flow_lock); 1147 spin_unlock_irq(&tty->flow_lock);
1148 break; 1148 break;
1149 case TCIOFF: 1149 case TCIOFF:
1150 down_read(&tty->termios_rwsem);
1151 if (STOP_CHAR(tty) != __DISABLED_CHAR) 1150 if (STOP_CHAR(tty) != __DISABLED_CHAR)
1152 retval = tty_send_xchar(tty, STOP_CHAR(tty)); 1151 retval = tty_send_xchar(tty, STOP_CHAR(tty));
1153 up_read(&tty->termios_rwsem);
1154 break; 1152 break;
1155 case TCION: 1153 case TCION:
1156 down_read(&tty->termios_rwsem);
1157 if (START_CHAR(tty) != __DISABLED_CHAR) 1154 if (START_CHAR(tty) != __DISABLED_CHAR)
1158 retval = tty_send_xchar(tty, START_CHAR(tty)); 1155 retval = tty_send_xchar(tty, START_CHAR(tty));
1159 up_read(&tty->termios_rwsem);
1160 break; 1156 break;
1161 default: 1157 default:
1162 return -EINVAL; 1158 return -EINVAL;
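
Read together with the tty_io.c hunk above: the ioctl helper used to take termios_rwsem and then call tty_send_xchar(), which takes the tty write lock, so the two paths disagreed on lock order. Moving the rwsem acquisition inside tty_send_xchar(), after tty_write_lock(), gives every sender the same write-lock-then-termios order. A userspace sketch of that discipline, with hypothetical names and pthread locks standing in for the kernel primitives:

#include <pthread.h>

static pthread_mutex_t write_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_rwlock_t termios_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Every sender funnels through here, so the two locks are always
 * taken in the same order: write_lock first, termios_lock second. */
static void send_xchar(char ch)
{
	pthread_mutex_lock(&write_lock);
	pthread_rwlock_rdlock(&termios_lock);
	(void)ch;	/* transmit ch here, with termios state stable */
	pthread_rwlock_unlock(&termios_lock);
	pthread_mutex_unlock(&write_lock);
}
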
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 5af8f1874c1a..629e3c865072 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -592,7 +592,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
592 592
593 /* Restart the work queue in case no characters kick it off. Safe if 593 /* Restart the work queue in case no characters kick it off. Safe if
594 already running */ 594 already running */
595 schedule_work(&tty->port->buf.work); 595 tty_buffer_restart_work(tty->port);
596 596
597 tty_unlock(tty); 597 tty_unlock(tty);
598 return retval; 598 return retval;
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 6ccbf60cdd5c..5a048b7b92e8 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -84,6 +84,12 @@ struct ci_hdrc_imx_data {
84 struct imx_usbmisc_data *usbmisc_data; 84 struct imx_usbmisc_data *usbmisc_data;
85 bool supports_runtime_pm; 85 bool supports_runtime_pm;
86 bool in_lpm; 86 bool in_lpm;
87 /* SoCs before i.mx6 (except imx23/imx28) need three clks */
88 bool need_three_clks;
89 struct clk *clk_ipg;
90 struct clk *clk_ahb;
91 struct clk *clk_per;
92 /* --------------------------------- */
87}; 93};
88 94
89/* Common functions shared by usbmisc drivers */ 95/* Common functions shared by usbmisc drivers */
@@ -135,6 +141,102 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
135} 141}
136 142
137/* End of common functions shared by usbmisc drivers*/ 143/* End of common functions shared by usbmisc drivers*/
144static int imx_get_clks(struct device *dev)
145{
146 struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
147 int ret = 0;
148
149 data->clk_ipg = devm_clk_get(dev, "ipg");
150 if (IS_ERR(data->clk_ipg)) {
151 /* If the platform needs only one clock */
152 data->clk = devm_clk_get(dev, NULL);
153 if (IS_ERR(data->clk)) {
154 ret = PTR_ERR(data->clk);
155 dev_err(dev,
156 "Failed to get clks, err=%ld,%ld\n",
157 PTR_ERR(data->clk), PTR_ERR(data->clk_ipg));
158 return ret;
159 }
160 return ret;
161 }
162
163 data->clk_ahb = devm_clk_get(dev, "ahb");
164 if (IS_ERR(data->clk_ahb)) {
165 ret = PTR_ERR(data->clk_ahb);
166 dev_err(dev,
167 "Failed to get ahb clock, err=%d\n", ret);
168 return ret;
169 }
170
171 data->clk_per = devm_clk_get(dev, "per");
172 if (IS_ERR(data->clk_per)) {
173 ret = PTR_ERR(data->clk_per);
174 dev_err(dev,
175 "Failed to get per clock, err=%d\n", ret);
176 return ret;
177 }
178
179 data->need_three_clks = true;
180 return ret;
181}
182
183static int imx_prepare_enable_clks(struct device *dev)
184{
185 struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
186 int ret = 0;
187
188 if (data->need_three_clks) {
189 ret = clk_prepare_enable(data->clk_ipg);
190 if (ret) {
191 dev_err(dev,
192 "Failed to prepare/enable ipg clk, err=%d\n",
193 ret);
194 return ret;
195 }
196
197 ret = clk_prepare_enable(data->clk_ahb);
198 if (ret) {
199 dev_err(dev,
200 "Failed to prepare/enable ahb clk, err=%d\n",
201 ret);
202 clk_disable_unprepare(data->clk_ipg);
203 return ret;
204 }
205
206 ret = clk_prepare_enable(data->clk_per);
207 if (ret) {
208 dev_err(dev,
209 "Failed to prepare/enable per clk, err=%d\n",
210 ret);
211 clk_disable_unprepare(data->clk_ahb);
212 clk_disable_unprepare(data->clk_ipg);
213 return ret;
214 }
215 } else {
216 ret = clk_prepare_enable(data->clk);
217 if (ret) {
218 dev_err(dev,
219 "Failed to prepare/enable clk, err=%d\n",
220 ret);
221 return ret;
222 }
223 }
224
225 return ret;
226}
227
228static void imx_disable_unprepare_clks(struct device *dev)
229{
230 struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
231
232 if (data->need_three_clks) {
233 clk_disable_unprepare(data->clk_per);
234 clk_disable_unprepare(data->clk_ahb);
235 clk_disable_unprepare(data->clk_ipg);
236 } else {
237 clk_disable_unprepare(data->clk);
238 }
239}
138 240
139static int ci_hdrc_imx_probe(struct platform_device *pdev) 241static int ci_hdrc_imx_probe(struct platform_device *pdev)
140{ 242{
@@ -145,31 +247,31 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
145 .flags = CI_HDRC_SET_NON_ZERO_TTHA, 247 .flags = CI_HDRC_SET_NON_ZERO_TTHA,
146 }; 248 };
147 int ret; 249 int ret;
148 const struct of_device_id *of_id = 250 const struct of_device_id *of_id;
149 of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev); 251 const struct ci_hdrc_imx_platform_flag *imx_platform_flag;
150 const struct ci_hdrc_imx_platform_flag *imx_platform_flag = of_id->data; 252
253 of_id = of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev);
254 if (!of_id)
255 return -ENODEV;
256
257 imx_platform_flag = of_id->data;
151 258
152 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); 259 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
153 if (!data) 260 if (!data)
154 return -ENOMEM; 261 return -ENOMEM;
155 262
263 platform_set_drvdata(pdev, data);
156 data->usbmisc_data = usbmisc_get_init_data(&pdev->dev); 264 data->usbmisc_data = usbmisc_get_init_data(&pdev->dev);
157 if (IS_ERR(data->usbmisc_data)) 265 if (IS_ERR(data->usbmisc_data))
158 return PTR_ERR(data->usbmisc_data); 266 return PTR_ERR(data->usbmisc_data);
159 267
160 data->clk = devm_clk_get(&pdev->dev, NULL); 268 ret = imx_get_clks(&pdev->dev);
161 if (IS_ERR(data->clk)) { 269 if (ret)
162 dev_err(&pdev->dev, 270 return ret;
163 "Failed to get clock, err=%ld\n", PTR_ERR(data->clk));
164 return PTR_ERR(data->clk);
165 }
166 271
167 ret = clk_prepare_enable(data->clk); 272 ret = imx_prepare_enable_clks(&pdev->dev);
168 if (ret) { 273 if (ret)
169 dev_err(&pdev->dev,
170 "Failed to prepare or enable clock, err=%d\n", ret);
171 return ret; 274 return ret;
172 }
173 275
174 data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0); 276 data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0);
175 if (IS_ERR(data->phy)) { 277 if (IS_ERR(data->phy)) {
@@ -212,8 +314,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
212 goto disable_device; 314 goto disable_device;
213 } 315 }
214 316
215 platform_set_drvdata(pdev, data);
216
217 if (data->supports_runtime_pm) { 317 if (data->supports_runtime_pm) {
218 pm_runtime_set_active(&pdev->dev); 318 pm_runtime_set_active(&pdev->dev);
219 pm_runtime_enable(&pdev->dev); 319 pm_runtime_enable(&pdev->dev);
@@ -226,7 +326,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
226disable_device: 326disable_device:
227 ci_hdrc_remove_device(data->ci_pdev); 327 ci_hdrc_remove_device(data->ci_pdev);
228err_clk: 328err_clk:
229 clk_disable_unprepare(data->clk); 329 imx_disable_unprepare_clks(&pdev->dev);
230 return ret; 330 return ret;
231} 331}
232 332
@@ -240,7 +340,7 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
240 pm_runtime_put_noidle(&pdev->dev); 340 pm_runtime_put_noidle(&pdev->dev);
241 } 341 }
242 ci_hdrc_remove_device(data->ci_pdev); 342 ci_hdrc_remove_device(data->ci_pdev);
243 clk_disable_unprepare(data->clk); 343 imx_disable_unprepare_clks(&pdev->dev);
244 344
245 return 0; 345 return 0;
246} 346}
@@ -252,7 +352,7 @@ static int imx_controller_suspend(struct device *dev)
252 352
253 dev_dbg(dev, "at %s\n", __func__); 353 dev_dbg(dev, "at %s\n", __func__);
254 354
255 clk_disable_unprepare(data->clk); 355 imx_disable_unprepare_clks(dev);
256 data->in_lpm = true; 356 data->in_lpm = true;
257 357
258 return 0; 358 return 0;
@@ -270,7 +370,7 @@ static int imx_controller_resume(struct device *dev)
270 return 0; 370 return 0;
271 } 371 }
272 372
273 ret = clk_prepare_enable(data->clk); 373 ret = imx_prepare_enable_clks(dev);
274 if (ret) 374 if (ret)
275 return ret; 375 return ret;
276 376
@@ -285,7 +385,7 @@ static int imx_controller_resume(struct device *dev)
285 return 0; 385 return 0;
286 386
287clk_disable: 387clk_disable:
288 clk_disable_unprepare(data->clk); 388 imx_disable_unprepare_clks(dev);
289 return ret; 389 return ret;
290} 390}
291 391
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index 080b7be3daf0..58c8485a0715 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -322,8 +322,10 @@ static ssize_t ci_role_write(struct file *file, const char __user *ubuf,
322 return -EINVAL; 322 return -EINVAL;
323 323
324 pm_runtime_get_sync(ci->dev); 324 pm_runtime_get_sync(ci->dev);
325 disable_irq(ci->irq);
325 ci_role_stop(ci); 326 ci_role_stop(ci);
326 ret = ci_role_start(ci, role); 327 ret = ci_role_start(ci, role);
328 enable_irq(ci->irq);
327 pm_runtime_put_sync(ci->dev); 329 pm_runtime_put_sync(ci->dev);
328 330
329 return ret ? ret : count; 331 return ret ? ret : count;
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 8223fe73ea85..391a1225b0ba 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1751,6 +1751,22 @@ static int ci_udc_start(struct usb_gadget *gadget,
1751 return retval; 1751 return retval;
1752} 1752}
1753 1753
1754static void ci_udc_stop_for_otg_fsm(struct ci_hdrc *ci)
1755{
1756 if (!ci_otg_is_fsm_mode(ci))
1757 return;
1758
1759 mutex_lock(&ci->fsm.lock);
1760 if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) {
1761 ci->fsm.a_bidl_adis_tmout = 1;
1762 ci_hdrc_otg_fsm_start(ci);
1763 } else if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) {
1764 ci->fsm.protocol = PROTO_UNDEF;
1765 ci->fsm.otg->state = OTG_STATE_UNDEFINED;
1766 }
1767 mutex_unlock(&ci->fsm.lock);
1768}
1769
1754/** 1770/**
1755 * ci_udc_stop: unregister a gadget driver 1771 * ci_udc_stop: unregister a gadget driver
1756 */ 1772 */
@@ -1775,6 +1791,7 @@ static int ci_udc_stop(struct usb_gadget *gadget)
1775 ci->driver = NULL; 1791 ci->driver = NULL;
1776 spin_unlock_irqrestore(&ci->lock, flags); 1792 spin_unlock_irqrestore(&ci->lock, flags);
1777 1793
1794 ci_udc_stop_for_otg_fsm(ci);
1778 return 0; 1795 return 0;
1779} 1796}
1780 1797
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index fcea4eb36eee..ab8b027e8cc8 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -500,7 +500,11 @@ static int usbmisc_imx_probe(struct platform_device *pdev)
500{ 500{
501 struct resource *res; 501 struct resource *res;
502 struct imx_usbmisc *data; 502 struct imx_usbmisc *data;
503 struct of_device_id *tmp_dev; 503 const struct of_device_id *of_id;
504
505 of_id = of_match_device(usbmisc_imx_dt_ids, &pdev->dev);
506 if (!of_id)
507 return -ENODEV;
504 508
505 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); 509 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
506 if (!data) 510 if (!data)
@@ -513,9 +517,7 @@ static int usbmisc_imx_probe(struct platform_device *pdev)
513 if (IS_ERR(data->base)) 517 if (IS_ERR(data->base))
514 return PTR_ERR(data->base); 518 return PTR_ERR(data->base);
515 519
516 tmp_dev = (struct of_device_id *) 520 data->ops = (const struct usbmisc_ops *)of_id->data;
517 of_match_device(usbmisc_imx_dt_ids, &pdev->dev);
518 data->ops = (const struct usbmisc_ops *)tmp_dev->data;
519 platform_set_drvdata(pdev, data); 521 platform_set_drvdata(pdev, data);
520 522
521 return 0; 523 return 0;
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 433bbc34a8a4..071964c7847f 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -884,11 +884,11 @@ static int usblp_wwait(struct usblp *usblp, int nonblock)
884 884
885 add_wait_queue(&usblp->wwait, &waita); 885 add_wait_queue(&usblp->wwait, &waita);
886 for (;;) { 886 for (;;) {
887 set_current_state(TASK_INTERRUPTIBLE);
888 if (mutex_lock_interruptible(&usblp->mut)) { 887 if (mutex_lock_interruptible(&usblp->mut)) {
889 rc = -EINTR; 888 rc = -EINTR;
890 break; 889 break;
891 } 890 }
891 set_current_state(TASK_INTERRUPTIBLE);
892 rc = usblp_wtest(usblp, nonblock); 892 rc = usblp_wtest(usblp, nonblock);
893 mutex_unlock(&usblp->mut); 893 mutex_unlock(&usblp->mut);
894 if (rc <= 0) 894 if (rc <= 0)
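
The usblp reorder above is subtle enough to deserve a note: mutex_lock_interruptible() may sleep, and a sleeping lock both warns when the task is not in TASK_RUNNING and resets the task state, losing the TASK_INTERRUPTIBLE set before it. A kernel-style shape sketch of the loop the fix restores (lock, rc and check_condition() are placeholders, not driver names):

for (;;) {
	if (mutex_lock_interruptible(&lock)) {	/* may sleep: take it first */
		rc = -EINTR;
		break;
	}
	set_current_state(TASK_INTERRUPTIBLE);	/* only now mark ourselves */
	rc = check_condition();
	mutex_unlock(&lock);
	if (rc <= 0)
		break;				/* ready, or an error */
	schedule();				/* sleep until woken */
}
set_current_state(TASK_RUNNING);
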
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index a99c89e78126..dd280108758f 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -77,8 +77,7 @@ config USB_OTG_BLACKLIST_HUB
77 77
78config USB_OTG_FSM 78config USB_OTG_FSM
79 tristate "USB 2.0 OTG FSM implementation" 79 tristate "USB 2.0 OTG FSM implementation"
80 depends on USB 80 depends on USB && USB_OTG
81 select USB_OTG
82 select USB_PHY 81 select USB_PHY
83 help 82 help
84 Implements OTG Finite State Machine as specified in On-The-Go 83 Implements OTG Finite State Machine as specified in On-The-Go
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index e79baf73c234..571c21727ff9 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -324,12 +324,13 @@ void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg)
324 */ 324 */
325static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg) 325static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg)
326{ 326{
327 if (hsotg->lx_state == DWC2_L2) { 327 if (hsotg->bus_suspended) {
328 hsotg->flags.b.port_suspend_change = 1; 328 hsotg->flags.b.port_suspend_change = 1;
329 usb_hcd_resume_root_hub(hsotg->priv); 329 usb_hcd_resume_root_hub(hsotg->priv);
330 } else {
331 hsotg->flags.b.port_l1_change = 1;
332 } 330 }
331
332 if (hsotg->lx_state == DWC2_L1)
333 hsotg->flags.b.port_l1_change = 1;
333} 334}
334 335
335/** 336/**
@@ -1428,8 +1429,8 @@ static void dwc2_wakeup_detected(unsigned long data)
1428 dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n", 1429 dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n",
1429 dwc2_readl(hsotg->regs + HPRT0)); 1430 dwc2_readl(hsotg->regs + HPRT0));
1430 1431
1431 hsotg->bus_suspended = 0;
1432 dwc2_hcd_rem_wakeup(hsotg); 1432 dwc2_hcd_rem_wakeup(hsotg);
1433 hsotg->bus_suspended = 0;
1433 1434
1434 /* Change to L0 state */ 1435 /* Change to L0 state */
1435 hsotg->lx_state = DWC2_L0; 1436 hsotg->lx_state = DWC2_L0;
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 5859b0fa19ee..e61d773cf65e 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -108,7 +108,8 @@ static const struct dwc2_core_params params_rk3066 = {
108 .host_ls_low_power_phy_clk = -1, 108 .host_ls_low_power_phy_clk = -1,
109 .ts_dline = -1, 109 .ts_dline = -1,
110 .reload_ctl = -1, 110 .reload_ctl = -1,
111 .ahbcfg = 0x7, /* INCR16 */ 111 .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
112 GAHBCFG_HBSTLEN_SHIFT,
112 .uframe_sched = -1, 113 .uframe_sched = -1,
113 .external_id_pin_ctl = -1, 114 .external_id_pin_ctl = -1,
114 .hibernation = -1, 115 .hibernation = -1,
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 77a622cb48ab..009d83048c8c 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -34,6 +34,8 @@
34#define PCI_DEVICE_ID_INTEL_BSW 0x22b7 34#define PCI_DEVICE_ID_INTEL_BSW 0x22b7
35#define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30 35#define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30
36#define PCI_DEVICE_ID_INTEL_SPTH 0xa130 36#define PCI_DEVICE_ID_INTEL_SPTH 0xa130
37#define PCI_DEVICE_ID_INTEL_BXT 0x0aaa
38#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
37 39
38static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; 40static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
39static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; 41static const struct acpi_gpio_params cs_gpios = { 1, 0, false };
@@ -210,6 +212,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
210 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), }, 212 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
211 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), }, 213 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
212 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), }, 214 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
215 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
216 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
213 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, 217 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
214 { } /* Terminating Entry */ 218 { } /* Terminating Entry */
215}; 219};
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 55ba447fdf8b..e24a01cc98df 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2744,12 +2744,34 @@ int dwc3_gadget_init(struct dwc3 *dwc)
2744 } 2744 }
2745 2745
2746 dwc->gadget.ops = &dwc3_gadget_ops; 2746 dwc->gadget.ops = &dwc3_gadget_ops;
2747 dwc->gadget.max_speed = USB_SPEED_SUPER;
2748 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2747 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2749 dwc->gadget.sg_supported = true; 2748 dwc->gadget.sg_supported = true;
2750 dwc->gadget.name = "dwc3-gadget"; 2749 dwc->gadget.name = "dwc3-gadget";
2751 2750
2752 /* 2751 /*
2752 * FIXME We might be setting max_speed to <SUPER, however versions
2753 * <2.20a of dwc3 have an issue with metastability (documented
2754 * elsewhere in this driver) which tells us we can't set max speed to
2755 * anything lower than SUPER.
2756 *
2757 * Because gadget.max_speed is only used by composite.c and function
2758 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
2759 * to happen so we avoid sending a SuperSpeed Capability descriptor
2760 * together with our BOS descriptor, as that could confuse the host
2761 * into thinking we can handle super speed.
2762 *
2763 * Note that, in fact, we won't even support GetBOS requests when speed
2764 * is less than super speed because we don't yet have the means to tell
2765 * composite.c that we are USB 2.0 + LPM ECN.
2766 */
2767 if (dwc->revision < DWC3_REVISION_220A)
2768 dwc3_trace(trace_dwc3_gadget,
2769 "Changing max_speed on rev %08x\n",
2770 dwc->revision);
2771
2772 dwc->gadget.max_speed = dwc->maximum_speed;
2773
2774 /*
2753 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize 2775 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2754 * on ep out. 2776 * on ep out.
2755 */ 2777 */
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index 23933bdf2d9d..ddc3aad886b7 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -329,7 +329,7 @@ static int alloc_requests(struct usb_composite_dev *cdev,
329 for (i = 0; i < loop->qlen && result == 0; i++) { 329 for (i = 0; i < loop->qlen && result == 0; i++) {
330 result = -ENOMEM; 330 result = -ENOMEM;
331 331
332 in_req = usb_ep_alloc_request(loop->in_ep, GFP_KERNEL); 332 in_req = usb_ep_alloc_request(loop->in_ep, GFP_ATOMIC);
333 if (!in_req) 333 if (!in_req)
334 goto fail; 334 goto fail;
335 335
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index f0f2b066ac08..f92f5aff0dd5 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1633,7 +1633,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
1633 spin_lock(&udc->lock); 1633 spin_lock(&udc->lock);
1634 1634
1635 int_enb = usba_int_enb_get(udc); 1635 int_enb = usba_int_enb_get(udc);
1636 status = usba_readl(udc, INT_STA) & int_enb; 1636 status = usba_readl(udc, INT_STA) & (int_enb | USBA_HIGH_SPEED);
1637 DBG(DBG_INT, "irq, status=%#08x\n", status); 1637 DBG(DBG_INT, "irq, status=%#08x\n", status);
1638 1638
1639 if (status & USBA_DET_SUSPEND) { 1639 if (status & USBA_DET_SUSPEND) {
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 5d2d7e954bd4..0230965fb78c 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -782,12 +782,15 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
782 status |= USB_PORT_STAT_SUSPEND; 782 status |= USB_PORT_STAT_SUSPEND;
783 } 783 }
784 } 784 }
785 if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 785 if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 &&
786 && (raw_port_status & PORT_POWER) 786 (raw_port_status & PORT_POWER)) {
787 && (bus_state->suspended_ports & (1 << wIndex))) { 787 if (bus_state->suspended_ports & (1 << wIndex)) {
788 bus_state->suspended_ports &= ~(1 << wIndex); 788 bus_state->suspended_ports &= ~(1 << wIndex);
789 if (hcd->speed < HCD_USB3) 789 if (hcd->speed < HCD_USB3)
790 bus_state->port_c_suspend |= 1 << wIndex; 790 bus_state->port_c_suspend |= 1 << wIndex;
791 }
792 bus_state->resume_done[wIndex] = 0;
793 clear_bit(wIndex, &bus_state->resuming_ports);
791 } 794 }
792 if (raw_port_status & PORT_CONNECT) { 795 if (raw_port_status & PORT_CONNECT) {
793 status |= USB_PORT_STAT_CONNECTION; 796 status |= USB_PORT_STAT_CONNECTION;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index fa836251ca21..6c5e8133cf87 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3896,28 +3896,6 @@ cleanup:
3896 return ret; 3896 return ret;
3897} 3897}
3898 3898
3899static int ep_ring_is_processing(struct xhci_hcd *xhci,
3900 int slot_id, unsigned int ep_index)
3901{
3902 struct xhci_virt_device *xdev;
3903 struct xhci_ring *ep_ring;
3904 struct xhci_ep_ctx *ep_ctx;
3905 struct xhci_virt_ep *xep;
3906 dma_addr_t hw_deq;
3907
3908 xdev = xhci->devs[slot_id];
3909 xep = &xhci->devs[slot_id]->eps[ep_index];
3910 ep_ring = xep->ring;
3911 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3912
3913 if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) != EP_STATE_RUNNING)
3914 return 0;
3915
3916 hw_deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
3917 return (hw_deq !=
3918 xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue));
3919}
3920
3921/* 3899/*
3922 * Check transfer ring to guarantee there is enough room for the urb. 3900 * Check transfer ring to guarantee there is enough room for the urb.
3923 * Update ISO URB start_frame and interval. 3901 * Update ISO URB start_frame and interval.
@@ -3983,10 +3961,12 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3983 } 3961 }
3984 3962
3985 /* Calculate the start frame and put it in urb->start_frame. */ 3963 /* Calculate the start frame and put it in urb->start_frame. */
3986 if (HCC_CFC(xhci->hcc_params) && 3964 if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
3987 ep_ring_is_processing(xhci, slot_id, ep_index)) { 3965 if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
3988 urb->start_frame = xep->next_frame_id; 3966 EP_STATE_RUNNING) {
3989 goto skip_start_over; 3967 urb->start_frame = xep->next_frame_id;
3968 goto skip_start_over;
3969 }
3990 } 3970 }
3991 3971
3992 start_frame = readl(&xhci->run_regs->microframe_index); 3972 start_frame = readl(&xhci->run_regs->microframe_index);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6e7dc6f93978..dfa44d3e8eee 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -175,6 +175,16 @@ int xhci_reset(struct xhci_hcd *xhci)
175 command |= CMD_RESET; 175 command |= CMD_RESET;
176 writel(command, &xhci->op_regs->command); 176 writel(command, &xhci->op_regs->command);
177 177
178 /* Existing Intel xHCI controllers require a delay of 1 ms
179 * after setting the CMD_RESET bit, and before accessing any
180 * HC registers. This allows the HC to complete the
181 * reset operation and be ready for HC register access.
182 * Without this delay, the subsequent HC register access
183 * may very rarely result in a system hang.
184 */
185 if (xhci->quirks & XHCI_INTEL_HOST)
186 udelay(1000);
187
178 ret = xhci_handshake(&xhci->op_regs->command, 188 ret = xhci_handshake(&xhci->op_regs->command,
179 CMD_RESET, 0, 10 * 1000 * 1000); 189 CMD_RESET, 0, 10 * 1000 * 1000);
180 if (ret) 190 if (ret)
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index ba13529cbd52..18cfc0a361cb 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -132,7 +132,7 @@ static inline struct musb *dev_to_musb(struct device *dev)
132/*-------------------------------------------------------------------------*/ 132/*-------------------------------------------------------------------------*/
133 133
134#ifndef CONFIG_BLACKFIN 134#ifndef CONFIG_BLACKFIN
135static int musb_ulpi_read(struct usb_phy *phy, u32 offset) 135static int musb_ulpi_read(struct usb_phy *phy, u32 reg)
136{ 136{
137 void __iomem *addr = phy->io_priv; 137 void __iomem *addr = phy->io_priv;
138 int i = 0; 138 int i = 0;
@@ -151,7 +151,7 @@ static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
151 * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM. 151 * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
152 */ 152 */
153 153
154 musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset); 154 musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
155 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, 155 musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
156 MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR); 156 MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);
157 157
@@ -176,7 +176,7 @@ out:
176 return ret; 176 return ret;
177} 177}
178 178
179static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data) 179static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
180{ 180{
181 void __iomem *addr = phy->io_priv; 181 void __iomem *addr = phy->io_priv;
182 int i = 0; 182 int i = 0;
@@ -191,8 +191,8 @@ static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
191 power &= ~MUSB_POWER_SUSPENDM; 191 power &= ~MUSB_POWER_SUSPENDM;
192 musb_writeb(addr, MUSB_POWER, power); 192 musb_writeb(addr, MUSB_POWER, power);
193 193
194 musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset); 194 musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
195 musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)data); 195 musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val);
196 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ); 196 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);
197 197
198 while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL) 198 while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
@@ -1668,7 +1668,7 @@ EXPORT_SYMBOL_GPL(musb_interrupt);
1668static bool use_dma = 1; 1668static bool use_dma = 1;
1669 1669
1670/* "modprobe ... use_dma=0" etc */ 1670/* "modprobe ... use_dma=0" etc */
1671module_param(use_dma, bool, 0); 1671module_param(use_dma, bool, 0644);
1672MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); 1672MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
1673 1673
1674void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit) 1674void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 26c65e66cc0f..795a45b1b25b 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -112,22 +112,32 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
112 struct musb *musb = ep->musb; 112 struct musb *musb = ep->musb;
113 void __iomem *epio = ep->regs; 113 void __iomem *epio = ep->regs;
114 u16 csr; 114 u16 csr;
115 u16 lastcsr = 0;
116 int retries = 1000; 115 int retries = 1000;
117 116
118 csr = musb_readw(epio, MUSB_TXCSR); 117 csr = musb_readw(epio, MUSB_TXCSR);
119 while (csr & MUSB_TXCSR_FIFONOTEMPTY) { 118 while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
120 if (csr != lastcsr)
121 dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
122 lastcsr = csr;
123 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY; 119 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
124 musb_writew(epio, MUSB_TXCSR, csr); 120 musb_writew(epio, MUSB_TXCSR, csr);
125 csr = musb_readw(epio, MUSB_TXCSR); 121 csr = musb_readw(epio, MUSB_TXCSR);
126 if (WARN(retries-- < 1, 122
123 /*
124 * FIXME: sometimes the tx fifo flush fails; this has been
125 * observed during device disconnect on AM335x.
126 *
127 * To reproduce the issue, ensure tx urb(s) are queued when
128 * unplugging the usb device which is connected to the AM335x usb
129 * host port.
130 *
131 * I found using a usb-ethernet device and running iperf
132 * (client on AM335x) has a very high chance of triggering it.
133 *
134 * Better to turn on dev_dbg() in musb_cleanup_urb() with
135 * CPPI enabled to see the issue when aborting the tx channel.
136 */
137 if (dev_WARN_ONCE(musb->controller, retries-- < 1,
127 "Could not flush host TX%d fifo: csr: %04x\n", 138 "Could not flush host TX%d fifo: csr: %04x\n",
128 ep->epnum, csr)) 139 ep->epnum, csr))
129 return; 140 return;
130 mdelay(1);
131 } 141 }
132} 142}
133 143
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 173132416170..22e8ecb6bfbd 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -21,7 +21,6 @@ config AB8500_USB
21config FSL_USB2_OTG 21config FSL_USB2_OTG
22 bool "Freescale USB OTG Transceiver Driver" 22 bool "Freescale USB OTG Transceiver Driver"
23 depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM 23 depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM
24 select USB_OTG
25 select USB_PHY 24 select USB_PHY
26 help 25 help
27 Enable this to support Freescale USB OTG transceiver. 26 Enable this to support Freescale USB OTG transceiver.
@@ -168,8 +167,7 @@ config USB_QCOM_8X16_PHY
168 167
169config USB_MV_OTG 168config USB_MV_OTG
170 tristate "Marvell USB OTG support" 169 tristate "Marvell USB OTG support"
171 depends on USB_EHCI_MV && USB_MV_UDC && PM 170 depends on USB_EHCI_MV && USB_MV_UDC && PM && USB_OTG
172 select USB_OTG
173 select USB_PHY 171 select USB_PHY
174 help 172 help
175 Say Y here if you want to build Marvell USB OTG transceiver 173 Say Y here if you want to build Marvell USB OTG transceiver
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 4d863ebc117c..b7536af777ab 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -452,10 +452,13 @@ static int mxs_phy_probe(struct platform_device *pdev)
452 struct clk *clk; 452 struct clk *clk;
453 struct mxs_phy *mxs_phy; 453 struct mxs_phy *mxs_phy;
454 int ret; 454 int ret;
455 const struct of_device_id *of_id = 455 const struct of_device_id *of_id;
456 of_match_device(mxs_phy_dt_ids, &pdev->dev);
457 struct device_node *np = pdev->dev.of_node; 456 struct device_node *np = pdev->dev.of_node;
458 457
458 of_id = of_match_device(mxs_phy_dt_ids, &pdev->dev);
459 if (!of_id)
460 return -ENODEV;
461
459 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 462 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
460 base = devm_ioremap_resource(&pdev->dev, res); 463 base = devm_ioremap_resource(&pdev->dev, res);
461 if (IS_ERR(base)) 464 if (IS_ERR(base))
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c
index 1270906ccb95..c4bf2de6d14e 100644
--- a/drivers/usb/phy/phy-omap-otg.c
+++ b/drivers/usb/phy/phy-omap-otg.c
@@ -105,7 +105,6 @@ static int omap_otg_probe(struct platform_device *pdev)
105 extcon = extcon_get_extcon_dev(config->extcon); 105 extcon = extcon_get_extcon_dev(config->extcon);
106 if (!extcon) 106 if (!extcon)
107 return -EPROBE_DEFER; 107 return -EPROBE_DEFER;
108 otg_dev->extcon = extcon;
109 108
110 otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL); 109 otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL);
111 if (!otg_dev) 110 if (!otg_dev)
@@ -115,6 +114,7 @@ static int omap_otg_probe(struct platform_device *pdev)
115 if (IS_ERR(otg_dev->base)) 114 if (IS_ERR(otg_dev->base))
116 return PTR_ERR(otg_dev->base); 115 return PTR_ERR(otg_dev->base);
117 116
117 otg_dev->extcon = extcon;
118 otg_dev->id_nb.notifier_call = omap_otg_id_notifier; 118 otg_dev->id_nb.notifier_call = omap_otg_id_notifier;
119 otg_dev->vbus_nb.notifier_call = omap_otg_vbus_notifier; 119 otg_dev->vbus_nb.notifier_call = omap_otg_vbus_notifier;
120 120
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 685fef71d3d1..f2280606b73c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -161,6 +161,7 @@ static void option_instat_callback(struct urb *urb);
161#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 161#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001
162#define NOVATELWIRELESS_PRODUCT_E362 0x9010 162#define NOVATELWIRELESS_PRODUCT_E362 0x9010
163#define NOVATELWIRELESS_PRODUCT_E371 0x9011 163#define NOVATELWIRELESS_PRODUCT_E371 0x9011
164#define NOVATELWIRELESS_PRODUCT_U620L 0x9022
164#define NOVATELWIRELESS_PRODUCT_G2 0xA010 165#define NOVATELWIRELESS_PRODUCT_G2 0xA010
165#define NOVATELWIRELESS_PRODUCT_MC551 0xB001 166#define NOVATELWIRELESS_PRODUCT_MC551 0xB001
166 167
@@ -354,6 +355,7 @@ static void option_instat_callback(struct urb *urb);
354/* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick * 355/* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick *
355 * It seems to contain a Qualcomm QSC6240/6290 chipset */ 356 * It seems to contain a Qualcomm QSC6240/6290 chipset */
356#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 357#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
358#define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01
357 359
358/* iBall 3.5G connect wireless modem */ 360/* iBall 3.5G connect wireless modem */
359#define IBALL_3_5G_CONNECT 0x9605 361#define IBALL_3_5G_CONNECT 0x9605
@@ -519,6 +521,11 @@ static const struct option_blacklist_info four_g_w14_blacklist = {
519 .sendsetup = BIT(0) | BIT(1), 521 .sendsetup = BIT(0) | BIT(1),
520}; 522};
521 523
524static const struct option_blacklist_info four_g_w100_blacklist = {
525 .sendsetup = BIT(1) | BIT(2),
526 .reserved = BIT(3),
527};
528
522static const struct option_blacklist_info alcatel_x200_blacklist = { 529static const struct option_blacklist_info alcatel_x200_blacklist = {
523 .sendsetup = BIT(0) | BIT(1), 530 .sendsetup = BIT(0) | BIT(1),
524 .reserved = BIT(4), 531 .reserved = BIT(4),
@@ -1052,6 +1059,7 @@ static const struct usb_device_id option_ids[] = {
1052 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) }, 1059 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
1053 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) }, 1060 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
1054 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) }, 1061 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) },
1062 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U620L, 0xff, 0x00, 0x00) },
1055 1063
1056 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, 1064 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
1057 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, 1065 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -1641,6 +1649,9 @@ static const struct usb_device_id option_ids[] = {
1641 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), 1649 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
1642 .driver_info = (kernel_ulong_t)&four_g_w14_blacklist 1650 .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
1643 }, 1651 },
1652 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
1653 .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
1654 },
1644 { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, 1655 { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
1645 { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, 1656 { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
1646 { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) }, 1657 { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 5022fcfa0260..9919d2a9faf2 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -22,6 +22,8 @@
22#define DRIVER_AUTHOR "Qualcomm Inc" 22#define DRIVER_AUTHOR "Qualcomm Inc"
23#define DRIVER_DESC "Qualcomm USB Serial driver" 23#define DRIVER_DESC "Qualcomm USB Serial driver"
24 24
25#define QUECTEL_EC20_PID 0x9215
26
25/* standard device layouts supported by this driver */ 27/* standard device layouts supported by this driver */
26enum qcserial_layouts { 28enum qcserial_layouts {
27 QCSERIAL_G2K = 0, /* Gobi 2000 */ 29 QCSERIAL_G2K = 0, /* Gobi 2000 */
@@ -171,6 +173,38 @@ static const struct usb_device_id id_table[] = {
171}; 173};
172MODULE_DEVICE_TABLE(usb, id_table); 174MODULE_DEVICE_TABLE(usb, id_table);
173 175
176static int handle_quectel_ec20(struct device *dev, int ifnum)
177{
178 int altsetting = 0;
179
180 /*
181 * Quectel EC20 Mini PCIe LTE module layout:
182 * 0: DM/DIAG (use libqcdm from ModemManager for communication)
183 * 1: NMEA
184 * 2: AT-capable modem port
185 * 3: Modem interface
186 * 4: NDIS
187 */
188 switch (ifnum) {
189 case 0:
190 dev_dbg(dev, "Quectel EC20 DM/DIAG interface found\n");
191 break;
192 case 1:
193 dev_dbg(dev, "Quectel EC20 NMEA GPS interface found\n");
194 break;
195 case 2:
196 case 3:
197 dev_dbg(dev, "Quectel EC20 Modem port found\n");
198 break;
199 case 4:
200 /* Don't claim the QMI/net interface */
201 altsetting = -1;
202 break;
203 }
204
205 return altsetting;
206}
207
174static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) 208static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
175{ 209{
176 struct usb_host_interface *intf = serial->interface->cur_altsetting; 210 struct usb_host_interface *intf = serial->interface->cur_altsetting;
@@ -181,6 +215,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
181 int altsetting = -1; 215 int altsetting = -1;
182 bool sendsetup = false; 216 bool sendsetup = false;
183 217
218 /* we only support vendor specific functions */
219 if (intf->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
220 goto done;
221
184 nintf = serial->dev->actconfig->desc.bNumInterfaces; 222 nintf = serial->dev->actconfig->desc.bNumInterfaces;
185 dev_dbg(dev, "Num Interfaces = %d\n", nintf); 223 dev_dbg(dev, "Num Interfaces = %d\n", nintf);
186 ifnum = intf->desc.bInterfaceNumber; 224 ifnum = intf->desc.bInterfaceNumber;
@@ -240,6 +278,12 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
240 altsetting = -1; 278 altsetting = -1;
241 break; 279 break;
242 case QCSERIAL_G2K: 280 case QCSERIAL_G2K:
281 /* handle non-standard layouts */
282 if (nintf == 5 && id->idProduct == QUECTEL_EC20_PID) {
283 altsetting = handle_quectel_ec20(dev, ifnum);
284 goto done;
285 }
286
243 /* 287 /*
244 * Gobi 2K+ USB layout: 288 * Gobi 2K+ USB layout:
245 * 0: QMI/net 289 * 0: QMI/net
@@ -301,29 +345,39 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
301 break; 345 break;
302 case QCSERIAL_HWI: 346 case QCSERIAL_HWI:
303 /* 347 /*
304 * Huawei layout: 348 * Huawei devices map functions by subclass + protocol
305 * 0: AT-capable modem port 349 instead of interface numbers. The protocol identifies
306 * 1: DM/DIAG 350 a specific function, while the subclass indicates a
307 * 2: AT-capable modem port 351 * specific firmware source
308 * 3: CCID-compatible PCSC interface 352 *
309 * 4: QMI/net 353 * This is a blacklist of functions known to be
310 * 5: NMEA 354 * non-serial. The rest are assumed to be serial and
355 * will be handled by this driver.
311 */ 356 */
312 switch (ifnum) { 357 switch (intf->desc.bInterfaceProtocol) {
313 case 0: 358 /* QMI combined (qmi_wwan) */
314 case 2: 359 case 0x07:
315 dev_dbg(dev, "Modem port found\n"); 360 case 0x37:
316 break; 361 case 0x67:
317 case 1: 362 /* QMI data (qmi_wwan) */
318 dev_dbg(dev, "DM/DIAG interface found\n"); 363 case 0x08:
319 break; 364 case 0x38:
320 case 5: 365 case 0x68:
321 dev_dbg(dev, "NMEA GPS interface found\n"); 366 /* QMI control (qmi_wwan) */
322 break; 367 case 0x09:
323 default: 368 case 0x39:
324 /* don't claim any unsupported interface */ 369 case 0x69:
370 /* NCM like (huawei_cdc_ncm) */
371 case 0x16:
372 case 0x46:
373 case 0x76:
325 altsetting = -1; 374 altsetting = -1;
326 break; 375 break;
376 default:
377 dev_dbg(dev, "Huawei type serial port found (%02x/%02x/%02x)\n",
378 intf->desc.bInterfaceClass,
379 intf->desc.bInterfaceSubClass,
380 intf->desc.bInterfaceProtocol);
327 } 381 }
328 break; 382 break;
329 default: 383 default:
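
Since the side-by-side rendering makes this hunk hard to follow, the new HWI dispatch is restated below as a compilable sketch (hwi_altsetting() is a hypothetical name): the listed QMI and NCM protocol values are blacklisted, and everything else is claimed as a serial port.

/* Returns the altsetting convention qcprobe() uses: -1 means
 * "don't claim this interface", 0 means treat it as serial. */
static int hwi_altsetting(unsigned char protocol)
{
	switch (protocol) {
	case 0x07: case 0x37: case 0x67:	/* QMI combined (qmi_wwan) */
	case 0x08: case 0x38: case 0x68:	/* QMI data (qmi_wwan) */
	case 0x09: case 0x39: case 0x69:	/* QMI control (qmi_wwan) */
	case 0x16: case 0x46: case 0x76:	/* NCM like (huawei_cdc_ncm) */
		return -1;
	default:
		return 0;			/* assumed serial function */
	}
}
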
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index e9da41d9fe7f..2694df2f4559 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -159,6 +159,7 @@ static const struct usb_device_id ti_id_table_3410[] = {
159 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) }, 159 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) },
160 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, 160 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
161 { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, 161 { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
162 { USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) },
162 { } /* terminator */ 163 { } /* terminator */
163}; 164};
164 165
@@ -191,6 +192,7 @@ static const struct usb_device_id ti_id_table_combined[] = {
191 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, 192 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
192 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, 193 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
193 { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, 194 { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
195 { USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) },
194 { } /* terminator */ 196 { } /* terminator */
195}; 197};
196 198
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
index 4a2423e84d55..98f35c656c02 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.h
+++ b/drivers/usb/serial/ti_usb_3410_5052.h
@@ -56,6 +56,10 @@
56#define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID 56#define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID
57#define ABBOTT_STRIP_PORT_ID 0x3420 57#define ABBOTT_STRIP_PORT_ID 0x3420
58 58
59/* Honeywell vendor and product IDs */
60#define HONEYWELL_VENDOR_ID 0x10ac
61#define HONEYWELL_HGI80_PRODUCT_ID 0x0102 /* Honeywell HGI80 */
62
59/* Commands */ 63/* Commands */
60#define TI_GET_VERSION 0x01 64#define TI_GET_VERSION 0x01
61#define TI_GET_PORT_STATUS 0x02 65#define TI_GET_PORT_STATUS 0x02
diff --git a/fs/Kconfig b/fs/Kconfig
index da3f32f1a4e4..6ce72d8d1ee1 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -46,6 +46,12 @@ config FS_DAX
46 or if unsure, say N. Saying Y will increase the size of the kernel 46 or if unsure, say N. Saying Y will increase the size of the kernel
47 by about 5kB. 47 by about 5kB.
48 48
49config FS_DAX_PMD
50 bool
51 default FS_DAX
52 depends on FS_DAX
53 depends on BROKEN
54
49endif # BLOCK 55endif # BLOCK
50 56
51# Posix ACL utility routines 57# Posix ACL utility routines
diff --git a/fs/block_dev.c b/fs/block_dev.c
index bb0dfb1c7af1..c25639e907bd 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -390,9 +390,17 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
390 struct page *page) 390 struct page *page)
391{ 391{
392 const struct block_device_operations *ops = bdev->bd_disk->fops; 392 const struct block_device_operations *ops = bdev->bd_disk->fops;
393 int result = -EOPNOTSUPP;
394
393 if (!ops->rw_page || bdev_get_integrity(bdev)) 395 if (!ops->rw_page || bdev_get_integrity(bdev))
394 return -EOPNOTSUPP; 396 return result;
395 return ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ); 397
398 result = blk_queue_enter(bdev->bd_queue, GFP_KERNEL);
399 if (result)
400 return result;
401 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ);
402 blk_queue_exit(bdev->bd_queue);
403 return result;
396} 404}
397EXPORT_SYMBOL_GPL(bdev_read_page); 405EXPORT_SYMBOL_GPL(bdev_read_page);
398 406
@@ -421,14 +429,20 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
421 int result; 429 int result;
422 int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE; 430 int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE;
423 const struct block_device_operations *ops = bdev->bd_disk->fops; 431 const struct block_device_operations *ops = bdev->bd_disk->fops;
432
424 if (!ops->rw_page || bdev_get_integrity(bdev)) 433 if (!ops->rw_page || bdev_get_integrity(bdev))
425 return -EOPNOTSUPP; 434 return -EOPNOTSUPP;
435 result = blk_queue_enter(bdev->bd_queue, GFP_KERNEL);
436 if (result)
437 return result;
438
426 set_page_writeback(page); 439 set_page_writeback(page);
427 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw); 440 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw);
428 if (result) 441 if (result)
429 end_page_writeback(page); 442 end_page_writeback(page);
430 else 443 else
431 unlock_page(page); 444 unlock_page(page);
445 blk_queue_exit(bdev->bd_queue);
432 return result; 446 return result;
433} 447}
434EXPORT_SYMBOL_GPL(bdev_write_page); 448EXPORT_SYMBOL_GPL(bdev_write_page);
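
Both bdev hunks bracket the ->rw_page() call with blk_queue_enter()/blk_queue_exit(), taking a may-fail reference that keeps the request queue alive across the call. A standalone, simplified sketch of that guard shape, using hypothetical types and C11 atomics in place of the kernel's reference counting (the real blk_queue_enter() can also wait rather than fail):

#include <stdatomic.h>
#include <errno.h>

struct queue {
	atomic_int users;
	atomic_bool dying;
};

/* Fail instead of blocking if the queue is being torn down. */
static int queue_enter(struct queue *q)
{
	atomic_fetch_add(&q->users, 1);
	if (atomic_load(&q->dying)) {
		atomic_fetch_sub(&q->users, 1);
		return -ENODEV;
	}
	return 0;
}

static void queue_exit(struct queue *q)
{
	atomic_fetch_sub(&q->users, 1);
}
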
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 7a6b02f72787..c0f3da3926a0 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -879,7 +879,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
879 loff_t pos, eof; 879 loff_t pos, eof;
880 size_t len; 880 size_t len;
881 void *data; 881 void *data;
882 int ret; 882 int ret = -ENOBUFS;
883 883
884 ASSERT(op != NULL); 884 ASSERT(op != NULL);
885 ASSERT(page != NULL); 885 ASSERT(page != NULL);
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index c81ce7f200a6..a7a1b218f308 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1636,6 +1636,116 @@ const struct file_operations configfs_dir_operations = {
1636 .iterate = configfs_readdir, 1636 .iterate = configfs_readdir,
1637}; 1637};
1638 1638
1639/**
1640 * configfs_register_group - creates a parent-child relation between two groups
1641 * @parent_group: parent group
1642 * @group: child group
1643 *
1644 * links the groups, creates a dentry for the child and attaches it
1645 * to the parent dentry.
1646 *
1647 * Return: 0 on success, negative errno code on error
1648 */
1649int configfs_register_group(struct config_group *parent_group,
1650 struct config_group *group)
1651{
1652 struct configfs_subsystem *subsys = parent_group->cg_subsys;
1653 struct dentry *parent;
1654 int ret;
1655
1656 mutex_lock(&subsys->su_mutex);
1657 link_group(parent_group, group);
1658 mutex_unlock(&subsys->su_mutex);
1659
1660 parent = parent_group->cg_item.ci_dentry;
1661
1662 mutex_lock_nested(&d_inode(parent)->i_mutex, I_MUTEX_PARENT);
1663 ret = create_default_group(parent_group, group);
1664 if (!ret) {
1665 spin_lock(&configfs_dirent_lock);
1666 configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
1667 spin_unlock(&configfs_dirent_lock);
1668 }
1669 mutex_unlock(&d_inode(parent)->i_mutex);
1670 return ret;
1671}
1672EXPORT_SYMBOL(configfs_register_group);
1673
1674/**
1675 * configfs_unregister_group() - unregisters a child group from its parent
1676 * @group: group to be unregistered from its parent
1677 *
1678 * Undoes configfs_register_group()
1679 */
1680void configfs_unregister_group(struct config_group *group)
1681{
1682 struct configfs_subsystem *subsys = group->cg_subsys;
1683 struct dentry *dentry = group->cg_item.ci_dentry;
1684 struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
1685
1686 mutex_lock_nested(&d_inode(parent)->i_mutex, I_MUTEX_PARENT);
1687 spin_lock(&configfs_dirent_lock);
1688 configfs_detach_prep(dentry, NULL);
1689 spin_unlock(&configfs_dirent_lock);
1690
1691 configfs_detach_group(&group->cg_item);
1692 d_inode(dentry)->i_flags |= S_DEAD;
1693 dont_mount(dentry);
1694 d_delete(dentry);
1695 mutex_unlock(&d_inode(parent)->i_mutex);
1696
1697 dput(dentry);
1698
1699 mutex_lock(&subsys->su_mutex);
1700 unlink_group(group);
1701 mutex_unlock(&subsys->su_mutex);
1702}
1703EXPORT_SYMBOL(configfs_unregister_group);
1704
1705/**
1706 * configfs_register_default_group() - allocates and registers a child group
1707 * @parent_group: parent group
1708 * @name: child group name
1709 * @item_type: child item type description
1710 *
1711 * Boilerplate to allocate and register a child group with its parent. We
1712 * need kzalloc'ed memory because the child's default_group is initially empty.
1713 *
1714 * Return: allocated config group or ERR_PTR() on error
1715 */
1716struct config_group *
1717configfs_register_default_group(struct config_group *parent_group,
1718 const char *name,
1719 struct config_item_type *item_type)
1720{
1721 int ret;
1722 struct config_group *group;
1723
1724 group = kzalloc(sizeof(*group), GFP_KERNEL);
1725 if (!group)
1726 return ERR_PTR(-ENOMEM);
1727 config_group_init_type_name(group, name, item_type);
1728
1729 ret = configfs_register_group(parent_group, group);
1730 if (ret) {
1731 kfree(group);
1732 return ERR_PTR(ret);
1733 }
1734 return group;
1735}
1736EXPORT_SYMBOL(configfs_register_default_group);
1737
1738/**
1739 * configfs_unregister_default_group() - unregisters and frees a child group
1740 * @group: the group to act on
1741 */
1742void configfs_unregister_default_group(struct config_group *group)
1743{
1744 configfs_unregister_group(group);
1745 kfree(group);
1746}
1747EXPORT_SYMBOL(configfs_unregister_default_group);
1748
1639int configfs_register_subsystem(struct configfs_subsystem *subsys) 1749int configfs_register_subsystem(struct configfs_subsystem *subsys)
1640{ 1750{
1641 int err; 1751 int err;
diff --git a/fs/dax.c b/fs/dax.c
index d1e5cb7311a1..43671b68220e 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -541,6 +541,10 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
541 unsigned long pfn; 541 unsigned long pfn;
542 int result = 0; 542 int result = 0;
543 543
544 /* dax pmd mappings are broken wrt gup and fork */
545 if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
546 return VM_FAULT_FALLBACK;
547
544 /* Fall back to PTEs if we're going to COW */ 548 /* Fall back to PTEs if we're going to COW */
545 if (write && !(vma->vm_flags & VM_SHARED)) 549 if (write && !(vma->vm_flags & VM_SHARED))
546 return VM_FAULT_FALLBACK; 550 return VM_FAULT_FALLBACK;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 3a71cea68420..748d35afc902 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -569,6 +569,8 @@ static int parse_options(char *options, struct super_block *sb)
 		/* Fall through */
 	case Opt_dax:
 #ifdef CONFIG_FS_DAX
+		ext2_msg(sb, KERN_WARNING,
+			"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
 		set_opt(sbi->s_mount_opt, DAX);
 #else
 		ext2_msg(sb, KERN_INFO, "dax option not supported");
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 753f4e68b820..c9ab67da6e5a 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1664,8 +1664,12 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
 		}
 		sbi->s_jquota_fmt = m->mount_opt;
 #endif
-#ifndef CONFIG_FS_DAX
 	} else if (token == Opt_dax) {
+#ifdef CONFIG_FS_DAX
+		ext4_msg(sb, KERN_WARNING,
+			"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
+		sbi->s_mount_opt |= m->mount_opt;
+#else
 		ext4_msg(sb, KERN_INFO, "dax option not supported");
 		return -1;
 #endif
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 4afc4d9d2e41..8b2127ffb226 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -610,9 +610,9 @@ parse_record:
 		int status = fat_parse_long(inode, &cpos, &bh, &de,
 					    &unicode, &nr_slots);
 		if (status < 0) {
-			ctx->pos = cpos;
+			bh = NULL;
 			ret = status;
-			goto out;
+			goto end_of_dir;
 		} else if (status == PARSE_INVALID)
 			goto record_end;
 		else if (status == PARSE_NOT_LONGNAME)
@@ -654,8 +654,9 @@ parse_record:
 		fill_len = short_len;
 
 start_filldir:
-	if (!fake_offset)
-		ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry);
+	ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry);
+	if (fake_offset && ctx->pos < 2)
+		ctx->pos = 2;
 
 	if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) {
 		if (!dir_emit_dot(file, ctx))
@@ -681,14 +682,19 @@ record_end:
 	fake_offset = 0;
 	ctx->pos = cpos;
 	goto get_new;
+
 end_of_dir:
-	ctx->pos = cpos;
+	if (fake_offset && cpos < 2)
+		ctx->pos = 2;
+	else
+		ctx->pos = cpos;
 fill_failed:
 	brelse(bh);
 	if (unicode)
 		__putname(unicode);
 out:
 	mutex_unlock(&sbi->s_lock);
+
 	return ret;
 }
 
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 316adb968b65..de4bdfac0cec 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -332,12 +332,17 @@ static void remove_huge_page(struct page *page)
  * truncation is indicated by end of range being LLONG_MAX
  * In this case, we first scan the range and release found pages.
  * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
- * maps and global counts.
+ * maps and global counts. Page faults can not race with truncation
+ * in this routine. hugetlb_no_page() prevents page faults in the
+ * truncated range. It checks i_size before allocation, and again after
+ * with the page table lock for the page held. The same lock must be
+ * acquired to unmap a page.
  * hole punch is indicated if end is not LLONG_MAX
  * In the hole punch case we scan the range and release found pages.
  * Only when releasing a page is the associated region/reserv map
  * deleted. The region/reserv map for ranges without associated
- * pages are not modified.
+ * pages are not modified. Page faults can race with hole punch.
+ * This is indicated if we find a mapped page.
  * Note: If the passed end of range value is beyond the end of file, but
  * not LLONG_MAX this routine still performs a hole punch operation.
  */
@@ -361,46 +366,37 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 	next = start;
 	while (next < end) {
 		/*
-		 * Make sure to never grab more pages that we
-		 * might possibly need.
+		 * Don't grab more pages than the number left in the range.
 		 */
 		if (end - next < lookup_nr)
 			lookup_nr = end - next;
 
 		/*
-		 * This pagevec_lookup() may return pages past 'end',
-		 * so we must check for page->index > end.
+		 * When no more pages are found, we are done.
 		 */
-		if (!pagevec_lookup(&pvec, mapping, next, lookup_nr)) {
-			if (next == start)
-				break;
-			next = start;
-			continue;
-		}
+		if (!pagevec_lookup(&pvec, mapping, next, lookup_nr))
+			break;
 
 		for (i = 0; i < pagevec_count(&pvec); ++i) {
 			struct page *page = pvec.pages[i];
 			u32 hash;
 
+			/*
+			 * The page (index) could be beyond end. This is
+			 * only possible in the punch hole case as end is
+			 * max page offset in the truncate case.
+			 */
+			next = page->index;
+			if (next >= end)
+				break;
+
 			hash = hugetlb_fault_mutex_hash(h, current->mm,
 							&pseudo_vma,
 							mapping, next, 0);
 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 			lock_page(page);
-			if (page->index >= end) {
-				unlock_page(page);
-				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
-				next = end;	/* we are done */
-				break;
-			}
-
-			/*
-			 * If page is mapped, it was faulted in after being
-			 * unmapped. Do nothing in this race case. In the
-			 * normal case page is not mapped.
-			 */
-			if (!page_mapped(page)) {
+			if (likely(!page_mapped(page))) {
 				bool rsv_on_error = !PagePrivate(page);
 				/*
 				 * We must free the huge page and remove
@@ -421,17 +417,23 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 					hugetlb_fix_reserve_counts(
 						inode, rsv_on_error);
 				}
+			} else {
+				/*
+				 * If page is mapped, it was faulted in after
+				 * being unmapped. It indicates a race between
+				 * hole punch and page fault. Do nothing in
+				 * this case. Getting here in a truncate
+				 * operation is a bug.
+				 */
+				BUG_ON(truncate_op);
 			}
 
-			if (page->index > next)
-				next = page->index;
-
-			++next;
 			unlock_page(page);
-
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 		}
+		++next;
 		huge_pagevec_release(&pvec);
+		cond_resched();
 	}
 
 	if (truncate_op)
@@ -647,9 +649,6 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
 		i_size_write(inode, offset + len);
 	inode->i_ctime = CURRENT_TIME;
-	spin_lock(&inode->i_lock);
-	inode->i_private = NULL;
-	spin_unlock(&inode->i_lock);
 out:
 	mutex_unlock(&inode->i_mutex);
 	return error;
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index 79b113048eac..0a3f9b594602 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -525,6 +525,8 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg
 		switch (rqdata.cmd) {
 		case NCP_LOCK_EX:
 		case NCP_LOCK_SH:
+			if (rqdata.timeout < 0)
+				return -EINVAL;
 			if (rqdata.timeout == 0)
 				rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT;
 			else if (rqdata.timeout > NCP_LOCK_MAX_TIMEOUT)
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 3b48ac25d8a7..a03f6f433075 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -372,6 +372,8 @@ static int ocfs2_mknod(struct inode *dir,
 		mlog_errno(status);
 		goto leave;
 	}
+	/* update inode->i_mode after mask with "umask". */
+	inode->i_mode = mode;
 
 	handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
 							    S_ISDIR(mode),
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index e67aeac2aee0..4b74c97d297a 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -136,6 +136,9 @@ drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
 
 void drm_atomic_legacy_backoff(struct drm_atomic_state *state);
 
+void
+drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret);
+
 int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
 int __must_check drm_atomic_commit(struct drm_atomic_state *state);
 int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 9c747cb14ad8..d2f41477f8ae 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -342,10 +342,10 @@ int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
 			       struct irq_phys_map *map, bool level);
 void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
-int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu);
 struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
 					   int virt_irq, int irq);
 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
 
 #define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
 #define vgic_initialized(k)	(!!((k)->arch.vgic.nr_cpus))
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3fe27f8d91f0..c0d2b7927c1f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -794,6 +794,8 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 			 struct scsi_ioctl_command __user *);
 
+extern int blk_queue_enter(struct request_queue *q, gfp_t gfp);
+extern void blk_queue_exit(struct request_queue *q);
 extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
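The pair exported above brackets a usage reference on the queue; a sketch of the expected calling convention, inferred from the signatures (the caller function is hypothetical and error handling is kept minimal):

static int do_work_with_queue(struct request_queue *q)
{
	int ret;

	ret = blk_queue_enter(q, GFP_KERNEL);	/* fails once the queue is dying */
	if (ret)
		return ret;
	/* ... issue requests against q here ... */
	blk_queue_exit(q);	/* drop the reference from blk_queue_enter() */
	return 0;
}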
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index a8a335b7fce0..758a029011b1 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -197,6 +197,16 @@ static inline struct configfs_subsystem *to_configfs_subsystem(struct config_gro
 int configfs_register_subsystem(struct configfs_subsystem *subsys);
 void configfs_unregister_subsystem(struct configfs_subsystem *subsys);
 
+int configfs_register_group(struct config_group *parent_group,
+			    struct config_group *group);
+void configfs_unregister_group(struct config_group *group);
+
+struct config_group *
+configfs_register_default_group(struct config_group *parent_group,
+				const char *name,
+				struct config_item_type *item_type);
+void configfs_unregister_default_group(struct config_group *group);
+
 /* These functions can sleep and can alloc with GFP_KERNEL */
 /* WARNING: These cannot be called underneath configfs callbacks!! */
 int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 6523109e136d..8942af0813e3 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -271,7 +271,7 @@ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
 
 static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
 {
-	return gfp_flags & __GFP_DIRECT_RECLAIM;
+	return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM);
 }
 
 #ifdef CONFIG_HIGHMEM
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index e6982ac3200d..a57f0dfb6db7 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -16,6 +16,7 @@
 #define MARVELL_PHY_ID_88E1318S		0x01410e90
 #define MARVELL_PHY_ID_88E1116R		0x01410e40
 #define MARVELL_PHY_ID_88E1510		0x01410dd0
+#define MARVELL_PHY_ID_88E1540		0x01410eb0
 #define MARVELL_PHY_ID_88E3016		0x01410e60
 
 /* struct phy_device dev_flags definitions */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index dd2097455a2e..1565324eb620 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -453,26 +453,28 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
 	u8         lro_cap[0x1];
 	u8         lro_psh_flag[0x1];
 	u8         lro_time_stamp[0x1];
-	u8         reserved_0[0x6];
+	u8         reserved_0[0x3];
+	u8         self_lb_en_modifiable[0x1];
+	u8         reserved_1[0x2];
 	u8         max_lso_cap[0x5];
-	u8         reserved_1[0x4];
+	u8         reserved_2[0x4];
 	u8         rss_ind_tbl_cap[0x4];
-	u8         reserved_2[0x3];
+	u8         reserved_3[0x3];
 	u8         tunnel_lso_const_out_ip_id[0x1];
-	u8         reserved_3[0x2];
+	u8         reserved_4[0x2];
 	u8         tunnel_statless_gre[0x1];
 	u8         tunnel_stateless_vxlan[0x1];
 
-	u8         reserved_4[0x20];
+	u8         reserved_5[0x20];
 
-	u8         reserved_5[0x10];
+	u8         reserved_6[0x10];
 	u8         lro_min_mss_size[0x10];
 
-	u8         reserved_6[0x120];
+	u8         reserved_7[0x120];
 
 	u8         lro_timer_supported_periods[4][0x20];
 
-	u8         reserved_7[0x600];
+	u8         reserved_8[0x600];
 };
 
 struct mlx5_ifc_roce_cap_bits {
@@ -4051,9 +4053,11 @@ struct mlx5_ifc_modify_tis_in_bits {
 };
 
 struct mlx5_ifc_modify_tir_bitmask_bits {
-	u8	   reserved[0x20];
+	u8	   reserved_0[0x20];
 
-	u8	   reserved1[0x1f];
+	u8	   reserved_1[0x1b];
+	u8	   self_lb_en[0x1];
+	u8	   reserved_2[0x3];
 	u8	   lro[0x1];
 };
 
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d20891465247..67bfac1abfc1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2068,20 +2068,23 @@ struct pcpu_sw_netstats {
 	struct u64_stats_sync	syncp;
 };
 
-#define netdev_alloc_pcpu_stats(type)				\
+#define __netdev_alloc_pcpu_stats(type, gfp)			\
 ({								\
-	typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
+	typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
 	if (pcpu_stats)	{					\
 		int __cpu;					\
 		for_each_possible_cpu(__cpu) {			\
 			typeof(type) *stat;			\
 			stat = per_cpu_ptr(pcpu_stats, __cpu);	\
 			u64_stats_init(&stat->syncp);		\
 		}						\
 	}							\
 	pcpu_stats;						\
 })
 
+#define netdev_alloc_pcpu_stats(type)				\
+	__netdev_alloc_pcpu_stats(type, GFP_KERNEL)
+
 #include <linux/notifier.h>
 
 /* netdevice notifier chain. Please remember to update the rtnetlink
@@ -3854,6 +3857,11 @@ static inline bool netif_is_bridge_master(const struct net_device *dev)
 	return dev->priv_flags & IFF_EBRIDGE;
 }
 
+static inline bool netif_is_bridge_port(const struct net_device *dev)
+{
+	return dev->priv_flags & IFF_BRIDGE_PORT;
+}
+
 static inline bool netif_is_ovs_master(const struct net_device *dev)
 {
 	return dev->priv_flags & IFF_OPENVSWITCH;
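A short sketch of what the gfp-aware variant above buys a driver; the surrounding assignment is hypothetical:

/* Process context: the old helper keeps its GFP_KERNEL behaviour. */
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);

/* Atomic context: callers that cannot sleep can now pass their own flags. */
dev->tstats = __netdev_alloc_pcpu_stats(struct pcpu_sw_netstats, GFP_ATOMIC);
if (!dev->tstats)
	return -ENOMEM;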
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 48bb01edcf30..0e1f433cc4b7 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -421,7 +421,7 @@ extern void ip_set_free(void *members);
 extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr);
 extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
 extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
-			      size_t len);
+			      size_t len, size_t align);
 extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
 				 struct ip_set_ext *ext);
 
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
index 187feabe557c..5fcd375ef175 100644
--- a/include/linux/netfilter_ingress.h
+++ b/include/linux/netfilter_ingress.h
@@ -5,10 +5,13 @@
 #include <linux/netdevice.h>
 
 #ifdef CONFIG_NETFILTER_INGRESS
-static inline int nf_hook_ingress_active(struct sk_buff *skb)
+static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
 {
-	return nf_hook_list_active(&skb->dev->nf_hooks_ingress,
-				   NFPROTO_NETDEV, NF_NETDEV_INGRESS);
+#ifdef HAVE_JUMP_LABEL
+	if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
+		return false;
+#endif
+	return !list_empty(&skb->dev->nf_hooks_ingress);
 }
 
 static inline int nf_hook_ingress(struct sk_buff *skb)
@@ -16,8 +19,8 @@ static inline int nf_hook_ingress(struct sk_buff *skb)
 	struct nf_hook_state state;
 
 	nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
-			   NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL,
-			   skb->dev, NULL, dev_net(skb->dev), NULL);
+			   NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV,
+			   skb->dev, NULL, NULL, dev_net(skb->dev), NULL);
 	return nf_hook_slow(skb, &state);
 }
 
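The rewrite above checks a static key before touching the per-device hook list. The same pattern in isolation (my_key, my_hooks and my_hooks_active are illustrative, not from this patch):

static struct static_key my_key = STATIC_KEY_INIT_FALSE;
static LIST_HEAD(my_hooks);

static inline bool my_hooks_active(void)
{
#ifdef HAVE_JUMP_LABEL
	/* Compiled to a patchable NOP while my_key is disabled. */
	if (!static_key_false(&my_key))
		return false;
#endif
	return !list_empty(&my_hooks);
}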
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h
index 36112cdd665a..b90d8ec57c1f 100644
--- a/include/linux/of_dma.h
+++ b/include/linux/of_dma.h
@@ -80,7 +80,7 @@ static inline int of_dma_router_register(struct device_node *np,
 static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
 						     const char *name)
 {
-	return NULL;
+	return ERR_PTR(-ENODEV);
 }
 
 static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
diff --git a/include/linux/signal.h b/include/linux/signal.h
index ab1e0392b5ac..92557bbce7e7 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -239,7 +239,6 @@ extern int sigprocmask(int, sigset_t *, sigset_t *);
 extern void set_current_blocked(sigset_t *);
 extern void __set_current_blocked(const sigset_t *);
 extern int show_unhandled_signals;
-extern int sigsuspend(sigset_t *);
 
 struct sigaction {
 #ifndef __ARCH_HAS_IRIX_SIGACTION
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 7c82e3b307a3..2037a861e367 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -158,6 +158,24 @@ size_t ksize(const void *);
 #endif
 
 /*
+ * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
+ * Intended for arches that get misalignment faults even for 64 bit integer
+ * aligned buffers.
+ */
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
+/*
+ * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
+ * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
+ * aligned pointers.
+ */
+#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
+#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
+#define __assume_page_alignment __assume_aligned(PAGE_SIZE)
+
+/*
  * Kmalloc array related definitions
  */
 
@@ -286,8 +304,8 @@ static __always_inline int kmalloc_index(size_t size)
 }
 #endif /* !CONFIG_SLOB */
 
-void *__kmalloc(size_t size, gfp_t flags);
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
 void kmem_cache_free(struct kmem_cache *, void *);
 
 /*
@@ -298,11 +316,11 @@ void kmem_cache_free(struct kmem_cache *, void *);
  * Note that interrupts must be enabled when calling these functions.
  */
 void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 
 #ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
 #else
 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
@@ -316,12 +334,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
 #endif
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;
 
 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 					 gfp_t gfpflags,
-					 int node, size_t size);
+					 int node, size_t size) __assume_slab_alignment;
 #else
 static __always_inline void *
 kmem_cache_alloc_node_trace(struct kmem_cache *s,
@@ -354,10 +372,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 }
 #endif /* CONFIG_TRACING */
 
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
 
 #ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
 #else
 static __always_inline void *
 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
@@ -482,15 +500,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 	return __kmalloc_node(size, flags, node);
 }
 
-/*
- * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
- * Intended for arches that get misalignment faults even for 64 bit integer
- * aligned buffers.
- */
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
 struct memcg_cache_array {
 	struct rcu_head rcu;
 	struct kmem_cache *entries[0];
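With the prototype change above, callers should treat the result as an object count rather than a boolean; a hedged sketch against a hypothetical cache my_cache:

void *objs[16];
int n;

/* Returns the number of objects stored in objs[]; 0 means failure, and
 * any partially allocated objects have already been freed back. */
n = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
if (!n)
	return -ENOMEM;
/* ... use objs[0..n-1] ... */
kmem_cache_free_bulk(my_cache, n, objs);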
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 5b04b0a5375b..5e31f1b99037 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -607,7 +607,7 @@ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
 
 /* tty_audit.c */
 #ifdef CONFIG_AUDIT
-extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
+extern void tty_audit_add_data(struct tty_struct *tty, const void *data,
 			       size_t size, unsigned icanon);
 extern void tty_audit_exit(void);
 extern void tty_audit_fork(struct signal_struct *sig);
@@ -615,8 +615,8 @@ extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
 extern void tty_audit_push(struct tty_struct *tty);
 extern int tty_audit_push_current(void);
 #else
-static inline void tty_audit_add_data(struct tty_struct *tty,
-		unsigned char *data, size_t size, unsigned icanon)
+static inline void tty_audit_add_data(struct tty_struct *tty, const void *data,
+				      size_t size, unsigned icanon)
 {
 }
 static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index aaf9700fc9e5..fb961a576abe 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -167,7 +167,8 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
 
 static inline u32 rt6_get_cookie(const struct rt6_info *rt)
 {
-	if (rt->rt6i_flags & RTF_PCPU || unlikely(rt->dst.flags & DST_NOCACHE))
+	if (rt->rt6i_flags & RTF_PCPU ||
+	    (unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from))
 		rt = (struct rt6_info *)(rt->dst.from);
 
 	return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index aaee6fa02cf1..ff788b665277 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -90,11 +90,12 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
 	err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
 
 	if (net_xmit_eval(err) == 0) {
-		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+		struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
 		u64_stats_update_begin(&tstats->syncp);
 		tstats->tx_bytes += pkt_len;
 		tstats->tx_packets++;
 		u64_stats_update_end(&tstats->syncp);
+		put_cpu_ptr(tstats);
 	} else {
 		stats->tx_errors++;
 		stats->tx_aborted_errors++;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index f6dafec9102c..62a750a6a8f8 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -287,12 +287,13 @@ static inline void iptunnel_xmit_stats(int err,
 				       struct pcpu_sw_netstats __percpu *stats)
 {
 	if (err > 0) {
-		struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats);
+		struct pcpu_sw_netstats *tstats = get_cpu_ptr(stats);
 
 		u64_stats_update_begin(&tstats->syncp);
 		tstats->tx_bytes += err;
 		tstats->tx_packets++;
 		u64_stats_update_end(&tstats->syncp);
+		put_cpu_ptr(tstats);
 	} else if (err < 0) {
 		err_stats->tx_errors++;
 		err_stats->tx_aborted_errors++;
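Both tunnel helpers above move from this_cpu_ptr() to the get/put pair because they may run preemptible; get_cpu_ptr() pins the task to the current CPU for the duration of the statistics update. The pattern in isolation:

struct pcpu_sw_netstats *tstats;

tstats = get_cpu_ptr(stats);		/* disables preemption */
u64_stats_update_begin(&tstats->syncp);
tstats->tx_packets++;
u64_stats_update_end(&tstats->syncp);
put_cpu_ptr(tstats);			/* re-enables preemption */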
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index c9149cc0a02d..4bd7508bedc9 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -618,6 +618,8 @@ struct nft_expr_ops {
 	void				(*eval)(const struct nft_expr *expr,
 						struct nft_regs *regs,
 						const struct nft_pktinfo *pkt);
+	int				(*clone)(struct nft_expr *dst,
+						 const struct nft_expr *src);
 	unsigned int			size;
 
 	int				(*init)(const struct nft_ctx *ctx,
@@ -660,10 +662,20 @@ void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr);
 int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
 		  const struct nft_expr *expr);
 
-static inline void nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
+static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
 {
+	int err;
+
 	__module_get(src->ops->type->owner);
-	memcpy(dst, src, src->ops->size);
+	if (src->ops->clone) {
+		dst->ops = src->ops;
+		err = src->ops->clone(dst, src);
+		if (err < 0)
+			return err;
+	} else {
+		memcpy(dst, src, src->ops->size);
+	}
+	return 0;
 }
 
 /**
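An expression with state that must not be duplicated via memcpy() would implement the new callback along these lines; struct my_priv and my_expr_clone are illustrative only:

struct my_priv {
	atomic_t counter;	/* per-instance state; unsafe to memcpy() */
};

static int my_expr_clone(struct nft_expr *dst, const struct nft_expr *src)
{
	struct my_priv *new = nft_expr_priv(dst);
	const struct my_priv *old = nft_expr_priv(src);

	/* Re-initialise the atomic instead of copying it wholesale. */
	atomic_set(&new->counter, atomic_read(&old->counter));
	return 0;
}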
diff --git a/include/net/sock.h b/include/net/sock.h
index bbf7c2cf15b4..7f89e4ba18d1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2226,6 +2226,31 @@ static inline bool sk_listener(const struct sock *sk)
 	return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
 }
 
+/**
+ * sk_state_load - read sk->sk_state for lockless contexts
+ * @sk: socket pointer
+ *
+ * Paired with sk_state_store(). Used in places we do not hold socket lock:
+ * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
+ */
+static inline int sk_state_load(const struct sock *sk)
+{
+	return smp_load_acquire(&sk->sk_state);
+}
+
+/**
+ * sk_state_store - update sk->sk_state
+ * @sk: socket pointer
+ * @newstate: new state
+ *
+ * Paired with sk_state_load(). Should be used in contexts where
+ * state change might impact lockless readers.
+ */
+static inline void sk_state_store(struct sock *sk, int newstate)
+{
+	smp_store_release(&sk->sk_state, newstate);
+}
+
 void sock_enable_timestamp(struct sock *sk, int flag);
 int sock_get_timestamp(struct sock *, struct timeval __user *);
 int sock_get_timestampns(struct sock *, struct timespec __user *);
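A minimal illustration of the intended pairing; report_listener() is a stand-in for whatever a lockless reader does:

/* Writer side, under the socket lock: */
sk_state_store(sk, TCP_ESTABLISHED);	/* smp_store_release() publishes prior writes */

/* Lockless reader, e.g. a diag or /proc dumper: */
if (sk_state_load(sk) == TCP_LISTEN)	/* smp_load_acquire() pairs with the store */
	report_listener(sk);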
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index bc865e244efe..1d22ce9f352e 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -323,7 +323,7 @@ static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
 					  struct net_device *filter_dev,
 					  int idx)
 {
-	return -EOPNOTSUPP;
+	return idx;
 }
 
 static inline void switchdev_port_fwd_mark_set(struct net_device *dev,
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 6e5344112419..db545cbcdb89 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -294,6 +294,12 @@ static int klp_write_object_relocations(struct module *pmod,
 
 	for (reloc = obj->relocs; reloc->name; reloc++) {
 		if (!klp_is_module(obj)) {
+
+#if defined(CONFIG_RANDOMIZE_BASE)
+			/* If KASLR has been enabled, adjust old value accordingly */
+			if (kaslr_enabled())
+				reloc->val += kaslr_offset();
+#endif
 			ret = klp_verify_vmlinux_symbol(reloc->name,
 							reloc->val);
 			if (ret)
diff --git a/kernel/panic.c b/kernel/panic.c
index 4579dbb7ed87..4b150bc0c6c1 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -152,8 +152,11 @@ void panic(const char *fmt, ...)
 	 * We may have ended up stopping the CPU holding the lock (in
 	 * smp_send_stop()) while still having some valuable data in the console
 	 * buffer. Try to acquire the lock then release it regardless of the
-	 * result. The release will also print the buffers out.
+	 * result. The release will also print the buffers out. Locks debug
+	 * should be disabled to avoid reporting bad unlock balance when
+	 * panic() is not being called from OOPS.
 	 */
+	debug_locks_off();
 	console_trylock();
 	console_unlock();
 
diff --git a/kernel/signal.c b/kernel/signal.c
index c0b01fe24bbd..f3f1f7a972fd 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3503,7 +3503,7 @@ SYSCALL_DEFINE0(pause)
 
 #endif
 
-int sigsuspend(sigset_t *set)
+static int sigsuspend(sigset_t *set)
 {
 	current->saved_sigmask = current->blocked;
 	set_current_blocked(set);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c29ddebc8705..62fe06bb7d04 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2009,7 +2009,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
 		/*
 		 * Be somewhat over-protective like KSM for now!
 		 */
-		if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
+		if (*vm_flags & VM_NO_THP)
 			return -EINVAL;
 		*vm_flags &= ~VM_NOHUGEPAGE;
 		*vm_flags |= VM_HUGEPAGE;
@@ -2025,7 +2025,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
 		/*
 		 * Be somewhat over-protective like KSM for now!
 		 */
-		if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
+		if (*vm_flags & VM_NO_THP)
 			return -EINVAL;
 		*vm_flags &= ~VM_HUGEPAGE;
 		*vm_flags |= VM_NOHUGEPAGE;
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index d41b21bce6a0..bc0a8d8b8f42 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -19,6 +19,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/kmemleak.h>
 #include <linux/memblock.h>
 #include <linux/memory.h>
 #include <linux/mm.h>
@@ -444,6 +445,7 @@ int kasan_module_alloc(void *addr, size_t size)
 
 	if (ret) {
 		find_vm_area(addr)->flags |= VM_KASAN;
+		kmemleak_ignore(ret);
 		return 0;
 	}
 
diff --git a/mm/memory.c b/mm/memory.c
index deb679c31f2a..c387430f06c3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3015,9 +3015,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		} else {
 			/*
 			 * The fault handler has no page to lock, so it holds
-			 * i_mmap_lock for write to protect against truncate.
+			 * i_mmap_lock for read to protect against truncate.
 			 */
-			i_mmap_unlock_write(vma->vm_file->f_mapping);
+			i_mmap_unlock_read(vma->vm_file->f_mapping);
 		}
 		goto uncharge_out;
 	}
@@ -3031,9 +3031,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		} else {
 			/*
 			 * The fault handler has no page to lock, so it holds
-			 * i_mmap_lock for write to protect against truncate.
+			 * i_mmap_lock for read to protect against truncate.
 			 */
-			i_mmap_unlock_write(vma->vm_file->f_mapping);
+			i_mmap_unlock_read(vma->vm_file->f_mapping);
 		}
 		return ret;
 uncharge_out:
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2c90357c34ea..3e4d65445fa7 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1542,7 +1542,9 @@ static void balance_dirty_pages(struct address_space *mapping,
 	for (;;) {
 		unsigned long now = jiffies;
 		unsigned long dirty, thresh, bg_thresh;
-		unsigned long m_dirty, m_thresh, m_bg_thresh;
+		unsigned long m_dirty = 0;	/* stop bogus uninit warnings */
+		unsigned long m_thresh = 0;
+		unsigned long m_bg_thresh = 0;
 
 		/*
 		 * Unstable writes are a feature of certain networked
diff --git a/mm/slab.c b/mm/slab.c
index e0819fa96559..4765c97ce690 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3419,7 +3419,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
-bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 								void **p)
 {
 	return __kmem_cache_alloc_bulk(s, flags, size, p);
diff --git a/mm/slab.h b/mm/slab.h
index 27492eb678f7..7b6087197997 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -170,7 +170,7 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
  * may be allocated or freed using these operations.
  */
 void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-bool __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 
 #ifdef CONFIG_MEMCG_KMEM
 /*
diff --git a/mm/slab_common.c b/mm/slab_common.c
index d88e97c10a2e..3c6a86b4ec25 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -112,7 +112,7 @@ void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
 		kmem_cache_free(s, p[i]);
 }
 
-bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
+int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 								void **p)
 {
 	size_t i;
@@ -121,10 +121,10 @@ bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 		void *x = p[i] = kmem_cache_alloc(s, flags);
 		if (!x) {
 			__kmem_cache_free_bulk(s, i, p);
-			return false;
+			return 0;
 		}
 	}
-	return true;
+	return i;
 }
 
 #ifdef CONFIG_MEMCG_KMEM
diff --git a/mm/slob.c b/mm/slob.c
index 0d7e5df74d1f..17e8f8cc7c53 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -617,7 +617,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
-bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 								void **p)
 {
 	return __kmem_cache_alloc_bulk(s, flags, size, p);
diff --git a/mm/slub.c b/mm/slub.c
index 7cb4bf9ae320..46997517406e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1065,11 +1065,15 @@ bad:
1065 return 0; 1065 return 0;
1066} 1066}
1067 1067
1068/* Supports checking bulk free of a constructed freelist */
1068static noinline struct kmem_cache_node *free_debug_processing( 1069static noinline struct kmem_cache_node *free_debug_processing(
1069 struct kmem_cache *s, struct page *page, void *object, 1070 struct kmem_cache *s, struct page *page,
1071 void *head, void *tail, int bulk_cnt,
1070 unsigned long addr, unsigned long *flags) 1072 unsigned long addr, unsigned long *flags)
1071{ 1073{
1072 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1074 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1075 void *object = head;
1076 int cnt = 0;
1073 1077
1074 spin_lock_irqsave(&n->list_lock, *flags); 1078 spin_lock_irqsave(&n->list_lock, *flags);
1075 slab_lock(page); 1079 slab_lock(page);
@@ -1077,6 +1081,9 @@ static noinline struct kmem_cache_node *free_debug_processing(
1077 if (!check_slab(s, page)) 1081 if (!check_slab(s, page))
1078 goto fail; 1082 goto fail;
1079 1083
1084next_object:
1085 cnt++;
1086
1080 if (!check_valid_pointer(s, page, object)) { 1087 if (!check_valid_pointer(s, page, object)) {
1081 slab_err(s, page, "Invalid object pointer 0x%p", object); 1088 slab_err(s, page, "Invalid object pointer 0x%p", object);
1082 goto fail; 1089 goto fail;
@@ -1107,8 +1114,19 @@ static noinline struct kmem_cache_node *free_debug_processing(
1107 if (s->flags & SLAB_STORE_USER) 1114 if (s->flags & SLAB_STORE_USER)
1108 set_track(s, object, TRACK_FREE, addr); 1115 set_track(s, object, TRACK_FREE, addr);
1109 trace(s, page, object, 0); 1116 trace(s, page, object, 0);
1117 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
1110 init_object(s, object, SLUB_RED_INACTIVE); 1118 init_object(s, object, SLUB_RED_INACTIVE);
1119
1120 /* Reached end of constructed freelist yet? */
1121 if (object != tail) {
1122 object = get_freepointer(s, object);
1123 goto next_object;
1124 }
1111out: 1125out:
1126 if (cnt != bulk_cnt)
1127 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
1128 bulk_cnt, cnt);
1129
1112 slab_unlock(page); 1130 slab_unlock(page);
1113 /* 1131 /*
1114 * Keep node_lock to preserve integrity 1132 * Keep node_lock to preserve integrity
@@ -1204,7 +1222,7 @@ unsigned long kmem_cache_flags(unsigned long object_size,
1204 1222
1205 return flags; 1223 return flags;
1206} 1224}
1207#else 1225#else /* !CONFIG_SLUB_DEBUG */
1208static inline void setup_object_debug(struct kmem_cache *s, 1226static inline void setup_object_debug(struct kmem_cache *s,
1209 struct page *page, void *object) {} 1227 struct page *page, void *object) {}
1210 1228
@@ -1212,7 +1230,8 @@ static inline int alloc_debug_processing(struct kmem_cache *s,
1212 struct page *page, void *object, unsigned long addr) { return 0; } 1230 struct page *page, void *object, unsigned long addr) { return 0; }
1213 1231
1214static inline struct kmem_cache_node *free_debug_processing( 1232static inline struct kmem_cache_node *free_debug_processing(
1215 struct kmem_cache *s, struct page *page, void *object, 1233 struct kmem_cache *s, struct page *page,
1234 void *head, void *tail, int bulk_cnt,
1216 unsigned long addr, unsigned long *flags) { return NULL; } 1235 unsigned long addr, unsigned long *flags) { return NULL; }
1217 1236
1218static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 1237static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
@@ -1273,14 +1292,21 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
1273 return memcg_kmem_get_cache(s, flags); 1292 return memcg_kmem_get_cache(s, flags);
1274} 1293}
1275 1294
1276static inline void slab_post_alloc_hook(struct kmem_cache *s, 1295static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
1277 gfp_t flags, void *object) 1296 size_t size, void **p)
1278{ 1297{
1298 size_t i;
1299
1279 flags &= gfp_allowed_mask; 1300 flags &= gfp_allowed_mask;
1280 kmemcheck_slab_alloc(s, flags, object, slab_ksize(s)); 1301 for (i = 0; i < size; i++) {
1281 kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags); 1302 void *object = p[i];
1303
1304 kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
1305 kmemleak_alloc_recursive(object, s->object_size, 1,
1306 s->flags, flags);
1307 kasan_slab_alloc(s, object);
1308 }
1282 memcg_kmem_put_cache(s); 1309 memcg_kmem_put_cache(s);
1283 kasan_slab_alloc(s, object);
1284} 1310}
1285 1311
1286static inline void slab_free_hook(struct kmem_cache *s, void *x) 1312static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -1308,6 +1334,29 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
1308 kasan_slab_free(s, x); 1334 kasan_slab_free(s, x);
1309} 1335}
1310 1336
1337static inline void slab_free_freelist_hook(struct kmem_cache *s,
1338 void *head, void *tail)
1339{
1340/*
1341 * Compiler cannot detect this function can be removed if slab_free_hook()
1342 * evaluates to nothing. Thus, catch all relevant config debug options here.
1343 */
1344#if defined(CONFIG_KMEMCHECK) || \
1345 defined(CONFIG_LOCKDEP) || \
1346 defined(CONFIG_DEBUG_KMEMLEAK) || \
1347 defined(CONFIG_DEBUG_OBJECTS_FREE) || \
1348 defined(CONFIG_KASAN)
1349
1350 void *object = head;
1351 void *tail_obj = tail ? : head;
1352
1353 do {
1354 slab_free_hook(s, object);
1355 } while ((object != tail_obj) &&
1356 (object = get_freepointer(s, object)));
1357#endif
1358}
1359
1311static void setup_object(struct kmem_cache *s, struct page *page, 1360static void setup_object(struct kmem_cache *s, struct page *page,
1312 void *object) 1361 void *object)
1313{ 1362{
@@ -2295,23 +2344,15 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2295 * And if we were unable to get a new slab from the partial slab lists then 2344 * And if we were unable to get a new slab from the partial slab lists then
2296 * we need to allocate a new slab. This is the slowest path since it involves 2345 * we need to allocate a new slab. This is the slowest path since it involves
2297 * a call to the page allocator and the setup of a new slab. 2346 * a call to the page allocator and the setup of a new slab.
2347 *
2348 * Version of __slab_alloc to use when we know that interrupts are
2349 * already disabled (which is the case for bulk allocation).
2298 */ 2350 */
2299static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 2351static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2300 unsigned long addr, struct kmem_cache_cpu *c) 2352 unsigned long addr, struct kmem_cache_cpu *c)
2301{ 2353{
2302 void *freelist; 2354 void *freelist;
2303 struct page *page; 2355 struct page *page;
2304 unsigned long flags;
2305
2306 local_irq_save(flags);
2307#ifdef CONFIG_PREEMPT
2308 /*
2309 * We may have been preempted and rescheduled on a different
2310 * cpu before disabling interrupts. Need to reload cpu area
2311 * pointer.
2312 */
2313 c = this_cpu_ptr(s->cpu_slab);
2314#endif
2315 2356
2316 page = c->page; 2357 page = c->page;
2317 if (!page) 2358 if (!page)
@@ -2369,7 +2410,6 @@ load_freelist:
2369 VM_BUG_ON(!c->page->frozen); 2410 VM_BUG_ON(!c->page->frozen);
2370 c->freelist = get_freepointer(s, freelist); 2411 c->freelist = get_freepointer(s, freelist);
2371 c->tid = next_tid(c->tid); 2412 c->tid = next_tid(c->tid);
2372 local_irq_restore(flags);
2373 return freelist; 2413 return freelist;
2374 2414
2375new_slab: 2415new_slab:
@@ -2386,7 +2426,6 @@ new_slab:
2386 2426
2387 if (unlikely(!freelist)) { 2427 if (unlikely(!freelist)) {
2388 slab_out_of_memory(s, gfpflags, node); 2428 slab_out_of_memory(s, gfpflags, node);
2389 local_irq_restore(flags);
2390 return NULL; 2429 return NULL;
2391 } 2430 }
2392 2431
@@ -2402,11 +2441,35 @@ new_slab:
2402 deactivate_slab(s, page, get_freepointer(s, freelist)); 2441 deactivate_slab(s, page, get_freepointer(s, freelist));
2403 c->page = NULL; 2442 c->page = NULL;
2404 c->freelist = NULL; 2443 c->freelist = NULL;
2405 local_irq_restore(flags);
2406 return freelist; 2444 return freelist;
2407} 2445}
2408 2446
2409/* 2447/*
2448 * Another one that disabled interrupt and compensates for possible
2449 * cpu changes by refetching the per cpu area pointer.
2450 */
2451static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2452 unsigned long addr, struct kmem_cache_cpu *c)
2453{
2454 void *p;
2455 unsigned long flags;
2456
2457 local_irq_save(flags);
2458#ifdef CONFIG_PREEMPT
2459 /*
2460 * We may have been preempted and rescheduled on a different
2461 * cpu before disabling interrupts. Need to reload cpu area
2462 * pointer.
2463 */
2464 c = this_cpu_ptr(s->cpu_slab);
2465#endif
2466
2467 p = ___slab_alloc(s, gfpflags, node, addr, c);
2468 local_irq_restore(flags);
2469 return p;
2470}
2471
2472/*
2410 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 2473 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2411 * have the fastpath folded into their functions. So no function call 2474 * have the fastpath folded into their functions. So no function call
2412 * overhead for requests that can be satisfied on the fastpath. 2475 * overhead for requests that can be satisfied on the fastpath.
@@ -2419,7 +2482,7 @@ new_slab:
2419static __always_inline void *slab_alloc_node(struct kmem_cache *s, 2482static __always_inline void *slab_alloc_node(struct kmem_cache *s,
2420 gfp_t gfpflags, int node, unsigned long addr) 2483 gfp_t gfpflags, int node, unsigned long addr)
2421{ 2484{
2422 void **object; 2485 void *object;
2423 struct kmem_cache_cpu *c; 2486 struct kmem_cache_cpu *c;
2424 struct page *page; 2487 struct page *page;
2425 unsigned long tid; 2488 unsigned long tid;
@@ -2498,7 +2561,7 @@ redo:
2498 if (unlikely(gfpflags & __GFP_ZERO) && object) 2561 if (unlikely(gfpflags & __GFP_ZERO) && object)
2499 memset(object, 0, s->object_size); 2562 memset(object, 0, s->object_size);
2500 2563
2501 slab_post_alloc_hook(s, gfpflags, object); 2564 slab_post_alloc_hook(s, gfpflags, 1, &object);
2502 2565
2503 return object; 2566 return object;
2504} 2567}
@@ -2569,10 +2632,11 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
2569 * handling required then we can return immediately. 2632 * handling required then we can return immediately.
2570 */ 2633 */
2571static void __slab_free(struct kmem_cache *s, struct page *page, 2634static void __slab_free(struct kmem_cache *s, struct page *page,
2572 void *x, unsigned long addr) 2635 void *head, void *tail, int cnt,
2636 unsigned long addr)
2637
2573{ 2638{
2574 void *prior; 2639 void *prior;
2575 void **object = (void *)x;
2576 int was_frozen; 2640 int was_frozen;
2577 struct page new; 2641 struct page new;
2578 unsigned long counters; 2642 unsigned long counters;
@@ -2582,7 +2646,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
2582 stat(s, FREE_SLOWPATH); 2646 stat(s, FREE_SLOWPATH);
2583 2647
2584 if (kmem_cache_debug(s) && 2648 if (kmem_cache_debug(s) &&
2585 !(n = free_debug_processing(s, page, x, addr, &flags))) 2649 !(n = free_debug_processing(s, page, head, tail, cnt,
2650 addr, &flags)))
2586 return; 2651 return;
2587 2652
2588 do { 2653 do {
@@ -2592,10 +2657,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
2592 } 2657 }
2593 prior = page->freelist; 2658 prior = page->freelist;
2594 counters = page->counters; 2659 counters = page->counters;
2595 set_freepointer(s, object, prior); 2660 set_freepointer(s, tail, prior);
2596 new.counters = counters; 2661 new.counters = counters;
2597 was_frozen = new.frozen; 2662 was_frozen = new.frozen;
2598 new.inuse--; 2663 new.inuse -= cnt;
2599 if ((!new.inuse || !prior) && !was_frozen) { 2664 if ((!new.inuse || !prior) && !was_frozen) {
2600 2665
2601 if (kmem_cache_has_cpu_partial(s) && !prior) { 2666 if (kmem_cache_has_cpu_partial(s) && !prior) {
@@ -2626,7 +2691,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
2626 2691
2627 } while (!cmpxchg_double_slab(s, page, 2692 } while (!cmpxchg_double_slab(s, page,
2628 prior, counters, 2693 prior, counters,
2629 object, new.counters, 2694 head, new.counters,
2630 "__slab_free")); 2695 "__slab_free"));
2631 2696
2632 if (likely(!n)) { 2697 if (likely(!n)) {
@@ -2691,15 +2756,20 @@ slab_empty:
2691 * 2756 *
2692 * If fastpath is not possible then fall back to __slab_free where we deal 2757 * If fastpath is not possible then fall back to __slab_free where we deal
2693 * with all sorts of special processing. 2758 * with all sorts of special processing.
2759 *
2760 * Bulk free of a freelist with several objects (all pointing to the
2761 * same page) is possible by specifying head and tail pointers plus an
2762 * object count (cnt). A bulk free is indicated by a set tail pointer.
2694 */ 2763 */
2695static __always_inline void slab_free(struct kmem_cache *s, 2764static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
2696 struct page *page, void *x, unsigned long addr) 2765 void *head, void *tail, int cnt,
2766 unsigned long addr)
2697{ 2767{
2698 void **object = (void *)x; 2768 void *tail_obj = tail ? : head;
2699 struct kmem_cache_cpu *c; 2769 struct kmem_cache_cpu *c;
2700 unsigned long tid; 2770 unsigned long tid;
2701 2771
2702 slab_free_hook(s, x); 2772 slab_free_freelist_hook(s, head, tail);
2703 2773
2704redo: 2774redo:
2705 /* 2775 /*
@@ -2718,19 +2788,19 @@ redo:
2718 barrier(); 2788 barrier();
2719 2789
2720 if (likely(page == c->page)) { 2790 if (likely(page == c->page)) {
2721 set_freepointer(s, object, c->freelist); 2791 set_freepointer(s, tail_obj, c->freelist);
2722 2792
2723 if (unlikely(!this_cpu_cmpxchg_double( 2793 if (unlikely(!this_cpu_cmpxchg_double(
2724 s->cpu_slab->freelist, s->cpu_slab->tid, 2794 s->cpu_slab->freelist, s->cpu_slab->tid,
2725 c->freelist, tid, 2795 c->freelist, tid,
2726 object, next_tid(tid)))) { 2796 head, next_tid(tid)))) {
2727 2797
2728 note_cmpxchg_failure("slab_free", s, tid); 2798 note_cmpxchg_failure("slab_free", s, tid);
2729 goto redo; 2799 goto redo;
2730 } 2800 }
2731 stat(s, FREE_FASTPATH); 2801 stat(s, FREE_FASTPATH);
2732 } else 2802 } else
2733 __slab_free(s, page, x, addr); 2803 __slab_free(s, page, head, tail_obj, cnt, addr);
2734 2804
2735} 2805}
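For reference, the calling convention the reworked slab_free() expects: tail == NULL with cnt == 1 denotes a single-object free, while a bulk free passes a freelist already linked from head to tail plus its object count. The two call shapes, as they appear in this patch's callers:

	/* Single-object free: tail is NULL, cnt is 1. */
	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);

	/* Bulk free: head -> ... -> tail already linked through the objects'
	 * free pointers, cnt objects, all on the same page.
	 */
	slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);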
2736 2806
@@ -2739,59 +2809,116 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
2739 s = cache_from_obj(s, x); 2809 s = cache_from_obj(s, x);
2740 if (!s) 2810 if (!s)
2741 return; 2811 return;
2742 slab_free(s, virt_to_head_page(x), x, _RET_IP_); 2812 slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
2743 trace_kmem_cache_free(_RET_IP_, x); 2813 trace_kmem_cache_free(_RET_IP_, x);
2744} 2814}
2745EXPORT_SYMBOL(kmem_cache_free); 2815EXPORT_SYMBOL(kmem_cache_free);
2746 2816
2747/* Note that interrupts must be enabled when calling this function. */ 2817struct detached_freelist {
2748void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
2749{
2750 struct kmem_cache_cpu *c;
2751 struct page *page; 2818 struct page *page;
2752 int i; 2819 void *tail;
2820 void *freelist;
2821 int cnt;
2822};
2753 2823
2754 local_irq_disable(); 2824/*
 2755 c = this_cpu_ptr(s->cpu_slab); 2825 * This function progressively scans the array of free objects (with
 2826 * a limited look ahead) and extracts objects belonging to the same
 2827 * page. It builds a detached freelist directly within the given
 2828 * page/objects. This can happen without any need for
 2829 * synchronization, because the objects are owned by the running process.
 2830 * The freelist is built up as a singly linked list in the objects.
 2831 * The idea is that this detached freelist can then be bulk
 2832 * transferred to the real freelist(s), requiring only a single
 2833 * synchronization primitive. Look ahead in the array is limited
 2834 * for performance reasons.
2835 */
2836static int build_detached_freelist(struct kmem_cache *s, size_t size,
2837 void **p, struct detached_freelist *df)
2838{
2839 size_t first_skipped_index = 0;
2840 int lookahead = 3;
2841 void *object;
2756 2842
2757 for (i = 0; i < size; i++) { 2843 /* Always re-init detached_freelist */
2758 void *object = p[i]; 2844 df->page = NULL;
2759 2845
2760 BUG_ON(!object); 2846 do {
2761 /* kmem cache debug support */ 2847 object = p[--size];
2762 s = cache_from_obj(s, object); 2848 } while (!object && size);
2763 if (unlikely(!s))
2764 goto exit;
2765 slab_free_hook(s, object);
2766 2849
2767 page = virt_to_head_page(object); 2850 if (!object)
2851 return 0;
2768 2852
2769 if (c->page == page) { 2853 /* Start new detached freelist */
2770 /* Fastpath: local CPU free */ 2854 set_freepointer(s, object, NULL);
2771 set_freepointer(s, object, c->freelist); 2855 df->page = virt_to_head_page(object);
2772 c->freelist = object; 2856 df->tail = object;
2773 } else { 2857 df->freelist = object;
2774 c->tid = next_tid(c->tid); 2858 p[size] = NULL; /* mark object processed */
2775 local_irq_enable(); 2859 df->cnt = 1;
2776 /* Slowpath: overhead locked cmpxchg_double_slab */ 2860
2777 __slab_free(s, page, object, _RET_IP_); 2861 while (size) {
2778 local_irq_disable(); 2862 object = p[--size];
2779 c = this_cpu_ptr(s->cpu_slab); 2863 if (!object)
2864 continue; /* Skip processed objects */
2865
2866 /* df->page is always set at this point */
2867 if (df->page == virt_to_head_page(object)) {
 2868 /* Opportunity to build the freelist */
2869 set_freepointer(s, object, df->freelist);
2870 df->freelist = object;
2871 df->cnt++;
2872 p[size] = NULL; /* mark object processed */
2873
2874 continue;
2780 } 2875 }
2876
2877 /* Limit look ahead search */
2878 if (!--lookahead)
2879 break;
2880
2881 if (!first_skipped_index)
2882 first_skipped_index = size + 1;
2781 } 2883 }
2782exit: 2884
2783 c->tid = next_tid(c->tid); 2885 return first_skipped_index;
2784 local_irq_enable(); 2886}
2887
2888
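The grouping is easiest to see outside the kernel. Below is a minimal, self-contained userspace model of the same look-ahead algorithm; PAGE_OF() stands in for virt_to_head_page(), the free pointer lives in the first word of each object as in SLUB's default layout, and every name is invented for the example:

#include <stdio.h>
#include <stddef.h>

#define PAGE_OF(p)	((unsigned long)(p) & ~0xfffUL)

struct detached_freelist {
	unsigned long page;
	void *tail;
	void *freelist;
	int cnt;
};

static void set_freepointer(void *object, void *fp)
{
	*(void **)object = fp;
}

static size_t build_detached_freelist(size_t size, void **p,
				      struct detached_freelist *df)
{
	size_t first_skipped_index = 0;
	int lookahead = 3;
	void *object;

	df->page = 0;		/* always re-init the detached freelist */
	do {
		object = p[--size];
	} while (!object && size);

	if (!object)
		return 0;

	/* Start a new detached freelist at the last unprocessed object. */
	set_freepointer(object, NULL);
	df->page = PAGE_OF(object);
	df->tail = object;
	df->freelist = object;
	p[size] = NULL;		/* mark object processed */
	df->cnt = 1;

	while (size) {
		object = p[--size];
		if (!object)
			continue;	/* skip already-processed slots */

		if (df->page == PAGE_OF(object)) {
			/* Same page: link it into the freelist. */
			set_freepointer(object, df->freelist);
			df->freelist = object;
			df->cnt++;
			p[size] = NULL;
			continue;
		}

		/* Different page: burn one unit of look-ahead. */
		if (!--lookahead)
			break;

		if (!first_skipped_index)
			first_skipped_index = size + 1;
	}

	return first_skipped_index;	/* where the caller resumes */
}

int main(void)
{
	/* Six fake objects spread across three page-aligned "pages". */
	static char pool[3][4096] __attribute__((aligned(4096)));
	void *p[6] = { pool[0], pool[1] + 64, pool[0] + 64,
		       pool[2], pool[0] + 128, pool[1] };
	size_t size = 6;

	while (size) {
		struct detached_freelist df;

		size = build_detached_freelist(size, p, &df);
		if (df.page)
			printf("page %#lx: %d object(s)\n", df.page, df.cnt);
	}
	return 0;
}

Running this frees the six objects in four page groups; the two objects on pool[1] land in separate groups because the look-ahead window (three) expires before the second one is reached.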
2889/* Note that interrupts must be enabled when calling this function. */
2890void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
2891{
2892 if (WARN_ON(!size))
2893 return;
2894
2895 do {
2896 struct detached_freelist df;
2897 struct kmem_cache *s;
2898
2899 /* Support for memcg */
2900 s = cache_from_obj(orig_s, p[size - 1]);
2901
2902 size = build_detached_freelist(s, size, p, &df);
2903 if (unlikely(!df.page))
2904 continue;
2905
2906 slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
2907 } while (likely(size));
2785} 2908}
2786EXPORT_SYMBOL(kmem_cache_free_bulk); 2909EXPORT_SYMBOL(kmem_cache_free_bulk);
2787 2910
2788/* Note that interrupts must be enabled when calling this function. */ 2911/* Note that interrupts must be enabled when calling this function. */
2789bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 2912int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
2790 void **p) 2913 void **p)
2791{ 2914{
2792 struct kmem_cache_cpu *c; 2915 struct kmem_cache_cpu *c;
2793 int i; 2916 int i;
2794 2917
2918 /* memcg and kmem_cache debug support */
2919 s = slab_pre_alloc_hook(s, flags);
2920 if (unlikely(!s))
2921 return false;
2795 /* 2922 /*
2796 * Drain objects in the per cpu slab, while disabling local 2923 * Drain objects in the per cpu slab, while disabling local
2797 * IRQs, which protects against PREEMPT and interrupts 2924 * IRQs, which protects against PREEMPT and interrupts
@@ -2804,36 +2931,20 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
2804 void *object = c->freelist; 2931 void *object = c->freelist;
2805 2932
2806 if (unlikely(!object)) { 2933 if (unlikely(!object)) {
2807 local_irq_enable();
2808 /* 2934 /*
 2809 * Invoking slow path likely has the side-effect 2935 * Invoking slow path likely has the side-effect
2810 * of re-populating per CPU c->freelist 2936 * of re-populating per CPU c->freelist
2811 */ 2937 */
2812 p[i] = __slab_alloc(s, flags, NUMA_NO_NODE, 2938 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
2813 _RET_IP_, c); 2939 _RET_IP_, c);
2814 if (unlikely(!p[i])) { 2940 if (unlikely(!p[i]))
2815 __kmem_cache_free_bulk(s, i, p); 2941 goto error;
2816 return false; 2942
2817 }
2818 local_irq_disable();
2819 c = this_cpu_ptr(s->cpu_slab); 2943 c = this_cpu_ptr(s->cpu_slab);
2820 continue; /* goto for-loop */ 2944 continue; /* goto for-loop */
2821 } 2945 }
2822
2823 /* kmem_cache debug support */
2824 s = slab_pre_alloc_hook(s, flags);
2825 if (unlikely(!s)) {
2826 __kmem_cache_free_bulk(s, i, p);
2827 c->tid = next_tid(c->tid);
2828 local_irq_enable();
2829 return false;
2830 }
2831
2832 c->freelist = get_freepointer(s, object); 2946 c->freelist = get_freepointer(s, object);
2833 p[i] = object; 2947 p[i] = object;
2834
2835 /* kmem_cache debug support */
2836 slab_post_alloc_hook(s, flags, object);
2837 } 2948 }
2838 c->tid = next_tid(c->tid); 2949 c->tid = next_tid(c->tid);
2839 local_irq_enable(); 2950 local_irq_enable();
@@ -2846,7 +2957,14 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
2846 memset(p[j], 0, s->object_size); 2957 memset(p[j], 0, s->object_size);
2847 } 2958 }
2848 2959
2849 return true; 2960 /* memcg and kmem_cache debug support */
2961 slab_post_alloc_hook(s, flags, size, p);
2962 return i;
2963error:
2964 local_irq_enable();
2965 slab_post_alloc_hook(s, flags, i, p);
2966 __kmem_cache_free_bulk(s, i, p);
2967 return 0;
2850} 2968}
 2851EXPORT_SYMBOL(kmem_cache_alloc_bulk); 2969EXPORT_SYMBOL(kmem_cache_alloc_bulk);
2852 2970
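A hedged sketch of how a caller might drive the bulk pair after this change: kmem_cache_alloc_bulk() now returns the number of objects allocated (the full count, or 0 on failure, with any partial allocation already freed internally) instead of a bool. Kernel context assumed; the cache name and sizes are illustrative:

/* Sketch only: assumes linux/slab.h; "example_cache" is made up. */
static int bulk_example(void)
{
	struct kmem_cache *cachep;
	void *objs[16];

	cachep = kmem_cache_create("example_cache", 64, 0, 0, NULL);
	if (!cachep)
		return -ENOMEM;

	/* Interrupts must be enabled around both bulk calls. */
	if (kmem_cache_alloc_bulk(cachep, GFP_KERNEL, 16, objs)) {
		/* ... use all 16 objects ... */
		kmem_cache_free_bulk(cachep, 16, objs);
	}

	kmem_cache_destroy(cachep);
	return 0;
}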
@@ -3511,7 +3629,7 @@ void kfree(const void *x)
3511 __free_kmem_pages(page, compound_order(page)); 3629 __free_kmem_pages(page, compound_order(page));
3512 return; 3630 return;
3513 } 3631 }
3514 slab_free(page->slab_cache, page, object, _RET_IP_); 3632 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
3515} 3633}
 3516EXPORT_SYMBOL(kfree); 3634EXPORT_SYMBOL(kfree);
3517 3635
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d04563480c94..8e3c9c5a3042 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1443,7 +1443,6 @@ struct vm_struct *remove_vm_area(const void *addr)
1443 vmap_debug_free_range(va->va_start, va->va_end); 1443 vmap_debug_free_range(va->va_start, va->va_end);
1444 kasan_free_shadow(vm); 1444 kasan_free_shadow(vm);
1445 free_unmap_vmap_area(va); 1445 free_unmap_vmap_area(va);
1446 vm->size -= PAGE_SIZE;
1447 1446
1448 return vm; 1447 return vm;
1449 } 1448 }
@@ -1468,8 +1467,8 @@ static void __vunmap(const void *addr, int deallocate_pages)
1468 return; 1467 return;
1469 } 1468 }
1470 1469
1471 debug_check_no_locks_freed(addr, area->size); 1470 debug_check_no_locks_freed(addr, get_vm_area_size(area));
1472 debug_check_no_obj_freed(addr, area->size); 1471 debug_check_no_obj_freed(addr, get_vm_area_size(area));
1473 1472
1474 if (deallocate_pages) { 1473 if (deallocate_pages) {
1475 int i; 1474 int i;
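The two vmalloc.c hunks are one fix: remove_vm_area() stops shrinking vm->size by a page, and __vunmap() switches its debug checks to get_vm_area_size(), which already subtracts the guard page. For reference, the helper is roughly this (from include/linux/vmalloc.h of this era):

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return the usable size, without the guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}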
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 496b27588493..e2ed69850489 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -30,7 +30,9 @@ bool vlan_do_receive(struct sk_buff **skbp)
30 skb->pkt_type = PACKET_HOST; 30 skb->pkt_type = PACKET_HOST;
31 } 31 }
32 32
33 if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) { 33 if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
34 !netif_is_macvlan_port(vlan_dev) &&
35 !netif_is_bridge_port(vlan_dev)) {
34 unsigned int offset = skb->data - skb_mac_header(skb); 36 unsigned int offset = skb->data - skb_mac_header(skb);
35 37
36 /* 38 /*
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index f7e8dee64fc8..5f3f64553179 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -48,7 +48,7 @@ void br_set_state(struct net_bridge_port *p, unsigned int state)
48 48
49 p->state = state; 49 p->state = state;
50 err = switchdev_port_attr_set(p->dev, &attr); 50 err = switchdev_port_attr_set(p->dev, &attr);
51 if (err) 51 if (err && err != -EOPNOTSUPP)
52 br_warn(p->br, "error setting offload STP state on port %u(%s)\n", 52 br_warn(p->br, "error setting offload STP state on port %u(%s)\n",
53 (unsigned int) p->port_no, p->dev->name); 53 (unsigned int) p->port_no, p->dev->name);
54} 54}
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index fa53d7a89f48..5396ff08af32 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -50,7 +50,7 @@ void br_init_port(struct net_bridge_port *p)
50 p->config_pending = 0; 50 p->config_pending = 0;
51 51
52 err = switchdev_port_attr_set(p->dev, &attr); 52 err = switchdev_port_attr_set(p->dev, &attr);
53 if (err) 53 if (err && err != -EOPNOTSUPP)
54 netdev_err(p->dev, "failed to set HW ageing time\n"); 54 netdev_err(p->dev, "failed to set HW ageing time\n");
55} 55}
56 56
diff --git a/net/core/dev.c b/net/core/dev.c
index ab9b8d0d115e..ae00b894e675 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2403,17 +2403,20 @@ static void skb_warn_bad_offload(const struct sk_buff *skb)
2403{ 2403{
2404 static const netdev_features_t null_features = 0; 2404 static const netdev_features_t null_features = 0;
2405 struct net_device *dev = skb->dev; 2405 struct net_device *dev = skb->dev;
2406 const char *driver = ""; 2406 const char *name = "";
2407 2407
2408 if (!net_ratelimit()) 2408 if (!net_ratelimit())
2409 return; 2409 return;
2410 2410
2411 if (dev && dev->dev.parent) 2411 if (dev) {
2412 driver = dev_driver_string(dev->dev.parent); 2412 if (dev->dev.parent)
2413 2413 name = dev_driver_string(dev->dev.parent);
2414 else
2415 name = netdev_name(dev);
2416 }
2414 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d " 2417 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2415 "gso_type=%d ip_summed=%d\n", 2418 "gso_type=%d ip_summed=%d\n",
2416 driver, dev ? &dev->features : &null_features, 2419 name, dev ? &dev->features : &null_features,
2417 skb->sk ? &skb->sk->sk_route_caps : &null_features, 2420 skb->sk ? &skb->sk->sk_route_caps : &null_features,
2418 skb->len, skb->data_len, skb_shinfo(skb)->gso_size, 2421 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2419 skb_shinfo(skb)->gso_type, skb->ip_summed); 2422 skb_shinfo(skb)->gso_type, skb->ip_summed);
@@ -6426,11 +6429,16 @@ int __netdev_update_features(struct net_device *dev)
6426 6429
6427 if (dev->netdev_ops->ndo_set_features) 6430 if (dev->netdev_ops->ndo_set_features)
6428 err = dev->netdev_ops->ndo_set_features(dev, features); 6431 err = dev->netdev_ops->ndo_set_features(dev, features);
6432 else
6433 err = 0;
6429 6434
6430 if (unlikely(err < 0)) { 6435 if (unlikely(err < 0)) {
6431 netdev_err(dev, 6436 netdev_err(dev,
6432 "set_features() failed (%d); wanted %pNF, left %pNF\n", 6437 "set_features() failed (%d); wanted %pNF, left %pNF\n",
6433 err, &features, &dev->features); 6438 err, &features, &dev->features);
6439 /* return non-0 since some features might have changed and
6440 * it's better to fire a spurious notification than miss it
6441 */
6434 return -1; 6442 return -1;
6435 } 6443 }
6436 6444
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 1aa8437ed6c4..e6af42da28d9 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -857,7 +857,7 @@ static void neigh_probe(struct neighbour *neigh)
857 struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue); 857 struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
858 /* keep skb alive even if arp_queue overflows */ 858 /* keep skb alive even if arp_queue overflows */
859 if (skb) 859 if (skb)
860 skb = skb_copy(skb, GFP_ATOMIC); 860 skb = skb_clone(skb, GFP_ATOMIC);
861 write_unlock(&neigh->lock); 861 write_unlock(&neigh->lock);
862 neigh->ops->solicit(neigh, skb); 862 neigh->ops->solicit(neigh, skb);
863 atomic_inc(&neigh->probes); 863 atomic_inc(&neigh->probes);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 504bd17b7456..34ba7a08876d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1045,15 +1045,156 @@ static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1045 return 0; 1045 return 0;
1046} 1046}
1047 1047
1048static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1049 struct net_device *dev)
1050{
1051 const struct rtnl_link_stats64 *stats;
1052 struct rtnl_link_stats64 temp;
1053 struct nlattr *attr;
1054
1055 stats = dev_get_stats(dev, &temp);
1056
1057 attr = nla_reserve(skb, IFLA_STATS,
1058 sizeof(struct rtnl_link_stats));
1059 if (!attr)
1060 return -EMSGSIZE;
1061
1062 copy_rtnl_link_stats(nla_data(attr), stats);
1063
1064 attr = nla_reserve(skb, IFLA_STATS64,
1065 sizeof(struct rtnl_link_stats64));
1066 if (!attr)
1067 return -EMSGSIZE;
1068
1069 copy_rtnl_link_stats64(nla_data(attr), stats);
1070
1071 return 0;
1072}
1073
1074static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1075 struct net_device *dev,
1076 int vfs_num,
1077 struct nlattr *vfinfo)
1078{
1079 struct ifla_vf_rss_query_en vf_rss_query_en;
1080 struct ifla_vf_link_state vf_linkstate;
1081 struct ifla_vf_spoofchk vf_spoofchk;
1082 struct ifla_vf_tx_rate vf_tx_rate;
1083 struct ifla_vf_stats vf_stats;
1084 struct ifla_vf_trust vf_trust;
1085 struct ifla_vf_vlan vf_vlan;
1086 struct ifla_vf_rate vf_rate;
1087 struct nlattr *vf, *vfstats;
1088 struct ifla_vf_mac vf_mac;
1089 struct ifla_vf_info ivi;
1090
1091 /* Not all SR-IOV capable drivers support the
1092 * spoofcheck and "RSS query enable" query. Preset to
1093 * -1 so the user space tool can detect that the driver
1094 * didn't report anything.
1095 */
1096 ivi.spoofchk = -1;
1097 ivi.rss_query_en = -1;
1098 ivi.trusted = -1;
1099 memset(ivi.mac, 0, sizeof(ivi.mac));
1100 /* The default value for VF link state is "auto"
1101 * IFLA_VF_LINK_STATE_AUTO which equals zero
1102 */
1103 ivi.linkstate = 0;
1104 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1105 return 0;
1106
1107 vf_mac.vf =
1108 vf_vlan.vf =
1109 vf_rate.vf =
1110 vf_tx_rate.vf =
1111 vf_spoofchk.vf =
1112 vf_linkstate.vf =
1113 vf_rss_query_en.vf =
1114 vf_trust.vf = ivi.vf;
1115
1116 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1117 vf_vlan.vlan = ivi.vlan;
1118 vf_vlan.qos = ivi.qos;
1119 vf_tx_rate.rate = ivi.max_tx_rate;
1120 vf_rate.min_tx_rate = ivi.min_tx_rate;
1121 vf_rate.max_tx_rate = ivi.max_tx_rate;
1122 vf_spoofchk.setting = ivi.spoofchk;
1123 vf_linkstate.link_state = ivi.linkstate;
1124 vf_rss_query_en.setting = ivi.rss_query_en;
1125 vf_trust.setting = ivi.trusted;
1126 vf = nla_nest_start(skb, IFLA_VF_INFO);
1127 if (!vf) {
1128 nla_nest_cancel(skb, vfinfo);
1129 return -EMSGSIZE;
1130 }
1131 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1132 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1133 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1134 &vf_rate) ||
1135 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1136 &vf_tx_rate) ||
1137 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1138 &vf_spoofchk) ||
1139 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1140 &vf_linkstate) ||
1141 nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1142 sizeof(vf_rss_query_en),
1143 &vf_rss_query_en) ||
1144 nla_put(skb, IFLA_VF_TRUST,
1145 sizeof(vf_trust), &vf_trust))
1146 return -EMSGSIZE;
1147 memset(&vf_stats, 0, sizeof(vf_stats));
1148 if (dev->netdev_ops->ndo_get_vf_stats)
1149 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1150 &vf_stats);
1151 vfstats = nla_nest_start(skb, IFLA_VF_STATS);
1152 if (!vfstats) {
1153 nla_nest_cancel(skb, vf);
1154 nla_nest_cancel(skb, vfinfo);
1155 return -EMSGSIZE;
1156 }
1157 if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS,
1158 vf_stats.rx_packets) ||
1159 nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS,
1160 vf_stats.tx_packets) ||
1161 nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES,
1162 vf_stats.rx_bytes) ||
1163 nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES,
1164 vf_stats.tx_bytes) ||
1165 nla_put_u64(skb, IFLA_VF_STATS_BROADCAST,
1166 vf_stats.broadcast) ||
1167 nla_put_u64(skb, IFLA_VF_STATS_MULTICAST,
1168 vf_stats.multicast))
1169 return -EMSGSIZE;
1170 nla_nest_end(skb, vfstats);
1171 nla_nest_end(skb, vf);
1172 return 0;
1173}
1174
1175static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
1176{
1177 struct rtnl_link_ifmap map = {
1178 .mem_start = dev->mem_start,
1179 .mem_end = dev->mem_end,
1180 .base_addr = dev->base_addr,
1181 .irq = dev->irq,
1182 .dma = dev->dma,
1183 .port = dev->if_port,
1184 };
1185 if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
1186 return -EMSGSIZE;
1187
1188 return 0;
1189}
1190
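rtnl_fill_stats() and rtnl_fill_vfinfo() above are marked noinline_for_stack so that their large locals (the temp stats buffer, the whole ifla_vf_* set) stay in the helpers' frames instead of being inlined back into rtnl_fill_ifinfo(). The annotation itself is simply a rename of noinline (include/linux/compiler.h):

/* Keep a callee's big locals off the caller's stack frame. */
#define noinline_for_stack noinline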
1048static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, 1191static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1049 int type, u32 pid, u32 seq, u32 change, 1192 int type, u32 pid, u32 seq, u32 change,
1050 unsigned int flags, u32 ext_filter_mask) 1193 unsigned int flags, u32 ext_filter_mask)
1051{ 1194{
1052 struct ifinfomsg *ifm; 1195 struct ifinfomsg *ifm;
1053 struct nlmsghdr *nlh; 1196 struct nlmsghdr *nlh;
1054 struct rtnl_link_stats64 temp; 1197 struct nlattr *af_spec;
1055 const struct rtnl_link_stats64 *stats;
1056 struct nlattr *attr, *af_spec;
1057 struct rtnl_af_ops *af_ops; 1198 struct rtnl_af_ops *af_ops;
1058 struct net_device *upper_dev = netdev_master_upper_dev_get(dev); 1199 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
1059 1200
@@ -1096,18 +1237,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1096 nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down)) 1237 nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
1097 goto nla_put_failure; 1238 goto nla_put_failure;
1098 1239
1099 if (1) { 1240 if (rtnl_fill_link_ifmap(skb, dev))
1100 struct rtnl_link_ifmap map = { 1241 goto nla_put_failure;
1101 .mem_start = dev->mem_start,
1102 .mem_end = dev->mem_end,
1103 .base_addr = dev->base_addr,
1104 .irq = dev->irq,
1105 .dma = dev->dma,
1106 .port = dev->if_port,
1107 };
1108 if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
1109 goto nla_put_failure;
1110 }
1111 1242
1112 if (dev->addr_len) { 1243 if (dev->addr_len) {
1113 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) || 1244 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
@@ -1124,128 +1255,27 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1124 if (rtnl_phys_switch_id_fill(skb, dev)) 1255 if (rtnl_phys_switch_id_fill(skb, dev))
1125 goto nla_put_failure; 1256 goto nla_put_failure;
1126 1257
1127 attr = nla_reserve(skb, IFLA_STATS, 1258 if (rtnl_fill_stats(skb, dev))
1128 sizeof(struct rtnl_link_stats));
1129 if (attr == NULL)
1130 goto nla_put_failure;
1131
1132 stats = dev_get_stats(dev, &temp);
1133 copy_rtnl_link_stats(nla_data(attr), stats);
1134
1135 attr = nla_reserve(skb, IFLA_STATS64,
1136 sizeof(struct rtnl_link_stats64));
1137 if (attr == NULL)
1138 goto nla_put_failure; 1259 goto nla_put_failure;
1139 copy_rtnl_link_stats64(nla_data(attr), stats);
1140 1260
1141 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) && 1261 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
1142 nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent))) 1262 nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
1143 goto nla_put_failure; 1263 goto nla_put_failure;
1144 1264
1145 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent 1265 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent &&
1146 && (ext_filter_mask & RTEXT_FILTER_VF)) { 1266 ext_filter_mask & RTEXT_FILTER_VF) {
1147 int i; 1267 int i;
1148 1268 struct nlattr *vfinfo;
1149 struct nlattr *vfinfo, *vf, *vfstats;
1150 int num_vfs = dev_num_vf(dev->dev.parent); 1269 int num_vfs = dev_num_vf(dev->dev.parent);
1151 1270
1152 vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST); 1271 vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
1153 if (!vfinfo) 1272 if (!vfinfo)
1154 goto nla_put_failure; 1273 goto nla_put_failure;
1155 for (i = 0; i < num_vfs; i++) { 1274 for (i = 0; i < num_vfs; i++) {
1156 struct ifla_vf_info ivi; 1275 if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
1157 struct ifla_vf_mac vf_mac;
1158 struct ifla_vf_vlan vf_vlan;
1159 struct ifla_vf_rate vf_rate;
1160 struct ifla_vf_tx_rate vf_tx_rate;
1161 struct ifla_vf_spoofchk vf_spoofchk;
1162 struct ifla_vf_link_state vf_linkstate;
1163 struct ifla_vf_rss_query_en vf_rss_query_en;
1164 struct ifla_vf_stats vf_stats;
1165 struct ifla_vf_trust vf_trust;
1166
1167 /*
1168 * Not all SR-IOV capable drivers support the
1169 * spoofcheck and "RSS query enable" query. Preset to
1170 * -1 so the user space tool can detect that the driver
1171 * didn't report anything.
1172 */
1173 ivi.spoofchk = -1;
1174 ivi.rss_query_en = -1;
1175 ivi.trusted = -1;
1176 memset(ivi.mac, 0, sizeof(ivi.mac));
1177 /* The default value for VF link state is "auto"
1178 * IFLA_VF_LINK_STATE_AUTO which equals zero
1179 */
1180 ivi.linkstate = 0;
1181 if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
1182 break;
1183 vf_mac.vf =
1184 vf_vlan.vf =
1185 vf_rate.vf =
1186 vf_tx_rate.vf =
1187 vf_spoofchk.vf =
1188 vf_linkstate.vf =
1189 vf_rss_query_en.vf =
1190 vf_trust.vf = ivi.vf;
1191
1192 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1193 vf_vlan.vlan = ivi.vlan;
1194 vf_vlan.qos = ivi.qos;
1195 vf_tx_rate.rate = ivi.max_tx_rate;
1196 vf_rate.min_tx_rate = ivi.min_tx_rate;
1197 vf_rate.max_tx_rate = ivi.max_tx_rate;
1198 vf_spoofchk.setting = ivi.spoofchk;
1199 vf_linkstate.link_state = ivi.linkstate;
1200 vf_rss_query_en.setting = ivi.rss_query_en;
1201 vf_trust.setting = ivi.trusted;
1202 vf = nla_nest_start(skb, IFLA_VF_INFO);
1203 if (!vf) {
1204 nla_nest_cancel(skb, vfinfo);
1205 goto nla_put_failure;
1206 }
1207 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1208 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1209 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1210 &vf_rate) ||
1211 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1212 &vf_tx_rate) ||
1213 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1214 &vf_spoofchk) ||
1215 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1216 &vf_linkstate) ||
1217 nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1218 sizeof(vf_rss_query_en),
1219 &vf_rss_query_en) ||
1220 nla_put(skb, IFLA_VF_TRUST,
1221 sizeof(vf_trust), &vf_trust))
1222 goto nla_put_failure; 1276 goto nla_put_failure;
1223 memset(&vf_stats, 0, sizeof(vf_stats));
1224 if (dev->netdev_ops->ndo_get_vf_stats)
1225 dev->netdev_ops->ndo_get_vf_stats(dev, i,
1226 &vf_stats);
1227 vfstats = nla_nest_start(skb, IFLA_VF_STATS);
1228 if (!vfstats) {
1229 nla_nest_cancel(skb, vf);
1230 nla_nest_cancel(skb, vfinfo);
1231 goto nla_put_failure;
1232 }
1233 if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS,
1234 vf_stats.rx_packets) ||
1235 nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS,
1236 vf_stats.tx_packets) ||
1237 nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES,
1238 vf_stats.rx_bytes) ||
1239 nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES,
1240 vf_stats.tx_bytes) ||
1241 nla_put_u64(skb, IFLA_VF_STATS_BROADCAST,
1242 vf_stats.broadcast) ||
1243 nla_put_u64(skb, IFLA_VF_STATS_MULTICAST,
1244 vf_stats.multicast))
1245 goto nla_put_failure;
1246 nla_nest_end(skb, vfstats);
1247 nla_nest_end(skb, vf);
1248 } 1277 }
1278
1249 nla_nest_end(skb, vfinfo); 1279 nla_nest_end(skb, vfinfo);
1250 } 1280 }
1251 1281
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index aa41e6dd6429..152b9c70e252 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4268,7 +4268,8 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
4268 return NULL; 4268 return NULL;
4269 } 4269 }
4270 4270
4271 memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN); 4271 memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len,
4272 2 * ETH_ALEN);
4272 skb->mac_header += VLAN_HLEN; 4273 skb->mac_header += VLAN_HLEN;
4273 return skb; 4274 return skb;
4274} 4275}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 1feb15f23de8..46b9c887bede 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -563,7 +563,7 @@ static void reqsk_timer_handler(unsigned long data)
563 int max_retries, thresh; 563 int max_retries, thresh;
564 u8 defer_accept; 564 u8 defer_accept;
565 565
566 if (sk_listener->sk_state != TCP_LISTEN) 566 if (sk_state_load(sk_listener) != TCP_LISTEN)
567 goto drop; 567 goto drop;
568 568
569 max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries; 569 max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
@@ -749,7 +749,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
749 * It is OK, because this socket enters to hash table only 749 * It is OK, because this socket enters to hash table only
750 * after validation is complete. 750 * after validation is complete.
751 */ 751 */
752 sk->sk_state = TCP_LISTEN; 752 sk_state_store(sk, TCP_LISTEN);
753 if (!sk->sk_prot->get_port(sk, inet->inet_num)) { 753 if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
754 inet->inet_sport = htons(inet->inet_num); 754 inet->inet_sport = htons(inet->inet_num);
755 755
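This hunk and the TCP hunks below convert lockless readers of sk->sk_state to sk_state_load() and the LISTEN-transition writers to sk_state_store(). The helpers are presumably thin acquire/release wrappers in include/net/sock.h, along the lines of:

static inline int sk_state_load(const struct sock *sk)
{
	return smp_load_acquire(&sk->sk_state);
}

static inline void sk_state_store(struct sock *sk, int newstate)
{
	smp_store_release(&sk->sk_state, newstate);
}

The acquire in the load pairs with the release in the store, so a reader that observes TCP_LISTEN also observes the listener's fully initialized accept queue.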
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 657d2307f031..b3ca21b2ba9b 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -45,7 +45,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
45 struct net *net = nf_ct_net(ct); 45 struct net *net = nf_ct_net(ct);
46 const struct nf_conn *master = ct->master; 46 const struct nf_conn *master = ct->master;
47 struct nf_conntrack_expect *other_exp; 47 struct nf_conntrack_expect *other_exp;
48 struct nf_conntrack_tuple t; 48 struct nf_conntrack_tuple t = {};
49 const struct nf_ct_pptp_master *ct_pptp_info; 49 const struct nf_ct_pptp_master *ct_pptp_info;
50 const struct nf_nat_pptp *nat_pptp_info; 50 const struct nf_nat_pptp *nat_pptp_info;
51 struct nf_nat_range range; 51 struct nf_nat_range range;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 8c0d0bdc2a7c..63e5be0abd86 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -406,10 +406,12 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
406 ip_select_ident(net, skb, NULL); 406 ip_select_ident(net, skb, NULL);
407 407
408 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); 408 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
409 skb->transport_header += iphlen;
410 if (iph->protocol == IPPROTO_ICMP &&
411 length >= iphlen + sizeof(struct icmphdr))
412 icmp_out_count(net, ((struct icmphdr *)
413 skb_transport_header(skb))->type);
409 } 414 }
410 if (iph->protocol == IPPROTO_ICMP)
411 icmp_out_count(net, ((struct icmphdr *)
412 skb_transport_header(skb))->type);
413 415
414 err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, 416 err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
415 net, sk, skb, NULL, rt->dst.dev, 417 net, sk, skb, NULL, rt->dst.dev,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0cfa7c0c1e80..c1728771cf89 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -451,11 +451,14 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
451 unsigned int mask; 451 unsigned int mask;
452 struct sock *sk = sock->sk; 452 struct sock *sk = sock->sk;
453 const struct tcp_sock *tp = tcp_sk(sk); 453 const struct tcp_sock *tp = tcp_sk(sk);
454 int state;
454 455
455 sock_rps_record_flow(sk); 456 sock_rps_record_flow(sk);
456 457
457 sock_poll_wait(file, sk_sleep(sk), wait); 458 sock_poll_wait(file, sk_sleep(sk), wait);
458 if (sk->sk_state == TCP_LISTEN) 459
460 state = sk_state_load(sk);
461 if (state == TCP_LISTEN)
459 return inet_csk_listen_poll(sk); 462 return inet_csk_listen_poll(sk);
460 463
461 /* Socket is not locked. We are protected from async events 464 /* Socket is not locked. We are protected from async events
@@ -492,14 +495,14 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
492 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent 495 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
493 * blocking on fresh not-connected or disconnected socket. --ANK 496 * blocking on fresh not-connected or disconnected socket. --ANK
494 */ 497 */
495 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE) 498 if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
496 mask |= POLLHUP; 499 mask |= POLLHUP;
497 if (sk->sk_shutdown & RCV_SHUTDOWN) 500 if (sk->sk_shutdown & RCV_SHUTDOWN)
498 mask |= POLLIN | POLLRDNORM | POLLRDHUP; 501 mask |= POLLIN | POLLRDNORM | POLLRDHUP;
499 502
500 /* Connected or passive Fast Open socket? */ 503 /* Connected or passive Fast Open socket? */
501 if (sk->sk_state != TCP_SYN_SENT && 504 if (state != TCP_SYN_SENT &&
502 (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk)) { 505 (state != TCP_SYN_RECV || tp->fastopen_rsk)) {
503 int target = sock_rcvlowat(sk, 0, INT_MAX); 506 int target = sock_rcvlowat(sk, 0, INT_MAX);
504 507
505 if (tp->urg_seq == tp->copied_seq && 508 if (tp->urg_seq == tp->copied_seq &&
@@ -507,9 +510,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
507 tp->urg_data) 510 tp->urg_data)
508 target++; 511 target++;
509 512
510 /* Potential race condition. If read of tp below will
511 * escape above sk->sk_state, we can be illegally awaken
512 * in SYN_* states. */
513 if (tp->rcv_nxt - tp->copied_seq >= target) 513 if (tp->rcv_nxt - tp->copied_seq >= target)
514 mask |= POLLIN | POLLRDNORM; 514 mask |= POLLIN | POLLRDNORM;
515 515
@@ -1934,7 +1934,7 @@ void tcp_set_state(struct sock *sk, int state)
1934 /* Change state AFTER socket is unhashed to avoid closed 1934 /* Change state AFTER socket is unhashed to avoid closed
1935 * socket sitting in hash tables. 1935 * socket sitting in hash tables.
1936 */ 1936 */
1937 sk->sk_state = state; 1937 sk_state_store(sk, state);
1938 1938
1939#ifdef STATE_TRACE 1939#ifdef STATE_TRACE
1940 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]); 1940 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
@@ -2644,7 +2644,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
2644 if (sk->sk_type != SOCK_STREAM) 2644 if (sk->sk_type != SOCK_STREAM)
2645 return; 2645 return;
2646 2646
2647 info->tcpi_state = sk->sk_state; 2647 info->tcpi_state = sk_state_load(sk);
2648
2648 info->tcpi_ca_state = icsk->icsk_ca_state; 2649 info->tcpi_ca_state = icsk->icsk_ca_state;
2649 info->tcpi_retransmits = icsk->icsk_retransmits; 2650 info->tcpi_retransmits = icsk->icsk_retransmits;
2650 info->tcpi_probes = icsk->icsk_probes_out; 2651 info->tcpi_probes = icsk->icsk_probes_out;
@@ -2672,7 +2673,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
2672 info->tcpi_snd_mss = tp->mss_cache; 2673 info->tcpi_snd_mss = tp->mss_cache;
2673 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; 2674 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2674 2675
2675 if (sk->sk_state == TCP_LISTEN) { 2676 if (info->tcpi_state == TCP_LISTEN) {
2676 info->tcpi_unacked = sk->sk_ack_backlog; 2677 info->tcpi_unacked = sk->sk_ack_backlog;
2677 info->tcpi_sacked = sk->sk_max_ack_backlog; 2678 info->tcpi_sacked = sk->sk_max_ack_backlog;
2678 } else { 2679 } else {
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 479f34946177..b31604086edd 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -21,7 +21,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
21{ 21{
22 struct tcp_info *info = _info; 22 struct tcp_info *info = _info;
23 23
24 if (sk->sk_state == TCP_LISTEN) { 24 if (sk_state_load(sk) == TCP_LISTEN) {
25 r->idiag_rqueue = sk->sk_ack_backlog; 25 r->idiag_rqueue = sk->sk_ack_backlog;
26 r->idiag_wqueue = sk->sk_max_ack_backlog; 26 r->idiag_wqueue = sk->sk_max_ack_backlog;
27 } else if (sk->sk_type == SOCK_STREAM) { 27 } else if (sk->sk_type == SOCK_STREAM) {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 950e28c0cdf2..ba09016d1bfd 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2158,6 +2158,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2158 __u16 destp = ntohs(inet->inet_dport); 2158 __u16 destp = ntohs(inet->inet_dport);
2159 __u16 srcp = ntohs(inet->inet_sport); 2159 __u16 srcp = ntohs(inet->inet_sport);
2160 int rx_queue; 2160 int rx_queue;
2161 int state;
2161 2162
2162 if (icsk->icsk_pending == ICSK_TIME_RETRANS || 2163 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2163 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || 2164 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
@@ -2175,17 +2176,18 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2175 timer_expires = jiffies; 2176 timer_expires = jiffies;
2176 } 2177 }
2177 2178
2178 if (sk->sk_state == TCP_LISTEN) 2179 state = sk_state_load(sk);
2180 if (state == TCP_LISTEN)
2179 rx_queue = sk->sk_ack_backlog; 2181 rx_queue = sk->sk_ack_backlog;
2180 else 2182 else
2181 /* 2183 /* Because we don't lock the socket,
2182 * because we dont lock socket, we might find a transient negative value 2184 * we might find a transient negative value.
2183 */ 2185 */
2184 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0); 2186 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2185 2187
2186 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " 2188 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2187 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d", 2189 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2188 i, src, srcp, dest, destp, sk->sk_state, 2190 i, src, srcp, dest, destp, state,
2189 tp->write_seq - tp->snd_una, 2191 tp->write_seq - tp->snd_una,
2190 rx_queue, 2192 rx_queue,
2191 timer_active, 2193 timer_active,
@@ -2199,8 +2201,8 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2199 jiffies_to_clock_t(icsk->icsk_ack.ato), 2201 jiffies_to_clock_t(icsk->icsk_ack.ato),
2200 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, 2202 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2201 tp->snd_cwnd, 2203 tp->snd_cwnd,
2202 sk->sk_state == TCP_LISTEN ? 2204 state == TCP_LISTEN ?
2203 (fastopenq ? fastopenq->max_qlen : 0) : 2205 fastopenq->max_qlen :
2204 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)); 2206 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2205} 2207}
2206 2208
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 124338a39e29..5ee56d0a8699 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1651,7 +1651,6 @@ out:
1651 if (!err) { 1651 if (!err) {
1652 ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT); 1652 ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
1653 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); 1653 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1654 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
1655 } else { 1654 } else {
1656 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); 1655 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1657 } 1656 }
@@ -2015,7 +2014,6 @@ out:
2015 if (!err) { 2014 if (!err) {
2016 ICMP6MSGOUT_INC_STATS(net, idev, type); 2015 ICMP6MSGOUT_INC_STATS(net, idev, type);
2017 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); 2016 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
2018 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
2019 } else 2017 } else
2020 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); 2018 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
2021 2019
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c8bc9b4ac328..6f01fe122abd 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -404,6 +404,14 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
404 } 404 }
405} 405}
406 406
407static bool __rt6_check_expired(const struct rt6_info *rt)
408{
409 if (rt->rt6i_flags & RTF_EXPIRES)
410 return time_after(jiffies, rt->dst.expires);
411 else
412 return false;
413}
414
407static bool rt6_check_expired(const struct rt6_info *rt) 415static bool rt6_check_expired(const struct rt6_info *rt)
408{ 416{
409 if (rt->rt6i_flags & RTF_EXPIRES) { 417 if (rt->rt6i_flags & RTF_EXPIRES) {
@@ -1252,7 +1260,8 @@ static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
1252 1260
1253static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie) 1261static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
1254{ 1262{
1255 if (rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK && 1263 if (!__rt6_check_expired(rt) &&
1264 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1256 rt6_check((struct rt6_info *)(rt->dst.from), cookie)) 1265 rt6_check((struct rt6_info *)(rt->dst.from), cookie))
1257 return &rt->dst; 1266 return &rt->dst;
1258 else 1267 else
@@ -1272,7 +1281,8 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
1272 1281
1273 rt6_dst_from_metrics_check(rt); 1282 rt6_dst_from_metrics_check(rt);
1274 1283
1275 if ((rt->rt6i_flags & RTF_PCPU) || unlikely(dst->flags & DST_NOCACHE)) 1284 if (rt->rt6i_flags & RTF_PCPU ||
1285 (unlikely(dst->flags & DST_NOCACHE) && rt->dst.from))
1276 return rt6_dst_from_check(rt, cookie); 1286 return rt6_dst_from_check(rt, cookie);
1277 else 1287 else
1278 return rt6_check(rt, cookie); 1288 return rt6_check(rt, cookie);
@@ -1322,6 +1332,12 @@ static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
1322 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires); 1332 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
1323} 1333}
1324 1334
1335static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
1336{
1337 return !(rt->rt6i_flags & RTF_CACHE) &&
1338 (rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
1339}
1340
1325static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, 1341static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
1326 const struct ipv6hdr *iph, u32 mtu) 1342 const struct ipv6hdr *iph, u32 mtu)
1327{ 1343{
@@ -1335,7 +1351,7 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
1335 if (mtu >= dst_mtu(dst)) 1351 if (mtu >= dst_mtu(dst))
1336 return; 1352 return;
1337 1353
1338 if (rt6->rt6i_flags & RTF_CACHE) { 1354 if (!rt6_cache_allowed_for_pmtu(rt6)) {
1339 rt6_do_update_pmtu(rt6, mtu); 1355 rt6_do_update_pmtu(rt6, mtu);
1340 } else { 1356 } else {
1341 const struct in6_addr *daddr, *saddr; 1357 const struct in6_addr *daddr, *saddr;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5baa8e754e41..c5429a636f1a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1690,6 +1690,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1690 const struct tcp_sock *tp = tcp_sk(sp); 1690 const struct tcp_sock *tp = tcp_sk(sp);
1691 const struct inet_connection_sock *icsk = inet_csk(sp); 1691 const struct inet_connection_sock *icsk = inet_csk(sp);
1692 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq; 1692 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1693 int rx_queue;
1694 int state;
1693 1695
1694 dest = &sp->sk_v6_daddr; 1696 dest = &sp->sk_v6_daddr;
1695 src = &sp->sk_v6_rcv_saddr; 1697 src = &sp->sk_v6_rcv_saddr;
@@ -1710,6 +1712,15 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1710 timer_expires = jiffies; 1712 timer_expires = jiffies;
1711 } 1713 }
1712 1714
1715 state = sk_state_load(sp);
1716 if (state == TCP_LISTEN)
1717 rx_queue = sp->sk_ack_backlog;
1718 else
1719 /* Because we don't lock the socket,
1720 * we might find a transient negative value.
1721 */
1722 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1723
1713 seq_printf(seq, 1724 seq_printf(seq,
1714 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " 1725 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1715 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n", 1726 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
@@ -1718,9 +1729,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1718 src->s6_addr32[2], src->s6_addr32[3], srcp, 1729 src->s6_addr32[2], src->s6_addr32[3], srcp,
1719 dest->s6_addr32[0], dest->s6_addr32[1], 1730 dest->s6_addr32[0], dest->s6_addr32[1],
1720 dest->s6_addr32[2], dest->s6_addr32[3], destp, 1731 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1721 sp->sk_state, 1732 state,
1722 tp->write_seq-tp->snd_una, 1733 tp->write_seq - tp->snd_una,
1723 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq), 1734 rx_queue,
1724 timer_active, 1735 timer_active,
1725 jiffies_delta_to_clock_t(timer_expires - jiffies), 1736 jiffies_delta_to_clock_t(timer_expires - jiffies),
1726 icsk->icsk_retransmits, 1737 icsk->icsk_retransmits,
@@ -1732,7 +1743,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1732 jiffies_to_clock_t(icsk->icsk_ack.ato), 1743 jiffies_to_clock_t(icsk->icsk_ack.ato),
1733 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, 1744 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1734 tp->snd_cwnd, 1745 tp->snd_cwnd,
1735 sp->sk_state == TCP_LISTEN ? 1746 state == TCP_LISTEN ?
1736 fastopenq->max_qlen : 1747 fastopenq->max_qlen :
1737 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh) 1748 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1738 ); 1749 );
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index e22349ea7256..4692782b5280 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -869,7 +869,7 @@ config NETFILTER_XT_TARGET_TEE
869 depends on IPV6 || IPV6=n 869 depends on IPV6 || IPV6=n
870 depends on !NF_CONNTRACK || NF_CONNTRACK 870 depends on !NF_CONNTRACK || NF_CONNTRACK
871 select NF_DUP_IPV4 871 select NF_DUP_IPV4
872 select NF_DUP_IPV6 if IP6_NF_IPTABLES 872 select NF_DUP_IPV6 if IP6_NF_IPTABLES != n
873 ---help--- 873 ---help---
874 This option adds a "TEE" target with which a packet can be cloned and 874 This option adds a "TEE" target with which a packet can be cloned and
875 this clone be rerouted to another nexthop. 875 this clone be rerouted to another nexthop.
@@ -882,7 +882,7 @@ config NETFILTER_XT_TARGET_TPROXY
882 depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n 882 depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
883 depends on IP_NF_MANGLE 883 depends on IP_NF_MANGLE
884 select NF_DEFRAG_IPV4 884 select NF_DEFRAG_IPV4
885 select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES 885 select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n
886 help 886 help
887 This option adds a `TPROXY' target, which is somewhat similar to 887 This option adds a `TPROXY' target, which is somewhat similar to
888 REDIRECT. It can only be used in the mangle table and is useful 888 REDIRECT. It can only be used in the mangle table and is useful
@@ -1375,7 +1375,7 @@ config NETFILTER_XT_MATCH_SOCKET
1375 depends on IPV6 || IPV6=n 1375 depends on IPV6 || IPV6=n
1376 depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n 1376 depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
1377 select NF_DEFRAG_IPV4 1377 select NF_DEFRAG_IPV4
1378 select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES 1378 select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n
1379 help 1379 help
1380 This option adds a `socket' match, which can be used to match 1380 This option adds a `socket' match, which can be used to match
1381 packets for which a TCP or UDP socket lookup finds a valid socket. 1381 packets for which a TCP or UDP socket lookup finds a valid socket.
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
index d05e759ed0fa..b0bc475f641e 100644
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
@@ -33,7 +33,7 @@
33#define mtype_gc IPSET_TOKEN(MTYPE, _gc) 33#define mtype_gc IPSET_TOKEN(MTYPE, _gc)
34#define mtype MTYPE 34#define mtype MTYPE
35 35
36#define get_ext(set, map, id) ((map)->extensions + (set)->dsize * (id)) 36#define get_ext(set, map, id) ((map)->extensions + ((set)->dsize * (id)))
37 37
38static void 38static void
39mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set)) 39mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
@@ -67,12 +67,9 @@ mtype_destroy(struct ip_set *set)
67 del_timer_sync(&map->gc); 67 del_timer_sync(&map->gc);
68 68
69 ip_set_free(map->members); 69 ip_set_free(map->members);
70 if (set->dsize) { 70 if (set->dsize && set->extensions & IPSET_EXT_DESTROY)
71 if (set->extensions & IPSET_EXT_DESTROY) 71 mtype_ext_cleanup(set);
72 mtype_ext_cleanup(set); 72 ip_set_free(map);
73 ip_set_free(map->extensions);
74 }
75 kfree(map);
76 73
77 set->data = NULL; 74 set->data = NULL;
78} 75}
@@ -92,16 +89,14 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
92{ 89{
93 const struct mtype *map = set->data; 90 const struct mtype *map = set->data;
94 struct nlattr *nested; 91 struct nlattr *nested;
92 size_t memsize = sizeof(*map) + map->memsize;
95 93
96 nested = ipset_nest_start(skb, IPSET_ATTR_DATA); 94 nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
97 if (!nested) 95 if (!nested)
98 goto nla_put_failure; 96 goto nla_put_failure;
99 if (mtype_do_head(skb, map) || 97 if (mtype_do_head(skb, map) ||
100 nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || 98 nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
101 nla_put_net32(skb, IPSET_ATTR_MEMSIZE, 99 nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)))
102 htonl(sizeof(*map) +
103 map->memsize +
104 set->dsize * map->elements)))
105 goto nla_put_failure; 100 goto nla_put_failure;
106 if (unlikely(ip_set_put_flags(skb, set))) 101 if (unlikely(ip_set_put_flags(skb, set)))
107 goto nla_put_failure; 102 goto nla_put_failure;
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index 64a564334418..4783efff0bde 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -41,7 +41,6 @@ MODULE_ALIAS("ip_set_bitmap:ip");
41/* Type structure */ 41/* Type structure */
42struct bitmap_ip { 42struct bitmap_ip {
43 void *members; /* the set members */ 43 void *members; /* the set members */
44 void *extensions; /* data extensions */
45 u32 first_ip; /* host byte order, included in range */ 44 u32 first_ip; /* host byte order, included in range */
46 u32 last_ip; /* host byte order, included in range */ 45 u32 last_ip; /* host byte order, included in range */
47 u32 elements; /* number of max elements in the set */ 46 u32 elements; /* number of max elements in the set */
@@ -49,6 +48,8 @@ struct bitmap_ip {
49 size_t memsize; /* members size */ 48 size_t memsize; /* members size */
50 u8 netmask; /* subnet netmask */ 49 u8 netmask; /* subnet netmask */
51 struct timer_list gc; /* garbage collection */ 50 struct timer_list gc; /* garbage collection */
51 unsigned char extensions[0] /* data extensions */
52 __aligned(__alignof__(u64));
52}; 53};
53 54
54/* ADT structure for generic function args */ 55/* ADT structure for generic function args */
@@ -224,13 +225,6 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
224 map->members = ip_set_alloc(map->memsize); 225 map->members = ip_set_alloc(map->memsize);
225 if (!map->members) 226 if (!map->members)
226 return false; 227 return false;
227 if (set->dsize) {
228 map->extensions = ip_set_alloc(set->dsize * elements);
229 if (!map->extensions) {
230 kfree(map->members);
231 return false;
232 }
233 }
234 map->first_ip = first_ip; 228 map->first_ip = first_ip;
235 map->last_ip = last_ip; 229 map->last_ip = last_ip;
236 map->elements = elements; 230 map->elements = elements;
@@ -316,13 +310,13 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
316 pr_debug("hosts %u, elements %llu\n", 310 pr_debug("hosts %u, elements %llu\n",
317 hosts, (unsigned long long)elements); 311 hosts, (unsigned long long)elements);
318 312
319 map = kzalloc(sizeof(*map), GFP_KERNEL); 313 set->dsize = ip_set_elem_len(set, tb, 0, 0);
314 map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
320 if (!map) 315 if (!map)
321 return -ENOMEM; 316 return -ENOMEM;
322 317
323 map->memsize = bitmap_bytes(0, elements - 1); 318 map->memsize = bitmap_bytes(0, elements - 1);
324 set->variant = &bitmap_ip; 319 set->variant = &bitmap_ip;
325 set->dsize = ip_set_elem_len(set, tb, 0);
326 if (!init_map_ip(set, map, first_ip, last_ip, 320 if (!init_map_ip(set, map, first_ip, last_ip,
327 elements, hosts, netmask)) { 321 elements, hosts, netmask)) {
328 kfree(map); 322 kfree(map);
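The ipset bitmap conversions all apply one pattern: the separately allocated extensions blob becomes a u64-aligned zero-length array at the tail of the map structure, so a single allocation covers the header and all per-element extensions, and get_ext()/get_elem() reduce to offset arithmetic. A small self-contained userspace sketch of the layout, with every name invented for the example:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct map {
	size_t dsize;			/* per-element extension size */
	unsigned char extensions[0]	/* data extensions */
		__attribute__((aligned(__alignof__(unsigned long long))));
};

#define get_ext(m, id)	((m)->extensions + (m)->dsize * (id))

int main(void)
{
	size_t elements = 8, dsize = 16;
	struct map *m;

	/* One allocation for the header plus all extension slots. */
	m = calloc(1, sizeof(*m) + elements * dsize);
	if (!m)
		return 1;
	m->dsize = dsize;

	memset(get_ext(m, 3), 0xab, dsize);
	printf("slot 3 starts at byte offset %zu\n",
	       (size_t)(get_ext(m, 3) - (unsigned char *)m));

	free(m);
	return 0;
}

A single allocation also removes the partial-failure cleanup that init_map_ip() needed when members and extensions were allocated separately.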
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 1430535118fb..29dde208381d 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -47,24 +47,26 @@ enum {
47/* Type structure */ 47/* Type structure */
48struct bitmap_ipmac { 48struct bitmap_ipmac {
49 void *members; /* the set members */ 49 void *members; /* the set members */
50 void *extensions; /* MAC + data extensions */
51 u32 first_ip; /* host byte order, included in range */ 50 u32 first_ip; /* host byte order, included in range */
52 u32 last_ip; /* host byte order, included in range */ 51 u32 last_ip; /* host byte order, included in range */
53 u32 elements; /* number of max elements in the set */ 52 u32 elements; /* number of max elements in the set */
54 size_t memsize; /* members size */ 53 size_t memsize; /* members size */
55 struct timer_list gc; /* garbage collector */ 54 struct timer_list gc; /* garbage collector */
55 unsigned char extensions[0] /* MAC + data extensions */
56 __aligned(__alignof__(u64));
56}; 57};
57 58
58/* ADT structure for generic function args */ 59/* ADT structure for generic function args */
59struct bitmap_ipmac_adt_elem { 60struct bitmap_ipmac_adt_elem {
61 unsigned char ether[ETH_ALEN] __aligned(2);
60 u16 id; 62 u16 id;
61 unsigned char *ether; 63 u16 add_mac;
62}; 64};
63 65
64struct bitmap_ipmac_elem { 66struct bitmap_ipmac_elem {
65 unsigned char ether[ETH_ALEN]; 67 unsigned char ether[ETH_ALEN];
66 unsigned char filled; 68 unsigned char filled;
67} __attribute__ ((aligned)); 69} __aligned(__alignof__(u64));
68 70
69static inline u32 71static inline u32
70ip_to_id(const struct bitmap_ipmac *m, u32 ip) 72ip_to_id(const struct bitmap_ipmac *m, u32 ip)
@@ -72,11 +74,11 @@ ip_to_id(const struct bitmap_ipmac *m, u32 ip)
72 return ip - m->first_ip; 74 return ip - m->first_ip;
73} 75}
74 76
75static inline struct bitmap_ipmac_elem * 77#define get_elem(extensions, id, dsize) \
76get_elem(void *extensions, u16 id, size_t dsize) 78 (struct bitmap_ipmac_elem *)(extensions + (id) * (dsize))
77{ 79
78 return (struct bitmap_ipmac_elem *)(extensions + id * dsize); 80#define get_const_elem(extensions, id, dsize) \
79} 81 (const struct bitmap_ipmac_elem *)(extensions + (id) * (dsize))
80 82
81/* Common functions */ 83/* Common functions */
82 84
@@ -88,10 +90,9 @@ bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e,
88 90
89 if (!test_bit(e->id, map->members)) 91 if (!test_bit(e->id, map->members))
90 return 0; 92 return 0;
91 elem = get_elem(map->extensions, e->id, dsize); 93 elem = get_const_elem(map->extensions, e->id, dsize);
92 if (elem->filled == MAC_FILLED) 94 if (e->add_mac && elem->filled == MAC_FILLED)
93 return !e->ether || 95 return ether_addr_equal(e->ether, elem->ether);
94 ether_addr_equal(e->ether, elem->ether);
95 /* Trigger kernel to fill out the ethernet address */ 96 /* Trigger kernel to fill out the ethernet address */
96 return -EAGAIN; 97 return -EAGAIN;
97} 98}
@@ -103,7 +104,7 @@ bitmap_ipmac_gc_test(u16 id, const struct bitmap_ipmac *map, size_t dsize)
103 104
104 if (!test_bit(id, map->members)) 105 if (!test_bit(id, map->members))
105 return 0; 106 return 0;
106 elem = get_elem(map->extensions, id, dsize); 107 elem = get_const_elem(map->extensions, id, dsize);
107 /* Timer not started for the incomplete elements */ 108 /* Timer not started for the incomplete elements */
108 return elem->filled == MAC_FILLED; 109 return elem->filled == MAC_FILLED;
109} 110}
@@ -133,7 +134,7 @@ bitmap_ipmac_add_timeout(unsigned long *timeout,
133 * and we can reuse it later when MAC is filled out, 134 * and we can reuse it later when MAC is filled out,
134 * possibly by the kernel 135 * possibly by the kernel
135 */ 136 */
136 if (e->ether) 137 if (e->add_mac)
137 ip_set_timeout_set(timeout, t); 138 ip_set_timeout_set(timeout, t);
138 else 139 else
139 *timeout = t; 140 *timeout = t;
@@ -150,7 +151,7 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
150 elem = get_elem(map->extensions, e->id, dsize); 151 elem = get_elem(map->extensions, e->id, dsize);
151 if (test_bit(e->id, map->members)) { 152 if (test_bit(e->id, map->members)) {
152 if (elem->filled == MAC_FILLED) { 153 if (elem->filled == MAC_FILLED) {
153 if (e->ether && 154 if (e->add_mac &&
154 (flags & IPSET_FLAG_EXIST) && 155 (flags & IPSET_FLAG_EXIST) &&
155 !ether_addr_equal(e->ether, elem->ether)) { 156 !ether_addr_equal(e->ether, elem->ether)) {
156 /* memcpy isn't atomic */ 157 /* memcpy isn't atomic */
@@ -159,7 +160,7 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
159 ether_addr_copy(elem->ether, e->ether); 160 ether_addr_copy(elem->ether, e->ether);
160 } 161 }
161 return IPSET_ADD_FAILED; 162 return IPSET_ADD_FAILED;
162 } else if (!e->ether) 163 } else if (!e->add_mac)
163 /* Already added without ethernet address */ 164 /* Already added without ethernet address */
164 return IPSET_ADD_FAILED; 165 return IPSET_ADD_FAILED;
165 /* Fill the MAC address and trigger the timer activation */ 166 /* Fill the MAC address and trigger the timer activation */
@@ -168,7 +169,7 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
168 ether_addr_copy(elem->ether, e->ether); 169 ether_addr_copy(elem->ether, e->ether);
169 elem->filled = MAC_FILLED; 170 elem->filled = MAC_FILLED;
170 return IPSET_ADD_START_STORED_TIMEOUT; 171 return IPSET_ADD_START_STORED_TIMEOUT;
171 } else if (e->ether) { 172 } else if (e->add_mac) {
172 /* We can store MAC too */ 173 /* We can store MAC too */
173 ether_addr_copy(elem->ether, e->ether); 174 ether_addr_copy(elem->ether, e->ether);
174 elem->filled = MAC_FILLED; 175 elem->filled = MAC_FILLED;
@@ -191,7 +192,7 @@ bitmap_ipmac_do_list(struct sk_buff *skb, const struct bitmap_ipmac *map,
191 u32 id, size_t dsize) 192 u32 id, size_t dsize)
192{ 193{
193 const struct bitmap_ipmac_elem *elem = 194 const struct bitmap_ipmac_elem *elem =
194 get_elem(map->extensions, id, dsize); 195 get_const_elem(map->extensions, id, dsize);
195 196
196 return nla_put_ipaddr4(skb, IPSET_ATTR_IP, 197 return nla_put_ipaddr4(skb, IPSET_ATTR_IP,
197 htonl(map->first_ip + id)) || 198 htonl(map->first_ip + id)) ||
@@ -213,7 +214,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
213{ 214{
214 struct bitmap_ipmac *map = set->data; 215 struct bitmap_ipmac *map = set->data;
215 ipset_adtfn adtfn = set->variant->adt[adt]; 216 ipset_adtfn adtfn = set->variant->adt[adt];
216 struct bitmap_ipmac_adt_elem e = { .id = 0 }; 217 struct bitmap_ipmac_adt_elem e = { .id = 0, .add_mac = 1 };
217 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); 218 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
218 u32 ip; 219 u32 ip;
219 220
@@ -231,7 +232,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
231 return -EINVAL; 232 return -EINVAL;
232 233
233 e.id = ip_to_id(map, ip); 234 e.id = ip_to_id(map, ip);
234 e.ether = eth_hdr(skb)->h_source; 235 memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
235 236
236 return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); 237 return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
237} 238}
@@ -265,11 +266,10 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
265 return -IPSET_ERR_BITMAP_RANGE; 266 return -IPSET_ERR_BITMAP_RANGE;
266 267
267 e.id = ip_to_id(map, ip); 268 e.id = ip_to_id(map, ip);
268 if (tb[IPSET_ATTR_ETHER]) 269 if (tb[IPSET_ATTR_ETHER]) {
269 e.ether = nla_data(tb[IPSET_ATTR_ETHER]); 270 memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
270 else 271 e.add_mac = 1;
271 e.ether = NULL; 272 }
272
273 ret = adtfn(set, &e, &ext, &ext, flags); 273 ret = adtfn(set, &e, &ext, &ext, flags);
274 274
275 return ip_set_eexist(ret, flags) ? 0 : ret; 275 return ip_set_eexist(ret, flags) ? 0 : ret;
@@ -300,13 +300,6 @@ init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
300 map->members = ip_set_alloc(map->memsize); 300 map->members = ip_set_alloc(map->memsize);
301 if (!map->members) 301 if (!map->members)
302 return false; 302 return false;
303 if (set->dsize) {
304 map->extensions = ip_set_alloc(set->dsize * elements);
305 if (!map->extensions) {
306 kfree(map->members);
307 return false;
308 }
309 }
310 map->first_ip = first_ip; 303 map->first_ip = first_ip;
311 map->last_ip = last_ip; 304 map->last_ip = last_ip;
312 map->elements = elements; 305 map->elements = elements;
@@ -361,14 +354,15 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
361 if (elements > IPSET_BITMAP_MAX_RANGE + 1) 354 if (elements > IPSET_BITMAP_MAX_RANGE + 1)
362 return -IPSET_ERR_BITMAP_RANGE_SIZE; 355 return -IPSET_ERR_BITMAP_RANGE_SIZE;
363 356
364 map = kzalloc(sizeof(*map), GFP_KERNEL); 357 set->dsize = ip_set_elem_len(set, tb,
358 sizeof(struct bitmap_ipmac_elem),
359 __alignof__(struct bitmap_ipmac_elem));
360 map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
365 if (!map) 361 if (!map)
366 return -ENOMEM; 362 return -ENOMEM;
367 363
368 map->memsize = bitmap_bytes(0, elements - 1); 364 map->memsize = bitmap_bytes(0, elements - 1);
369 set->variant = &bitmap_ipmac; 365 set->variant = &bitmap_ipmac;
370 set->dsize = ip_set_elem_len(set, tb,
371 sizeof(struct bitmap_ipmac_elem));
372 if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) { 366 if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
373 kfree(map); 367 kfree(map);
374 return -ENOMEM; 368 return -ENOMEM;
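The hunk above folds the per-element extension area into the map allocation itself: the separately allocated extensions pointer becomes a trailing zero-length array aligned for u64, and the get_elem()/get_const_elem() macros index into it by id * dsize. A minimal userspace sketch of the same layout, with illustrative names (struct map, map_create) rather than the kernel's:

    #include <stdint.h>
    #include <stdlib.h>

    struct elem {                         /* per-slot payload, u64-aligned */
            unsigned char ether[6];
            unsigned char filled;
    } __attribute__((aligned(__alignof__(uint64_t))));

    struct map {
            uint32_t elements;            /* number of slots */
            size_t dsize;                 /* bytes per slot */
            unsigned char extensions[]    /* slots follow the header */
                    __attribute__((aligned(__alignof__(uint64_t))));
    };

    #define get_elem(ext, id, dsize) \
            ((struct elem *)((ext) + (size_t)(id) * (dsize)))

    static struct map *map_create(uint32_t elements)
    {
            size_t dsize = sizeof(struct elem);
            struct map *m = calloc(1, sizeof(*m) + elements * dsize);

            if (!m)
                    return NULL;
            m->elements = elements;
            m->dsize = dsize;
            return m;                     /* one free() releases everything */
    }

One allocation instead of two also removes the partial-failure cleanup path that init_map_ipmac() used to need.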
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 5338ccd5da46..7f0c733358a4 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -35,12 +35,13 @@ MODULE_ALIAS("ip_set_bitmap:port");
35/* Type structure */ 35/* Type structure */
36struct bitmap_port { 36struct bitmap_port {
37 void *members; /* the set members */ 37 void *members; /* the set members */
38 void *extensions; /* data extensions */
39 u16 first_port; /* host byte order, included in range */ 38 u16 first_port; /* host byte order, included in range */
40 u16 last_port; /* host byte order, included in range */ 39 u16 last_port; /* host byte order, included in range */
41 u32 elements; /* number of max elements in the set */ 40 u32 elements; /* number of max elements in the set */
42 size_t memsize; /* members size */ 41 size_t memsize; /* members size */
43 struct timer_list gc; /* garbage collection */ 42 struct timer_list gc; /* garbage collection */
43 unsigned char extensions[0] /* data extensions */
44 __aligned(__alignof__(u64));
44}; 45};
45 46
46/* ADT structure for generic function args */ 47/* ADT structure for generic function args */
@@ -209,13 +210,6 @@ init_map_port(struct ip_set *set, struct bitmap_port *map,
209 map->members = ip_set_alloc(map->memsize); 210 map->members = ip_set_alloc(map->memsize);
210 if (!map->members) 211 if (!map->members)
211 return false; 212 return false;
212 if (set->dsize) {
213 map->extensions = ip_set_alloc(set->dsize * map->elements);
214 if (!map->extensions) {
215 kfree(map->members);
216 return false;
217 }
218 }
219 map->first_port = first_port; 213 map->first_port = first_port;
220 map->last_port = last_port; 214 map->last_port = last_port;
221 set->timeout = IPSET_NO_TIMEOUT; 215 set->timeout = IPSET_NO_TIMEOUT;
@@ -232,6 +226,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
232{ 226{
233 struct bitmap_port *map; 227 struct bitmap_port *map;
234 u16 first_port, last_port; 228 u16 first_port, last_port;
229 u32 elements;
235 230
236 if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || 231 if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
237 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) || 232 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) ||
@@ -248,14 +243,15 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
248 last_port = tmp; 243 last_port = tmp;
249 } 244 }
250 245
251 map = kzalloc(sizeof(*map), GFP_KERNEL); 246 elements = last_port - first_port + 1;
247 set->dsize = ip_set_elem_len(set, tb, 0, 0);
248 map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
252 if (!map) 249 if (!map)
253 return -ENOMEM; 250 return -ENOMEM;
254 251
255 map->elements = last_port - first_port + 1; 252 map->elements = elements;
256 map->memsize = bitmap_bytes(0, map->elements); 253 map->memsize = bitmap_bytes(0, map->elements);
257 set->variant = &bitmap_port; 254 set->variant = &bitmap_port;
258 set->dsize = ip_set_elem_len(set, tb, 0);
259 if (!init_map_port(set, map, first_port, last_port)) { 255 if (!init_map_port(set, map, first_port, last_port)) {
260 kfree(map); 256 kfree(map);
261 return -ENOMEM; 257 return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 69ab9c2634e1..54f3d7cb23e6 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -364,25 +364,27 @@ add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[])
364} 364}
365 365
366size_t 366size_t
367ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len) 367ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
368 size_t align)
368{ 369{
369 enum ip_set_ext_id id; 370 enum ip_set_ext_id id;
370 size_t offset = len;
371 u32 cadt_flags = 0; 371 u32 cadt_flags = 0;
372 372
373 if (tb[IPSET_ATTR_CADT_FLAGS]) 373 if (tb[IPSET_ATTR_CADT_FLAGS])
374 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); 374 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
375 if (cadt_flags & IPSET_FLAG_WITH_FORCEADD) 375 if (cadt_flags & IPSET_FLAG_WITH_FORCEADD)
376 set->flags |= IPSET_CREATE_FLAG_FORCEADD; 376 set->flags |= IPSET_CREATE_FLAG_FORCEADD;
377 if (!align)
378 align = 1;
377 for (id = 0; id < IPSET_EXT_ID_MAX; id++) { 379 for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
378 if (!add_extension(id, cadt_flags, tb)) 380 if (!add_extension(id, cadt_flags, tb))
379 continue; 381 continue;
380 offset = ALIGN(offset, ip_set_extensions[id].align); 382 len = ALIGN(len, ip_set_extensions[id].align);
381 set->offset[id] = offset; 383 set->offset[id] = len;
382 set->extensions |= ip_set_extensions[id].type; 384 set->extensions |= ip_set_extensions[id].type;
383 offset += ip_set_extensions[id].len; 385 len += ip_set_extensions[id].len;
384 } 386 }
385 return offset; 387 return ALIGN(len, align);
386} 388}
387EXPORT_SYMBOL_GPL(ip_set_elem_len); 389EXPORT_SYMBOL_GPL(ip_set_elem_len);
388 390
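The reworked ip_set_elem_len() above aligns each extension's offset individually and then rounds the final length up to the element alignment, so elements packed back to back in one flat array all stay aligned. A standalone sketch of that arithmetic, with a hypothetical two-entry extension table:

    #include <stddef.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

    struct ext_desc { size_t len, align; };

    /* Hypothetical extensions: a 64-bit timeout and a 32-bit counter. */
    static const struct ext_desc exts[2] = {
            { .len = 8, .align = 8 },
            { .len = 4, .align = 4 },
    };

    static size_t elem_len(size_t len, size_t align, size_t offsets[2])
    {
            size_t i;

            if (!align)
                    align = 1;            /* same fallback as the patch */
            for (i = 0; i < 2; i++) {
                    len = ALIGN_UP(len, exts[i].align);
                    offsets[i] = len;     /* where this extension lives */
                    len += exts[i].len;
            }
            /* round up so slot i+1 starts correctly aligned */
            return ALIGN_UP(len, align);
    }

The trailing ALIGN() is the point of the new align argument: without it, element 1 of a flat array could start at a misaligned offset even though element 0 was fine.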
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 691b54fcaf2a..e5336ab36d67 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -72,8 +72,9 @@ struct hbucket {
72 DECLARE_BITMAP(used, AHASH_MAX_TUNED); 72 DECLARE_BITMAP(used, AHASH_MAX_TUNED);
73 u8 size; /* size of the array */ 73 u8 size; /* size of the array */
74 u8 pos; /* position of the first free entry */ 74 u8 pos; /* position of the first free entry */
75 unsigned char value[0]; /* the array of the values */ 75 unsigned char value[0] /* the array of the values */
76} __attribute__ ((aligned)); 76 __aligned(__alignof__(u64));
77};
77 78
78/* The hash table: the table size stored here in order to make resizing easy */ 79/* The hash table: the table size stored here in order to make resizing easy */
79struct htable { 80struct htable {
@@ -475,7 +476,7 @@ static void
475mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize) 476mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
476{ 477{
477 struct htable *t; 478 struct htable *t;
478 struct hbucket *n; 479 struct hbucket *n, *tmp;
479 struct mtype_elem *data; 480 struct mtype_elem *data;
480 u32 i, j, d; 481 u32 i, j, d;
481#ifdef IP_SET_HASH_WITH_NETS 482#ifdef IP_SET_HASH_WITH_NETS
@@ -510,9 +511,14 @@ mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
510 } 511 }
511 } 512 }
512 if (d >= AHASH_INIT_SIZE) { 513 if (d >= AHASH_INIT_SIZE) {
513 struct hbucket *tmp = kzalloc(sizeof(*tmp) + 514 if (d >= n->size) {
514 (n->size - AHASH_INIT_SIZE) * dsize, 515 rcu_assign_pointer(hbucket(t, i), NULL);
515 GFP_ATOMIC); 516 kfree_rcu(n, rcu);
517 continue;
518 }
519 tmp = kzalloc(sizeof(*tmp) +
520 (n->size - AHASH_INIT_SIZE) * dsize,
521 GFP_ATOMIC);
516 if (!tmp) 522 if (!tmp)
517 /* Still try to delete expired elements */ 523 /* Still try to delete expired elements */
518 continue; 524 continue;
@@ -522,7 +528,7 @@ mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
522 continue; 528 continue;
523 data = ahash_data(n, j, dsize); 529 data = ahash_data(n, j, dsize);
524 memcpy(tmp->value + d * dsize, data, dsize); 530 memcpy(tmp->value + d * dsize, data, dsize);
525 set_bit(j, tmp->used); 531 set_bit(d, tmp->used);
526 d++; 532 d++;
527 } 533 }
528 tmp->pos = d; 534 tmp->pos = d;
@@ -1323,12 +1329,14 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
1323#endif 1329#endif
1324 set->variant = &IPSET_TOKEN(HTYPE, 4_variant); 1330 set->variant = &IPSET_TOKEN(HTYPE, 4_variant);
1325 set->dsize = ip_set_elem_len(set, tb, 1331 set->dsize = ip_set_elem_len(set, tb,
1326 sizeof(struct IPSET_TOKEN(HTYPE, 4_elem))); 1332 sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)),
1333 __alignof__(struct IPSET_TOKEN(HTYPE, 4_elem)));
1327#ifndef IP_SET_PROTO_UNDEF 1334#ifndef IP_SET_PROTO_UNDEF
1328 } else { 1335 } else {
1329 set->variant = &IPSET_TOKEN(HTYPE, 6_variant); 1336 set->variant = &IPSET_TOKEN(HTYPE, 6_variant);
1330 set->dsize = ip_set_elem_len(set, tb, 1337 set->dsize = ip_set_elem_len(set, tb,
1331 sizeof(struct IPSET_TOKEN(HTYPE, 6_elem))); 1338 sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)),
1339 __alignof__(struct IPSET_TOKEN(HTYPE, 6_elem)));
1332 } 1340 }
1333#endif 1341#endif
1334 if (tb[IPSET_ATTR_TIMEOUT]) { 1342 if (tb[IPSET_ATTR_TIMEOUT]) {
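Two separate fixes hide in the mtype_expire() hunk: a bucket whose entries have all expired is now unlinked and freed via kfree_rcu() instead of being reallocated, and the surviving entries are compacted with set_bit(d, ...) on the destination slot rather than set_bit(j, ...) on the source slot. A small sketch of the compaction invariant, assuming a fixed 8-slot bucket:

    #include <stdbool.h>
    #include <string.h>

    #define NSLOT 8

    struct bucket {
            unsigned long used;           /* bit i set => slot i valid */
            int value[NSLOT];
    };

    static int compact(struct bucket *dst, const struct bucket *src,
                       bool (*expired)(int v))
    {
            int j, d = 0;

            memset(dst, 0, sizeof(*dst));
            for (j = 0; j < NSLOT; j++) {
                    if (!(src->used & (1UL << j)) || expired(src->value[j]))
                            continue;
                    dst->value[d] = src->value[j];
                    dst->used |= 1UL << d;        /* bit d, not bit j */
                    d++;
            }
            return d;                     /* 0 => free the bucket instead */
    }

With set_bit(j, ...), the bitmap marked the old positions while the values had been compacted, so later lookups consulted the wrong slots.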
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 5a30ce6e8c90..bbede95c9f68 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -31,7 +31,7 @@ struct set_elem {
31 struct rcu_head rcu; 31 struct rcu_head rcu;
32 struct list_head list; 32 struct list_head list;
33 ip_set_id_t id; 33 ip_set_id_t id;
34}; 34} __aligned(__alignof__(u64));
35 35
36struct set_adt_elem { 36struct set_adt_elem {
37 ip_set_id_t id; 37 ip_set_id_t id;
@@ -618,7 +618,8 @@ list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
618 size = IP_SET_LIST_MIN_SIZE; 618 size = IP_SET_LIST_MIN_SIZE;
619 619
620 set->variant = &set_variant; 620 set->variant = &set_variant;
621 set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem)); 621 set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem),
622 __alignof__(struct set_elem));
622 if (!init_list_set(net, set, size)) 623 if (!init_list_set(net, set, size))
623 return -ENOMEM; 624 return -ENOMEM;
624 if (tb[IPSET_ATTR_TIMEOUT]) { 625 if (tb[IPSET_ATTR_TIMEOUT]) {
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 1e24fff53e4b..f57b4dcdb233 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1176,6 +1176,7 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
1176 struct ip_vs_protocol *pp; 1176 struct ip_vs_protocol *pp;
1177 struct ip_vs_proto_data *pd; 1177 struct ip_vs_proto_data *pd;
1178 struct ip_vs_conn *cp; 1178 struct ip_vs_conn *cp;
1179 struct sock *sk;
1179 1180
1180 EnterFunction(11); 1181 EnterFunction(11);
1181 1182
@@ -1183,13 +1184,12 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
1183 if (skb->ipvs_property) 1184 if (skb->ipvs_property)
1184 return NF_ACCEPT; 1185 return NF_ACCEPT;
1185 1186
1187 sk = skb_to_full_sk(skb);
1186 /* Bad... Do not break raw sockets */ 1188 /* Bad... Do not break raw sockets */
1187 if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT && 1189 if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
1188 af == AF_INET)) { 1190 af == AF_INET)) {
1189 struct sock *sk = skb->sk;
1190 struct inet_sock *inet = inet_sk(skb->sk);
1191 1191
1192 if (inet && sk->sk_family == PF_INET && inet->nodefrag) 1192 if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
1193 return NF_ACCEPT; 1193 return NF_ACCEPT;
1194 } 1194 }
1195 1195
@@ -1681,6 +1681,7 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
1681 struct ip_vs_conn *cp; 1681 struct ip_vs_conn *cp;
1682 int ret, pkts; 1682 int ret, pkts;
1683 int conn_reuse_mode; 1683 int conn_reuse_mode;
1684 struct sock *sk;
1684 1685
1685 /* Already marked as IPVS request or reply? */ 1686 /* Already marked as IPVS request or reply? */
1686 if (skb->ipvs_property) 1687 if (skb->ipvs_property)
@@ -1708,12 +1709,11 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
1708 ip_vs_fill_iph_skb(af, skb, false, &iph); 1709 ip_vs_fill_iph_skb(af, skb, false, &iph);
1709 1710
1710 /* Bad... Do not break raw sockets */ 1711 /* Bad... Do not break raw sockets */
1711 if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT && 1712 sk = skb_to_full_sk(skb);
1713 if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
1712 af == AF_INET)) { 1714 af == AF_INET)) {
1713 struct sock *sk = skb->sk;
1714 struct inet_sock *inet = inet_sk(skb->sk);
1715 1715
1716 if (inet && sk->sk_family == PF_INET && inet->nodefrag) 1716 if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
1717 return NF_ACCEPT; 1717 return NF_ACCEPT;
1718 } 1718 }
1719 1719
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 06eb48fceb42..740cce4685ac 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -825,7 +825,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
825 struct net *net = sock_net(ctnl); 825 struct net *net = sock_net(ctnl);
826 struct nfnl_log_net *log = nfnl_log_pernet(net); 826 struct nfnl_log_net *log = nfnl_log_pernet(net);
827 int ret = 0; 827 int ret = 0;
828 u16 flags; 828 u16 flags = 0;
829 829
830 if (nfula[NFULA_CFG_CMD]) { 830 if (nfula[NFULA_CFG_CMD]) {
831 u_int8_t pf = nfmsg->nfgen_family; 831 u_int8_t pf = nfmsg->nfgen_family;
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index 1067fb4c1ffa..c7808fc19719 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -47,27 +47,34 @@ static void nft_counter_eval(const struct nft_expr *expr,
47 local_bh_enable(); 47 local_bh_enable();
48} 48}
49 49
50static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr) 50static void nft_counter_fetch(const struct nft_counter_percpu __percpu *counter,
51 struct nft_counter *total)
51{ 52{
52 struct nft_counter_percpu_priv *priv = nft_expr_priv(expr); 53 const struct nft_counter_percpu *cpu_stats;
53 struct nft_counter_percpu *cpu_stats;
54 struct nft_counter total;
55 u64 bytes, packets; 54 u64 bytes, packets;
56 unsigned int seq; 55 unsigned int seq;
57 int cpu; 56 int cpu;
58 57
59 memset(&total, 0, sizeof(total)); 58 memset(total, 0, sizeof(*total));
60 for_each_possible_cpu(cpu) { 59 for_each_possible_cpu(cpu) {
61 cpu_stats = per_cpu_ptr(priv->counter, cpu); 60 cpu_stats = per_cpu_ptr(counter, cpu);
62 do { 61 do {
63 seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp); 62 seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
64 bytes = cpu_stats->counter.bytes; 63 bytes = cpu_stats->counter.bytes;
65 packets = cpu_stats->counter.packets; 64 packets = cpu_stats->counter.packets;
66 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq)); 65 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
67 66
68 total.packets += packets; 67 total->packets += packets;
69 total.bytes += bytes; 68 total->bytes += bytes;
70 } 69 }
70}
71
72static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
73{
74 struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
75 struct nft_counter total;
76
77 nft_counter_fetch(priv->counter, &total);
71 78
72 if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) || 79 if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) ||
73 nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets))) 80 nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets)))
@@ -118,6 +125,31 @@ static void nft_counter_destroy(const struct nft_ctx *ctx,
118 free_percpu(priv->counter); 125 free_percpu(priv->counter);
119} 126}
120 127
128static int nft_counter_clone(struct nft_expr *dst, const struct nft_expr *src)
129{
130 struct nft_counter_percpu_priv *priv = nft_expr_priv(src);
131 struct nft_counter_percpu_priv *priv_clone = nft_expr_priv(dst);
132 struct nft_counter_percpu __percpu *cpu_stats;
133 struct nft_counter_percpu *this_cpu;
134 struct nft_counter total;
135
136 nft_counter_fetch(priv->counter, &total);
137
138 cpu_stats = __netdev_alloc_pcpu_stats(struct nft_counter_percpu,
139 GFP_ATOMIC);
140 if (cpu_stats == NULL)
141 return -ENOMEM;
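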
142
143 preempt_disable();
144 this_cpu = this_cpu_ptr(cpu_stats);
145 this_cpu->counter.packets = total.packets;
146 this_cpu->counter.bytes = total.bytes;
147 preempt_enable();
148
149 priv_clone->counter = cpu_stats;
150 return 0;
151}
152
121static struct nft_expr_type nft_counter_type; 153static struct nft_expr_type nft_counter_type;
122static const struct nft_expr_ops nft_counter_ops = { 154static const struct nft_expr_ops nft_counter_ops = {
123 .type = &nft_counter_type, 155 .type = &nft_counter_type,
@@ -126,6 +158,7 @@ static const struct nft_expr_ops nft_counter_ops = {
126 .init = nft_counter_init, 158 .init = nft_counter_init,
127 .destroy = nft_counter_destroy, 159 .destroy = nft_counter_destroy,
128 .dump = nft_counter_dump, 160 .dump = nft_counter_dump,
161 .clone = nft_counter_clone,
129}; 162};
130 163
131static struct nft_expr_type nft_counter_type __read_mostly = { 164static struct nft_expr_type nft_counter_type __read_mostly = {
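Factoring nft_counter_fetch() out lets the new .clone hook reuse the same consistent snapshot: sum every CPU's counter under a u64_stats retry loop, then seed one CPU of the fresh per-cpu allocation with the totals. A userspace approximation of the retry-loop part (the atomics here stand in for the kernel's u64_stats seqcount, so treat it as a sketch, not the real API):

    #include <stdint.h>

    #define NCPU 4

    struct pcpu_counter {
            unsigned int seq;             /* odd while a writer is active */
            uint64_t bytes, packets;
    };

    static void fetch_total(const struct pcpu_counter stats[NCPU],
                            uint64_t *bytes, uint64_t *packets)
    {
            *bytes = *packets = 0;
            for (int cpu = 0; cpu < NCPU; cpu++) {
                    const struct pcpu_counter *c = &stats[cpu];
                    unsigned int seq;
                    uint64_t b, p;

                    do {                  /* retry if a writer interleaved */
                            seq = __atomic_load_n(&c->seq, __ATOMIC_ACQUIRE);
                            b = c->bytes;
                            p = c->packets;
                    } while ((seq & 1) ||
                             seq != __atomic_load_n(&c->seq, __ATOMIC_ACQUIRE));

                    *bytes += b;
                    *packets += p;
            }
    }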
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 513a8ef60a59..9dec3bd1b63c 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -50,8 +50,9 @@ static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr,
50 } 50 }
51 51
52 ext = nft_set_elem_ext(set, elem); 52 ext = nft_set_elem_ext(set, elem);
53 if (priv->expr != NULL) 53 if (priv->expr != NULL &&
54 nft_expr_clone(nft_set_ext_expr(ext), priv->expr); 54 nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0)
55 return NULL;
55 56
56 return elem; 57 return elem;
57} 58}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index af399cac5205..1cf928fb573e 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1741,6 +1741,20 @@ static void fanout_release(struct sock *sk)
1741 kfree_rcu(po->rollover, rcu); 1741 kfree_rcu(po->rollover, rcu);
1742} 1742}
1743 1743
1744static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1745 struct sk_buff *skb)
1746{
1747 /* Earlier code assumed this would be a VLAN pkt, double-check
1748 * this now that we have the actual packet in hand. We can only
1749 * do this check on Ethernet devices.
1750 */
1751 if (unlikely(dev->type != ARPHRD_ETHER))
1752 return false;
1753
1754 skb_reset_mac_header(skb);
1755 return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1756}
1757
1744static const struct proto_ops packet_ops; 1758static const struct proto_ops packet_ops;
1745 1759
1746static const struct proto_ops packet_ops_spkt; 1760static const struct proto_ops packet_ops_spkt;
@@ -1902,18 +1916,10 @@ retry:
1902 goto retry; 1916 goto retry;
1903 } 1917 }
1904 1918
1905 if (len > (dev->mtu + dev->hard_header_len + extra_len)) { 1919 if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1906 /* Earlier code assumed this would be a VLAN pkt, 1920 !packet_extra_vlan_len_allowed(dev, skb)) {
1907 * double-check this now that we have the actual 1921 err = -EMSGSIZE;
1908 * packet in hand. 1922 goto out_unlock;
1909 */
1910 struct ethhdr *ehdr;
1911 skb_reset_mac_header(skb);
1912 ehdr = eth_hdr(skb);
1913 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
1914 err = -EMSGSIZE;
1915 goto out_unlock;
1916 }
1917 } 1923 }
1918 1924
1919 skb->protocol = proto; 1925 skb->protocol = proto;
@@ -2332,6 +2338,15 @@ static bool ll_header_truncated(const struct net_device *dev, int len)
2332 return false; 2338 return false;
2333} 2339}
2334 2340
2341static void tpacket_set_protocol(const struct net_device *dev,
2342 struct sk_buff *skb)
2343{
2344 if (dev->type == ARPHRD_ETHER) {
2345 skb_reset_mac_header(skb);
2346 skb->protocol = eth_hdr(skb)->h_proto;
2347 }
2348}
2349
2335static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, 2350static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2336 void *frame, struct net_device *dev, int size_max, 2351 void *frame, struct net_device *dev, int size_max,
2337 __be16 proto, unsigned char *addr, int hlen) 2352 __be16 proto, unsigned char *addr, int hlen)
@@ -2368,8 +2383,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2368 skb_reserve(skb, hlen); 2383 skb_reserve(skb, hlen);
2369 skb_reset_network_header(skb); 2384 skb_reset_network_header(skb);
2370 2385
2371 if (!packet_use_direct_xmit(po))
2372 skb_probe_transport_header(skb, 0);
2373 if (unlikely(po->tp_tx_has_off)) { 2386 if (unlikely(po->tp_tx_has_off)) {
2374 int off_min, off_max, off; 2387 int off_min, off_max, off;
2375 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); 2388 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
@@ -2415,6 +2428,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2415 dev->hard_header_len); 2428 dev->hard_header_len);
2416 if (unlikely(err)) 2429 if (unlikely(err))
2417 return err; 2430 return err;
2431 if (!skb->protocol)
2432 tpacket_set_protocol(dev, skb);
2418 2433
2419 data += dev->hard_header_len; 2434 data += dev->hard_header_len;
2420 to_write -= dev->hard_header_len; 2435 to_write -= dev->hard_header_len;
@@ -2449,6 +2464,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2449 len = ((to_write > len_max) ? len_max : to_write); 2464 len = ((to_write > len_max) ? len_max : to_write);
2450 } 2465 }
2451 2466
2467 skb_probe_transport_header(skb, 0);
2468
2452 return tp_len; 2469 return tp_len;
2453} 2470}
2454 2471
@@ -2493,12 +2510,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2493 if (unlikely(!(dev->flags & IFF_UP))) 2510 if (unlikely(!(dev->flags & IFF_UP)))
2494 goto out_put; 2511 goto out_put;
2495 2512
2496 reserve = dev->hard_header_len + VLAN_HLEN; 2513 if (po->sk.sk_socket->type == SOCK_RAW)
2514 reserve = dev->hard_header_len;
2497 size_max = po->tx_ring.frame_size 2515 size_max = po->tx_ring.frame_size
2498 - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); 2516 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2499 2517
2500 if (size_max > dev->mtu + reserve) 2518 if (size_max > dev->mtu + reserve + VLAN_HLEN)
2501 size_max = dev->mtu + reserve; 2519 size_max = dev->mtu + reserve + VLAN_HLEN;
2502 2520
2503 do { 2521 do {
2504 ph = packet_current_frame(po, &po->tx_ring, 2522 ph = packet_current_frame(po, &po->tx_ring,
@@ -2525,18 +2543,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2525 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, 2543 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2526 addr, hlen); 2544 addr, hlen);
2527 if (likely(tp_len >= 0) && 2545 if (likely(tp_len >= 0) &&
2528 tp_len > dev->mtu + dev->hard_header_len) { 2546 tp_len > dev->mtu + reserve &&
2529 struct ethhdr *ehdr; 2547 !packet_extra_vlan_len_allowed(dev, skb))
2530 /* Earlier code assumed this would be a VLAN pkt, 2548 tp_len = -EMSGSIZE;
2531 * double-check this now that we have the actual
2532 * packet in hand.
2533 */
2534 2549
2535 skb_reset_mac_header(skb);
2536 ehdr = eth_hdr(skb);
2537 if (ehdr->h_proto != htons(ETH_P_8021Q))
2538 tp_len = -EMSGSIZE;
2539 }
2540 if (unlikely(tp_len < 0)) { 2550 if (unlikely(tp_len < 0)) {
2541 if (po->tp_loss) { 2551 if (po->tp_loss) {
2542 __packet_set_status(po, ph, 2552 __packet_set_status(po, ph,
@@ -2765,18 +2775,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2765 2775
2766 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); 2776 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
2767 2777
2768 if (!gso_type && (len > dev->mtu + reserve + extra_len)) { 2778 if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
2769 /* Earlier code assumed this would be a VLAN pkt, 2779 !packet_extra_vlan_len_allowed(dev, skb)) {
2770 * double-check this now that we have the actual 2780 err = -EMSGSIZE;
2771 * packet in hand. 2781 goto out_free;
2772 */
2773 struct ethhdr *ehdr;
2774 skb_reset_mac_header(skb);
2775 ehdr = eth_hdr(skb);
2776 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2777 err = -EMSGSIZE;
2778 goto out_free;
2779 }
2780 } 2782 }
2781 2783
2782 skb->protocol = proto; 2784 skb->protocol = proto;
@@ -2807,8 +2809,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2807 len += vnet_hdr_len; 2809 len += vnet_hdr_len;
2808 } 2810 }
2809 2811
2810 if (!packet_use_direct_xmit(po)) 2812 skb_probe_transport_header(skb, reserve);
2811 skb_probe_transport_header(skb, reserve); 2813
2812 if (unlikely(extra_len == 4)) 2814 if (unlikely(extra_len == 4))
2813 skb->no_fcs = 1; 2815 skb->no_fcs = 1;
2814 2816
@@ -4107,7 +4109,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4107 err = -EINVAL; 4109 err = -EINVAL;
4108 if (unlikely((int)req->tp_block_size <= 0)) 4110 if (unlikely((int)req->tp_block_size <= 0))
4109 goto out; 4111 goto out;
4110 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) 4112 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4111 goto out; 4113 goto out;
4112 if (po->tp_version >= TPACKET_V3 && 4114 if (po->tp_version >= TPACKET_V3 &&
4113 (int)(req->tp_block_size - 4115 (int)(req->tp_block_size -
@@ -4119,8 +4121,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4119 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) 4121 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4120 goto out; 4122 goto out;
4121 4123
4122 rb->frames_per_block = req->tp_block_size/req->tp_frame_size; 4124 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4123 if (unlikely(rb->frames_per_block <= 0)) 4125 if (unlikely(rb->frames_per_block == 0))
4124 goto out; 4126 goto out;
4125 if (unlikely((rb->frames_per_block * req->tp_block_nr) != 4127 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4126 req->tp_frame_nr)) 4128 req->tp_frame_nr))
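The three copies of the open-coded VLAN double-check collapse into packet_extra_vlan_len_allowed() above. A compact model of the resulting length check, with fake device/skb types standing in for the kernel structures:

    #include <stdbool.h>
    #include <stdint.h>
    #include <arpa/inet.h>                /* htons() */

    #define ETH_P_8021Q  0x8100
    #define ARPHRD_ETHER 1

    struct fake_dev { int type; unsigned mtu, hard_header_len; };
    struct fake_skb { uint16_t h_proto; };        /* eth_hdr(skb)->h_proto */

    /* The oversize frame is tolerated only when it really carries an
     * 802.1Q tag on an Ethernet device. */
    static bool extra_vlan_len_allowed(const struct fake_dev *dev,
                                       const struct fake_skb *skb)
    {
            if (dev->type != ARPHRD_ETHER)
                    return false;
            return skb->h_proto == htons(ETH_P_8021Q);
    }

    static int check_len(const struct fake_dev *dev,
                         const struct fake_skb *skb,
                         unsigned len, unsigned extra_len)
    {
            if (len > dev->mtu + dev->hard_header_len + extra_len &&
                !extra_vlan_len_allowed(dev, skb))
                    return -90;           /* -EMSGSIZE */
            return 0;
    }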
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 4f15b7d730e1..1543e39f47c3 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -809,8 +809,8 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
809 if (!has_sha1) 809 if (!has_sha1)
810 return -EINVAL; 810 return -EINVAL;
811 811
812 memcpy(ep->auth_hmacs_list->hmac_ids, &hmacs->shmac_idents[0], 812 for (i = 0; i < hmacs->shmac_num_idents; i++)
813 hmacs->shmac_num_idents * sizeof(__u16)); 813 ep->auth_hmacs_list->hmac_ids[i] = htons(hmacs->shmac_idents[i]);
814 ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) + 814 ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) +
815 hmacs->shmac_num_idents * sizeof(__u16)); 815 hmacs->shmac_num_idents * sizeof(__u16));
816 return 0; 816 return 0;
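The sctp_auth_ep_set_hmacs() fix replaces a raw memcpy() with a per-entry htons(): hmac_ids is a wire-format (big-endian) array, so copying host-order u16s shipped byte-swapped identifiers on little-endian machines. The same conversion in isolation:

    #include <stdint.h>
    #include <arpa/inet.h>

    static void fill_hmac_ids(uint16_t *wire, const uint16_t *host, int n)
    {
            for (int i = 0; i < n; i++)
                    wire[i] = htons(host[i]);     /* host -> network order */
    }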
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index aaa0b58d6aba..955ec152cb71 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -441,6 +441,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
441 if (state == TCP_LISTEN) 441 if (state == TCP_LISTEN)
442 unix_release_sock(skb->sk, 1); 442 unix_release_sock(skb->sk, 1);
443 /* passed fds are erased in the kfree_skb hook */ 443 /* passed fds are erased in the kfree_skb hook */
444 UNIXCB(skb).consumed = skb->len;
444 kfree_skb(skb); 445 kfree_skb(skb);
445 } 446 }
446 447
@@ -1799,6 +1800,7 @@ alloc_skb:
1799 * this - does no harm 1800 * this - does no harm
1800 */ 1801 */
1801 consume_skb(newskb); 1802 consume_skb(newskb);
1803 newskb = NULL;
1802 } 1804 }
1803 1805
1804 if (skb_append_pagefrags(skb, page, offset, size)) { 1806 if (skb_append_pagefrags(skb, page, offset, size)) {
@@ -1811,8 +1813,11 @@ alloc_skb:
1811 skb->truesize += size; 1813 skb->truesize += size;
1812 atomic_add(size, &sk->sk_wmem_alloc); 1814 atomic_add(size, &sk->sk_wmem_alloc);
1813 1815
1814 if (newskb) 1816 if (newskb) {
1817 spin_lock(&other->sk_receive_queue.lock);
1815 __skb_queue_tail(&other->sk_receive_queue, newskb); 1818 __skb_queue_tail(&other->sk_receive_queue, newskb);
1819 spin_unlock(&other->sk_receive_queue.lock);
1820 }
1816 1821
1817 unix_state_unlock(other); 1822 unix_state_unlock(other);
1818 mutex_unlock(&unix_sk(other)->readlock); 1823 mutex_unlock(&unix_sk(other)->readlock);
@@ -2072,6 +2077,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
2072 2077
2073 do { 2078 do {
2074 int chunk; 2079 int chunk;
2080 bool drop_skb;
2075 struct sk_buff *skb, *last; 2081 struct sk_buff *skb, *last;
2076 2082
2077 unix_state_lock(sk); 2083 unix_state_lock(sk);
@@ -2152,7 +2158,11 @@ unlock:
2152 } 2158 }
2153 2159
2154 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size); 2160 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2161 skb_get(skb);
2155 chunk = state->recv_actor(skb, skip, chunk, state); 2162 chunk = state->recv_actor(skb, skip, chunk, state);
2163 drop_skb = !unix_skb_len(skb);
2164 /* skb is only safe to use if !drop_skb */
2165 consume_skb(skb);
2156 if (chunk < 0) { 2166 if (chunk < 0) {
2157 if (copied == 0) 2167 if (copied == 0)
2158 copied = -EFAULT; 2168 copied = -EFAULT;
@@ -2161,6 +2171,18 @@ unlock:
2161 copied += chunk; 2171 copied += chunk;
2162 size -= chunk; 2172 size -= chunk;
2163 2173
2174 if (drop_skb) {
2175 /* the skb was touched by a concurrent reader;
2176 * we should not expect anything from this skb
2177 * anymore and assume it invalid - we can be
2178 * sure it was dropped from the socket queue
2179 *
2180 * let's report a short read
2181 */
2182 err = 0;
2183 break;
2184 }
2185
2164 /* Mark read part of skb as used */ 2186 /* Mark read part of skb as used */
2165 if (!(flags & MSG_PEEK)) { 2187 if (!(flags & MSG_PEEK)) {
2166 UNIXCB(skb).consumed += chunk; 2188 UNIXCB(skb).consumed += chunk;
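The af_unix read path now pins the skb with skb_get() before calling recv_actor() (which can run without the queue lock held), then checks afterwards whether a concurrent reader drained it; if so, the data already copied is reported as a short read instead of touching a dead skb. The shape of that discipline, reduced to a toy refcounted buffer (illustrative only, not the skb API):

    #include <stdbool.h>

    struct buf { int refcnt; int remaining; };

    static int read_one(struct buf *b,
                        int (*actor)(struct buf *b, int want), int want)
    {
            int n;
            bool drained;

            b->refcnt++;                  /* skb_get(): keep b alive */
            n = actor(b, want);           /* may block, lock dropped */
            drained = (b->remaining == 0);
            b->refcnt--;                  /* consume_skb(): drop our pin */
            if (n > 0 && drained)
                    return 0;             /* a concurrent reader emptied it:
                                             report a short read */
            return n;
    }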
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 79b4596b5f9a..edd638b5825f 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -67,10 +67,13 @@ HOSTLOADLIBES_lathist += -lelf
67# point this to your LLVM backend with bpf support 67# point this to your LLVM backend with bpf support
68LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc 68LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc
69 69
70# asm/sysreg.h inline assembly used by it is incompatible with llvm.
71# But, there is no easy way to fix it, so just exclude it since it is
72# useless for BPF samples.
70$(obj)/%.o: $(src)/%.c 73$(obj)/%.o: $(src)/%.c
71 clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \ 74 clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
72 -D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \ 75 -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
73 -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@ 76 -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
74 clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \ 77 clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
75 -D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \ 78 -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
76 -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=asm -o $@.s 79 -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=asm -o $@.s
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 125b906cd1d4..638a38e1b419 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -2711,7 +2711,7 @@ $kernelversion = get_kernel_version();
2711 2711
2712# generate a sequence of code that will splice in highlighting information 2712# generate a sequence of code that will splice in highlighting information
2713# using the s// operator. 2713# using the s// operator.
2714foreach my $k (keys @highlights) { 2714for (my $k = 0; $k < @highlights; $k++) {
2715 my $pattern = $highlights[$k][0]; 2715 my $pattern = $highlights[$k][0];
2716 my $result = $highlights[$k][1]; 2716 my $result = $highlights[$k][1];
2717# print STDERR "scanning pattern:$pattern, highlight:($result)\n"; 2717# print STDERR "scanning pattern:$pattern, highlight:($result)\n";
diff --git a/tools/Makefile b/tools/Makefile
index d6f307dfb1a3..7dc820a8c1f1 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -32,6 +32,10 @@ help:
32 @echo ' from the kernel command line to build and install one of' 32 @echo ' from the kernel command line to build and install one of'
33 @echo ' the tools above' 33 @echo ' the tools above'
34 @echo '' 34 @echo ''
35 @echo ' $$ make tools/all'
36 @echo ''
37 @echo ' builds all tools.'
38 @echo ''
35 @echo ' $$ make tools/install' 39 @echo ' $$ make tools/install'
36 @echo '' 40 @echo ''
37 @echo ' installs all tools.' 41 @echo ' installs all tools.'
@@ -77,6 +81,11 @@ tmon: FORCE
77freefall: FORCE 81freefall: FORCE
78 $(call descend,laptop/$@) 82 $(call descend,laptop/$@)
79 83
84all: acpi cgroup cpupower hv firewire lguest \
85 perf selftests turbostat usb \
86 virtio vm net x86_energy_perf_policy \
87 tmon freefall
88
80acpi_install: 89acpi_install:
81 $(call descend,power/$(@:_install=),install) 90 $(call descend,power/$(@:_install=),install)
82 91
@@ -101,7 +110,7 @@ freefall_install:
101install: acpi_install cgroup_install cpupower_install hv_install firewire_install lguest_install \ 110install: acpi_install cgroup_install cpupower_install hv_install firewire_install lguest_install \
102 perf_install selftests_install turbostat_install usb_install \ 111 perf_install selftests_install turbostat_install usb_install \
103 virtio_install vm_install net_install x86_energy_perf_policy_install \ 112 virtio_install vm_install net_install x86_energy_perf_policy_install \
104 tmon freefall_install 113 tmon_install freefall_install
105 114
106acpi_clean: 115acpi_clean:
107 $(call descend,power/acpi,clean) 116 $(call descend,power/acpi,clean)
diff --git a/tools/net/Makefile b/tools/net/Makefile
index ee577ea03ba5..ddf888010652 100644
--- a/tools/net/Makefile
+++ b/tools/net/Makefile
@@ -4,6 +4,9 @@ CC = gcc
4LEX = flex 4LEX = flex
5YACC = bison 5YACC = bison
6 6
7CFLAGS += -Wall -O2
8CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
9
7%.yacc.c: %.y 10%.yacc.c: %.y
8 $(YACC) -o $@ -d $< 11 $(YACC) -o $@ -d $<
9 12
@@ -12,15 +15,13 @@ YACC = bison
12 15
13all : bpf_jit_disasm bpf_dbg bpf_asm 16all : bpf_jit_disasm bpf_dbg bpf_asm
14 17
15bpf_jit_disasm : CFLAGS = -Wall -O2 -DPACKAGE='bpf_jit_disasm' 18bpf_jit_disasm : CFLAGS += -DPACKAGE='bpf_jit_disasm'
16bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl 19bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl
17bpf_jit_disasm : bpf_jit_disasm.o 20bpf_jit_disasm : bpf_jit_disasm.o
18 21
19bpf_dbg : CFLAGS = -Wall -O2
20bpf_dbg : LDLIBS = -lreadline 22bpf_dbg : LDLIBS = -lreadline
21bpf_dbg : bpf_dbg.o 23bpf_dbg : bpf_dbg.o
22 24
23bpf_asm : CFLAGS = -Wall -O2 -I.
24bpf_asm : LDLIBS = 25bpf_asm : LDLIBS =
25bpf_asm : bpf_asm.o bpf_exp.yacc.o bpf_exp.lex.o 26bpf_asm : bpf_asm.o bpf_exp.yacc.o bpf_exp.lex.o
26bpf_exp.lex.o : bpf_exp.yacc.c 27bpf_exp.lex.o : bpf_exp.yacc.c
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 0a945d2e8ca5..99d127fe9c35 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -675,6 +675,7 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
675 .fork = perf_event__repipe, 675 .fork = perf_event__repipe,
676 .exit = perf_event__repipe, 676 .exit = perf_event__repipe,
677 .lost = perf_event__repipe, 677 .lost = perf_event__repipe,
678 .lost_samples = perf_event__repipe,
678 .aux = perf_event__repipe, 679 .aux = perf_event__repipe,
679 .itrace_start = perf_event__repipe, 680 .itrace_start = perf_event__repipe,
680 .context_switch = perf_event__repipe, 681 .context_switch = perf_event__repipe,
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 2853ad2bd435..f256fac1e722 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -44,7 +44,7 @@
44struct report { 44struct report {
45 struct perf_tool tool; 45 struct perf_tool tool;
46 struct perf_session *session; 46 struct perf_session *session;
47 bool force, use_tui, use_gtk, use_stdio; 47 bool use_tui, use_gtk, use_stdio;
48 bool hide_unresolved; 48 bool hide_unresolved;
49 bool dont_use_callchains; 49 bool dont_use_callchains;
50 bool show_full_info; 50 bool show_full_info;
@@ -678,7 +678,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
678 "file", "vmlinux pathname"), 678 "file", "vmlinux pathname"),
679 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, 679 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
680 "file", "kallsyms pathname"), 680 "file", "kallsyms pathname"),
681 OPT_BOOLEAN('f', "force", &report.force, "don't complain, do it"), 681 OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
682 OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, 682 OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
683 "load module symbols - WARNING: use only with -k and LIVE kernel"), 683 "load module symbols - WARNING: use only with -k and LIVE kernel"),
684 OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, 684 OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
@@ -832,7 +832,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
832 } 832 }
833 833
834 file.path = input_name; 834 file.path = input_name;
835 file.force = report.force; 835 file.force = symbol_conf.force;
836 836
837repeat: 837repeat:
838 session = perf_session__new(&file, false, &report.tool); 838 session = perf_session__new(&file, false, &report.tool);
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index e5afb8936040..fa9eb92c9e24 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -1430,7 +1430,6 @@ close_file_and_continue:
1430 1430
1431struct popup_action { 1431struct popup_action {
1432 struct thread *thread; 1432 struct thread *thread;
1433 struct dso *dso;
1434 struct map_symbol ms; 1433 struct map_symbol ms;
1435 int socket; 1434 int socket;
1436 1435
@@ -1565,7 +1564,6 @@ add_dso_opt(struct hist_browser *browser, struct popup_action *act,
1565 return 0; 1564 return 0;
1566 1565
1567 act->ms.map = map; 1566 act->ms.map = map;
1568 act->dso = map->dso;
1569 act->fn = do_zoom_dso; 1567 act->fn = do_zoom_dso;
1570 return 1; 1568 return 1;
1571} 1569}
@@ -1827,7 +1825,6 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1827 1825
1828 while (1) { 1826 while (1) {
1829 struct thread *thread = NULL; 1827 struct thread *thread = NULL;
1830 struct dso *dso = NULL;
1831 struct map *map = NULL; 1828 struct map *map = NULL;
1832 int choice = 0; 1829 int choice = 0;
1833 int socked_id = -1; 1830 int socked_id = -1;
@@ -1839,8 +1836,6 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1839 if (browser->he_selection != NULL) { 1836 if (browser->he_selection != NULL) {
1840 thread = hist_browser__selected_thread(browser); 1837 thread = hist_browser__selected_thread(browser);
1841 map = browser->selection->map; 1838 map = browser->selection->map;
1842 if (map)
1843 dso = map->dso;
1844 socked_id = browser->he_selection->socket; 1839 socked_id = browser->he_selection->socket;
1845 } 1840 }
1846 switch (key) { 1841 switch (key) {
@@ -1874,7 +1869,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1874 hist_browser__dump(browser); 1869 hist_browser__dump(browser);
1875 continue; 1870 continue;
1876 case 'd': 1871 case 'd':
1877 actions->dso = dso; 1872 actions->ms.map = map;
1878 do_zoom_dso(browser, actions); 1873 do_zoom_dso(browser, actions);
1879 continue; 1874 continue;
1880 case 'V': 1875 case 'V':
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index d909459fb54c..217b5a60e2ab 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -76,6 +76,7 @@ struct perf_tool build_id__mark_dso_hit_ops = {
76 .exit = perf_event__exit_del_thread, 76 .exit = perf_event__exit_del_thread,
77 .attr = perf_event__process_attr, 77 .attr = perf_event__process_attr,
78 .build_id = perf_event__process_build_id, 78 .build_id = perf_event__process_build_id,
79 .ordered_events = true,
79}; 80};
80 81
81int build_id__sprintf(const u8 *build_id, int len, char *bf) 82int build_id__sprintf(const u8 *build_id, int len, char *bf)
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 7c0c08386a1d..425df5c86c9c 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -933,6 +933,7 @@ static struct dso *__dso__findlink_by_longname(struct rb_root *root,
933 /* Add new node and rebalance tree */ 933 /* Add new node and rebalance tree */
934 rb_link_node(&dso->rb_node, parent, p); 934 rb_link_node(&dso->rb_node, parent, p);
935 rb_insert_color(&dso->rb_node, root); 935 rb_insert_color(&dso->rb_node, root);
936 dso->root = root;
936 } 937 }
937 return NULL; 938 return NULL;
938} 939}
@@ -945,15 +946,30 @@ static inline struct dso *__dso__find_by_longname(struct rb_root *root,
945 946
946void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated) 947void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
947{ 948{
949 struct rb_root *root = dso->root;
950
948 if (name == NULL) 951 if (name == NULL)
949 return; 952 return;
950 953
951 if (dso->long_name_allocated) 954 if (dso->long_name_allocated)
952 free((char *)dso->long_name); 955 free((char *)dso->long_name);
953 956
957 if (root) {
958 rb_erase(&dso->rb_node, root);
959 /*
960 * __dso__findlink_by_longname() isn't guaranteed to add it
961 * back, so a clean removal is required here.
962 */
963 RB_CLEAR_NODE(&dso->rb_node);
964 dso->root = NULL;
965 }
966
954 dso->long_name = name; 967 dso->long_name = name;
955 dso->long_name_len = strlen(name); 968 dso->long_name_len = strlen(name);
956 dso->long_name_allocated = name_allocated; 969 dso->long_name_allocated = name_allocated;
970
971 if (root)
972 __dso__findlink_by_longname(root, dso, NULL);
957} 973}
958 974
959void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated) 975void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
@@ -1046,6 +1062,7 @@ struct dso *dso__new(const char *name)
1046 dso->kernel = DSO_TYPE_USER; 1062 dso->kernel = DSO_TYPE_USER;
1047 dso->needs_swap = DSO_SWAP__UNSET; 1063 dso->needs_swap = DSO_SWAP__UNSET;
1048 RB_CLEAR_NODE(&dso->rb_node); 1064 RB_CLEAR_NODE(&dso->rb_node);
1065 dso->root = NULL;
1049 INIT_LIST_HEAD(&dso->node); 1066 INIT_LIST_HEAD(&dso->node);
1050 INIT_LIST_HEAD(&dso->data.open_entry); 1067 INIT_LIST_HEAD(&dso->data.open_entry);
1051 pthread_mutex_init(&dso->lock, NULL); 1068 pthread_mutex_init(&dso->lock, NULL);
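The dso fixes add a back-pointer to the rbtree root so dso__set_long_name() can follow the standard discipline for keyed trees: unlink the node while the old key is still valid, mutate the key, re-insert. The same discipline with POSIX tsearch()/tdelete() standing in for the kernel rbtree API:

    #include <search.h>
    #include <stdlib.h>
    #include <string.h>

    struct dso { char *long_name; };

    static int cmp(const void *a, const void *b)
    {
            return strcmp(((const struct dso *)a)->long_name,
                          ((const struct dso *)b)->long_name);
    }

    static void set_long_name(void **root, struct dso *d, char *name)
    {
            tdelete(d, root, cmp);        /* unlink under the old key */
            free(d->long_name);
            d->long_name = name;          /* only now may the key change */
            tsearch(d, root, cmp);        /* re-insert under the new key */
    }

Mutating the key of a still-linked node corrupts the tree silently, which is exactly the crash class this hunk closes.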
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index fc8db9c764ac..45ec4d0a50ed 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -135,6 +135,7 @@ struct dso {
135 pthread_mutex_t lock; 135 pthread_mutex_t lock;
136 struct list_head node; 136 struct list_head node;
137 struct rb_node rb_node; /* rbtree node sorted by long name */ 137 struct rb_node rb_node; /* rbtree node sorted by long name */
138 struct rb_root *root; /* root of rbtree that rb_node is in */
138 struct rb_root symbols[MAP__NR_TYPES]; 139 struct rb_root symbols[MAP__NR_TYPES];
139 struct rb_root symbol_names[MAP__NR_TYPES]; 140 struct rb_root symbol_names[MAP__NR_TYPES];
140 struct { 141 struct {
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 5ef90be2a249..8b303ff20289 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -91,6 +91,7 @@ static void dsos__purge(struct dsos *dsos)
91 91
92 list_for_each_entry_safe(pos, n, &dsos->head, node) { 92 list_for_each_entry_safe(pos, n, &dsos->head, node) {
93 RB_CLEAR_NODE(&pos->rb_node); 93 RB_CLEAR_NODE(&pos->rb_node);
94 pos->root = NULL;
94 list_del_init(&pos->node); 95 list_del_init(&pos->node);
95 dso__put(pos); 96 dso__put(pos);
96 } 97 }
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index bd8f03de5e40..05012bb178d7 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -1183,7 +1183,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
1183 container_of(pf, struct trace_event_finder, pf); 1183 container_of(pf, struct trace_event_finder, pf);
1184 struct perf_probe_point *pp = &pf->pev->point; 1184 struct perf_probe_point *pp = &pf->pev->point;
1185 struct probe_trace_event *tev; 1185 struct probe_trace_event *tev;
1186 struct perf_probe_arg *args; 1186 struct perf_probe_arg *args = NULL;
1187 int ret, i; 1187 int ret, i;
1188 1188
1189 /* Check number of tevs */ 1189 /* Check number of tevs */
@@ -1198,19 +1198,23 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
1198 ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr, 1198 ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr,
1199 pp->retprobe, pp->function, &tev->point); 1199 pp->retprobe, pp->function, &tev->point);
1200 if (ret < 0) 1200 if (ret < 0)
1201 return ret; 1201 goto end;
1202 1202
1203 tev->point.realname = strdup(dwarf_diename(sc_die)); 1203 tev->point.realname = strdup(dwarf_diename(sc_die));
1204 if (!tev->point.realname) 1204 if (!tev->point.realname) {
1205 return -ENOMEM; 1205 ret = -ENOMEM;
1206 goto end;
1207 }
1206 1208
1207 pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, 1209 pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
1208 tev->point.offset); 1210 tev->point.offset);
1209 1211
1210 /* Expand special probe argument if exist */ 1212 /* Expand special probe argument if exist */
1211 args = zalloc(sizeof(struct perf_probe_arg) * MAX_PROBE_ARGS); 1213 args = zalloc(sizeof(struct perf_probe_arg) * MAX_PROBE_ARGS);
1212 if (args == NULL) 1214 if (args == NULL) {
1213 return -ENOMEM; 1215 ret = -ENOMEM;
1216 goto end;
1217 }
1214 1218
1215 ret = expand_probe_args(sc_die, pf, args); 1219 ret = expand_probe_args(sc_die, pf, args);
1216 if (ret < 0) 1220 if (ret < 0)
@@ -1234,6 +1238,10 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
1234 } 1238 }
1235 1239
1236end: 1240end:
1241 if (ret) {
1242 clear_probe_trace_event(tev);
1243 tf->ntevs--;
1244 }
1237 free(args); 1245 free(args);
1238 return ret; 1246 return ret;
1239} 1247}
@@ -1246,7 +1254,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
1246 struct trace_event_finder tf = { 1254 struct trace_event_finder tf = {
1247 .pf = {.pev = pev, .callback = add_probe_trace_event}, 1255 .pf = {.pev = pev, .callback = add_probe_trace_event},
1248 .max_tevs = probe_conf.max_probes, .mod = dbg->mod}; 1256 .max_tevs = probe_conf.max_probes, .mod = dbg->mod};
1249 int ret; 1257 int ret, i;
1250 1258
1251 /* Allocate result tevs array */ 1259 /* Allocate result tevs array */
1252 *tevs = zalloc(sizeof(struct probe_trace_event) * tf.max_tevs); 1260 *tevs = zalloc(sizeof(struct probe_trace_event) * tf.max_tevs);
@@ -1258,6 +1266,8 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
1258 1266
1259 ret = debuginfo__find_probes(dbg, &tf.pf); 1267 ret = debuginfo__find_probes(dbg, &tf.pf);
1260 if (ret < 0) { 1268 if (ret < 0) {
1269 for (i = 0; i < tf.ntevs; i++)
1270 clear_probe_trace_event(&tf.tevs[i]);
1261 zfree(tevs); 1271 zfree(tevs);
1262 return ret; 1272 return ret;
1263 } 1273 }
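add_probe_trace_event() converts its scattered early returns into a single exit label so a partially constructed tev is always torn down (and the tev count rewound) on failure. The pattern in miniature, with hypothetical event fields:

    #include <stdlib.h>
    #include <string.h>

    struct event { char *name; int *args; };

    static void clear_event(struct event *ev)
    {
            free(ev->name);
            free(ev->args);
            memset(ev, 0, sizeof(*ev));
    }

    static int build_event(struct event *ev, const char *src)
    {
            int ret = 0;

            ev->name = strdup(src);
            if (!ev->name) {
                    ret = -12;            /* -ENOMEM */
                    goto end;
            }
            ev->args = calloc(4, sizeof(*ev->args));
            if (!ev->args) {
                    ret = -12;
                    goto end;
            }
    end:
            if (ret)
                    clear_event(ev);      /* never leave a half-built event */
            return ret;
    }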
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index b4cc7662677e..cd08027a6d2c 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -654,19 +654,24 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
654 struct map_groups *kmaps = map__kmaps(map); 654 struct map_groups *kmaps = map__kmaps(map);
655 struct map *curr_map; 655 struct map *curr_map;
656 struct symbol *pos; 656 struct symbol *pos;
657 int count = 0, moved = 0; 657 int count = 0;
658 struct rb_root old_root = dso->symbols[map->type];
658 struct rb_root *root = &dso->symbols[map->type]; 659 struct rb_root *root = &dso->symbols[map->type];
659 struct rb_node *next = rb_first(root); 660 struct rb_node *next = rb_first(root);
660 661
661 if (!kmaps) 662 if (!kmaps)
662 return -1; 663 return -1;
663 664
665 *root = RB_ROOT;
666
664 while (next) { 667 while (next) {
665 char *module; 668 char *module;
666 669
667 pos = rb_entry(next, struct symbol, rb_node); 670 pos = rb_entry(next, struct symbol, rb_node);
668 next = rb_next(&pos->rb_node); 671 next = rb_next(&pos->rb_node);
669 672
673 rb_erase_init(&pos->rb_node, &old_root);
674
670 module = strchr(pos->name, '\t'); 675 module = strchr(pos->name, '\t');
671 if (module) 676 if (module)
672 *module = '\0'; 677 *module = '\0';
@@ -674,28 +679,21 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
674 curr_map = map_groups__find(kmaps, map->type, pos->start); 679 curr_map = map_groups__find(kmaps, map->type, pos->start);
675 680
676 if (!curr_map || (filter && filter(curr_map, pos))) { 681 if (!curr_map || (filter && filter(curr_map, pos))) {
677 rb_erase_init(&pos->rb_node, root);
678 symbol__delete(pos); 682 symbol__delete(pos);
679 } else { 683 continue;
680 pos->start -= curr_map->start - curr_map->pgoff;
681 if (pos->end)
682 pos->end -= curr_map->start - curr_map->pgoff;
683 if (curr_map->dso != map->dso) {
684 rb_erase_init(&pos->rb_node, root);
685 symbols__insert(
686 &curr_map->dso->symbols[curr_map->type],
687 pos);
688 ++moved;
689 } else {
690 ++count;
691 }
692 } 684 }
685
686 pos->start -= curr_map->start - curr_map->pgoff;
687 if (pos->end)
688 pos->end -= curr_map->start - curr_map->pgoff;
689 symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
690 ++count;
693 } 691 }
694 692
695 /* Symbols have been adjusted */ 693 /* Symbols have been adjusted */
696 dso->adjust_symbols = 1; 694 dso->adjust_symbols = 1;
697 695
698 return count + moved; 696 return count;
699} 697}
700 698
701/* 699/*
@@ -1438,9 +1436,9 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
1438 if (lstat(dso->name, &st) < 0) 1436 if (lstat(dso->name, &st) < 0)
1439 goto out; 1437 goto out;
1440 1438
1441 if (st.st_uid && (st.st_uid != geteuid())) { 1439 if (!symbol_conf.force && st.st_uid && (st.st_uid != geteuid())) {
1442 pr_warning("File %s not owned by current user or root, " 1440 pr_warning("File %s not owned by current user or root, "
1443 "ignoring it.\n", dso->name); 1441 "ignoring it (use -f to override).\n", dso->name);
1444 goto out; 1442 goto out;
1445 } 1443 }
1446 1444
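dso__split_kallsyms_for_kcore() now detaches the whole old symbol tree up front (old_root) and re-inserts each surviving symbol into its destination, instead of erasing from the tree it is simultaneously walking. The structure of that move, sketched with a singly linked list in place of the rbtree:

    struct sym { struct sym *next; int keep; };

    /* Detach-then-reinsert: "old" is consumed; survivors land on *dst. */
    static void split(struct sym *old, struct sym **dst)
    {
            struct sym *next;

            while (old) {
                    next = old->next;     /* save before unlinking */
                    old->next = NULL;
                    if (old->keep) {
                            old->next = *dst;
                            *dst = old;
                    }
                    /* else: the real code symbol__delete()s it here */
                    old = next;
            }
    }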
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 40073c60b83d..dcd786e364f2 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -84,6 +84,7 @@ struct symbol_conf {
84 unsigned short priv_size; 84 unsigned short priv_size;
85 unsigned short nr_events; 85 unsigned short nr_events;
86 bool try_vmlinux_path, 86 bool try_vmlinux_path,
87 force,
87 ignore_vmlinux, 88 ignore_vmlinux,
88 ignore_vmlinux_buildid, 89 ignore_vmlinux_buildid,
89 show_kernel_path, 90 show_kernel_path,
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index d8e4b20b6d54..0dac7e05a6ac 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -1173,9 +1173,9 @@ dump_nhm_platform_info(void)
1173 unsigned long long msr; 1173 unsigned long long msr;
1174 unsigned int ratio; 1174 unsigned int ratio;
1175 1175
1176 get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr); 1176 get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
1177 1177
1178 fprintf(stderr, "cpu%d: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr); 1178 fprintf(stderr, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);
1179 1179
1180 ratio = (msr >> 40) & 0xFF; 1180 ratio = (msr >> 40) & 0xFF;
1181 fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n", 1181 fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n",
@@ -1807,7 +1807,7 @@ void check_permissions()
1807 * 1807 *
1808 * MSR_SMI_COUNT 0x00000034 1808 * MSR_SMI_COUNT 0x00000034
1809 * 1809 *
1810 * MSR_NHM_PLATFORM_INFO 0x000000ce 1810 * MSR_PLATFORM_INFO 0x000000ce
1811 * MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 1811 * MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
1812 * 1812 *
1813 * MSR_PKG_C3_RESIDENCY 0x000003f8 1813 * MSR_PKG_C3_RESIDENCY 0x000003f8
@@ -1876,7 +1876,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
 	get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
 	pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];
 
-	get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
+	get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
 	base_ratio = (msr >> 8) & 0xFF;
 
 	base_hz = base_ratio * bclk * 1000000;
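Both turbostat hunks read the same register, so the rename has to land in every call site. For reference, the bit fields the code extracts from MSR_PLATFORM_INFO (0xce) decode as below; this is a sketch of the arithmetic only, with the raw MSR value and the bus clock taken as given from turbostat:

#include <stdint.h>

/*
 * msr is the raw 64-bit MSR_PLATFORM_INFO value, bclk_mhz the bus clock
 * in MHz. Field positions follow the hunks above.
 */
static double base_freq_mhz(uint64_t msr, double bclk_mhz)
{
	return ((msr >> 8) & 0xFF) * bclk_mhz;	/* bits 15:8: base (non-turbo) ratio */
}

static double max_efficiency_freq_mhz(uint64_t msr, double bclk_mhz)
{
	return ((msr >> 40) & 0xFF) * bclk_mhz;	/* bits 47:40: max efficiency ratio */
}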
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index bcf5ec760eb9..5a6016224bb9 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -128,6 +128,7 @@ static const char * const page_flag_names[] = {
 	[KPF_THP]		= "t:thp",
 	[KPF_BALLOON]		= "o:balloon",
 	[KPF_ZERO_PAGE]		= "z:zero_page",
+	[KPF_IDLE]		= "i:idle_page",
 
 	[KPF_RESERVED]		= "r:reserved",
 	[KPF_MLOCKED]		= "m:mlocked",
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 21a0ab2d8919..69bca185c471 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -221,17 +221,23 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
 	kvm_timer_update_state(vcpu);
 
 	/*
 	 * If we enter the guest with the virtual input level to the VGIC
 	 * asserted, then we have already told the VGIC what we need to, and
 	 * we don't need to exit from the guest until the guest deactivates
 	 * the already injected interrupt, so therefore we should set the
 	 * hardware active state to prevent unnecessary exits from the guest.
 	 *
-	 * Conversely, if the virtual input level is deasserted, then always
-	 * clear the hardware active state to ensure that hardware interrupts
-	 * from the timer triggers a guest exit.
-	 */
-	if (timer->irq.level)
+	 * Also, if we enter the guest with the virtual timer interrupt active,
+	 * then it must be active on the physical distributor, because we set
+	 * the HW bit and the guest must be able to deactivate the virtual and
+	 * physical interrupt at the same time.
+	 *
+	 * Conversely, if the virtual input level is deasserted and the virtual
+	 * interrupt is not active, then always clear the hardware active state
+	 * to ensure that hardware interrupts from the timer triggers a guest
+	 * exit.
+	 */
+	if (timer->irq.level || kvm_vgic_map_is_active(vcpu, timer->map))
 		phys_active = true;
 	else
 		phys_active = false;
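The new condition is the crux of the timer fix: the hardware active state must track not just the virtual input level but also an interrupt the guest is still handling. Reduced to a predicate, the decision looks as follows; this is an illustrative sketch, not kernel code:

#include <stdbool.h>

/*
 * phys_active as computed by the hunk above: keep the physical timer
 * interrupt active while the virtual line is asserted, or while the
 * guest still has the mapped virtual interrupt active (the HW bit ties
 * the two together, so the guest deactivates both at once).
 */
static bool timer_phys_active(bool virt_level, bool virt_irq_active)
{
	return virt_level || virt_irq_active;
}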
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 533538385d5d..65461f821a75 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1096,6 +1096,27 @@ static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
 	vgic_set_lr(vcpu, lr_nr, vlr);
 }
 
+static bool dist_active_irq(struct kvm_vcpu *vcpu)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+	return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
+}
+
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
+{
+	int i;
+
+	for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++) {
+		struct vgic_lr vlr = vgic_get_lr(vcpu, i);
+
+		if (vlr.irq == map->virt_irq && vlr.state & LR_STATE_ACTIVE)
+			return true;
+	}
+
+	return dist_active_irq(vcpu);
+}
+
 /*
  * An interrupt may have been disabled after being made pending on the
  * CPU interface (the classic case is a timer running while we're
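kvm_vgic_map_is_active() walks the list registers (LRs), the per-CPU GIC slots through which virtual interrupts are presented to the guest, before falling back to the distributor-wide bitmap. As a simplified model of what each slot holds and of the per-slot test in the loop above (toy types for orientation, not the kernel's struct vgic_lr):

/* Toy model of one list register: a virtual INTID plus its state bits. */
struct toy_lr {
	unsigned int irq;	/* virtual interrupt number seen by the guest */
	unsigned int state;	/* pending/active bits */
};

#define TOY_LR_STATE_ACTIVE	(1U << 1)

/* The per-slot test performed inside the loop above. */
static int lr_matches_active(struct toy_lr lr, unsigned int virt_irq)
{
	return lr.irq == virt_irq && (lr.state & TOY_LR_STATE_ACTIVE);
}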
@@ -1248,7 +1269,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 	 * may have been serviced from another vcpu. In all cases,
 	 * move along.
 	 */
-	if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu))
+	if (!kvm_vgic_vcpu_pending_irq(vcpu) && !dist_active_irq(vcpu))
 		goto epilog;
 
 	/* SGIs */
@@ -1396,25 +1417,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-	struct irq_phys_map *map;
-	bool phys_active;
 	bool level_pending;
-	int ret;
 
 	if (!(vlr.state & LR_HW))
 		return false;
 
-	map = vgic_irq_map_search(vcpu, vlr.irq);
-	BUG_ON(!map);
-
-	ret = irq_get_irqchip_state(map->irq,
-				    IRQCHIP_STATE_ACTIVE,
-				    &phys_active);
-
-	WARN_ON(ret);
-
-	if (phys_active)
-		return 0;
+	if (vlr.state & LR_STATE_ACTIVE)
+		return false;
 
 	spin_lock(&dist->lock);
 	level_pending = process_queued_irq(vcpu, lr, vlr);
@@ -1479,17 +1488,6 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 	return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
 }
 
-int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu)
-{
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
-	if (!irqchip_in_kernel(vcpu->kvm))
-		return 0;
-
-	return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
-}
-
-
 void vgic_kick_vcpus(struct kvm *kvm)
 {
 	struct kvm_vcpu *vcpu;