author     Ingo Molnar <mingo@kernel.org>  2013-07-01 05:16:54 -0400
committer  Ingo Molnar <mingo@kernel.org>  2013-07-01 05:18:53 -0400
commit     2fd1b487884310d0aa0c0640179dc7490ad86313 (patch)
tree       1083dce15bd7dc0858c3883b8a361242046c5e09
parent     333bb864f192015a53b5060b829089decd0220ef (diff)
parent     8bb495e3f02401ee6f76d1b1d77f3ac9f079e376 (diff)
Merge tag 'v3.10' into sched/core
Merge in a recent upstream commit:

  c2853c8df57f include/linux/math64.h: add div64_ul()

because:

  72a4cf20cb71 sched: Change cfs_rq load avg to unsigned long

relies on it.

[ We don't rebase sched/core for this, because the handful of followup
  commits after the broken commit are not behavioral changes so are
  unlikely to be needed during bisection. ]

Signed-off-by: Ingo Molnar <mingo@kernel.org>
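For context, the dependency is on the div64_ul() helper that the merged upstream commit adds to include/linux/math64.h: a division of a 64-bit dividend by an unsigned long divisor that works on both 32-bit and 64-bit builds, which the cfs_rq load-average change needs once its accumulators become unsigned long. Below is a minimal standalone C sketch of that usage, not the kernel implementation; the helper name suffix, the fixed-width types, and the example load/period values are assumptions for illustration only.

/*
 * Illustration only -- not the kernel code.  Shows the kind of division
 * div64_ul() provides: 64-bit dividend, unsigned long divisor.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t div64_ul_sketch(uint64_t dividend, unsigned long divisor)
{
	/* On a 64-bit build this is a plain division; 32-bit kernels
	 * route through a helper instead of dividing directly. */
	return dividend / divisor;
}

int main(void)
{
	uint64_t load_sum = 47288930ULL;  /* assumed example accumulator */
	unsigned long period = 47742UL;   /* assumed example period */

	printf("avg = %llu\n",
	       (unsigned long long)div64_ul_sketch(load_sum, period));
	return 0;
}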
-rw-r--r--Documentation/DocBook/media/v4l/dev-codec.xml35
-rw-r--r--Documentation/DocBook/media/v4l/v4l2.xml2
-rw-r--r--Documentation/bcache.txt12
-rw-r--r--Documentation/devices.txt8
-rw-r--r--Documentation/devicetree/bindings/media/exynos-fimc-lite.txt2
-rw-r--r--Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt2
-rw-r--r--Documentation/dmatest.txt6
-rw-r--r--Documentation/filesystems/xfs.txt3
-rw-r--r--Documentation/kernel-parameters.txt3
-rw-r--r--Documentation/m68k/kernel-options.txt2
-rw-r--r--Documentation/networking/ip-sysctl.txt4
-rw-r--r--Documentation/powerpc/transactional_memory.txt27
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt3
-rw-r--r--MAINTAINERS34
-rw-r--r--Makefile2
-rw-r--r--arch/arm/Kconfig26
-rw-r--r--arch/arm/boot/compressed/Makefile5
-rw-r--r--arch/arm/boot/compressed/debug.S28
-rw-r--r--arch/arm/boot/compressed/head-sa1100.S1
-rw-r--r--arch/arm/boot/compressed/head-shark.S1
-rw-r--r--arch/arm/boot/compressed/head.S5
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi4
-rw-r--r--arch/arm/boot/dts/armada-xp-gp.dts5
-rw-r--r--arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts5
-rw-r--r--arch/arm/boot/dts/bcm2835.dtsi1
-rw-r--r--arch/arm/boot/dts/exynos5250-pinctrl.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi4
-rw-r--r--arch/arm/boot/dts/imx25.dtsi12
-rw-r--r--arch/arm/boot/dts/imx27.dtsi6
-rw-r--r--arch/arm/boot/dts/imx51.dtsi2
-rw-r--r--arch/arm/boot/dts/imx53.dtsi2
-rw-r--r--arch/arm/boot/dts/omap4-panda-common.dtsi20
-rw-r--r--arch/arm/boot/dts/omap4-sdp.dts20
-rw-r--r--arch/arm/boot/dts/omap5.dtsi3
-rw-r--r--arch/arm/include/asm/cacheflush.h4
-rw-r--r--arch/arm/include/asm/cputype.h2
-rw-r--r--arch/arm/include/asm/glue-proc.h9
-rw-r--r--arch/arm/include/asm/percpu.h11
-rw-r--r--arch/arm/include/asm/smp_plat.h2
-rw-r--r--arch/arm/include/asm/tlb.h27
-rw-r--r--arch/arm/kernel/devtree.c10
-rw-r--r--arch/arm/kernel/machine_kexec.c4
-rw-r--r--arch/arm/kernel/process.c43
-rw-r--r--arch/arm/kernel/setup.c2
-rw-r--r--arch/arm/kernel/smp.c13
-rw-r--r--arch/arm/kernel/topology.c2
-rw-r--r--arch/arm/kvm/arm.c15
-rw-r--r--arch/arm/kvm/mmu.c41
-rw-r--r--arch/arm/mach-exynos/common.c2
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c4
-rw-r--r--arch/arm/mach-kirkwood/board-ts219.c10
-rw-r--r--arch/arm/mach-kirkwood/mpp.c5
-rw-r--r--arch/arm/mach-mvebu/coherency_ll.S16
-rw-r--r--arch/arm/mach-omap2/clock36xx.c18
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_data.c9
-rw-r--r--arch/arm/mach-omap2/pm34xx.c6
-rw-r--r--arch/arm/mach-prima2/pm.c6
-rw-r--r--arch/arm/mach-prima2/rstc.c6
-rw-r--r--arch/arm/mach-shmobile/setup-sh73a0.c2
-rw-r--r--arch/arm/mach-ux500/board-mop500-regulators.c3
-rw-r--r--arch/arm/mach-ux500/cpuidle.c4
-rw-r--r--arch/arm/mm/cache-v7.S8
-rw-r--r--arch/arm/mm/flush.c33
-rw-r--r--arch/arm/mm/mmu.c8
-rw-r--r--arch/arm/mm/nommu.c6
-rw-r--r--arch/arm/mm/proc-fa526.S1
-rw-r--r--arch/arm/mm/proc-macros.S5
-rw-r--r--arch/arm/mm/proc-v7.S38
-rw-r--r--arch/arm/plat-samsung/include/plat/uncompress.h10
-rw-r--r--arch/arm/plat-samsung/pm.c18
-rw-r--r--arch/arm64/kernel/arm64ksyms.c1
-rw-r--r--arch/arm64/kernel/entry.S10
-rw-r--r--arch/arm64/kernel/perf_event.c1
-rw-r--r--arch/arm64/kernel/traps.c17
-rw-r--r--arch/arm64/mm/fault.c3
-rw-r--r--arch/ia64/include/asm/irqflags.h1
-rw-r--r--arch/ia64/include/asm/tlb.h41
-rw-r--r--arch/m68k/include/asm/gpio.h3
-rw-r--r--arch/m68k/kernel/head.S29
-rw-r--r--arch/metag/include/asm/hugetlb.h1
-rw-r--r--arch/microblaze/include/asm/cacheflush.h34
-rw-r--r--arch/microblaze/include/asm/uaccess.h4
-rw-r--r--arch/mips/cavium-octeon/setup.c15
-rw-r--r--arch/mips/include/asm/kvm_host.h4
-rw-r--r--arch/mips/include/asm/mmu_context.h2
-rw-r--r--arch/mips/include/asm/ptrace.h32
-rw-r--r--arch/mips/include/uapi/asm/kvm.h134
-rw-r--r--arch/mips/include/uapi/asm/ptrace.h17
-rw-r--r--arch/mips/kernel/binfmt_elfn32.c11
-rw-r--r--arch/mips/kernel/binfmt_elfo32.c11
-rw-r--r--arch/mips/kernel/ftrace.c4
-rw-r--r--arch/mips/kernel/idle.c13
-rw-r--r--arch/mips/kernel/rtlx.c1
-rw-r--r--arch/mips/kernel/traps.c28
-rw-r--r--arch/mips/kvm/kvm_mips.c305
-rw-r--r--arch/mips/kvm/kvm_trap_emul.c50
-rw-r--r--arch/mips/mm/tlbex.c4
-rw-r--r--arch/mips/ralink/of.c2
-rw-r--r--arch/mn10300/include/asm/irqflags.h5
-rw-r--r--arch/mn10300/include/asm/smp.h4
-rw-r--r--arch/mn10300/include/asm/uaccess.h2
-rw-r--r--arch/mn10300/kernel/setup.c54
-rw-r--r--arch/parisc/Makefile2
-rw-r--r--arch/parisc/include/asm/mmzone.h7
-rw-r--r--arch/parisc/include/asm/pci.h5
-rw-r--r--arch/parisc/kernel/drivers.c2
-rw-r--r--arch/parisc/kernel/hardware.c1
-rw-r--r--arch/parisc/kernel/pacache.S76
-rw-r--r--arch/parisc/kernel/pci.c27
-rw-r--r--arch/parisc/kernel/setup.c3
-rw-r--r--arch/parisc/mm/init.c2
-rw-r--r--arch/powerpc/include/asm/cputable.h17
-rw-r--r--arch/powerpc/include/asm/exception-64s.h2
-rw-r--r--arch/powerpc/include/asm/hvcall.h1
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h16
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h11
-rw-r--r--arch/powerpc/include/asm/processor.h13
-rw-r--r--arch/powerpc/include/asm/reg.h11
-rw-r--r--arch/powerpc/include/asm/signal.h3
-rw-r--r--arch/powerpc/include/asm/tm.h2
-rw-r--r--arch/powerpc/include/uapi/asm/Kbuild1
-rw-r--r--arch/powerpc/include/uapi/asm/tm.h18
-rw-r--r--arch/powerpc/kernel/cputable.c6
-rw-r--r--arch/powerpc/kernel/entry_32.S2
-rw-r--r--arch/powerpc/kernel/entry_64.S35
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S92
-rw-r--r--arch/powerpc/kernel/irq.c2
-rw-r--r--arch/powerpc/kernel/pci-common.c35
-rw-r--r--arch/powerpc/kernel/process.c7
-rw-r--r--arch/powerpc/kernel/signal.c40
-rw-r--r--arch/powerpc/kernel/signal.h2
-rw-r--r--arch/powerpc/kernel/signal_32.c10
-rw-r--r--arch/powerpc/kernel/signal_64.c23
-rw-r--r--arch/powerpc/kernel/traps.c39
-rw-r--r--arch/powerpc/kvm/44x_tlb.c5
-rw-r--r--arch/powerpc/kvm/book3s_hv.c2
-rw-r--r--arch/powerpc/kvm/book3s_pr_papr.c2
-rw-r--r--arch/powerpc/kvm/book3s_xics.c29
-rw-r--r--arch/powerpc/kvm/booke.c21
-rw-r--r--arch/powerpc/kvm/e500_mmu.c5
-rw-r--r--arch/powerpc/kvm/e500mc.c2
-rw-r--r--arch/powerpc/lib/copypage_power7.S19
-rw-r--r--arch/powerpc/lib/copyuser_power7.S12
-rw-r--r--arch/powerpc/mm/hash_native_64.c30
-rw-r--r--arch/powerpc/mm/hugetlbpage.c8
-rw-r--r--arch/powerpc/perf/core-book3s.c69
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig2
-rw-r--r--arch/powerpc/platforms/pseries/eeh_cache.c4
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pe.c3
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c12
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c24
-rw-r--r--arch/powerpc/sysdev/mpic.c4
-rw-r--r--arch/s390/appldata/appldata_base.c7
-rw-r--r--arch/s390/include/asm/dma-mapping.h5
-rw-r--r--arch/s390/include/asm/io.h1
-rw-r--r--arch/s390/include/asm/pgtable.h45
-rw-r--r--arch/s390/kernel/dumpstack.c12
-rw-r--r--arch/s390/kernel/ipl.c8
-rw-r--r--arch/s390/kernel/irq.c66
-rw-r--r--arch/s390/kernel/sclp.S2
-rw-r--r--arch/s390/kernel/smp.c27
-rw-r--r--arch/s390/mm/mem_detect.c3
-rw-r--r--arch/s390/mm/pgtable.c2
-rw-r--r--arch/s390/pci/pci.c33
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/leon.h2
-rw-r--r--arch/sparc/include/asm/leon_amba.h1
-rw-r--r--arch/sparc/include/asm/linkage.h6
-rw-r--r--arch/sparc/kernel/ds.c3
-rw-r--r--arch/sparc/kernel/leon_kernel.c54
-rw-r--r--arch/sparc/kernel/leon_pci_grpci1.c8
-rw-r--r--arch/sparc/kernel/leon_pmc.c7
-rw-r--r--arch/sparc/kernel/prom_common.c5
-rw-r--r--arch/sparc/kernel/setup_32.c2
-rw-r--r--arch/sparc/kernel/setup_64.c2
-rw-r--r--arch/sparc/mm/init_64.c9
-rw-r--r--arch/sparc/mm/tlb.c2
-rw-r--r--arch/sparc/prom/bootstr_32.c12
-rw-r--r--arch/sparc/prom/tree_64.c16
-rw-r--r--arch/tile/lib/exports.c2
-rw-r--r--arch/um/drivers/mconsole_kern.c2
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/boot/compressed/eboot.c47
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S48
-rw-r--r--arch/x86/crypto/crc32-pclmul_asm.S2
-rw-r--r--arch/x86/ia32/ia32_aout.c2
-rw-r--r--arch/x86/include/asm/efi.h7
-rw-r--r--arch/x86/include/asm/inst.h74
-rw-r--r--arch/x86/include/asm/irq.h5
-rw-r--r--arch/x86/include/asm/microcode.h4
-rw-r--r--arch/x86/include/asm/nmi.h4
-rw-r--r--arch/x86/include/uapi/asm/bootparam.h1
-rw-r--r--arch/x86/kernel/apic/hw_nmi.c1
-rw-r--r--arch/x86/kernel/cpu/mtrr/cleanup.c8
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c2
-rw-r--r--arch/x86/kernel/head_64.S6
-rw-r--r--arch/x86/kernel/i387.c14
-rw-r--r--arch/x86/kernel/kprobes/core.c14
-rw-r--r--arch/x86/kernel/kvmclock.c1
-rw-r--r--arch/x86/kernel/process.c12
-rw-r--r--arch/x86/kernel/relocate_kernel_64.S2
-rw-r--r--arch/x86/kvm/emulate.c9
-rw-r--r--arch/x86/kvm/lapic.c9
-rw-r--r--arch/x86/kvm/x86.c5
-rw-r--r--arch/x86/mm/init.c6
-rw-r--r--arch/x86/pci/common.c5
-rw-r--r--arch/x86/platform/efi/efi.c193
-rw-r--r--arch/x86/tools/relocs.c4
-rw-r--r--arch/x86/xen/smp.c18
-rw-r--r--arch/x86/xen/smp.h1
-rw-r--r--block/blk-core.c2
-rw-r--r--crypto/Kconfig2
-rw-r--r--crypto/algboss.c15
-rw-r--r--crypto/api.c6
-rw-r--r--crypto/internal.h6
-rw-r--r--drivers/acpi/acpi_lpss.c21
-rw-r--r--drivers/acpi/apei/cper.c18
-rw-r--r--drivers/acpi/apei/ghes.c11
-rw-r--r--drivers/acpi/device_pm.c30
-rw-r--r--drivers/acpi/dock.c181
-rw-r--r--drivers/acpi/internal.h5
-rw-r--r--drivers/acpi/power.c1
-rw-r--r--drivers/acpi/resource.c16
-rw-r--r--drivers/acpi/scan.c6
-rw-r--r--drivers/acpi/video.c19
-rw-r--r--drivers/ata/acard-ahci.c2
-rw-r--r--drivers/ata/ahci.c4
-rw-r--r--drivers/ata/ahci.h2
-rw-r--r--drivers/ata/ata_piix.c17
-rw-r--r--drivers/ata/libahci.c2
-rw-r--r--drivers/ata/libata-acpi.c37
-rw-r--r--drivers/ata/libata-core.c10
-rw-r--r--drivers/ata/libata-eh.c2
-rw-r--r--drivers/ata/libata-scsi.c2
-rw-r--r--drivers/ata/libata-sff.c2
-rw-r--r--drivers/ata/libata.h2
-rw-r--r--drivers/ata/pdc_adma.c2
-rw-r--r--drivers/ata/sata_promise.c2
-rw-r--r--drivers/ata/sata_rcar.c24
-rw-r--r--drivers/ata/sata_sil.c2
-rw-r--r--drivers/ata/sata_sx4.c2
-rw-r--r--drivers/ata/sata_via.c2
-rw-r--r--drivers/base/firmware_class.c27
-rw-r--r--drivers/base/regmap/regcache-rbtree.c6
-rw-r--r--drivers/base/regmap/regcache.c20
-rw-r--r--drivers/base/regmap/regmap-debugfs.c5
-rw-r--r--drivers/block/cciss.c32
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c8
-rw-r--r--drivers/block/nvme-core.c62
-rw-r--r--drivers/block/nvme-scsi.c3
-rw-r--r--drivers/block/pktcdvd.c3
-rw-r--r--drivers/block/rbd.c53
-rw-r--r--drivers/bluetooth/Kconfig4
-rw-r--r--drivers/bluetooth/btmrvl_main.c9
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c28
-rw-r--r--drivers/clk/clk-si5351.c12
-rw-r--r--drivers/clk/clk-vt8500.c2
-rw-r--r--drivers/clk/clk.c1
-rw-r--r--drivers/clk/mxs/clk-imx28.c1
-rw-r--r--drivers/clk/samsung/clk-exynos4.c6
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c10
-rw-r--r--drivers/clk/samsung/clk-pll.c5
-rw-r--r--drivers/clk/spear/spear3xx_clock.c2
-rw-r--r--drivers/clk/tegra/clk-tegra30.c11
-rw-r--r--drivers/clk/ux500/clk-sysctrl.c8
-rw-r--r--drivers/clk/ux500/u8500_clk.c2
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c4
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c5
-rw-r--r--drivers/cpufreq/cpufreq_governor.c3
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c17
-rw-r--r--drivers/crypto/sahara.c2
-rw-r--r--drivers/dma/dmatest.c45
-rw-r--r--drivers/dma/ste_dma40.c8
-rw-r--r--drivers/firmware/efi/efivars.c8
-rw-r--r--drivers/gpio/gpio-omap.c22
-rw-r--r--drivers/gpu/drm/drm_irq.c6
-rw-r--r--drivers/gpu/drm/drm_prime.c3
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c30
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c4
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c33
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c44
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c1
-rw-r--r--drivers/gpu/drm/i915/intel_display.c5
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c4
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c26
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c9
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c9
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c11
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c20
-rw-r--r--drivers/gpu/drm/radeon/ni.c10
-rw-r--r--drivers/gpu/drm/radeon/r100.c9
-rw-r--r--drivers/gpu/drm/radeon/r300.c9
-rw-r--r--drivers/gpu/drm/radeon/r420.c10
-rw-r--r--drivers/gpu/drm/radeon/r520.c9
-rw-r--r--drivers/gpu/drm/radeon/r600.c75
-rw-r--r--drivers/gpu/drm/radeon/r600d.h8
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c80
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c48
-rw-r--r--drivers/gpu/drm/radeon/rs400.c9
-rw-r--r--drivers/gpu/drm/radeon/rs600.c9
-rw-r--r--drivers/gpu/drm/radeon/rs690.c9
-rw-r--r--drivers/gpu/drm/radeon/rv515.c9
-rw-r--r--drivers/gpu/drm/radeon/rv770.c23
-rw-r--r--drivers/gpu/drm/radeon/si.c12
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig1
-rw-r--r--drivers/hid/hid-multitouch.c11
-rw-r--r--drivers/hwmon/adm1021.c58
-rw-r--r--drivers/iio/buffer_cb.c5
-rw-r--r--drivers/iio/frequency/adf4350.c2
-rw-r--r--drivers/iio/inkern.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_keys.c2
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c1
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h1
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c1
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c1
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c16
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c34
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h1
-rw-r--r--drivers/input/joystick/xpad.c2
-rw-r--r--drivers/input/keyboard/Kconfig1
-rw-r--r--drivers/input/mouse/synaptics.c2
-rw-r--r--drivers/input/serio/Kconfig1
-rw-r--r--drivers/input/tablet/wacom_wac.c10
-rw-r--r--drivers/input/touchscreen/cyttsp_core.c28
-rw-r--r--drivers/input/touchscreen/cyttsp_core.h2
-rw-r--r--drivers/irqchip/irq-gic.c2
-rw-r--r--drivers/irqchip/irq-mxs.c14
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c2
-rw-r--r--drivers/irqchip/irq-vic.c2
-rw-r--r--drivers/md/bcache/Kconfig1
-rw-r--r--drivers/md/bcache/bcache.h2
-rw-r--r--drivers/md/bcache/stats.c34
-rw-r--r--drivers/md/bcache/super.c185
-rw-r--r--drivers/md/bcache/writeback.c2
-rw-r--r--drivers/md/md.c2
-rw-r--r--drivers/md/raid1.c38
-rw-r--r--drivers/md/raid10.c29
-rw-r--r--drivers/md/raid5.c6
-rw-r--r--drivers/media/Kconfig12
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c2
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c7
-rw-r--r--drivers/media/pci/cx88/cx88-video.c8
-rw-r--r--drivers/media/pci/zoran/zoran.h2
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c15
-rw-r--r--drivers/media/platform/coda.c9
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c15
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c3
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-regs.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c48
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.h2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.c4
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c2
-rw-r--r--drivers/media/platform/omap/omap_vout.c3
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.h2
-rw-r--r--drivers/media/platform/s5p-jpeg/Makefile2
-rw-r--r--drivers/media/platform/s5p-mfc/Makefile2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c8
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h6
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_debug.h4
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c20
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c82
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c4
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c53
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_pm.c23
-rw-r--r--drivers/media/platform/sh_veu.c15
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c4
-rw-r--r--drivers/media/radio/Kconfig1
-rw-r--r--drivers/media/radio/radio-si476x.c2
-rw-r--r--drivers/media/tuners/Kconfig20
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c6
-rw-r--r--drivers/media/usb/gspca/sonixb.c7
-rw-r--r--drivers/media/usb/pwc/pwc.h2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c47
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c39
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c3
-rw-r--r--drivers/mfd/tps6586x.c2
-rw-r--r--drivers/misc/mei/init.c4
-rw-r--r--drivers/misc/mei/nfc.c2
-rw-r--r--drivers/misc/mei/pci-me.c1
-rw-r--r--drivers/misc/sgi-gru/grufile.c1
-rw-r--r--drivers/mmc/host/atmel-mci.c25
-rw-r--r--drivers/mmc/host/omap_hsmmc.c51
-rw-r--r--drivers/mmc/host/sdhci-acpi.c69
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c41
-rw-r--r--drivers/mmc/host/sdhci-pci.c54
-rw-r--r--drivers/net/bonding/bond_main.c22
-rw-r--r--drivers/net/bonding/bonding.h2
-rw-r--r--drivers/net/can/usb/esd_usb2.c127
-rw-r--r--drivers/net/can/usb/kvaser_usb.c64
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c61
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.h1
-rw-r--r--drivers/net/can/usb/usb_8dev.c5
-rw-r--r--drivers/net/ethernet/atheros/Kconfig18
-rw-r--r--drivers/net/ethernet/atheros/Makefile1
-rw-r--r--drivers/net/ethernet/atheros/alx/Makefile3
-rw-r--r--drivers/net/ethernet/atheros/alx/alx.h114
-rw-r--r--drivers/net/ethernet/atheros/alx/ethtool.c272
-rw-r--r--drivers/net/ethernet/atheros/alx/hw.c1226
-rw-r--r--drivers/net/ethernet/atheros/alx/hw.h499
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c1625
-rw-r--r--drivers/net/ethernet/atheros/alx/reg.h810
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c16
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c67
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h5
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_debugfs.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/interrupt.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c58
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c26
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c23
-rw-r--r--drivers/net/ethernet/octeon/octeon_mgmt.c31
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c1
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c57
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h2
-rw-r--r--drivers/net/ethernet/sfc/efx.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c68
-rw-r--r--drivers/net/ethernet/ti/cpsw.c5
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c7
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c14
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c3
-rw-r--r--drivers/net/hyperv/netvsc_drv.c5
-rw-r--r--drivers/net/macvlan.c20
-rw-r--r--drivers/net/macvtap.c6
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/team/team.c9
-rw-r--r--drivers/net/team/team_mode_random.c2
-rw-r--r--drivers/net/team/team_mode_roundrobin.c2
-rw-r--r--drivers/net/tun.c14
-rw-r--r--drivers/net/usb/cdc_ether.c6
-rw-r--r--drivers/net/usb/qmi_wwan.c9
-rw-r--r--drivers/net/vxlan.c40
-rw-r--r--drivers/net/wan/dlci.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig10
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c144
-rw-r--r--drivers/net/wireless/atmel.c2
-rw-r--r--drivers/net/wireless/b43/main.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c18
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c20
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.c3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.c74
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c89
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c17
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c1
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c2
-rw-r--r--drivers/net/wireless/iwlegacy/common.h6
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c1
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c3
-rw-r--r--drivers/net/wireless/mwifiex/debugfs.c22
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c29
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c134
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.h4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.h3
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c13
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h4
-rw-r--r--drivers/net/wireless/ti/wl12xx/scan.c2
-rw-r--r--drivers/net/wireless/ti/wl12xx/wl12xx.h6
-rw-r--r--drivers/net/wireless/ti/wl18xx/scan.c2
-rw-r--r--drivers/net/xen-netback/netback.c11
-rw-r--r--drivers/nfc/Kconfig2
-rw-r--r--drivers/nfc/mei_phy.c9
-rw-r--r--drivers/nfc/microread/mei.c20
-rw-r--r--drivers/nfc/pn544/mei.c20
-rw-r--r--drivers/of/base.c15
-rw-r--r--drivers/parisc/iosapic.c66
-rw-r--r--drivers/parisc/lba_pci.c9
-rw-r--r--drivers/parport/Kconfig2
-rw-r--r--drivers/parport/parport_gsc.c6
-rw-r--r--drivers/parport/parport_gsc.h2
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c53
-rw-r--r--drivers/pci/pci.h5
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c5
-rw-r--r--drivers/pci/pcie/aer/aerdrv_errprint.c4
-rw-r--r--drivers/pci/setup-bus.c8
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7779.c45
-rw-r--r--drivers/platform/x86/hp-wmi.c2
-rw-r--r--drivers/ptp/ptp_pch.c8
-rw-r--r--drivers/regulator/core.c7
-rw-r--r--drivers/regulator/dbx500-prcmu.c24
-rw-r--r--drivers/regulator/palmas-regulator.c4
-rw-r--r--drivers/regulator/tps6586x-regulator.c2
-rw-r--r--drivers/rtc/rtc-at91rm9200.c131
-rw-r--r--drivers/rtc/rtc-cmos.c4
-rw-r--r--drivers/rtc/rtc-tps6586x.c3
-rw-r--r--drivers/rtc/rtc-twl.c1
-rw-r--r--drivers/s390/block/dasd.c12
-rw-r--r--drivers/s390/net/netiucv.c6
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c7
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c15
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c2
-rw-r--r--drivers/scsi/ipr.c16
-rw-r--r--drivers/scsi/ipr.h6
-rw-r--r--drivers/scsi/libfc/fc_exch.c37
-rw-r--r--drivers/scsi/libfc/fc_rport.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h11
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c27
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c26
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c8
-rw-r--r--drivers/scsi/scsi_proc.c1
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c2
-rw-r--r--drivers/spi/spi-pxa2xx.c2
-rw-r--r--drivers/spi/spi-s3c64xx.c2
-rw-r--r--drivers/spi/spi-sh-hspi.c2
-rw-r--r--drivers/spi/spi-topcliff-pch.c3
-rw-r--r--drivers/spi/spi-xilinx.c74
-rw-r--r--drivers/staging/android/alarm-dev.c8
-rw-r--r--drivers/staging/dwc2/hcd.c5
-rw-r--r--drivers/staging/media/davinci_vpfe/Kconfig2
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c6
-rw-r--r--drivers/staging/media/solo6x10/Kconfig1
-rw-r--r--drivers/staging/zcache/ramster.h4
-rw-r--r--drivers/staging/zcache/ramster/debug.c2
-rw-r--r--drivers/staging/zcache/ramster/ramster.c6
-rw-r--r--drivers/target/iscsi/iscsi_target.c12
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c27
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c12
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c8
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.h4
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c50
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h2
-rw-r--r--drivers/target/target_core_file.c11
-rw-r--r--drivers/target/target_core_transport.c74
-rw-r--r--drivers/tty/pty.c13
-rw-r--r--drivers/tty/serial/8250/8250_core.c14
-rw-r--r--drivers/tty/serial/8250/8250_gsc.c10
-rw-r--r--drivers/tty/serial/imx.c2
-rw-r--r--drivers/tty/serial/samsung.c13
-rw-r--r--drivers/tty/vt/vt_ioctl.c5
-rw-r--r--drivers/usb/chipidea/core.c3
-rw-r--r--drivers/usb/chipidea/udc.c13
-rw-r--r--drivers/usb/core/devio.c10
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c2
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/gadget.c16
-rw-r--r--drivers/usb/host/ehci-sched.c9
-rw-r--r--drivers/usb/host/xhci-mem.c10
-rw-r--r--drivers/usb/host/xhci-pci.c8
-rw-r--r--drivers/usb/host/xhci.c16
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/musb/musb_host.c18
-rw-r--r--drivers/usb/musb/musb_host.h1
-rw-r--r--drivers/usb/phy/Kconfig14
-rw-r--r--drivers/usb/serial/ark3116.c2
-rw-r--r--drivers/usb/serial/cypress_m8.c18
-rw-r--r--drivers/usb/serial/cypress_m8.h4
-rw-r--r--drivers/usb/serial/f81232.c8
-rw-r--r--drivers/usb/serial/iuu_phoenix.c4
-rw-r--r--drivers/usb/serial/keyspan.c10
-rw-r--r--drivers/usb/serial/mos7720.c25
-rw-r--r--drivers/usb/serial/mos7840.c35
-rw-r--r--drivers/usb/serial/option.c26
-rw-r--r--drivers/usb/serial/pl2303.c10
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/serial/spcp8x5.c10
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c3
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.h4
-rw-r--r--drivers/usb/serial/usb-serial.c4
-rw-r--r--drivers/usb/serial/visor.c9
-rw-r--r--drivers/usb/serial/whiteheat.c2
-rw-r--r--drivers/usb/serial/zte_ev.c58
-rw-r--r--drivers/vfio/vfio.c2
-rw-r--r--drivers/vhost/net.c29
-rw-r--r--drivers/vhost/vhost.c8
-rw-r--r--drivers/vhost/vhost.h1
-rw-r--r--drivers/video/atmel_lcdfb.c15
-rw-r--r--drivers/video/omap2/dss/core.c20
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c3
-rw-r--r--drivers/video/ps3fb.c2
-rw-r--r--drivers/xen/tmem.c6
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c4
-rw-r--r--drivers/xen/xenbus/xenbus_client.c5
-rw-r--r--drivers/xen/xenbus/xenbus_comms.h1
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c27
-rw-r--r--drivers/xen/xenbus/xenbus_probe.h7
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c37
-rw-r--r--fs/aio.c36
-rw-r--r--fs/befs/linuxvfs.c4
-rw-r--r--fs/btrfs/disk-io.c10
-rw-r--r--fs/btrfs/inode.c3
-rw-r--r--fs/btrfs/relocation.c9
-rw-r--r--fs/ceph/locks.c73
-rw-r--r--fs/ceph/mds_client.c65
-rw-r--r--fs/ceph/super.h9
-rw-r--r--fs/cifs/connect.c4
-rw-r--r--fs/ecryptfs/file.c6
-rw-r--r--fs/efivarfs/file.c14
-rw-r--r--fs/exec.c16
-rw-r--r--fs/file_table.c19
-rw-r--r--fs/fuse/dir.c12
-rw-r--r--fs/fuse/file.c62
-rw-r--r--fs/fuse/inode.c7
-rw-r--r--fs/gfs2/bmap.c17
-rw-r--r--fs/gfs2/dir.c43
-rw-r--r--fs/gfs2/file.c19
-rw-r--r--fs/gfs2/inode.c1
-rw-r--r--fs/gfs2/lops.c4
-rw-r--r--fs/gfs2/rgrp.c4
-rw-r--r--fs/gfs2/super.c6
-rw-r--r--fs/hpfs/dir.c10
-rw-r--r--fs/hpfs/file.c4
-rw-r--r--fs/internal.h6
-rw-r--r--fs/jfs/jfs_logmgr.c8
-rw-r--r--fs/jfs/super.c38
-rw-r--r--fs/namei.c4
-rw-r--r--fs/ncpfs/dir.c9
-rw-r--r--fs/nfs/nfs4proc.c2
-rw-r--r--fs/nfs/super.c2
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c1
-rw-r--r--fs/ocfs2/namei.c4
-rw-r--r--fs/pnode.c3
-rw-r--r--fs/proc/base.c1
-rw-r--r--fs/proc/kmsg.c10
-rw-r--r--fs/qnx6/dir.c2
-rw-r--r--fs/read_write.c24
-rw-r--r--fs/reiserfs/dir.c2
-rw-r--r--fs/reiserfs/inode.c9
-rw-r--r--fs/reiserfs/xattr.c14
-rw-r--r--fs/reiserfs/xattr_acl.c3
-rw-r--r--fs/splice.c32
-rw-r--r--fs/ubifs/dir.c54
-rw-r--r--fs/xfs/xfs_acl.c31
-rw-r--r--fs/xfs/xfs_acl.h31
-rw-r--r--fs/xfs/xfs_attr_leaf.c73
-rw-r--r--fs/xfs/xfs_attr_leaf.h1
-rw-r--r--fs/xfs/xfs_attr_remote.c408
-rw-r--r--fs/xfs/xfs_attr_remote.h10
-rw-r--r--fs/xfs/xfs_btree.c10
-rw-r--r--fs/xfs/xfs_buf.c1
-rw-r--r--fs/xfs/xfs_buf_item.c7
-rw-r--r--fs/xfs/xfs_dfrag.c8
-rw-r--r--fs/xfs/xfs_dir2_format.h4
-rw-r--r--fs/xfs/xfs_dir2_node.c13
-rw-r--r--fs/xfs/xfs_dquot.c37
-rw-r--r--fs/xfs/xfs_fs.h1
-rw-r--r--fs/xfs/xfs_fsops.c4
-rw-r--r--fs/xfs/xfs_inode.c16
-rw-r--r--fs/xfs/xfs_iops.c47
-rw-r--r--fs/xfs/xfs_log_recover.c114
-rw-r--r--fs/xfs/xfs_mount.c18
-rw-r--r--fs/xfs/xfs_qm.c40
-rw-r--r--fs/xfs/xfs_qm_syscalls.c40
-rw-r--r--fs/xfs/xfs_quota.h2
-rw-r--r--fs/xfs/xfs_super.c11
-rw-r--r--fs/xfs/xfs_symlink.c20
-rw-r--r--include/acpi/acpi_bus.h1
-rw-r--r--include/acpi/acpi_drivers.h8
-rw-r--r--include/asm-generic/io.h4
-rw-r--r--include/asm-generic/kvm_para.h5
-rw-r--r--include/asm-generic/tlb.h17
-rw-r--r--include/linux/aer.h5
-rw-r--r--include/linux/cgroup.h2
-rw-r--r--include/linux/context_tracking.h35
-rw-r--r--include/linux/cpu.h4
-rw-r--r--include/linux/filter.h1
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/if_team.h4
-rw-r--r--include/linux/if_vlan.h2
-rw-r--r--include/linux/kvm_host.h37
-rw-r--r--include/linux/list.h11
-rw-r--r--include/linux/math64.h6
-rw-r--r--include/linux/netdevice.h1
-rw-r--r--include/linux/netfilter_ipv6.h16
-rw-r--r--include/linux/perf_event.h3
-rw-r--r--include/linux/rculist.h20
-rw-r--r--include/linux/rculist_nulls.h7
-rw-r--r--include/linux/rcupdate.h9
-rw-r--r--include/linux/scatterlist.h3
-rw-r--r--include/linux/skbuff.h16
-rw-r--r--include/linux/smp.h19
-rw-r--r--include/linux/socket.h3
-rw-r--r--include/linux/splice.h1
-rw-r--r--include/linux/swapops.h3
-rw-r--r--include/linux/syslog.h4
-rw-r--r--include/linux/tracepoint.h4
-rw-r--r--include/linux/vtime.h4
-rw-r--r--include/media/v4l2-mem2mem.h2
-rw-r--r--include/net/addrconf.h2
-rw-r--r--include/net/bluetooth/hci_core.h1
-rw-r--r--include/net/bluetooth/mgmt.h1
-rw-r--r--include/net/ip_tunnels.h6
-rw-r--r--include/net/sch_generic.h18
-rw-r--r--include/net/xfrm.h5
-rw-r--r--include/sound/soc-dapm.h3
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/target/target_core_fabric.h4
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/kvm.h1
-rw-r--r--include/video/omapdss.h1
-rw-r--r--include/xen/xenbus.h1
-rw-r--r--init/Kconfig1
-rw-r--r--kernel/audit.c2
-rw-r--r--kernel/audit_tree.c1
-rw-r--r--kernel/cgroup.c31
-rw-r--r--kernel/context_tracking.c1
-rw-r--r--kernel/cpu.c55
-rw-r--r--kernel/cpu/idle.c17
-rw-r--r--kernel/events/core.c233
-rw-r--r--kernel/events/hw_breakpoint.c6
-rw-r--r--kernel/events/internal.h4
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/irq/irqdomain.c9
-rw-r--r--kernel/kprobes.c30
-rw-r--r--kernel/printk.c91
-rw-r--r--kernel/ptrace.c20
-rw-r--r--kernel/range.c19
-rw-r--r--kernel/rcutree.c21
-rw-r--r--kernel/rcutree.h2
-rw-r--r--kernel/sched/core.c2
-rw-r--r--kernel/sched/cputime.c6
-rw-r--r--kernel/softirq.c13
-rw-r--r--kernel/sys.c29
-rw-r--r--kernel/time/ntp.c1
-rw-r--r--kernel/time/tick-broadcast.c19
-rw-r--r--kernel/time/tick-sched.c2
-rw-r--r--kernel/time/timekeeping.c8
-rw-r--r--kernel/trace/ftrace.c18
-rw-r--r--kernel/trace/trace.c18
-rw-r--r--kernel/trace/trace.h2
-rw-r--r--kernel/trace/trace_selftest.c2
-rw-r--r--lib/mpi/mpicoder.c2
-rw-r--r--mm/frontswap.c2
-rw-r--r--mm/hugetlb.c2
-rw-r--r--mm/memcontrol.c14
-rw-r--r--mm/memory.c9
-rw-r--r--mm/migrate.c23
-rw-r--r--mm/page_alloc.c6
-rw-r--r--mm/slab_common.c4
-rw-r--r--mm/swap_state.c18
-rw-r--r--mm/swapfile.c2
-rw-r--r--net/9p/client.c55
-rw-r--r--net/batman-adv/bat_iv_ogm.c86
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c4
-rw-r--r--net/batman-adv/sysfs.c5
-rw-r--r--net/bluetooth/hci_core.c21
-rw-r--r--net/bluetooth/l2cap_core.c73
-rw-r--r--net/bluetooth/mgmt.c23
-rw-r--r--net/bluetooth/smp.c4
-rw-r--r--net/bridge/br_multicast.c5
-rw-r--r--net/ceph/osd_client.c2
-rw-r--r--net/compat.c13
-rw-r--r--net/core/dev.c34
-rw-r--r--net/core/dev_addr_lists.c17
-rw-r--r--net/core/dev_ioctl.c19
-rw-r--r--net/core/ethtool.c6
-rw-r--r--net/core/filter.c2
-rw-r--r--net/core/skbuff.c24
-rw-r--r--net/core/sock.c23
-rw-r--r--net/core/sock_diag.c9
-rw-r--r--net/ipv4/gre.c2
-rw-r--r--net/ipv4/ip_tunnel.c6
-rw-r--r--net/ipv4/ip_vti.c3
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c18
-rw-r--r--net/ipv4/route.c7
-rw-r--r--net/ipv4/tcp_ipv4.c4
-rw-r--r--net/ipv6/addrconf.c18
-rw-r--r--net/ipv6/ip6_output.c13
-rw-r--r--net/ipv6/ndisc.c2
-rw-r--r--net/ipv6/netfilter.c7
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c2
-rw-r--r--net/ipv6/proc.c2
-rw-r--r--net/ipv6/udp_offload.c20
-rw-r--r--net/key/af_key.c6
-rw-r--r--net/l2tp/l2tp_ppp.c6
-rw-r--r--net/mac80211/cfg.c6
-rw-r--r--net/mac80211/ieee80211_i.h5
-rw-r--r--net/mac80211/iface.c44
-rw-r--r--net/mac80211/mlme.c99
-rw-r--r--net/mac80211/rate.c2
-rw-r--r--net/mac80211/util.c4
-rw-r--r--net/netfilter/core.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c38
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_sh.c2
-rw-r--r--net/netfilter/nf_conntrack_labels.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c1
-rw-r--r--net/netfilter/nf_nat_sip.c3
-rw-r--r--net/netfilter/nfnetlink_acct.c7
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c7
-rw-r--r--net/netfilter/nfnetlink_queue_core.c6
-rw-r--r--net/netfilter/xt_LOG.c2
-rw-r--r--net/netfilter/xt_TCPMSS.c23
-rw-r--r--net/netfilter/xt_TCPOPTSTRIP.c6
-rw-r--r--net/netfilter/xt_addrtype.c27
-rw-r--r--net/netlink/af_netlink.c4
-rw-r--r--net/nfc/Makefile1
-rw-r--r--net/packet/af_packet.c5
-rw-r--r--net/sched/act_police.c8
-rw-r--r--net/sched/sch_api.c11
-rw-r--r--net/sched/sch_generic.c8
-rw-r--r--net/sched/sch_htb.c42
-rw-r--r--net/sched/sch_tbf.c8
-rw-r--r--net/sctp/outqueue.c6
-rw-r--r--net/sctp/socket.c6
-rw-r--r--net/socket.c61
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c8
-rw-r--r--net/sunrpc/svcauth_unix.c12
-rw-r--r--net/wireless/nl80211.c13
-rw-r--r--net/wireless/sme.c3
-rw-r--r--net/xfrm/xfrm_policy.c3
-rw-r--r--net/xfrm/xfrm_user.c2
-rw-r--r--scripts/Makefile.lib10
-rwxr-xr-xscripts/config2
-rw-r--r--scripts/dtc/dtc-lexer.l2
-rw-r--r--scripts/dtc/dtc-lexer.lex.c_shipped232
-rw-r--r--scripts/dtc/dtc-parser.tab.c_shipped715
-rw-r--r--scripts/dtc/dtc-parser.tab.h_shipped14
-rw-r--r--scripts/kconfig/lxdialog/menubox.c9
-rw-r--r--scripts/kconfig/mconf.c11
-rw-r--r--scripts/kconfig/menu.c15
-rw-r--r--security/selinux/xfrm.c34
-rw-r--r--sound/core/pcm_native.c4
-rw-r--r--sound/pci/hda/hda_generic.c68
-rw-r--r--sound/pci/hda/hda_generic.h1
-rw-r--r--sound/pci/hda/patch_cirrus.c23
-rw-r--r--sound/pci/hda/patch_realtek.c9
-rw-r--r--sound/pci/hda/patch_via.c10
-rw-r--r--sound/pci/sis7019.c3
-rw-r--r--sound/soc/codecs/cs42l52.c12
-rw-r--r--sound/soc/codecs/cs42l52.h2
-rw-r--r--sound/soc/codecs/max98090.c2
-rw-r--r--sound/soc/codecs/tlv320aic3x.c10
-rw-r--r--sound/soc/codecs/wm5102.c3
-rw-r--r--sound/soc/codecs/wm5110.c7
-rw-r--r--sound/soc/codecs/wm8994.c15
-rw-r--r--sound/soc/davinci/davinci-mcasp.c7
-rw-r--r--sound/soc/soc-compress.c8
-rw-r--r--sound/soc/soc-dapm.c49
-rw-r--r--sound/soc/soc-pcm.c13
-rw-r--r--sound/usb/6fire/firmware.c6
-rw-r--r--sound/usb/card.c22
-rw-r--r--sound/usb/mixer.c2
-rw-r--r--sound/usb/quirks-table.h14
-rw-r--r--tools/power/x86/turbostat/turbostat.c2
885 files changed, 13534 insertions, 5164 deletions
diff --git a/Documentation/DocBook/media/v4l/dev-codec.xml b/Documentation/DocBook/media/v4l/dev-codec.xml
index dca0ecd54dc6..ff44c16fc080 100644
--- a/Documentation/DocBook/media/v4l/dev-codec.xml
+++ b/Documentation/DocBook/media/v4l/dev-codec.xml
@@ -1,18 +1,27 @@
1 <title>Codec Interface</title> 1 <title>Codec Interface</title>
2 2
3 <note> 3 <para>A V4L2 codec can compress, decompress, transform, or otherwise
4 <title>Suspended</title> 4convert video data from one format into another format, in memory. Typically
5such devices are memory-to-memory devices (i.e. devices with the
6<constant>V4L2_CAP_VIDEO_M2M</constant> or <constant>V4L2_CAP_VIDEO_M2M_MPLANE</constant>
7capability set).
8</para>
5 9
6 <para>This interface has been be suspended from the V4L2 API 10 <para>A memory-to-memory video node acts just like a normal video node, but it
7implemented in Linux 2.6 until we have more experience with codec 11supports both output (sending frames from memory to the codec hardware) and
8device interfaces.</para> 12capture (receiving the processed frames from the codec hardware into memory)
9 </note> 13stream I/O. An application will have to setup the stream
14I/O for both sides and finally call &VIDIOC-STREAMON; for both capture and output
15to start the codec.</para>
10 16
11 <para>A V4L2 codec can compress, decompress, transform, or otherwise 17 <para>Video compression codecs use the MPEG controls to setup their codec parameters
12convert video data from one format into another format, in memory. 18(note that the MPEG controls actually support many more codecs than just MPEG).
13Applications send data to be converted to the driver through a 19See <xref linkend="mpeg-controls"></xref>.</para>
14&func-write; call, and receive the converted data through a
15&func-read; call. For efficiency a driver may also support streaming
16I/O.</para>
17 20
18 <para>[to do]</para> 21 <para>Memory-to-memory devices can often be used as a shared resource: you can
22open the video node multiple times, each application setting up their own codec properties
23that are local to the file handle, and each can use it independently from the others.
24The driver will arbitrate access to the codec and reprogram it whenever another file
25handler gets access. This is different from the usual video node behavior where the video properties
26are global to the device (i.e. changing something through one file handle is visible
27through another file handle).</para>
diff --git a/Documentation/DocBook/media/v4l/v4l2.xml b/Documentation/DocBook/media/v4l/v4l2.xml
index bfc93cdcf696..bfe823dd0f31 100644
--- a/Documentation/DocBook/media/v4l/v4l2.xml
+++ b/Documentation/DocBook/media/v4l/v4l2.xml
@@ -493,7 +493,7 @@ and discussions on the V4L mailing list.</revremark>
493</partinfo> 493</partinfo>
494 494
495<title>Video for Linux Two API Specification</title> 495<title>Video for Linux Two API Specification</title>
496 <subtitle>Revision 3.9</subtitle> 496 <subtitle>Revision 3.10</subtitle>
497 497
498 <chapter id="common"> 498 <chapter id="common">
499 &sub-common; 499 &sub-common;
diff --git a/Documentation/bcache.txt b/Documentation/bcache.txt
index 77db8809bd96..b3a7e7d384f6 100644
--- a/Documentation/bcache.txt
+++ b/Documentation/bcache.txt
@@ -319,7 +319,10 @@ cache<0..n>
319 Symlink to each of the cache devices comprising this cache set. 319 Symlink to each of the cache devices comprising this cache set.
320 320
321cache_available_percent 321cache_available_percent
322 Percentage of cache device free. 322 Percentage of cache device which doesn't contain dirty data, and could
323 potentially be used for writeback. This doesn't mean this space isn't used
324 for clean cached data; the unused statistic (in priority_stats) is typically
325 much lower.
323 326
324clear_stats 327clear_stats
325 Clears the statistics associated with this cache 328 Clears the statistics associated with this cache
@@ -423,8 +426,11 @@ nbuckets
423 Total buckets in this cache 426 Total buckets in this cache
424 427
425priority_stats 428priority_stats
426 Statistics about how recently data in the cache has been accessed. This can 429 Statistics about how recently data in the cache has been accessed.
427 reveal your working set size. 430 This can reveal your working set size. Unused is the percentage of
431 the cache that doesn't contain any data. Metadata is bcache's
432 metadata overhead. Average is the average priority of cache buckets.
433 Next is a list of quantiles with the priority threshold of each.
428 434
429written 435written
430 Sum of all data that has been written to the cache; comparison with 436 Sum of all data that has been written to the cache; comparison with
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index 08f01e79c41a..b9015912bca6 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -498,12 +498,8 @@ Your cooperation is appreciated.
498 498
499 Each device type has 5 bits (32 minors). 499 Each device type has 5 bits (32 minors).
500 500
501 13 block 8-bit MFM/RLL/IDE controller 501 13 block Previously used for the XT disk (/dev/xdN)
502 0 = /dev/xda First XT disk whole disk 502 Deleted in kernel v3.9.
503 64 = /dev/xdb Second XT disk whole disk
504
505 Partitions are handled in the same way as IDE disks
506 (see major number 3).
507 503
508 14 char Open Sound System (OSS) 504 14 char Open Sound System (OSS)
509 0 = /dev/mixer Mixer control 505 0 = /dev/mixer Mixer control
diff --git a/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt b/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt
index 3f62adfb3e0b..de9f6b78ee51 100644
--- a/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt
+++ b/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt
@@ -2,7 +2,7 @@ Exynos4x12/Exynos5 SoC series camera host interface (FIMC-LITE)
2 2
3Required properties: 3Required properties:
4 4
5- compatible : should be "samsung,exynos4212-fimc" for Exynos4212 and 5- compatible : should be "samsung,exynos4212-fimc-lite" for Exynos4212 and
6 Exynos4412 SoCs; 6 Exynos4412 SoCs;
7- reg : physical base address and size of the device memory mapped 7- reg : physical base address and size of the device memory mapped
8 registers; 8 registers;
diff --git a/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt b/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt
index 2a3feabd3b22..34c1505774bf 100644
--- a/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt
@@ -1,7 +1,7 @@
1Atmel AT91RM9200 Real Time Clock 1Atmel AT91RM9200 Real Time Clock
2 2
3Required properties: 3Required properties:
4- compatible: should be: "atmel,at91rm9200-rtc" 4- compatible: should be: "atmel,at91rm9200-rtc" or "atmel,at91sam9x5-rtc"
5- reg: physical base address of the controller and length of memory mapped 5- reg: physical base address of the controller and length of memory mapped
6 region. 6 region.
7- interrupts: rtc alarm/event interrupt 7- interrupts: rtc alarm/event interrupt
diff --git a/Documentation/dmatest.txt b/Documentation/dmatest.txt
index 279ac0a8c5b1..132a094c7bc3 100644
--- a/Documentation/dmatest.txt
+++ b/Documentation/dmatest.txt
@@ -34,7 +34,7 @@ command:
34After a while you will start to get messages about current status or error like 34After a while you will start to get messages about current status or error like
35in the original code. 35in the original code.
36 36
37Note that running a new test will stop any in progress test. 37Note that running a new test will not stop any in progress test.
38 38
39The following command should return actual state of the test. 39The following command should return actual state of the test.
40 % cat /sys/kernel/debug/dmatest/run 40 % cat /sys/kernel/debug/dmatest/run
@@ -52,8 +52,8 @@ To wait for test done the user may perform a busy loop that checks the state.
52 52
53The module parameters that is supplied to the kernel command line will be used 53The module parameters that is supplied to the kernel command line will be used
54for the first performed test. After user gets a control, the test could be 54for the first performed test. After user gets a control, the test could be
55interrupted or re-run with same or different parameters. For the details see 55re-run with the same or different parameters. For the details see the above
56the above section "Part 2 - When dmatest is built as a module..." 56section "Part 2 - When dmatest is built as a module..."
57 57
58In both cases the module parameters are used as initial values for the test case. 58In both cases the module parameters are used as initial values for the test case.
59You always could check them at run-time by running 59You always could check them at run-time by running
diff --git a/Documentation/filesystems/xfs.txt b/Documentation/filesystems/xfs.txt
index 3e4b3dd1e046..83577f0232a0 100644
--- a/Documentation/filesystems/xfs.txt
+++ b/Documentation/filesystems/xfs.txt
@@ -33,6 +33,9 @@ When mounting an XFS filesystem, the following options are accepted.
33 removing extended attributes) the on-disk superblock feature 33 removing extended attributes) the on-disk superblock feature
34 bit field will be updated to reflect this format being in use. 34 bit field will be updated to reflect this format being in use.
35 35
36 CRC enabled filesystems always use the attr2 format, and so
37 will reject the noattr2 mount option if it is set.
38
36 barrier 39 barrier
37 Enables the use of block layer write barriers for writes into 40 Enables the use of block layer write barriers for writes into
38 the journal and unwritten extent conversion. This allows for 41 the journal and unwritten extent conversion. This allows for
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 6e3b18a8afc6..2fe6e767b3d6 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3351,9 +3351,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
3351 plus one apbt timer for broadcast timer. 3351 plus one apbt timer for broadcast timer.
3352 x86_mrst_timer=apbt_only | lapic_and_apbt 3352 x86_mrst_timer=apbt_only | lapic_and_apbt
3353 3353
3354 xd= [HW,XT] Original XT pre-IDE (RLL encoded) disks.
3355 xd_geo= See header of drivers/block/xd.c.
3356
3357 xen_emul_unplug= [HW,X86,XEN] 3354 xen_emul_unplug= [HW,X86,XEN]
3358 Unplug Xen emulated devices 3355 Unplug Xen emulated devices
3359 Format: [unplug0,][unplug1] 3356 Format: [unplug0,][unplug1]
diff --git a/Documentation/m68k/kernel-options.txt b/Documentation/m68k/kernel-options.txt
index 97d45f276fe6..eaf32a1fd0b1 100644
--- a/Documentation/m68k/kernel-options.txt
+++ b/Documentation/m68k/kernel-options.txt
@@ -80,8 +80,6 @@ Valid names are:
80 /dev/sdd: -> 0x0830 (forth SCSI disk) 80 /dev/sdd: -> 0x0830 (forth SCSI disk)
81 /dev/sde: -> 0x0840 (fifth SCSI disk) 81 /dev/sde: -> 0x0840 (fifth SCSI disk)
82 /dev/fd : -> 0x0200 (floppy disk) 82 /dev/fd : -> 0x0200 (floppy disk)
83 /dev/xda: -> 0x0c00 (first XT disk, unused in Linux/m68k)
84 /dev/xdb: -> 0x0c40 (second XT disk, unused in Linux/m68k)
85 83
86 The name must be followed by a decimal number, that stands for the 84 The name must be followed by a decimal number, that stands for the
87partition number. Internally, the value of the number is just 85partition number. Internally, the value of the number is just
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index f98ca633b528..3458d6343e01 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -420,10 +420,10 @@ tcp_synack_retries - INTEGER
420 for a passive TCP connection will happen after 63seconds. 420 for a passive TCP connection will happen after 63seconds.
421 421
422tcp_syncookies - BOOLEAN 422tcp_syncookies - BOOLEAN
423 Only valid when the kernel was compiled with CONFIG_SYNCOOKIES 423 Only valid when the kernel was compiled with CONFIG_SYN_COOKIES
424 Send out syncookies when the syn backlog queue of a socket 424 Send out syncookies when the syn backlog queue of a socket
425 overflows. This is to prevent against the common 'SYN flood attack' 425 overflows. This is to prevent against the common 'SYN flood attack'
426 Default: FALSE 426 Default: 1
427 427
428 Note, that syncookies is fallback facility. 428 Note, that syncookies is fallback facility.
429 It MUST NOT be used to help highly loaded servers to stand 429 It MUST NOT be used to help highly loaded servers to stand
diff --git a/Documentation/powerpc/transactional_memory.txt b/Documentation/powerpc/transactional_memory.txt
index c907be41d60f..dc23e58ae264 100644
--- a/Documentation/powerpc/transactional_memory.txt
+++ b/Documentation/powerpc/transactional_memory.txt
@@ -147,6 +147,25 @@ Example signal handler:
147 fix_the_problem(ucp->dar); 147 fix_the_problem(ucp->dar);
148 } 148 }
149 149
150When in an active transaction that takes a signal, we need to be careful with
151the stack. It's possible that the stack has moved back up after the tbegin.
152The obvious case here is when the tbegin is called inside a function that
153returns before a tend. In this case, the stack is part of the checkpointed
154transactional memory state. If we write over this non transactionally or in
155suspend, we are in trouble because if we get a tm abort, the program counter and
156stack pointer will be back at the tbegin but our in memory stack won't be valid
157anymore.
158
159To avoid this, when taking a signal in an active transaction, we need to use
160the stack pointer from the checkpointed state, rather than the speculated
161state. This ensures that the signal context (written tm suspended) will be
162written below the stack required for the rollback. The transaction is aborted
163becuase of the treclaim, so any memory written between the tbegin and the
164signal will be rolled back anyway.
165
166For signals taken in non-TM or suspended mode, we use the
167normal/non-checkpointed stack pointer.
168
150 169
151Failure cause codes used by kernel 170Failure cause codes used by kernel
152================================== 171==================================
@@ -155,14 +174,18 @@ These are defined in <asm/reg.h>, and distinguish different reasons why the
155kernel aborted a transaction: 174kernel aborted a transaction:
156 175
157 TM_CAUSE_RESCHED Thread was rescheduled. 176 TM_CAUSE_RESCHED Thread was rescheduled.
177 TM_CAUSE_TLBI Software TLB invalide.
158 TM_CAUSE_FAC_UNAV FP/VEC/VSX unavailable trap. 178 TM_CAUSE_FAC_UNAV FP/VEC/VSX unavailable trap.
159 TM_CAUSE_SYSCALL Currently unused; future syscalls that must abort 179 TM_CAUSE_SYSCALL Currently unused; future syscalls that must abort
160 transactions for consistency will use this. 180 transactions for consistency will use this.
161 TM_CAUSE_SIGNAL Signal delivered. 181 TM_CAUSE_SIGNAL Signal delivered.
162 TM_CAUSE_MISC Currently unused. 182 TM_CAUSE_MISC Currently unused.
183 TM_CAUSE_ALIGNMENT Alignment fault.
184 TM_CAUSE_EMULATE Emulation that touched memory.
163 185
164These can be checked by the user program's abort handler as TEXASR[0:7]. 186These can be checked by the user program's abort handler as TEXASR[0:7]. If
165 187bit 7 is set, it indicates that the error is consider persistent. For example
188a TM_CAUSE_ALIGNMENT will be persistent while a TM_CAUSE_RESCHED will not.q
166 189
167GDB 190GDB
168=== 191===
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index bb8b0dc532b8..77d68e23b247 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -29,6 +29,8 @@ ALC269/270/275/276/280/282
29 alc271-dmic Enable ALC271X digital mic workaround 29 alc271-dmic Enable ALC271X digital mic workaround
30 inv-dmic Inverted internal mic workaround 30 inv-dmic Inverted internal mic workaround
31 lenovo-dock Enables docking station I/O for some Lenovos 31 lenovo-dock Enables docking station I/O for some Lenovos
32 dell-headset-multi Headset jack, which can also be used as mic-in
33 dell-headset-dock Headset jack (without mic-in), and also dock I/O
32 34
33ALC662/663/272 35ALC662/663/272
34============== 36==============
@@ -42,6 +44,7 @@ ALC662/663/272
42 asus-mode7 ASUS 44 asus-mode7 ASUS
43 asus-mode8 ASUS 45 asus-mode8 ASUS
44 inv-dmic Inverted internal mic workaround 46 inv-dmic Inverted internal mic workaround
47 dell-headset-multi Headset jack, which can also be used as mic-in
45 48
46ALC680 49ALC680
47====== 50======
diff --git a/MAINTAINERS b/MAINTAINERS
index fd3a495a0005..ad7e322ad17b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2890,8 +2890,8 @@ F: drivers/media/dvb-frontends/ec100*
2890 2890
2891ECRYPT FILE SYSTEM 2891ECRYPT FILE SYSTEM
2892M: Tyler Hicks <tyhicks@canonical.com> 2892M: Tyler Hicks <tyhicks@canonical.com>
2893M: Dustin Kirkland <dustin.kirkland@gazzang.com>
2894L: ecryptfs@vger.kernel.org 2893L: ecryptfs@vger.kernel.org
2894W: http://ecryptfs.org
2895W: https://launchpad.net/ecryptfs 2895W: https://launchpad.net/ecryptfs
2896S: Supported 2896S: Supported
2897F: Documentation/filesystems/ecryptfs.txt 2897F: Documentation/filesystems/ecryptfs.txt
@@ -3220,7 +3220,7 @@ F: lib/fault-inject.c
3220 3220
3221FCOE SUBSYSTEM (libfc, libfcoe, fcoe) 3221FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
3222M: Robert Love <robert.w.love@intel.com> 3222M: Robert Love <robert.w.love@intel.com>
3223L: devel@open-fcoe.org 3223L: fcoe-devel@open-fcoe.org
3224W: www.Open-FCoE.org 3224W: www.Open-FCoE.org
3225S: Supported 3225S: Supported
3226F: drivers/scsi/libfc/ 3226F: drivers/scsi/libfc/
@@ -3322,11 +3322,12 @@ F: drivers/net/wan/dlci.c
3322F: drivers/net/wan/sdla.c 3322F: drivers/net/wan/sdla.c
3323 3323
3324FRAMEBUFFER LAYER 3324FRAMEBUFFER LAYER
3325M: Florian Tobias Schandinat <FlorianSchandinat@gmx.de> 3325M: Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
3326M: Tomi Valkeinen <tomi.valkeinen@ti.com>
3326L: linux-fbdev@vger.kernel.org 3327L: linux-fbdev@vger.kernel.org
3327W: http://linux-fbdev.sourceforge.net/ 3328W: http://linux-fbdev.sourceforge.net/
3328Q: http://patchwork.kernel.org/project/linux-fbdev/list/ 3329Q: http://patchwork.kernel.org/project/linux-fbdev/list/
3329T: git git://github.com/schandinat/linux-2.6.git fbdev-next 3330T: git git://git.kernel.org/pub/scm/linux/kernel/git/plagnioj/linux-fbdev.git
3330S: Maintained 3331S: Maintained
3331F: Documentation/fb/ 3332F: Documentation/fb/
3332F: Documentation/devicetree/bindings/fb/ 3333F: Documentation/devicetree/bindings/fb/
@@ -4447,6 +4448,16 @@ S: Maintained
4447F: drivers/scsi/*iscsi* 4448F: drivers/scsi/*iscsi*
4448F: include/scsi/*iscsi* 4449F: include/scsi/*iscsi*
4449 4450
4451ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
4452M: Or Gerlitz <ogerlitz@mellanox.com>
4453M: Roi Dayan <roid@mellanox.com>
4454L: linux-rdma@vger.kernel.org
4455S: Supported
4456W: http://www.openfabrics.org
4457W: www.open-iscsi.org
4458Q: http://patchwork.kernel.org/project/linux-rdma/list/
4459F: drivers/infiniband/ulp/iser
4460
4450ISDN SUBSYSTEM 4461ISDN SUBSYSTEM
4451M: Karsten Keil <isdn@linux-pingi.de> 4462M: Karsten Keil <isdn@linux-pingi.de>
4452L: isdn4linux@listserv.isdn4linux.de (subscribers-only) 4463L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
@@ -5755,7 +5766,7 @@ M: Matthew Wilcox <willy@linux.intel.com>
5755L: linux-nvme@lists.infradead.org 5766L: linux-nvme@lists.infradead.org
5756T: git git://git.infradead.org/users/willy/linux-nvme.git 5767T: git git://git.infradead.org/users/willy/linux-nvme.git
5757S: Supported 5768S: Supported
5758F: drivers/block/nvme.c 5769F: drivers/block/nvme*
5759F: include/linux/nvme.h 5770F: include/linux/nvme.h
5760 5771
5761OMAP SUPPORT 5772OMAP SUPPORT
@@ -6087,7 +6098,15 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/parisc-2.6.git
6087T: git git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git 6098T: git git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git
6088S: Maintained 6099S: Maintained
6089F: arch/parisc/ 6100F: arch/parisc/
6101F: Documentation/parisc/
6090F: drivers/parisc/ 6102F: drivers/parisc/
6103F: drivers/char/agp/parisc-agp.c
6104F: drivers/input/serio/gscps2.c
6105F: drivers/parport/parport_gsc.*
6106F: drivers/tty/serial/8250/8250_gsc.c
6107F: drivers/video/sti*
6108F: drivers/video/console/sti*
6109F: drivers/video/logo/logo_parisc*
6091 6110
6092PC87360 HARDWARE MONITORING DRIVER 6111PC87360 HARDWARE MONITORING DRIVER
6093M: Jim Cromie <jim.cromie@gmail.com> 6112M: Jim Cromie <jim.cromie@gmail.com>
@@ -7605,7 +7624,7 @@ F: drivers/clk/spear/
7605SPI SUBSYSTEM 7624SPI SUBSYSTEM
7606M: Mark Brown <broonie@kernel.org> 7625M: Mark Brown <broonie@kernel.org>
7607M: Grant Likely <grant.likely@linaro.org> 7626M: Grant Likely <grant.likely@linaro.org>
7608L: spi-devel-general@lists.sourceforge.net 7627L: linux-spi@vger.kernel.org
7609T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git 7628T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git
7610Q: http://patchwork.kernel.org/project/spi-devel-general/list/ 7629Q: http://patchwork.kernel.org/project/spi-devel-general/list/
7611S: Maintained 7630S: Maintained
@@ -8985,7 +9004,7 @@ S: Maintained
8985F: drivers/net/wireless/wl3501* 9004F: drivers/net/wireless/wl3501*
8986 9005
8987WM97XX TOUCHSCREEN DRIVERS 9006WM97XX TOUCHSCREEN DRIVERS
8988M: Mark Brown <broonie@opensource.wolfsonmicro.com> 9007M: Mark Brown <broonie@kernel.org>
8989M: Liam Girdwood <lrg@slimlogic.co.uk> 9008M: Liam Girdwood <lrg@slimlogic.co.uk>
8990L: linux-input@vger.kernel.org 9009L: linux-input@vger.kernel.org
8991T: git git://opensource.wolfsonmicro.com/linux-2.6-touch 9010T: git git://opensource.wolfsonmicro.com/linux-2.6-touch
@@ -8995,7 +9014,6 @@ F: drivers/input/touchscreen/*wm97*
8995F: include/linux/wm97xx.h 9014F: include/linux/wm97xx.h
8996 9015
8997WOLFSON MICROELECTRONICS DRIVERS 9016WOLFSON MICROELECTRONICS DRIVERS
8998M: Mark Brown <broonie@opensource.wolfsonmicro.com>
8999L: patches@opensource.wolfsonmicro.com 9017L: patches@opensource.wolfsonmicro.com
9000T: git git://opensource.wolfsonmicro.com/linux-2.6-asoc 9018T: git git://opensource.wolfsonmicro.com/linux-2.6-asoc
9001T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus 9019T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
diff --git a/Makefile b/Makefile
index 73e20dba55c1..e5e3ba085191 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 10 2PATCHLEVEL = 10
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc3 4EXTRAVERSION =
5NAME = Unicycling Gorilla 5NAME = Unicycling Gorilla
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 49d993cee512..136f263ed47b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1087,6 +1087,20 @@ if !MMU
1087source "arch/arm/Kconfig-nommu" 1087source "arch/arm/Kconfig-nommu"
1088endif 1088endif
1089 1089
1090config PJ4B_ERRATA_4742
1091 bool "PJ4B Errata 4742: IDLE Wake Up Commands can Cause the CPU Core to Cease Operation"
1092 depends on CPU_PJ4B && MACH_ARMADA_370
1093 default y
1094 help
1095 When coming out of either a Wait for Interrupt (WFI) or a Wait for
1096 Event (WFE) IDLE states, a specific timing sensitivity exists between
1097 the retiring WFI/WFE instructions and the newly issued subsequent
1098 instructions. This sensitivity can result in a CPU hang scenario.
1099 Workaround:
1100 The software must insert either a Data Synchronization Barrier (DSB)
1101 or Data Memory Barrier (DMB) command immediately after the WFI/WFE
1102 instruction
1103
1090config ARM_ERRATA_326103 1104config ARM_ERRATA_326103
1091 bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory" 1105 bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory"
1092 depends on CPU_V6 1106 depends on CPU_V6
@@ -1189,6 +1203,16 @@ config PL310_ERRATA_588369
1189 is not correctly implemented in PL310 as clean lines are not 1203 is not correctly implemented in PL310 as clean lines are not
1190 invalidated as a result of these operations. 1204 invalidated as a result of these operations.
1191 1205
1206config ARM_ERRATA_643719
1207 bool "ARM errata: LoUIS bit field in CLIDR register is incorrect"
1208 depends on CPU_V7 && SMP
1209 help
1210 This option enables the workaround for the 643719 Cortex-A9 (prior to
1211 r1p0) erratum. On affected cores the LoUIS bit field of the CLIDR
1212 register returns zero when it should return one. The workaround
1213 corrects this value, ensuring cache maintenance operations which use
1214 it behave as intended and avoiding data corruption.
1215
1192config ARM_ERRATA_720789 1216config ARM_ERRATA_720789
1193 bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID" 1217 bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
1194 depends on CPU_V7 1218 depends on CPU_V7
@@ -2006,7 +2030,7 @@ config XIP_PHYS_ADDR
2006 2030
2007config KEXEC 2031config KEXEC
2008 bool "Kexec system call (EXPERIMENTAL)" 2032 bool "Kexec system call (EXPERIMENTAL)"
2009 depends on (!SMP || HOTPLUG_CPU) 2033 depends on (!SMP || PM_SLEEP_SMP)
2010 help 2034 help
2011 kexec is a system call that implements the ability to shutdown your 2035 kexec is a system call that implements the ability to shutdown your
2012 current kernel, and to start another kernel. It is like a reboot 2036 current kernel, and to start another kernel. It is like a reboot
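The PJ4B_ERRATA_4742 help text added above boils down to "always follow a WFI/WFE with a barrier". A minimal sketch of that idle sequence, written as C with GCC inline assembly (illustrative only, assuming an ARMv7/PJ4B target; this is not the kernel's actual PJ4B idle routine):

static inline void pj4b_safe_idle(void)
{
	asm volatile(
		"wfi\n\t"	/* enter the IDLE state, wait for interrupt        */
		"dsb\n\t"	/* erratum 4742: barrier immediately after WFI/WFE */
		::: "memory");
}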
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 3580d57ea218..120b83bfde20 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -116,7 +116,8 @@ targets := vmlinux vmlinux.lds \
116 116
117# Make sure files are removed during clean 117# Make sure files are removed during clean
118extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \ 118extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \
119 lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) 119 lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) \
120 hyp-stub.S
120 121
121ifeq ($(CONFIG_FUNCTION_TRACER),y) 122ifeq ($(CONFIG_FUNCTION_TRACER),y)
122ORIG_CFLAGS := $(KBUILD_CFLAGS) 123ORIG_CFLAGS := $(KBUILD_CFLAGS)
@@ -124,7 +125,7 @@ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
124endif 125endif
125 126
126ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj) 127ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
127asflags-y := -Wa,-march=all -DZIMAGE 128asflags-y := -DZIMAGE
128 129
129# Supply kernel BSS size to the decompressor via a linker symbol. 130# Supply kernel BSS size to the decompressor via a linker symbol.
130KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \ 131KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \
diff --git a/arch/arm/boot/compressed/debug.S b/arch/arm/boot/compressed/debug.S
index 6e8382d5b7a4..5392ee63338f 100644
--- a/arch/arm/boot/compressed/debug.S
+++ b/arch/arm/boot/compressed/debug.S
@@ -1,6 +1,8 @@
1#include <linux/linkage.h> 1#include <linux/linkage.h>
2#include <asm/assembler.h> 2#include <asm/assembler.h>
3 3
4#ifndef CONFIG_DEBUG_SEMIHOSTING
5
4#include CONFIG_DEBUG_LL_INCLUDE 6#include CONFIG_DEBUG_LL_INCLUDE
5 7
6ENTRY(putc) 8ENTRY(putc)
@@ -10,3 +12,29 @@ ENTRY(putc)
10 busyuart r3, r1 12 busyuart r3, r1
11 mov pc, lr 13 mov pc, lr
12ENDPROC(putc) 14ENDPROC(putc)
15
16#else
17
18ENTRY(putc)
19 adr r1, 1f
20 ldmia r1, {r2, r3}
21 add r2, r2, r1
22 ldr r1, [r2, r3]
23 strb r0, [r1]
24 mov r0, #0x03 @ SYS_WRITEC
25 ARM( svc #0x123456 )
26 THUMB( svc #0xab )
27 mov pc, lr
28 .align 2
291: .word _GLOBAL_OFFSET_TABLE_ - .
30 .word semi_writec_buf(GOT)
31ENDPROC(putc)
32
33 .bss
34 .global semi_writec_buf
35 .type semi_writec_buf, %object
36semi_writec_buf:
37 .space 4
38 .size semi_writec_buf, 4
39
40#endif
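The semihosting putc path added above issues ARM semihosting operation 0x03 (SYS_WRITEC) through an SVC. A rough C equivalent, as a sketch only (it assumes ARM rather than Thumb state and a semihosting-aware debugger or model on the other end; the helper name is made up):

static void semihost_writec(char c)
{
	register unsigned long op  asm("r0") = 0x03;	/* SYS_WRITEC          */
	register const char  *arg  asm("r1") = &c;	/* r1 -> the character */

	asm volatile("svc 0x123456" : : "r" (op), "r" (arg) : "memory");
}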
diff --git a/arch/arm/boot/compressed/head-sa1100.S b/arch/arm/boot/compressed/head-sa1100.S
index 6179d94dd5c6..3115e313d9f6 100644
--- a/arch/arm/boot/compressed/head-sa1100.S
+++ b/arch/arm/boot/compressed/head-sa1100.S
@@ -11,6 +11,7 @@
11#include <asm/mach-types.h> 11#include <asm/mach-types.h>
12 12
13 .section ".start", "ax" 13 .section ".start", "ax"
14 .arch armv4
14 15
15__SA1100_start: 16__SA1100_start:
16 17
diff --git a/arch/arm/boot/compressed/head-shark.S b/arch/arm/boot/compressed/head-shark.S
index 089c560e07f1..92b56897ed64 100644
--- a/arch/arm/boot/compressed/head-shark.S
+++ b/arch/arm/boot/compressed/head-shark.S
@@ -18,6 +18,7 @@
18 18
19 .section ".start", "ax" 19 .section ".start", "ax"
20 20
21 .arch armv4
21 b __beginning 22 b __beginning
22 23
23__ofw_data: .long 0 @ the number of memory blocks 24__ofw_data: .long 0 @ the number of memory blocks
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index fe4d9c3ad761..032a8d987148 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -11,6 +11,7 @@
11#include <linux/linkage.h> 11#include <linux/linkage.h>
12#include <asm/assembler.h> 12#include <asm/assembler.h>
13 13
14 .arch armv7-a
14/* 15/*
15 * Debugging stuff 16 * Debugging stuff
16 * 17 *
@@ -805,8 +806,8 @@ call_cache_fn: adr r12, proc_types
805 .align 2 806 .align 2
806 .type proc_types,#object 807 .type proc_types,#object
807proc_types: 808proc_types:
808 .word 0x00000000 @ old ARM ID 809 .word 0x41000000 @ old ARM ID
809 .word 0x0000f000 810 .word 0xff00f000
810 mov pc, lr 811 mov pc, lr
811 THUMB( nop ) 812 THUMB( nop )
812 mov pc, lr 813 mov pc, lr
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 1460d9b88adf..8e1248f01fab 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -409,8 +409,8 @@
409 ti,hwmods = "gpmc"; 409 ti,hwmods = "gpmc";
410 reg = <0x50000000 0x2000>; 410 reg = <0x50000000 0x2000>;
411 interrupts = <100>; 411 interrupts = <100>;
412 num-cs = <7>; 412 gpmc,num-cs = <7>;
413 num-waitpins = <2>; 413 gpmc,num-waitpins = <2>;
414 #address-cells = <2>; 414 #address-cells = <2>;
415 #size-cells = <1>; 415 #size-cells = <1>;
416 status = "disabled"; 416 status = "disabled";
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
index 3ee63d128e27..76db557adbe7 100644
--- a/arch/arm/boot/dts/armada-xp-gp.dts
+++ b/arch/arm/boot/dts/armada-xp-gp.dts
@@ -39,8 +39,9 @@
39 }; 39 };
40 40
41 soc { 41 soc {
42 ranges = <0 0 0xd0000000 0x100000 42 ranges = <0 0 0xd0000000 0x100000 /* Internal registers 1MiB */
43 0xf0000000 0 0xf0000000 0x1000000>; 43 0xe0000000 0 0xe0000000 0x8100000 /* PCIe */
44 0xf0000000 0 0xf0000000 0x1000000 /* Device Bus, NOR 16MiB */>;
44 45
45 internal-regs { 46 internal-regs {
46 serial@12000 { 47 serial@12000 {
diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
index 46b785064dd8..fdea75c73411 100644
--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -27,8 +27,9 @@
27 }; 27 };
28 28
29 soc { 29 soc {
30 ranges = <0 0 0xd0000000 0x100000 30 ranges = <0 0 0xd0000000 0x100000 /* Internal registers 1MiB */
31 0xf0000000 0 0xf0000000 0x8000000>; 31 0xe0000000 0 0xe0000000 0x8100000 /* PCIe */
32 0xf0000000 0 0xf0000000 0x8000000 /* Device Bus, NOR 128MiB */>;
32 33
33 internal-regs { 34 internal-regs {
34 serial@12000 { 35 serial@12000 {
diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi
index f0052dccf9a8..1e12aeff403b 100644
--- a/arch/arm/boot/dts/bcm2835.dtsi
+++ b/arch/arm/boot/dts/bcm2835.dtsi
@@ -44,6 +44,7 @@
44 reg = <0x7e201000 0x1000>; 44 reg = <0x7e201000 0x1000>;
45 interrupts = <2 25>; 45 interrupts = <2 25>;
46 clock-frequency = <3000000>; 46 clock-frequency = <3000000>;
47 arm,primecell-periphid = <0x00241011>;
47 }; 48 };
48 49
49 gpio: gpio { 50 gpio: gpio {
diff --git a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
index d1650fb34c0a..ded558bb0f3b 100644
--- a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
@@ -763,7 +763,7 @@
763 }; 763 };
764 }; 764 };
765 765
766 pinctrl@03680000 { 766 pinctrl@03860000 {
767 gpz: gpz { 767 gpz: gpz {
768 gpio-controller; 768 gpio-controller;
769 #gpio-cells = <2>; 769 #gpio-cells = <2>;
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 0673524238a6..fc9fb3d526e2 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -161,9 +161,9 @@
161 interrupts = <0 50 0>; 161 interrupts = <0 50 0>;
162 }; 162 };
163 163
164 pinctrl_3: pinctrl@03680000 { 164 pinctrl_3: pinctrl@03860000 {
165 compatible = "samsung,exynos5250-pinctrl"; 165 compatible = "samsung,exynos5250-pinctrl";
166 reg = <0x0368000 0x1000>; 166 reg = <0x03860000 0x1000>;
167 interrupts = <0 47 0>; 167 interrupts = <0 47 0>;
168 }; 168 };
169 169
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index d2550e0bca24..701153992c69 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -141,8 +141,8 @@
141 #size-cells = <0>; 141 #size-cells = <0>;
142 compatible = "fsl,imx25-cspi", "fsl,imx35-cspi"; 142 compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
143 reg = <0x43fa4000 0x4000>; 143 reg = <0x43fa4000 0x4000>;
144 clocks = <&clks 62>; 144 clocks = <&clks 62>, <&clks 62>;
145 clock-names = "ipg"; 145 clock-names = "ipg", "per";
146 interrupts = <14>; 146 interrupts = <14>;
147 status = "disabled"; 147 status = "disabled";
148 }; 148 };
@@ -182,8 +182,8 @@
182 compatible = "fsl,imx25-cspi", "fsl,imx35-cspi"; 182 compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
183 reg = <0x50004000 0x4000>; 183 reg = <0x50004000 0x4000>;
184 interrupts = <0>; 184 interrupts = <0>;
185 clocks = <&clks 80>; 185 clocks = <&clks 80>, <&clks 80>;
186 clock-names = "ipg"; 186 clock-names = "ipg", "per";
187 status = "disabled"; 187 status = "disabled";
188 }; 188 };
189 189
@@ -210,8 +210,8 @@
210 #size-cells = <0>; 210 #size-cells = <0>;
211 compatible = "fsl,imx25-cspi", "fsl,imx35-cspi"; 211 compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
212 reg = <0x50010000 0x4000>; 212 reg = <0x50010000 0x4000>;
213 clocks = <&clks 79>; 213 clocks = <&clks 79>, <&clks 79>;
214 clock-names = "ipg"; 214 clock-names = "ipg", "per";
215 interrupts = <13>; 215 interrupts = <13>;
216 status = "disabled"; 216 status = "disabled";
217 }; 217 };
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index ff4bd4873edf..75bd11386516 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -131,7 +131,7 @@
131 compatible = "fsl,imx27-cspi"; 131 compatible = "fsl,imx27-cspi";
132 reg = <0x1000e000 0x1000>; 132 reg = <0x1000e000 0x1000>;
133 interrupts = <16>; 133 interrupts = <16>;
134 clocks = <&clks 53>, <&clks 0>; 134 clocks = <&clks 53>, <&clks 53>;
135 clock-names = "ipg", "per"; 135 clock-names = "ipg", "per";
136 status = "disabled"; 136 status = "disabled";
137 }; 137 };
@@ -142,7 +142,7 @@
142 compatible = "fsl,imx27-cspi"; 142 compatible = "fsl,imx27-cspi";
143 reg = <0x1000f000 0x1000>; 143 reg = <0x1000f000 0x1000>;
144 interrupts = <15>; 144 interrupts = <15>;
145 clocks = <&clks 52>, <&clks 0>; 145 clocks = <&clks 52>, <&clks 52>;
146 clock-names = "ipg", "per"; 146 clock-names = "ipg", "per";
147 status = "disabled"; 147 status = "disabled";
148 }; 148 };
@@ -223,7 +223,7 @@
223 compatible = "fsl,imx27-cspi"; 223 compatible = "fsl,imx27-cspi";
224 reg = <0x10017000 0x1000>; 224 reg = <0x10017000 0x1000>;
225 interrupts = <6>; 225 interrupts = <6>;
226 clocks = <&clks 51>, <&clks 0>; 226 clocks = <&clks 51>, <&clks 51>;
227 clock-names = "ipg", "per"; 227 clock-names = "ipg", "per";
228 status = "disabled"; 228 status = "disabled";
229 }; 229 };
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index 21bb786c5b31..53fdde69bbf4 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -631,7 +631,7 @@
631 compatible = "fsl,imx51-cspi", "fsl,imx35-cspi"; 631 compatible = "fsl,imx51-cspi", "fsl,imx35-cspi";
632 reg = <0x83fc0000 0x4000>; 632 reg = <0x83fc0000 0x4000>;
633 interrupts = <38>; 633 interrupts = <38>;
634 clocks = <&clks 55>, <&clks 0>; 634 clocks = <&clks 55>, <&clks 55>;
635 clock-names = "ipg", "per"; 635 clock-names = "ipg", "per";
636 status = "disabled"; 636 status = "disabled";
637 }; 637 };
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 845982eaac22..eb83aa039b8b 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -714,7 +714,7 @@
714 compatible = "fsl,imx53-cspi", "fsl,imx35-cspi"; 714 compatible = "fsl,imx53-cspi", "fsl,imx35-cspi";
715 reg = <0x63fc0000 0x4000>; 715 reg = <0x63fc0000 0x4000>;
716 interrupts = <38>; 716 interrupts = <38>;
717 clocks = <&clks 55>, <&clks 0>; 717 clocks = <&clks 55>, <&clks 55>;
718 clock-names = "ipg", "per"; 718 clock-names = "ipg", "per";
719 status = "disabled"; 719 status = "disabled";
720 }; 720 };
diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi
index 03bd60deb52b..eeb734e25709 100644
--- a/arch/arm/boot/dts/omap4-panda-common.dtsi
+++ b/arch/arm/boot/dts/omap4-panda-common.dtsi
@@ -56,9 +56,23 @@
56 }; 56 };
57}; 57};
58 58
59&omap4_pmx_wkup {
60 pinctrl-names = "default";
61 pinctrl-0 = <
62 &twl6030_wkup_pins
63 >;
64
65 twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
66 pinctrl-single,pins = <
67 0x14 0x2 /* fref_clk0_out.sys_drm_msecure OUTPUT | MODE2 */
68 >;
69 };
70};
71
59&omap4_pmx_core { 72&omap4_pmx_core {
60 pinctrl-names = "default"; 73 pinctrl-names = "default";
61 pinctrl-0 = < 74 pinctrl-0 = <
75 &twl6030_pins
62 &twl6040_pins 76 &twl6040_pins
63 &mcpdm_pins 77 &mcpdm_pins
64 &mcbsp1_pins 78 &mcbsp1_pins
@@ -66,6 +80,12 @@
66 &tpd12s015_pins 80 &tpd12s015_pins
67 >; 81 >;
68 82
83 twl6030_pins: pinmux_twl6030_pins {
84 pinctrl-single,pins = <
85 0x15e 0x4118 /* sys_nirq1.sys_nirq1 OMAP_WAKEUP_EN | INPUT_PULLUP | MODE0 */
86 >;
87 };
88
69 twl6040_pins: pinmux_twl6040_pins { 89 twl6040_pins: pinmux_twl6040_pins {
70 pinctrl-single,pins = < 90 pinctrl-single,pins = <
71 0xe0 0x3 /* hdq_sio.gpio_127 OUTPUT | MODE3 */ 91 0xe0 0x3 /* hdq_sio.gpio_127 OUTPUT | MODE3 */
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index a35d9cd58063..98505a2ef162 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -142,9 +142,23 @@
142 }; 142 };
143}; 143};
144 144
145&omap4_pmx_wkup {
146 pinctrl-names = "default";
147 pinctrl-0 = <
148 &twl6030_wkup_pins
149 >;
150
151 twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
152 pinctrl-single,pins = <
153 0x14 0x2 /* fref_clk0_out.sys_drm_msecure OUTPUT | MODE2 */
154 >;
155 };
156};
157
145&omap4_pmx_core { 158&omap4_pmx_core {
146 pinctrl-names = "default"; 159 pinctrl-names = "default";
147 pinctrl-0 = < 160 pinctrl-0 = <
161 &twl6030_pins
148 &twl6040_pins 162 &twl6040_pins
149 &mcpdm_pins 163 &mcpdm_pins
150 &dmic_pins 164 &dmic_pins
@@ -179,6 +193,12 @@
179 >; 193 >;
180 }; 194 };
181 195
196 twl6030_pins: pinmux_twl6030_pins {
197 pinctrl-single,pins = <
198 0x15e 0x4118 /* sys_nirq1.sys_nirq1 OMAP_WAKEUP_EN | INPUT_PULLUP | MODE0 */
199 >;
200 };
201
182 twl6040_pins: pinmux_twl6040_pins { 202 twl6040_pins: pinmux_twl6040_pins {
183 pinctrl-single,pins = < 203 pinctrl-single,pins = <
184 0xe0 0x3 /* hdq_sio.gpio_127 OUTPUT | MODE3 */ 204 0xe0 0x3 /* hdq_sio.gpio_127 OUTPUT | MODE3 */
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 3dd7ff825828..635cae283011 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -538,6 +538,7 @@
538 interrupts = <0 41 0x4>; 538 interrupts = <0 41 0x4>;
539 ti,hwmods = "timer5"; 539 ti,hwmods = "timer5";
540 ti,timer-dsp; 540 ti,timer-dsp;
541 ti,timer-pwm;
541 }; 542 };
542 543
543 timer6: timer@4013a000 { 544 timer6: timer@4013a000 {
@@ -574,6 +575,7 @@
574 reg = <0x4803e000 0x80>; 575 reg = <0x4803e000 0x80>;
575 interrupts = <0 45 0x4>; 576 interrupts = <0 45 0x4>;
576 ti,hwmods = "timer9"; 577 ti,hwmods = "timer9";
578 ti,timer-pwm;
577 }; 579 };
578 580
579 timer10: timer@48086000 { 581 timer10: timer@48086000 {
@@ -581,6 +583,7 @@
581 reg = <0x48086000 0x80>; 583 reg = <0x48086000 0x80>;
582 interrupts = <0 46 0x4>; 584 interrupts = <0 46 0x4>;
583 ti,hwmods = "timer10"; 585 ti,hwmods = "timer10";
586 ti,timer-pwm;
584 }; 587 };
585 588
586 timer11: timer@48088000 { 589 timer11: timer@48088000 {
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index bff71388e72a..17d0ae8672fa 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -320,9 +320,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
320} 320}
321 321
322#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE 322#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
323static inline void flush_kernel_dcache_page(struct page *page) 323extern void flush_kernel_dcache_page(struct page *);
324{
325}
326 324
327#define flush_dcache_mmap_lock(mapping) \ 325#define flush_dcache_mmap_lock(mapping) \
328 spin_lock_irq(&(mapping)->tree_lock) 326 spin_lock_irq(&(mapping)->tree_lock)
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 7652712d1d14..dba62cb1ad08 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -32,6 +32,8 @@
32 32
33#define MPIDR_HWID_BITMASK 0xFFFFFF 33#define MPIDR_HWID_BITMASK 0xFFFFFF
34 34
35#define MPIDR_INVALID (~MPIDR_HWID_BITMASK)
36
35#define MPIDR_LEVEL_BITS 8 37#define MPIDR_LEVEL_BITS 8
36#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1) 38#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
37 39
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
index ac1dd54724b6..8017e94acc5e 100644
--- a/arch/arm/include/asm/glue-proc.h
+++ b/arch/arm/include/asm/glue-proc.h
@@ -230,6 +230,15 @@
230# endif 230# endif
231#endif 231#endif
232 232
233#ifdef CONFIG_CPU_PJ4B
234# ifdef CPU_NAME
235# undef MULTI_CPU
236# define MULTI_CPU
237# else
238# define CPU_NAME cpu_pj4b
239# endif
240#endif
241
233#ifndef MULTI_CPU 242#ifndef MULTI_CPU
234#define cpu_proc_init __glue(CPU_NAME,_proc_init) 243#define cpu_proc_init __glue(CPU_NAME,_proc_init)
235#define cpu_proc_fin __glue(CPU_NAME,_proc_fin) 244#define cpu_proc_fin __glue(CPU_NAME,_proc_fin)
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index 968c0a14e0a3..209e6504922e 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -30,8 +30,15 @@ static inline void set_my_cpu_offset(unsigned long off)
30static inline unsigned long __my_cpu_offset(void) 30static inline unsigned long __my_cpu_offset(void)
31{ 31{
32 unsigned long off; 32 unsigned long off;
33 /* Read TPIDRPRW */ 33 register unsigned long *sp asm ("sp");
34 asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory"); 34
35 /*
36 * Read TPIDRPRW.
37 * We want to allow caching the value, so avoid using volatile and
38 * instead use a fake stack read to hazard against barrier().
39 */
40 asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));
41
35 return off; 42 return off;
36} 43}
37#define __my_cpu_offset __my_cpu_offset() 44#define __my_cpu_offset __my_cpu_offset()
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index aaa61b6f50ff..e78983202737 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -49,7 +49,7 @@ static inline int cache_ops_need_broadcast(void)
49/* 49/*
50 * Logical CPU mapping. 50 * Logical CPU mapping.
51 */ 51 */
52extern int __cpu_logical_map[]; 52extern u32 __cpu_logical_map[];
53#define cpu_logical_map(cpu) __cpu_logical_map[cpu] 53#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
54/* 54/*
55 * Retrieve logical cpu index corresponding to a given MPIDR[23:0] 55 * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 99a19512ee26..bdf2b8458ec1 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -33,18 +33,6 @@
33#include <asm/pgalloc.h> 33#include <asm/pgalloc.h>
34#include <asm/tlbflush.h> 34#include <asm/tlbflush.h>
35 35
36/*
37 * We need to delay page freeing for SMP as other CPUs can access pages
38 * which have been removed but not yet had their TLB entries invalidated.
39 * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
40 * we need to apply this same delaying tactic to ensure correct operation.
41 */
42#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
43#define tlb_fast_mode(tlb) 0
44#else
45#define tlb_fast_mode(tlb) 1
46#endif
47
48#define MMU_GATHER_BUNDLE 8 36#define MMU_GATHER_BUNDLE 8
49 37
50/* 38/*
@@ -112,12 +100,10 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
112static inline void tlb_flush_mmu(struct mmu_gather *tlb) 100static inline void tlb_flush_mmu(struct mmu_gather *tlb)
113{ 101{
114 tlb_flush(tlb); 102 tlb_flush(tlb);
115 if (!tlb_fast_mode(tlb)) { 103 free_pages_and_swap_cache(tlb->pages, tlb->nr);
116 free_pages_and_swap_cache(tlb->pages, tlb->nr); 104 tlb->nr = 0;
117 tlb->nr = 0; 105 if (tlb->pages == tlb->local)
118 if (tlb->pages == tlb->local) 106 __tlb_alloc_page(tlb);
119 __tlb_alloc_page(tlb);
120 }
121} 107}
122 108
123static inline void 109static inline void
@@ -178,11 +164,6 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
178 164
179static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) 165static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
180{ 166{
181 if (tlb_fast_mode(tlb)) {
182 free_page_and_swap_cache(page);
183 return 1; /* avoid calling tlb_flush_mmu */
184 }
185
186 tlb->pages[tlb->nr++] = page; 167 tlb->pages[tlb->nr++] = page;
187 VM_BUG_ON(tlb->nr > tlb->max); 168 VM_BUG_ON(tlb->nr > tlb->max);
188 return tlb->max - tlb->nr; 169 return tlb->max - tlb->nr;
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 5af04f6daa33..5859c8bc727c 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -82,7 +82,7 @@ void __init arm_dt_init_cpu_maps(void)
82 u32 i, j, cpuidx = 1; 82 u32 i, j, cpuidx = 1;
83 u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0; 83 u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
84 84
85 u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = UINT_MAX }; 85 u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
86 bool bootcpu_valid = false; 86 bool bootcpu_valid = false;
87 cpus = of_find_node_by_path("/cpus"); 87 cpus = of_find_node_by_path("/cpus");
88 88
@@ -92,6 +92,9 @@ void __init arm_dt_init_cpu_maps(void)
92 for_each_child_of_node(cpus, cpu) { 92 for_each_child_of_node(cpus, cpu) {
93 u32 hwid; 93 u32 hwid;
94 94
95 if (of_node_cmp(cpu->type, "cpu"))
96 continue;
97
95 pr_debug(" * %s...\n", cpu->full_name); 98 pr_debug(" * %s...\n", cpu->full_name);
96 /* 99 /*
97 * A device tree containing CPU nodes with missing "reg" 100 * A device tree containing CPU nodes with missing "reg"
@@ -149,9 +152,10 @@ void __init arm_dt_init_cpu_maps(void)
149 tmp_map[i] = hwid; 152 tmp_map[i] = hwid;
150 } 153 }
151 154
152 if (WARN(!bootcpu_valid, "DT missing boot CPU MPIDR[23:0], " 155 if (!bootcpu_valid) {
153 "fall back to default cpu_logical_map\n")) 156 pr_warn("DT missing boot CPU MPIDR[23:0], fall back to default cpu_logical_map\n");
154 return; 157 return;
158 }
155 159
156 /* 160 /*
157 * Since the boot CPU node contains proper data, and all nodes have 161 * Since the boot CPU node contains proper data, and all nodes have
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 8ef8c9337809..4fb074c446bf 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -134,6 +134,10 @@ void machine_kexec(struct kimage *image)
134 unsigned long reboot_code_buffer_phys; 134 unsigned long reboot_code_buffer_phys;
135 void *reboot_code_buffer; 135 void *reboot_code_buffer;
136 136
137 if (num_online_cpus() > 1) {
138 pr_err("kexec: error: multiple CPUs still online\n");
139 return;
140 }
137 141
138 page_list = image->head & PAGE_MASK; 142 page_list = image->head & PAGE_MASK;
139 143
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 282de4826abb..6e8931ccf13e 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -184,30 +184,61 @@ int __init reboot_setup(char *str)
184 184
185__setup("reboot=", reboot_setup); 185__setup("reboot=", reboot_setup);
186 186
187/*
188 * Called by kexec, immediately prior to machine_kexec().
189 *
190 * This must completely disable all secondary CPUs; simply causing those CPUs
191 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
192 * kexec'd kernel to use any and all RAM as it sees fit, without having to
193 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
194 * functionality embodied in disable_nonboot_cpus() to achieve this.
195 */
187void machine_shutdown(void) 196void machine_shutdown(void)
188{ 197{
189#ifdef CONFIG_SMP 198 disable_nonboot_cpus();
190 smp_send_stop();
191#endif
192} 199}
193 200
201/*
202 * Halting simply requires that the secondary CPUs stop performing any
203 * activity (executing tasks, handling interrupts). smp_send_stop()
204 * achieves this.
205 */
194void machine_halt(void) 206void machine_halt(void)
195{ 207{
196 machine_shutdown(); 208 smp_send_stop();
209
197 local_irq_disable(); 210 local_irq_disable();
198 while (1); 211 while (1);
199} 212}
200 213
214/*
215 * Power-off simply requires that the secondary CPUs stop performing any
216 * activity (executing tasks, handling interrupts). smp_send_stop()
217 * achieves this. When the system power is turned off, it will take all CPUs
218 * with it.
219 */
201void machine_power_off(void) 220void machine_power_off(void)
202{ 221{
203 machine_shutdown(); 222 smp_send_stop();
223
204 if (pm_power_off) 224 if (pm_power_off)
205 pm_power_off(); 225 pm_power_off();
206} 226}
207 227
228/*
229 * Restart requires that the secondary CPUs stop performing any activity
230 * while the primary CPU resets the system. Systems with a single CPU can
231 * use soft_restart() as their machine descriptor's .restart hook, since that
232 * will cause the only available CPU to reset. Systems with multiple CPUs must
233 * provide a HW restart implementation, to ensure that all CPUs reset at once.
234 * This is required so that any code running after reset on the primary CPU
235 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
236 * executing pre-reset code, and using RAM that the primary CPU's code wishes
237 * to use. Implementing such co-ordination would be essentially impossible.
238 */
208void machine_restart(char *cmd) 239void machine_restart(char *cmd)
209{ 240{
210 machine_shutdown(); 241 smp_send_stop();
211 242
212 arm_pm_restart(reboot_mode, cmd); 243 arm_pm_restart(reboot_mode, cmd);
213 244
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 1522c7ae31b0..b4b1d397592b 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -444,7 +444,7 @@ void notrace cpu_init(void)
444 : "r14"); 444 : "r14");
445} 445}
446 446
447int __cpu_logical_map[NR_CPUS]; 447u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
448 448
449void __init smp_setup_processor_id(void) 449void __init smp_setup_processor_id(void)
450{ 450{
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 550d63cef68e..5919eb451bb9 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -651,17 +651,6 @@ void smp_send_reschedule(int cpu)
651 smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); 651 smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
652} 652}
653 653
654#ifdef CONFIG_HOTPLUG_CPU
655static void smp_kill_cpus(cpumask_t *mask)
656{
657 unsigned int cpu;
658 for_each_cpu(cpu, mask)
659 platform_cpu_kill(cpu);
660}
661#else
662static void smp_kill_cpus(cpumask_t *mask) { }
663#endif
664
665void smp_send_stop(void) 654void smp_send_stop(void)
666{ 655{
667 unsigned long timeout; 656 unsigned long timeout;
@@ -679,8 +668,6 @@ void smp_send_stop(void)
679 668
680 if (num_online_cpus() > 1) 669 if (num_online_cpus() > 1)
681 pr_warning("SMP: failed to stop secondary CPUs\n"); 670 pr_warning("SMP: failed to stop secondary CPUs\n");
682
683 smp_kill_cpus(&mask);
684} 671}
685 672
686/* 673/*
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index f10316b4ecdc..c5a59546a256 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/cpu.h> 14#include <linux/cpu.h>
15#include <linux/cpumask.h> 15#include <linux/cpumask.h>
16#include <linux/export.h>
16#include <linux/init.h> 17#include <linux/init.h>
17#include <linux/percpu.h> 18#include <linux/percpu.h>
18#include <linux/node.h> 19#include <linux/node.h>
@@ -200,6 +201,7 @@ static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {}
200 * cpu topology table 201 * cpu topology table
201 */ 202 */
202struct cputopo_arm cpu_topology[NR_CPUS]; 203struct cputopo_arm cpu_topology[NR_CPUS];
204EXPORT_SYMBOL_GPL(cpu_topology);
203 205
204const struct cpumask *cpu_coregroup_mask(int cpu) 206const struct cpumask *cpu_coregroup_mask(int cpu)
205{ 207{
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 37d216d814cd..ef1703b9587b 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu)
492 wait_event_interruptible(*wq, !vcpu->arch.pause); 492 wait_event_interruptible(*wq, !vcpu->arch.pause);
493} 493}
494 494
495static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
496{
497 return vcpu->arch.target >= 0;
498}
499
495/** 500/**
496 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code 501 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
497 * @vcpu: The VCPU pointer 502 * @vcpu: The VCPU pointer
@@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
508 int ret; 513 int ret;
509 sigset_t sigsaved; 514 sigset_t sigsaved;
510 515
511 /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ 516 if (unlikely(!kvm_vcpu_initialized(vcpu)))
512 if (unlikely(vcpu->arch.target < 0))
513 return -ENOEXEC; 517 return -ENOEXEC;
514 518
515 ret = kvm_vcpu_first_run_init(vcpu); 519 ret = kvm_vcpu_first_run_init(vcpu);
@@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
710 case KVM_SET_ONE_REG: 714 case KVM_SET_ONE_REG:
711 case KVM_GET_ONE_REG: { 715 case KVM_GET_ONE_REG: {
712 struct kvm_one_reg reg; 716 struct kvm_one_reg reg;
717
718 if (unlikely(!kvm_vcpu_initialized(vcpu)))
719 return -ENOEXEC;
720
713 if (copy_from_user(&reg, argp, sizeof(reg))) 721 if (copy_from_user(&reg, argp, sizeof(reg)))
714 return -EFAULT; 722 return -EFAULT;
715 if (ioctl == KVM_SET_ONE_REG) 723 if (ioctl == KVM_SET_ONE_REG)
@@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
722 struct kvm_reg_list reg_list; 730 struct kvm_reg_list reg_list;
723 unsigned n; 731 unsigned n;
724 732
733 if (unlikely(!kvm_vcpu_initialized(vcpu)))
734 return -ENOEXEC;
735
725 if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) 736 if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
726 return -EFAULT; 737 return -EFAULT;
727 n = reg_list.n; 738 n = reg_list.n;
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 965706578f13..84ba67b982c0 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -43,7 +43,14 @@ static phys_addr_t hyp_idmap_vector;
43 43
44static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) 44static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
45{ 45{
46 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); 46 /*
47 * This function also gets called when dealing with HYP page
48 * tables. As HYP doesn't have an associated struct kvm (and
49 * the HYP page tables are fairly static), we don't do
50 * anything there.
51 */
52 if (kvm)
53 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
47} 54}
48 55
49static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, 56static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
@@ -78,18 +85,20 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
78 return p; 85 return p;
79} 86}
80 87
81static void clear_pud_entry(pud_t *pud) 88static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
82{ 89{
83 pmd_t *pmd_table = pmd_offset(pud, 0); 90 pmd_t *pmd_table = pmd_offset(pud, 0);
84 pud_clear(pud); 91 pud_clear(pud);
92 kvm_tlb_flush_vmid_ipa(kvm, addr);
85 pmd_free(NULL, pmd_table); 93 pmd_free(NULL, pmd_table);
86 put_page(virt_to_page(pud)); 94 put_page(virt_to_page(pud));
87} 95}
88 96
89static void clear_pmd_entry(pmd_t *pmd) 97static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
90{ 98{
91 pte_t *pte_table = pte_offset_kernel(pmd, 0); 99 pte_t *pte_table = pte_offset_kernel(pmd, 0);
92 pmd_clear(pmd); 100 pmd_clear(pmd);
101 kvm_tlb_flush_vmid_ipa(kvm, addr);
93 pte_free_kernel(NULL, pte_table); 102 pte_free_kernel(NULL, pte_table);
94 put_page(virt_to_page(pmd)); 103 put_page(virt_to_page(pmd));
95} 104}
@@ -100,11 +109,12 @@ static bool pmd_empty(pmd_t *pmd)
100 return page_count(pmd_page) == 1; 109 return page_count(pmd_page) == 1;
101} 110}
102 111
103static void clear_pte_entry(pte_t *pte) 112static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
104{ 113{
105 if (pte_present(*pte)) { 114 if (pte_present(*pte)) {
106 kvm_set_pte(pte, __pte(0)); 115 kvm_set_pte(pte, __pte(0));
107 put_page(virt_to_page(pte)); 116 put_page(virt_to_page(pte));
117 kvm_tlb_flush_vmid_ipa(kvm, addr);
108 } 118 }
109} 119}
110 120
@@ -114,7 +124,8 @@ static bool pte_empty(pte_t *pte)
114 return page_count(pte_page) == 1; 124 return page_count(pte_page) == 1;
115} 125}
116 126
117static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size) 127static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
128 unsigned long long start, u64 size)
118{ 129{
119 pgd_t *pgd; 130 pgd_t *pgd;
120 pud_t *pud; 131 pud_t *pud;
@@ -138,15 +149,15 @@ static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size)
138 } 149 }
139 150
140 pte = pte_offset_kernel(pmd, addr); 151 pte = pte_offset_kernel(pmd, addr);
141 clear_pte_entry(pte); 152 clear_pte_entry(kvm, pte, addr);
142 range = PAGE_SIZE; 153 range = PAGE_SIZE;
143 154
144 /* If we emptied the pte, walk back up the ladder */ 155 /* If we emptied the pte, walk back up the ladder */
145 if (pte_empty(pte)) { 156 if (pte_empty(pte)) {
146 clear_pmd_entry(pmd); 157 clear_pmd_entry(kvm, pmd, addr);
147 range = PMD_SIZE; 158 range = PMD_SIZE;
148 if (pmd_empty(pmd)) { 159 if (pmd_empty(pmd)) {
149 clear_pud_entry(pud); 160 clear_pud_entry(kvm, pud, addr);
150 range = PUD_SIZE; 161 range = PUD_SIZE;
151 } 162 }
152 } 163 }
@@ -165,14 +176,14 @@ void free_boot_hyp_pgd(void)
165 mutex_lock(&kvm_hyp_pgd_mutex); 176 mutex_lock(&kvm_hyp_pgd_mutex);
166 177
167 if (boot_hyp_pgd) { 178 if (boot_hyp_pgd) {
168 unmap_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); 179 unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
169 unmap_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); 180 unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
170 kfree(boot_hyp_pgd); 181 kfree(boot_hyp_pgd);
171 boot_hyp_pgd = NULL; 182 boot_hyp_pgd = NULL;
172 } 183 }
173 184
174 if (hyp_pgd) 185 if (hyp_pgd)
175 unmap_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); 186 unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
176 187
177 kfree(init_bounce_page); 188 kfree(init_bounce_page);
178 init_bounce_page = NULL; 189 init_bounce_page = NULL;
@@ -200,9 +211,10 @@ void free_hyp_pgds(void)
200 211
201 if (hyp_pgd) { 212 if (hyp_pgd) {
202 for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE) 213 for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
203 unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); 214 unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
204 for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) 215 for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
205 unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); 216 unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
217
206 kfree(hyp_pgd); 218 kfree(hyp_pgd);
207 hyp_pgd = NULL; 219 hyp_pgd = NULL;
208 } 220 }
@@ -393,7 +405,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
393 */ 405 */
394static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) 406static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
395{ 407{
396 unmap_range(kvm->arch.pgd, start, size); 408 unmap_range(kvm, kvm->arch.pgd, start, size);
397} 409}
398 410
399/** 411/**
@@ -675,7 +687,6 @@ static void handle_hva_to_gpa(struct kvm *kvm,
675static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) 687static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
676{ 688{
677 unmap_stage2_range(kvm, gpa, PAGE_SIZE); 689 unmap_stage2_range(kvm, gpa, PAGE_SIZE);
678 kvm_tlb_flush_vmid_ipa(kvm, gpa);
679} 690}
680 691
681int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) 692int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
index 027c9e7f0d13..f7e504b7874d 100644
--- a/arch/arm/mach-exynos/common.c
+++ b/arch/arm/mach-exynos/common.c
@@ -386,6 +386,8 @@ int __init exynos_fdt_map_chipid(unsigned long node, const char *uname,
386 386
387void __init exynos_init_io(struct map_desc *mach_desc, int size) 387void __init exynos_init_io(struct map_desc *mach_desc, int size)
388{ 388{
389 debug_ll_io_init();
390
389#ifdef CONFIG_OF 391#ifdef CONFIG_OF
390 if (initial_boot_params) 392 if (initial_boot_params)
391 of_scan_flat_dt(exynos_fdt_map_chipid, NULL); 393 of_scan_flat_dt(exynos_fdt_map_chipid, NULL);
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index dda9a2bd3acb..4e3148ce852d 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -181,14 +181,14 @@ static const char *periph_clk2_sels[] = { "pll3_usb_otg", "osc", "osc", "dummy",
181static const char *periph2_clk2_sels[] = { "pll3_usb_otg", "pll2_bus", }; 181static const char *periph2_clk2_sels[] = { "pll3_usb_otg", "pll2_bus", };
182static const char *periph_sels[] = { "periph_pre", "periph_clk2", }; 182static const char *periph_sels[] = { "periph_pre", "periph_clk2", };
183static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", }; 183static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", };
184static const char *axi_sels[] = { "periph", "pll2_pfd2_396m", "pll3_pfd1_540m", }; 184static const char *axi_sels[] = { "periph", "pll2_pfd2_396m", "periph", "pll3_pfd1_540m", };
185static const char *audio_sels[] = { "pll4_post_div", "pll3_pfd2_508m", "pll3_pfd3_454m", "pll3_usb_otg", }; 185static const char *audio_sels[] = { "pll4_post_div", "pll3_pfd2_508m", "pll3_pfd3_454m", "pll3_usb_otg", };
186static const char *gpu_axi_sels[] = { "axi", "ahb", }; 186static const char *gpu_axi_sels[] = { "axi", "ahb", };
187static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", }; 187static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", };
188static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", }; 188static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
189static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll3_pfd0_720m", }; 189static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll3_pfd0_720m", };
190static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", }; 190static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
191static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", }; 191static const char *ldb_di_sels[] = { "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
192static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", }; 192static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
193static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; 193static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
194static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; 194static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
diff --git a/arch/arm/mach-kirkwood/board-ts219.c b/arch/arm/mach-kirkwood/board-ts219.c
index acb0187c7ee1..4695d5f35fc9 100644
--- a/arch/arm/mach-kirkwood/board-ts219.c
+++ b/arch/arm/mach-kirkwood/board-ts219.c
@@ -41,13 +41,3 @@ void __init qnap_dt_ts219_init(void)
41 41
42 pm_power_off = qnap_tsx1x_power_off; 42 pm_power_off = qnap_tsx1x_power_off;
43} 43}
44
45/* FIXME: Will not work with DT. Maybe use MPP40_GPIO? */
46static int __init ts219_pci_init(void)
47{
48 if (machine_is_ts219())
49 kirkwood_pcie_init(KW_PCIE0);
50
51 return 0;
52}
53subsys_initcall(ts219_pci_init);
diff --git a/arch/arm/mach-kirkwood/mpp.c b/arch/arm/mach-kirkwood/mpp.c
index 827cde42414f..e96fd71abd76 100644
--- a/arch/arm/mach-kirkwood/mpp.c
+++ b/arch/arm/mach-kirkwood/mpp.c
@@ -22,9 +22,10 @@ static unsigned int __init kirkwood_variant(void)
22 22
23 kirkwood_pcie_id(&dev, &rev); 23 kirkwood_pcie_id(&dev, &rev);
24 24
25 if ((dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0) || 25 if (dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0)
26 (dev == MV88F6282_DEV_ID))
27 return MPP_F6281_MASK; 26 return MPP_F6281_MASK;
27 if (dev == MV88F6282_DEV_ID)
28 return MPP_F6282_MASK;
28 if (dev == MV88F6192_DEV_ID && rev >= MV88F6192_REV_A0) 29 if (dev == MV88F6192_DEV_ID && rev >= MV88F6192_REV_A0)
29 return MPP_F6192_MASK; 30 return MPP_F6192_MASK;
30 if (dev == MV88F6180_DEV_ID) 31 if (dev == MV88F6180_DEV_ID)
diff --git a/arch/arm/mach-mvebu/coherency_ll.S b/arch/arm/mach-mvebu/coherency_ll.S
index 53e8391192cd..5476669ba905 100644
--- a/arch/arm/mach-mvebu/coherency_ll.S
+++ b/arch/arm/mach-mvebu/coherency_ll.S
@@ -32,15 +32,21 @@ ENTRY(ll_set_cpu_coherent)
32 32
33 /* Add CPU to SMP group - Atomic */ 33 /* Add CPU to SMP group - Atomic */
34 add r3, r0, #ARMADA_XP_CFB_CTL_REG_OFFSET 34 add r3, r0, #ARMADA_XP_CFB_CTL_REG_OFFSET
35 ldr r2, [r3] 351:
36 ldrex r2, [r3]
36 orr r2, r2, r1 37 orr r2, r2, r1
37 str r2, [r3] 38 strex r0, r2, [r3]
39 cmp r0, #0
40 bne 1b
38 41
39 /* Enable coherency on CPU - Atomic */ 42 /* Enable coherency on CPU - Atomic */
40 add r3, r0, #ARMADA_XP_CFB_CFG_REG_OFFSET 43 add r3, r3, #ARMADA_XP_CFB_CFG_REG_OFFSET
41 ldr r2, [r3] 441:
45 ldrex r2, [r3]
42 orr r2, r2, r1 46 orr r2, r2, r1
43 str r2, [r3] 47 strex r0, r2, [r3]
48 cmp r0, #0
49 bne 1b
44 50
45 dsb 51 dsb
46 52
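The coherency_ll.S change above replaces a plain load/modify/store with an exclusive-monitor retry loop, so concurrent CPUs cannot lose each other's updates. The same read-modify-write idiom expressed as C inline assembly (a generic sketch, not taken from the kernel):

static inline void atomic_or_u32(volatile unsigned int *addr, unsigned int mask)
{
	unsigned int val, tmp;

	asm volatile(
	"1:	ldrex	%0, [%2]\n"	/* exclusively load the word           */
	"	orr	%0, %0, %3\n"	/* set the requested bits              */
	"	strex	%1, %0, [%2]\n"	/* try to store; %1 == 0 on success    */
	"	cmp	%1, #0\n"
	"	bne	1b\n"		/* another CPU intervened, retry       */
	: "=&r" (val), "=&r" (tmp)
	: "r" (addr), "r" (mask)
	: "cc", "memory");
}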
diff --git a/arch/arm/mach-omap2/clock36xx.c b/arch/arm/mach-omap2/clock36xx.c
index 8f3bf4e50908..bbd6a3f717e6 100644
--- a/arch/arm/mach-omap2/clock36xx.c
+++ b/arch/arm/mach-omap2/clock36xx.c
@@ -20,11 +20,12 @@
20 20
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/clk.h> 22#include <linux/clk.h>
23#include <linux/clk-provider.h>
23#include <linux/io.h> 24#include <linux/io.h>
24 25
25#include "clock.h" 26#include "clock.h"
26#include "clock36xx.h" 27#include "clock36xx.h"
27 28#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
28 29
29/** 30/**
30 * omap36xx_pwrdn_clk_enable_with_hsdiv_restore - enable clocks suffering 31 * omap36xx_pwrdn_clk_enable_with_hsdiv_restore - enable clocks suffering
@@ -39,29 +40,28 @@
39 */ 40 */
40int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *clk) 41int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
41{ 42{
42 struct clk_hw_omap *parent; 43 struct clk_divider *parent;
43 struct clk_hw *parent_hw; 44 struct clk_hw *parent_hw;
44 u32 dummy_v, orig_v, clksel_shift; 45 u32 dummy_v, orig_v;
45 int ret; 46 int ret;
46 47
47 /* Clear PWRDN bit of HSDIVIDER */ 48 /* Clear PWRDN bit of HSDIVIDER */
48 ret = omap2_dflt_clk_enable(clk); 49 ret = omap2_dflt_clk_enable(clk);
49 50
50 parent_hw = __clk_get_hw(__clk_get_parent(clk->clk)); 51 parent_hw = __clk_get_hw(__clk_get_parent(clk->clk));
51 parent = to_clk_hw_omap(parent_hw); 52 parent = to_clk_divider(parent_hw);
52 53
53 /* Restore the dividers */ 54 /* Restore the dividers */
54 if (!ret) { 55 if (!ret) {
55 clksel_shift = __ffs(parent->clksel_mask); 56 orig_v = __raw_readl(parent->reg);
56 orig_v = __raw_readl(parent->clksel_reg);
57 dummy_v = orig_v; 57 dummy_v = orig_v;
58 58
59 /* Write any other value different from the Read value */ 59 /* Write any other value different from the Read value */
60 dummy_v ^= (1 << clksel_shift); 60 dummy_v ^= (1 << parent->shift);
61 __raw_writel(dummy_v, parent->clksel_reg); 61 __raw_writel(dummy_v, parent->reg);
62 62
63 /* Write the original divider */ 63 /* Write the original divider */
64 __raw_writel(orig_v, parent->clksel_reg); 64 __raw_writel(orig_v, parent->reg);
65 } 65 }
66 66
67 return ret; 67 return ret;
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index 075f7cc51026..69337af748cc 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -2007,6 +2007,13 @@ static struct omap_hwmod am33xx_uart1_hwmod = {
2007 }, 2007 },
2008}; 2008};
2009 2009
2010/* uart2 */
2011static struct omap_hwmod_dma_info uart2_edma_reqs[] = {
2012 { .name = "tx", .dma_req = 28, },
2013 { .name = "rx", .dma_req = 29, },
2014 { .dma_req = -1 }
2015};
2016
2010static struct omap_hwmod_irq_info am33xx_uart2_irqs[] = { 2017static struct omap_hwmod_irq_info am33xx_uart2_irqs[] = {
2011 { .irq = 73 + OMAP_INTC_START, }, 2018 { .irq = 73 + OMAP_INTC_START, },
2012 { .irq = -1 }, 2019 { .irq = -1 },
@@ -2018,7 +2025,7 @@ static struct omap_hwmod am33xx_uart2_hwmod = {
2018 .clkdm_name = "l4ls_clkdm", 2025 .clkdm_name = "l4ls_clkdm",
2019 .flags = HWMOD_SWSUP_SIDLE_ACT, 2026 .flags = HWMOD_SWSUP_SIDLE_ACT,
2020 .mpu_irqs = am33xx_uart2_irqs, 2027 .mpu_irqs = am33xx_uart2_irqs,
2021 .sdma_reqs = uart1_edma_reqs, 2028 .sdma_reqs = uart2_edma_reqs,
2022 .main_clk = "dpll_per_m2_div4_ck", 2029 .main_clk = "dpll_per_m2_div4_ck",
2023 .prcm = { 2030 .prcm = {
2024 .omap4 = { 2031 .omap4 = {
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index c01859398b54..5a2d8034c8de 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -546,8 +546,10 @@ static void __init prcm_setup_regs(void)
546 /* Clear any pending PRCM interrupts */ 546 /* Clear any pending PRCM interrupts */
547 omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET); 547 omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
548 548
549 if (omap3_has_iva()) 549 /*
550 omap3_iva_idle(); 550 * We need to idle iva2_pwrdm even on am3703 with no iva2.
551 */
552 omap3_iva_idle();
551 553
552 omap3_d2d_idle(); 554 omap3_d2d_idle();
553} 555}
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c
index 9936c180bf01..8f595c0cc8d9 100644
--- a/arch/arm/mach-prima2/pm.c
+++ b/arch/arm/mach-prima2/pm.c
@@ -101,8 +101,10 @@ static int __init sirfsoc_of_pwrc_init(void)
101 struct device_node *np; 101 struct device_node *np;
102 102
103 np = of_find_matching_node(NULL, pwrc_ids); 103 np = of_find_matching_node(NULL, pwrc_ids);
104 if (!np) 104 if (!np) {
105 panic("unable to find compatible pwrc node in dtb\n"); 105 pr_err("unable to find compatible sirf pwrc node in dtb\n");
106 return -ENOENT;
107 }
106 108
107 /* 109 /*
108 * pwrc behind rtciobrg is not located in memory space 110 * pwrc behind rtciobrg is not located in memory space
diff --git a/arch/arm/mach-prima2/rstc.c b/arch/arm/mach-prima2/rstc.c
index 435019ca0a48..d5e0cbc934c0 100644
--- a/arch/arm/mach-prima2/rstc.c
+++ b/arch/arm/mach-prima2/rstc.c
@@ -28,8 +28,10 @@ static int __init sirfsoc_of_rstc_init(void)
28 struct device_node *np; 28 struct device_node *np;
29 29
30 np = of_find_matching_node(NULL, rstc_ids); 30 np = of_find_matching_node(NULL, rstc_ids);
31 if (!np) 31 if (!np) {
32 panic("unable to find compatible rstc node in dtb\n"); 32 pr_err("unable to find compatible sirf rstc node in dtb\n");
33 return -ENOENT;
34 }
33 35
34 sirfsoc_rstc_base = of_iomap(np, 0); 36 sirfsoc_rstc_base = of_iomap(np, 0);
35 if (!sirfsoc_rstc_base) 37 if (!sirfsoc_rstc_base)
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index fdf3894b1cc3..9696f3646864 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -252,7 +252,7 @@ static struct sh_timer_config cmt10_platform_data = {
252 .name = "CMT10", 252 .name = "CMT10",
253 .channel_offset = 0x10, 253 .channel_offset = 0x10,
254 .timer_bit = 0, 254 .timer_bit = 0,
255 .clockevent_rating = 125, 255 .clockevent_rating = 80,
256 .clocksource_rating = 125, 256 .clocksource_rating = 125,
257}; 257};
258 258
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.c b/arch/arm/mach-ux500/board-mop500-regulators.c
index 33c353bc1c4a..d6b7c8556fa1 100644
--- a/arch/arm/mach-ux500/board-mop500-regulators.c
+++ b/arch/arm/mach-ux500/board-mop500-regulators.c
@@ -374,6 +374,7 @@ static struct ab8500_regulator_reg_init ab8500_reg_init[] = {
374static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { 374static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
375 /* supplies to the display/camera */ 375 /* supplies to the display/camera */
376 [AB8500_LDO_AUX1] = { 376 [AB8500_LDO_AUX1] = {
377 .supply_regulator = "ab8500-ext-supply3",
377 .constraints = { 378 .constraints = {
378 .name = "V-DISPLAY", 379 .name = "V-DISPLAY",
379 .min_uV = 2800000, 380 .min_uV = 2800000,
@@ -387,6 +388,7 @@ static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
387 }, 388 },
388 /* supplies to the on-board eMMC */ 389 /* supplies to the on-board eMMC */
389 [AB8500_LDO_AUX2] = { 390 [AB8500_LDO_AUX2] = {
391 .supply_regulator = "ab8500-ext-supply3",
390 .constraints = { 392 .constraints = {
391 .name = "V-eMMC1", 393 .name = "V-eMMC1",
392 .min_uV = 1100000, 394 .min_uV = 1100000,
@@ -402,6 +404,7 @@ static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
402 }, 404 },
403 /* supply for VAUX3, supplies to SDcard slots */ 405 /* supply for VAUX3, supplies to SDcard slots */
404 [AB8500_LDO_AUX3] = { 406 [AB8500_LDO_AUX3] = {
407 .supply_regulator = "ab8500-ext-supply3",
405 .constraints = { 408 .constraints = {
406 .name = "V-MMC-SD", 409 .name = "V-MMC-SD",
407 .min_uV = 1100000, 410 .min_uV = 1100000,
diff --git a/arch/arm/mach-ux500/cpuidle.c b/arch/arm/mach-ux500/cpuidle.c
index 317a2be129fb..a45dd09daed9 100644
--- a/arch/arm/mach-ux500/cpuidle.c
+++ b/arch/arm/mach-ux500/cpuidle.c
@@ -21,6 +21,7 @@
21#include <asm/proc-fns.h> 21#include <asm/proc-fns.h>
22 22
23#include "db8500-regs.h" 23#include "db8500-regs.h"
24#include "id.h"
24 25
25static atomic_t master = ATOMIC_INIT(0); 26static atomic_t master = ATOMIC_INIT(0);
26static DEFINE_SPINLOCK(master_lock); 27static DEFINE_SPINLOCK(master_lock);
@@ -114,6 +115,9 @@ static struct cpuidle_driver ux500_idle_driver = {
114 115
115int __init ux500_idle_init(void) 116int __init ux500_idle_init(void)
116{ 117{
118 if (!(cpu_is_u8500_family() || cpu_is_ux540_family()))
119 return -ENODEV;
120
117 /* Configure wake up reasons */ 121 /* Configure wake up reasons */
118 prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) | 122 prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
119 PRCMU_WAKEUP(ABB)); 123 PRCMU_WAKEUP(ABB));
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 15451ee4acc8..515b00064da8 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -92,6 +92,14 @@ ENTRY(v7_flush_dcache_louis)
92 mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr 92 mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr
93 ALT_SMP(ands r3, r0, #(7 << 21)) @ extract LoUIS from clidr 93 ALT_SMP(ands r3, r0, #(7 << 21)) @ extract LoUIS from clidr
94 ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr 94 ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr
95#ifdef CONFIG_ARM_ERRATA_643719
96 ALT_SMP(mrceq p15, 0, r2, c0, c0, 0) @ read main ID register
97 ALT_UP(moveq pc, lr) @ LoUU is zero, so nothing to do
98 ldreq r1, =0x410fc090 @ ID of ARM Cortex A9 r0p?
99 biceq r2, r2, #0x0000000f @ clear minor revision number
100 teqeq r2, r1 @ test for errata affected core and if so...
101 orreqs r3, #(1 << 21) @ fix LoUIS value (and set flags state to 'ne')
102#endif
95 ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2 103 ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2
96 ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2 104 ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2
97 moveq pc, lr @ return if level == 0 105 moveq pc, lr @ return if level == 0
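
The cache-v7.S hunk above works around ARM erratum 643719: on affected Cortex-A9 parts CLIDR can report a Level of Unification Inner Shareable of zero, so the code re-reads the main ID register and forces LoUIS to 1 when the part matches. A rough C restatement of just the ID test (illustrative only, not part of the patch; the MIDR value would normally come from read_cpuid_id()):

/*
 * 0x410fc090 is the Cortex-A9 MIDR with the minor revision field cleared,
 * so the compare matches any Cortex-A9 r0pX part.
 */
static inline int erratum_643719_affected(unsigned int midr)
{
        return (midr & ~0xfU) == 0x410fc090;   /* ignore minor revision, then compare */
}
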
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 0d473cce501c..32aa5861119f 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -301,6 +301,39 @@ void flush_dcache_page(struct page *page)
301EXPORT_SYMBOL(flush_dcache_page); 301EXPORT_SYMBOL(flush_dcache_page);
302 302
303/* 303/*
304 * Ensure cache coherency for the kernel mapping of this page. We can
305 * assume that the page is pinned via kmap.
306 *
307 * If the page only exists in the page cache and there are no user
308 * space mappings, this is a no-op since the page was already marked
309 * dirty at creation. Otherwise, we need to flush the dirty kernel
310 * cache lines directly.
311 */
312void flush_kernel_dcache_page(struct page *page)
313{
314 if (cache_is_vivt() || cache_is_vipt_aliasing()) {
315 struct address_space *mapping;
316
317 mapping = page_mapping(page);
318
319 if (!mapping || mapping_mapped(mapping)) {
320 void *addr;
321
322 addr = page_address(page);
323 /*
324 * kmap_atomic() doesn't set the page virtual
325 * address for highmem pages, and
326 * kunmap_atomic() takes care of cache
327 * flushing already.
328 */
329 if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
330 __cpuc_flush_dcache_area(addr, PAGE_SIZE);
331 }
332 }
333}
334EXPORT_SYMBOL(flush_kernel_dcache_page);
335
336/*
304 * Flush an anonymous page so that users of get_user_pages() 337 * Flush an anonymous page so that users of get_user_pages()
305 * can safely access the data. The expected sequence is: 338 * can safely access the data. The expected sequence is:
306 * 339 *
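
flush_kernel_dcache_page(), added above for the VIVT/VIPT-aliasing case, is the hook a driver calls after writing to a page through its kernel mapping so that user-space mappings (or hardware) see the new data. A minimal usage sketch, assuming a hypothetical fill_page_example() caller that is not part of this patch:

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/string.h>

static void fill_page_example(struct page *page, const void *src, size_t len)
{
        void *dst = kmap(page);                         /* pin a kernel mapping of the page */

        memcpy(dst, src, min_t(size_t, len, PAGE_SIZE));
        flush_kernel_dcache_page(page);                 /* write back the dirty kernel-side lines */
        kunmap(page);
}
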
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e0d8565671a6..4d409e6a552d 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -616,10 +616,12 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
616 } while (pte++, addr += PAGE_SIZE, addr != end); 616 } while (pte++, addr += PAGE_SIZE, addr != end);
617} 617}
618 618
619static void __init map_init_section(pmd_t *pmd, unsigned long addr, 619static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
620 unsigned long end, phys_addr_t phys, 620 unsigned long end, phys_addr_t phys,
621 const struct mem_type *type) 621 const struct mem_type *type)
622{ 622{
623 pmd_t *p = pmd;
624
623#ifndef CONFIG_ARM_LPAE 625#ifndef CONFIG_ARM_LPAE
624 /* 626 /*
625 * In classic MMU format, puds and pmds are folded in to 627 * In classic MMU format, puds and pmds are folded in to
@@ -638,7 +640,7 @@ static void __init map_init_section(pmd_t *pmd, unsigned long addr,
638 phys += SECTION_SIZE; 640 phys += SECTION_SIZE;
639 } while (pmd++, addr += SECTION_SIZE, addr != end); 641 } while (pmd++, addr += SECTION_SIZE, addr != end);
640 642
641 flush_pmd_entry(pmd); 643 flush_pmd_entry(p);
642} 644}
643 645
644static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, 646static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
@@ -661,7 +663,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
661 */ 663 */
662 if (type->prot_sect && 664 if (type->prot_sect &&
663 ((addr | next | phys) & ~SECTION_MASK) == 0) { 665 ((addr | next | phys) & ~SECTION_MASK) == 0) {
664 map_init_section(pmd, addr, next, phys, type); 666 __map_init_section(pmd, addr, next, phys, type);
665 } else { 667 } else {
666 alloc_init_pte(pmd, addr, next, 668 alloc_init_pte(pmd, addr, next,
667 __phys_to_pfn(phys), type); 669 __phys_to_pfn(phys), type);
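
The __map_init_section() fix above saves the incoming pmd pointer in p before the do/while loop, because the loop advances pmd past every entry it writes; flushing the advanced cursor (the old flush_pmd_entry(pmd)) cleans the wrong location. The same save-the-start pattern, reduced to a sketch with a hypothetical flush_entries() helper:

void flush_entries(unsigned int *ent);          /* hypothetical: clean the cache lines behind ent */

static void write_then_flush(unsigned int *ent, unsigned int first_val, int n)
{
        unsigned int *start = ent;              /* remember where the writes begin */
        int i;

        for (i = 0; i < n; i++)
                *ent++ = first_val + i;

        flush_entries(start);                   /* not flush_entries(ent): cursor is past the end */
}
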
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index d51225f90ae2..eb5293a69a84 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -57,6 +57,12 @@ void flush_dcache_page(struct page *page)
57} 57}
58EXPORT_SYMBOL(flush_dcache_page); 58EXPORT_SYMBOL(flush_dcache_page);
59 59
60void flush_kernel_dcache_page(struct page *page)
61{
62 __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
63}
64EXPORT_SYMBOL(flush_kernel_dcache_page);
65
60void copy_to_user_page(struct vm_area_struct *vma, struct page *page, 66void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
61 unsigned long uaddr, void *dst, const void *src, 67 unsigned long uaddr, void *dst, const void *src,
62 unsigned long len) 68 unsigned long len)
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index d217e9795d74..aaeb6c127c7a 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -81,7 +81,6 @@ ENDPROC(cpu_fa526_reset)
81 */ 81 */
82 .align 4 82 .align 4
83ENTRY(cpu_fa526_do_idle) 83ENTRY(cpu_fa526_do_idle)
84 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
85 mov pc, lr 84 mov pc, lr
86 85
87 86
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index f9a0aa725ea9..e3c48a3fe063 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -333,3 +333,8 @@ ENTRY(\name\()_tlb_fns)
333 .endif 333 .endif
334 .size \name\()_tlb_fns, . - \name\()_tlb_fns 334 .size \name\()_tlb_fns, . - \name\()_tlb_fns
335.endm 335.endm
336
337.macro globl_equ x, y
338 .globl \x
339 .equ \x, \y
340.endm
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 2c73a7301ff7..e35fec34453e 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -140,6 +140,29 @@ ENTRY(cpu_v7_do_resume)
140ENDPROC(cpu_v7_do_resume) 140ENDPROC(cpu_v7_do_resume)
141#endif 141#endif
142 142
143#ifdef CONFIG_CPU_PJ4B
144 globl_equ cpu_pj4b_switch_mm, cpu_v7_switch_mm
145 globl_equ cpu_pj4b_set_pte_ext, cpu_v7_set_pte_ext
146 globl_equ cpu_pj4b_proc_init, cpu_v7_proc_init
147 globl_equ cpu_pj4b_proc_fin, cpu_v7_proc_fin
148 globl_equ cpu_pj4b_reset, cpu_v7_reset
149#ifdef CONFIG_PJ4B_ERRATA_4742
150ENTRY(cpu_pj4b_do_idle)
151 dsb @ WFI may enter a low-power mode
152 wfi
153 dsb @barrier
154 mov pc, lr
155ENDPROC(cpu_pj4b_do_idle)
156#else
157 globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle
158#endif
159 globl_equ cpu_pj4b_dcache_clean_area, cpu_v7_dcache_clean_area
160 globl_equ cpu_pj4b_do_suspend, cpu_v7_do_suspend
161 globl_equ cpu_pj4b_do_resume, cpu_v7_do_resume
162 globl_equ cpu_pj4b_suspend_size, cpu_v7_suspend_size
163
164#endif
165
143 __CPUINIT 166 __CPUINIT
144 167
145/* 168/*
@@ -350,6 +373,9 @@ __v7_setup_stack:
350 373
351 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) 374 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
352 define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 375 define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
376#ifdef CONFIG_CPU_PJ4B
377 define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
378#endif
353 379
354 .section ".rodata" 380 .section ".rodata"
355 381
@@ -362,7 +388,7 @@ __v7_setup_stack:
362 /* 388 /*
363 * Standard v7 proc info content 389 * Standard v7 proc info content
364 */ 390 */
365.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0 391.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions
366 ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ 392 ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
367 PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags) 393 PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags)
368 ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ 394 ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
@@ -375,7 +401,7 @@ __v7_setup_stack:
375 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \ 401 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \
376 HWCAP_EDSP | HWCAP_TLS | \hwcaps 402 HWCAP_EDSP | HWCAP_TLS | \hwcaps
377 .long cpu_v7_name 403 .long cpu_v7_name
378 .long v7_processor_functions 404 .long \proc_fns
379 .long v7wbi_tlb_fns 405 .long v7wbi_tlb_fns
380 .long v6_user_fns 406 .long v6_user_fns
381 .long v7_cache_fns 407 .long v7_cache_fns
@@ -407,12 +433,14 @@ __v7_ca9mp_proc_info:
407 /* 433 /*
408 * Marvell PJ4B processor. 434 * Marvell PJ4B processor.
409 */ 435 */
436#ifdef CONFIG_CPU_PJ4B
410 .type __v7_pj4b_proc_info, #object 437 .type __v7_pj4b_proc_info, #object
411__v7_pj4b_proc_info: 438__v7_pj4b_proc_info:
412 .long 0x562f5840 439 .long 0x560f5800
413 .long 0xfffffff0 440 .long 0xff0fff00
414 __v7_proc __v7_pj4b_setup 441 __v7_proc __v7_pj4b_setup, proc_fns = pj4b_processor_functions
415 .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info 442 .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info
443#endif
416 444
417 /* 445 /*
418 * ARM Ltd. Cortex A7 processor. 446 * ARM Ltd. Cortex A7 processor.
diff --git a/arch/arm/plat-samsung/include/plat/uncompress.h b/arch/arm/plat-samsung/include/plat/uncompress.h
index 438b24846e7f..02b66d723d1a 100644
--- a/arch/arm/plat-samsung/include/plat/uncompress.h
+++ b/arch/arm/plat-samsung/include/plat/uncompress.h
@@ -66,6 +66,9 @@ uart_rd(unsigned int reg)
66 66
67static void putc(int ch) 67static void putc(int ch)
68{ 68{
69 if (!config_enabled(CONFIG_DEBUG_LL))
70 return;
71
69 if (uart_rd(S3C2410_UFCON) & S3C2410_UFCON_FIFOMODE) { 72 if (uart_rd(S3C2410_UFCON) & S3C2410_UFCON_FIFOMODE) {
70 int level; 73 int level;
71 74
@@ -118,7 +121,12 @@ static void arch_decomp_error(const char *x)
118#ifdef CONFIG_S3C_BOOT_UART_FORCE_FIFO 121#ifdef CONFIG_S3C_BOOT_UART_FORCE_FIFO
119static inline void arch_enable_uart_fifo(void) 122static inline void arch_enable_uart_fifo(void)
120{ 123{
121 u32 fifocon = uart_rd(S3C2410_UFCON); 124 u32 fifocon;
125
126 if (!config_enabled(CONFIG_DEBUG_LL))
127 return;
128
129 fifocon = uart_rd(S3C2410_UFCON);
122 130
123 if (!(fifocon & S3C2410_UFCON_FIFOMODE)) { 131 if (!(fifocon & S3C2410_UFCON_FIFOMODE)) {
124 fifocon |= S3C2410_UFCON_RESETBOTH; 132 fifocon |= S3C2410_UFCON_RESETBOTH;
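
Both uncompress.h hunks above make the decompressor's UART helpers bail out immediately when CONFIG_DEBUG_LL is not selected, so the boot path never pokes UART registers it was not configured for. The same compile-time guard, sketched with hypothetical debug_putc()/uart_write_char() names (the patch itself uses config_enabled(), which for a boolean option behaves like IS_ENABLED()):

#include <linux/kconfig.h>

void uart_write_char(int ch);                   /* hypothetical low-level register write */

static void debug_putc(int ch)
{
        if (!IS_ENABLED(CONFIG_DEBUG_LL))
                return;                         /* compiles away to nothing unless DEBUG_LL=y */

        uart_write_char(ch);
}
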
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index 53210ec4e8ec..bd7124c87fea 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -16,6 +16,7 @@
16#include <linux/suspend.h> 16#include <linux/suspend.h>
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/of.h>
19#include <linux/serial_core.h> 20#include <linux/serial_core.h>
20#include <linux/io.h> 21#include <linux/io.h>
21 22
@@ -261,7 +262,8 @@ static int s3c_pm_enter(suspend_state_t state)
261 * require a full power-cycle) 262 * require a full power-cycle)
262 */ 263 */
263 264
264 if (!any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) && 265 if (!of_have_populated_dt() &&
266 !any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
265 !any_allowed(s3c_irqwake_eintmask, s3c_irqwake_eintallow)) { 267 !any_allowed(s3c_irqwake_eintmask, s3c_irqwake_eintallow)) {
266 printk(KERN_ERR "%s: No wake-up sources!\n", __func__); 268 printk(KERN_ERR "%s: No wake-up sources!\n", __func__);
267 printk(KERN_ERR "%s: Aborting sleep\n", __func__); 269 printk(KERN_ERR "%s: Aborting sleep\n", __func__);
@@ -270,8 +272,11 @@ static int s3c_pm_enter(suspend_state_t state)
270 272
271 /* save all necessary core registers not covered by the drivers */ 273 /* save all necessary core registers not covered by the drivers */
272 274
273 samsung_pm_save_gpios(); 275 if (!of_have_populated_dt()) {
274 samsung_pm_saved_gpios(); 276 samsung_pm_save_gpios();
277 samsung_pm_saved_gpios();
278 }
279
275 s3c_pm_save_uarts(); 280 s3c_pm_save_uarts();
276 s3c_pm_save_core(); 281 s3c_pm_save_core();
277 282
@@ -310,8 +315,11 @@ static int s3c_pm_enter(suspend_state_t state)
310 315
311 s3c_pm_restore_core(); 316 s3c_pm_restore_core();
312 s3c_pm_restore_uarts(); 317 s3c_pm_restore_uarts();
313 samsung_pm_restore_gpios(); 318
314 s3c_pm_restored_gpios(); 319 if (!of_have_populated_dt()) {
320 samsung_pm_restore_gpios();
321 s3c_pm_restored_gpios();
322 }
315 323
316 s3c_pm_debug_init(); 324 s3c_pm_debug_init();
317 325
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 7df1aad29b67..41b4f626d554 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -34,6 +34,7 @@ EXPORT_SYMBOL(__strnlen_user);
34EXPORT_SYMBOL(__strncpy_from_user); 34EXPORT_SYMBOL(__strncpy_from_user);
35 35
36EXPORT_SYMBOL(copy_page); 36EXPORT_SYMBOL(copy_page);
37EXPORT_SYMBOL(clear_page);
37 38
38EXPORT_SYMBOL(__copy_from_user); 39EXPORT_SYMBOL(__copy_from_user);
39EXPORT_SYMBOL(__copy_to_user); 40EXPORT_SYMBOL(__copy_to_user);
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index c7e047049f2c..1d1314280a03 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -390,6 +390,16 @@ el0_sync_compat:
390 b.eq el0_fpsimd_exc 390 b.eq el0_fpsimd_exc
391 cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL0 391 cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL0
392 b.eq el0_undef 392 b.eq el0_undef
393 cmp x24, #ESR_EL1_EC_CP15_32 // CP15 MRC/MCR trap
394 b.eq el0_undef
395 cmp x24, #ESR_EL1_EC_CP15_64 // CP15 MRRC/MCRR trap
396 b.eq el0_undef
397 cmp x24, #ESR_EL1_EC_CP14_MR // CP14 MRC/MCR trap
398 b.eq el0_undef
399 cmp x24, #ESR_EL1_EC_CP14_LS // CP14 LDC/STC trap
400 b.eq el0_undef
401 cmp x24, #ESR_EL1_EC_CP14_64 // CP14 MRRC/MCRR trap
402 b.eq el0_undef
393 cmp x24, #ESR_EL1_EC_BREAKPT_EL0 // debug exception in EL0 403 cmp x24, #ESR_EL1_EC_BREAKPT_EL0 // debug exception in EL0
394 b.ge el0_dbg 404 b.ge el0_dbg
395 b el0_inv 405 b el0_inv
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 1e49e5eb81e9..9ba33c40cdf8 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1336,6 +1336,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
1336 return; 1336 return;
1337 } 1337 }
1338 1338
1339 perf_callchain_store(entry, regs->pc);
1339 tail = (struct frame_tail __user *)regs->regs[29]; 1340 tail = (struct frame_tail __user *)regs->regs[29];
1340 1341
1341 while (entry->nr < PERF_MAX_STACK_DEPTH && 1342 while (entry->nr < PERF_MAX_STACK_DEPTH &&
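
The perf_event.c change above stores the interrupted pc as the first entry of a user callchain before walking the saved frame records; the frame-pointer chain only yields the callers' return addresses, so without the extra store the leaf function is absent from every sample. How the fixed walker reads as a whole, condensed into a sketch (struct frame_tail and user_backtrace() are the file's own helpers, shown here only in outline):

static void user_callchain_sketch(struct perf_callchain_entry *entry,
                                  struct pt_regs *regs)
{
        struct frame_tail __user *tail;

        perf_callchain_store(entry, regs->pc);                  /* leaf frame first */
        tail = (struct frame_tail __user *)regs->regs[29];      /* x29 holds the frame pointer */

        while (entry->nr < PERF_MAX_STACK_DEPTH && tail)
                tail = user_backtrace(tail, entry);             /* appends one caller per step */
}
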
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 61d7dd29f756..f30852d28590 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -267,7 +267,8 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
267 return; 267 return;
268#endif 268#endif
269 269
270 if (show_unhandled_signals) { 270 if (show_unhandled_signals && unhandled_signal(current, SIGILL) &&
271 printk_ratelimit()) {
271 pr_info("%s[%d]: undefined instruction: pc=%p\n", 272 pr_info("%s[%d]: undefined instruction: pc=%p\n",
272 current->comm, task_pid_nr(current), pc); 273 current->comm, task_pid_nr(current), pc);
273 dump_instr(KERN_INFO, regs); 274 dump_instr(KERN_INFO, regs);
@@ -294,7 +295,7 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
294 } 295 }
295#endif 296#endif
296 297
297 if (show_unhandled_signals) { 298 if (show_unhandled_signals && printk_ratelimit()) {
298 pr_info("%s[%d]: syscall %d\n", current->comm, 299 pr_info("%s[%d]: syscall %d\n", current->comm,
299 task_pid_nr(current), (int)regs->syscallno); 300 task_pid_nr(current), (int)regs->syscallno);
300 dump_instr("", regs); 301 dump_instr("", regs);
@@ -310,14 +311,20 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
310 */ 311 */
311asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) 312asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
312{ 313{
314 siginfo_t info;
315 void __user *pc = (void __user *)instruction_pointer(regs);
313 console_verbose(); 316 console_verbose();
314 317
315 pr_crit("Bad mode in %s handler detected, code 0x%08x\n", 318 pr_crit("Bad mode in %s handler detected, code 0x%08x\n",
316 handler[reason], esr); 319 handler[reason], esr);
320 __show_regs(regs);
321
322 info.si_signo = SIGILL;
323 info.si_errno = 0;
324 info.si_code = ILL_ILLOPC;
325 info.si_addr = pc;
317 326
318 die("Oops - bad mode", regs, 0); 327 arm64_notify_die("Oops - bad mode", regs, &info, 0);
319 local_irq_disable();
320 panic("bad mode");
321} 328}
322 329
323void __pte_error(const char *file, int line, unsigned long val) 330void __pte_error(const char *file, int line, unsigned long val)
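
The arm64 traps.c hunks above stop logging unconditionally: an undefined-instruction or unknown-syscall message is now printed only if the task has no handler for the resulting signal and the printk rate limit has not been hit, so a misbehaving program cannot flood the kernel log. The guard pattern on its own, as a sketch (report_bad_insn() is hypothetical; the three checks are the ones the hunks add):

#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/printk.h>

extern int show_unhandled_signals;              /* same flag the hunks test */

static void report_bad_insn(struct task_struct *tsk, void __user *pc)
{
        if (!show_unhandled_signals)
                return;
        if (!unhandled_signal(tsk, SIGILL))     /* task handles SIGILL itself: stay quiet */
                return;
        if (!printk_ratelimit())                /* enforce the global message rate limit */
                return;

        pr_info("%s[%d]: undefined instruction: pc=%p\n",
                tsk->comm, task_pid_nr(tsk), pc);
}
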
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 98af6e760cce..1426468b77f3 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -113,7 +113,8 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
113{ 113{
114 struct siginfo si; 114 struct siginfo si;
115 115
116 if (show_unhandled_signals) { 116 if (show_unhandled_signals && unhandled_signal(tsk, sig) &&
117 printk_ratelimit()) {
117 pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n", 118 pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
118 tsk->comm, task_pid_nr(tsk), fault_name(esr), sig, 119 tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
119 addr, esr); 120 addr, esr);
diff --git a/arch/ia64/include/asm/irqflags.h b/arch/ia64/include/asm/irqflags.h
index 1bf2cf2f4ab4..cec6c06b52c0 100644
--- a/arch/ia64/include/asm/irqflags.h
+++ b/arch/ia64/include/asm/irqflags.h
@@ -11,6 +11,7 @@
11#define _ASM_IA64_IRQFLAGS_H 11#define _ASM_IA64_IRQFLAGS_H
12 12
13#include <asm/pal.h> 13#include <asm/pal.h>
14#include <asm/kregs.h>
14 15
15#ifdef CONFIG_IA64_DEBUG_IRQ 16#ifdef CONFIG_IA64_DEBUG_IRQ
16extern unsigned long last_cli_ip; 17extern unsigned long last_cli_ip;
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index c3ffe3e54edc..ef3a9de01954 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -46,12 +46,6 @@
46#include <asm/tlbflush.h> 46#include <asm/tlbflush.h>
47#include <asm/machvec.h> 47#include <asm/machvec.h>
48 48
49#ifdef CONFIG_SMP
50# define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
51#else
52# define tlb_fast_mode(tlb) (1)
53#endif
54
55/* 49/*
56 * If we can't allocate a page to make a big batch of page pointers 50 * If we can't allocate a page to make a big batch of page pointers
57 * to work on, then just handle a few from the on-stack structure. 51 * to work on, then just handle a few from the on-stack structure.
@@ -60,7 +54,7 @@
60 54
61struct mmu_gather { 55struct mmu_gather {
62 struct mm_struct *mm; 56 struct mm_struct *mm;
63 unsigned int nr; /* == ~0U => fast mode */ 57 unsigned int nr;
64 unsigned int max; 58 unsigned int max;
65 unsigned char fullmm; /* non-zero means full mm flush */ 59 unsigned char fullmm; /* non-zero means full mm flush */
66 unsigned char need_flush; /* really unmapped some PTEs? */ 60 unsigned char need_flush; /* really unmapped some PTEs? */
@@ -103,6 +97,7 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
103static inline void 97static inline void
104ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end) 98ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
105{ 99{
100 unsigned long i;
106 unsigned int nr; 101 unsigned int nr;
107 102
108 if (!tlb->need_flush) 103 if (!tlb->need_flush)
@@ -141,13 +136,11 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
141 136
142 /* lastly, release the freed pages */ 137 /* lastly, release the freed pages */
143 nr = tlb->nr; 138 nr = tlb->nr;
144 if (!tlb_fast_mode(tlb)) { 139
145 unsigned long i; 140 tlb->nr = 0;
146 tlb->nr = 0; 141 tlb->start_addr = ~0UL;
147 tlb->start_addr = ~0UL; 142 for (i = 0; i < nr; ++i)
148 for (i = 0; i < nr; ++i) 143 free_page_and_swap_cache(tlb->pages[i]);
149 free_page_and_swap_cache(tlb->pages[i]);
150 }
151} 144}
152 145
153static inline void __tlb_alloc_page(struct mmu_gather *tlb) 146static inline void __tlb_alloc_page(struct mmu_gather *tlb)
@@ -167,20 +160,7 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_m
167 tlb->mm = mm; 160 tlb->mm = mm;
168 tlb->max = ARRAY_SIZE(tlb->local); 161 tlb->max = ARRAY_SIZE(tlb->local);
169 tlb->pages = tlb->local; 162 tlb->pages = tlb->local;
170 /* 163 tlb->nr = 0;
171 * Use fast mode if only 1 CPU is online.
172 *
173 * It would be tempting to turn on fast-mode for full_mm_flush as well. But this
174 * doesn't work because of speculative accesses and software prefetching: the page
175 * table of "mm" may (and usually is) the currently active page table and even
176 * though the kernel won't do any user-space accesses during the TLB shoot down, a
177 * compiler might use speculation or lfetch.fault on what happens to be a valid
178 * user-space address. This in turn could trigger a TLB miss fault (or a VHPT
179 * walk) and re-insert a TLB entry we just removed. Slow mode avoids such
180 * problems. (We could make fast-mode work by switching the current task to a
181 * different "mm" during the shootdown.) --davidm 08/02/2002
182 */
183 tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
184 tlb->fullmm = full_mm_flush; 164 tlb->fullmm = full_mm_flush;
185 tlb->start_addr = ~0UL; 165 tlb->start_addr = ~0UL;
186} 166}
@@ -214,11 +194,6 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
214{ 194{
215 tlb->need_flush = 1; 195 tlb->need_flush = 1;
216 196
217 if (tlb_fast_mode(tlb)) {
218 free_page_and_swap_cache(page);
219 return 1; /* avoid calling tlb_flush_mmu */
220 }
221
222 if (!tlb->nr && tlb->pages == tlb->local) 197 if (!tlb->nr && tlb->pages == tlb->local)
223 __tlb_alloc_page(tlb); 198 __tlb_alloc_page(tlb);
224 199
diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h
index 8cc83431805b..2f6eec1e34b4 100644
--- a/arch/m68k/include/asm/gpio.h
+++ b/arch/m68k/include/asm/gpio.h
@@ -86,6 +86,7 @@ static inline int gpio_cansleep(unsigned gpio)
86 return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio); 86 return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio);
87} 87}
88 88
89#ifndef CONFIG_GPIOLIB
89static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) 90static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
90{ 91{
91 int err; 92 int err;
@@ -105,5 +106,5 @@ static inline int gpio_request_one(unsigned gpio, unsigned long flags, const cha
105 106
106 return err; 107 return err;
107} 108}
108 109#endif /* !CONFIG_GPIOLIB */
109#endif 110#endif
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
index d197e7ff62c5..ac85f16534af 100644
--- a/arch/m68k/kernel/head.S
+++ b/arch/m68k/kernel/head.S
@@ -2752,11 +2752,9 @@ func_return get_new_page
2752#ifdef CONFIG_MAC 2752#ifdef CONFIG_MAC
2753 2753
2754L(scc_initable_mac): 2754L(scc_initable_mac):
2755 .byte 9,12 /* Reset */
2756 .byte 4,0x44 /* x16, 1 stopbit, no parity */ 2755 .byte 4,0x44 /* x16, 1 stopbit, no parity */
2757 .byte 3,0xc0 /* receiver: 8 bpc */ 2756 .byte 3,0xc0 /* receiver: 8 bpc */
2758 .byte 5,0xe2 /* transmitter: 8 bpc, assert dtr/rts */ 2757 .byte 5,0xe2 /* transmitter: 8 bpc, assert dtr/rts */
2759 .byte 9,0 /* no interrupts */
2760 .byte 10,0 /* NRZ */ 2758 .byte 10,0 /* NRZ */
2761 .byte 11,0x50 /* use baud rate generator */ 2759 .byte 11,0x50 /* use baud rate generator */
2762 .byte 12,1,13,0 /* 38400 baud */ 2760 .byte 12,1,13,0 /* 38400 baud */
@@ -2899,6 +2897,7 @@ func_start serial_init,%d0/%d1/%a0/%a1
2899 is_not_mac(L(serial_init_not_mac)) 2897 is_not_mac(L(serial_init_not_mac))
2900 2898
2901#ifdef SERIAL_DEBUG 2899#ifdef SERIAL_DEBUG
2900
2902/* You may define either or both of these. */ 2901/* You may define either or both of these. */
2903#define MAC_USE_SCC_A /* Modem port */ 2902#define MAC_USE_SCC_A /* Modem port */
2904#define MAC_USE_SCC_B /* Printer port */ 2903#define MAC_USE_SCC_B /* Printer port */
@@ -2908,9 +2907,21 @@ func_start serial_init,%d0/%d1/%a0/%a1
2908#define mac_scc_cha_b_data_offset 0x4 2907#define mac_scc_cha_b_data_offset 0x4
2909#define mac_scc_cha_a_data_offset 0x6 2908#define mac_scc_cha_a_data_offset 0x6
2910 2909
2910#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
2911 movel %pc@(L(mac_sccbase)),%a0
2912 /* Reset SCC device */
2913 moveb #9,%a0@(mac_scc_cha_a_ctrl_offset)
2914 moveb #0xc0,%a0@(mac_scc_cha_a_ctrl_offset)
2915 /* Wait for 5 PCLK cycles, which is about 68 CPU cycles */
2916 /* 5 / 3.6864 MHz = approx. 1.36 us = 68 / 50 MHz */
2917 movel #35,%d0
29185:
2919 subq #1,%d0
2920 jne 5b
2921#endif
2922
2911#ifdef MAC_USE_SCC_A 2923#ifdef MAC_USE_SCC_A
2912 /* Initialize channel A */ 2924 /* Initialize channel A */
2913 movel %pc@(L(mac_sccbase)),%a0
2914 lea %pc@(L(scc_initable_mac)),%a1 2925 lea %pc@(L(scc_initable_mac)),%a1
29155: moveb %a1@+,%d0 29265: moveb %a1@+,%d0
2916 jmi 6f 2927 jmi 6f
@@ -2922,9 +2933,6 @@ func_start serial_init,%d0/%d1/%a0/%a1
2922 2933
2923#ifdef MAC_USE_SCC_B 2934#ifdef MAC_USE_SCC_B
2924 /* Initialize channel B */ 2935 /* Initialize channel B */
2925#ifndef MAC_USE_SCC_A /* Load mac_sccbase only if needed */
2926 movel %pc@(L(mac_sccbase)),%a0
2927#endif /* MAC_USE_SCC_A */
2928 lea %pc@(L(scc_initable_mac)),%a1 2936 lea %pc@(L(scc_initable_mac)),%a1
29297: moveb %a1@+,%d0 29377: moveb %a1@+,%d0
2930 jmi 8f 2938 jmi 8f
@@ -2933,6 +2941,7 @@ func_start serial_init,%d0/%d1/%a0/%a1
2933 jra 7b 2941 jra 7b
29348: 29428:
2935#endif /* MAC_USE_SCC_B */ 2943#endif /* MAC_USE_SCC_B */
2944
2936#endif /* SERIAL_DEBUG */ 2945#endif /* SERIAL_DEBUG */
2937 2946
2938 jra L(serial_init_done) 2947 jra L(serial_init_done)
@@ -3006,17 +3015,17 @@ func_start serial_putc,%d0/%d1/%a0/%a1
3006 3015
3007#ifdef SERIAL_DEBUG 3016#ifdef SERIAL_DEBUG
3008 3017
3009#ifdef MAC_USE_SCC_A 3018#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
3010 movel %pc@(L(mac_sccbase)),%a1 3019 movel %pc@(L(mac_sccbase)),%a1
3020#endif
3021
3022#ifdef MAC_USE_SCC_A
30113: btst #2,%a1@(mac_scc_cha_a_ctrl_offset) 30233: btst #2,%a1@(mac_scc_cha_a_ctrl_offset)
3012 jeq 3b 3024 jeq 3b
3013 moveb %d0,%a1@(mac_scc_cha_a_data_offset) 3025 moveb %d0,%a1@(mac_scc_cha_a_data_offset)
3014#endif /* MAC_USE_SCC_A */ 3026#endif /* MAC_USE_SCC_A */
3015 3027
3016#ifdef MAC_USE_SCC_B 3028#ifdef MAC_USE_SCC_B
3017#ifndef MAC_USE_SCC_A /* Load mac_sccbase only if needed */
3018 movel %pc@(L(mac_sccbase)),%a1
3019#endif /* MAC_USE_SCC_A */
30204: btst #2,%a1@(mac_scc_cha_b_ctrl_offset) 30294: btst #2,%a1@(mac_scc_cha_b_ctrl_offset)
3021 jeq 4b 3030 jeq 4b
3022 moveb %d0,%a1@(mac_scc_cha_b_data_offset) 3031 moveb %d0,%a1@(mac_scc_cha_b_data_offset)
diff --git a/arch/metag/include/asm/hugetlb.h b/arch/metag/include/asm/hugetlb.h
index f545477e61f3..471f481e67f3 100644
--- a/arch/metag/include/asm/hugetlb.h
+++ b/arch/metag/include/asm/hugetlb.h
@@ -2,6 +2,7 @@
2#define _ASM_METAG_HUGETLB_H 2#define _ASM_METAG_HUGETLB_H
3 3
4#include <asm/page.h> 4#include <asm/page.h>
5#include <asm-generic/hugetlb.h>
5 6
6 7
7static inline int is_hugepage_only_range(struct mm_struct *mm, 8static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/arch/microblaze/include/asm/cacheflush.h b/arch/microblaze/include/asm/cacheflush.h
index 0f553bc009a0..ffea82a16d2c 100644
--- a/arch/microblaze/include/asm/cacheflush.h
+++ b/arch/microblaze/include/asm/cacheflush.h
@@ -102,21 +102,23 @@ do { \
102 102
103#define flush_cache_range(vma, start, len) do { } while (0) 103#define flush_cache_range(vma, start, len) do { } while (0)
104 104
105#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 105static inline void copy_to_user_page(struct vm_area_struct *vma,
106do { \ 106 struct page *page, unsigned long vaddr,
107 u32 addr = virt_to_phys(dst); \ 107 void *dst, void *src, int len)
108 memcpy((dst), (src), (len)); \ 108{
109 if (vma->vm_flags & VM_EXEC) { \ 109 u32 addr = virt_to_phys(dst);
110 invalidate_icache_range((unsigned) (addr), \ 110 memcpy(dst, src, len);
111 (unsigned) (addr) + PAGE_SIZE); \ 111 if (vma->vm_flags & VM_EXEC) {
112 flush_dcache_range((unsigned) (addr), \ 112 invalidate_icache_range(addr, addr + PAGE_SIZE);
113 (unsigned) (addr) + PAGE_SIZE); \ 113 flush_dcache_range(addr, addr + PAGE_SIZE);
114 } \ 114 }
115} while (0) 115}
116 116
117#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 117static inline void copy_from_user_page(struct vm_area_struct *vma,
118do { \ 118 struct page *page, unsigned long vaddr,
119 memcpy((dst), (src), (len)); \ 119 void *dst, void *src, int len)
120} while (0) 120{
121 memcpy(dst, src, len);
122}
121 123
122#endif /* _ASM_MICROBLAZE_CACHEFLUSH_H */ 124#endif /* _ASM_MICROBLAZE_CACHEFLUSH_H */
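
The microblaze cacheflush.h hunk above turns copy_to_user_page()/copy_from_user_page() from function-like macros into static inline functions, so the arguments are type-checked and each one is evaluated exactly once. A minimal sketch of the difference, using a made-up copy_buf()/trace_len() pair:

#include <string.h>
#include <stddef.h>

void trace_len(size_t len);                     /* hypothetical tracing hook */

/* Macro form: 'len' is expanded twice, so a side-effecting argument runs twice. */
#define copy_buf(dst, src, len) \
        do { memcpy((dst), (src), (len)); trace_len((len)); } while (0)

/* Inline form: ordinary parameters, evaluated once, with real types. */
static inline void copy_buf_inline(void *dst, const void *src, size_t len)
{
        memcpy(dst, src, len);
        trace_len(len);
}
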
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index efe59d881789..04e49553bdf9 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -99,13 +99,13 @@ static inline int access_ok(int type, const void __user *addr,
99 if ((get_fs().seg < ((unsigned long)addr)) || 99 if ((get_fs().seg < ((unsigned long)addr)) ||
100 (get_fs().seg < ((unsigned long)addr + size - 1))) { 100 (get_fs().seg < ((unsigned long)addr + size - 1))) {
101 pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n", 101 pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
102 type ? "WRITE" : "READ ", (u32)addr, (u32)size, 102 type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
103 (u32)get_fs().seg); 103 (u32)get_fs().seg);
104 return 0; 104 return 0;
105 } 105 }
106ok: 106ok:
107 pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n", 107 pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
108 type ? "WRITE" : "READ ", (u32)addr, (u32)size, 108 type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
109 (u32)get_fs().seg); 109 (u32)get_fs().seg);
110 return 1; 110 return 1;
111} 111}
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index b0baa299f899..01b1b3f94feb 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -428,13 +428,16 @@ static void octeon_restart(char *command)
428 */ 428 */
429static void octeon_kill_core(void *arg) 429static void octeon_kill_core(void *arg)
430{ 430{
431 mb(); 431 if (octeon_is_simulation())
432 if (octeon_is_simulation()) {
433 /* The simulator needs the watchdog to stop for dead cores */
434 cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
435 /* A break instruction causes the simulator stop a core */ 432 /* A break instruction causes the simulator stop a core */
436 asm volatile ("sync\nbreak"); 433 asm volatile ("break" ::: "memory");
437 } 434
435 local_irq_disable();
436 /* Disable watchdog on this core. */
437 cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
438 /* Spin in a low power mode. */
439 while (true)
440 asm volatile ("wait" ::: "memory");
438} 441}
439 442
440 443
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 143875c6c95a..4d6fa0bf1305 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -496,10 +496,6 @@ struct kvm_mips_callbacks {
496 uint32_t cause); 496 uint32_t cause);
497 int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority, 497 int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority,
498 uint32_t cause); 498 uint32_t cause);
499 int (*vcpu_ioctl_get_regs) (struct kvm_vcpu *vcpu,
500 struct kvm_regs *regs);
501 int (*vcpu_ioctl_set_regs) (struct kvm_vcpu *vcpu,
502 struct kvm_regs *regs);
503}; 499};
504extern struct kvm_mips_callbacks *kvm_mips_callbacks; 500extern struct kvm_mips_callbacks *kvm_mips_callbacks;
505int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks); 501int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 820116067c10..516e6e9a5594 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -117,7 +117,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
117 if (! ((asid += ASID_INC) & ASID_MASK) ) { 117 if (! ((asid += ASID_INC) & ASID_MASK) ) {
118 if (cpu_has_vtag_icache) 118 if (cpu_has_vtag_icache)
119 flush_icache_all(); 119 flush_icache_all();
120#ifdef CONFIG_VIRTUALIZATION 120#ifdef CONFIG_KVM
121 kvm_local_flush_tlb_all(); /* start new asid cycle */ 121 kvm_local_flush_tlb_all(); /* start new asid cycle */
122#else 122#else
123 local_flush_tlb_all(); /* start new asid cycle */ 123 local_flush_tlb_all(); /* start new asid cycle */
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index a3186f2bb8a0..5e6cd0947393 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -16,6 +16,38 @@
16#include <asm/isadep.h> 16#include <asm/isadep.h>
17#include <uapi/asm/ptrace.h> 17#include <uapi/asm/ptrace.h>
18 18
19/*
20 * This struct defines the way the registers are stored on the stack during a
21 * system call/exception. As usual the registers k0/k1 aren't being saved.
22 */
23struct pt_regs {
24#ifdef CONFIG_32BIT
25 /* Pad bytes for argument save space on the stack. */
26 unsigned long pad0[6];
27#endif
28
29 /* Saved main processor registers. */
30 unsigned long regs[32];
31
32 /* Saved special registers. */
33 unsigned long cp0_status;
34 unsigned long hi;
35 unsigned long lo;
36#ifdef CONFIG_CPU_HAS_SMARTMIPS
37 unsigned long acx;
38#endif
39 unsigned long cp0_badvaddr;
40 unsigned long cp0_cause;
41 unsigned long cp0_epc;
42#ifdef CONFIG_MIPS_MT_SMTC
43 unsigned long cp0_tcstatus;
44#endif /* CONFIG_MIPS_MT_SMTC */
45#ifdef CONFIG_CPU_CAVIUM_OCTEON
46 unsigned long long mpl[3]; /* MTM{0,1,2} */
47 unsigned long long mtp[3]; /* MTP{0,1,2} */
48#endif
49} __aligned(8);
50
19struct task_struct; 51struct task_struct;
20 52
21extern int ptrace_getregs(struct task_struct *child, __s64 __user *data); 53extern int ptrace_getregs(struct task_struct *child, __s64 __user *data);
diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h
index 85789eacbf18..f09ff5ae2059 100644
--- a/arch/mips/include/uapi/asm/kvm.h
+++ b/arch/mips/include/uapi/asm/kvm.h
@@ -1,55 +1,135 @@
1/* 1/*
2* This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4* for more details. 4 * for more details.
5* 5 *
6* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7* Authors: Sanjay Lal <sanjayl@kymasys.com> 7 * Copyright (C) 2013 Cavium, Inc.
8*/ 8 * Authors: Sanjay Lal <sanjayl@kymasys.com>
9 */
9 10
10#ifndef __LINUX_KVM_MIPS_H 11#ifndef __LINUX_KVM_MIPS_H
11#define __LINUX_KVM_MIPS_H 12#define __LINUX_KVM_MIPS_H
12 13
13#include <linux/types.h> 14#include <linux/types.h>
14 15
15#define __KVM_MIPS 16/*
16 17 * KVM MIPS specific structures and definitions.
17#define N_MIPS_COPROC_REGS 32 18 *
18#define N_MIPS_COPROC_SEL 8 19 * Some parts derived from the x86 version of this file.
20 */
19 21
20/* for KVM_GET_REGS and KVM_SET_REGS */ 22/*
23 * for KVM_GET_REGS and KVM_SET_REGS
24 *
25 * If Config[AT] is zero (32-bit CPU), the register contents are
26 * stored in the lower 32-bits of the struct kvm_regs fields and sign
27 * extended to 64-bits.
28 */
21struct kvm_regs { 29struct kvm_regs {
22 __u32 gprs[32]; 30 /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
23 __u32 hi; 31 __u64 gpr[32];
24 __u32 lo; 32 __u64 hi;
25 __u32 pc; 33 __u64 lo;
26 34 __u64 pc;
27 __u32 cp0reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
28};
29
30/* for KVM_GET_SREGS and KVM_SET_SREGS */
31struct kvm_sregs {
32}; 35};
33 36
34/* for KVM_GET_FPU and KVM_SET_FPU */ 37/*
38 * for KVM_GET_FPU and KVM_SET_FPU
39 *
40 * If Status[FR] is zero (32-bit FPU), the upper 32-bits of the FPRs
41 * are zero filled.
42 */
35struct kvm_fpu { 43struct kvm_fpu {
44 __u64 fpr[32];
45 __u32 fir;
46 __u32 fccr;
47 __u32 fexr;
48 __u32 fenr;
49 __u32 fcsr;
50 __u32 pad;
36}; 51};
37 52
53
54/*
55 * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access CP0
56 * registers. The id field is broken down as follows:
57 *
58 * bits[2..0] - Register 'sel' index.
59 * bits[7..3] - Register 'rd' index.
60 * bits[15..8] - Must be zero.
61 * bits[31..16] - 1 -> CP0 registers.
62 * bits[51..32] - Must be zero.
63 * bits[63..52] - As per linux/kvm.h
64 *
65 * Other sets registers may be added in the future. Each set would
66 * have its own identifier in bits[31..16].
67 *
68 * The registers defined in struct kvm_regs are also accessible, the
69 * id values for these are below.
70 */
71
72#define KVM_REG_MIPS_R0 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0)
73#define KVM_REG_MIPS_R1 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 1)
74#define KVM_REG_MIPS_R2 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 2)
75#define KVM_REG_MIPS_R3 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 3)
76#define KVM_REG_MIPS_R4 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 4)
77#define KVM_REG_MIPS_R5 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 5)
78#define KVM_REG_MIPS_R6 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 6)
79#define KVM_REG_MIPS_R7 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 7)
80#define KVM_REG_MIPS_R8 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 8)
81#define KVM_REG_MIPS_R9 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 9)
82#define KVM_REG_MIPS_R10 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 10)
83#define KVM_REG_MIPS_R11 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 11)
84#define KVM_REG_MIPS_R12 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 12)
85#define KVM_REG_MIPS_R13 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 13)
86#define KVM_REG_MIPS_R14 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 14)
87#define KVM_REG_MIPS_R15 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 15)
88#define KVM_REG_MIPS_R16 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 16)
89#define KVM_REG_MIPS_R17 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 17)
90#define KVM_REG_MIPS_R18 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 18)
91#define KVM_REG_MIPS_R19 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 19)
92#define KVM_REG_MIPS_R20 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 20)
93#define KVM_REG_MIPS_R21 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 21)
94#define KVM_REG_MIPS_R22 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 22)
95#define KVM_REG_MIPS_R23 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 23)
96#define KVM_REG_MIPS_R24 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 24)
97#define KVM_REG_MIPS_R25 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 25)
98#define KVM_REG_MIPS_R26 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 26)
99#define KVM_REG_MIPS_R27 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 27)
100#define KVM_REG_MIPS_R28 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 28)
101#define KVM_REG_MIPS_R29 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 29)
102#define KVM_REG_MIPS_R30 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 30)
103#define KVM_REG_MIPS_R31 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 31)
104
105#define KVM_REG_MIPS_HI (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 32)
106#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33)
107#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34)
108
109/*
110 * KVM MIPS specific structures and definitions
111 *
112 */
38struct kvm_debug_exit_arch { 113struct kvm_debug_exit_arch {
114 __u64 epc;
39}; 115};
40 116
41/* for KVM_SET_GUEST_DEBUG */ 117/* for KVM_SET_GUEST_DEBUG */
42struct kvm_guest_debug_arch { 118struct kvm_guest_debug_arch {
43}; 119};
44 120
121/* definition of registers in kvm_run */
122struct kvm_sync_regs {
123};
124
125/* dummy definition */
126struct kvm_sregs {
127};
128
45struct kvm_mips_interrupt { 129struct kvm_mips_interrupt {
46 /* in */ 130 /* in */
47 __u32 cpu; 131 __u32 cpu;
48 __u32 irq; 132 __u32 irq;
49}; 133};
50 134
51/* definition of registers in kvm_run */
52struct kvm_sync_regs {
53};
54
55#endif /* __LINUX_KVM_MIPS_H */ 135#endif /* __LINUX_KVM_MIPS_H */
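
The new comment block above spells out how MIPS one-reg ids are encoded for KVM_GET_ONE_REG/KVM_SET_ONE_REG: sel in bits 2..0, rd in bits 7..3, 0x1 in bits 31..16 for the CP0 set, and the arch/size fields from linux/kvm.h on top. Composing an id that way, as a sketch that mirrors the MIPS_CP0_32() helper added later in this merge (in arch/mips/kvm/kvm_mips.c); the *_ID names here are illustrative only:

#include <linux/kvm.h>          /* KVM_REG_MIPS, KVM_REG_SIZE_U32 */

/* id of a 32-bit CP0 register: rd is the register number, sel the select field */
#define MIPS_CP0_32_ID(rd, sel) \
        (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (rd) + (sel)))

/* Example: CP0 Status is register 12, select 0 */
#define EXAMPLE_CP0_STATUS_ID   MIPS_CP0_32_ID(12, 0)
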
diff --git a/arch/mips/include/uapi/asm/ptrace.h b/arch/mips/include/uapi/asm/ptrace.h
index 4d58d8468705..b26f7e317279 100644
--- a/arch/mips/include/uapi/asm/ptrace.h
+++ b/arch/mips/include/uapi/asm/ptrace.h
@@ -22,16 +22,12 @@
22#define DSP_CONTROL 77 22#define DSP_CONTROL 77
23#define ACX 78 23#define ACX 78
24 24
25#ifndef __KERNEL__
25/* 26/*
26 * This struct defines the way the registers are stored on the stack during a 27 * This struct defines the way the registers are stored on the stack during a
27 * system call/exception. As usual the registers k0/k1 aren't being saved. 28 * system call/exception. As usual the registers k0/k1 aren't being saved.
28 */ 29 */
29struct pt_regs { 30struct pt_regs {
30#ifdef CONFIG_32BIT
31 /* Pad bytes for argument save space on the stack. */
32 unsigned long pad0[6];
33#endif
34
35 /* Saved main processor registers. */ 31 /* Saved main processor registers. */
36 unsigned long regs[32]; 32 unsigned long regs[32];
37 33
@@ -39,20 +35,11 @@ struct pt_regs {
39 unsigned long cp0_status; 35 unsigned long cp0_status;
40 unsigned long hi; 36 unsigned long hi;
41 unsigned long lo; 37 unsigned long lo;
42#ifdef CONFIG_CPU_HAS_SMARTMIPS
43 unsigned long acx;
44#endif
45 unsigned long cp0_badvaddr; 38 unsigned long cp0_badvaddr;
46 unsigned long cp0_cause; 39 unsigned long cp0_cause;
47 unsigned long cp0_epc; 40 unsigned long cp0_epc;
48#ifdef CONFIG_MIPS_MT_SMTC
49 unsigned long cp0_tcstatus;
50#endif /* CONFIG_MIPS_MT_SMTC */
51#ifdef CONFIG_CPU_CAVIUM_OCTEON
52 unsigned long long mpl[3]; /* MTM{0,1,2} */
53 unsigned long long mtp[3]; /* MTP{0,1,2} */
54#endif
55} __attribute__ ((aligned (8))); 41} __attribute__ ((aligned (8)));
42#endif /* __KERNEL__ */
56 43
57/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ 44/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
58#define PTRACE_GETREGS 12 45#define PTRACE_GETREGS 12
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index e06f777e9c49..1188e00bb120 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -119,4 +119,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
119#undef TASK_SIZE 119#undef TASK_SIZE
120#define TASK_SIZE TASK_SIZE32 120#define TASK_SIZE TASK_SIZE32
121 121
122#undef cputime_to_timeval
123#define cputime_to_timeval cputime_to_compat_timeval
124static __inline__ void
125cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
126{
127 unsigned long jiffies = cputime_to_jiffies(cputime);
128
129 value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
130 value->tv_sec = jiffies / HZ;
131}
132
122#include "../../../fs/binfmt_elf.c" 133#include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 97c5a1668e53..202e581e6096 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -162,4 +162,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
162#undef TASK_SIZE 162#undef TASK_SIZE
163#define TASK_SIZE TASK_SIZE32 163#define TASK_SIZE TASK_SIZE32
164 164
165#undef cputime_to_timeval
166#define cputime_to_timeval cputime_to_compat_timeval
167static __inline__ void
168cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
169{
170 unsigned long jiffies = cputime_to_jiffies(cputime);
171
172 value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
173 value->tv_sec = jiffies / HZ;
174}
175
165#include "../../../fs/binfmt_elf.c" 176#include "../../../fs/binfmt_elf.c"
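
binfmt_elfn32.c and binfmt_elfo32.c gain the same cputime_to_compat_timeval() helper: the cputime is first reduced to jiffies and then split into whole seconds and leftover microseconds for the 32-bit compat struct. The arithmetic, worked through as a standalone sketch with an assumed HZ of 100:

#include <stdio.h>

#define HZ 100                          /* assumed tick rate for this example */

int main(void)
{
        unsigned long jiffies = 250;    /* pretend cputime_to_jiffies() returned 250 ticks */

        long tv_sec  = jiffies / HZ;                            /* 250 / 100  -> 2       */
        long tv_usec = (jiffies % HZ) * (1000000L / HZ);        /* 50 * 10000 -> 500000  */

        printf("%ld s %ld us\n", tv_sec, tv_usec);              /* prints "2 s 500000 us" */
        return 0;
}
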
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index cf5509f13dd5..dba90ec0dc38 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -25,12 +25,16 @@
25#define MCOUNT_OFFSET_INSNS 4 25#define MCOUNT_OFFSET_INSNS 4
26#endif 26#endif
27 27
28#ifdef CONFIG_DYNAMIC_FTRACE
29
28/* Arch override because MIPS doesn't need to run this from stop_machine() */ 30/* Arch override because MIPS doesn't need to run this from stop_machine() */
29void arch_ftrace_update_code(int command) 31void arch_ftrace_update_code(int command)
30{ 32{
31 ftrace_modify_all_code(command); 33 ftrace_modify_all_code(command);
32} 34}
33 35
36#endif
37
34/* 38/*
35 * Check if the address is in kernel space 39 * Check if the address is in kernel space
36 * 40 *
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 3b09b888afa9..0c655deeea4a 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -93,26 +93,27 @@ static void rm7k_wait_irqoff(void)
93} 93}
94 94
95/* 95/*
96 * The Au1xxx wait is available only if using 32khz counter or 96 * Au1 'wait' is only useful when the 32kHz counter is used as timer,
97 * external timer source, but specifically not CP0 Counter. 97 * since coreclock (and the cp0 counter) stops upon executing it. Only an
98 * alchemy/common/time.c may override cpu_wait! 98 * interrupt can wake it, so they must be enabled before entering idle modes.
99 */ 99 */
100static void au1k_wait(void) 100static void au1k_wait(void)
101{ 101{
102 unsigned long c0status = read_c0_status() | 1; /* irqs on */
103
102 __asm__( 104 __asm__(
103 " .set mips3 \n" 105 " .set mips3 \n"
104 " cache 0x14, 0(%0) \n" 106 " cache 0x14, 0(%0) \n"
105 " cache 0x14, 32(%0) \n" 107 " cache 0x14, 32(%0) \n"
106 " sync \n" 108 " sync \n"
107 " nop \n" 109 " mtc0 %1, $12 \n" /* wr c0status */
108 " wait \n" 110 " wait \n"
109 " nop \n" 111 " nop \n"
110 " nop \n" 112 " nop \n"
111 " nop \n" 113 " nop \n"
112 " nop \n" 114 " nop \n"
113 " .set mips0 \n" 115 " .set mips0 \n"
114 : : "r" (au1k_wait)); 116 : : "r" (au1k_wait), "r" (c0status));
115 local_irq_enable();
116} 117}
117 118
118static int __initdata nowait; 119static int __initdata nowait;
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 93c070b41b0d..6fa198db8999 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -40,6 +40,7 @@
40#include <asm/processor.h> 40#include <asm/processor.h>
41#include <asm/vpe.h> 41#include <asm/vpe.h>
42#include <asm/rtlx.h> 42#include <asm/rtlx.h>
43#include <asm/setup.h>
43 44
44static struct rtlx_info *rtlx; 45static struct rtlx_info *rtlx;
45static int major; 46static int major;
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index e3be67012d78..a75ae40184aa 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -897,22 +897,24 @@ out_sigsegv:
897 897
898asmlinkage void do_tr(struct pt_regs *regs) 898asmlinkage void do_tr(struct pt_regs *regs)
899{ 899{
900 unsigned int opcode, tcode = 0; 900 u32 opcode, tcode = 0;
901 u16 instr[2]; 901 u16 instr[2];
902 unsigned long epc = exception_epc(regs); 902 unsigned long epc = msk_isa16_mode(exception_epc(regs));
903 903
904 if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) || 904 if (get_isa16_mode(regs->cp0_epc)) {
905 (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))) 905 if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
906 __get_user(instr[1], (u16 __user *)(epc + 2)))
906 goto out_sigsegv; 907 goto out_sigsegv;
907 opcode = (instr[0] << 16) | instr[1]; 908 opcode = (instr[0] << 16) | instr[1];
908 909 /* Immediate versions don't provide a code. */
909 /* Immediate versions don't provide a code. */ 910 if (!(opcode & OPCODE))
910 if (!(opcode & OPCODE)) { 911 tcode = (opcode >> 12) & ((1 << 4) - 1);
911 if (get_isa16_mode(regs->cp0_epc)) 912 } else {
912 /* microMIPS */ 913 if (__get_user(opcode, (u32 __user *)epc))
913 tcode = (opcode >> 12) & 0x1f; 914 goto out_sigsegv;
914 else 915 /* Immediate versions don't provide a code. */
915 tcode = ((opcode >> 6) & ((1 << 10) - 1)); 916 if (!(opcode & OPCODE))
917 tcode = (opcode >> 6) & ((1 << 10) - 1);
916 } 918 }
917 919
918 do_trap_or_bp(regs, tcode, "Trap"); 920 do_trap_or_bp(regs, tcode, "Trap");
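
The reworked do_tr() above fetches the trapping instruction in the encoding the CPU was really executing: two 16-bit halfwords when the EPC carries the microMIPS ISA bit, one 32-bit word otherwise, and extracts the trap code only for the forms that carry one. The two extractions side by side, as a standalone sketch on made-up opcode values (the shifts and masks follow the hunk):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t micro   = 0x46374937;  /* made-up microMIPS halfword pair */
        uint32_t classic = 0x00c7014d;  /* made-up classic MIPS trap word  */

        /* microMIPS: code field is bits 15..12 of the combined halfwords */
        printf("microMIPS tcode: %u\n", (micro >> 12) & ((1u << 4) - 1));

        /* classic MIPS: code field is bits 15..6 of the 32-bit word */
        printf("classic tcode:   %u\n", (classic >> 6) & ((1u << 10) - 1));
        return 0;
}
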
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index e0dad0289797..dd203e59e6fd 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -195,7 +195,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
195long 195long
196kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) 196kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
197{ 197{
198 return -EINVAL; 198 return -ENOIOCTLCMD;
199} 199}
200 200
201void kvm_arch_free_memslot(struct kvm_memory_slot *free, 201void kvm_arch_free_memslot(struct kvm_memory_slot *free,
@@ -401,7 +401,7 @@ int
401kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 401kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
402 struct kvm_guest_debug *dbg) 402 struct kvm_guest_debug *dbg)
403{ 403{
404 return -EINVAL; 404 return -ENOIOCTLCMD;
405} 405}
406 406
407int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) 407int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
@@ -475,14 +475,248 @@ int
475kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 475kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
476 struct kvm_mp_state *mp_state) 476 struct kvm_mp_state *mp_state)
477{ 477{
478 return -EINVAL; 478 return -ENOIOCTLCMD;
479} 479}
480 480
481int 481int
482kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 482kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
483 struct kvm_mp_state *mp_state) 483 struct kvm_mp_state *mp_state)
484{ 484{
485 return -EINVAL; 485 return -ENOIOCTLCMD;
486}
487
488#define MIPS_CP0_32(_R, _S) \
489 (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))
490
491#define MIPS_CP0_64(_R, _S) \
492 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))
493
494#define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0)
495#define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0)
496#define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0)
497#define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0)
498#define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2)
499#define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0)
500#define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1)
501#define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0)
502#define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0)
503#define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0)
504#define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0)
505#define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0)
506#define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0)
507#define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
508#define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
509#define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1)
510#define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
511#define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
512#define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
513#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
514#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
515#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
516#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
517
518static u64 kvm_mips_get_one_regs[] = {
519 KVM_REG_MIPS_R0,
520 KVM_REG_MIPS_R1,
521 KVM_REG_MIPS_R2,
522 KVM_REG_MIPS_R3,
523 KVM_REG_MIPS_R4,
524 KVM_REG_MIPS_R5,
525 KVM_REG_MIPS_R6,
526 KVM_REG_MIPS_R7,
527 KVM_REG_MIPS_R8,
528 KVM_REG_MIPS_R9,
529 KVM_REG_MIPS_R10,
530 KVM_REG_MIPS_R11,
531 KVM_REG_MIPS_R12,
532 KVM_REG_MIPS_R13,
533 KVM_REG_MIPS_R14,
534 KVM_REG_MIPS_R15,
535 KVM_REG_MIPS_R16,
536 KVM_REG_MIPS_R17,
537 KVM_REG_MIPS_R18,
538 KVM_REG_MIPS_R19,
539 KVM_REG_MIPS_R20,
540 KVM_REG_MIPS_R21,
541 KVM_REG_MIPS_R22,
542 KVM_REG_MIPS_R23,
543 KVM_REG_MIPS_R24,
544 KVM_REG_MIPS_R25,
545 KVM_REG_MIPS_R26,
546 KVM_REG_MIPS_R27,
547 KVM_REG_MIPS_R28,
548 KVM_REG_MIPS_R29,
549 KVM_REG_MIPS_R30,
550 KVM_REG_MIPS_R31,
551
552 KVM_REG_MIPS_HI,
553 KVM_REG_MIPS_LO,
554 KVM_REG_MIPS_PC,
555
556 KVM_REG_MIPS_CP0_INDEX,
557 KVM_REG_MIPS_CP0_CONTEXT,
558 KVM_REG_MIPS_CP0_PAGEMASK,
559 KVM_REG_MIPS_CP0_WIRED,
560 KVM_REG_MIPS_CP0_BADVADDR,
561 KVM_REG_MIPS_CP0_ENTRYHI,
562 KVM_REG_MIPS_CP0_STATUS,
563 KVM_REG_MIPS_CP0_CAUSE,
564 /* EPC set via kvm_regs, et al. */
565 KVM_REG_MIPS_CP0_CONFIG,
566 KVM_REG_MIPS_CP0_CONFIG1,
567 KVM_REG_MIPS_CP0_CONFIG2,
568 KVM_REG_MIPS_CP0_CONFIG3,
569 KVM_REG_MIPS_CP0_CONFIG7,
570 KVM_REG_MIPS_CP0_ERROREPC
571};
572
573static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
574 const struct kvm_one_reg *reg)
575{
576 struct mips_coproc *cop0 = vcpu->arch.cop0;
577 s64 v;
578
579 switch (reg->id) {
580 case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
581 v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
582 break;
583 case KVM_REG_MIPS_HI:
584 v = (long)vcpu->arch.hi;
585 break;
586 case KVM_REG_MIPS_LO:
587 v = (long)vcpu->arch.lo;
588 break;
589 case KVM_REG_MIPS_PC:
590 v = (long)vcpu->arch.pc;
591 break;
592
593 case KVM_REG_MIPS_CP0_INDEX:
594 v = (long)kvm_read_c0_guest_index(cop0);
595 break;
596 case KVM_REG_MIPS_CP0_CONTEXT:
597 v = (long)kvm_read_c0_guest_context(cop0);
598 break;
599 case KVM_REG_MIPS_CP0_PAGEMASK:
600 v = (long)kvm_read_c0_guest_pagemask(cop0);
601 break;
602 case KVM_REG_MIPS_CP0_WIRED:
603 v = (long)kvm_read_c0_guest_wired(cop0);
604 break;
605 case KVM_REG_MIPS_CP0_BADVADDR:
606 v = (long)kvm_read_c0_guest_badvaddr(cop0);
607 break;
608 case KVM_REG_MIPS_CP0_ENTRYHI:
609 v = (long)kvm_read_c0_guest_entryhi(cop0);
610 break;
611 case KVM_REG_MIPS_CP0_STATUS:
612 v = (long)kvm_read_c0_guest_status(cop0);
613 break;
614 case KVM_REG_MIPS_CP0_CAUSE:
615 v = (long)kvm_read_c0_guest_cause(cop0);
616 break;
617 case KVM_REG_MIPS_CP0_ERROREPC:
618 v = (long)kvm_read_c0_guest_errorepc(cop0);
619 break;
620 case KVM_REG_MIPS_CP0_CONFIG:
621 v = (long)kvm_read_c0_guest_config(cop0);
622 break;
623 case KVM_REG_MIPS_CP0_CONFIG1:
624 v = (long)kvm_read_c0_guest_config1(cop0);
625 break;
626 case KVM_REG_MIPS_CP0_CONFIG2:
627 v = (long)kvm_read_c0_guest_config2(cop0);
628 break;
629 case KVM_REG_MIPS_CP0_CONFIG3:
630 v = (long)kvm_read_c0_guest_config3(cop0);
631 break;
632 case KVM_REG_MIPS_CP0_CONFIG7:
633 v = (long)kvm_read_c0_guest_config7(cop0);
634 break;
635 default:
636 return -EINVAL;
637 }
638 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
639 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
640 return put_user(v, uaddr64);
641 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
642 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
643 u32 v32 = (u32)v;
644 return put_user(v32, uaddr32);
645 } else {
646 return -EINVAL;
647 }
648}
649
650static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
651 const struct kvm_one_reg *reg)
652{
653 struct mips_coproc *cop0 = vcpu->arch.cop0;
654 u64 v;
655
656 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
657 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
658
659 if (get_user(v, uaddr64) != 0)
660 return -EFAULT;
661 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
662 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
663 s32 v32;
664
665 if (get_user(v32, uaddr32) != 0)
666 return -EFAULT;
667 v = (s64)v32;
668 } else {
669 return -EINVAL;
670 }
671
672 switch (reg->id) {
673 case KVM_REG_MIPS_R0:
674 /* Silently ignore requests to set $0 */
675 break;
676 case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
677 vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
678 break;
679 case KVM_REG_MIPS_HI:
680 vcpu->arch.hi = v;
681 break;
682 case KVM_REG_MIPS_LO:
683 vcpu->arch.lo = v;
684 break;
685 case KVM_REG_MIPS_PC:
686 vcpu->arch.pc = v;
687 break;
688
689 case KVM_REG_MIPS_CP0_INDEX:
690 kvm_write_c0_guest_index(cop0, v);
691 break;
692 case KVM_REG_MIPS_CP0_CONTEXT:
693 kvm_write_c0_guest_context(cop0, v);
694 break;
695 case KVM_REG_MIPS_CP0_PAGEMASK:
696 kvm_write_c0_guest_pagemask(cop0, v);
697 break;
698 case KVM_REG_MIPS_CP0_WIRED:
699 kvm_write_c0_guest_wired(cop0, v);
700 break;
701 case KVM_REG_MIPS_CP0_BADVADDR:
702 kvm_write_c0_guest_badvaddr(cop0, v);
703 break;
704 case KVM_REG_MIPS_CP0_ENTRYHI:
705 kvm_write_c0_guest_entryhi(cop0, v);
706 break;
707 case KVM_REG_MIPS_CP0_STATUS:
708 kvm_write_c0_guest_status(cop0, v);
709 break;
710 case KVM_REG_MIPS_CP0_CAUSE:
711 kvm_write_c0_guest_cause(cop0, v);
712 break;
713 case KVM_REG_MIPS_CP0_ERROREPC:
714 kvm_write_c0_guest_errorepc(cop0, v);
715 break;
716 default:
717 return -EINVAL;
718 }
719 return 0;
486} 720}
487 721
488long 722long
@@ -491,9 +725,38 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
491 struct kvm_vcpu *vcpu = filp->private_data; 725 struct kvm_vcpu *vcpu = filp->private_data;
492 void __user *argp = (void __user *)arg; 726 void __user *argp = (void __user *)arg;
493 long r; 727 long r;
494 int intr;
495 728
496 switch (ioctl) { 729 switch (ioctl) {
730 case KVM_SET_ONE_REG:
731 case KVM_GET_ONE_REG: {
732 struct kvm_one_reg reg;
733 if (copy_from_user(&reg, argp, sizeof(reg)))
734 return -EFAULT;
735 if (ioctl == KVM_SET_ONE_REG)
736 return kvm_mips_set_reg(vcpu, &reg);
737 else
738 return kvm_mips_get_reg(vcpu, &reg);
739 }
740 case KVM_GET_REG_LIST: {
741 struct kvm_reg_list __user *user_list = argp;
742 u64 __user *reg_dest;
743 struct kvm_reg_list reg_list;
744 unsigned n;
745
746 if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
747 return -EFAULT;
748 n = reg_list.n;
749 reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
750 if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
751 return -EFAULT;
752 if (n < reg_list.n)
753 return -E2BIG;
754 reg_dest = user_list->reg;
755 if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
756 sizeof(kvm_mips_get_one_regs)))
757 return -EFAULT;
758 return 0;
759 }
497 case KVM_NMI: 760 case KVM_NMI:
498 /* Treat the NMI as a CPU reset */ 761 /* Treat the NMI as a CPU reset */
499 r = kvm_mips_reset_vcpu(vcpu); 762 r = kvm_mips_reset_vcpu(vcpu);
@@ -505,8 +768,6 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
505 if (copy_from_user(&irq, argp, sizeof(irq))) 768 if (copy_from_user(&irq, argp, sizeof(irq)))
506 goto out; 769 goto out;
507 770
508 intr = (int)irq.irq;
509
510 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, 771 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
511 irq.irq); 772 irq.irq);
512 773
@@ -514,7 +775,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
514 break; 775 break;
515 } 776 }
516 default: 777 default:
517 r = -EINVAL; 778 r = -ENOIOCTLCMD;
518 } 779 }
519 780
520out: 781out:
@@ -565,7 +826,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
565 826
566 switch (ioctl) { 827 switch (ioctl) {
567 default: 828 default:
568 r = -EINVAL; 829 r = -ENOIOCTLCMD;
569 } 830 }
570 831
571 return r; 832 return r;
@@ -593,13 +854,13 @@ void kvm_arch_exit(void)
593int 854int
594kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 855kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
595{ 856{
596 return -ENOTSUPP; 857 return -ENOIOCTLCMD;
597} 858}
598 859
599int 860int
600kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 861kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
601{ 862{
602 return -ENOTSUPP; 863 return -ENOIOCTLCMD;
603} 864}
604 865
605int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) 866int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
@@ -609,12 +870,12 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
609 870
610int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 871int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
611{ 872{
612 return -ENOTSUPP; 873 return -ENOIOCTLCMD;
613} 874}
614 875
615int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 876int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
616{ 877{
617 return -ENOTSUPP; 878 return -ENOIOCTLCMD;
618} 879}
619 880
620int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 881int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
@@ -627,6 +888,9 @@ int kvm_dev_ioctl_check_extension(long ext)
627 int r; 888 int r;
628 889
629 switch (ext) { 890 switch (ext) {
891 case KVM_CAP_ONE_REG:
892 r = 1;
893 break;
630 case KVM_CAP_COALESCED_MMIO: 894 case KVM_CAP_COALESCED_MMIO:
631 r = KVM_COALESCED_MMIO_PAGE_OFFSET; 895 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
632 break; 896 break;
@@ -635,7 +899,6 @@ int kvm_dev_ioctl_check_extension(long ext)
635 break; 899 break;
636 } 900 }
637 return r; 901 return r;
638
639} 902}
640 903
641int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) 904int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
@@ -677,28 +940,28 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
677{ 940{
678 int i; 941 int i;
679 942
680 for (i = 0; i < 32; i++) 943 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
681 vcpu->arch.gprs[i] = regs->gprs[i]; 944 vcpu->arch.gprs[i] = regs->gpr[i];
682 945 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
683 vcpu->arch.hi = regs->hi; 946 vcpu->arch.hi = regs->hi;
684 vcpu->arch.lo = regs->lo; 947 vcpu->arch.lo = regs->lo;
685 vcpu->arch.pc = regs->pc; 948 vcpu->arch.pc = regs->pc;
686 949
687 return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs); 950 return 0;
688} 951}
689 952
690int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 953int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
691{ 954{
692 int i; 955 int i;
693 956
694 for (i = 0; i < 32; i++) 957 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
695 regs->gprs[i] = vcpu->arch.gprs[i]; 958 regs->gpr[i] = vcpu->arch.gprs[i];
696 959
697 regs->hi = vcpu->arch.hi; 960 regs->hi = vcpu->arch.hi;
698 regs->lo = vcpu->arch.lo; 961 regs->lo = vcpu->arch.lo;
699 regs->pc = vcpu->arch.pc; 962 regs->pc = vcpu->arch.pc;
700 963
701 return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs); 964 return 0;
702} 965}
703 966
704void kvm_mips_comparecount_func(unsigned long data) 967void kvm_mips_comparecount_func(unsigned long data)
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c
index 466aeef044bd..30d725321db1 100644
--- a/arch/mips/kvm/kvm_trap_emul.c
+++ b/arch/mips/kvm/kvm_trap_emul.c
@@ -345,54 +345,6 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
345 return ret; 345 return ret;
346} 346}
347 347
348static int
349kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
350{
351 struct mips_coproc *cop0 = vcpu->arch.cop0;
352
353 kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]);
354 kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]);
355 kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]);
356 kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]);
357 kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]);
358
359 kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]);
360 kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]);
361 kvm_write_c0_guest_pagemask(cop0,
362 regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]);
363 kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]);
364 kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]);
365
366 return 0;
367}
368
369static int
370kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
371{
372 struct mips_coproc *cop0 = vcpu->arch.cop0;
373
374 regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0);
375 regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0);
376 regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0);
377 regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0);
378 regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0);
379
380 regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0);
381 regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0);
382 regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] =
383 kvm_read_c0_guest_pagemask(cop0);
384 regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0);
385 regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0);
386
387 regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0);
388 regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0);
389 regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0);
390 regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0);
391 regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0);
392
393 return 0;
394}
395
396static int kvm_trap_emul_vm_init(struct kvm *kvm) 348static int kvm_trap_emul_vm_init(struct kvm *kvm)
397{ 349{
398 return 0; 350 return 0;
@@ -471,8 +423,6 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
471 .dequeue_io_int = kvm_mips_dequeue_io_int_cb, 423 .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
472 .irq_deliver = kvm_mips_irq_deliver_cb, 424 .irq_deliver = kvm_mips_irq_deliver_cb,
473 .irq_clear = kvm_mips_irq_clear_cb, 425 .irq_clear = kvm_mips_irq_clear_cb,
474 .vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs,
475 .vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs,
476}; 426};
477 427
478int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) 428int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index ce9818eef7d3..afeef93f81a7 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -301,10 +301,6 @@ static u32 tlb_handler[128] __cpuinitdata;
301static struct uasm_label labels[128] __cpuinitdata; 301static struct uasm_label labels[128] __cpuinitdata;
302static struct uasm_reloc relocs[128] __cpuinitdata; 302static struct uasm_reloc relocs[128] __cpuinitdata;
303 303
304#ifdef CONFIG_64BIT
305static int check_for_high_segbits __cpuinitdata;
306#endif
307
308static int check_for_high_segbits __cpuinitdata; 304static int check_for_high_segbits __cpuinitdata;
309 305
310static unsigned int kscratch_used_mask __cpuinitdata; 306static unsigned int kscratch_used_mask __cpuinitdata;
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index fb1569580def..6b5f3406f414 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -88,7 +88,7 @@ void __init plat_mem_setup(void)
88 __dt_setup_arch(&__dtb_start); 88 __dt_setup_arch(&__dtb_start);
89 89
90 if (soc_info.mem_size) 90 if (soc_info.mem_size)
91 add_memory_region(soc_info.mem_base, soc_info.mem_size, 91 add_memory_region(soc_info.mem_base, soc_info.mem_size * SZ_1M,
92 BOOT_MEM_RAM); 92 BOOT_MEM_RAM);
93 else 93 else
94 detect_memory_region(soc_info.mem_base, 94 detect_memory_region(soc_info.mem_base,
diff --git a/arch/mn10300/include/asm/irqflags.h b/arch/mn10300/include/asm/irqflags.h
index 678f68d5f37b..8730c0a3c37d 100644
--- a/arch/mn10300/include/asm/irqflags.h
+++ b/arch/mn10300/include/asm/irqflags.h
@@ -13,9 +13,8 @@
13#define _ASM_IRQFLAGS_H 13#define _ASM_IRQFLAGS_H
14 14
15#include <asm/cpu-regs.h> 15#include <asm/cpu-regs.h>
16#ifndef __ASSEMBLY__ 16/* linux/smp.h <- linux/irqflags.h needs asm/smp.h first */
17#include <linux/smp.h> 17#include <asm/smp.h>
18#endif
19 18
20/* 19/*
21 * interrupt control 20 * interrupt control
diff --git a/arch/mn10300/include/asm/smp.h b/arch/mn10300/include/asm/smp.h
index 6745dbe64944..56c42417d428 100644
--- a/arch/mn10300/include/asm/smp.h
+++ b/arch/mn10300/include/asm/smp.h
@@ -24,6 +24,7 @@
24#ifndef __ASSEMBLY__ 24#ifndef __ASSEMBLY__
25#include <linux/threads.h> 25#include <linux/threads.h>
26#include <linux/cpumask.h> 26#include <linux/cpumask.h>
27#include <linux/thread_info.h>
27#endif 28#endif
28 29
29#ifdef CONFIG_SMP 30#ifdef CONFIG_SMP
@@ -85,7 +86,7 @@ extern cpumask_t cpu_boot_map;
85extern void smp_init_cpus(void); 86extern void smp_init_cpus(void);
86extern void smp_cache_interrupt(void); 87extern void smp_cache_interrupt(void);
87extern void send_IPI_allbutself(int irq); 88extern void send_IPI_allbutself(int irq);
88extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait); 89extern int smp_nmi_call_function(void (*func)(void *), void *info, int wait);
89 90
90extern void arch_send_call_function_single_ipi(int cpu); 91extern void arch_send_call_function_single_ipi(int cpu);
91extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 92extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -100,6 +101,7 @@ extern void __cpu_die(unsigned int cpu);
100#ifndef __ASSEMBLY__ 101#ifndef __ASSEMBLY__
101 102
102static inline void smp_init_cpus(void) {} 103static inline void smp_init_cpus(void) {}
104#define raw_smp_processor_id() 0
103 105
104#endif /* __ASSEMBLY__ */ 106#endif /* __ASSEMBLY__ */
105#endif /* CONFIG_SMP */ 107#endif /* CONFIG_SMP */
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 780560b330d9..d7966e0f7698 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -161,7 +161,7 @@ struct __large_struct { unsigned long buf[100]; };
161 161
162#define __get_user_check(x, ptr, size) \ 162#define __get_user_check(x, ptr, size) \
163({ \ 163({ \
164 const __typeof__(ptr) __guc_ptr = (ptr); \ 164 const __typeof__(*(ptr))* __guc_ptr = (ptr); \
165 int _e; \ 165 int _e; \
166 if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \ 166 if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \
167 _e = __get_user_nocheck((x), __guc_ptr, (size)); \ 167 _e = __get_user_nocheck((x), __guc_ptr, (size)); \
diff --git a/arch/mn10300/kernel/setup.c b/arch/mn10300/kernel/setup.c
index 33c3bd1e5c6d..ebac9c11f796 100644
--- a/arch/mn10300/kernel/setup.c
+++ b/arch/mn10300/kernel/setup.c
@@ -38,6 +38,7 @@ struct mn10300_cpuinfo boot_cpu_data;
38/* For PCI or other memory-mapped resources */ 38/* For PCI or other memory-mapped resources */
39unsigned long pci_mem_start = 0x18000000; 39unsigned long pci_mem_start = 0x18000000;
40 40
41static char __initdata cmd_line[COMMAND_LINE_SIZE];
41char redboot_command_line[COMMAND_LINE_SIZE] = 42char redboot_command_line[COMMAND_LINE_SIZE] =
42 "console=ttyS0,115200 root=/dev/mtdblock3 rw"; 43 "console=ttyS0,115200 root=/dev/mtdblock3 rw";
43 44
@@ -74,45 +75,19 @@ static const char *const mn10300_cputypes[] = {
74}; 75};
75 76
76/* 77/*
77 * 78 * Pick out the memory size. We look for mem=size,
79 * where size is "size[KkMm]"
78 */ 80 */
79static void __init parse_mem_cmdline(char **cmdline_p) 81static int __init early_mem(char *p)
80{ 82{
81 char *from, *to, c; 83 memory_size = memparse(p, &p);
82
83 /* save unparsed command line copy for /proc/cmdline */
84 strcpy(boot_command_line, redboot_command_line);
85
86 /* see if there's an explicit memory size option */
87 from = redboot_command_line;
88 to = redboot_command_line;
89 c = ' ';
90
91 for (;;) {
92 if (c == ' ' && !memcmp(from, "mem=", 4)) {
93 if (to != redboot_command_line)
94 to--;
95 memory_size = memparse(from + 4, &from);
96 }
97
98 c = *(from++);
99 if (!c)
100 break;
101
102 *(to++) = c;
103 }
104
105 *to = '\0';
106 *cmdline_p = redboot_command_line;
107 84
108 if (memory_size == 0) 85 if (memory_size == 0)
109 panic("Memory size not known\n"); 86 panic("Memory size not known\n");
110 87
111 memory_end = (unsigned long) CONFIG_KERNEL_RAM_BASE_ADDRESS + 88 return 0;
112 memory_size;
113 if (memory_end > phys_memory_end)
114 memory_end = phys_memory_end;
115} 89}
90early_param("mem", early_mem);
116 91
117/* 92/*
118 * architecture specific setup 93 * architecture specific setup
@@ -125,7 +100,20 @@ void __init setup_arch(char **cmdline_p)
125 cpu_init(); 100 cpu_init();
126 unit_setup(); 101 unit_setup();
127 smp_init_cpus(); 102 smp_init_cpus();
128 parse_mem_cmdline(cmdline_p); 103
104 /* save unparsed command line copy for /proc/cmdline */
105 strlcpy(boot_command_line, redboot_command_line, COMMAND_LINE_SIZE);
106
107 /* populate cmd_line too for later use, preserving boot_command_line */
108 strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
109 *cmdline_p = cmd_line;
110
111 parse_early_param();
112
113 memory_end = (unsigned long) CONFIG_KERNEL_RAM_BASE_ADDRESS +
114 memory_size;
115 if (memory_end > phys_memory_end)
116 memory_end = phys_memory_end;
129 117
130 init_mm.start_code = (unsigned long)&_text; 118 init_mm.start_code = (unsigned long)&_text;
131 init_mm.end_code = (unsigned long) &_etext; 119 init_mm.end_code = (unsigned long) &_etext;
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 197690068f88..96ec3982be8d 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -66,7 +66,7 @@ KBUILD_CFLAGS_KERNEL += -mlong-calls
66endif 66endif
67 67
68# select which processor to optimise for 68# select which processor to optimise for
69cflags-$(CONFIG_PA7100) += -march=1.1 -mschedule=7100 69cflags-$(CONFIG_PA7000) += -march=1.1 -mschedule=7100
70cflags-$(CONFIG_PA7200) += -march=1.1 -mschedule=7200 70cflags-$(CONFIG_PA7200) += -march=1.1 -mschedule=7200
71cflags-$(CONFIG_PA7100LC) += -march=1.1 -mschedule=7100LC 71cflags-$(CONFIG_PA7100LC) += -march=1.1 -mschedule=7100LC
72cflags-$(CONFIG_PA7300LC) += -march=1.1 -mschedule=7300 72cflags-$(CONFIG_PA7300LC) += -march=1.1 -mschedule=7300
diff --git a/arch/parisc/include/asm/mmzone.h b/arch/parisc/include/asm/mmzone.h
index 0e625ab9aaec..b6b34a0987e7 100644
--- a/arch/parisc/include/asm/mmzone.h
+++ b/arch/parisc/include/asm/mmzone.h
@@ -27,7 +27,7 @@ extern struct node_map_data node_data[];
27 27
28#define PFNNID_SHIFT (30 - PAGE_SHIFT) 28#define PFNNID_SHIFT (30 - PAGE_SHIFT)
29#define PFNNID_MAP_MAX 512 /* support 512GB */ 29#define PFNNID_MAP_MAX 512 /* support 512GB */
30extern unsigned char pfnnid_map[PFNNID_MAP_MAX]; 30extern signed char pfnnid_map[PFNNID_MAP_MAX];
31 31
32#ifndef CONFIG_64BIT 32#ifndef CONFIG_64BIT
33#define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT)) 33#define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
@@ -39,17 +39,14 @@ extern unsigned char pfnnid_map[PFNNID_MAP_MAX];
39static inline int pfn_to_nid(unsigned long pfn) 39static inline int pfn_to_nid(unsigned long pfn)
40{ 40{
41 unsigned int i; 41 unsigned int i;
42 unsigned char r;
43 42
44 if (unlikely(pfn_is_io(pfn))) 43 if (unlikely(pfn_is_io(pfn)))
45 return 0; 44 return 0;
46 45
47 i = pfn >> PFNNID_SHIFT; 46 i = pfn >> PFNNID_SHIFT;
48 BUG_ON(i >= ARRAY_SIZE(pfnnid_map)); 47 BUG_ON(i >= ARRAY_SIZE(pfnnid_map));
49 r = pfnnid_map[i];
50 BUG_ON(r == 0xff);
51 48
52 return (int)r; 49 return pfnnid_map[i];
53} 50}
54 51
55static inline int pfn_valid(int pfn) 52static inline int pfn_valid(int pfn)
diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h
index 3234f492d575..465154076d23 100644
--- a/arch/parisc/include/asm/pci.h
+++ b/arch/parisc/include/asm/pci.h
@@ -225,4 +225,9 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
225 return channel ? 15 : 14; 225 return channel ? 15 : 14;
226} 226}
227 227
228#define HAVE_PCI_MMAP
229
230extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
231 enum pci_mmap_state mmap_state, int write_combine);
232
228#endif /* __ASM_PARISC_PCI_H */ 233#endif /* __ASM_PARISC_PCI_H */
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 5709c5e59be8..14285caec71a 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -394,7 +394,7 @@ EXPORT_SYMBOL(print_pci_hwpath);
394static void setup_bus_id(struct parisc_device *padev) 394static void setup_bus_id(struct parisc_device *padev)
395{ 395{
396 struct hardware_path path; 396 struct hardware_path path;
397 char name[20]; 397 char name[28];
398 char *output = name; 398 char *output = name;
399 int i; 399 int i;
400 400
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
index 9e2d2e408529..872275659d98 100644
--- a/arch/parisc/kernel/hardware.c
+++ b/arch/parisc/kernel/hardware.c
@@ -1205,6 +1205,7 @@ static struct hp_hardware hp_hardware_list[] = {
1205 {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, 1205 {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"},
1206 {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, 1206 {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"},
1207 {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, 1207 {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"},
1208 {HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"},
1208 {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, 1209 {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"},
1209 {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, 1210 {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"},
1210 {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, 1211 {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"},
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 36d7f402e48e..b743a80eaba0 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -860,7 +860,7 @@ ENTRY(flush_dcache_page_asm)
860#endif 860#endif
861 861
862 ldil L%dcache_stride, %r1 862 ldil L%dcache_stride, %r1
863 ldw R%dcache_stride(%r1), %r1 863 ldw R%dcache_stride(%r1), r31
864 864
865#ifdef CONFIG_64BIT 865#ifdef CONFIG_64BIT
866 depdi,z 1, 63-PAGE_SHIFT,1, %r25 866 depdi,z 1, 63-PAGE_SHIFT,1, %r25
@@ -868,26 +868,26 @@ ENTRY(flush_dcache_page_asm)
868 depwi,z 1, 31-PAGE_SHIFT,1, %r25 868 depwi,z 1, 31-PAGE_SHIFT,1, %r25
869#endif 869#endif
870 add %r28, %r25, %r25 870 add %r28, %r25, %r25
871 sub %r25, %r1, %r25 871 sub %r25, r31, %r25
872 872
873 873
8741: fdc,m %r1(%r28) 8741: fdc,m r31(%r28)
875 fdc,m %r1(%r28) 875 fdc,m r31(%r28)
876 fdc,m %r1(%r28) 876 fdc,m r31(%r28)
877 fdc,m %r1(%r28) 877 fdc,m r31(%r28)
878 fdc,m %r1(%r28) 878 fdc,m r31(%r28)
879 fdc,m %r1(%r28) 879 fdc,m r31(%r28)
880 fdc,m %r1(%r28) 880 fdc,m r31(%r28)
881 fdc,m %r1(%r28) 881 fdc,m r31(%r28)
882 fdc,m %r1(%r28) 882 fdc,m r31(%r28)
883 fdc,m %r1(%r28) 883 fdc,m r31(%r28)
884 fdc,m %r1(%r28) 884 fdc,m r31(%r28)
885 fdc,m %r1(%r28) 885 fdc,m r31(%r28)
886 fdc,m %r1(%r28) 886 fdc,m r31(%r28)
887 fdc,m %r1(%r28) 887 fdc,m r31(%r28)
888 fdc,m %r1(%r28) 888 fdc,m r31(%r28)
889 cmpb,COND(<<) %r28, %r25,1b 889 cmpb,COND(<<) %r28, %r25,1b
890 fdc,m %r1(%r28) 890 fdc,m r31(%r28)
891 891
892 sync 892 sync
893 893
@@ -936,7 +936,7 @@ ENTRY(flush_icache_page_asm)
936#endif 936#endif
937 937
938 ldil L%icache_stride, %r1 938 ldil L%icache_stride, %r1
939 ldw R%icache_stride(%r1), %r1 939 ldw R%icache_stride(%r1), %r31
940 940
941#ifdef CONFIG_64BIT 941#ifdef CONFIG_64BIT
942 depdi,z 1, 63-PAGE_SHIFT,1, %r25 942 depdi,z 1, 63-PAGE_SHIFT,1, %r25
@@ -944,28 +944,28 @@ ENTRY(flush_icache_page_asm)
944 depwi,z 1, 31-PAGE_SHIFT,1, %r25 944 depwi,z 1, 31-PAGE_SHIFT,1, %r25
945#endif 945#endif
946 add %r28, %r25, %r25 946 add %r28, %r25, %r25
947 sub %r25, %r1, %r25 947 sub %r25, %r31, %r25
948 948
949 949
950 /* fic only has the type 26 form on PA1.1, requiring an 950 /* fic only has the type 26 form on PA1.1, requiring an
951 * explicit space specification, so use %sr4 */ 951 * explicit space specification, so use %sr4 */
9521: fic,m %r1(%sr4,%r28) 9521: fic,m %r31(%sr4,%r28)
953 fic,m %r1(%sr4,%r28) 953 fic,m %r31(%sr4,%r28)
954 fic,m %r1(%sr4,%r28) 954 fic,m %r31(%sr4,%r28)
955 fic,m %r1(%sr4,%r28) 955 fic,m %r31(%sr4,%r28)
956 fic,m %r1(%sr4,%r28) 956 fic,m %r31(%sr4,%r28)
957 fic,m %r1(%sr4,%r28) 957 fic,m %r31(%sr4,%r28)
958 fic,m %r1(%sr4,%r28) 958 fic,m %r31(%sr4,%r28)
959 fic,m %r1(%sr4,%r28) 959 fic,m %r31(%sr4,%r28)
960 fic,m %r1(%sr4,%r28) 960 fic,m %r31(%sr4,%r28)
961 fic,m %r1(%sr4,%r28) 961 fic,m %r31(%sr4,%r28)
962 fic,m %r1(%sr4,%r28) 962 fic,m %r31(%sr4,%r28)
963 fic,m %r1(%sr4,%r28) 963 fic,m %r31(%sr4,%r28)
964 fic,m %r1(%sr4,%r28) 964 fic,m %r31(%sr4,%r28)
965 fic,m %r1(%sr4,%r28) 965 fic,m %r31(%sr4,%r28)
966 fic,m %r1(%sr4,%r28) 966 fic,m %r31(%sr4,%r28)
967 cmpb,COND(<<) %r28, %r25,1b 967 cmpb,COND(<<) %r28, %r25,1b
968 fic,m %r1(%sr4,%r28) 968 fic,m %r31(%sr4,%r28)
969 969
970 sync 970 sync
971 971
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index 60309051875e..64f2764a8cef 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -220,6 +220,33 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
220} 220}
221 221
222 222
223int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
224 enum pci_mmap_state mmap_state, int write_combine)
225{
226 unsigned long prot;
227
228 /*
229 * I/O space can be accessed via normal processor loads and stores on
230 * this platform but for now we elect not to do this and portable
231 * drivers should not do this anyway.
232 */
233 if (mmap_state == pci_mmap_io)
234 return -EINVAL;
235
236 if (write_combine)
237 return -EINVAL;
238
239 /*
240 * Ignore write-combine; for now only return uncached mappings.
241 */
242 prot = pgprot_val(vma->vm_page_prot);
243 prot |= _PAGE_NO_CACHE;
244 vma->vm_page_prot = __pgprot(prot);
245
246 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
247 vma->vm_end - vma->vm_start, vma->vm_page_prot);
248}
249
223/* 250/*
224 * A driver is enabling the device. We make sure that all the appropriate 251 * A driver is enabling the device. We make sure that all the appropriate
225 * bits are set to allow the device to operate as the driver is expecting. 252 * bits are set to allow the device to operate as the driver is expecting.
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 76b63e726a53..1e95b2000ce8 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -69,7 +69,8 @@ void __init setup_cmdline(char **cmdline_p)
69 /* called from hpux boot loader */ 69 /* called from hpux boot loader */
70 boot_command_line[0] = '\0'; 70 boot_command_line[0] = '\0';
71 } else { 71 } else {
72 strcpy(boot_command_line, (char *)__va(boot_args[1])); 72 strlcpy(boot_command_line, (char *)__va(boot_args[1]),
73 COMMAND_LINE_SIZE);
73 74
74#ifdef CONFIG_BLK_DEV_INITRD 75#ifdef CONFIG_BLK_DEV_INITRD
75 if (boot_args[2] != 0) /* did palo pass us a ramdisk? */ 76 if (boot_args[2] != 0) /* did palo pass us a ramdisk? */
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 1c965642068b..505b56c6b9b9 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -47,7 +47,7 @@ pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pt
47 47
48#ifdef CONFIG_DISCONTIGMEM 48#ifdef CONFIG_DISCONTIGMEM
49struct node_map_data node_data[MAX_NUMNODES] __read_mostly; 49struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
50unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly; 50signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
51#endif 51#endif
52 52
53static struct resource data_resource = { 53static struct resource data_resource = {
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 26807e5aff51..6f3887d884d2 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -176,6 +176,7 @@ extern const char *powerpc_base_platform;
176#define CPU_FTR_CFAR LONG_ASM_CONST(0x0100000000000000) 176#define CPU_FTR_CFAR LONG_ASM_CONST(0x0100000000000000)
177#define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0200000000000000) 177#define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0200000000000000)
178#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000) 178#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000)
179#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000)
179 180
180#ifndef __ASSEMBLY__ 181#ifndef __ASSEMBLY__
181 182
@@ -394,19 +395,20 @@ extern const char *powerpc_base_platform;
394 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \ 395 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \
395 CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \ 396 CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
396 CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \ 397 CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \
397 CPU_FTR_HVMODE) 398 CPU_FTR_HVMODE | CPU_FTR_DABRX)
398#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 399#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
399 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 400 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
400 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 401 CPU_FTR_MMCRA | CPU_FTR_SMT | \
401 CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \ 402 CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \
402 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB) 403 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_DABRX)
403#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 404#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
404 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 405 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
405 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 406 CPU_FTR_MMCRA | CPU_FTR_SMT | \
406 CPU_FTR_COHERENT_ICACHE | \ 407 CPU_FTR_COHERENT_ICACHE | \
407 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ 408 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
408 CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \ 409 CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
409 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR) 410 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR | \
411 CPU_FTR_DABRX)
410#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 412#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
411 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ 413 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
412 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 414 CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -415,7 +417,7 @@ extern const char *powerpc_base_platform;
415 CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ 417 CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
416 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ 418 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
417 CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | \ 419 CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | \
418 CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR) 420 CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX)
419#define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 421#define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
420 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ 422 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
421 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 423 CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -430,14 +432,15 @@ extern const char *powerpc_base_platform;
430 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 432 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
431 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ 433 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
432 CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \ 434 CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
433 CPU_FTR_UNALIGNED_LD_STD) 435 CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_DABRX)
434#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 436#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
435 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \ 437 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \
436 CPU_FTR_PURR | CPU_FTR_REAL_LE) 438 CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX)
437#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2) 439#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
438 440
439#define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \ 441#define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \
440 CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | CPU_FTR_ICSWX) 442 CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | \
443 CPU_FTR_ICSWX | CPU_FTR_DABRX )
441 444
442#ifdef __powerpc64__ 445#ifdef __powerpc64__
443#ifdef CONFIG_PPC_BOOK3E 446#ifdef CONFIG_PPC_BOOK3E
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 8e5fae8beaf6..46793b58a761 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -513,7 +513,7 @@ label##_common: \
513 */ 513 */
514#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \ 514#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \
515 EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \ 515 EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \
516 FINISH_NAP;RUNLATCH_ON;DISABLE_INTS) 516 FINISH_NAP;DISABLE_INTS;RUNLATCH_ON)
517 517
518/* 518/*
519 * When the idle code in power4_idle puts the CPU into NAP mode, 519 * When the idle code in power4_idle puts the CPU into NAP mode,
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index cf4df8e2139a..0c7f2bfcf134 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -264,6 +264,7 @@
264#define H_GET_MPP 0x2D4 264#define H_GET_MPP 0x2D4
265#define H_HOME_NODE_ASSOCIATIVITY 0x2EC 265#define H_HOME_NODE_ASSOCIATIVITY 0x2EC
266#define H_BEST_ENERGY 0x2F4 266#define H_BEST_ENERGY 0x2F4
267#define H_XIRR_X 0x2FC
267#define H_RANDOM 0x300 268#define H_RANDOM 0x300
268#define H_COP 0x304 269#define H_COP 0x304
269#define H_GET_MPP_X 0x314 270#define H_GET_MPP_X 0x314
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index b9dd382cb349..851bac7afa4b 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -54,8 +54,16 @@
54#define BOOKE_INTERRUPT_DEBUG 15 54#define BOOKE_INTERRUPT_DEBUG 15
55 55
56/* E500 */ 56/* E500 */
57#define BOOKE_INTERRUPT_SPE_UNAVAIL 32 57#define BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 32
58#define BOOKE_INTERRUPT_SPE_FP_DATA 33 58#define BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 33
59/*
60 * TODO: Unify 32-bit and 64-bit kernel exception handlers to use same defines
61 */
62#define BOOKE_INTERRUPT_SPE_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
63#define BOOKE_INTERRUPT_SPE_FP_DATA BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
64#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
65#define BOOKE_INTERRUPT_ALTIVEC_ASSIST \
66 BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
59#define BOOKE_INTERRUPT_SPE_FP_ROUND 34 67#define BOOKE_INTERRUPT_SPE_FP_ROUND 34
60#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35 68#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
61#define BOOKE_INTERRUPT_DOORBELL 36 69#define BOOKE_INTERRUPT_DOORBELL 36
@@ -67,10 +75,6 @@
67#define BOOKE_INTERRUPT_HV_SYSCALL 40 75#define BOOKE_INTERRUPT_HV_SYSCALL 40
68#define BOOKE_INTERRUPT_HV_PRIV 41 76#define BOOKE_INTERRUPT_HV_PRIV 41
69 77
70/* altivec */
71#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL 42
72#define BOOKE_INTERRUPT_ALTIVEC_ASSIST 43
73
74/* book3s */ 78/* book3s */
75 79
76#define BOOK3S_INTERRUPT_SYSTEM_RESET 0x100 80#define BOOK3S_INTERRUPT_SYSTEM_RESET 0x100
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index cea8496091ff..2f1b6c5f8174 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -523,6 +523,17 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
523#define PPC440EP_ERR42 523#define PPC440EP_ERR42
524#endif 524#endif
525 525
526/* The following stops all load and store data streams associated with stream
527 * ID (ie. streams created explicitly). The embedded and server mnemonics for
528 * dcbt are different so we use machine "power4" here explicitly.
529 */
530#define DCBT_STOP_ALL_STREAM_IDS(scratch) \
531.machine push ; \
532.machine "power4" ; \
533 lis scratch,0x60000000@h; \
534 dcbt r0,scratch,0b01010; \
535.machine pop
536
526/* 537/*
527 * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them 538 * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
528 * keep the address intact to be compatible with code shared with 539 * keep the address intact to be compatible with code shared with
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 594db6bc093c..14a658363698 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -409,21 +409,16 @@ static inline void prefetchw(const void *x)
409#endif 409#endif
410 410
411#ifdef CONFIG_PPC64 411#ifdef CONFIG_PPC64
412static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32) 412static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
413{ 413{
414 unsigned long sp;
415
416 if (is_32) 414 if (is_32)
417 sp = regs->gpr[1] & 0x0ffffffffUL; 415 return sp & 0x0ffffffffUL;
418 else
419 sp = regs->gpr[1];
420
421 return sp; 416 return sp;
422} 417}
423#else 418#else
424static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32) 419static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
425{ 420{
426 return regs->gpr[1]; 421 return sp;
427} 422}
428#endif 423#endif
429 424
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index a6136515c7f2..4a9e408644fe 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -111,17 +111,6 @@
111#define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T) 111#define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)
112#define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S) 112#define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S)
113 113
114/* Reason codes describing kernel causes for transaction aborts. By
115 convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if
116 the failure is persistent.
117*/
118#define TM_CAUSE_RESCHED 0xfe
119#define TM_CAUSE_TLBI 0xfc
120#define TM_CAUSE_FAC_UNAV 0xfa
121#define TM_CAUSE_SYSCALL 0xf9 /* Persistent */
122#define TM_CAUSE_MISC 0xf6
123#define TM_CAUSE_SIGNAL 0xf4
124
125#if defined(CONFIG_PPC_BOOK3S_64) 114#if defined(CONFIG_PPC_BOOK3S_64)
126#define MSR_64BIT MSR_SF 115#define MSR_64BIT MSR_SF
127 116
diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h
index fbe66c463891..9322c28aebd2 100644
--- a/arch/powerpc/include/asm/signal.h
+++ b/arch/powerpc/include/asm/signal.h
@@ -3,5 +3,8 @@
3 3
4#define __ARCH_HAS_SA_RESTORER 4#define __ARCH_HAS_SA_RESTORER
5#include <uapi/asm/signal.h> 5#include <uapi/asm/signal.h>
6#include <uapi/asm/ptrace.h>
7
8extern unsigned long get_tm_stackpointer(struct pt_regs *regs);
6 9
7#endif /* _ASM_POWERPC_SIGNAL_H */ 10#endif /* _ASM_POWERPC_SIGNAL_H */
diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h
index 4b4449abf3f8..9dfbc34bdbf5 100644
--- a/arch/powerpc/include/asm/tm.h
+++ b/arch/powerpc/include/asm/tm.h
@@ -5,6 +5,8 @@
5 * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation. 5 * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation.
6 */ 6 */
7 7
8#include <uapi/asm/tm.h>
9
8#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 10#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
9extern void do_load_up_transact_fpu(struct thread_struct *thread); 11extern void do_load_up_transact_fpu(struct thread_struct *thread);
10extern void do_load_up_transact_altivec(struct thread_struct *thread); 12extern void do_load_up_transact_altivec(struct thread_struct *thread);
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index f7bca6370745..5182c8622b54 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -40,6 +40,7 @@ header-y += statfs.h
40header-y += swab.h 40header-y += swab.h
41header-y += termbits.h 41header-y += termbits.h
42header-y += termios.h 42header-y += termios.h
43header-y += tm.h
43header-y += types.h 44header-y += types.h
44header-y += ucontext.h 45header-y += ucontext.h
45header-y += unistd.h 46header-y += unistd.h
diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h
new file mode 100644
index 000000000000..85059a00f560
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/tm.h
@@ -0,0 +1,18 @@
1#ifndef _ASM_POWERPC_TM_H
2#define _ASM_POWERPC_TM_H
3
4/* Reason codes describing kernel causes for transaction aborts. By
5 * convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if
6 * the failure is persistent. PAPR saves 0xff-0xe0 for the hypervisor.
7 */
8#define TM_CAUSE_PERSISTENT 0x01
9#define TM_CAUSE_RESCHED 0xde
10#define TM_CAUSE_TLBI 0xdc
11#define TM_CAUSE_FAC_UNAV 0xda
12#define TM_CAUSE_SYSCALL 0xd8 /* future use */
13#define TM_CAUSE_MISC 0xd6 /* future use */
14#define TM_CAUSE_SIGNAL 0xd4
15#define TM_CAUSE_ALIGNMENT 0xd2
16#define TM_CAUSE_EMULATE 0xd0
17
18#endif
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index c60bbec25c1f..2a45d0f04385 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -452,7 +452,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
452 .mmu_features = MMU_FTRS_POWER8, 452 .mmu_features = MMU_FTRS_POWER8,
453 .icache_bsize = 128, 453 .icache_bsize = 128,
454 .dcache_bsize = 128, 454 .dcache_bsize = 128,
455 .oprofile_type = PPC_OPROFILE_POWER4, 455 .oprofile_type = PPC_OPROFILE_INVALID,
456 .oprofile_cpu_type = "ppc64/ibm-compat-v1", 456 .oprofile_cpu_type = "ppc64/ibm-compat-v1",
457 .cpu_setup = __setup_cpu_power8, 457 .cpu_setup = __setup_cpu_power8,
458 .cpu_restore = __restore_cpu_power8, 458 .cpu_restore = __restore_cpu_power8,
@@ -482,7 +482,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
482 .cpu_name = "POWER7+ (raw)", 482 .cpu_name = "POWER7+ (raw)",
483 .cpu_features = CPU_FTRS_POWER7, 483 .cpu_features = CPU_FTRS_POWER7,
484 .cpu_user_features = COMMON_USER_POWER7, 484 .cpu_user_features = COMMON_USER_POWER7,
485 .cpu_user_features = COMMON_USER2_POWER7, 485 .cpu_user_features2 = COMMON_USER2_POWER7,
486 .mmu_features = MMU_FTRS_POWER7, 486 .mmu_features = MMU_FTRS_POWER7,
487 .icache_bsize = 128, 487 .icache_bsize = 128,
488 .dcache_bsize = 128, 488 .dcache_bsize = 128,
@@ -507,7 +507,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
507 .num_pmcs = 6, 507 .num_pmcs = 6,
508 .pmc_type = PPC_PMC_IBM, 508 .pmc_type = PPC_PMC_IBM,
509 .oprofile_cpu_type = "ppc64/power8", 509 .oprofile_cpu_type = "ppc64/power8",
510 .oprofile_type = PPC_OPROFILE_POWER4, 510 .oprofile_type = PPC_OPROFILE_INVALID,
511 .cpu_setup = __setup_cpu_power8, 511 .cpu_setup = __setup_cpu_power8,
512 .cpu_restore = __restore_cpu_power8, 512 .cpu_restore = __restore_cpu_power8,
513 .platform = "power8", 513 .platform = "power8",
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index d22e73e4618b..22b45a4955cd 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -849,7 +849,7 @@ resume_kernel:
849 /* check current_thread_info, _TIF_EMULATE_STACK_STORE */ 849 /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
850 CURRENT_THREAD_INFO(r9, r1) 850 CURRENT_THREAD_INFO(r9, r1)
851 lwz r8,TI_FLAGS(r9) 851 lwz r8,TI_FLAGS(r9)
852 andis. r8,r8,_TIF_EMULATE_STACK_STORE@h 852 andis. r0,r8,_TIF_EMULATE_STACK_STORE@h
853 beq+ 1f 853 beq+ 1f
854 854
855 addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */ 855 addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 0e9095e47b5b..8741c854e03d 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -465,20 +465,6 @@ BEGIN_FTR_SECTION
465 std r0, THREAD_EBBHR(r3) 465 std r0, THREAD_EBBHR(r3)
466 mfspr r0, SPRN_EBBRR 466 mfspr r0, SPRN_EBBRR
467 std r0, THREAD_EBBRR(r3) 467 std r0, THREAD_EBBRR(r3)
468
469 /* PMU registers made user read/(write) by EBB */
470 mfspr r0, SPRN_SIAR
471 std r0, THREAD_SIAR(r3)
472 mfspr r0, SPRN_SDAR
473 std r0, THREAD_SDAR(r3)
474 mfspr r0, SPRN_SIER
475 std r0, THREAD_SIER(r3)
476 mfspr r0, SPRN_MMCR0
477 std r0, THREAD_MMCR0(r3)
478 mfspr r0, SPRN_MMCR2
479 std r0, THREAD_MMCR2(r3)
480 mfspr r0, SPRN_MMCRA
481 std r0, THREAD_MMCRA(r3)
482END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 468END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
483#endif 469#endif
484 470
@@ -501,6 +487,13 @@ BEGIN_FTR_SECTION
501 ldarx r6,0,r1 487 ldarx r6,0,r1
502END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS) 488END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
503 489
490#ifdef CONFIG_PPC_BOOK3S
491/* Cancel all explicit user streams as they will have no use after context
492 * switch and will stop the HW from creating streams itself
493 */
494 DCBT_STOP_ALL_STREAM_IDS(r6)
495#endif
496
504 addi r6,r4,-THREAD /* Convert THREAD to 'current' */ 497 addi r6,r4,-THREAD /* Convert THREAD to 'current' */
505 std r6,PACACURRENT(r13) /* Set new 'current' */ 498 std r6,PACACURRENT(r13) /* Set new 'current' */
506 499
@@ -574,20 +567,6 @@ BEGIN_FTR_SECTION
574 ld r0, THREAD_EBBRR(r4) 567 ld r0, THREAD_EBBRR(r4)
575 mtspr SPRN_EBBRR, r0 568 mtspr SPRN_EBBRR, r0
576 569
577 /* PMU registers made user read/(write) by EBB */
578 ld r0, THREAD_SIAR(r4)
579 mtspr SPRN_SIAR, r0
580 ld r0, THREAD_SDAR(r4)
581 mtspr SPRN_SDAR, r0
582 ld r0, THREAD_SIER(r4)
583 mtspr SPRN_SIER, r0
584 ld r0, THREAD_MMCR0(r4)
585 mtspr SPRN_MMCR0, r0
586 ld r0, THREAD_MMCR2(r4)
587 mtspr SPRN_MMCR2, r0
588 ld r0, THREAD_MMCRA(r4)
589 mtspr SPRN_MMCRA, r0
590
591 ld r0,THREAD_TAR(r4) 570 ld r0,THREAD_TAR(r4)
592 mtspr SPRN_TAR,r0 571 mtspr SPRN_TAR,r0
593END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 572END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e6eba1bf61ad..40e4a17c8ba0 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -454,38 +454,14 @@ BEGIN_FTR_SECTION
454 xori r10,r10,(MSR_FE0|MSR_FE1) 454 xori r10,r10,(MSR_FE0|MSR_FE1)
455 mtmsrd r10 455 mtmsrd r10
456 sync 456 sync
457 fmr 0,0 457
458 fmr 1,1 458#define FMR2(n) fmr (n), (n) ; fmr n+1, n+1
459 fmr 2,2 459#define FMR4(n) FMR2(n) ; FMR2(n+2)
460 fmr 3,3 460#define FMR8(n) FMR4(n) ; FMR4(n+4)
461 fmr 4,4 461#define FMR16(n) FMR8(n) ; FMR8(n+8)
462 fmr 5,5 462#define FMR32(n) FMR16(n) ; FMR16(n+16)
463 fmr 6,6 463 FMR32(0)
464 fmr 7,7 464
465 fmr 8,8
466 fmr 9,9
467 fmr 10,10
468 fmr 11,11
469 fmr 12,12
470 fmr 13,13
471 fmr 14,14
472 fmr 15,15
473 fmr 16,16
474 fmr 17,17
475 fmr 18,18
476 fmr 19,19
477 fmr 20,20
478 fmr 21,21
479 fmr 22,22
480 fmr 23,23
481 fmr 24,24
482 fmr 25,25
483 fmr 26,26
484 fmr 27,27
485 fmr 28,28
486 fmr 29,29
487 fmr 30,30
488 fmr 31,31
489FTR_SECTION_ELSE 465FTR_SECTION_ELSE
490/* 466/*
491 * To denormalise we need to move a copy of the register to itself. 467 * To denormalise we need to move a copy of the register to itself.
@@ -495,39 +471,25 @@ FTR_SECTION_ELSE
495 oris r10,r10,MSR_VSX@h 471 oris r10,r10,MSR_VSX@h
496 mtmsrd r10 472 mtmsrd r10
497 sync 473 sync
498 XVCPSGNDP(0,0,0) 474
499 XVCPSGNDP(1,1,1) 475#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
500 XVCPSGNDP(2,2,2) 476#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
501 XVCPSGNDP(3,3,3) 477#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
502 XVCPSGNDP(4,4,4) 478#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
503 XVCPSGNDP(5,5,5) 479#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
504 XVCPSGNDP(6,6,6) 480 XVCPSGNDP32(0)
505 XVCPSGNDP(7,7,7) 481
506 XVCPSGNDP(8,8,8)
507 XVCPSGNDP(9,9,9)
508 XVCPSGNDP(10,10,10)
509 XVCPSGNDP(11,11,11)
510 XVCPSGNDP(12,12,12)
511 XVCPSGNDP(13,13,13)
512 XVCPSGNDP(14,14,14)
513 XVCPSGNDP(15,15,15)
514 XVCPSGNDP(16,16,16)
515 XVCPSGNDP(17,17,17)
516 XVCPSGNDP(18,18,18)
517 XVCPSGNDP(19,19,19)
518 XVCPSGNDP(20,20,20)
519 XVCPSGNDP(21,21,21)
520 XVCPSGNDP(22,22,22)
521 XVCPSGNDP(23,23,23)
522 XVCPSGNDP(24,24,24)
523 XVCPSGNDP(25,25,25)
524 XVCPSGNDP(26,26,26)
525 XVCPSGNDP(27,27,27)
526 XVCPSGNDP(28,28,28)
527 XVCPSGNDP(29,29,29)
528 XVCPSGNDP(30,30,30)
529 XVCPSGNDP(31,31,31)
530ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206) 482ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
483
484BEGIN_FTR_SECTION
485 b denorm_done
486END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
487/*
488 * To denormalise we need to move a copy of the register to itself.
489 * For POWER8 we need to do that for all 64 VSX registers
490 */
491 XVCPSGNDP32(32)
492denorm_done:
531 mtspr SPRN_HSRR0,r11 493 mtspr SPRN_HSRR0,r11
532 mtcrf 0x80,r9 494 mtcrf 0x80,r9
533 ld r9,PACA_EXGEN+EX_R9(r13) 495 ld r9,PACA_EXGEN+EX_R9(r13)
@@ -721,7 +683,7 @@ machine_check_common:
721 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) 683 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
722 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) 684 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
723 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) 685 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
724 STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception) 686 STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
725 STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) 687 STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
726#ifdef CONFIG_PPC_DOORBELL 688#ifdef CONFIG_PPC_DOORBELL
727 STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception) 689 STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5cbcf4d5a808..ea185e0b3cae 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -162,7 +162,7 @@ notrace unsigned int __check_irq_replay(void)
162 * in case we also had a rollover while hard disabled 162 * in case we also had a rollover while hard disabled
163 */ 163 */
164 local_paca->irq_happened &= ~PACA_IRQ_DEC; 164 local_paca->irq_happened &= ~PACA_IRQ_DEC;
165 if (decrementer_check_overflow()) 165 if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
166 return 0x900; 166 return 0x900;
167 167
168 /* Finally check if an external interrupt happened */ 168 /* Finally check if an external interrupt happened */
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index e9acf50dd5b2..f46914a0f33e 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -657,15 +657,6 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
657 * ranges. However, some machines (thanks Apple !) tend to split their 657 * ranges. However, some machines (thanks Apple !) tend to split their
658 * space into lots of small contiguous ranges. So we have to coalesce. 658 * space into lots of small contiguous ranges. So we have to coalesce.
659 * 659 *
660 * - We can only cope with all memory ranges having the same offset
661 * between CPU addresses and PCI addresses. Unfortunately, some bridges
662 * are setup for a large 1:1 mapping along with a small "window" which
663 * maps PCI address 0 to some arbitrary high address of the CPU space in
664 * order to give access to the ISA memory hole.
665 * The way out of here that I've chosen for now is to always set the
666 * offset based on the first resource found, then override it if we
667 * have a different offset and the previous was set by an ISA hole.
668 *
669 * - Some busses have IO space not starting at 0, which causes trouble with 660 * - Some busses have IO space not starting at 0, which causes trouble with
670 * the way we do our IO resource renumbering. The code somewhat deals with 661 * the way we do our IO resource renumbering. The code somewhat deals with
671 * it for 64 bits but I would expect problems on 32 bits. 662 * it for 64 bits but I would expect problems on 32 bits.
@@ -680,10 +671,9 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
680 int rlen; 671 int rlen;
681 int pna = of_n_addr_cells(dev); 672 int pna = of_n_addr_cells(dev);
682 int np = pna + 5; 673 int np = pna + 5;
683 int memno = 0, isa_hole = -1; 674 int memno = 0;
684 u32 pci_space; 675 u32 pci_space;
685 unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size; 676 unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
686 unsigned long long isa_mb = 0;
687 struct resource *res; 677 struct resource *res;
688 678
689 printk(KERN_INFO "PCI host bridge %s %s ranges:\n", 679 printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
@@ -777,8 +767,6 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
777 } 767 }
778 /* Handles ISA memory hole space here */ 768 /* Handles ISA memory hole space here */
779 if (pci_addr == 0) { 769 if (pci_addr == 0) {
780 isa_mb = cpu_addr;
781 isa_hole = memno;
782 if (primary || isa_mem_base == 0) 770 if (primary || isa_mem_base == 0)
783 isa_mem_base = cpu_addr; 771 isa_mem_base = cpu_addr;
784 hose->isa_mem_phys = cpu_addr; 772 hose->isa_mem_phys = cpu_addr;
@@ -839,6 +827,7 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
839 } 827 }
840 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 828 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
841 struct resource *res = dev->resource + i; 829 struct resource *res = dev->resource + i;
830 struct pci_bus_region reg;
842 if (!res->flags) 831 if (!res->flags)
843 continue; 832 continue;
844 833
@@ -847,8 +836,9 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
847 * at 0 as unset as well, except if PCI_PROBE_ONLY is also set 836 * at 0 as unset as well, except if PCI_PROBE_ONLY is also set
848 * since in that case, we don't want to re-assign anything 837 * since in that case, we don't want to re-assign anything
849 */ 838 */
839 pcibios_resource_to_bus(dev, &reg, res);
850 if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) || 840 if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
851 (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) { 841 (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
852 /* Only print message if not re-assigning */ 842 /* Only print message if not re-assigning */
853 if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) 843 if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
854 pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] " 844 pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] "
@@ -1004,7 +994,7 @@ void pcibios_setup_bus_self(struct pci_bus *bus)
1004 ppc_md.pci_dma_bus_setup(bus); 994 ppc_md.pci_dma_bus_setup(bus);
1005} 995}
1006 996
1007void pcibios_setup_device(struct pci_dev *dev) 997static void pcibios_setup_device(struct pci_dev *dev)
1008{ 998{
1009 /* Fixup NUMA node as it may not be setup yet by the generic 999 /* Fixup NUMA node as it may not be setup yet by the generic
1010 * code and is needed by the DMA init 1000 * code and is needed by the DMA init
@@ -1025,6 +1015,17 @@ void pcibios_setup_device(struct pci_dev *dev)
1025 ppc_md.pci_irq_fixup(dev); 1015 ppc_md.pci_irq_fixup(dev);
1026} 1016}
1027 1017
1018int pcibios_add_device(struct pci_dev *dev)
1019{
1020 /*
1021 * We can only call pcibios_setup_device() after bus setup is complete,
1022 * since some of the platform specific DMA setup code depends on it.
1023 */
1024 if (dev->bus->is_added)
1025 pcibios_setup_device(dev);
1026 return 0;
1027}
1028
1028void pcibios_setup_bus_devices(struct pci_bus *bus) 1029void pcibios_setup_bus_devices(struct pci_bus *bus)
1029{ 1030{
1030 struct pci_dev *dev; 1031 struct pci_dev *dev;
@@ -1479,10 +1480,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
1479 if (ppc_md.pcibios_enable_device_hook(dev)) 1480 if (ppc_md.pcibios_enable_device_hook(dev))
1480 return -EINVAL; 1481 return -EINVAL;
1481 1482
1482 /* avoid pcie irq fix up impact on cardbus */
1483 if (dev->hdr_type != PCI_HEADER_TYPE_CARDBUS)
1484 pcibios_setup_device(dev);
1485
1486 return pci_enable_resources(dev, mask); 1483 return pci_enable_resources(dev, mask);
1487} 1484}
1488 1485
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index a902723fdc69..076d1242507a 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -399,7 +399,8 @@ static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
399static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) 399static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
400{ 400{
401 mtspr(SPRN_DABR, dabr); 401 mtspr(SPRN_DABR, dabr);
402 mtspr(SPRN_DABRX, dabrx); 402 if (cpu_has_feature(CPU_FTR_DABRX))
403 mtspr(SPRN_DABRX, dabrx);
403 return 0; 404 return 0;
404} 405}
405#else 406#else
@@ -1368,7 +1369,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
1368 1369
1369#ifdef CONFIG_PPC64 1370#ifdef CONFIG_PPC64
1370/* Called with hard IRQs off */ 1371/* Called with hard IRQs off */
1371void __ppc64_runlatch_on(void) 1372void notrace __ppc64_runlatch_on(void)
1372{ 1373{
1373 struct thread_info *ti = current_thread_info(); 1374 struct thread_info *ti = current_thread_info();
1374 unsigned long ctrl; 1375 unsigned long ctrl;
@@ -1381,7 +1382,7 @@ void __ppc64_runlatch_on(void)
1381} 1382}
1382 1383
1383/* Called with hard IRQs off */ 1384/* Called with hard IRQs off */
1384void __ppc64_runlatch_off(void) 1385void notrace __ppc64_runlatch_off(void)
1385{ 1386{
1386 struct thread_info *ti = current_thread_info(); 1387 struct thread_info *ti = current_thread_info();
1387 unsigned long ctrl; 1388 unsigned long ctrl;
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 577a8aa69c6e..457e97aa2945 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -18,6 +18,7 @@
18#include <asm/uaccess.h> 18#include <asm/uaccess.h>
19#include <asm/unistd.h> 19#include <asm/unistd.h>
20#include <asm/debug.h> 20#include <asm/debug.h>
21#include <asm/tm.h>
21 22
22#include "signal.h" 23#include "signal.h"
23 24
@@ -30,13 +31,13 @@ int show_unhandled_signals = 1;
30/* 31/*
31 * Allocate space for the signal frame 32 * Allocate space for the signal frame
32 */ 33 */
33void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, 34void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp,
34 size_t frame_size, int is_32) 35 size_t frame_size, int is_32)
35{ 36{
36 unsigned long oldsp, newsp; 37 unsigned long oldsp, newsp;
37 38
38 /* Default to using normal stack */ 39 /* Default to using normal stack */
39 oldsp = get_clean_sp(regs, is_32); 40 oldsp = get_clean_sp(sp, is_32);
40 41
41 /* Check for alt stack */ 42 /* Check for alt stack */
42 if ((ka->sa.sa_flags & SA_ONSTACK) && 43 if ((ka->sa.sa_flags & SA_ONSTACK) &&
@@ -175,3 +176,38 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
175 176
176 user_enter(); 177 user_enter();
177} 178}
179
180unsigned long get_tm_stackpointer(struct pt_regs *regs)
181{
182 /* When in an active transaction that takes a signal, we need to be
183 * careful with the stack. It's possible that the stack has moved back
184 * up after the tbegin. The obvious case here is when the tbegin is
185 * called inside a function that returns before a tend. In this case,
186 * the stack is part of the checkpointed transactional memory state.
 187 * If we write over this non-transactionally or in suspend, we are in
 188 * trouble because if we get a tm abort, the program counter and stack
 189 * pointer will be back at the tbegin but our in-memory stack won't be
190 * valid anymore.
191 *
192 * To avoid this, when taking a signal in an active transaction, we
193 * need to use the stack pointer from the checkpointed state, rather
194 * than the speculated state. This ensures that the signal context
195 * (written tm suspended) will be written below the stack required for
 196 * the rollback. The transaction is aborted because of the treclaim,
197 * so any memory written between the tbegin and the signal will be
198 * rolled back anyway.
199 *
200 * For signals taken in non-TM or suspended mode, we use the
201 * normal/non-checkpointed stack pointer.
202 */
203
204#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
205 if (MSR_TM_ACTIVE(regs->msr)) {
206 tm_enable();
207 tm_reclaim(&current->thread, regs->msr, TM_CAUSE_SIGNAL);
208 if (MSR_TM_TRANSACTIONAL(regs->msr))
209 return current->thread.ckpt_regs.gpr[1];
210 }
211#endif
212 return regs->gpr[1];
213}
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index ec84c901ceab..c69b9aeb9f23 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -12,7 +12,7 @@
12 12
13extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags); 13extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
14 14
15extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, 15extern void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp,
16 size_t frame_size, int is_32); 16 size_t frame_size, int is_32);
17 17
18extern int handle_signal32(unsigned long sig, struct k_sigaction *ka, 18extern int handle_signal32(unsigned long sig, struct k_sigaction *ka,
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 95068bf569ad..201385c3a1ae 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -503,12 +503,6 @@ static int save_tm_user_regs(struct pt_regs *regs,
503{ 503{
504 unsigned long msr = regs->msr; 504 unsigned long msr = regs->msr;
505 505
506 /* tm_reclaim rolls back all reg states, updating thread.ckpt_regs,
507 * thread.transact_fpr[], thread.transact_vr[], etc.
508 */
509 tm_enable();
510 tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);
511
512 /* Make sure floating point registers are stored in regs */ 506 /* Make sure floating point registers are stored in regs */
513 flush_fp_to_thread(current); 507 flush_fp_to_thread(current);
514 508
@@ -965,7 +959,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
965 959
966 /* Set up Signal Frame */ 960 /* Set up Signal Frame */
967 /* Put a Real Time Context onto stack */ 961 /* Put a Real Time Context onto stack */
968 rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1); 962 rt_sf = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*rt_sf), 1);
969 addr = rt_sf; 963 addr = rt_sf;
970 if (unlikely(rt_sf == NULL)) 964 if (unlikely(rt_sf == NULL))
971 goto badframe; 965 goto badframe;
@@ -1403,7 +1397,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
1403 unsigned long tramp; 1397 unsigned long tramp;
1404 1398
1405 /* Set up Signal Frame */ 1399 /* Set up Signal Frame */
1406 frame = get_sigframe(ka, regs, sizeof(*frame), 1); 1400 frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 1);
1407 if (unlikely(frame == NULL)) 1401 if (unlikely(frame == NULL))
1408 goto badframe; 1402 goto badframe;
1409 sc = (struct sigcontext __user *) &frame->sctx; 1403 sc = (struct sigcontext __user *) &frame->sctx;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index c1794286098c..345947367ec0 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -154,11 +154,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
154 * As above, but Transactional Memory is in use, so deliver sigcontexts 154 * As above, but Transactional Memory is in use, so deliver sigcontexts
155 * containing checkpointed and transactional register states. 155 * containing checkpointed and transactional register states.
156 * 156 *
157 * To do this, we treclaim to gather both sets of registers and set up the 157 * To do this, we treclaim (done before entering here) to gather both sets of
158 * 'normal' sigcontext registers with rolled-back register values such that a 158 * registers and set up the 'normal' sigcontext registers with rolled-back
159 * simple signal handler sees a correct checkpointed register state. 159 * register values such that a simple signal handler sees a correct
160 * If interested, a TM-aware sighandler can examine the transactional registers 160 * checkpointed register state. If interested, a TM-aware sighandler can
161 * in the 2nd sigcontext to determine the real origin of the signal. 161 * examine the transactional registers in the 2nd sigcontext to determine the
162 * real origin of the signal.
162 */ 163 */
163static long setup_tm_sigcontexts(struct sigcontext __user *sc, 164static long setup_tm_sigcontexts(struct sigcontext __user *sc,
164 struct sigcontext __user *tm_sc, 165 struct sigcontext __user *tm_sc,
@@ -184,16 +185,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
184 185
185 BUG_ON(!MSR_TM_ACTIVE(regs->msr)); 186 BUG_ON(!MSR_TM_ACTIVE(regs->msr));
186 187
187 /* tm_reclaim rolls back all reg states, saving checkpointed (older)
188 * GPRs to thread.ckpt_regs and (if used) FPRs to (newer)
189 * thread.transact_fp and/or VRs to (newer) thread.transact_vr.
190 * THEN we save out FP/VRs, if necessary, to the checkpointed (older)
191 * thread.fr[]/vr[]s. The transactional (newer) GPRs are on the
192 * stack, in *regs.
193 */
194 tm_enable();
195 tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);
196
197 flush_fp_to_thread(current); 188 flush_fp_to_thread(current);
198 189
199#ifdef CONFIG_ALTIVEC 190#ifdef CONFIG_ALTIVEC
@@ -711,7 +702,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
711 unsigned long newsp = 0; 702 unsigned long newsp = 0;
712 long err = 0; 703 long err = 0;
713 704
714 frame = get_sigframe(ka, regs, sizeof(*frame), 0); 705 frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 0);
715 if (unlikely(frame == NULL)) 706 if (unlikely(frame == NULL))
716 goto badframe; 707 goto badframe;
717 708
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index a7a648f6b750..c0e5caf8ccc7 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -53,6 +53,7 @@
53#ifdef CONFIG_PPC64 53#ifdef CONFIG_PPC64
54#include <asm/firmware.h> 54#include <asm/firmware.h>
55#include <asm/processor.h> 55#include <asm/processor.h>
56#include <asm/tm.h>
56#endif 57#endif
57#include <asm/kexec.h> 58#include <asm/kexec.h>
58#include <asm/ppc-opcode.h> 59#include <asm/ppc-opcode.h>
@@ -932,6 +933,28 @@ static int emulate_isel(struct pt_regs *regs, u32 instword)
932 return 0; 933 return 0;
933} 934}
934 935
936#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
937static inline bool tm_abort_check(struct pt_regs *regs, int cause)
938{
939 /* If we're emulating a load/store in an active transaction, we cannot
940 * emulate it as the kernel operates in transaction suspended context.
941 * We need to abort the transaction. This creates a persistent TM
 942 * abort, so tell the user what caused it with a new code.
943 */
944 if (MSR_TM_TRANSACTIONAL(regs->msr)) {
945 tm_enable();
946 tm_abort(cause);
947 return true;
948 }
949 return false;
950}
951#else
952static inline bool tm_abort_check(struct pt_regs *regs, int reason)
953{
954 return false;
955}
956#endif
957
935static int emulate_instruction(struct pt_regs *regs) 958static int emulate_instruction(struct pt_regs *regs)
936{ 959{
937 u32 instword; 960 u32 instword;
@@ -971,6 +994,9 @@ static int emulate_instruction(struct pt_regs *regs)
971 994
972 /* Emulate load/store string insn. */ 995 /* Emulate load/store string insn. */
973 if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { 996 if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
997 if (tm_abort_check(regs,
998 TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
999 return -EINVAL;
974 PPC_WARN_EMULATED(string, regs); 1000 PPC_WARN_EMULATED(string, regs);
975 return emulate_string_inst(regs, instword); 1001 return emulate_string_inst(regs, instword);
976 } 1002 }
@@ -1139,6 +1165,16 @@ bail:
1139 exception_exit(prev_state); 1165 exception_exit(prev_state);
1140} 1166}
1141 1167
1168/*
1169 * This occurs when running in hypervisor mode on POWER6 or later
1170 * and an illegal instruction is encountered.
1171 */
1172void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
1173{
1174 regs->msr |= REASON_ILLEGAL;
1175 program_check_exception(regs);
1176}
1177
1142void alignment_exception(struct pt_regs *regs) 1178void alignment_exception(struct pt_regs *regs)
1143{ 1179{
1144 enum ctx_state prev_state = exception_enter(); 1180 enum ctx_state prev_state = exception_enter();
@@ -1148,6 +1184,9 @@ void alignment_exception(struct pt_regs *regs)
1148 if (!arch_irq_disabled_regs(regs)) 1184 if (!arch_irq_disabled_regs(regs))
1149 local_irq_enable(); 1185 local_irq_enable();
1150 1186
1187 if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
1188 goto bail;
1189
1151 /* we don't implement logging of alignment exceptions */ 1190 /* we don't implement logging of alignment exceptions */
1152 if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) 1191 if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
1153 fixed = fix_alignment(regs); 1192 fixed = fix_alignment(regs);
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 5dd3ab469976..ed0385448148 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -441,6 +441,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
441 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); 441 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
442 struct kvmppc_44x_tlbe *tlbe; 442 struct kvmppc_44x_tlbe *tlbe;
443 unsigned int gtlb_index; 443 unsigned int gtlb_index;
444 int idx;
444 445
445 gtlb_index = kvmppc_get_gpr(vcpu, ra); 446 gtlb_index = kvmppc_get_gpr(vcpu, ra);
446 if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) { 447 if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
@@ -473,6 +474,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
473 return EMULATE_FAIL; 474 return EMULATE_FAIL;
474 } 475 }
475 476
477 idx = srcu_read_lock(&vcpu->kvm->srcu);
478
476 if (tlbe_is_host_safe(vcpu, tlbe)) { 479 if (tlbe_is_host_safe(vcpu, tlbe)) {
477 gva_t eaddr; 480 gva_t eaddr;
478 gpa_t gpaddr; 481 gpa_t gpaddr;
@@ -489,6 +492,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
489 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); 492 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
490 } 493 }
491 494
495 srcu_read_unlock(&vcpu->kvm->srcu, idx);
496
492 trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1, 497 trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
493 tlbe->word2); 498 tlbe->word2);
494 499
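The srcu_read_lock()/srcu_read_unlock() pairs added here and in the later booke/e500 hunks all follow the same read-side pattern: kvm->srcu protects the memslot array, so any path that may translate guest addresses or map shadow TLB entries must hold the read lock around the lookup. A hedged sketch of that pattern, illustrative rather than verbatim:

	static void map_gtlb_entry_under_srcu(struct kvm_vcpu *vcpu, gva_t eaddr,
					      gpa_t gpaddr, int gtlb_index)
	{
		int idx;

		/* Enter the SRCU read-side section before touching memslots. */
		idx = srcu_read_lock(&vcpu->kvm->srcu);

		kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);

		/* Unlock with the same index returned by srcu_read_lock(). */
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
	}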
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 9de24f8e03c7..550f5928b394 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -562,6 +562,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
562 case H_CPPR: 562 case H_CPPR:
563 case H_EOI: 563 case H_EOI:
564 case H_IPI: 564 case H_IPI:
565 case H_IPOLL:
566 case H_XIRR_X:
565 if (kvmppc_xics_enabled(vcpu)) { 567 if (kvmppc_xics_enabled(vcpu)) {
566 ret = kvmppc_xics_hcall(vcpu, req); 568 ret = kvmppc_xics_hcall(vcpu, req);
567 break; 569 break;
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index b24309c6c2d5..da0e0bc268bd 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -257,6 +257,8 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
257 case H_CPPR: 257 case H_CPPR:
258 case H_EOI: 258 case H_EOI:
259 case H_IPI: 259 case H_IPI:
260 case H_IPOLL:
261 case H_XIRR_X:
260 if (kvmppc_xics_enabled(vcpu)) 262 if (kvmppc_xics_enabled(vcpu))
261 return kvmppc_h_pr_xics_hcall(vcpu, cmd); 263 return kvmppc_h_pr_xics_hcall(vcpu, cmd);
262 break; 264 break;
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index f7a103756618..94c1dd46b83d 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -650,6 +650,23 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
650 return H_SUCCESS; 650 return H_SUCCESS;
651} 651}
652 652
653static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
654{
655 union kvmppc_icp_state state;
656 struct kvmppc_icp *icp;
657
658 icp = vcpu->arch.icp;
659 if (icp->server_num != server) {
660 icp = kvmppc_xics_find_server(vcpu->kvm, server);
661 if (!icp)
662 return H_PARAMETER;
663 }
664 state = ACCESS_ONCE(icp->state);
665 kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
666 kvmppc_set_gpr(vcpu, 5, state.mfrr);
667 return H_SUCCESS;
668}
669
653static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) 670static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
654{ 671{
655 union kvmppc_icp_state old_state, new_state; 672 union kvmppc_icp_state old_state, new_state;
@@ -787,6 +804,18 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
787 if (!xics || !vcpu->arch.icp) 804 if (!xics || !vcpu->arch.icp)
788 return H_HARDWARE; 805 return H_HARDWARE;
789 806
807 /* These requests don't have real-mode implementations at present */
808 switch (req) {
809 case H_XIRR_X:
810 res = kvmppc_h_xirr(vcpu);
811 kvmppc_set_gpr(vcpu, 4, res);
812 kvmppc_set_gpr(vcpu, 5, get_tb());
813 return rc;
814 case H_IPOLL:
815 rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
816 return rc;
817 }
818
790 /* Check for real mode returning too hard */ 819 /* Check for real mode returning too hard */
791 if (xics->real_mode) 820 if (xics->real_mode)
792 return kvmppc_xics_rm_complete(vcpu, req); 821 return kvmppc_xics_rm_complete(vcpu, req);
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 1020119226db..1a1b51189773 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -673,7 +673,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
673 ret = s; 673 ret = s;
674 goto out; 674 goto out;
675 } 675 }
676 kvmppc_lazy_ee_enable();
677 676
678 kvm_guest_enter(); 677 kvm_guest_enter();
679 678
@@ -699,6 +698,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
699 kvmppc_load_guest_fp(vcpu); 698 kvmppc_load_guest_fp(vcpu);
700#endif 699#endif
701 700
701 kvmppc_lazy_ee_enable();
702
702 ret = __kvmppc_vcpu_run(kvm_run, vcpu); 703 ret = __kvmppc_vcpu_run(kvm_run, vcpu);
703 704
704 /* No need for kvm_guest_exit. It's done in handle_exit. 705 /* No need for kvm_guest_exit. It's done in handle_exit.
@@ -832,6 +833,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
832{ 833{
833 int r = RESUME_HOST; 834 int r = RESUME_HOST;
834 int s; 835 int s;
836 int idx;
837
838#ifdef CONFIG_PPC64
839 WARN_ON(local_paca->irq_happened != 0);
840#endif
841
842 /*
843 * We enter with interrupts disabled in hardware, but
844 * we need to call hard_irq_disable anyway to ensure that
845 * the software state is kept in sync.
846 */
847 hard_irq_disable();
835 848
836 /* update before a new last_exit_type is rewritten */ 849 /* update before a new last_exit_type is rewritten */
837 kvmppc_update_timing_stats(vcpu); 850 kvmppc_update_timing_stats(vcpu);
@@ -1053,6 +1066,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
1053 break; 1066 break;
1054 } 1067 }
1055 1068
1069 idx = srcu_read_lock(&vcpu->kvm->srcu);
1070
1056 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); 1071 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
1057 gfn = gpaddr >> PAGE_SHIFT; 1072 gfn = gpaddr >> PAGE_SHIFT;
1058 1073
@@ -1075,6 +1090,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
1075 kvmppc_account_exit(vcpu, MMIO_EXITS); 1090 kvmppc_account_exit(vcpu, MMIO_EXITS);
1076 } 1091 }
1077 1092
1093 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1078 break; 1094 break;
1079 } 1095 }
1080 1096
@@ -1098,6 +1114,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
1098 1114
1099 kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS); 1115 kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
1100 1116
1117 idx = srcu_read_lock(&vcpu->kvm->srcu);
1118
1101 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); 1119 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
1102 gfn = gpaddr >> PAGE_SHIFT; 1120 gfn = gpaddr >> PAGE_SHIFT;
1103 1121
@@ -1114,6 +1132,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
1114 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK); 1132 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
1115 } 1133 }
1116 1134
1135 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1117 break; 1136 break;
1118 } 1137 }
1119 1138
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index c41a5a96b558..6d6f153b6c1d 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -396,6 +396,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
396 struct kvm_book3e_206_tlb_entry *gtlbe; 396 struct kvm_book3e_206_tlb_entry *gtlbe;
397 int tlbsel, esel; 397 int tlbsel, esel;
398 int recal = 0; 398 int recal = 0;
399 int idx;
399 400
400 tlbsel = get_tlb_tlbsel(vcpu); 401 tlbsel = get_tlb_tlbsel(vcpu);
401 esel = get_tlb_esel(vcpu, tlbsel); 402 esel = get_tlb_esel(vcpu, tlbsel);
@@ -430,6 +431,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
430 kvmppc_set_tlb1map_range(vcpu, gtlbe); 431 kvmppc_set_tlb1map_range(vcpu, gtlbe);
431 } 432 }
432 433
434 idx = srcu_read_lock(&vcpu->kvm->srcu);
435
433 /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ 436 /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
434 if (tlbe_is_host_safe(vcpu, gtlbe)) { 437 if (tlbe_is_host_safe(vcpu, gtlbe)) {
435 u64 eaddr = get_tlb_eaddr(gtlbe); 438 u64 eaddr = get_tlb_eaddr(gtlbe);
@@ -444,6 +447,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
444 kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel)); 447 kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
445 } 448 }
446 449
450 srcu_read_unlock(&vcpu->kvm->srcu, idx);
451
447 kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); 452 kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
448 return EMULATE_DONE; 453 return EMULATE_DONE;
449} 454}
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 753cc99eff2b..19c8379575f7 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -177,8 +177,6 @@ int kvmppc_core_check_processor_compat(void)
177 r = 0; 177 r = 0;
178 else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0) 178 else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
179 r = 0; 179 r = 0;
180 else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0)
181 r = 0;
182 else 180 else
183 r = -ENOTSUPP; 181 r = -ENOTSUPP;
184 182
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
index 0ef75bf0695c..395c594722a2 100644
--- a/arch/powerpc/lib/copypage_power7.S
+++ b/arch/powerpc/lib/copypage_power7.S
@@ -28,13 +28,14 @@ _GLOBAL(copypage_power7)
28 * aligned we don't need to clear the bottom 7 bits of either 28 * aligned we don't need to clear the bottom 7 bits of either
29 * address. 29 * address.
30 */ 30 */
31 ori r9,r3,1 /* stream=1 */ 31 ori r9,r3,1 /* stream=1 => to */
32 32
33#ifdef CONFIG_PPC_64K_PAGES 33#ifdef CONFIG_PPC_64K_PAGES
34 lis r7,0x0E01 /* depth=7, units=512 */ 34 lis r7,0x0E01 /* depth=7
35 * units/cachelines=512 */
35#else 36#else
36 lis r7,0x0E00 /* depth=7 */ 37 lis r7,0x0E00 /* depth=7 */
37 ori r7,r7,0x1000 /* units=32 */ 38 ori r7,r7,0x1000 /* units/cachelines=32 */
38#endif 39#endif
39 ori r10,r7,1 /* stream=1 */ 40 ori r10,r7,1 /* stream=1 */
40 41
@@ -43,12 +44,14 @@ _GLOBAL(copypage_power7)
43 44
44.machine push 45.machine push
45.machine "power4" 46.machine "power4"
46 dcbt r0,r4,0b01000 47 /* setup read stream 0 */
47 dcbt r0,r7,0b01010 48 dcbt r0,r4,0b01000 /* addr from */
48 dcbtst r0,r9,0b01000 49 dcbt r0,r7,0b01010 /* length and depth from */
49 dcbtst r0,r10,0b01010 50 /* setup write stream 1 */
51 dcbtst r0,r9,0b01000 /* addr to */
52 dcbtst r0,r10,0b01010 /* length and depth to */
50 eieio 53 eieio
51 dcbt r0,r8,0b01010 /* GO */ 54 dcbt r0,r8,0b01010 /* all streams GO */
52.machine pop 55.machine pop
53 56
54#ifdef CONFIG_ALTIVEC 57#ifdef CONFIG_ALTIVEC
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index 0d24ff15f5f6..d1f11795a7ad 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -318,12 +318,14 @@ err1; stb r0,0(r3)
318 318
319.machine push 319.machine push
320.machine "power4" 320.machine "power4"
321 dcbt r0,r6,0b01000 321 /* setup read stream 0 */
322 dcbt r0,r7,0b01010 322 dcbt r0,r6,0b01000 /* addr from */
323 dcbtst r0,r9,0b01000 323 dcbt r0,r7,0b01010 /* length and depth from */
324 dcbtst r0,r10,0b01010 324 /* setup write stream 1 */
325 dcbtst r0,r9,0b01000 /* addr to */
326 dcbtst r0,r10,0b01010 /* length and depth to */
325 eieio 327 eieio
326 dcbt r0,r8,0b01010 /* GO */ 328 dcbt r0,r8,0b01010 /* all streams GO */
327.machine pop 329.machine pop
328 330
329 beq cr1,.Lunwind_stack_nonvmx_copy 331 beq cr1,.Lunwind_stack_nonvmx_copy
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 6a2aead5b0e5..4c122c3f1623 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -336,11 +336,18 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
336 336
337 hpte_v = hptep->v; 337 hpte_v = hptep->v;
338 actual_psize = hpte_actual_psize(hptep, psize); 338 actual_psize = hpte_actual_psize(hptep, psize);
339 /*
340 * We need to invalidate the TLB always because hpte_remove doesn't do
341 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
342 * random entry from it. When we do that we don't invalidate the TLB
343 * (hpte_remove) because we assume the old translation is still
344 * technically "valid".
345 */
339 if (actual_psize < 0) { 346 if (actual_psize < 0) {
340 native_unlock_hpte(hptep); 347 actual_psize = psize;
341 return -1; 348 ret = -1;
349 goto err_out;
342 } 350 }
343 /* Even if we miss, we need to invalidate the TLB */
344 if (!HPTE_V_COMPARE(hpte_v, want_v)) { 351 if (!HPTE_V_COMPARE(hpte_v, want_v)) {
345 DBG_LOW(" -> miss\n"); 352 DBG_LOW(" -> miss\n");
346 ret = -1; 353 ret = -1;
@@ -350,6 +357,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
350 hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) | 357 hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
351 (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)); 358 (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
352 } 359 }
360err_out:
353 native_unlock_hpte(hptep); 361 native_unlock_hpte(hptep);
354 362
355 /* Ensure it is out of the tlb too. */ 363 /* Ensure it is out of the tlb too. */
@@ -409,7 +417,7 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
409 hptep = htab_address + slot; 417 hptep = htab_address + slot;
410 actual_psize = hpte_actual_psize(hptep, psize); 418 actual_psize = hpte_actual_psize(hptep, psize);
411 if (actual_psize < 0) 419 if (actual_psize < 0)
412 return; 420 actual_psize = psize;
413 421
414 /* Update the HPTE */ 422 /* Update the HPTE */
415 hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) | 423 hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
@@ -437,21 +445,27 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
437 hpte_v = hptep->v; 445 hpte_v = hptep->v;
438 446
439 actual_psize = hpte_actual_psize(hptep, psize); 447 actual_psize = hpte_actual_psize(hptep, psize);
448 /*
449 * We need to invalidate the TLB always because hpte_remove doesn't do
450 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
451 * random entry from it. When we do that we don't invalidate the TLB
452 * (hpte_remove) because we assume the old translation is still
453 * technically "valid".
454 */
440 if (actual_psize < 0) { 455 if (actual_psize < 0) {
456 actual_psize = psize;
441 native_unlock_hpte(hptep); 457 native_unlock_hpte(hptep);
442 local_irq_restore(flags); 458 goto err_out;
443 return;
444 } 459 }
445 /* Even if we miss, we need to invalidate the TLB */
446 if (!HPTE_V_COMPARE(hpte_v, want_v)) 460 if (!HPTE_V_COMPARE(hpte_v, want_v))
447 native_unlock_hpte(hptep); 461 native_unlock_hpte(hptep);
448 else 462 else
449 /* Invalidate the hpte. NOTE: this also unlocks it */ 463 /* Invalidate the hpte. NOTE: this also unlocks it */
450 hptep->v = 0; 464 hptep->v = 0;
451 465
466err_out:
452 /* Invalidate the TLB */ 467 /* Invalidate the TLB */
453 tlbie(vpn, psize, actual_psize, ssize, local); 468 tlbie(vpn, psize, actual_psize, ssize, local);
454
455 local_irq_restore(flags); 469 local_irq_restore(flags);
456} 470}
457 471
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 237c8e5f2640..77fdd2cef33b 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -592,8 +592,14 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
592 do { 592 do {
593 pmd = pmd_offset(pud, addr); 593 pmd = pmd_offset(pud, addr);
594 next = pmd_addr_end(addr, end); 594 next = pmd_addr_end(addr, end);
595 if (pmd_none_or_clear_bad(pmd)) 595 if (!is_hugepd(pmd)) {
596 /*
 597 * if it is not a hugepd pointer, we should have already
 598 * found it cleared.
599 */
600 WARN_ON(!pmd_none_or_clear_bad(pmd));
596 continue; 601 continue;
602 }
597#ifdef CONFIG_PPC_FSL_BOOK3E 603#ifdef CONFIG_PPC_FSL_BOOK3E
598 /* 604 /*
599 * Increment next by the size of the huge mapping since 605 * Increment next by the size of the huge mapping since
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 426180b84978..29c6482890c8 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -110,7 +110,7 @@ static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
110 110
111static bool regs_use_siar(struct pt_regs *regs) 111static bool regs_use_siar(struct pt_regs *regs)
112{ 112{
113 return !!(regs->result & 1); 113 return !!regs->result;
114} 114}
115 115
116/* 116/*
@@ -136,22 +136,30 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
136 * If we're not doing instruction sampling, give them the SDAR 136 * If we're not doing instruction sampling, give them the SDAR
137 * (sampled data address). If we are doing instruction sampling, then 137 * (sampled data address). If we are doing instruction sampling, then
138 * only give them the SDAR if it corresponds to the instruction 138 * only give them the SDAR if it corresponds to the instruction
139 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or 139 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the
140 * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA. 140 * [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER.
141 */ 141 */
142static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) 142static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
143{ 143{
144 unsigned long mmcra = regs->dsisr; 144 unsigned long mmcra = regs->dsisr;
145 unsigned long sdsync; 145 bool sdar_valid;
146 146
147 if (ppmu->flags & PPMU_SIAR_VALID) 147 if (ppmu->flags & PPMU_HAS_SIER)
148 sdsync = POWER7P_MMCRA_SDAR_VALID; 148 sdar_valid = regs->dar & SIER_SDAR_VALID;
149 else if (ppmu->flags & PPMU_ALT_SIPR) 149 else {
150 sdsync = POWER6_MMCRA_SDSYNC; 150 unsigned long sdsync;
151 else 151
152 sdsync = MMCRA_SDSYNC; 152 if (ppmu->flags & PPMU_SIAR_VALID)
153 sdsync = POWER7P_MMCRA_SDAR_VALID;
154 else if (ppmu->flags & PPMU_ALT_SIPR)
155 sdsync = POWER6_MMCRA_SDSYNC;
156 else
157 sdsync = MMCRA_SDSYNC;
158
159 sdar_valid = mmcra & sdsync;
160 }
153 161
154 if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync)) 162 if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
155 *addrp = mfspr(SPRN_SDAR); 163 *addrp = mfspr(SPRN_SDAR);
156} 164}
157 165
@@ -181,11 +189,6 @@ static bool regs_sipr(struct pt_regs *regs)
181 return !!(regs->dsisr & sipr); 189 return !!(regs->dsisr & sipr);
182} 190}
183 191
184static bool regs_no_sipr(struct pt_regs *regs)
185{
186 return !!(regs->result & 2);
187}
188
189static inline u32 perf_flags_from_msr(struct pt_regs *regs) 192static inline u32 perf_flags_from_msr(struct pt_regs *regs)
190{ 193{
191 if (regs->msr & MSR_PR) 194 if (regs->msr & MSR_PR)
@@ -208,7 +211,7 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
208 * SIAR which should give slightly more reliable 211 * SIAR which should give slightly more reliable
209 * results 212 * results
210 */ 213 */
211 if (regs_no_sipr(regs)) { 214 if (ppmu->flags & PPMU_NO_SIPR) {
212 unsigned long siar = mfspr(SPRN_SIAR); 215 unsigned long siar = mfspr(SPRN_SIAR);
213 if (siar >= PAGE_OFFSET) 216 if (siar >= PAGE_OFFSET)
214 return PERF_RECORD_MISC_KERNEL; 217 return PERF_RECORD_MISC_KERNEL;
@@ -239,22 +242,9 @@ static inline void perf_read_regs(struct pt_regs *regs)
239 int use_siar; 242 int use_siar;
240 243
241 regs->dsisr = mmcra; 244 regs->dsisr = mmcra;
242 regs->result = 0;
243
244 if (ppmu->flags & PPMU_NO_SIPR)
245 regs->result |= 2;
246
247 /*
248 * On power8 if we're in random sampling mode, the SIER is updated.
249 * If we're in continuous sampling mode, we don't have SIPR.
250 */
251 if (ppmu->flags & PPMU_HAS_SIER) {
252 if (marked)
253 regs->dar = mfspr(SPRN_SIER);
254 else
255 regs->result |= 2;
256 }
257 245
246 if (ppmu->flags & PPMU_HAS_SIER)
247 regs->dar = mfspr(SPRN_SIER);
258 248
259 /* 249 /*
260 * If this isn't a PMU exception (eg a software event) the SIAR is 250 * If this isn't a PMU exception (eg a software event) the SIAR is
@@ -279,12 +269,12 @@ static inline void perf_read_regs(struct pt_regs *regs)
279 use_siar = 1; 269 use_siar = 1;
280 else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING)) 270 else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
281 use_siar = 0; 271 use_siar = 0;
282 else if (!regs_no_sipr(regs) && regs_sipr(regs)) 272 else if (!(ppmu->flags & PPMU_NO_SIPR) && regs_sipr(regs))
283 use_siar = 0; 273 use_siar = 0;
284 else 274 else
285 use_siar = 1; 275 use_siar = 1;
286 276
287 regs->result |= use_siar; 277 regs->result = use_siar;
288} 278}
289 279
290/* 280/*
@@ -308,8 +298,13 @@ static inline int siar_valid(struct pt_regs *regs)
308 unsigned long mmcra = regs->dsisr; 298 unsigned long mmcra = regs->dsisr;
309 int marked = mmcra & MMCRA_SAMPLE_ENABLE; 299 int marked = mmcra & MMCRA_SAMPLE_ENABLE;
310 300
311 if ((ppmu->flags & PPMU_SIAR_VALID) && marked) 301 if (marked) {
312 return mmcra & POWER7P_MMCRA_SIAR_VALID; 302 if (ppmu->flags & PPMU_HAS_SIER)
303 return regs->dar & SIER_SIAR_VALID;
304
305 if (ppmu->flags & PPMU_SIAR_VALID)
306 return mmcra & POWER7P_MMCRA_SIAR_VALID;
307 }
313 308
314 return 1; 309 return 1;
315} 310}
@@ -1763,7 +1758,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
1763 } 1758 }
1764 } 1759 }
1765 } 1760 }
1766 if ((!found) && printk_ratelimit()) 1761 if (!found && !nmi && printk_ratelimit())
1767 printk(KERN_WARNING "Can't find PMC that caused IRQ\n"); 1762 printk(KERN_WARNING "Can't find PMC that caused IRQ\n");
1768 1763
1769 /* 1764 /*
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 023b288f895b..4459eff7a75a 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -19,6 +19,8 @@ config PPC_PSERIES
19 select ZLIB_DEFLATE 19 select ZLIB_DEFLATE
20 select PPC_DOORBELL 20 select PPC_DOORBELL
21 select HAVE_CONTEXT_TRACKING 21 select HAVE_CONTEXT_TRACKING
22 select HOTPLUG if SMP
23 select HOTPLUG_CPU if SMP
22 default y 24 default y
23 25
24config PPC_SPLPAR 26config PPC_SPLPAR
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
index 5a4c87903057..5ce3ba7ad137 100644
--- a/arch/powerpc/platforms/pseries/eeh_cache.c
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -294,8 +294,6 @@ void __init eeh_addr_cache_build(void)
294 spin_lock_init(&pci_io_addr_cache_root.piar_lock); 294 spin_lock_init(&pci_io_addr_cache_root.piar_lock);
295 295
296 for_each_pci_dev(dev) { 296 for_each_pci_dev(dev) {
297 eeh_addr_cache_insert_dev(dev);
298
299 dn = pci_device_to_OF_node(dev); 297 dn = pci_device_to_OF_node(dev);
300 if (!dn) 298 if (!dn)
301 continue; 299 continue;
@@ -308,6 +306,8 @@ void __init eeh_addr_cache_build(void)
308 dev->dev.archdata.edev = edev; 306 dev->dev.archdata.edev = edev;
309 edev->pdev = dev; 307 edev->pdev = dev;
310 308
309 eeh_addr_cache_insert_dev(dev);
310
311 eeh_sysfs_add_device(dev); 311 eeh_sysfs_add_device(dev);
312 } 312 }
313 313
diff --git a/arch/powerpc/platforms/pseries/eeh_pe.c b/arch/powerpc/platforms/pseries/eeh_pe.c
index fe43d1aa2cf1..9d4a9e8562b2 100644
--- a/arch/powerpc/platforms/pseries/eeh_pe.c
+++ b/arch/powerpc/platforms/pseries/eeh_pe.c
@@ -639,7 +639,8 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
639 639
640 if (pe->type & EEH_PE_PHB) { 640 if (pe->type & EEH_PE_PHB) {
641 bus = pe->phb->bus; 641 bus = pe->phb->bus;
642 } else if (pe->type & EEH_PE_BUS) { 642 } else if (pe->type & EEH_PE_BUS ||
643 pe->type & EEH_PE_DEVICE) {
643 edev = list_first_entry(&pe->edevs, struct eeh_dev, list); 644 edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
644 pdev = eeh_dev_to_pci_dev(edev); 645 pdev = eeh_dev_to_pci_dev(edev);
645 if (pdev) 646 if (pdev)
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 19506f935737..b456b157d33d 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -83,7 +83,11 @@ static int pseries_eeh_init(void)
83 ibm_configure_pe = rtas_token("ibm,configure-pe"); 83 ibm_configure_pe = rtas_token("ibm,configure-pe");
84 ibm_configure_bridge = rtas_token("ibm,configure-bridge"); 84 ibm_configure_bridge = rtas_token("ibm,configure-bridge");
85 85
86 /* necessary sanity check */ 86 /*
87 * Necessary sanity check. We needn't check "get-config-addr-info"
 88 * and its variant since the old firmware probably supports the
 89 * address format of domain/bus/slot/function for EEH RTAS operations.
90 */
87 if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) { 91 if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) {
88 pr_warning("%s: RTAS service <ibm,set-eeh-option> invalid\n", 92 pr_warning("%s: RTAS service <ibm,set-eeh-option> invalid\n",
89 __func__); 93 __func__);
@@ -102,12 +106,6 @@ static int pseries_eeh_init(void)
102 pr_warning("%s: RTAS service <ibm,slot-error-detail> invalid\n", 106 pr_warning("%s: RTAS service <ibm,slot-error-detail> invalid\n",
103 __func__); 107 __func__);
104 return -EINVAL; 108 return -EINVAL;
105 } else if (ibm_get_config_addr_info2 == RTAS_UNKNOWN_SERVICE &&
106 ibm_get_config_addr_info == RTAS_UNKNOWN_SERVICE) {
107 pr_warning("%s: RTAS service <ibm,get-config-addr-info2> and "
108 "<ibm,get-config-addr-info> invalid\n",
109 __func__);
110 return -EINVAL;
111 } else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE && 109 } else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
112 ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) { 110 ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) {
113 pr_warning("%s: RTAS service <ibm,configure-pe> and " 111 pr_warning("%s: RTAS service <ibm,configure-pe> and "
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 028ac1f71b51..46ac1ddea683 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -97,22 +97,14 @@ static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
97 return indirect_read_config(bus, devfn, offset, len, val); 97 return indirect_read_config(bus, devfn, offset, len, val);
98} 98}
99 99
100static struct pci_ops fsl_indirect_pci_ops = 100#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
101
102static struct pci_ops fsl_indirect_pcie_ops =
101{ 103{
102 .read = fsl_indirect_read_config, 104 .read = fsl_indirect_read_config,
103 .write = indirect_write_config, 105 .write = indirect_write_config,
104}; 106};
105 107
106static void __init fsl_setup_indirect_pci(struct pci_controller* hose,
107 resource_size_t cfg_addr,
108 resource_size_t cfg_data, u32 flags)
109{
110 setup_indirect_pci(hose, cfg_addr, cfg_data, flags);
111 hose->ops = &fsl_indirect_pci_ops;
112}
113
114#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
115
116#define MAX_PHYS_ADDR_BITS 40 108#define MAX_PHYS_ADDR_BITS 40
117static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS; 109static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;
118 110
@@ -504,13 +496,15 @@ int __init fsl_add_bridge(struct platform_device *pdev, int is_primary)
504 if (!hose->private_data) 496 if (!hose->private_data)
505 goto no_bridge; 497 goto no_bridge;
506 498
507 fsl_setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4, 499 setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
508 PPC_INDIRECT_TYPE_BIG_ENDIAN); 500 PPC_INDIRECT_TYPE_BIG_ENDIAN);
509 501
510 if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0) 502 if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
511 hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK; 503 hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
512 504
513 if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) { 505 if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
506 /* use fsl_indirect_read_config for PCIe */
507 hose->ops = &fsl_indirect_pcie_ops;
514 /* For PCIE read HEADER_TYPE to identify controller mode */ 508 /* For PCIE read HEADER_TYPE to identify controller mode */
515 early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type); 509 early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
516 if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) 510 if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
@@ -814,8 +808,8 @@ int __init mpc83xx_add_bridge(struct device_node *dev)
814 if (ret) 808 if (ret)
815 goto err0; 809 goto err0;
816 } else { 810 } else {
817 fsl_setup_indirect_pci(hose, rsrc_cfg.start, 811 setup_indirect_pci(hose, rsrc_cfg.start,
818 rsrc_cfg.start + 4, 0); 812 rsrc_cfg.start + 4, 0);
819 } 813 }
820 814
821 printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. " 815 printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 0a13ecb270c7..3cc2f9159ab1 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -54,7 +54,7 @@ static DEFINE_RAW_SPINLOCK(mpic_lock);
54 54
55#ifdef CONFIG_PPC32 /* XXX for now */ 55#ifdef CONFIG_PPC32 /* XXX for now */
56#ifdef CONFIG_IRQ_ALL_CPUS 56#ifdef CONFIG_IRQ_ALL_CPUS
57#define distribute_irqs (!(mpic->flags & MPIC_SINGLE_DEST_CPU)) 57#define distribute_irqs (1)
58#else 58#else
59#define distribute_irqs (0) 59#define distribute_irqs (0)
60#endif 60#endif
@@ -1703,7 +1703,7 @@ void mpic_setup_this_cpu(void)
1703 * it differently, then we should make sure we also change the default 1703 * it differently, then we should make sure we also change the default
1704 * values of irq_desc[].affinity in irq.c. 1704 * values of irq_desc[].affinity in irq.c.
1705 */ 1705 */
1706 if (distribute_irqs) { 1706 if (distribute_irqs && !(mpic->flags & MPIC_SINGLE_DEST_CPU)) {
1707 for (i = 0; i < mpic->num_sources ; i++) 1707 for (i = 0; i < mpic->num_sources ; i++)
1708 mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), 1708 mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
1709 mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk); 1709 mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk);
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index bae0f402bf2a..87a22092b68f 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -212,7 +212,9 @@ appldata_timer_handler(ctl_table *ctl, int write,
212 return 0; 212 return 0;
213 } 213 }
214 if (!write) { 214 if (!write) {
215 len = sprintf(buf, appldata_timer_active ? "1\n" : "0\n"); 215 strncpy(buf, appldata_timer_active ? "1\n" : "0\n",
216 ARRAY_SIZE(buf));
217 len = strnlen(buf, ARRAY_SIZE(buf));
216 if (len > *lenp) 218 if (len > *lenp)
217 len = *lenp; 219 len = *lenp;
218 if (copy_to_user(buffer, buf, len)) 220 if (copy_to_user(buffer, buf, len))
@@ -317,7 +319,8 @@ appldata_generic_handler(ctl_table *ctl, int write,
317 return 0; 319 return 0;
318 } 320 }
319 if (!write) { 321 if (!write) {
320 len = sprintf(buf, ops->active ? "1\n" : "0\n"); 322 strncpy(buf, ops->active ? "1\n" : "0\n", ARRAY_SIZE(buf));
323 len = strnlen(buf, ARRAY_SIZE(buf));
321 if (len > *lenp) 324 if (len > *lenp)
322 len = *lenp; 325 len = *lenp;
323 if (copy_to_user(buffer, buf, len)) { 326 if (copy_to_user(buffer, buf, len)) {
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 9411db653bac..2f8c1abeb086 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -50,9 +50,10 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
50{ 50{
51 struct dma_map_ops *dma_ops = get_dma_ops(dev); 51 struct dma_map_ops *dma_ops = get_dma_ops(dev);
52 52
53 debug_dma_mapping_error(dev, dma_addr);
53 if (dma_ops->mapping_error) 54 if (dma_ops->mapping_error)
54 return dma_ops->mapping_error(dev, dma_addr); 55 return dma_ops->mapping_error(dev, dma_addr);
55 return (dma_addr == 0UL); 56 return (dma_addr == DMA_ERROR_CODE);
56} 57}
57 58
58static inline void *dma_alloc_coherent(struct device *dev, size_t size, 59static inline void *dma_alloc_coherent(struct device *dev, size_t size,
@@ -71,8 +72,8 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
71{ 72{
72 struct dma_map_ops *dma_ops = get_dma_ops(dev); 73 struct dma_map_ops *dma_ops = get_dma_ops(dev);
73 74
74 dma_ops->free(dev, size, cpu_addr, dma_handle, NULL);
75 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); 75 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
76 dma_ops->free(dev, size, cpu_addr, dma_handle, NULL);
76} 77}
77 78
78#endif /* _ASM_S390_DMA_MAPPING_H */ 79#endif /* _ASM_S390_DMA_MAPPING_H */
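For context, this change matters at the point where a driver checks a freshly created mapping: the added debug_dma_mapping_error() call tells CONFIG_DMA_API_DEBUG that the check actually happened, and comparing against DMA_ERROR_CODE rather than 0 presumably matches what the s390 mapping ops return on failure. A hedged caller-side sketch using only the generic DMA API:

	static int map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *handle)
	{
		*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		/* Must be checked before the handle is handed to hardware. */
		if (dma_mapping_error(dev, *handle))
			return -ENOMEM;

		return 0;
	}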
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 379d96e2105e..fd9be010f9b2 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -36,6 +36,7 @@ static inline void * phys_to_virt(unsigned long address)
36} 36}
37 37
38void *xlate_dev_mem_ptr(unsigned long phys); 38void *xlate_dev_mem_ptr(unsigned long phys);
39#define xlate_dev_mem_ptr xlate_dev_mem_ptr
39void unxlate_dev_mem_ptr(unsigned long phys, void *addr); 40void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
40 41
41/* 42/*
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 0f0de30e3e3f..e8b6e5b8932c 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -623,7 +623,7 @@ static inline pgste_t pgste_get_lock(pte_t *ptep)
623 " csg %0,%1,%2\n" 623 " csg %0,%1,%2\n"
624 " jl 0b\n" 624 " jl 0b\n"
625 : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE]) 625 : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
626 : "Q" (ptep[PTRS_PER_PTE]) : "cc"); 626 : "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
627#endif 627#endif
628 return __pgste(new); 628 return __pgste(new);
629} 629}
@@ -635,18 +635,26 @@ static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
635 " nihh %1,0xff7f\n" /* clear RCP_PCL_BIT */ 635 " nihh %1,0xff7f\n" /* clear RCP_PCL_BIT */
636 " stg %1,%0\n" 636 " stg %1,%0\n"
637 : "=Q" (ptep[PTRS_PER_PTE]) 637 : "=Q" (ptep[PTRS_PER_PTE])
638 : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc"); 638 : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
639 : "cc", "memory");
639 preempt_enable(); 640 preempt_enable();
640#endif 641#endif
641} 642}
642 643
644static inline void pgste_set(pte_t *ptep, pgste_t pgste)
645{
646#ifdef CONFIG_PGSTE
647 *(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
648#endif
649}
650
643static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste) 651static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
644{ 652{
645#ifdef CONFIG_PGSTE 653#ifdef CONFIG_PGSTE
646 unsigned long address, bits; 654 unsigned long address, bits;
647 unsigned char skey; 655 unsigned char skey;
648 656
649 if (!pte_present(*ptep)) 657 if (pte_val(*ptep) & _PAGE_INVALID)
650 return pgste; 658 return pgste;
651 address = pte_val(*ptep) & PAGE_MASK; 659 address = pte_val(*ptep) & PAGE_MASK;
652 skey = page_get_storage_key(address); 660 skey = page_get_storage_key(address);
@@ -680,7 +688,7 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
680#ifdef CONFIG_PGSTE 688#ifdef CONFIG_PGSTE
681 int young; 689 int young;
682 690
683 if (!pte_present(*ptep)) 691 if (pte_val(*ptep) & _PAGE_INVALID)
684 return pgste; 692 return pgste;
685 /* Get referenced bit from storage key */ 693 /* Get referenced bit from storage key */
686 young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK); 694 young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
@@ -704,17 +712,19 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
704{ 712{
705#ifdef CONFIG_PGSTE 713#ifdef CONFIG_PGSTE
706 unsigned long address; 714 unsigned long address;
707 unsigned long okey, nkey; 715 unsigned long nkey;
708 716
709 if (!pte_present(entry)) 717 if (pte_val(entry) & _PAGE_INVALID)
710 return; 718 return;
719 VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
711 address = pte_val(entry) & PAGE_MASK; 720 address = pte_val(entry) & PAGE_MASK;
712 okey = nkey = page_get_storage_key(address); 721 /*
713 nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT); 722 * Set page access key and fetch protection bit from pgste.
714 /* Set page access key and fetch protection bit from pgste */ 723 * The guest C/R information is still in the PGSTE, set real
715 nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56; 724 * key C/R to 0.
716 if (okey != nkey) 725 */
717 page_set_storage_key(address, nkey, 0); 726 nkey = (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
727 page_set_storage_key(address, nkey, 0);
718#endif 728#endif
719} 729}
720 730
@@ -1098,6 +1108,11 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
1098 pte = *ptep; 1108 pte = *ptep;
1099 if (!mm_exclusive(mm)) 1109 if (!mm_exclusive(mm))
1100 __ptep_ipte(address, ptep); 1110 __ptep_ipte(address, ptep);
1111
1112 if (mm_has_pgste(mm)) {
1113 pgste = pgste_update_all(&pte, pgste);
1114 pgste_set(ptep, pgste);
1115 }
1101 return pte; 1116 return pte;
1102} 1117}
1103 1118
@@ -1105,9 +1120,13 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
1105 unsigned long address, 1120 unsigned long address,
1106 pte_t *ptep, pte_t pte) 1121 pte_t *ptep, pte_t pte)
1107{ 1122{
1123 pgste_t pgste;
1124
1108 if (mm_has_pgste(mm)) { 1125 if (mm_has_pgste(mm)) {
1126 pgste = *(pgste_t *)(ptep + PTRS_PER_PTE);
1127 pgste_set_key(ptep, pgste, pte);
1109 pgste_set_pte(ptep, pte); 1128 pgste_set_pte(ptep, pte);
1110 pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE)); 1129 pgste_set_unlock(ptep, pgste);
1111 } else 1130 } else
1112 *ptep = pte; 1131 *ptep = pte;
1113} 1132}
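The two hunks above only make sense together with the caller contract of this helper pair: ptep_modify_prot_start() reads (and, if needed, invalidates) the PTE, the caller edits the returned value, and ptep_modify_prot_commit() installs it. That is why the PGSTE must now be captured at start time and picked up again at commit time. A hedged sketch of the generic caller pattern (cf. mm/mprotect.c of this era; illustrative, not verbatim):

	pte_t pte;

	pte = ptep_modify_prot_start(mm, addr, ptep);	/* read + invalidate */
	pte = pte_modify(pte, newprot);			/* edit the value    */
	ptep_modify_prot_commit(mm, addr, ptep, pte);	/* write it back     */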
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 298297477257..87acc38f73c6 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -74,6 +74,8 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high)
74 74
75static void show_trace(struct task_struct *task, unsigned long *stack) 75static void show_trace(struct task_struct *task, unsigned long *stack)
76{ 76{
77 const unsigned long frame_size =
78 STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
77 register unsigned long __r15 asm ("15"); 79 register unsigned long __r15 asm ("15");
78 unsigned long sp; 80 unsigned long sp;
79 81
@@ -82,11 +84,13 @@ static void show_trace(struct task_struct *task, unsigned long *stack)
82 sp = task ? task->thread.ksp : __r15; 84 sp = task ? task->thread.ksp : __r15;
83 printk("Call Trace:\n"); 85 printk("Call Trace:\n");
84#ifdef CONFIG_CHECK_STACK 86#ifdef CONFIG_CHECK_STACK
85 sp = __show_trace(sp, S390_lowcore.panic_stack - 4096, 87 sp = __show_trace(sp,
86 S390_lowcore.panic_stack); 88 S390_lowcore.panic_stack + frame_size - 4096,
89 S390_lowcore.panic_stack + frame_size);
87#endif 90#endif
88 sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE, 91 sp = __show_trace(sp,
89 S390_lowcore.async_stack); 92 S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
93 S390_lowcore.async_stack + frame_size);
90 if (task) 94 if (task)
91 __show_trace(sp, (unsigned long) task_stack_page(task), 95 __show_trace(sp, (unsigned long) task_stack_page(task),
92 (unsigned long) task_stack_page(task) + THREAD_SIZE); 96 (unsigned long) task_stack_page(task) + THREAD_SIZE);
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index d8a6a385d048..feb719d3c851 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -754,9 +754,9 @@ static struct bin_attribute sys_reipl_fcp_scp_data_attr = {
754 .write = reipl_fcp_scpdata_write, 754 .write = reipl_fcp_scpdata_write,
755}; 755};
756 756
757DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n", 757DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n",
758 reipl_block_fcp->ipl_info.fcp.wwpn); 758 reipl_block_fcp->ipl_info.fcp.wwpn);
759DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n", 759DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%llx\n",
760 reipl_block_fcp->ipl_info.fcp.lun); 760 reipl_block_fcp->ipl_info.fcp.lun);
761DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n", 761DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
762 reipl_block_fcp->ipl_info.fcp.bootprog); 762 reipl_block_fcp->ipl_info.fcp.bootprog);
@@ -1323,9 +1323,9 @@ static struct shutdown_action __refdata reipl_action = {
1323 1323
1324/* FCP dump device attributes */ 1324/* FCP dump device attributes */
1325 1325
1326DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n", 1326DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%llx\n",
1327 dump_block_fcp->ipl_info.fcp.wwpn); 1327 dump_block_fcp->ipl_info.fcp.wwpn);
1328DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n", 1328DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%llx\n",
1329 dump_block_fcp->ipl_info.fcp.lun); 1329 dump_block_fcp->ipl_info.fcp.lun);
1330DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n", 1330DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
1331 dump_block_fcp->ipl_info.fcp.bootprog); 1331 dump_block_fcp->ipl_info.fcp.bootprog);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index f7fb58903f6a..dd3c1994b8bd 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -311,3 +311,69 @@ void measurement_alert_subclass_unregister(void)
311 spin_unlock(&ma_subclass_lock); 311 spin_unlock(&ma_subclass_lock);
312} 312}
313EXPORT_SYMBOL(measurement_alert_subclass_unregister); 313EXPORT_SYMBOL(measurement_alert_subclass_unregister);
314
315#ifdef CONFIG_SMP
316void synchronize_irq(unsigned int irq)
317{
318 /*
319 * Not needed, the handler is protected by a lock and IRQs that occur
320 * after the handler is deleted are just NOPs.
321 */
322}
323EXPORT_SYMBOL_GPL(synchronize_irq);
324#endif
325
326#ifndef CONFIG_PCI
327
328/* Only PCI devices have dynamically-defined IRQ handlers */
329
330int request_irq(unsigned int irq, irq_handler_t handler,
331 unsigned long irqflags, const char *devname, void *dev_id)
332{
333 return -EINVAL;
334}
335EXPORT_SYMBOL_GPL(request_irq);
336
337void free_irq(unsigned int irq, void *dev_id)
338{
339 WARN_ON(1);
340}
341EXPORT_SYMBOL_GPL(free_irq);
342
343void enable_irq(unsigned int irq)
344{
345 WARN_ON(1);
346}
347EXPORT_SYMBOL_GPL(enable_irq);
348
349void disable_irq(unsigned int irq)
350{
351 WARN_ON(1);
352}
353EXPORT_SYMBOL_GPL(disable_irq);
354
355#endif /* !CONFIG_PCI */
356
357void disable_irq_nosync(unsigned int irq)
358{
359 disable_irq(irq);
360}
361EXPORT_SYMBOL_GPL(disable_irq_nosync);
362
363unsigned long probe_irq_on(void)
364{
365 return 0;
366}
367EXPORT_SYMBOL_GPL(probe_irq_on);
368
369int probe_irq_off(unsigned long val)
370{
371 return 0;
372}
373EXPORT_SYMBOL_GPL(probe_irq_off);
374
375unsigned int probe_irq_mask(unsigned long val)
376{
377 return val;
378}
379EXPORT_SYMBOL_GPL(probe_irq_mask);
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index b6506ee32a36..29bd7bec4176 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -225,7 +225,7 @@ _sclp_print:
225 ahi %r2,1 225 ahi %r2,1
226 ltr %r0,%r0 # end of string? 226 ltr %r0,%r0 # end of string?
227 jz .LfinalizemtoS4 227 jz .LfinalizemtoS4
228 chi %r0,0x15 # end of line (NL)? 228 chi %r0,0x0a # end of line (NL)?
229 jz .LfinalizemtoS4 229 jz .LfinalizemtoS4
230 stc %r0,0(%r6,%r7) # copy to mto 230 stc %r0,0(%r6,%r7) # copy to mto
231 la %r11,0(%r6,%r7) 231 la %r11,0(%r6,%r7)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 05674b669001..4f977d0d25c2 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -428,34 +428,27 @@ void smp_stop_cpu(void)
428 * This is the main routine where commands issued by other 428 * This is the main routine where commands issued by other
429 * cpus are handled. 429 * cpus are handled.
430 */ 430 */
431static void do_ext_call_interrupt(struct ext_code ext_code, 431static void smp_handle_ext_call(void)
432 unsigned int param32, unsigned long param64)
433{ 432{
434 unsigned long bits; 433 unsigned long bits;
435 int cpu;
436
437 cpu = smp_processor_id();
438 if (ext_code.code == 0x1202)
439 inc_irq_stat(IRQEXT_EXC);
440 else
441 inc_irq_stat(IRQEXT_EMS);
442 /*
443 * handle bit signal external calls
444 */
445 bits = xchg(&pcpu_devices[cpu].ec_mask, 0);
446 434
435 /* handle bit signal external calls */
436 bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
447 if (test_bit(ec_stop_cpu, &bits)) 437 if (test_bit(ec_stop_cpu, &bits))
448 smp_stop_cpu(); 438 smp_stop_cpu();
449
450 if (test_bit(ec_schedule, &bits)) 439 if (test_bit(ec_schedule, &bits))
451 scheduler_ipi(); 440 scheduler_ipi();
452
453 if (test_bit(ec_call_function, &bits)) 441 if (test_bit(ec_call_function, &bits))
454 generic_smp_call_function_interrupt(); 442 generic_smp_call_function_interrupt();
455
456 if (test_bit(ec_call_function_single, &bits)) 443 if (test_bit(ec_call_function_single, &bits))
457 generic_smp_call_function_single_interrupt(); 444 generic_smp_call_function_single_interrupt();
445}
458 446
447static void do_ext_call_interrupt(struct ext_code ext_code,
448 unsigned int param32, unsigned long param64)
449{
450 inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
451 smp_handle_ext_call();
459} 452}
460 453
461void arch_send_call_function_ipi_mask(const struct cpumask *mask) 454void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -760,6 +753,8 @@ int __cpu_disable(void)
760{ 753{
761 unsigned long cregs[16]; 754 unsigned long cregs[16];
762 755
756 /* Handle possible pending IPIs */
757 smp_handle_ext_call();
763 set_cpu_online(smp_processor_id(), false); 758 set_cpu_online(smp_processor_id(), false);
764 /* Disable pseudo page faults on this cpu. */ 759 /* Disable pseudo page faults on this cpu. */
765 pfault_fini(); 760 pfault_fini();
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
index 3cbd3b8bf311..cca388253a39 100644
--- a/arch/s390/mm/mem_detect.c
+++ b/arch/s390/mm/mem_detect.c
@@ -123,7 +123,8 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
123 continue; 123 continue;
124 } else if ((addr <= chunk->addr) && 124 } else if ((addr <= chunk->addr) &&
125 (addr + size >= chunk->addr + chunk->size)) { 125 (addr + size >= chunk->addr + chunk->size)) {
126 memset(chunk, 0 , sizeof(*chunk)); 126 memmove(chunk, chunk + 1, (MEMORY_CHUNKS-i-1) * sizeof(*chunk));
127 memset(&mem_chunk[MEMORY_CHUNKS-1], 0, sizeof(*chunk));
127 } else if (addr + size < chunk->addr + chunk->size) { 128 } else if (addr + size < chunk->addr + chunk->size) {
128 chunk->size = chunk->addr + chunk->size - addr - size; 129 chunk->size = chunk->addr + chunk->size - addr - size;
129 chunk->addr = addr + size; 130 chunk->addr = addr + size;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 18dc417aaf79..a938b548f07e 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -492,7 +492,7 @@ static int gmap_connect_pgtable(unsigned long address, unsigned long segment,
492 mp = (struct gmap_pgtable *) page->index; 492 mp = (struct gmap_pgtable *) page->index;
493 rmap->gmap = gmap; 493 rmap->gmap = gmap;
494 rmap->entry = segment_ptr; 494 rmap->entry = segment_ptr;
495 rmap->vmaddr = address; 495 rmap->vmaddr = address & PMD_MASK;
496 spin_lock(&mm->page_table_lock); 496 spin_lock(&mm->page_table_lock);
497 if (*segment_ptr == segment) { 497 if (*segment_ptr == segment) {
498 list_add(&rmap->list, &mp->mapper); 498 list_add(&rmap->list, &mp->mapper);
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index e6f15b5d8b7d..f1e5be85d592 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -302,15 +302,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
302 return rc; 302 return rc;
303} 303}
304 304
305void synchronize_irq(unsigned int irq)
306{
307 /*
308 * Not needed, the handler is protected by a lock and IRQs that occur
309 * after the handler is deleted are just NOPs.
310 */
311}
312EXPORT_SYMBOL_GPL(synchronize_irq);
313
314void enable_irq(unsigned int irq) 305void enable_irq(unsigned int irq)
315{ 306{
316 struct msi_desc *msi = irq_get_msi_desc(irq); 307 struct msi_desc *msi = irq_get_msi_desc(irq);
@@ -327,30 +318,6 @@ void disable_irq(unsigned int irq)
327} 318}
328EXPORT_SYMBOL_GPL(disable_irq); 319EXPORT_SYMBOL_GPL(disable_irq);
329 320
330void disable_irq_nosync(unsigned int irq)
331{
332 disable_irq(irq);
333}
334EXPORT_SYMBOL_GPL(disable_irq_nosync);
335
336unsigned long probe_irq_on(void)
337{
338 return 0;
339}
340EXPORT_SYMBOL_GPL(probe_irq_on);
341
342int probe_irq_off(unsigned long val)
343{
344 return 0;
345}
346EXPORT_SYMBOL_GPL(probe_irq_off);
347
348unsigned int probe_irq_mask(unsigned long val)
349{
350 return val;
351}
352EXPORT_SYMBOL_GPL(probe_irq_mask);
353
354void pcibios_fixup_bus(struct pci_bus *bus) 321void pcibios_fixup_bus(struct pci_bus *bus)
355{ 322{
356} 323}
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index ff18e3cfb6b1..7e4a97fbded4 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -6,6 +6,7 @@ generic-y += cputime.h
6generic-y += div64.h 6generic-y += div64.h
7generic-y += emergency-restart.h 7generic-y += emergency-restart.h
8generic-y += exec.h 8generic-y += exec.h
9generic-y += linkage.h
9generic-y += local64.h 10generic-y += local64.h
10generic-y += mutex.h 11generic-y += mutex.h
11generic-y += irq_regs.h 12generic-y += irq_regs.h
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index 15a716934e4d..b836e9297f2a 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -135,7 +135,7 @@ static inline int sparc_leon3_cpuid(void)
135 135
136#ifdef CONFIG_SMP 136#ifdef CONFIG_SMP
137# define LEON3_IRQ_IPI_DEFAULT 13 137# define LEON3_IRQ_IPI_DEFAULT 13
138# define LEON3_IRQ_TICKER (leon3_ticker_irq) 138# define LEON3_IRQ_TICKER (leon3_gptimer_irq)
139# define LEON3_IRQ_CROSS_CALL 15 139# define LEON3_IRQ_CROSS_CALL 15
140#endif 140#endif
141 141
diff --git a/arch/sparc/include/asm/leon_amba.h b/arch/sparc/include/asm/leon_amba.h
index f3034eddf468..24ec48c3ff90 100644
--- a/arch/sparc/include/asm/leon_amba.h
+++ b/arch/sparc/include/asm/leon_amba.h
@@ -47,6 +47,7 @@ struct amba_prom_registers {
47#define LEON3_GPTIMER_LD 4 47#define LEON3_GPTIMER_LD 4
48#define LEON3_GPTIMER_IRQEN 8 48#define LEON3_GPTIMER_IRQEN 8
49#define LEON3_GPTIMER_SEPIRQ 8 49#define LEON3_GPTIMER_SEPIRQ 8
50#define LEON3_GPTIMER_TIMERS 0x7
50 51
51#define LEON23_REG_TIMER_CONTROL_EN 0x00000001 /* 1 = enable counting */ 52#define LEON23_REG_TIMER_CONTROL_EN 0x00000001 /* 1 = enable counting */
52/* 0 = hold scalar and counter */ 53/* 0 = hold scalar and counter */
diff --git a/arch/sparc/include/asm/linkage.h b/arch/sparc/include/asm/linkage.h
deleted file mode 100644
index 291c2d01c44f..000000000000
--- a/arch/sparc/include/asm/linkage.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_LINKAGE_H
2#define __ASM_LINKAGE_H
3
4/* Nothing to see here... */
5
6#endif
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 75bb608c423e..5ef48dab5636 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -843,7 +843,8 @@ void ldom_reboot(const char *boot_command)
843 unsigned long len; 843 unsigned long len;
844 844
845 strcpy(full_boot_str, "boot "); 845 strcpy(full_boot_str, "boot ");
846 strcpy(full_boot_str + strlen("boot "), boot_command); 846 strlcpy(full_boot_str + strlen("boot "), boot_command,
847 sizeof(full_boot_str + strlen("boot ")));
847 len = strlen(full_boot_str); 848 len = strlen(full_boot_str);
848 849
849 if (reboot_data_supported) { 850 if (reboot_data_supported) {
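
One detail worth flagging in the new strlcpy call above: sizeof(full_boot_str + strlen("boot ")) applies sizeof to a pointer expression, so it evaluates to the size of a char pointer rather than to the space left in the buffer. A small userspace sketch of the difference, assuming full_boot_str is a fixed-size array (its declaration is not part of this hunk; the 256-byte size below is purely hypothetical):

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for the kernel buffer; the real declaration
     * is not shown in this hunk. */
    static char full_boot_str[256] = "boot ";

    int main(void)
    {
            /* sizeof applied to (array + offset) sees a char *, so this
             * prints the pointer size (typically 8), not the capacity. */
            printf("%zu\n", sizeof(full_boot_str + strlen("boot ")));

            /* The bound the call presumably intends: the capacity left
             * after the "boot " prefix. */
            printf("%zu\n", sizeof(full_boot_str) - strlen("boot "));
            return 0;
    }
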
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 7c0231dabe44..b7c68976cbc7 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -38,7 +38,6 @@ static DEFINE_SPINLOCK(leon_irq_lock);
38 38
39unsigned long leon3_gptimer_irq; /* interrupt controller irq number */ 39unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
40unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */ 40unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
41int leon3_ticker_irq; /* Timer ticker IRQ */
42unsigned int sparc_leon_eirq; 41unsigned int sparc_leon_eirq;
43#define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu]) 42#define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
44#define LEON_IACK (&leon3_irqctrl_regs->iclear) 43#define LEON_IACK (&leon3_irqctrl_regs->iclear)
@@ -278,6 +277,9 @@ irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused)
278 277
279 leon_clear_profile_irq(cpu); 278 leon_clear_profile_irq(cpu);
280 279
280 if (cpu == boot_cpu_id)
281 timer_interrupt(irq, NULL);
282
281 ce = &per_cpu(sparc32_clockevent, cpu); 283 ce = &per_cpu(sparc32_clockevent, cpu);
282 284
283 irq_enter(); 285 irq_enter();
@@ -299,6 +301,7 @@ void __init leon_init_timers(void)
299 int icsel; 301 int icsel;
300 int ampopts; 302 int ampopts;
301 int err; 303 int err;
304 u32 config;
302 305
303 sparc_config.get_cycles_offset = leon_cycles_offset; 306 sparc_config.get_cycles_offset = leon_cycles_offset;
304 sparc_config.cs_period = 1000000 / HZ; 307 sparc_config.cs_period = 1000000 / HZ;
@@ -377,23 +380,6 @@ void __init leon_init_timers(void)
377 LEON3_BYPASS_STORE_PA( 380 LEON3_BYPASS_STORE_PA(
378 &leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0); 381 &leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0);
379 382
380#ifdef CONFIG_SMP
381 leon3_ticker_irq = leon3_gptimer_irq + 1 + leon3_gptimer_idx;
382
383 if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) &
384 (1<<LEON3_GPTIMER_SEPIRQ))) {
385 printk(KERN_ERR "timer not configured with separate irqs\n");
386 BUG();
387 }
388
389 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].val,
390 0);
391 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].rld,
392 (((1000000/HZ) - 1)));
393 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
394 0);
395#endif
396
397 /* 383 /*
398 * The IRQ controller may (if implemented) consist of multiple 384 * The IRQ controller may (if implemented) consist of multiple
399 * IRQ controllers, each mapped on a 4Kb boundary. 385 * IRQ controllers, each mapped on a 4Kb boundary.
@@ -416,13 +402,6 @@ void __init leon_init_timers(void)
416 if (eirq != 0) 402 if (eirq != 0)
417 leon_eirq_setup(eirq); 403 leon_eirq_setup(eirq);
418 404
419 irq = _leon_build_device_irq(NULL, leon3_gptimer_irq+leon3_gptimer_idx);
420 err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
421 if (err) {
422 printk(KERN_ERR "unable to attach timer IRQ%d\n", irq);
423 prom_halt();
424 }
425
426#ifdef CONFIG_SMP 405#ifdef CONFIG_SMP
427 { 406 {
428 unsigned long flags; 407 unsigned long flags;
@@ -439,30 +418,31 @@ void __init leon_init_timers(void)
439 } 418 }
440#endif 419#endif
441 420
442 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 421 config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config);
443 LEON3_GPTIMER_EN | 422 if (config & (1 << LEON3_GPTIMER_SEPIRQ))
444 LEON3_GPTIMER_RL | 423 leon3_gptimer_irq += leon3_gptimer_idx;
445 LEON3_GPTIMER_LD | 424 else if ((config & LEON3_GPTIMER_TIMERS) > 1)
446 LEON3_GPTIMER_IRQEN); 425 pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n");
447 426
448#ifdef CONFIG_SMP 427#ifdef CONFIG_SMP
449 /* Install per-cpu IRQ handler for broadcasted ticker */ 428 /* Install per-cpu IRQ handler for broadcasted ticker */
450 irq = leon_build_device_irq(leon3_ticker_irq, handle_percpu_irq, 429 irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq,
451 "per-cpu", 0); 430 "per-cpu", 0);
452 err = request_irq(irq, leon_percpu_timer_ce_interrupt, 431 err = request_irq(irq, leon_percpu_timer_ce_interrupt,
453 IRQF_PERCPU | IRQF_TIMER, "ticker", 432 IRQF_PERCPU | IRQF_TIMER, "timer", NULL);
454 NULL); 433#else
434 irq = _leon_build_device_irq(NULL, leon3_gptimer_irq);
435 err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
436#endif
455 if (err) { 437 if (err) {
456 printk(KERN_ERR "unable to attach ticker IRQ%d\n", irq); 438 pr_err("Unable to attach timer IRQ%d\n", irq);
457 prom_halt(); 439 prom_halt();
458 } 440 }
459 441 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
460 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
461 LEON3_GPTIMER_EN | 442 LEON3_GPTIMER_EN |
462 LEON3_GPTIMER_RL | 443 LEON3_GPTIMER_RL |
463 LEON3_GPTIMER_LD | 444 LEON3_GPTIMER_LD |
464 LEON3_GPTIMER_IRQEN); 445 LEON3_GPTIMER_IRQEN);
465#endif
466 return; 446 return;
467bad: 447bad:
468 printk(KERN_ERR "No Timer/irqctrl found\n"); 448 printk(KERN_ERR "No Timer/irqctrl found\n");
diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c
index 7739a54315e2..6df26e37f879 100644
--- a/arch/sparc/kernel/leon_pci_grpci1.c
+++ b/arch/sparc/kernel/leon_pci_grpci1.c
@@ -536,11 +536,9 @@ static int grpci1_of_probe(struct platform_device *ofdev)
536 536
537 /* find device register base address */ 537 /* find device register base address */
538 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); 538 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
539 regs = devm_request_and_ioremap(&ofdev->dev, res); 539 regs = devm_ioremap_resource(&ofdev->dev, res);
540 if (!regs) { 540 if (IS_ERR(regs))
541 dev_err(&ofdev->dev, "io-regs mapping failed\n"); 541 return PTR_ERR(regs);
542 return -EADDRNOTAVAIL;
543 }
544 542
545 /* 543 /*
546 * check that we're in Host Slot and that we can act as a Host Bridge 544 * check that we're in Host Slot and that we can act as a Host Bridge
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
index bdf53d9a8d46..b0b3967a2dd2 100644
--- a/arch/sparc/kernel/leon_pmc.c
+++ b/arch/sparc/kernel/leon_pmc.c
@@ -47,6 +47,10 @@ void pmc_leon_idle_fixup(void)
47 * MMU does not get a TLB miss here by using the MMU BYPASS ASI. 47 * MMU does not get a TLB miss here by using the MMU BYPASS ASI.
48 */ 48 */
49 register unsigned int address = (unsigned int)leon3_irqctrl_regs; 49 register unsigned int address = (unsigned int)leon3_irqctrl_regs;
50
51 /* Interrupts need to be enabled to not hang the CPU */
52 local_irq_enable();
53
50 __asm__ __volatile__ ( 54 __asm__ __volatile__ (
51 "wr %%g0, %%asr19\n" 55 "wr %%g0, %%asr19\n"
52 "lda [%0] %1, %%g0\n" 56 "lda [%0] %1, %%g0\n"
@@ -60,6 +64,9 @@ void pmc_leon_idle_fixup(void)
60 */ 64 */
61void pmc_leon_idle(void) 65void pmc_leon_idle(void)
62{ 66{
67 /* Interrupts need to be enabled to not hang the CPU */
68 local_irq_enable();
69
63 /* For systems without power-down, this will be no-op */ 70 /* For systems without power-down, this will be no-op */
64 __asm__ __volatile__ ("wr %g0, %asr19\n\t"); 71 __asm__ __volatile__ ("wr %g0, %asr19\n\t");
65} 72}
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 9f20566b0773..79cc0d1a477d 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -54,6 +54,7 @@ EXPORT_SYMBOL(of_set_property_mutex);
54int of_set_property(struct device_node *dp, const char *name, void *val, int len) 54int of_set_property(struct device_node *dp, const char *name, void *val, int len)
55{ 55{
56 struct property **prevp; 56 struct property **prevp;
57 unsigned long flags;
57 void *new_val; 58 void *new_val;
58 int err; 59 int err;
59 60
@@ -64,7 +65,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
64 err = -ENODEV; 65 err = -ENODEV;
65 66
66 mutex_lock(&of_set_property_mutex); 67 mutex_lock(&of_set_property_mutex);
67 raw_spin_lock(&devtree_lock); 68 raw_spin_lock_irqsave(&devtree_lock, flags);
68 prevp = &dp->properties; 69 prevp = &dp->properties;
69 while (*prevp) { 70 while (*prevp) {
70 struct property *prop = *prevp; 71 struct property *prop = *prevp;
@@ -91,7 +92,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
91 } 92 }
92 prevp = &(*prevp)->next; 93 prevp = &(*prevp)->next;
93 } 94 }
94 raw_spin_unlock(&devtree_lock); 95 raw_spin_unlock_irqrestore(&devtree_lock, flags);
95 mutex_unlock(&of_set_property_mutex); 96 mutex_unlock(&of_set_property_mutex);
96 97
97 /* XXX Upate procfs if necessary... */ 98 /* XXX Upate procfs if necessary... */
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 38bf80a22f02..1434526970a6 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -304,7 +304,7 @@ void __init setup_arch(char **cmdline_p)
304 304
305 /* Initialize PROM console and command line. */ 305 /* Initialize PROM console and command line. */
306 *cmdline_p = prom_getbootargs(); 306 *cmdline_p = prom_getbootargs();
307 strcpy(boot_command_line, *cmdline_p); 307 strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
308 parse_early_param(); 308 parse_early_param();
309 309
310 boot_flags_init(*cmdline_p); 310 boot_flags_init(*cmdline_p);
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 88a127b9c69e..13785547e435 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -555,7 +555,7 @@ void __init setup_arch(char **cmdline_p)
555{ 555{
556 /* Initialize PROM console and command line. */ 556 /* Initialize PROM console and command line. */
557 *cmdline_p = prom_getbootargs(); 557 *cmdline_p = prom_getbootargs();
558 strcpy(boot_command_line, *cmdline_p); 558 strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
559 parse_early_param(); 559 parse_early_param();
560 560
561 boot_flags_init(*cmdline_p); 561 boot_flags_init(*cmdline_p);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index a7171997adfd..04fd55a6e461 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1098,7 +1098,14 @@ static int __init grab_mblocks(struct mdesc_handle *md)
1098 m->size = *val; 1098 m->size = *val;
1099 val = mdesc_get_property(md, node, 1099 val = mdesc_get_property(md, node,
1100 "address-congruence-offset", NULL); 1100 "address-congruence-offset", NULL);
1101 m->offset = *val; 1101
1102 /* The address-congruence-offset property is optional.
1103	 * Explicitly zero it to identify this.
1104 */
1105 if (val)
1106 m->offset = *val;
1107 else
1108 m->offset = 0UL;
1102 1109
1103 numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n", 1110 numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
1104 count - 1, m->base, m->size, m->offset); 1111 count - 1, m->base, m->size, m->offset);
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 83d89bcb44af..37e7bc4c95b3 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -85,8 +85,8 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
85 } 85 }
86 86
87 if (!tb->active) { 87 if (!tb->active) {
88 global_flush_tlb_page(mm, vaddr);
89 flush_tsb_user_page(mm, vaddr); 88 flush_tsb_user_page(mm, vaddr);
89 global_flush_tlb_page(mm, vaddr);
90 goto out; 90 goto out;
91 } 91 }
92 92
diff --git a/arch/sparc/prom/bootstr_32.c b/arch/sparc/prom/bootstr_32.c
index f5ec32e0d419..d2b49d2365e7 100644
--- a/arch/sparc/prom/bootstr_32.c
+++ b/arch/sparc/prom/bootstr_32.c
@@ -23,23 +23,25 @@ prom_getbootargs(void)
23 return barg_buf; 23 return barg_buf;
24 } 24 }
25 25
26 switch(prom_vers) { 26 switch (prom_vers) {
27 case PROM_V0: 27 case PROM_V0:
28 cp = barg_buf; 28 cp = barg_buf;
29 /* Start from 1 and go over fd(0,0,0)kernel */ 29 /* Start from 1 and go over fd(0,0,0)kernel */
30 for(iter = 1; iter < 8; iter++) { 30 for (iter = 1; iter < 8; iter++) {
31 arg = (*(romvec->pv_v0bootargs))->argv[iter]; 31 arg = (*(romvec->pv_v0bootargs))->argv[iter];
32 if (arg == NULL) 32 if (arg == NULL)
33 break; 33 break;
34 while(*arg != 0) { 34 while (*arg != 0) {
35 /* Leave place for space and null. */ 35 /* Leave place for space and null. */
36 if(cp >= barg_buf + BARG_LEN-2){ 36 if (cp >= barg_buf + BARG_LEN - 2)
37 /* We might issue a warning here. */ 37 /* We might issue a warning here. */
38 break; 38 break;
39 }
40 *cp++ = *arg++; 39 *cp++ = *arg++;
41 } 40 }
42 *cp++ = ' '; 41 *cp++ = ' ';
42 if (cp >= barg_buf + BARG_LEN - 1)
43 /* We might issue a warning here. */
44 break;
43 } 45 }
44 *cp = 0; 46 *cp = 0;
45 break; 47 break;
diff --git a/arch/sparc/prom/tree_64.c b/arch/sparc/prom/tree_64.c
index 92204c3800b5..bd1b2a3ac34e 100644
--- a/arch/sparc/prom/tree_64.c
+++ b/arch/sparc/prom/tree_64.c
@@ -39,7 +39,7 @@ inline phandle __prom_getchild(phandle node)
39 return prom_node_to_node("child", node); 39 return prom_node_to_node("child", node);
40} 40}
41 41
42inline phandle prom_getchild(phandle node) 42phandle prom_getchild(phandle node)
43{ 43{
44 phandle cnode; 44 phandle cnode;
45 45
@@ -72,7 +72,7 @@ inline phandle __prom_getsibling(phandle node)
72 return prom_node_to_node(prom_peer_name, node); 72 return prom_node_to_node(prom_peer_name, node);
73} 73}
74 74
75inline phandle prom_getsibling(phandle node) 75phandle prom_getsibling(phandle node)
76{ 76{
77 phandle sibnode; 77 phandle sibnode;
78 78
@@ -89,7 +89,7 @@ EXPORT_SYMBOL(prom_getsibling);
89/* Return the length in bytes of property 'prop' at node 'node'. 89/* Return the length in bytes of property 'prop' at node 'node'.
90 * Return -1 on error. 90 * Return -1 on error.
91 */ 91 */
92inline int prom_getproplen(phandle node, const char *prop) 92int prom_getproplen(phandle node, const char *prop)
93{ 93{
94 unsigned long args[6]; 94 unsigned long args[6];
95 95
@@ -113,8 +113,8 @@ EXPORT_SYMBOL(prom_getproplen);
113 * 'buffer' which has a size of 'bufsize'. If the acquisition 113 * 'buffer' which has a size of 'bufsize'. If the acquisition
114 * was successful the length will be returned, else -1 is returned. 114 * was successful the length will be returned, else -1 is returned.
115 */ 115 */
116inline int prom_getproperty(phandle node, const char *prop, 116int prom_getproperty(phandle node, const char *prop,
117 char *buffer, int bufsize) 117 char *buffer, int bufsize)
118{ 118{
119 unsigned long args[8]; 119 unsigned long args[8];
120 int plen; 120 int plen;
@@ -141,7 +141,7 @@ EXPORT_SYMBOL(prom_getproperty);
141/* Acquire an integer property and return its value. Returns -1 141/* Acquire an integer property and return its value. Returns -1
142 * on failure. 142 * on failure.
143 */ 143 */
144inline int prom_getint(phandle node, const char *prop) 144int prom_getint(phandle node, const char *prop)
145{ 145{
146 int intprop; 146 int intprop;
147 147
@@ -235,7 +235,7 @@ static const char *prom_nextprop_name = "nextprop";
235/* Return the first property type for node 'node'. 235/* Return the first property type for node 'node'.
236 * buffer should be at least 32B in length 236 * buffer should be at least 32B in length
237 */ 237 */
238inline char *prom_firstprop(phandle node, char *buffer) 238char *prom_firstprop(phandle node, char *buffer)
239{ 239{
240 unsigned long args[7]; 240 unsigned long args[7];
241 241
@@ -261,7 +261,7 @@ EXPORT_SYMBOL(prom_firstprop);
261 * at node 'node' . Returns NULL string if no more 261 * at node 'node' . Returns NULL string if no more
262 * property types for this node. 262 * property types for this node.
263 */ 263 */
264inline char *prom_nextprop(phandle node, const char *oprop, char *buffer) 264char *prom_nextprop(phandle node, const char *oprop, char *buffer)
265{ 265{
266 unsigned long args[7]; 266 unsigned long args[7];
267 char buf[32]; 267 char buf[32];
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index 4385cb6fa00a..a93b02a25222 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -84,4 +84,6 @@ uint64_t __ashrdi3(uint64_t, unsigned int);
84EXPORT_SYMBOL(__ashrdi3); 84EXPORT_SYMBOL(__ashrdi3);
85uint64_t __ashldi3(uint64_t, unsigned int); 85uint64_t __ashldi3(uint64_t, unsigned int);
86EXPORT_SYMBOL(__ashldi3); 86EXPORT_SYMBOL(__ashldi3);
87int __ffsdi2(uint64_t);
88EXPORT_SYMBOL(__ffsdi2);
87#endif 89#endif
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index d7d21851e60c..3df3bd544492 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -147,7 +147,7 @@ void mconsole_proc(struct mc_request *req)
147 } 147 }
148 148
149 do { 149 do {
150 loff_t pos; 150 loff_t pos = file->f_pos;
151 mm_segment_t old_fs = get_fs(); 151 mm_segment_t old_fs = get_fs();
152 set_fs(KERNEL_DS); 152 set_fs(KERNEL_DS);
153 len = vfs_read(file, buf, PAGE_SIZE - 1, &pos); 153 len = vfs_read(file, buf, PAGE_SIZE - 1, &pos);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 685692c94f05..fe120da25625 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2265,6 +2265,7 @@ source "fs/Kconfig.binfmt"
2265config IA32_EMULATION 2265config IA32_EMULATION
2266 bool "IA32 Emulation" 2266 bool "IA32 Emulation"
2267 depends on X86_64 2267 depends on X86_64
2268 select BINFMT_ELF
2268 select COMPAT_BINFMT_ELF 2269 select COMPAT_BINFMT_ELF
2269 select HAVE_UID16 2270 select HAVE_UID16
2270 ---help--- 2271 ---help---
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 35ee62fccf98..c205035a6b96 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -251,51 +251,6 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size)
251 *size = len; 251 *size = len;
252} 252}
253 253
254static efi_status_t setup_efi_vars(struct boot_params *params)
255{
256 struct setup_data *data;
257 struct efi_var_bootdata *efidata;
258 u64 store_size, remaining_size, var_size;
259 efi_status_t status;
260
261 if (sys_table->runtime->hdr.revision < EFI_2_00_SYSTEM_TABLE_REVISION)
262 return EFI_UNSUPPORTED;
263
264 data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
265
266 while (data && data->next)
267 data = (struct setup_data *)(unsigned long)data->next;
268
269 status = efi_call_phys4((void *)sys_table->runtime->query_variable_info,
270 EFI_VARIABLE_NON_VOLATILE |
271 EFI_VARIABLE_BOOTSERVICE_ACCESS |
272 EFI_VARIABLE_RUNTIME_ACCESS, &store_size,
273 &remaining_size, &var_size);
274
275 if (status != EFI_SUCCESS)
276 return status;
277
278 status = efi_call_phys3(sys_table->boottime->allocate_pool,
279 EFI_LOADER_DATA, sizeof(*efidata), &efidata);
280
281 if (status != EFI_SUCCESS)
282 return status;
283
284 efidata->data.type = SETUP_EFI_VARS;
285 efidata->data.len = sizeof(struct efi_var_bootdata) -
286 sizeof(struct setup_data);
287 efidata->data.next = 0;
288 efidata->store_size = store_size;
289 efidata->remaining_size = remaining_size;
290 efidata->max_var_size = var_size;
291
292 if (data)
293 data->next = (unsigned long)efidata;
294 else
295 params->hdr.setup_data = (unsigned long)efidata;
296
297}
298
299static efi_status_t setup_efi_pci(struct boot_params *params) 254static efi_status_t setup_efi_pci(struct boot_params *params)
300{ 255{
301 efi_pci_io_protocol *pci; 256 efi_pci_io_protocol *pci;
@@ -1202,8 +1157,6 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
1202 1157
1203 setup_graphics(boot_params); 1158 setup_graphics(boot_params);
1204 1159
1205 setup_efi_vars(boot_params);
1206
1207 setup_efi_pci(boot_params); 1160 setup_efi_pci(boot_params);
1208 1161
1209 status = efi_call_phys3(sys_table->boottime->allocate_pool, 1162 status = efi_call_phys3(sys_table->boottime->allocate_pool,
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 62fe22cd4cba..477e9d75149b 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -2681,56 +2681,68 @@ ENTRY(aesni_xts_crypt8)
2681 addq %rcx, KEYP 2681 addq %rcx, KEYP
2682 2682
2683 movdqa IV, STATE1 2683 movdqa IV, STATE1
2684 pxor 0x00(INP), STATE1 2684 movdqu 0x00(INP), INC
2685 pxor INC, STATE1
2685 movdqu IV, 0x00(OUTP) 2686 movdqu IV, 0x00(OUTP)
2686 2687
2687 _aesni_gf128mul_x_ble() 2688 _aesni_gf128mul_x_ble()
2688 movdqa IV, STATE2 2689 movdqa IV, STATE2
2689 pxor 0x10(INP), STATE2 2690 movdqu 0x10(INP), INC
2691 pxor INC, STATE2
2690 movdqu IV, 0x10(OUTP) 2692 movdqu IV, 0x10(OUTP)
2691 2693
2692 _aesni_gf128mul_x_ble() 2694 _aesni_gf128mul_x_ble()
2693 movdqa IV, STATE3 2695 movdqa IV, STATE3
2694 pxor 0x20(INP), STATE3 2696 movdqu 0x20(INP), INC
2697 pxor INC, STATE3
2695 movdqu IV, 0x20(OUTP) 2698 movdqu IV, 0x20(OUTP)
2696 2699
2697 _aesni_gf128mul_x_ble() 2700 _aesni_gf128mul_x_ble()
2698 movdqa IV, STATE4 2701 movdqa IV, STATE4
2699 pxor 0x30(INP), STATE4 2702 movdqu 0x30(INP), INC
2703 pxor INC, STATE4
2700 movdqu IV, 0x30(OUTP) 2704 movdqu IV, 0x30(OUTP)
2701 2705
2702 call *%r11 2706 call *%r11
2703 2707
2704 pxor 0x00(OUTP), STATE1 2708 movdqu 0x00(OUTP), INC
2709 pxor INC, STATE1
2705 movdqu STATE1, 0x00(OUTP) 2710 movdqu STATE1, 0x00(OUTP)
2706 2711
2707 _aesni_gf128mul_x_ble() 2712 _aesni_gf128mul_x_ble()
2708 movdqa IV, STATE1 2713 movdqa IV, STATE1
2709 pxor 0x40(INP), STATE1 2714 movdqu 0x40(INP), INC
2715 pxor INC, STATE1
2710 movdqu IV, 0x40(OUTP) 2716 movdqu IV, 0x40(OUTP)
2711 2717
2712 pxor 0x10(OUTP), STATE2 2718 movdqu 0x10(OUTP), INC
2719 pxor INC, STATE2
2713 movdqu STATE2, 0x10(OUTP) 2720 movdqu STATE2, 0x10(OUTP)
2714 2721
2715 _aesni_gf128mul_x_ble() 2722 _aesni_gf128mul_x_ble()
2716 movdqa IV, STATE2 2723 movdqa IV, STATE2
2717 pxor 0x50(INP), STATE2 2724 movdqu 0x50(INP), INC
2725 pxor INC, STATE2
2718 movdqu IV, 0x50(OUTP) 2726 movdqu IV, 0x50(OUTP)
2719 2727
2720 pxor 0x20(OUTP), STATE3 2728 movdqu 0x20(OUTP), INC
2729 pxor INC, STATE3
2721 movdqu STATE3, 0x20(OUTP) 2730 movdqu STATE3, 0x20(OUTP)
2722 2731
2723 _aesni_gf128mul_x_ble() 2732 _aesni_gf128mul_x_ble()
2724 movdqa IV, STATE3 2733 movdqa IV, STATE3
2725 pxor 0x60(INP), STATE3 2734 movdqu 0x60(INP), INC
2735 pxor INC, STATE3
2726 movdqu IV, 0x60(OUTP) 2736 movdqu IV, 0x60(OUTP)
2727 2737
2728 pxor 0x30(OUTP), STATE4 2738 movdqu 0x30(OUTP), INC
2739 pxor INC, STATE4
2729 movdqu STATE4, 0x30(OUTP) 2740 movdqu STATE4, 0x30(OUTP)
2730 2741
2731 _aesni_gf128mul_x_ble() 2742 _aesni_gf128mul_x_ble()
2732 movdqa IV, STATE4 2743 movdqa IV, STATE4
2733 pxor 0x70(INP), STATE4 2744 movdqu 0x70(INP), INC
2745 pxor INC, STATE4
2734 movdqu IV, 0x70(OUTP) 2746 movdqu IV, 0x70(OUTP)
2735 2747
2736 _aesni_gf128mul_x_ble() 2748 _aesni_gf128mul_x_ble()
@@ -2738,16 +2750,20 @@ ENTRY(aesni_xts_crypt8)
2738 2750
2739 call *%r11 2751 call *%r11
2740 2752
2741 pxor 0x40(OUTP), STATE1 2753 movdqu 0x40(OUTP), INC
2754 pxor INC, STATE1
2742 movdqu STATE1, 0x40(OUTP) 2755 movdqu STATE1, 0x40(OUTP)
2743 2756
2744 pxor 0x50(OUTP), STATE2 2757 movdqu 0x50(OUTP), INC
2758 pxor INC, STATE2
2745 movdqu STATE2, 0x50(OUTP) 2759 movdqu STATE2, 0x50(OUTP)
2746 2760
2747 pxor 0x60(OUTP), STATE3 2761 movdqu 0x60(OUTP), INC
2762 pxor INC, STATE3
2748 movdqu STATE3, 0x60(OUTP) 2763 movdqu STATE3, 0x60(OUTP)
2749 2764
2750 pxor 0x70(OUTP), STATE4 2765 movdqu 0x70(OUTP), INC
2766 pxor INC, STATE4
2751 movdqu STATE4, 0x70(OUTP) 2767 movdqu STATE4, 0x70(OUTP)
2752 2768
2753 ret 2769 ret
diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S
index 94c27df8a549..f247304299a2 100644
--- a/arch/x86/crypto/crc32-pclmul_asm.S
+++ b/arch/x86/crypto/crc32-pclmul_asm.S
@@ -240,7 +240,7 @@ fold_64:
240 pand %xmm3, %xmm1 240 pand %xmm3, %xmm1
241 PCLMULQDQ 0x00, CONSTANT, %xmm1 241 PCLMULQDQ 0x00, CONSTANT, %xmm1
242 pxor %xmm2, %xmm1 242 pxor %xmm2, %xmm1
243 pextrd $0x01, %xmm1, %eax 243 PEXTRD 0x01, %xmm1, %eax
244 244
245 ret 245 ret
246ENDPROC(crc32_pclmul_le_16) 246ENDPROC(crc32_pclmul_le_16)
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 805078e08013..52ff81cce008 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -192,7 +192,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
192 /* struct user */ 192 /* struct user */
193 DUMP_WRITE(&dump, sizeof(dump)); 193 DUMP_WRITE(&dump, sizeof(dump));
194 /* Now dump all of the user data. Include malloced stuff as well */ 194 /* Now dump all of the user data. Include malloced stuff as well */
195 DUMP_SEEK(PAGE_SIZE); 195 DUMP_SEEK(PAGE_SIZE - sizeof(dump));
196 /* now we start writing out the user space info */ 196 /* now we start writing out the user space info */
197 set_fs(USER_DS); 197 set_fs(USER_DS);
198 /* Dump the data area */ 198 /* Dump the data area */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 2fb5d5884e23..60c89f30c727 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -102,13 +102,6 @@ extern void efi_call_phys_epilog(void);
102extern void efi_unmap_memmap(void); 102extern void efi_unmap_memmap(void);
103extern void efi_memory_uc(u64 addr, unsigned long size); 103extern void efi_memory_uc(u64 addr, unsigned long size);
104 104
105struct efi_var_bootdata {
106 struct setup_data data;
107 u64 store_size;
108 u64 remaining_size;
109 u64 max_var_size;
110};
111
112#ifdef CONFIG_EFI 105#ifdef CONFIG_EFI
113 106
114static inline bool efi_is_native(void) 107static inline bool efi_is_native(void)
diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h
index 280bf7fb6aba..3e115273ed88 100644
--- a/arch/x86/include/asm/inst.h
+++ b/arch/x86/include/asm/inst.h
@@ -9,12 +9,68 @@
9 9
10#define REG_NUM_INVALID 100 10#define REG_NUM_INVALID 100
11 11
12#define REG_TYPE_R64 0 12#define REG_TYPE_R32 0
13#define REG_TYPE_XMM 1 13#define REG_TYPE_R64 1
14#define REG_TYPE_XMM 2
14#define REG_TYPE_INVALID 100 15#define REG_TYPE_INVALID 100
15 16
17 .macro R32_NUM opd r32
18 \opd = REG_NUM_INVALID
19 .ifc \r32,%eax
20 \opd = 0
21 .endif
22 .ifc \r32,%ecx
23 \opd = 1
24 .endif
25 .ifc \r32,%edx
26 \opd = 2
27 .endif
28 .ifc \r32,%ebx
29 \opd = 3
30 .endif
31 .ifc \r32,%esp
32 \opd = 4
33 .endif
34 .ifc \r32,%ebp
35 \opd = 5
36 .endif
37 .ifc \r32,%esi
38 \opd = 6
39 .endif
40 .ifc \r32,%edi
41 \opd = 7
42 .endif
43#ifdef CONFIG_X86_64
44 .ifc \r32,%r8d
45 \opd = 8
46 .endif
47 .ifc \r32,%r9d
48 \opd = 9
49 .endif
50 .ifc \r32,%r10d
51 \opd = 10
52 .endif
53 .ifc \r32,%r11d
54 \opd = 11
55 .endif
56 .ifc \r32,%r12d
57 \opd = 12
58 .endif
59 .ifc \r32,%r13d
60 \opd = 13
61 .endif
62 .ifc \r32,%r14d
63 \opd = 14
64 .endif
65 .ifc \r32,%r15d
66 \opd = 15
67 .endif
68#endif
69 .endm
70
16 .macro R64_NUM opd r64 71 .macro R64_NUM opd r64
17 \opd = REG_NUM_INVALID 72 \opd = REG_NUM_INVALID
73#ifdef CONFIG_X86_64
18 .ifc \r64,%rax 74 .ifc \r64,%rax
19 \opd = 0 75 \opd = 0
20 .endif 76 .endif
@@ -63,6 +119,7 @@
63 .ifc \r64,%r15 119 .ifc \r64,%r15
64 \opd = 15 120 \opd = 15
65 .endif 121 .endif
122#endif
66 .endm 123 .endm
67 124
68 .macro XMM_NUM opd xmm 125 .macro XMM_NUM opd xmm
@@ -118,10 +175,13 @@
118 .endm 175 .endm
119 176
120 .macro REG_TYPE type reg 177 .macro REG_TYPE type reg
178 R32_NUM reg_type_r32 \reg
121 R64_NUM reg_type_r64 \reg 179 R64_NUM reg_type_r64 \reg
122 XMM_NUM reg_type_xmm \reg 180 XMM_NUM reg_type_xmm \reg
123 .if reg_type_r64 <> REG_NUM_INVALID 181 .if reg_type_r64 <> REG_NUM_INVALID
124 \type = REG_TYPE_R64 182 \type = REG_TYPE_R64
183 .elseif reg_type_r32 <> REG_NUM_INVALID
184 \type = REG_TYPE_R32
125 .elseif reg_type_xmm <> REG_NUM_INVALID 185 .elseif reg_type_xmm <> REG_NUM_INVALID
126 \type = REG_TYPE_XMM 186 \type = REG_TYPE_XMM
127 .else 187 .else
@@ -162,6 +222,16 @@
162 .byte \imm8 222 .byte \imm8
163 .endm 223 .endm
164 224
225 .macro PEXTRD imm8 xmm gpr
226 R32_NUM extrd_opd1 \gpr
227 XMM_NUM extrd_opd2 \xmm
228 PFX_OPD_SIZE
229 PFX_REX extrd_opd1 extrd_opd2
230 .byte 0x0f, 0x3a, 0x16
231 MODRM 0xc0 extrd_opd1 extrd_opd2
232 .byte \imm8
233 .endm
234
165 .macro AESKEYGENASSIST rcon xmm1 xmm2 235 .macro AESKEYGENASSIST rcon xmm1 xmm2
166 XMM_NUM aeskeygen_opd1 \xmm1 236 XMM_NUM aeskeygen_opd1 \xmm1
167 XMM_NUM aeskeygen_opd2 \xmm2 237 XMM_NUM aeskeygen_opd2 \xmm2
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index ba870bb6dd8e..57873beb3292 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -41,4 +41,9 @@ extern int vector_used_by_percpu_irq(unsigned int vector);
41 41
42extern void init_ISA_irqs(void); 42extern void init_ISA_irqs(void);
43 43
44#ifdef CONFIG_X86_LOCAL_APIC
45void arch_trigger_all_cpu_backtrace(void);
46#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
47#endif
48
44#endif /* _ASM_X86_IRQ_H */ 49#endif /* _ASM_X86_IRQ_H */
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 6825e2efd1b4..6bc3985ee473 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -60,11 +60,11 @@ static inline void __exit exit_amd_microcode(void) {}
60#ifdef CONFIG_MICROCODE_EARLY 60#ifdef CONFIG_MICROCODE_EARLY
61#define MAX_UCODE_COUNT 128 61#define MAX_UCODE_COUNT 128
62extern void __init load_ucode_bsp(void); 62extern void __init load_ucode_bsp(void);
63extern __init void load_ucode_ap(void); 63extern void __cpuinit load_ucode_ap(void);
64extern int __init save_microcode_in_initrd(void); 64extern int __init save_microcode_in_initrd(void);
65#else 65#else
66static inline void __init load_ucode_bsp(void) {} 66static inline void __init load_ucode_bsp(void) {}
67static inline __init void load_ucode_ap(void) {} 67static inline void __cpuinit load_ucode_ap(void) {}
68static inline int __init save_microcode_in_initrd(void) 68static inline int __init save_microcode_in_initrd(void)
69{ 69{
70 return 0; 70 return 0;
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index c0fa356e90de..86f9301903c8 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -18,9 +18,7 @@ extern int proc_nmi_enabled(struct ctl_table *, int ,
18 void __user *, size_t *, loff_t *); 18 void __user *, size_t *, loff_t *);
19extern int unknown_nmi_panic; 19extern int unknown_nmi_panic;
20 20
21void arch_trigger_all_cpu_backtrace(void); 21#endif /* CONFIG_X86_LOCAL_APIC */
22#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
23#endif
24 22
25#define NMI_FLAG_FIRST 1 23#define NMI_FLAG_FIRST 1
26 24
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 08744242b8d2..c15ddaf90710 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -6,7 +6,6 @@
6#define SETUP_E820_EXT 1 6#define SETUP_E820_EXT 1
7#define SETUP_DTB 2 7#define SETUP_DTB 2
8#define SETUP_PCI 3 8#define SETUP_PCI 3
9#define SETUP_EFI_VARS 4
10 9
11/* ram_size flags */ 10/* ram_size flags */
12#define RAMDISK_IMAGE_START_MASK 0x07FF 11#define RAMDISK_IMAGE_START_MASK 0x07FF
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index 31cb9ae992b7..a698d7165c96 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -9,6 +9,7 @@
9 * 9 *
10 */ 10 */
11#include <asm/apic.h> 11#include <asm/apic.h>
12#include <asm/nmi.h>
12 13
13#include <linux/cpumask.h> 14#include <linux/cpumask.h>
14#include <linux/kdebug.h> 15#include <linux/kdebug.h>
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 35ffda5d0727..5f90b85ff22e 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -714,15 +714,15 @@ int __init mtrr_cleanup(unsigned address_bits)
714 if (mtrr_tom2) 714 if (mtrr_tom2)
715 x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base; 715 x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base;
716 716
717 nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size);
718 /* 717 /*
719 * [0, 1M) should always be covered by var mtrr with WB 718 * [0, 1M) should always be covered by var mtrr with WB
720 * and fixed mtrrs should take effect before var mtrr for it: 719 * and fixed mtrrs should take effect before var mtrr for it:
721 */ 720 */
722 nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0, 721 nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0,
723 1ULL<<(20 - PAGE_SHIFT)); 722 1ULL<<(20 - PAGE_SHIFT));
724 /* Sort the ranges: */ 723 /* add from var mtrr at last */
725 sort_range(range, nr_range); 724 nr_range = x86_get_mtrr_mem_range(range, nr_range,
725 x_remove_base, x_remove_size);
726 726
727 range_sums = sum_ranges(range, nr_range); 727 range_sums = sum_ranges(range, nr_range);
728 printk(KERN_INFO "total RAM covered: %ldM\n", 728 printk(KERN_INFO "total RAM covered: %ldM\n",
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index f60d41ff9a97..a9e22073bd56 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -165,13 +165,13 @@ static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
165 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), 165 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
166 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), 166 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
167 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), 167 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
168 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
169 EVENT_EXTRA_END 168 EVENT_EXTRA_END
170}; 169};
171 170
172static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { 171static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
173 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), 172 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
174 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), 173 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
174 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
175 EVENT_EXTRA_END 175 EVENT_EXTRA_END
176}; 176};
177 177
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 08f7e8039099..321d65ebaffe 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -115,8 +115,10 @@ startup_64:
115 movq %rdi, %rax 115 movq %rdi, %rax
116 shrq $PUD_SHIFT, %rax 116 shrq $PUD_SHIFT, %rax
117 andl $(PTRS_PER_PUD-1), %eax 117 andl $(PTRS_PER_PUD-1), %eax
118 movq %rdx, (4096+0)(%rbx,%rax,8) 118 movq %rdx, 4096(%rbx,%rax,8)
119 movq %rdx, (4096+8)(%rbx,%rax,8) 119 incl %eax
120 andl $(PTRS_PER_PUD-1), %eax
121 movq %rdx, 4096(%rbx,%rax,8)
120 122
121 addq $8192, %rbx 123 addq $8192, %rbx
122 movq %rdi, %rax 124 movq %rdi, %rax
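
A rough check of the arithmetic behind this fix, assuming the usual x86-64 constants (PTRS_PER_PUD = 512, 8-byte entries, and the PUD page starting 4096 bytes past %rbx): with the index %eax at its maximum of 511, the old second store landed at offset 4096 + 8 + 511*8 = 8192, the first byte past that 4 KiB table. Incrementing the index and re-applying the PTRS_PER_PUD-1 mask, as the new code does, wraps the second store back to entry 0 of the same table instead of writing past it.
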
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 245a71db401a..cb339097b9ea 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -22,23 +22,19 @@
22/* 22/*
23 * Were we in an interrupt that interrupted kernel mode? 23 * Were we in an interrupt that interrupted kernel mode?
24 * 24 *
25 * For now, with eagerfpu we will return interrupted kernel FPU
26 * state as not-idle. TBD: Ideally we can change the return value
27 * to something like __thread_has_fpu(current). But we need to
28 * be careful of doing __thread_clear_has_fpu() before saving
29 * the FPU etc for supporting nested uses etc. For now, take
30 * the simple route!
31 *
32 * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that 25 * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
33 * pair does nothing at all: the thread must not have fpu (so 26 * pair does nothing at all: the thread must not have fpu (so
34 * that we don't try to save the FPU state), and TS must 27 * that we don't try to save the FPU state), and TS must
35 * be set (so that the clts/stts pair does nothing that is 28 * be set (so that the clts/stts pair does nothing that is
36 * visible in the interrupted kernel thread). 29 * visible in the interrupted kernel thread).
30 *
31 * Except for the eagerfpu case when we return 1 unless we've already
32 * been eager and saved the state in kernel_fpu_begin().
37 */ 33 */
38static inline bool interrupted_kernel_fpu_idle(void) 34static inline bool interrupted_kernel_fpu_idle(void)
39{ 35{
40 if (use_eager_fpu()) 36 if (use_eager_fpu())
41 return 0; 37 return __thread_has_fpu(current);
42 38
43 return !__thread_has_fpu(current) && 39 return !__thread_has_fpu(current) &&
44 (read_cr0() & X86_CR0_TS); 40 (read_cr0() & X86_CR0_TS);
@@ -78,8 +74,8 @@ void __kernel_fpu_begin(void)
78 struct task_struct *me = current; 74 struct task_struct *me = current;
79 75
80 if (__thread_has_fpu(me)) { 76 if (__thread_has_fpu(me)) {
81 __save_init_fpu(me);
82 __thread_clear_has_fpu(me); 77 __thread_clear_has_fpu(me);
78 __save_init_fpu(me);
83 /* We do 'stts()' in __kernel_fpu_end() */ 79 /* We do 'stts()' in __kernel_fpu_end() */
84 } else if (!use_eager_fpu()) { 80 } else if (!use_eager_fpu()) {
85 this_cpu_write(fpu_owner_task, NULL); 81 this_cpu_write(fpu_owner_task, NULL);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 9895a9a41380..211bce445522 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -365,10 +365,14 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
365 return insn.length; 365 return insn.length;
366} 366}
367 367
368static void __kprobes arch_copy_kprobe(struct kprobe *p) 368static int __kprobes arch_copy_kprobe(struct kprobe *p)
369{ 369{
370 int ret;
371
370 /* Copy an instruction with recovering if other optprobe modifies it.*/ 372 /* Copy an instruction with recovering if other optprobe modifies it.*/
371 __copy_instruction(p->ainsn.insn, p->addr); 373 ret = __copy_instruction(p->ainsn.insn, p->addr);
374 if (!ret)
375 return -EINVAL;
372 376
373 /* 377 /*
374 * __copy_instruction can modify the displacement of the instruction, 378 * __copy_instruction can modify the displacement of the instruction,
@@ -384,6 +388,8 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
384 388
385 /* Also, displacement change doesn't affect the first byte */ 389 /* Also, displacement change doesn't affect the first byte */
386 p->opcode = p->ainsn.insn[0]; 390 p->opcode = p->ainsn.insn[0];
391
392 return 0;
387} 393}
388 394
389int __kprobes arch_prepare_kprobe(struct kprobe *p) 395int __kprobes arch_prepare_kprobe(struct kprobe *p)
@@ -397,8 +403,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
397 p->ainsn.insn = get_insn_slot(); 403 p->ainsn.insn = get_insn_slot();
398 if (!p->ainsn.insn) 404 if (!p->ainsn.insn)
399 return -ENOMEM; 405 return -ENOMEM;
400 arch_copy_kprobe(p); 406
401 return 0; 407 return arch_copy_kprobe(p);
402} 408}
403 409
404void __kprobes arch_arm_kprobe(struct kprobe *p) 410void __kprobes arch_arm_kprobe(struct kprobe *p)
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index d2c381280e3c..3dd37ebd591b 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -242,6 +242,7 @@ void __init kvmclock_init(void)
242 if (!mem) 242 if (!mem)
243 return; 243 return;
244 hv_clock = __va(mem); 244 hv_clock = __va(mem);
245 memset(hv_clock, 0, size);
245 246
246 if (kvm_register_clock("boot clock")) { 247 if (kvm_register_clock("boot clock")) {
247 hv_clock = NULL; 248 hv_clock = NULL;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 4e7a37ff03ab..81a5f5e8f142 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -277,18 +277,6 @@ void exit_idle(void)
277} 277}
278#endif 278#endif
279 279
280void arch_cpu_idle_prepare(void)
281{
282 /*
283 * If we're the non-boot CPU, nothing set the stack canary up
284 * for us. CPU0 already has it initialized but no harm in
285 * doing it again. This is a good place for updating it, as
286 * we wont ever return from this function (so the invalid
287 * canaries already on the stack wont ever trigger).
288 */
289 boot_init_stack_canary();
290}
291
292void arch_cpu_idle_enter(void) 280void arch_cpu_idle_enter(void)
293{ 281{
294 local_touch_nmi(); 282 local_touch_nmi();
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 7a6f3b3be3cf..f2bb9c96720a 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -160,7 +160,7 @@ identity_mapped:
160 xorq %rbp, %rbp 160 xorq %rbp, %rbp
161 xorq %r8, %r8 161 xorq %r8, %r8
162 xorq %r9, %r9 162 xorq %r9, %r9
163 xorq %r10, %r9 163 xorq %r10, %r10
164 xorq %r11, %r11 164 xorq %r11, %r11
165 xorq %r12, %r12 165 xorq %r12, %r12
166 xorq %r13, %r13 166 xorq %r13, %r13
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 8db0010ed150..5953dcea752d 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1240,9 +1240,12 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1240 ctxt->modrm_seg = VCPU_SREG_DS; 1240 ctxt->modrm_seg = VCPU_SREG_DS;
1241 1241
1242 if (ctxt->modrm_mod == 3) { 1242 if (ctxt->modrm_mod == 3) {
1243 int highbyte_regs = ctxt->rex_prefix == 0;
1244
1243 op->type = OP_REG; 1245 op->type = OP_REG;
1244 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; 1246 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1245 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, ctxt->d & ByteOp); 1247 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1248 highbyte_regs && (ctxt->d & ByteOp));
1246 if (ctxt->d & Sse) { 1249 if (ctxt->d & Sse) {
1247 op->type = OP_XMM; 1250 op->type = OP_XMM;
1248 op->bytes = 16; 1251 op->bytes = 16;
@@ -3997,7 +4000,8 @@ static const struct opcode twobyte_table[256] = {
3997 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, 4000 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
3998 N, D(ImplicitOps | ModRM), N, N, 4001 N, D(ImplicitOps | ModRM), N, N,
3999 /* 0x10 - 0x1F */ 4002 /* 0x10 - 0x1F */
4000 N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N, 4003 N, N, N, N, N, N, N, N,
4004 D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM),
4001 /* 0x20 - 0x2F */ 4005 /* 0x20 - 0x2F */
4002 DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read), 4006 DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
4003 DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read), 4007 DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
@@ -4836,6 +4840,7 @@ twobyte_insn:
4836 case 0x08: /* invd */ 4840 case 0x08: /* invd */
4837 case 0x0d: /* GrpP (prefetch) */ 4841 case 0x0d: /* GrpP (prefetch) */
4838 case 0x18: /* Grp16 (prefetch/nop) */ 4842 case 0x18: /* Grp16 (prefetch/nop) */
4843 case 0x1f: /* nop */
4839 break; 4844 break;
4840 case 0x20: /* mov cr, reg */ 4845 case 0x20: /* mov cr, reg */
4841 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg); 4846 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index e1adbb4aca75..0eee2c8b64d1 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1861,11 +1861,14 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
1861{ 1861{
1862 struct kvm_lapic *apic = vcpu->arch.apic; 1862 struct kvm_lapic *apic = vcpu->arch.apic;
1863 unsigned int sipi_vector; 1863 unsigned int sipi_vector;
1864 unsigned long pe;
1864 1865
1865 if (!kvm_vcpu_has_lapic(vcpu)) 1866 if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events)
1866 return; 1867 return;
1867 1868
1868 if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) { 1869 pe = xchg(&apic->pending_events, 0);
1870
1871 if (test_bit(KVM_APIC_INIT, &pe)) {
1869 kvm_lapic_reset(vcpu); 1872 kvm_lapic_reset(vcpu);
1870 kvm_vcpu_reset(vcpu); 1873 kvm_vcpu_reset(vcpu);
1871 if (kvm_vcpu_is_bsp(apic->vcpu)) 1874 if (kvm_vcpu_is_bsp(apic->vcpu))
@@ -1873,7 +1876,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
1873 else 1876 else
1874 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 1877 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
1875 } 1878 }
1876 if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events) && 1879 if (test_bit(KVM_APIC_SIPI, &pe) &&
1877 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { 1880 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
1878 /* evaluate pending_events before reading the vector */ 1881 /* evaluate pending_events before reading the vector */
1879 smp_rmb(); 1882 smp_rmb();
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 094b5d96ab14..e8ba99c34180 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -582,8 +582,6 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
582 if (index != XCR_XFEATURE_ENABLED_MASK) 582 if (index != XCR_XFEATURE_ENABLED_MASK)
583 return 1; 583 return 1;
584 xcr0 = xcr; 584 xcr0 = xcr;
585 if (kvm_x86_ops->get_cpl(vcpu) != 0)
586 return 1;
587 if (!(xcr0 & XSTATE_FP)) 585 if (!(xcr0 & XSTATE_FP))
588 return 1; 586 return 1;
589 if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) 587 if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
@@ -597,7 +595,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
597 595
598int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) 596int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
599{ 597{
600 if (__kvm_set_xcr(vcpu, index, xcr)) { 598 if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
599 __kvm_set_xcr(vcpu, index, xcr)) {
601 kvm_inject_gp(vcpu, 0); 600 kvm_inject_gp(vcpu, 0);
602 return 1; 601 return 1;
603 } 602 }
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index eaac1743def7..1f34e9219775 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -277,6 +277,9 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
277 end_pfn = limit_pfn; 277 end_pfn = limit_pfn;
278 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); 278 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
279 279
280 if (!after_bootmem)
281 adjust_range_page_size_mask(mr, nr_range);
282
280 /* try to merge same page size and continuous */ 283 /* try to merge same page size and continuous */
281 for (i = 0; nr_range > 1 && i < nr_range - 1; i++) { 284 for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
282 unsigned long old_start; 285 unsigned long old_start;
@@ -291,9 +294,6 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
291 nr_range--; 294 nr_range--;
292 } 295 }
293 296
294 if (!after_bootmem)
295 adjust_range_page_size_mask(mr, nr_range);
296
297 for (i = 0; i < nr_range; i++) 297 for (i = 0; i < nr_range; i++)
298 printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n", 298 printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
299 mr[i].start, mr[i].end - 1, 299 mr[i].start, mr[i].end - 1,
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 305c68b8d538..981c2dbd72cc 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -628,7 +628,9 @@ int pcibios_add_device(struct pci_dev *dev)
628 628
629 pa_data = boot_params.hdr.setup_data; 629 pa_data = boot_params.hdr.setup_data;
630 while (pa_data) { 630 while (pa_data) {
631 data = phys_to_virt(pa_data); 631 data = ioremap(pa_data, sizeof(*rom));
632 if (!data)
633 return -ENOMEM;
632 634
633 if (data->type == SETUP_PCI) { 635 if (data->type == SETUP_PCI) {
634 rom = (struct pci_setup_rom *)data; 636 rom = (struct pci_setup_rom *)data;
@@ -645,6 +647,7 @@ int pcibios_add_device(struct pci_dev *dev)
645 } 647 }
646 } 648 }
647 pa_data = data->next; 649 pa_data = data->next;
650 iounmap(data);
648 } 651 }
649 return 0; 652 return 0;
650} 653}
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 55856b2310d3..d2fbcedcf6ea 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -42,7 +42,6 @@
42#include <linux/io.h> 42#include <linux/io.h>
43#include <linux/reboot.h> 43#include <linux/reboot.h>
44#include <linux/bcd.h> 44#include <linux/bcd.h>
45#include <linux/ucs2_string.h>
46 45
47#include <asm/setup.h> 46#include <asm/setup.h>
48#include <asm/efi.h> 47#include <asm/efi.h>
@@ -54,12 +53,12 @@
54 53
55#define EFI_DEBUG 1 54#define EFI_DEBUG 1
56 55
57/* 56#define EFI_MIN_RESERVE 5120
58 * There's some additional metadata associated with each 57
59 * variable. Intel's reference implementation is 60 bytes - bump that 58#define EFI_DUMMY_GUID \
60 * to account for potential alignment constraints 59 EFI_GUID(0x4424ac57, 0xbe4b, 0x47dd, 0x9e, 0x97, 0xed, 0x50, 0xf0, 0x9f, 0x92, 0xa9)
61 */ 60
62#define VAR_METADATA_SIZE 64 61static efi_char16_t efi_dummy_name[6] = { 'D', 'U', 'M', 'M', 'Y', 0 };
63 62
64struct efi __read_mostly efi = { 63struct efi __read_mostly efi = {
65 .mps = EFI_INVALID_TABLE_ADDR, 64 .mps = EFI_INVALID_TABLE_ADDR,
@@ -79,13 +78,6 @@ struct efi_memory_map memmap;
79static struct efi efi_phys __initdata; 78static struct efi efi_phys __initdata;
80static efi_system_table_t efi_systab __initdata; 79static efi_system_table_t efi_systab __initdata;
81 80
82static u64 efi_var_store_size;
83static u64 efi_var_remaining_size;
84static u64 efi_var_max_var_size;
85static u64 boot_used_size;
86static u64 boot_var_size;
87static u64 active_size;
88
89unsigned long x86_efi_facility; 81unsigned long x86_efi_facility;
90 82
91/* 83/*
@@ -188,53 +180,8 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
188 efi_char16_t *name, 180 efi_char16_t *name,
189 efi_guid_t *vendor) 181 efi_guid_t *vendor)
190{ 182{
191 efi_status_t status; 183 return efi_call_virt3(get_next_variable,
192 static bool finished = false; 184 name_size, name, vendor);
193 static u64 var_size;
194
195 status = efi_call_virt3(get_next_variable,
196 name_size, name, vendor);
197
198 if (status == EFI_NOT_FOUND) {
199 finished = true;
200 if (var_size < boot_used_size) {
201 boot_var_size = boot_used_size - var_size;
202 active_size += boot_var_size;
203 } else {
204 printk(KERN_WARNING FW_BUG "efi: Inconsistent initial sizes\n");
205 }
206 }
207
208 if (boot_used_size && !finished) {
209 unsigned long size;
210 u32 attr;
211 efi_status_t s;
212 void *tmp;
213
214 s = virt_efi_get_variable(name, vendor, &attr, &size, NULL);
215
216 if (s != EFI_BUFFER_TOO_SMALL || !size)
217 return status;
218
219 tmp = kmalloc(size, GFP_ATOMIC);
220
221 if (!tmp)
222 return status;
223
224 s = virt_efi_get_variable(name, vendor, &attr, &size, tmp);
225
226 if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) {
227 var_size += size;
228 var_size += ucs2_strsize(name, 1024);
229 active_size += size;
230 active_size += VAR_METADATA_SIZE;
231 active_size += ucs2_strsize(name, 1024);
232 }
233
234 kfree(tmp);
235 }
236
237 return status;
238} 185}
239 186
240static efi_status_t virt_efi_set_variable(efi_char16_t *name, 187static efi_status_t virt_efi_set_variable(efi_char16_t *name,
@@ -243,34 +190,9 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
243 unsigned long data_size, 190 unsigned long data_size,
244 void *data) 191 void *data)
245{ 192{
246 efi_status_t status; 193 return efi_call_virt5(set_variable,
247 u32 orig_attr = 0; 194 name, vendor, attr,
248 unsigned long orig_size = 0; 195 data_size, data);
249
250 status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size,
251 NULL);
252
253 if (status != EFI_BUFFER_TOO_SMALL)
254 orig_size = 0;
255
256 status = efi_call_virt5(set_variable,
257 name, vendor, attr,
258 data_size, data);
259
260 if (status == EFI_SUCCESS) {
261 if (orig_size) {
262 active_size -= orig_size;
263 active_size -= ucs2_strsize(name, 1024);
264 active_size -= VAR_METADATA_SIZE;
265 }
266 if (data_size) {
267 active_size += data_size;
268 active_size += ucs2_strsize(name, 1024);
269 active_size += VAR_METADATA_SIZE;
270 }
271 }
272
273 return status;
274} 196}
275 197
276static efi_status_t virt_efi_query_variable_info(u32 attr, 198static efi_status_t virt_efi_query_variable_info(u32 attr,
@@ -786,9 +708,6 @@ void __init efi_init(void)
786 char vendor[100] = "unknown"; 708 char vendor[100] = "unknown";
787 int i = 0; 709 int i = 0;
788 void *tmp; 710 void *tmp;
789 struct setup_data *data;
790 struct efi_var_bootdata *efi_var_data;
791 u64 pa_data;
792 711
793#ifdef CONFIG_X86_32 712#ifdef CONFIG_X86_32
794 if (boot_params.efi_info.efi_systab_hi || 713 if (boot_params.efi_info.efi_systab_hi ||
@@ -806,22 +725,6 @@ void __init efi_init(void)
806 if (efi_systab_init(efi_phys.systab)) 725 if (efi_systab_init(efi_phys.systab))
807 return; 726 return;
808 727
809 pa_data = boot_params.hdr.setup_data;
810 while (pa_data) {
811 data = early_ioremap(pa_data, sizeof(*efi_var_data));
812 if (data->type == SETUP_EFI_VARS) {
813 efi_var_data = (struct efi_var_bootdata *)data;
814
815 efi_var_store_size = efi_var_data->store_size;
816 efi_var_remaining_size = efi_var_data->remaining_size;
817 efi_var_max_var_size = efi_var_data->max_var_size;
818 }
819 pa_data = data->next;
820 early_iounmap(data, sizeof(*efi_var_data));
821 }
822
823 boot_used_size = efi_var_store_size - efi_var_remaining_size;
824
825 set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility); 728 set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
826 729
827 /* 730 /*
@@ -1085,6 +988,13 @@ void __init efi_enter_virtual_mode(void)
1085 runtime_code_page_mkexec(); 988 runtime_code_page_mkexec();
1086 989
1087 kfree(new_memmap); 990 kfree(new_memmap);
991
992 /* clean DUMMY object */
993 efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
994 EFI_VARIABLE_NON_VOLATILE |
995 EFI_VARIABLE_BOOTSERVICE_ACCESS |
996 EFI_VARIABLE_RUNTIME_ACCESS,
997 0, NULL);
1088} 998}
1089 999
1090/* 1000/*
@@ -1136,33 +1046,70 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
1136 efi_status_t status; 1046 efi_status_t status;
1137 u64 storage_size, remaining_size, max_size; 1047 u64 storage_size, remaining_size, max_size;
1138 1048
1049 if (!(attributes & EFI_VARIABLE_NON_VOLATILE))
1050 return 0;
1051
1139 status = efi.query_variable_info(attributes, &storage_size, 1052 status = efi.query_variable_info(attributes, &storage_size,
1140 &remaining_size, &max_size); 1053 &remaining_size, &max_size);
1141 if (status != EFI_SUCCESS) 1054 if (status != EFI_SUCCESS)
1142 return status; 1055 return status;
1143 1056
1144 if (!max_size && remaining_size > size)
1145 printk_once(KERN_ERR FW_BUG "Broken EFI implementation"
1146 " is returning MaxVariableSize=0\n");
1147 /* 1057 /*
1148 * Some firmware implementations refuse to boot if there's insufficient 1058 * Some firmware implementations refuse to boot if there's insufficient
1149 * space in the variable store. We account for that by refusing the 1059 * space in the variable store. We account for that by refusing the
1150 * write if permitting it would reduce the available space to under 1060 * write if permitting it would reduce the available space to under
1151 * 50%. However, some firmware won't reclaim variable space until 1061 * 5KB. This figure was provided by Samsung, so should be safe.
1152 * after the used (not merely the actively used) space drops below
1153 * a threshold. We can approximate that case with the value calculated
1154 * above. If both the firmware and our calculations indicate that the
1155 * available space would drop below 50%, refuse the write.
1156 */ 1062 */
1063 if ((remaining_size - size < EFI_MIN_RESERVE) &&
1064 !efi_no_storage_paranoia) {
1065
1066 /*
1067 * Triggering garbage collection may require that the firmware
1068 * generate a real EFI_OUT_OF_RESOURCES error. We can force
1069 * that by attempting to use more space than is available.
1070 */
1071 unsigned long dummy_size = remaining_size + 1024;
1072 void *dummy = kzalloc(dummy_size, GFP_ATOMIC);
1073
1074 if (!dummy)
1075 return EFI_OUT_OF_RESOURCES;
1076
1077 status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
1078 EFI_VARIABLE_NON_VOLATILE |
1079 EFI_VARIABLE_BOOTSERVICE_ACCESS |
1080 EFI_VARIABLE_RUNTIME_ACCESS,
1081 dummy_size, dummy);
1082
1083 if (status == EFI_SUCCESS) {
1084 /*
1085 * This should have failed, so if it didn't, make sure
1086 * that we delete it...
1087 */
1088 efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
1089 EFI_VARIABLE_NON_VOLATILE |
1090 EFI_VARIABLE_BOOTSERVICE_ACCESS |
1091 EFI_VARIABLE_RUNTIME_ACCESS,
1092 0, dummy);
1093 }
1094
1095 kfree(dummy);
1157 1096
1158 if (!storage_size || size > remaining_size || 1097 /*
1159 (max_size && size > max_size)) 1098 * The runtime code may now have triggered a garbage collection
1160 return EFI_OUT_OF_RESOURCES; 1099 * run, so check the variable info again
1100 */
1101 status = efi.query_variable_info(attributes, &storage_size,
1102 &remaining_size, &max_size);
1161 1103
1162 if (!efi_no_storage_paranoia && 1104 if (status != EFI_SUCCESS)
1163 ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) && 1105 return status;
1164 (remaining_size - size < storage_size / 2))) 1106
1165 return EFI_OUT_OF_RESOURCES; 1107 /*
1108 * There still isn't enough room, so return an error
1109 */
1110 if (remaining_size - size < EFI_MIN_RESERVE)
1111 return EFI_OUT_OF_RESOURCES;
1112 }
1166 1113
1167 return EFI_SUCCESS; 1114 return EFI_SUCCESS;
1168} 1115}
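The rewritten efi_query_variable_store() above drops the old 50% heuristic in favour of a fixed 5 KiB floor (EFI_MIN_RESERVE) and, when the floor would be crossed, tries to provoke the firmware's garbage collection with an oversized dummy write before rechecking. The space check itself is plain arithmetic; an illustrative, self-contained version (MIN_RESERVE mirrors EFI_MIN_RESERVE, and the unsigned subtraction matches the kernel expression):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_RESERVE 5120ULL		/* bytes, mirrors EFI_MIN_RESERVE */

/* A non-volatile write is refused (or GC is attempted first) when it would
 * leave less than the reserve free in the variable store. */
static bool below_reserve(uint64_t remaining_size, uint64_t size)
{
	return remaining_size - size < MIN_RESERVE;
}

int main(void)
{
	/* 6 KiB free, 2 KiB write -> would dip below the 5 KiB reserve */
	printf("%s\n", below_reserve(6144, 2048) ? "trigger GC / refuse" : "ok");
	return 0;
}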
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 590be1090892..f7bab68a4b83 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -42,9 +42,6 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
42 "^(xen_irq_disable_direct_reloc$|" 42 "^(xen_irq_disable_direct_reloc$|"
43 "xen_save_fl_direct_reloc$|" 43 "xen_save_fl_direct_reloc$|"
44 "VDSO|" 44 "VDSO|"
45#if ELF_BITS == 64
46 "__vvar_page|"
47#endif
48 "__crc_)", 45 "__crc_)",
49 46
50/* 47/*
@@ -72,6 +69,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
72 "__per_cpu_load|" 69 "__per_cpu_load|"
73 "init_per_cpu__.*|" 70 "init_per_cpu__.*|"
74 "__end_rodata_hpage_align|" 71 "__end_rodata_hpage_align|"
72 "__vvar_page|"
75#endif 73#endif
76 "_end)$" 74 "_end)$"
77}; 75};
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 8ff37995d54e..d99cae8147d1 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -17,6 +17,7 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/smp.h> 18#include <linux/smp.h>
19#include <linux/irq_work.h> 19#include <linux/irq_work.h>
20#include <linux/tick.h>
20 21
21#include <asm/paravirt.h> 22#include <asm/paravirt.h>
22#include <asm/desc.h> 23#include <asm/desc.h>
@@ -447,6 +448,13 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
447 play_dead_common(); 448 play_dead_common();
448 HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); 449 HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
449 cpu_bringup(); 450 cpu_bringup();
451 /*
452 * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
453 * clears certain data that the cpu_idle loop (which called us
454 * and that we return from) expects. The only way to get that
455 * data back is to call:
456 */
457 tick_nohz_idle_enter();
450} 458}
451 459
452#else /* !CONFIG_HOTPLUG_CPU */ 460#else /* !CONFIG_HOTPLUG_CPU */
@@ -576,24 +584,22 @@ void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
576{ 584{
577 unsigned cpu; 585 unsigned cpu;
578 unsigned int this_cpu = smp_processor_id(); 586 unsigned int this_cpu = smp_processor_id();
587 int xen_vector = xen_map_vector(vector);
579 588
580 if (!(num_online_cpus() > 1)) 589 if (!(num_online_cpus() > 1) || (xen_vector < 0))
581 return; 590 return;
582 591
583 for_each_cpu_and(cpu, mask, cpu_online_mask) { 592 for_each_cpu_and(cpu, mask, cpu_online_mask) {
584 if (this_cpu == cpu) 593 if (this_cpu == cpu)
585 continue; 594 continue;
586 595
587 xen_smp_send_call_function_single_ipi(cpu); 596 xen_send_IPI_one(cpu, xen_vector);
588 } 597 }
589} 598}
590 599
591void xen_send_IPI_allbutself(int vector) 600void xen_send_IPI_allbutself(int vector)
592{ 601{
593 int xen_vector = xen_map_vector(vector); 602 xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
594
595 if (xen_vector >= 0)
596 xen_send_IPI_mask_allbutself(cpu_online_mask, xen_vector);
597} 603}
598 604
599static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) 605static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
index 8981a76d081a..c7c2d89efd76 100644
--- a/arch/x86/xen/smp.h
+++ b/arch/x86/xen/smp.h
@@ -5,7 +5,6 @@ extern void xen_send_IPI_mask(const struct cpumask *mask,
5extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask, 5extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
6 int vector); 6 int vector);
7extern void xen_send_IPI_allbutself(int vector); 7extern void xen_send_IPI_allbutself(int vector);
8extern void physflat_send_IPI_allbutself(int vector);
9extern void xen_send_IPI_all(int vector); 8extern void xen_send_IPI_all(int vector);
10extern void xen_send_IPI_self(int vector); 9extern void xen_send_IPI_self(int vector);
11 10
diff --git a/block/blk-core.c b/block/blk-core.c
index 33c33bc99ddd..d5745b5833c9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3164,7 +3164,7 @@ void blk_post_runtime_resume(struct request_queue *q, int err)
3164 q->rpm_status = RPM_ACTIVE; 3164 q->rpm_status = RPM_ACTIVE;
3165 __blk_run_queue(q); 3165 __blk_run_queue(q);
3166 pm_runtime_mark_last_busy(q->dev); 3166 pm_runtime_mark_last_busy(q->dev);
3167 pm_runtime_autosuspend(q->dev); 3167 pm_request_autosuspend(q->dev);
3168 } else { 3168 } else {
3169 q->rpm_status = RPM_SUSPENDED; 3169 q->rpm_status = RPM_SUSPENDED;
3170 } 3170 }
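For context on the one-line change above: pm_runtime_autosuspend() carries out the autosuspend in the caller's context, while pm_request_autosuspend() only queues it to the PM workqueue. A hedged sketch of the asynchronous pairing the hunk switches to, assuming kernel context:

#include <linux/pm_runtime.h>

static void device_idle_hint(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);		/* record recent activity for the autosuspend delay */
	pm_request_autosuspend(dev);		/* autosuspend later, from the PM workqueue */
}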
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 622d8a48cbe9..bf8148e74e73 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -823,6 +823,7 @@ config CRYPTO_BLOWFISH_X86_64
823config CRYPTO_BLOWFISH_AVX2_X86_64 823config CRYPTO_BLOWFISH_AVX2_X86_64
824 tristate "Blowfish cipher algorithm (x86_64/AVX2)" 824 tristate "Blowfish cipher algorithm (x86_64/AVX2)"
825 depends on X86 && 64BIT 825 depends on X86 && 64BIT
826 depends on BROKEN
826 select CRYPTO_ALGAPI 827 select CRYPTO_ALGAPI
827 select CRYPTO_CRYPTD 828 select CRYPTO_CRYPTD
828 select CRYPTO_ABLK_HELPER_X86 829 select CRYPTO_ABLK_HELPER_X86
@@ -1299,6 +1300,7 @@ config CRYPTO_TWOFISH_AVX_X86_64
1299config CRYPTO_TWOFISH_AVX2_X86_64 1300config CRYPTO_TWOFISH_AVX2_X86_64
1300 tristate "Twofish cipher algorithm (x86_64/AVX2)" 1301 tristate "Twofish cipher algorithm (x86_64/AVX2)"
1301 depends on X86 && 64BIT 1302 depends on X86 && 64BIT
1303 depends on BROKEN
1302 select CRYPTO_ALGAPI 1304 select CRYPTO_ALGAPI
1303 select CRYPTO_CRYPTD 1305 select CRYPTO_CRYPTD
1304 select CRYPTO_ABLK_HELPER_X86 1306 select CRYPTO_ABLK_HELPER_X86
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 769219b29309..76fc0b23fc6c 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -45,10 +45,9 @@ struct cryptomgr_param {
45 } nu32; 45 } nu32;
46 } attrs[CRYPTO_MAX_ATTRS]; 46 } attrs[CRYPTO_MAX_ATTRS];
47 47
48 char larval[CRYPTO_MAX_ALG_NAME];
49 char template[CRYPTO_MAX_ALG_NAME]; 48 char template[CRYPTO_MAX_ALG_NAME];
50 49
51 struct completion *completion; 50 struct crypto_larval *larval;
52 51
53 u32 otype; 52 u32 otype;
54 u32 omask; 53 u32 omask;
@@ -87,7 +86,8 @@ static int cryptomgr_probe(void *data)
87 crypto_tmpl_put(tmpl); 86 crypto_tmpl_put(tmpl);
88 87
89out: 88out:
90 complete_all(param->completion); 89 complete_all(&param->larval->completion);
90 crypto_alg_put(&param->larval->alg);
91 kfree(param); 91 kfree(param);
92 module_put_and_exit(0); 92 module_put_and_exit(0);
93} 93}
@@ -187,18 +187,19 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
187 param->otype = larval->alg.cra_flags; 187 param->otype = larval->alg.cra_flags;
188 param->omask = larval->mask; 188 param->omask = larval->mask;
189 189
190 memcpy(param->larval, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME); 190 crypto_alg_get(&larval->alg);
191 191 param->larval = larval;
192 param->completion = &larval->completion;
193 192
194 thread = kthread_run(cryptomgr_probe, param, "cryptomgr_probe"); 193 thread = kthread_run(cryptomgr_probe, param, "cryptomgr_probe");
195 if (IS_ERR(thread)) 194 if (IS_ERR(thread))
196 goto err_free_param; 195 goto err_put_larval;
197 196
198 wait_for_completion_interruptible(&larval->completion); 197 wait_for_completion_interruptible(&larval->completion);
199 198
200 return NOTIFY_STOP; 199 return NOTIFY_STOP;
201 200
201err_put_larval:
202 crypto_alg_put(&larval->alg);
202err_free_param: 203err_free_param:
203 kfree(param); 204 kfree(param);
204err_put_module: 205err_put_module:
diff --git a/crypto/api.c b/crypto/api.c
index 033a7147e5eb..3b6180336d3d 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -34,12 +34,6 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem);
34BLOCKING_NOTIFIER_HEAD(crypto_chain); 34BLOCKING_NOTIFIER_HEAD(crypto_chain);
35EXPORT_SYMBOL_GPL(crypto_chain); 35EXPORT_SYMBOL_GPL(crypto_chain);
36 36
37static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
38{
39 atomic_inc(&alg->cra_refcnt);
40 return alg;
41}
42
43struct crypto_alg *crypto_mod_get(struct crypto_alg *alg) 37struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
44{ 38{
45 return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL; 39 return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
diff --git a/crypto/internal.h b/crypto/internal.h
index 9ebedae3fb54..bd39bfc92eab 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -103,6 +103,12 @@ int crypto_register_notifier(struct notifier_block *nb);
103int crypto_unregister_notifier(struct notifier_block *nb); 103int crypto_unregister_notifier(struct notifier_block *nb);
104int crypto_probing_notify(unsigned long val, void *v); 104int crypto_probing_notify(unsigned long val, void *v);
105 105
106static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
107{
108 atomic_inc(&alg->cra_refcnt);
109 return alg;
110}
111
106static inline void crypto_alg_put(struct crypto_alg *alg) 112static inline void crypto_alg_put(struct crypto_alg *alg)
107{ 113{
108 if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy) 114 if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy)
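Moving crypto_alg_get() into internal.h lets algboss.c pin the larval for the probe thread: every atomic_inc of cra_refcnt is paired with the crypto_alg_put() shown above, which destroys the object on the final put. A small userspace analogue of that get/put discipline, using C11 atomics (struct obj and its fields are illustrative, not kernel API):

#include <stdatomic.h>
#include <stddef.h>

struct obj {
	atomic_int refcnt;			/* starts at 1 for the creator */
	void (*destroy)(struct obj *);
};

static struct obj *obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refcnt, 1);
	return o;
}

static void obj_put(struct obj *o)
{
	/* last reference gone: run the destructor, if one was set */
	if (atomic_fetch_sub(&o->refcnt, 1) == 1 && o->destroy)
		o->destroy(o);
}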
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 652fd5ce303c..cab13f2fc28e 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -164,15 +164,24 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
164 if (dev_desc->clk_required) { 164 if (dev_desc->clk_required) {
165 ret = register_device_clock(adev, pdata); 165 ret = register_device_clock(adev, pdata);
166 if (ret) { 166 if (ret) {
167 /* 167 /* Skip the device, but continue the namespace scan. */
168 * Skip the device, but don't terminate the namespace 168 ret = 0;
169 * scan. 169 goto err_out;
170 */
171 kfree(pdata);
172 return 0;
173 } 170 }
174 } 171 }
175 172
173 /*
174 * This works around a known issue in ACPI tables where LPSS devices
175 * have _PS0 and _PS3 without _PSC (and no power resources), so
176 * acpi_bus_init_power() will assume that the BIOS has put them into D0.
177 */
178 ret = acpi_device_fix_up_power(adev);
179 if (ret) {
180 /* Skip the device, but continue the namespace scan. */
181 ret = 0;
182 goto err_out;
183 }
184
176 adev->driver_data = pdata; 185 adev->driver_data = pdata;
177 ret = acpi_create_platform_device(adev, id); 186 ret = acpi_create_platform_device(adev, id);
178 if (ret > 0) 187 if (ret > 0)
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
index fefc2ca7cc3e..33dc6a004802 100644
--- a/drivers/acpi/apei/cper.c
+++ b/drivers/acpi/apei/cper.c
@@ -250,10 +250,6 @@ static const char *cper_pcie_port_type_strs[] = {
250static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie, 250static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
251 const struct acpi_hest_generic_data *gdata) 251 const struct acpi_hest_generic_data *gdata)
252{ 252{
253#ifdef CONFIG_ACPI_APEI_PCIEAER
254 struct pci_dev *dev;
255#endif
256
257 if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE) 253 if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
258 printk("%s""port_type: %d, %s\n", pfx, pcie->port_type, 254 printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
259 pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ? 255 pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ?
@@ -285,20 +281,6 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
285 printk( 281 printk(
286 "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n", 282 "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
287 pfx, pcie->bridge.secondary_status, pcie->bridge.control); 283 pfx, pcie->bridge.secondary_status, pcie->bridge.control);
288#ifdef CONFIG_ACPI_APEI_PCIEAER
289 dev = pci_get_domain_bus_and_slot(pcie->device_id.segment,
290 pcie->device_id.bus, pcie->device_id.function);
291 if (!dev) {
292 pr_err("PCI AER Cannot get PCI device %04x:%02x:%02x.%d\n",
293 pcie->device_id.segment, pcie->device_id.bus,
294 pcie->device_id.slot, pcie->device_id.function);
295 return;
296 }
297 if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO)
298 cper_print_aer(pfx, dev, gdata->error_severity,
299 (struct aer_capability_regs *) pcie->aer_info);
300 pci_dev_put(dev);
301#endif
302} 284}
303 285
304static const char *apei_estatus_section_flag_strs[] = { 286static const char *apei_estatus_section_flag_strs[] = {
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index d668a8ae602b..fcd7d91cec34 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -454,7 +454,9 @@ static void ghes_do_proc(struct ghes *ghes,
454 aer_severity = cper_severity_to_aer(sev); 454 aer_severity = cper_severity_to_aer(sev);
455 aer_recover_queue(pcie_err->device_id.segment, 455 aer_recover_queue(pcie_err->device_id.segment,
456 pcie_err->device_id.bus, 456 pcie_err->device_id.bus,
457 devfn, aer_severity); 457 devfn, aer_severity,
458 (struct aer_capability_regs *)
459 pcie_err->aer_info);
458 } 460 }
459 461
460 } 462 }
@@ -917,13 +919,14 @@ static int ghes_probe(struct platform_device *ghes_dev)
917 break; 919 break;
918 case ACPI_HEST_NOTIFY_EXTERNAL: 920 case ACPI_HEST_NOTIFY_EXTERNAL:
919 /* External interrupt vector is GSI */ 921 /* External interrupt vector is GSI */
920 if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) { 922 rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
923 if (rc) {
921 pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n", 924 pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
922 generic->header.source_id); 925 generic->header.source_id);
923 goto err_edac_unreg; 926 goto err_edac_unreg;
924 } 927 }
925 if (request_irq(ghes->irq, ghes_irq_func, 928 rc = request_irq(ghes->irq, ghes_irq_func, 0, "GHES IRQ", ghes);
926 0, "GHES IRQ", ghes)) { 929 if (rc) {
927 pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n", 930 pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
928 generic->header.source_id); 931 generic->header.source_id);
929 goto err_edac_unreg; 932 goto err_edac_unreg;
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index bc493aa3af19..31c217a42839 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -278,16 +278,38 @@ int acpi_bus_init_power(struct acpi_device *device)
278 if (result) 278 if (result)
279 return result; 279 return result;
280 } else if (state == ACPI_STATE_UNKNOWN) { 280 } else if (state == ACPI_STATE_UNKNOWN) {
281 /* No power resources and missing _PSC? Try to force D0. */ 281 /*
282 * No power resources and missing _PSC? Cross fingers and make
283 * it D0 in hope that this is what the BIOS put the device into.
284 * [We tried to force D0 here by executing _PS0, but that broke
285 * Toshiba P870-303 in a nasty way.]
286 */
282 state = ACPI_STATE_D0; 287 state = ACPI_STATE_D0;
283 result = acpi_dev_pm_explicit_set(device, state);
284 if (result)
285 return result;
286 } 288 }
287 device->power.state = state; 289 device->power.state = state;
288 return 0; 290 return 0;
289} 291}
290 292
293/**
294 * acpi_device_fix_up_power - Force device with missing _PSC into D0.
295 * @device: Device object whose power state is to be fixed up.
296 *
297 * Devices without power resources and _PSC, but having _PS0 and _PS3 defined,
298 * are assumed to be put into D0 by the BIOS. However, in some cases that may
299 * not be the case and this function should be used then.
300 */
301int acpi_device_fix_up_power(struct acpi_device *device)
302{
303 int ret = 0;
304
305 if (!device->power.flags.power_resources
306 && !device->power.flags.explicit_get
307 && device->power.state == ACPI_STATE_D0)
308 ret = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0);
309
310 return ret;
311}
312
291int acpi_bus_update_power(acpi_handle handle, int *state_p) 313int acpi_bus_update_power(acpi_handle handle, int *state_p)
292{ 314{
293 struct acpi_device *device; 315 struct acpi_device *device;
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 4fdea381ef21..14de9f46972e 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -66,20 +66,21 @@ struct dock_station {
66 spinlock_t dd_lock; 66 spinlock_t dd_lock;
67 struct mutex hp_lock; 67 struct mutex hp_lock;
68 struct list_head dependent_devices; 68 struct list_head dependent_devices;
69 struct list_head hotplug_devices;
70 69
71 struct list_head sibling; 70 struct list_head sibling;
72 struct platform_device *dock_device; 71 struct platform_device *dock_device;
73}; 72};
74static LIST_HEAD(dock_stations); 73static LIST_HEAD(dock_stations);
75static int dock_station_count; 74static int dock_station_count;
75static DEFINE_MUTEX(hotplug_lock);
76 76
77struct dock_dependent_device { 77struct dock_dependent_device {
78 struct list_head list; 78 struct list_head list;
79 struct list_head hotplug_list;
80 acpi_handle handle; 79 acpi_handle handle;
81 const struct acpi_dock_ops *ops; 80 const struct acpi_dock_ops *hp_ops;
82 void *context; 81 void *hp_context;
82 unsigned int hp_refcount;
83 void (*hp_release)(void *);
83}; 84};
84 85
85#define DOCK_DOCKING 0x00000001 86#define DOCK_DOCKING 0x00000001
@@ -111,7 +112,6 @@ add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
111 112
112 dd->handle = handle; 113 dd->handle = handle;
113 INIT_LIST_HEAD(&dd->list); 114 INIT_LIST_HEAD(&dd->list);
114 INIT_LIST_HEAD(&dd->hotplug_list);
115 115
116 spin_lock(&ds->dd_lock); 116 spin_lock(&ds->dd_lock);
117 list_add_tail(&dd->list, &ds->dependent_devices); 117 list_add_tail(&dd->list, &ds->dependent_devices);
@@ -121,35 +121,90 @@ add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
121} 121}
122 122
123/** 123/**
124 * dock_add_hotplug_device - associate a hotplug handler with the dock station 124 * dock_init_hotplug - Initialize a hotplug device on a docking station.
125 * @ds: The dock station 125 * @dd: Dock-dependent device.
126 * @dd: The dependent device struct 126 * @ops: Dock operations to attach to the dependent device.
127 * 127 * @context: Data to pass to the @ops callbacks and @release.
128 * Add the dependent device to the dock's hotplug device list 128 * @init: Optional initialization routine to run after setting up context.
129 * @release: Optional release routine to run on removal.
129 */ 130 */
130static void 131static int dock_init_hotplug(struct dock_dependent_device *dd,
131dock_add_hotplug_device(struct dock_station *ds, 132 const struct acpi_dock_ops *ops, void *context,
132 struct dock_dependent_device *dd) 133 void (*init)(void *), void (*release)(void *))
133{ 134{
134 mutex_lock(&ds->hp_lock); 135 int ret = 0;
135 list_add_tail(&dd->hotplug_list, &ds->hotplug_devices); 136
136 mutex_unlock(&ds->hp_lock); 137 mutex_lock(&hotplug_lock);
138
139 if (dd->hp_context) {
140 ret = -EEXIST;
141 } else {
142 dd->hp_refcount = 1;
143 dd->hp_ops = ops;
144 dd->hp_context = context;
145 dd->hp_release = release;
146 }
147
148 if (!WARN_ON(ret) && init)
149 init(context);
150
151 mutex_unlock(&hotplug_lock);
152 return ret;
137} 153}
138 154
139/** 155/**
140 * dock_del_hotplug_device - remove a hotplug handler from the dock station 156 * dock_release_hotplug - Decrement hotplug reference counter of dock device.
141 * @ds: The dock station 157 * @dd: Dock-dependent device.
142 * @dd: the dependent device struct
143 * 158 *
144 * Delete the dependent device from the dock's hotplug device list 159 * Decrement the reference counter of @dd and if 0, detach its hotplug
160 * operations from it, reset its context pointer and run the optional release
161 * routine if present.
145 */ 162 */
146static void 163static void dock_release_hotplug(struct dock_dependent_device *dd)
147dock_del_hotplug_device(struct dock_station *ds,
148 struct dock_dependent_device *dd)
149{ 164{
150 mutex_lock(&ds->hp_lock); 165 void (*release)(void *) = NULL;
151 list_del(&dd->hotplug_list); 166 void *context = NULL;
152 mutex_unlock(&ds->hp_lock); 167
168 mutex_lock(&hotplug_lock);
169
170 if (dd->hp_context && !--dd->hp_refcount) {
171 dd->hp_ops = NULL;
172 context = dd->hp_context;
173 dd->hp_context = NULL;
174 release = dd->hp_release;
175 dd->hp_release = NULL;
176 }
177
178 if (release && context)
179 release(context);
180
181 mutex_unlock(&hotplug_lock);
182}
183
184static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
185 bool uevent)
186{
187 acpi_notify_handler cb = NULL;
188 bool run = false;
189
190 mutex_lock(&hotplug_lock);
191
192 if (dd->hp_context) {
193 run = true;
194 dd->hp_refcount++;
195 if (dd->hp_ops)
196 cb = uevent ? dd->hp_ops->uevent : dd->hp_ops->handler;
197 }
198
199 mutex_unlock(&hotplug_lock);
200
201 if (!run)
202 return;
203
204 if (cb)
205 cb(dd->handle, event, dd->hp_context);
206
207 dock_release_hotplug(dd);
153} 208}
154 209
155/** 210/**
@@ -360,9 +415,8 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event)
360 /* 415 /*
361 * First call driver specific hotplug functions 416 * First call driver specific hotplug functions
362 */ 417 */
363 list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list) 418 list_for_each_entry(dd, &ds->dependent_devices, list)
364 if (dd->ops && dd->ops->handler) 419 dock_hotplug_event(dd, event, false);
365 dd->ops->handler(dd->handle, event, dd->context);
366 420
367 /* 421 /*
368 * Now make sure that an acpi_device is created for each 422 * Now make sure that an acpi_device is created for each
@@ -398,9 +452,8 @@ static void dock_event(struct dock_station *ds, u32 event, int num)
398 if (num == DOCK_EVENT) 452 if (num == DOCK_EVENT)
399 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); 453 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
400 454
401 list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list) 455 list_for_each_entry(dd, &ds->dependent_devices, list)
402 if (dd->ops && dd->ops->uevent) 456 dock_hotplug_event(dd, event, true);
403 dd->ops->uevent(dd->handle, event, dd->context);
404 457
405 if (num != DOCK_EVENT) 458 if (num != DOCK_EVENT)
406 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); 459 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
@@ -570,19 +623,24 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
570 * @handle: the handle of the device 623 * @handle: the handle of the device
571 * @ops: handlers to call after docking 624 * @ops: handlers to call after docking
572 * @context: device specific data 625 * @context: device specific data
626 * @init: Optional initialization routine to run after registration
627 * @release: Optional release routine to run on unregistration
573 * 628 *
574 * If a driver would like to perform a hotplug operation after a dock 629 * event, they can register an acpi_notify_handler to be called by
575 * event, they can register an acpi_notify_handler to be called by 630 * the dock driver after _DCK is executed.
576 * the dock driver after _DCK is executed. 631 * the dock driver after _DCK is executed.
577 */ 632 */
578int 633int register_hotplug_dock_device(acpi_handle handle,
579register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops, 634 const struct acpi_dock_ops *ops, void *context,
580 void *context) 635 void (*init)(void *), void (*release)(void *))
581{ 636{
582 struct dock_dependent_device *dd; 637 struct dock_dependent_device *dd;
583 struct dock_station *dock_station; 638 struct dock_station *dock_station;
584 int ret = -EINVAL; 639 int ret = -EINVAL;
585 640
641 if (WARN_ON(!context))
642 return -EINVAL;
643
586 if (!dock_station_count) 644 if (!dock_station_count)
587 return -ENODEV; 645 return -ENODEV;
588 646
@@ -597,12 +655,8 @@ register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops
597 * ops 655 * ops
598 */ 656 */
599 dd = find_dock_dependent_device(dock_station, handle); 657 dd = find_dock_dependent_device(dock_station, handle);
600 if (dd) { 658 if (dd && !dock_init_hotplug(dd, ops, context, init, release))
601 dd->ops = ops;
602 dd->context = context;
603 dock_add_hotplug_device(dock_station, dd);
604 ret = 0; 659 ret = 0;
605 }
606 } 660 }
607 661
608 return ret; 662 return ret;
@@ -624,7 +678,7 @@ void unregister_hotplug_dock_device(acpi_handle handle)
624 list_for_each_entry(dock_station, &dock_stations, sibling) { 678 list_for_each_entry(dock_station, &dock_stations, sibling) {
625 dd = find_dock_dependent_device(dock_station, handle); 679 dd = find_dock_dependent_device(dock_station, handle);
626 if (dd) 680 if (dd)
627 dock_del_hotplug_device(dock_station, dd); 681 dock_release_hotplug(dd);
628 } 682 }
629} 683}
630EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device); 684EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device);
@@ -868,8 +922,10 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
868 if (!count) 922 if (!count)
869 return -EINVAL; 923 return -EINVAL;
870 924
925 acpi_scan_lock_acquire();
871 begin_undock(dock_station); 926 begin_undock(dock_station);
872 ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST); 927 ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST);
928 acpi_scan_lock_release();
873 return ret ? ret: count; 929 return ret ? ret: count;
874} 930}
875static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock); 931static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
@@ -951,7 +1007,6 @@ static int __init dock_add(acpi_handle handle)
951 mutex_init(&dock_station->hp_lock); 1007 mutex_init(&dock_station->hp_lock);
952 spin_lock_init(&dock_station->dd_lock); 1008 spin_lock_init(&dock_station->dd_lock);
953 INIT_LIST_HEAD(&dock_station->sibling); 1009 INIT_LIST_HEAD(&dock_station->sibling);
954 INIT_LIST_HEAD(&dock_station->hotplug_devices);
955 ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list); 1010 ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
956 INIT_LIST_HEAD(&dock_station->dependent_devices); 1011 INIT_LIST_HEAD(&dock_station->dependent_devices);
957 1012
@@ -992,30 +1047,6 @@ err_unregister:
992} 1047}
993 1048
994/** 1049/**
995 * dock_remove - free up resources related to the dock station
996 */
997static int dock_remove(struct dock_station *ds)
998{
999 struct dock_dependent_device *dd, *tmp;
1000 struct platform_device *dock_device = ds->dock_device;
1001
1002 if (!dock_station_count)
1003 return 0;
1004
1005 /* remove dependent devices */
1006 list_for_each_entry_safe(dd, tmp, &ds->dependent_devices, list)
1007 kfree(dd);
1008
1009 list_del(&ds->sibling);
1010
1011 /* cleanup sysfs */
1012 sysfs_remove_group(&dock_device->dev.kobj, &dock_attribute_group);
1013 platform_device_unregister(dock_device);
1014
1015 return 0;
1016}
1017
1018/**
1019 * find_dock_and_bay - look for dock stations and bays 1050 * find_dock_and_bay - look for dock stations and bays
1020 * @handle: acpi handle of a device 1051 * @handle: acpi handle of a device
1021 * @lvl: unused 1052 * @lvl: unused
@@ -1033,7 +1064,7 @@ find_dock_and_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
1033 return AE_OK; 1064 return AE_OK;
1034} 1065}
1035 1066
1036static int __init dock_init(void) 1067int __init acpi_dock_init(void)
1037{ 1068{
1038 if (acpi_disabled) 1069 if (acpi_disabled)
1039 return 0; 1070 return 0;
@@ -1052,19 +1083,3 @@ static int __init dock_init(void)
1052 ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count); 1083 ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count);
1053 return 0; 1084 return 0;
1054} 1085}
1055
1056static void __exit dock_exit(void)
1057{
1058 struct dock_station *tmp, *dock_station;
1059
1060 unregister_acpi_bus_notifier(&dock_acpi_notifier);
1061 list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibling)
1062 dock_remove(dock_station);
1063}
1064
1065/*
1066 * Must be called before drivers of devices in dock, otherwise we can't know
1067 * which devices are in a dock
1068 */
1069subsys_initcall(dock_init);
1070module_exit(dock_exit);
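The dock.c rework above drops the per-station hotplug list in favour of per-device hp_* fields protected by hotplug_lock plus a reference count, so a handler runs without the lock held and its context cannot be freed underneath it. A hedged userspace analogue of that dispatch/release pattern (a pthread mutex stands in for the kernel mutex; all names are illustrative):

#include <pthread.h>
#include <stddef.h>

struct hp_slot {
	pthread_mutex_t lock;
	unsigned int refcount;			/* 1 while a handler is registered */
	void (*handler)(void *ctx, unsigned int event);
	void (*release)(void *ctx);		/* optional, runs after the last put */
	void *context;
};

static void hp_put(struct hp_slot *s)
{
	void (*release)(void *) = NULL;
	void *ctx = NULL;

	pthread_mutex_lock(&s->lock);
	if (s->context && --s->refcount == 0) {
		ctx = s->context;
		release = s->release;
		s->handler = NULL;
		s->context = NULL;
		s->release = NULL;
	}
	pthread_mutex_unlock(&s->lock);

	if (release && ctx)
		release(ctx);
}

static void hp_event(struct hp_slot *s, unsigned int event)
{
	void (*cb)(void *, unsigned int) = NULL;
	void *ctx = NULL;

	pthread_mutex_lock(&s->lock);
	if (s->context) {
		s->refcount++;			/* pin the context for this event */
		cb = s->handler;
		ctx = s->context;
	}
	pthread_mutex_unlock(&s->lock);

	if (!ctx)
		return;
	if (cb)
		cb(ctx, event);			/* runs with the lock dropped */
	hp_put(s);				/* may free ctx via release() */
}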
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 297cbf456f86..c610a76d92c4 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -40,6 +40,11 @@ void acpi_container_init(void);
40#else 40#else
41static inline void acpi_container_init(void) {} 41static inline void acpi_container_init(void) {}
42#endif 42#endif
43#ifdef CONFIG_ACPI_DOCK
44void acpi_dock_init(void);
45#else
46static inline void acpi_dock_init(void) {}
47#endif
43#ifdef CONFIG_ACPI_HOTPLUG_MEMORY 48#ifdef CONFIG_ACPI_HOTPLUG_MEMORY
44void acpi_memory_hotplug_init(void); 49void acpi_memory_hotplug_init(void);
45#else 50#else
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index f962047c6c85..288bb270f8ed 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -885,6 +885,7 @@ int acpi_add_power_resource(acpi_handle handle)
885 ACPI_STA_DEFAULT); 885 ACPI_STA_DEFAULT);
886 mutex_init(&resource->resource_lock); 886 mutex_init(&resource->resource_lock);
887 INIT_LIST_HEAD(&resource->dependent); 887 INIT_LIST_HEAD(&resource->dependent);
888 INIT_LIST_HEAD(&resource->list_node);
888 resource->name = device->pnp.bus_id; 889 resource->name = device->pnp.bus_id;
889 strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME); 890 strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
890 strcpy(acpi_device_class(device), ACPI_POWER_CLASS); 891 strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index a3868f6c222a..3322b47ab7ca 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -304,7 +304,8 @@ static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi)
304} 304}
305 305
306static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, 306static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
307 u8 triggering, u8 polarity, u8 shareable) 307 u8 triggering, u8 polarity, u8 shareable,
308 bool legacy)
308{ 309{
309 int irq, p, t; 310 int irq, p, t;
310 311
@@ -317,14 +318,19 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
317 * In IO-APIC mode, use the overridden attribute. Two reasons: 318 * In IO-APIC mode, use the overridden attribute. Two reasons:
318 * 1. BIOS bug in DSDT 319 * 1. BIOS bug in DSDT
319 * 2. BIOS uses IO-APIC mode Interrupt Source Override 320 * 2. BIOS uses IO-APIC mode Interrupt Source Override
321 *
322 * We do this only if we are dealing with IRQ() or IRQNoFlags()
323 * resource (the legacy ISA resources). With modern ACPI 5 devices
324 * using extended IRQ descriptors we take the IRQ configuration
325 * from _CRS directly.
320 */ 326 */
321 if (!acpi_get_override_irq(gsi, &t, &p)) { 327 if (legacy && !acpi_get_override_irq(gsi, &t, &p)) {
322 u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; 328 u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
323 u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; 329 u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
324 330
325 if (triggering != trig || polarity != pol) { 331 if (triggering != trig || polarity != pol) {
326 pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi, 332 pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi,
327 t ? "edge" : "level", p ? "low" : "high"); 333 t ? "level" : "edge", p ? "low" : "high");
328 triggering = trig; 334 triggering = trig;
329 polarity = pol; 335 polarity = pol;
330 } 336 }
@@ -373,7 +379,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
373 } 379 }
374 acpi_dev_get_irqresource(res, irq->interrupts[index], 380 acpi_dev_get_irqresource(res, irq->interrupts[index],
375 irq->triggering, irq->polarity, 381 irq->triggering, irq->polarity,
376 irq->sharable); 382 irq->sharable, true);
377 break; 383 break;
378 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: 384 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
379 ext_irq = &ares->data.extended_irq; 385 ext_irq = &ares->data.extended_irq;
@@ -383,7 +389,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
383 } 389 }
384 acpi_dev_get_irqresource(res, ext_irq->interrupts[index], 390 acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
385 ext_irq->triggering, ext_irq->polarity, 391 ext_irq->triggering, ext_irq->polarity,
386 ext_irq->sharable); 392 ext_irq->sharable, false);
387 break; 393 break;
388 default: 394 default:
389 return false; 395 return false;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 44225cb15f3a..27da63061e11 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1017,11 +1017,8 @@ acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
1017 return -ENOSYS; 1017 return -ENOSYS;
1018 1018
1019 result = driver->ops.add(device); 1019 result = driver->ops.add(device);
1020 if (result) { 1020 if (result)
1021 device->driver = NULL;
1022 device->driver_data = NULL;
1023 return result; 1021 return result;
1024 }
1025 1022
1026 device->driver = driver; 1023 device->driver = driver;
1027 1024
@@ -2045,6 +2042,7 @@ int __init acpi_scan_init(void)
2045 acpi_lpss_init(); 2042 acpi_lpss_init();
2046 acpi_container_init(); 2043 acpi_container_init();
2047 acpi_memory_hotplug_init(); 2044 acpi_memory_hotplug_init();
2045 acpi_dock_init();
2048 2046
2049 mutex_lock(&acpi_scan_lock); 2047 mutex_lock(&acpi_scan_lock);
2050 /* 2048 /*
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 5b32e15a65ce..440eadf2d32c 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -458,12 +458,28 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
458 }, 458 },
459 { 459 {
460 .callback = video_ignore_initial_backlight, 460 .callback = video_ignore_initial_backlight,
461 .ident = "HP Pavilion g6 Notebook PC",
462 .matches = {
463 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
464 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
465 },
466 },
467 {
468 .callback = video_ignore_initial_backlight,
461 .ident = "HP 1000 Notebook PC", 469 .ident = "HP 1000 Notebook PC",
462 .matches = { 470 .matches = {
463 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), 471 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
464 DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"), 472 DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"),
465 }, 473 },
466 }, 474 },
475 {
476 .callback = video_ignore_initial_backlight,
477 .ident = "HP Pavilion m4",
478 .matches = {
479 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
480 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion m4 Notebook PC"),
481 },
482 },
467 {} 483 {}
468}; 484};
469 485
@@ -1706,6 +1722,9 @@ static int acpi_video_bus_add(struct acpi_device *device)
1706 int error; 1722 int error;
1707 acpi_status status; 1723 acpi_status status;
1708 1724
1725 if (device->handler)
1726 return -EINVAL;
1727
1709 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, 1728 status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
1710 device->parent->handle, 1, 1729 device->parent->handle, 1,
1711 acpi_video_bus_match, NULL, 1730 acpi_video_bus_match, NULL,
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index 4e94ba29cb8d..9d0cf019ce59 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -2,7 +2,7 @@
2/* 2/*
3 * acard-ahci.c - ACard AHCI SATA support 3 * acard-ahci.c - ACard AHCI SATA support
4 * 4 *
5 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 5 * Maintained by: Tejun Heo <tj@kernel.org>
6 * Please ALWAYS copy linux-ide@vger.kernel.org 6 * Please ALWAYS copy linux-ide@vger.kernel.org
7 * on emails. 7 * on emails.
8 * 8 *
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 251e57d38942..2b50dfdf1cfc 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * ahci.c - AHCI SATA support 2 * ahci.c - AHCI SATA support
3 * 3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Please ALWAYS copy linux-ide@vger.kernel.org 5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails. 6 * on emails.
7 * 7 *
@@ -423,6 +423,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
423 .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ 423 .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
424 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a), 424 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
425 .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ 425 .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
426 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
427 .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
426 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192), 428 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
427 .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */ 429 .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
428 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3), 430 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index b830e6c9fe49..10b14d45cfd2 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * ahci.h - Common AHCI SATA definitions and declarations 2 * ahci.h - Common AHCI SATA definitions and declarations
3 * 3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Please ALWAYS copy linux-ide@vger.kernel.org 5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails. 6 * on emails.
7 * 7 *
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 2f48123d74c4..9a8a674e8fac 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * ata_piix.c - Intel PATA/SATA controllers 2 * ata_piix.c - Intel PATA/SATA controllers
3 * 3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Please ALWAYS copy linux-ide@vger.kernel.org 5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails. 6 * on emails.
7 * 7 *
@@ -151,6 +151,7 @@ enum piix_controller_ids {
151 piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */ 151 piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */
152 ich8_sata_snb, 152 ich8_sata_snb,
153 ich8_2port_sata_snb, 153 ich8_2port_sata_snb,
154 ich8_2port_sata_byt,
154}; 155};
155 156
156struct piix_map_db { 157struct piix_map_db {
@@ -334,6 +335,9 @@ static const struct pci_device_id piix_pci_tbl[] = {
334 { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, 335 { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
335 /* SATA Controller IDE (Wellsburg) */ 336 /* SATA Controller IDE (Wellsburg) */
336 { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 337 { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
338 /* SATA Controller IDE (BayTrail) */
339 { 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
340 { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
337 341
338 { } /* terminate list */ 342 { } /* terminate list */
339}; 343};
@@ -441,6 +445,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
441 [tolapai_sata] = &tolapai_map_db, 445 [tolapai_sata] = &tolapai_map_db,
442 [ich8_sata_snb] = &ich8_map_db, 446 [ich8_sata_snb] = &ich8_map_db,
443 [ich8_2port_sata_snb] = &ich8_2port_map_db, 447 [ich8_2port_sata_snb] = &ich8_2port_map_db,
448 [ich8_2port_sata_byt] = &ich8_2port_map_db,
444}; 449};
445 450
446static struct pci_bits piix_enable_bits[] = { 451static struct pci_bits piix_enable_bits[] = {
@@ -1254,6 +1259,16 @@ static struct ata_port_info piix_port_info[] = {
1254 .udma_mask = ATA_UDMA6, 1259 .udma_mask = ATA_UDMA6,
1255 .port_ops = &piix_sata_ops, 1260 .port_ops = &piix_sata_ops,
1256 }, 1261 },
1262
1263 [ich8_2port_sata_byt] =
1264 {
1265 .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
1266 .pio_mask = ATA_PIO4,
1267 .mwdma_mask = ATA_MWDMA2,
1268 .udma_mask = ATA_UDMA6,
1269 .port_ops = &piix_sata_ops,
1270 },
1271
1257}; 1272};
1258 1273
1259#define AHCI_PCI_BAR 5 1274#define AHCI_PCI_BAR 5
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 34c82167b962..a70ff154f586 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * libahci.c - Common AHCI SATA low-level routines 2 * libahci.c - Common AHCI SATA low-level routines
3 * 3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Please ALWAYS copy linux-ide@vger.kernel.org 5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails. 6 * on emails.
7 * 7 *
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 87f2f395d79a..cf4e7020adac 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -156,8 +156,10 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
156 156
157 spin_unlock_irqrestore(ap->lock, flags); 157 spin_unlock_irqrestore(ap->lock, flags);
158 158
159 if (wait) 159 if (wait) {
160 ata_port_wait_eh(ap); 160 ata_port_wait_eh(ap);
161 flush_work(&ap->hotplug_task.work);
162 }
161} 163}
162 164
163static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data) 165static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data)
@@ -214,6 +216,39 @@ static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
214 .uevent = ata_acpi_ap_uevent, 216 .uevent = ata_acpi_ap_uevent,
215}; 217};
216 218
219void ata_acpi_hotplug_init(struct ata_host *host)
220{
221 int i;
222
223 for (i = 0; i < host->n_ports; i++) {
224 struct ata_port *ap = host->ports[i];
225 acpi_handle handle;
226 struct ata_device *dev;
227
228 if (!ap)
229 continue;
230
231 handle = ata_ap_acpi_handle(ap);
232 if (handle) {
233 /* we might be on a docking station */
234 register_hotplug_dock_device(handle,
235 &ata_acpi_ap_dock_ops, ap,
236 NULL, NULL);
237 }
238
239 ata_for_each_dev(dev, &ap->link, ALL) {
240 handle = ata_dev_acpi_handle(dev);
241 if (!handle)
242 continue;
243
244 /* we might be on a docking station */
245 register_hotplug_dock_device(handle,
246 &ata_acpi_dev_dock_ops,
247 dev, NULL, NULL);
248 }
249 }
250}
251
217/** 252/**
218 * ata_acpi_dissociate - dissociate ATA host from ACPI objects 253 * ata_acpi_dissociate - dissociate ATA host from ACPI objects
219 * @host: target ATA host 254 * @host: target ATA host
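register_hotplug_dock_device() now takes optional init and release hooks next to the ops and context; the libata call sites above pass NULL for both. A hedged sketch of a caller that does use them (my_dock_ops, my_hp_init, my_hp_release and the header choice are illustrative assumptions, not existing kernel symbols):

#include <linux/acpi.h>		/* assumed to pull in acpi_dock_ops and the dock API */

static void my_hp_init(void *context)
{
	/* runs under the dock hotplug lock, right after 'context' is attached */
}

static void my_hp_release(void *context)
{
	/* runs once the last hotplug reference to 'context' has been dropped */
}

static const struct acpi_dock_ops my_dock_ops = {
	/* .handler and/or .uevent callbacks as needed */
};

static int attach_to_dock(acpi_handle handle, void *ctx)
{
	return register_hotplug_dock_device(handle, &my_dock_ops, ctx,
					    my_hp_init, my_hp_release);
}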
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 63c743baf920..adf002a3c584 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * libata-core.c - helper library for ATA 2 * libata-core.c - helper library for ATA
3 * 3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Please ALWAYS copy linux-ide@vger.kernel.org 5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails. 6 * on emails.
7 * 7 *
@@ -1602,6 +1602,12 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
1602 qc->tf = *tf; 1602 qc->tf = *tf;
1603 if (cdb) 1603 if (cdb)
1604 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); 1604 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1605
1606 /* some SATA bridges need us to indicate data xfer direction */
1607 if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1608 dma_dir == DMA_FROM_DEVICE)
1609 qc->tf.feature |= ATAPI_DMADIR;
1610
1605 qc->flags |= ATA_QCFLAG_RESULT_TF; 1611 qc->flags |= ATA_QCFLAG_RESULT_TF;
1606 qc->dma_dir = dma_dir; 1612 qc->dma_dir = dma_dir;
1607 if (dma_dir != DMA_NONE) { 1613 if (dma_dir != DMA_NONE) {
@@ -6142,6 +6148,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6142 if (rc) 6148 if (rc)
6143 goto err_tadd; 6149 goto err_tadd;
6144 6150
6151 ata_acpi_hotplug_init(host);
6152
6145 /* set cable, sata_spd_limit and report */ 6153 /* set cable, sata_spd_limit and report */
6146 for (i = 0; i < host->n_ports; i++) { 6154 for (i = 0; i < host->n_ports; i++) {
6147 struct ata_port *ap = host->ports[i]; 6155 struct ata_port *ap = host->ports[i];
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index f9476fb3ac43..c69fcce505c0 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * libata-eh.c - libata error handling 2 * libata-eh.c - libata error handling
3 * 3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Please ALWAYS copy linux-ide@vger.kernel.org 5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails. 6 * on emails.
7 * 7 *
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index dd310b27b24c..0101af541436 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * libata-scsi.c - helper library for ATA 2 * libata-scsi.c - helper library for ATA
3 * 3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Please ALWAYS copy linux-ide@vger.kernel.org 5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails. 6 * on emails.
7 * 7 *
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index d8af325a6bda..b603720b877d 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * libata-sff.c - helper library for PCI IDE BMDMA 2 * libata-sff.c - helper library for PCI IDE BMDMA
3 * 3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Please ALWAYS copy linux-ide@vger.kernel.org 5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails. 6 * on emails.
7 * 7 *
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index c949dd311b2e..577d902bc4de 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -122,6 +122,7 @@ extern int ata_acpi_register(void);
122extern void ata_acpi_unregister(void); 122extern void ata_acpi_unregister(void);
123extern void ata_acpi_bind(struct ata_device *dev); 123extern void ata_acpi_bind(struct ata_device *dev);
124extern void ata_acpi_unbind(struct ata_device *dev); 124extern void ata_acpi_unbind(struct ata_device *dev);
125extern void ata_acpi_hotplug_init(struct ata_host *host);
125#else 126#else
126static inline void ata_acpi_dissociate(struct ata_host *host) { } 127static inline void ata_acpi_dissociate(struct ata_host *host) { }
127static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; } 128static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
@@ -134,6 +135,7 @@ static inline int ata_acpi_register(void) { return 0; }
134static inline void ata_acpi_unregister(void) { } 135static inline void ata_acpi_unregister(void) { }
135static inline void ata_acpi_bind(struct ata_device *dev) { } 136static inline void ata_acpi_bind(struct ata_device *dev) { }
136static inline void ata_acpi_unbind(struct ata_device *dev) { } 137static inline void ata_acpi_unbind(struct ata_device *dev) { }
138static inline void ata_acpi_hotplug_init(struct ata_host *host) {}
137#endif 139#endif
138 140
139/* libata-scsi.c */ 141/* libata-scsi.c */
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 505333340ad5..8ea6e6afd041 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * pdc_adma.c - Pacific Digital Corporation ADMA 2 * pdc_adma.c - Pacific Digital Corporation ADMA
3 * 3 *
4 * Maintained by: Mark Lord <mlord@pobox.com> 4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * 5 *
6 * Copyright 2005 Mark Lord 6 * Copyright 2005 Mark Lord
7 * 7 *
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index fb0dd87f8893..958ba2a420c3 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * sata_promise.c - Promise SATA 2 * sata_promise.c - Promise SATA
3 * 3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Mikael Pettersson <mikpe@it.uu.se> 5 * Mikael Pettersson <mikpe@it.uu.se>
6 * Please ALWAYS copy linux-ide@vger.kernel.org 6 * Please ALWAYS copy linux-ide@vger.kernel.org
7 * on emails. 7 * on emails.
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 4799868bd733..249c8a289bfd 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -549,6 +549,7 @@ static void sata_rcar_bmdma_start(struct ata_queued_cmd *qc)
549 549
550 /* start host DMA transaction */ 550 /* start host DMA transaction */
551 dmactl = ioread32(priv->base + ATAPI_CONTROL1_REG); 551 dmactl = ioread32(priv->base + ATAPI_CONTROL1_REG);
552 dmactl &= ~ATAPI_CONTROL1_STOP;
552 dmactl |= ATAPI_CONTROL1_START; 553 dmactl |= ATAPI_CONTROL1_START;
553 iowrite32(dmactl, priv->base + ATAPI_CONTROL1_REG); 554 iowrite32(dmactl, priv->base + ATAPI_CONTROL1_REG);
554} 555}
@@ -618,17 +619,16 @@ static struct ata_port_operations sata_rcar_port_ops = {
618 .bmdma_status = sata_rcar_bmdma_status, 619 .bmdma_status = sata_rcar_bmdma_status,
619}; 620};
620 621
621static int sata_rcar_serr_interrupt(struct ata_port *ap) 622static void sata_rcar_serr_interrupt(struct ata_port *ap)
622{ 623{
623 struct sata_rcar_priv *priv = ap->host->private_data; 624 struct sata_rcar_priv *priv = ap->host->private_data;
624 struct ata_eh_info *ehi = &ap->link.eh_info; 625 struct ata_eh_info *ehi = &ap->link.eh_info;
625 int freeze = 0; 626 int freeze = 0;
626 int handled = 0;
627 u32 serror; 627 u32 serror;
628 628
629 serror = ioread32(priv->base + SCRSERR_REG); 629 serror = ioread32(priv->base + SCRSERR_REG);
630 if (!serror) 630 if (!serror)
631 return 0; 631 return;
632 632
633 DPRINTK("SError @host_intr: 0x%x\n", serror); 633 DPRINTK("SError @host_intr: 0x%x\n", serror);
634 634
@@ -641,7 +641,6 @@ static int sata_rcar_serr_interrupt(struct ata_port *ap)
641 ata_ehi_push_desc(ehi, "%s", "hotplug"); 641 ata_ehi_push_desc(ehi, "%s", "hotplug");
642 642
643 freeze = serror & SERR_COMM_WAKE ? 0 : 1; 643 freeze = serror & SERR_COMM_WAKE ? 0 : 1;
644 handled = 1;
645 } 644 }
646 645
647 /* freeze or abort */ 646 /* freeze or abort */
@@ -649,11 +648,9 @@ static int sata_rcar_serr_interrupt(struct ata_port *ap)
649 ata_port_freeze(ap); 648 ata_port_freeze(ap);
650 else 649 else
651 ata_port_abort(ap); 650 ata_port_abort(ap);
652
653 return handled;
654} 651}
655 652
656static int sata_rcar_ata_interrupt(struct ata_port *ap) 653static void sata_rcar_ata_interrupt(struct ata_port *ap)
657{ 654{
658 struct ata_queued_cmd *qc; 655 struct ata_queued_cmd *qc;
659 int handled = 0; 656 int handled = 0;
@@ -662,7 +659,9 @@ static int sata_rcar_ata_interrupt(struct ata_port *ap)
662 if (qc) 659 if (qc)
663 handled |= ata_bmdma_port_intr(ap, qc); 660 handled |= ata_bmdma_port_intr(ap, qc);
664 661
665 return handled; 662 /* be sure to clear ATA interrupt */
663 if (!handled)
664 sata_rcar_check_status(ap);
666} 665}
667 666
668static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance) 667static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance)
@@ -677,20 +676,21 @@ static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance)
677 spin_lock_irqsave(&host->lock, flags); 676 spin_lock_irqsave(&host->lock, flags);
678 677
679 sataintstat = ioread32(priv->base + SATAINTSTAT_REG); 678 sataintstat = ioread32(priv->base + SATAINTSTAT_REG);
679 sataintstat &= SATA_RCAR_INT_MASK;
680 if (!sataintstat) 680 if (!sataintstat)
681 goto done; 681 goto done;
682 /* ack */ 682 /* ack */
683 iowrite32(sataintstat & ~SATA_RCAR_INT_MASK, 683 iowrite32(~sataintstat & 0x7ff, priv->base + SATAINTSTAT_REG);
684 priv->base + SATAINTSTAT_REG);
685 684
686 ap = host->ports[0]; 685 ap = host->ports[0];
687 686
688 if (sataintstat & SATAINTSTAT_ATA) 687 if (sataintstat & SATAINTSTAT_ATA)
689 handled |= sata_rcar_ata_interrupt(ap); 688 sata_rcar_ata_interrupt(ap);
690 689
691 if (sataintstat & SATAINTSTAT_SERR) 690 if (sataintstat & SATAINTSTAT_SERR)
692 handled |= sata_rcar_serr_interrupt(ap); 691 sata_rcar_serr_interrupt(ap);
693 692
693 handled = 1;
694done: 694done:
695 spin_unlock_irqrestore(&host->lock, flags); 695 spin_unlock_irqrestore(&host->lock, flags);
696 696
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index a7b31672c4b7..0ae3ca4bf5c0 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * sata_sil.c - Silicon Image SATA 2 * sata_sil.c - Silicon Image SATA
3 * 3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Please ALWAYS copy linux-ide@vger.kernel.org 5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails. 6 * on emails.
7 * 7 *
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 7b7127a58f51..9947010afc0f 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * sata_sx4.c - Promise SATA 2 * sata_sx4.c - Promise SATA
3 * 3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Please ALWAYS copy linux-ide@vger.kernel.org 5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails. 6 * on emails.
7 * 7 *
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 5913ea9d57b2..87f056e54a9d 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * sata_via.c - VIA Serial ATA controllers 2 * sata_via.c - VIA Serial ATA controllers
3 * 3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Please ALWAYS copy linux-ide@vger.kernel.org 5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails. 6 * on emails.
7 * 7 *
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 4b1f9265887f..01e21037d8fe 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -450,8 +450,18 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
450{ 450{
451 struct firmware_buf *buf = fw_priv->buf; 451 struct firmware_buf *buf = fw_priv->buf;
452 452
453 /*
454 * There is a small window in which user can write to 'loading'
455 * between loading done and disappearance of 'loading'
456 */
457 if (test_bit(FW_STATUS_DONE, &buf->status))
458 return;
459
453 set_bit(FW_STATUS_ABORT, &buf->status); 460 set_bit(FW_STATUS_ABORT, &buf->status);
454 complete_all(&buf->completion); 461 complete_all(&buf->completion);
462
463 /* avoid user action after loading abort */
464 fw_priv->buf = NULL;
455} 465}
456 466
457#define is_fw_load_aborted(buf) \ 467#define is_fw_load_aborted(buf) \
@@ -528,7 +538,12 @@ static ssize_t firmware_loading_show(struct device *dev,
528 struct device_attribute *attr, char *buf) 538 struct device_attribute *attr, char *buf)
529{ 539{
530 struct firmware_priv *fw_priv = to_firmware_priv(dev); 540 struct firmware_priv *fw_priv = to_firmware_priv(dev);
531 int loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status); 541 int loading = 0;
542
543 mutex_lock(&fw_lock);
544 if (fw_priv->buf)
545 loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
546 mutex_unlock(&fw_lock);
532 547
533 return sprintf(buf, "%d\n", loading); 548 return sprintf(buf, "%d\n", loading);
534} 549}
@@ -570,12 +585,12 @@ static ssize_t firmware_loading_store(struct device *dev,
570 const char *buf, size_t count) 585 const char *buf, size_t count)
571{ 586{
572 struct firmware_priv *fw_priv = to_firmware_priv(dev); 587 struct firmware_priv *fw_priv = to_firmware_priv(dev);
573 struct firmware_buf *fw_buf = fw_priv->buf; 588 struct firmware_buf *fw_buf;
574 int loading = simple_strtol(buf, NULL, 10); 589 int loading = simple_strtol(buf, NULL, 10);
575 int i; 590 int i;
576 591
577 mutex_lock(&fw_lock); 592 mutex_lock(&fw_lock);
578 593 fw_buf = fw_priv->buf;
579 if (!fw_buf) 594 if (!fw_buf)
580 goto out; 595 goto out;
581 596
@@ -777,10 +792,6 @@ static void firmware_class_timeout_work(struct work_struct *work)
777 struct firmware_priv, timeout_work.work); 792 struct firmware_priv, timeout_work.work);
778 793
779 mutex_lock(&fw_lock); 794 mutex_lock(&fw_lock);
780 if (test_bit(FW_STATUS_DONE, &(fw_priv->buf->status))) {
781 mutex_unlock(&fw_lock);
782 return;
783 }
784 fw_load_abort(fw_priv); 795 fw_load_abort(fw_priv);
785 mutex_unlock(&fw_lock); 796 mutex_unlock(&fw_lock);
786} 797}
@@ -861,8 +872,6 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
861 872
862 cancel_delayed_work_sync(&fw_priv->timeout_work); 873 cancel_delayed_work_sync(&fw_priv->timeout_work);
863 874
864 fw_priv->buf = NULL;
865
866 device_remove_file(f_dev, &dev_attr_loading); 875 device_remove_file(f_dev, &dev_attr_loading);
867err_del_bin_attr: 876err_del_bin_attr:
868 device_remove_bin_file(f_dev, &firmware_attr_data); 877 device_remove_bin_file(f_dev, &firmware_attr_data);
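
The firmware_class hunks above follow one pattern: a shared buffer pointer may be cleared by the abort path, so readers re-read it under the same lock before dereferencing it. A minimal userspace sketch of that pattern, assuming hypothetical names and a pthread mutex rather than the kernel's fw_lock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fw_buf { int status; };

struct fw_priv {
    struct fw_buf *buf;          /* may be set to NULL by the abort path */
};

static pthread_mutex_t fw_lock = PTHREAD_MUTEX_INITIALIZER;

/* abort path: clear the pointer while holding the lock */
static void load_abort(struct fw_priv *p)
{
    pthread_mutex_lock(&fw_lock);
    free(p->buf);
    p->buf = NULL;
    pthread_mutex_unlock(&fw_lock);
}

/* reader path: take the lock, re-check the pointer, only then use it */
static int loading_show(struct fw_priv *p)
{
    int loading = 0;

    pthread_mutex_lock(&fw_lock);
    if (p->buf)
        loading = p->buf->status;
    pthread_mutex_unlock(&fw_lock);

    return loading;
}

int main(void)
{
    struct fw_buf *b = malloc(sizeof(*b));
    if (!b)
        return 1;
    b->status = 1;

    struct fw_priv priv = { .buf = b };
    printf("loading before abort: %d\n", loading_show(&priv));
    load_abort(&priv);
    printf("loading after abort:  %d\n", loading_show(&priv));
    return 0;
}

The sketch only illustrates the lock-then-recheck discipline; the real driver keeps refcounted buffers and more states than shown here.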
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index aa0875f6f1b7..02f490bad30f 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -143,7 +143,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
143 int registers = 0; 143 int registers = 0;
144 int this_registers, average; 144 int this_registers, average;
145 145
146 map->lock(map); 146 map->lock(map->lock_arg);
147 147
148 mem_size = sizeof(*rbtree_ctx); 148 mem_size = sizeof(*rbtree_ctx);
149 mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long); 149 mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long);
@@ -170,7 +170,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
170 seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n", 170 seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
171 nodes, registers, average, mem_size); 171 nodes, registers, average, mem_size);
172 172
173 map->unlock(map); 173 map->unlock(map->lock_arg);
174 174
175 return 0; 175 return 0;
176} 176}
@@ -391,8 +391,6 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
391 for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { 391 for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
392 rbnode = rb_entry(node, struct regcache_rbtree_node, node); 392 rbnode = rb_entry(node, struct regcache_rbtree_node, node);
393 393
394 if (rbnode->base_reg < min)
395 continue;
396 if (rbnode->base_reg > max) 394 if (rbnode->base_reg > max)
397 break; 395 break;
398 if (rbnode->base_reg + rbnode->blklen < min) 396 if (rbnode->base_reg + rbnode->blklen < min)
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 75923f2396bd..507ee2da0f6e 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -270,7 +270,7 @@ int regcache_sync(struct regmap *map)
270 270
271 BUG_ON(!map->cache_ops || !map->cache_ops->sync); 271 BUG_ON(!map->cache_ops || !map->cache_ops->sync);
272 272
273 map->lock(map); 273 map->lock(map->lock_arg);
274 /* Remember the initial bypass state */ 274 /* Remember the initial bypass state */
275 bypass = map->cache_bypass; 275 bypass = map->cache_bypass;
276 dev_dbg(map->dev, "Syncing %s cache\n", 276 dev_dbg(map->dev, "Syncing %s cache\n",
@@ -306,7 +306,7 @@ out:
306 trace_regcache_sync(map->dev, name, "stop"); 306 trace_regcache_sync(map->dev, name, "stop");
307 /* Restore the bypass state */ 307 /* Restore the bypass state */
308 map->cache_bypass = bypass; 308 map->cache_bypass = bypass;
309 map->unlock(map); 309 map->unlock(map->lock_arg);
310 310
311 return ret; 311 return ret;
312} 312}
@@ -333,7 +333,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
333 333
334 BUG_ON(!map->cache_ops || !map->cache_ops->sync); 334 BUG_ON(!map->cache_ops || !map->cache_ops->sync);
335 335
336 map->lock(map); 336 map->lock(map->lock_arg);
337 337
338 /* Remember the initial bypass state */ 338 /* Remember the initial bypass state */
339 bypass = map->cache_bypass; 339 bypass = map->cache_bypass;
@@ -352,7 +352,7 @@ out:
352 trace_regcache_sync(map->dev, name, "stop region"); 352 trace_regcache_sync(map->dev, name, "stop region");
353 /* Restore the bypass state */ 353 /* Restore the bypass state */
354 map->cache_bypass = bypass; 354 map->cache_bypass = bypass;
355 map->unlock(map); 355 map->unlock(map->lock_arg);
356 356
357 return ret; 357 return ret;
358} 358}
@@ -372,11 +372,11 @@ EXPORT_SYMBOL_GPL(regcache_sync_region);
372 */ 372 */
373void regcache_cache_only(struct regmap *map, bool enable) 373void regcache_cache_only(struct regmap *map, bool enable)
374{ 374{
375 map->lock(map); 375 map->lock(map->lock_arg);
376 WARN_ON(map->cache_bypass && enable); 376 WARN_ON(map->cache_bypass && enable);
377 map->cache_only = enable; 377 map->cache_only = enable;
378 trace_regmap_cache_only(map->dev, enable); 378 trace_regmap_cache_only(map->dev, enable);
379 map->unlock(map); 379 map->unlock(map->lock_arg);
380} 380}
381EXPORT_SYMBOL_GPL(regcache_cache_only); 381EXPORT_SYMBOL_GPL(regcache_cache_only);
382 382
@@ -391,9 +391,9 @@ EXPORT_SYMBOL_GPL(regcache_cache_only);
391 */ 391 */
392void regcache_mark_dirty(struct regmap *map) 392void regcache_mark_dirty(struct regmap *map)
393{ 393{
394 map->lock(map); 394 map->lock(map->lock_arg);
395 map->cache_dirty = true; 395 map->cache_dirty = true;
396 map->unlock(map); 396 map->unlock(map->lock_arg);
397} 397}
398EXPORT_SYMBOL_GPL(regcache_mark_dirty); 398EXPORT_SYMBOL_GPL(regcache_mark_dirty);
399 399
@@ -410,11 +410,11 @@ EXPORT_SYMBOL_GPL(regcache_mark_dirty);
410 */ 410 */
411void regcache_cache_bypass(struct regmap *map, bool enable) 411void regcache_cache_bypass(struct regmap *map, bool enable)
412{ 412{
413 map->lock(map); 413 map->lock(map->lock_arg);
414 WARN_ON(map->cache_only && enable); 414 WARN_ON(map->cache_only && enable);
415 map->cache_bypass = enable; 415 map->cache_bypass = enable;
416 trace_regmap_cache_bypass(map->dev, enable); 416 trace_regmap_cache_bypass(map->dev, enable);
417 map->unlock(map); 417 map->unlock(map->lock_arg);
418} 418}
419EXPORT_SYMBOL_GPL(regcache_cache_bypass); 419EXPORT_SYMBOL_GPL(regcache_cache_bypass);
420 420
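
The regcache hunks above change every map->lock(map)/map->unlock(map) call to pass map->lock_arg, so custom lock callbacks receive the argument they were registered with rather than the map itself. A small illustrative sketch of that callback shape, with hypothetical names standing in for the regmap API:

#include <pthread.h>
#include <stdio.h>

/* lock callbacks take an opaque argument chosen at registration time */
typedef void (*lock_fn)(void *lock_arg);

struct map {
    lock_fn lock;
    lock_fn unlock;
    void *lock_arg;              /* what the callbacks expect, not &map */
    int cache_dirty;
};

static void mutex_lock_cb(void *lock_arg)
{
    pthread_mutex_lock(lock_arg);
}

static void mutex_unlock_cb(void *lock_arg)
{
    pthread_mutex_unlock(lock_arg);
}

static void mark_dirty(struct map *m)
{
    /* call the callbacks with lock_arg, never with the map itself */
    m->lock(m->lock_arg);
    m->cache_dirty = 1;
    m->unlock(m->lock_arg);
}

int main(void)
{
    pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    struct map m = {
        .lock = mutex_lock_cb,
        .unlock = mutex_unlock_cb,
        .lock_arg = &mtx,
    };

    mark_dirty(&m);
    printf("cache_dirty = %d\n", m.cache_dirty);
    return 0;
}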
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 23b701f5fd2f..975719bc3450 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -265,6 +265,7 @@ static ssize_t regmap_map_write_file(struct file *file,
265 char *start = buf; 265 char *start = buf;
266 unsigned long reg, value; 266 unsigned long reg, value;
267 struct regmap *map = file->private_data; 267 struct regmap *map = file->private_data;
268 int ret;
268 269
269 buf_size = min(count, (sizeof(buf)-1)); 270 buf_size = min(count, (sizeof(buf)-1));
270 if (copy_from_user(buf, user_buf, buf_size)) 271 if (copy_from_user(buf, user_buf, buf_size))
@@ -282,7 +283,9 @@ static ssize_t regmap_map_write_file(struct file *file,
282 /* Userspace has been fiddling around behind the kernel's back */ 283 /* Userspace has been fiddling around behind the kernel's back */
283 add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE); 284 add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE);
284 285
285 regmap_write(map, reg, value); 286 ret = regmap_write(map, reg, value);
287 if (ret < 0)
288 return ret;
286 return buf_size; 289 return buf_size;
287} 290}
288#else 291#else
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 6374dc103521..62b6c2cc80b5 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -168,8 +168,6 @@ static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
168static int cciss_open(struct block_device *bdev, fmode_t mode); 168static int cciss_open(struct block_device *bdev, fmode_t mode);
169static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode); 169static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
170static void cciss_release(struct gendisk *disk, fmode_t mode); 170static void cciss_release(struct gendisk *disk, fmode_t mode);
171static int do_ioctl(struct block_device *bdev, fmode_t mode,
172 unsigned int cmd, unsigned long arg);
173static int cciss_ioctl(struct block_device *bdev, fmode_t mode, 171static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
174 unsigned int cmd, unsigned long arg); 172 unsigned int cmd, unsigned long arg);
175static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); 173static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -235,7 +233,7 @@ static const struct block_device_operations cciss_fops = {
235 .owner = THIS_MODULE, 233 .owner = THIS_MODULE,
236 .open = cciss_unlocked_open, 234 .open = cciss_unlocked_open,
237 .release = cciss_release, 235 .release = cciss_release,
238 .ioctl = do_ioctl, 236 .ioctl = cciss_ioctl,
239 .getgeo = cciss_getgeo, 237 .getgeo = cciss_getgeo,
240#ifdef CONFIG_COMPAT 238#ifdef CONFIG_COMPAT
241 .compat_ioctl = cciss_compat_ioctl, 239 .compat_ioctl = cciss_compat_ioctl,
@@ -1143,16 +1141,6 @@ static void cciss_release(struct gendisk *disk, fmode_t mode)
1143 mutex_unlock(&cciss_mutex); 1141 mutex_unlock(&cciss_mutex);
1144} 1142}
1145 1143
1146static int do_ioctl(struct block_device *bdev, fmode_t mode,
1147 unsigned cmd, unsigned long arg)
1148{
1149 int ret;
1150 mutex_lock(&cciss_mutex);
1151 ret = cciss_ioctl(bdev, mode, cmd, arg);
1152 mutex_unlock(&cciss_mutex);
1153 return ret;
1154}
1155
1156#ifdef CONFIG_COMPAT 1144#ifdef CONFIG_COMPAT
1157 1145
1158static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, 1146static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
@@ -1179,7 +1167,7 @@ static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
1179 case CCISS_REGNEWD: 1167 case CCISS_REGNEWD:
1180 case CCISS_RESCANDISK: 1168 case CCISS_RESCANDISK:
1181 case CCISS_GETLUNINFO: 1169 case CCISS_GETLUNINFO:
1182 return do_ioctl(bdev, mode, cmd, arg); 1170 return cciss_ioctl(bdev, mode, cmd, arg);
1183 1171
1184 case CCISS_PASSTHRU32: 1172 case CCISS_PASSTHRU32:
1185 return cciss_ioctl32_passthru(bdev, mode, cmd, arg); 1173 return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
@@ -1219,7 +1207,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
1219 if (err) 1207 if (err)
1220 return -EFAULT; 1208 return -EFAULT;
1221 1209
1222 err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p); 1210 err = cciss_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
1223 if (err) 1211 if (err)
1224 return err; 1212 return err;
1225 err |= 1213 err |=
@@ -1261,7 +1249,7 @@ static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
1261 if (err) 1249 if (err)
1262 return -EFAULT; 1250 return -EFAULT;
1263 1251
1264 err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p); 1252 err = cciss_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
1265 if (err) 1253 if (err)
1266 return err; 1254 return err;
1267 err |= 1255 err |=
@@ -1311,11 +1299,14 @@ static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp)
1311static int cciss_getintinfo(ctlr_info_t *h, void __user *argp) 1299static int cciss_getintinfo(ctlr_info_t *h, void __user *argp)
1312{ 1300{
1313 cciss_coalint_struct intinfo; 1301 cciss_coalint_struct intinfo;
1302 unsigned long flags;
1314 1303
1315 if (!argp) 1304 if (!argp)
1316 return -EINVAL; 1305 return -EINVAL;
1306 spin_lock_irqsave(&h->lock, flags);
1317 intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay); 1307 intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay);
1318 intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount); 1308 intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount);
1309 spin_unlock_irqrestore(&h->lock, flags);
1319 if (copy_to_user 1310 if (copy_to_user
1320 (argp, &intinfo, sizeof(cciss_coalint_struct))) 1311 (argp, &intinfo, sizeof(cciss_coalint_struct)))
1321 return -EFAULT; 1312 return -EFAULT;
@@ -1356,12 +1347,15 @@ static int cciss_setintinfo(ctlr_info_t *h, void __user *argp)
1356static int cciss_getnodename(ctlr_info_t *h, void __user *argp) 1347static int cciss_getnodename(ctlr_info_t *h, void __user *argp)
1357{ 1348{
1358 NodeName_type NodeName; 1349 NodeName_type NodeName;
1350 unsigned long flags;
1359 int i; 1351 int i;
1360 1352
1361 if (!argp) 1353 if (!argp)
1362 return -EINVAL; 1354 return -EINVAL;
1355 spin_lock_irqsave(&h->lock, flags);
1363 for (i = 0; i < 16; i++) 1356 for (i = 0; i < 16; i++)
1364 NodeName[i] = readb(&h->cfgtable->ServerName[i]); 1357 NodeName[i] = readb(&h->cfgtable->ServerName[i]);
1358 spin_unlock_irqrestore(&h->lock, flags);
1365 if (copy_to_user(argp, NodeName, sizeof(NodeName_type))) 1359 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
1366 return -EFAULT; 1360 return -EFAULT;
1367 return 0; 1361 return 0;
@@ -1398,10 +1392,13 @@ static int cciss_setnodename(ctlr_info_t *h, void __user *argp)
1398static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp) 1392static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
1399{ 1393{
1400 Heartbeat_type heartbeat; 1394 Heartbeat_type heartbeat;
1395 unsigned long flags;
1401 1396
1402 if (!argp) 1397 if (!argp)
1403 return -EINVAL; 1398 return -EINVAL;
1399 spin_lock_irqsave(&h->lock, flags);
1404 heartbeat = readl(&h->cfgtable->HeartBeat); 1400 heartbeat = readl(&h->cfgtable->HeartBeat);
1401 spin_unlock_irqrestore(&h->lock, flags);
1405 if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type))) 1402 if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type)))
1406 return -EFAULT; 1403 return -EFAULT;
1407 return 0; 1404 return 0;
@@ -1410,10 +1407,13 @@ static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
1410static int cciss_getbustypes(ctlr_info_t *h, void __user *argp) 1407static int cciss_getbustypes(ctlr_info_t *h, void __user *argp)
1411{ 1408{
1412 BusTypes_type BusTypes; 1409 BusTypes_type BusTypes;
1410 unsigned long flags;
1413 1411
1414 if (!argp) 1412 if (!argp)
1415 return -EINVAL; 1413 return -EINVAL;
1414 spin_lock_irqsave(&h->lock, flags);
1416 BusTypes = readl(&h->cfgtable->BusTypes); 1415 BusTypes = readl(&h->cfgtable->BusTypes);
1416 spin_unlock_irqrestore(&h->lock, flags);
1417 if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type))) 1417 if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type)))
1418 return -EFAULT; 1418 return -EFAULT;
1419 return 0; 1419 return 0;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 847107ef0cce..20dd52a2f92f 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3002,7 +3002,8 @@ static int mtip_hw_debugfs_init(struct driver_data *dd)
3002 3002
3003static void mtip_hw_debugfs_exit(struct driver_data *dd) 3003static void mtip_hw_debugfs_exit(struct driver_data *dd)
3004{ 3004{
3005 debugfs_remove_recursive(dd->dfs_node); 3005 if (dd->dfs_node)
3006 debugfs_remove_recursive(dd->dfs_node);
3006} 3007}
3007 3008
3008 3009
@@ -3863,7 +3864,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
3863 struct driver_data *dd = queue->queuedata; 3864 struct driver_data *dd = queue->queuedata;
3864 struct scatterlist *sg; 3865 struct scatterlist *sg;
3865 struct bio_vec *bvec; 3866 struct bio_vec *bvec;
3866 int nents = 0; 3867 int i, nents = 0;
3867 int tag = 0, unaligned = 0; 3868 int tag = 0, unaligned = 0;
3868 3869
3869 if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) { 3870 if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
@@ -3921,11 +3922,12 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
3921 } 3922 }
3922 3923
3923 /* Create the scatter list for this bio. */ 3924 /* Create the scatter list for this bio. */
3924 bio_for_each_segment(bvec, bio, nents) { 3925 bio_for_each_segment(bvec, bio, i) {
3925 sg_set_page(&sg[nents], 3926 sg_set_page(&sg[nents],
3926 bvec->bv_page, 3927 bvec->bv_page,
3927 bvec->bv_len, 3928 bvec->bv_len,
3928 bvec->bv_offset); 3929 bvec->bv_offset);
3930 nents++;
3929 } 3931 }
3930 3932
3931 /* Issue the read/write. */ 3933 /* Issue the read/write. */
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 8efdfaa44a59..ce79a590b45b 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -629,7 +629,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
629 struct nvme_command *cmnd; 629 struct nvme_command *cmnd;
630 struct nvme_iod *iod; 630 struct nvme_iod *iod;
631 enum dma_data_direction dma_dir; 631 enum dma_data_direction dma_dir;
632 int cmdid, length, result = -ENOMEM; 632 int cmdid, length, result;
633 u16 control; 633 u16 control;
634 u32 dsmgmt; 634 u32 dsmgmt;
635 int psegs = bio_phys_segments(ns->queue, bio); 635 int psegs = bio_phys_segments(ns->queue, bio);
@@ -640,6 +640,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
640 return result; 640 return result;
641 } 641 }
642 642
643 result = -ENOMEM;
643 iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC); 644 iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
644 if (!iod) 645 if (!iod)
645 goto nomem; 646 goto nomem;
@@ -977,6 +978,8 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
977 978
978 if (timeout && !time_after(now, info[cmdid].timeout)) 979 if (timeout && !time_after(now, info[cmdid].timeout))
979 continue; 980 continue;
981 if (info[cmdid].ctx == CMD_CTX_CANCELLED)
982 continue;
980 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid); 983 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
981 ctx = cancel_cmdid(nvmeq, cmdid, &fn); 984 ctx = cancel_cmdid(nvmeq, cmdid, &fn);
982 fn(nvmeq->dev, ctx, &cqe); 985 fn(nvmeq->dev, ctx, &cqe);
@@ -1206,7 +1209,7 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
1206 1209
1207 if (addr & 3) 1210 if (addr & 3)
1208 return ERR_PTR(-EINVAL); 1211 return ERR_PTR(-EINVAL);
1209 if (!length) 1212 if (!length || length > INT_MAX - PAGE_SIZE)
1210 return ERR_PTR(-EINVAL); 1213 return ERR_PTR(-EINVAL);
1211 1214
1212 offset = offset_in_page(addr); 1215 offset = offset_in_page(addr);
@@ -1227,7 +1230,8 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
1227 sg_init_table(sg, count); 1230 sg_init_table(sg, count);
1228 for (i = 0; i < count; i++) { 1231 for (i = 0; i < count; i++) {
1229 sg_set_page(&sg[i], pages[i], 1232 sg_set_page(&sg[i], pages[i],
1230 min_t(int, length, PAGE_SIZE - offset), offset); 1233 min_t(unsigned, length, PAGE_SIZE - offset),
1234 offset);
1231 length -= (PAGE_SIZE - offset); 1235 length -= (PAGE_SIZE - offset);
1232 offset = 0; 1236 offset = 0;
1233 } 1237 }
@@ -1435,7 +1439,7 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
1435 nvme_free_iod(dev, iod); 1439 nvme_free_iod(dev, iod);
1436 } 1440 }
1437 1441
1438 if (!status && copy_to_user(&ucmd->result, &cmd.result, 1442 if ((status >= 0) && copy_to_user(&ucmd->result, &cmd.result,
1439 sizeof(cmd.result))) 1443 sizeof(cmd.result)))
1440 status = -EFAULT; 1444 status = -EFAULT;
1441 1445
@@ -1633,7 +1637,8 @@ static int set_queue_count(struct nvme_dev *dev, int count)
1633 1637
1634static int nvme_setup_io_queues(struct nvme_dev *dev) 1638static int nvme_setup_io_queues(struct nvme_dev *dev)
1635{ 1639{
1636 int result, cpu, i, nr_io_queues, db_bar_size, q_depth; 1640 struct pci_dev *pdev = dev->pci_dev;
1641 int result, cpu, i, nr_io_queues, db_bar_size, q_depth, q_count;
1637 1642
1638 nr_io_queues = num_online_cpus(); 1643 nr_io_queues = num_online_cpus();
1639 result = set_queue_count(dev, nr_io_queues); 1644 result = set_queue_count(dev, nr_io_queues);
@@ -1642,14 +1647,14 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1642 if (result < nr_io_queues) 1647 if (result < nr_io_queues)
1643 nr_io_queues = result; 1648 nr_io_queues = result;
1644 1649
1650 q_count = nr_io_queues;
1645 /* Deregister the admin queue's interrupt */ 1651 /* Deregister the admin queue's interrupt */
1646 free_irq(dev->entry[0].vector, dev->queues[0]); 1652 free_irq(dev->entry[0].vector, dev->queues[0]);
1647 1653
1648 db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3)); 1654 db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
1649 if (db_bar_size > 8192) { 1655 if (db_bar_size > 8192) {
1650 iounmap(dev->bar); 1656 iounmap(dev->bar);
1651 dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0), 1657 dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size);
1652 db_bar_size);
1653 dev->dbs = ((void __iomem *)dev->bar) + 4096; 1658 dev->dbs = ((void __iomem *)dev->bar) + 4096;
1654 dev->queues[0]->q_db = dev->dbs; 1659 dev->queues[0]->q_db = dev->dbs;
1655 } 1660 }
@@ -1657,19 +1662,36 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1657 for (i = 0; i < nr_io_queues; i++) 1662 for (i = 0; i < nr_io_queues; i++)
1658 dev->entry[i].entry = i; 1663 dev->entry[i].entry = i;
1659 for (;;) { 1664 for (;;) {
1660 result = pci_enable_msix(dev->pci_dev, dev->entry, 1665 result = pci_enable_msix(pdev, dev->entry, nr_io_queues);
1661 nr_io_queues);
1662 if (result == 0) { 1666 if (result == 0) {
1663 break; 1667 break;
1664 } else if (result > 0) { 1668 } else if (result > 0) {
1665 nr_io_queues = result; 1669 nr_io_queues = result;
1666 continue; 1670 continue;
1667 } else { 1671 } else {
1668 nr_io_queues = 1; 1672 nr_io_queues = 0;
1669 break; 1673 break;
1670 } 1674 }
1671 } 1675 }
1672 1676
1677 if (nr_io_queues == 0) {
1678 nr_io_queues = q_count;
1679 for (;;) {
1680 result = pci_enable_msi_block(pdev, nr_io_queues);
1681 if (result == 0) {
1682 for (i = 0; i < nr_io_queues; i++)
1683 dev->entry[i].vector = i + pdev->irq;
1684 break;
1685 } else if (result > 0) {
1686 nr_io_queues = result;
1687 continue;
1688 } else {
1689 nr_io_queues = 1;
1690 break;
1691 }
1692 }
1693 }
1694
1673 result = queue_request_irq(dev, dev->queues[0], "nvme admin"); 1695 result = queue_request_irq(dev, dev->queues[0], "nvme admin");
1674 /* XXX: handle failure here */ 1696 /* XXX: handle failure here */
1675 1697
@@ -1850,7 +1872,10 @@ static void nvme_free_dev(struct kref *kref)
1850{ 1872{
1851 struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref); 1873 struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
1852 nvme_dev_remove(dev); 1874 nvme_dev_remove(dev);
1853 pci_disable_msix(dev->pci_dev); 1875 if (dev->pci_dev->msi_enabled)
1876 pci_disable_msi(dev->pci_dev);
1877 else if (dev->pci_dev->msix_enabled)
1878 pci_disable_msix(dev->pci_dev);
1854 iounmap(dev->bar); 1879 iounmap(dev->bar);
1855 nvme_release_instance(dev); 1880 nvme_release_instance(dev);
1856 nvme_release_prp_pools(dev); 1881 nvme_release_prp_pools(dev);
@@ -1923,8 +1948,14 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1923 INIT_LIST_HEAD(&dev->namespaces); 1948 INIT_LIST_HEAD(&dev->namespaces);
1924 dev->pci_dev = pdev; 1949 dev->pci_dev = pdev;
1925 pci_set_drvdata(pdev, dev); 1950 pci_set_drvdata(pdev, dev);
1926 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 1951
1927 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 1952 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
1953 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1954 else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
1955 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1956 else
1957 goto disable;
1958
1928 result = nvme_set_instance(dev); 1959 result = nvme_set_instance(dev);
1929 if (result) 1960 if (result)
1930 goto disable; 1961 goto disable;
@@ -1977,7 +2008,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1977 unmap: 2008 unmap:
1978 iounmap(dev->bar); 2009 iounmap(dev->bar);
1979 disable_msix: 2010 disable_msix:
1980 pci_disable_msix(pdev); 2011 if (dev->pci_dev->msi_enabled)
2012 pci_disable_msi(dev->pci_dev);
2013 else if (dev->pci_dev->msix_enabled)
2014 pci_disable_msix(dev->pci_dev);
1981 nvme_release_instance(dev); 2015 nvme_release_instance(dev);
1982 nvme_release_prp_pools(dev); 2016 nvme_release_prp_pools(dev);
1983 disable: 2017 disable:
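
The nvme_setup_io_queues() hunk above adds a second negotiation loop: if MSI-X cannot be enabled at all, the driver retries with plain MSI, shrinking the vector count whenever the enable call reports that fewer vectors are available. A stubbed userspace sketch of that retry-then-fall-back shape; the fake_enable_* functions below are stand-ins for the PCI helpers, not the real API:

#include <stdio.h>

/* stand-in for an MSI-X enable: 0 = ok, >0 = only that many vectors, <0 = error */
static int fake_enable_msix(int want)
{
    (void)want;
    return -1;                   /* pretend MSI-X is not available at all */
}

/* stand-in for an MSI enable, same return convention */
static int fake_enable_msi(int want)
{
    if (want > 4)
        return 4;                /* pretend the device exposes only 4 MSI vectors */
    return 0;
}

static int setup_vectors(int wanted)
{
    int n = wanted;

    /* first pass: MSI-X, shrinking the request while fewer vectors are offered */
    for (;;) {
        int ret = fake_enable_msix(n);
        if (ret == 0)
            return n;
        if (ret > 0) {
            n = ret;
            continue;
        }
        n = 0;                   /* hard failure: fall through to MSI */
        break;
    }

    /* second pass: plain MSI, starting again from the original request */
    n = wanted;
    for (;;) {
        int ret = fake_enable_msi(n);
        if (ret == 0)
            return n;
        if (ret > 0) {
            n = ret;
            continue;
        }
        return 1;                /* last resort: a single vector */
    }
}

int main(void)
{
    printf("vectors granted: %d\n", setup_vectors(16));
    return 0;
}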
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index fed54b039893..102de2f52b5c 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -44,7 +44,6 @@
44#include <linux/sched.h> 44#include <linux/sched.h>
45#include <linux/slab.h> 45#include <linux/slab.h>
46#include <linux/types.h> 46#include <linux/types.h>
47#include <linux/version.h>
48#include <scsi/sg.h> 47#include <scsi/sg.h>
49#include <scsi/scsi.h> 48#include <scsi/scsi.h>
50 49
@@ -1654,7 +1653,7 @@ static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
1654 } 1653 }
1655} 1654}
1656 1655
1657static u16 nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr, 1656static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1658 u8 *mode_page, u8 page_code) 1657 u8 *mode_page, u8 page_code)
1659{ 1658{
1660 int res = SNTI_TRANSLATION_SUCCESS; 1659 int res = SNTI_TRANSLATION_SUCCESS;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 3c08983e600a..f5d0ea11d9fd 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -83,7 +83,8 @@
83 83
84#define MAX_SPEED 0xffff 84#define MAX_SPEED 0xffff
85 85
86#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1)) 86#define ZONE(sector, pd) (((sector) + (pd)->offset) & \
87 ~(sector_t)((pd)->settings.size - 1))
87 88
88static DEFINE_MUTEX(pktcdvd_mutex); 89static DEFINE_MUTEX(pktcdvd_mutex);
89static struct pktcdvd_device *pkt_devs[MAX_WRITERS]; 90static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
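
The one-line pktcdvd change above is about integer width: settings.size is a 32-bit quantity, so ~(size - 1) is a 32-bit mask whose upper bits are zero, and ANDing it with a 64-bit sector number silently drops the high half. Casting to the 64-bit type before the complement keeps the mask full width. A short demonstration, using plain fixed-width types in place of sector_t:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t sector = 0x123456789ULL;   /* an LBA above the 32-bit range */
    uint32_t size = 32;                 /* zone size, a power of two */

    /* 32-bit mask: ~(size - 1) has zero upper bits, truncating the result */
    uint64_t bad = sector & ~(size - 1);

    /* widen first: the complement now covers all 64 bits */
    uint64_t good = sector & ~(uint64_t)(size - 1);

    printf("bad  zone start: 0x%" PRIx64 "\n", bad);   /* 0x23456780 */
    printf("good zone start: 0x%" PRIx64 "\n", good);  /* 0x123456780 */
    return 0;
}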
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d6d314027b5d..aff789d6fccd 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -519,8 +519,8 @@ static const struct block_device_operations rbd_bd_ops = {
519}; 519};
520 520
521/* 521/*
522 * Initialize an rbd client instance. 522 * Initialize an rbd client instance. Success or not, this function
523 * We own *ceph_opts. 523 * consumes ceph_opts.
524 */ 524 */
525static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts) 525static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
526{ 526{
@@ -675,7 +675,8 @@ static int parse_rbd_opts_token(char *c, void *private)
675 675
676/* 676/*
677 * Get a ceph client with specific addr and configuration, if one does 677 * Get a ceph client with specific addr and configuration, if one does
678 * not exist create it. 678 * not exist create it. Either way, ceph_opts is consumed by this
679 * function.
679 */ 680 */
680static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts) 681static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
681{ 682{
@@ -1035,12 +1036,16 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
1035 char *name; 1036 char *name;
1036 u64 segment; 1037 u64 segment;
1037 int ret; 1038 int ret;
1039 char *name_format;
1038 1040
1039 name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO); 1041 name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
1040 if (!name) 1042 if (!name)
1041 return NULL; 1043 return NULL;
1042 segment = offset >> rbd_dev->header.obj_order; 1044 segment = offset >> rbd_dev->header.obj_order;
1043 ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx", 1045 name_format = "%s.%012llx";
1046 if (rbd_dev->image_format == 2)
1047 name_format = "%s.%016llx";
1048 ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
1044 rbd_dev->header.object_prefix, segment); 1049 rbd_dev->header.object_prefix, segment);
1045 if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) { 1050 if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
1046 pr_err("error formatting segment name for #%llu (%d)\n", 1051 pr_err("error formatting segment name for #%llu (%d)\n",
@@ -2247,13 +2252,17 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
2247 obj_request->pages, length, 2252 obj_request->pages, length,
2248 offset & ~PAGE_MASK, false, false); 2253 offset & ~PAGE_MASK, false, false);
2249 2254
2255 /*
2256 * set obj_request->img_request before formatting
2257 * the osd_request so that it gets the right snapc
2258 */
2259 rbd_img_obj_request_add(img_request, obj_request);
2250 if (write_request) 2260 if (write_request)
2251 rbd_osd_req_format_write(obj_request); 2261 rbd_osd_req_format_write(obj_request);
2252 else 2262 else
2253 rbd_osd_req_format_read(obj_request); 2263 rbd_osd_req_format_read(obj_request);
2254 2264
2255 obj_request->img_offset = img_offset; 2265 obj_request->img_offset = img_offset;
2256 rbd_img_obj_request_add(img_request, obj_request);
2257 2266
2258 img_offset += length; 2267 img_offset += length;
2259 resid -= length; 2268 resid -= length;
@@ -4238,6 +4247,10 @@ static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4238 4247
4239 down_write(&rbd_dev->header_rwsem); 4248 down_write(&rbd_dev->header_rwsem);
4240 4249
4250 ret = rbd_dev_v2_image_size(rbd_dev);
4251 if (ret)
4252 goto out;
4253
4241 if (first_time) { 4254 if (first_time) {
4242 ret = rbd_dev_v2_header_onetime(rbd_dev); 4255 ret = rbd_dev_v2_header_onetime(rbd_dev);
4243 if (ret) 4256 if (ret)
@@ -4271,10 +4284,6 @@ static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4271 "is EXPERIMENTAL!"); 4284 "is EXPERIMENTAL!");
4272 } 4285 }
4273 4286
4274 ret = rbd_dev_v2_image_size(rbd_dev);
4275 if (ret)
4276 goto out;
4277
4278 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) 4287 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4279 if (rbd_dev->mapping.size != rbd_dev->header.image_size) 4288 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4280 rbd_dev->mapping.size = rbd_dev->header.image_size; 4289 rbd_dev->mapping.size = rbd_dev->header.image_size;
@@ -4697,8 +4706,10 @@ out:
4697 return ret; 4706 return ret;
4698} 4707}
4699 4708
4700/* Undo whatever state changes are made by v1 or v2 image probe */ 4709/*
4701 4710 * Undo whatever state changes are made by v1 or v2 header info
4711 * call.
4712 */
4702static void rbd_dev_unprobe(struct rbd_device *rbd_dev) 4713static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
4703{ 4714{
4704 struct rbd_image_header *header; 4715 struct rbd_image_header *header;
@@ -4902,9 +4913,10 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
4902 int tmp; 4913 int tmp;
4903 4914
4904 /* 4915 /*
4905 * Get the id from the image id object. If it's not a 4916 * Get the id from the image id object. Unless there's an
4906 * format 2 image, we'll get ENOENT back, and we'll assume 4917 * error, rbd_dev->spec->image_id will be filled in with
4907 * it's a format 1 image. 4918 * a dynamically-allocated string, and rbd_dev->image_format
4919 * will be set to either 1 or 2.
4908 */ 4920 */
4909 ret = rbd_dev_image_id(rbd_dev); 4921 ret = rbd_dev_image_id(rbd_dev);
4910 if (ret) 4922 if (ret)
@@ -4992,7 +5004,6 @@ static ssize_t rbd_add(struct bus_type *bus,
4992 rc = PTR_ERR(rbdc); 5004 rc = PTR_ERR(rbdc);
4993 goto err_out_args; 5005 goto err_out_args;
4994 } 5006 }
4995 ceph_opts = NULL; /* rbd_dev client now owns this */
4996 5007
4997 /* pick the pool */ 5008 /* pick the pool */
4998 osdc = &rbdc->client->osdc; 5009 osdc = &rbdc->client->osdc;
@@ -5027,18 +5038,18 @@ static ssize_t rbd_add(struct bus_type *bus,
5027 rbd_dev->mapping.read_only = read_only; 5038 rbd_dev->mapping.read_only = read_only;
5028 5039
5029 rc = rbd_dev_device_setup(rbd_dev); 5040 rc = rbd_dev_device_setup(rbd_dev);
5030 if (!rc) 5041 if (rc) {
5031 return count; 5042 rbd_dev_image_release(rbd_dev);
5043 goto err_out_module;
5044 }
5045
5046 return count;
5032 5047
5033 rbd_dev_image_release(rbd_dev);
5034err_out_rbd_dev: 5048err_out_rbd_dev:
5035 rbd_dev_destroy(rbd_dev); 5049 rbd_dev_destroy(rbd_dev);
5036err_out_client: 5050err_out_client:
5037 rbd_put_client(rbdc); 5051 rbd_put_client(rbdc);
5038err_out_args: 5052err_out_args:
5039 if (ceph_opts)
5040 ceph_destroy_options(ceph_opts);
5041 kfree(rbd_opts);
5042 rbd_spec_put(spec); 5053 rbd_spec_put(spec);
5043err_out_module: 5054err_out_module:
5044 module_put(THIS_MODULE); 5055 module_put(THIS_MODULE);
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index fdfd61a2d523..11a6104a1e4f 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -201,7 +201,7 @@ config BT_MRVL
201 The core driver to support Marvell Bluetooth devices. 201 The core driver to support Marvell Bluetooth devices.
202 202
203 This driver is required if you want to support 203 This driver is required if you want to support
204 Marvell Bluetooth devices, such as 8688/8787/8797. 204 Marvell Bluetooth devices, such as 8688/8787/8797/8897.
205 205
206 Say Y here to compile Marvell Bluetooth driver 206 Say Y here to compile Marvell Bluetooth driver
207 into the kernel or say M to compile it as module. 207 into the kernel or say M to compile it as module.
@@ -214,7 +214,7 @@ config BT_MRVL_SDIO
214 The driver for Marvell Bluetooth chipsets with SDIO interface. 214 The driver for Marvell Bluetooth chipsets with SDIO interface.
215 215
216 This driver is required if you want to use Marvell Bluetooth 216 This driver is required if you want to use Marvell Bluetooth
217 devices with SDIO interface. Currently SD8688/SD8787/SD8797 217 devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8897
218 chipsets are supported. 218 chipsets are supported.
219 219
220 Say Y here to compile support for Marvell BT-over-SDIO driver 220 Say Y here to compile support for Marvell BT-over-SDIO driver
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 3a4343b3bd6d..9a9f51875df5 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -498,6 +498,10 @@ static int btmrvl_service_main_thread(void *data)
498 add_wait_queue(&thread->wait_q, &wait); 498 add_wait_queue(&thread->wait_q, &wait);
499 499
500 set_current_state(TASK_INTERRUPTIBLE); 500 set_current_state(TASK_INTERRUPTIBLE);
501 if (kthread_should_stop()) {
502 BT_DBG("main_thread: break from main thread");
503 break;
504 }
501 505
502 if (adapter->wakeup_tries || 506 if (adapter->wakeup_tries ||
503 ((!adapter->int_count) && 507 ((!adapter->int_count) &&
@@ -513,11 +517,6 @@ static int btmrvl_service_main_thread(void *data)
513 517
514 BT_DBG("main_thread woke up"); 518 BT_DBG("main_thread woke up");
515 519
516 if (kthread_should_stop()) {
517 BT_DBG("main_thread: break from main thread");
518 break;
519 }
520
521 spin_lock_irqsave(&priv->driver_lock, flags); 520 spin_lock_irqsave(&priv->driver_lock, flags);
522 if (adapter->int_count) { 521 if (adapter->int_count) {
523 adapter->int_count = 0; 522 adapter->int_count = 0;
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index c63488c54f4a..13693b7a0d5c 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -82,6 +82,23 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
82 .io_port_2 = 0x7a, 82 .io_port_2 = 0x7a,
83}; 83};
84 84
85static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
86 .cfg = 0x00,
87 .host_int_mask = 0x02,
88 .host_intstatus = 0x03,
89 .card_status = 0x50,
90 .sq_read_base_addr_a0 = 0x60,
91 .sq_read_base_addr_a1 = 0x61,
92 .card_revision = 0xbc,
93 .card_fw_status0 = 0xc0,
94 .card_fw_status1 = 0xc1,
95 .card_rx_len = 0xc2,
96 .card_rx_unit = 0xc3,
97 .io_port_0 = 0xd8,
98 .io_port_1 = 0xd9,
99 .io_port_2 = 0xda,
100};
101
85static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = { 102static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
86 .helper = "mrvl/sd8688_helper.bin", 103 .helper = "mrvl/sd8688_helper.bin",
87 .firmware = "mrvl/sd8688.bin", 104 .firmware = "mrvl/sd8688.bin",
@@ -103,6 +120,13 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
103 .sd_blksz_fw_dl = 256, 120 .sd_blksz_fw_dl = 256,
104}; 121};
105 122
123static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
124 .helper = NULL,
125 .firmware = "mrvl/sd8897_uapsta.bin",
126 .reg = &btmrvl_reg_88xx,
127 .sd_blksz_fw_dl = 256,
128};
129
106static const struct sdio_device_id btmrvl_sdio_ids[] = { 130static const struct sdio_device_id btmrvl_sdio_ids[] = {
107 /* Marvell SD8688 Bluetooth device */ 131 /* Marvell SD8688 Bluetooth device */
108 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105), 132 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105),
@@ -116,6 +140,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
116 /* Marvell SD8797 Bluetooth device */ 140 /* Marvell SD8797 Bluetooth device */
117 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A), 141 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
118 .driver_data = (unsigned long) &btmrvl_sdio_sd8797 }, 142 .driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
143 /* Marvell SD8897 Bluetooth device */
144 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912E),
145 .driver_data = (unsigned long) &btmrvl_sdio_sd8897 },
119 146
120 { } /* Terminating entry */ 147 { } /* Terminating entry */
121}; 148};
@@ -1194,3 +1221,4 @@ MODULE_FIRMWARE("mrvl/sd8688_helper.bin");
1194MODULE_FIRMWARE("mrvl/sd8688.bin"); 1221MODULE_FIRMWARE("mrvl/sd8688.bin");
1195MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin"); 1222MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
1196MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin"); 1223MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
1224MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index 892728412e9d..24f553673b72 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -932,7 +932,7 @@ static unsigned long si5351_clkout_recalc_rate(struct clk_hw *hw,
932 unsigned char reg; 932 unsigned char reg;
933 unsigned char rdiv; 933 unsigned char rdiv;
934 934
935 if (hwdata->num > 5) 935 if (hwdata->num <= 5)
936 reg = si5351_msynth_params_address(hwdata->num) + 2; 936 reg = si5351_msynth_params_address(hwdata->num) + 2;
937 else 937 else
938 reg = SI5351_CLK6_7_OUTPUT_DIVIDER; 938 reg = SI5351_CLK6_7_OUTPUT_DIVIDER;
@@ -1477,6 +1477,16 @@ static int si5351_i2c_probe(struct i2c_client *client,
1477 return -EINVAL; 1477 return -EINVAL;
1478 } 1478 }
1479 drvdata->onecell.clks[n] = clk; 1479 drvdata->onecell.clks[n] = clk;
1480
1481 /* set initial clkout rate */
1482 if (pdata->clkout[n].rate != 0) {
1483 int ret;
1484 ret = clk_set_rate(clk, pdata->clkout[n].rate);
1485 if (ret != 0) {
1486 dev_err(&client->dev, "Cannot set rate : %d\n",
1487 ret);
1488 }
1489 }
1480 } 1490 }
1481 1491
1482 ret = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get, 1492 ret = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get,
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c
index debf688afa8e..553ac35bcc91 100644
--- a/drivers/clk/clk-vt8500.c
+++ b/drivers/clk/clk-vt8500.c
@@ -183,7 +183,7 @@ static int vt8500_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
183 writel(divisor, cdev->div_reg); 183 writel(divisor, cdev->div_reg);
184 vt8500_pmc_wait_busy(); 184 vt8500_pmc_wait_busy();
185 185
186 spin_lock_irqsave(cdev->lock, flags); 186 spin_unlock_irqrestore(cdev->lock, flags);
187 187
188 return 0; 188 return 0;
189} 189}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 934cfd18f72d..1144e8c7579d 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1955,6 +1955,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
1955 /* XXX the notifier code should handle this better */ 1955 /* XXX the notifier code should handle this better */
1956 if (!cn->notifier_head.head) { 1956 if (!cn->notifier_head.head) {
1957 srcu_cleanup_notifier_head(&cn->notifier_head); 1957 srcu_cleanup_notifier_head(&cn->notifier_head);
1958 list_del(&cn->node);
1958 kfree(cn); 1959 kfree(cn);
1959 } 1960 }
1960 1961
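
The one-line clk.c fix above unlinks the notifier bookkeeping node from its list before freeing it; without the list_del() the list would keep pointing at freed memory. A minimal sketch of the same unlink-before-free rule, with a hand-rolled doubly linked list rather than the kernel's list.h:

#include <stdio.h>
#include <stdlib.h>

struct node {
    struct node *prev, *next;
    int payload;
};

/* head of a circular doubly linked list */
static struct node head = { &head, &head, 0 };

static void list_add(struct node *n)
{
    n->next = head.next;
    n->prev = &head;
    head.next->prev = n;
    head.next = n;
}

/* unlink first, then free: the list never references freed memory */
static void list_del_and_free(struct node *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
    free(n);
}

int main(void)
{
    struct node *n = malloc(sizeof(*n));
    if (!n)
        return 1;
    n->payload = 42;
    list_add(n);

    list_del_and_free(n);

    /* iterating now sees an empty list instead of a dangling pointer */
    printf("list empty: %s\n", head.next == &head ? "yes" : "no");
    return 0;
}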
diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c
index d0e5eed146de..4faf0afc44cd 100644
--- a/drivers/clk/mxs/clk-imx28.c
+++ b/drivers/clk/mxs/clk-imx28.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/clk.h> 12#include <linux/clk.h>
13#include <linux/clk/mxs.h>
13#include <linux/clkdev.h> 14#include <linux/clkdev.h>
14#include <linux/err.h> 15#include <linux/err.h>
15#include <linux/init.h> 16#include <linux/init.h>
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index d0940e69d034..3c1f88868f29 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -791,7 +791,8 @@ struct samsung_gate_clock exynos4210_gate_clks[] __initdata = {
791 GATE(smmu_pcie, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0), 791 GATE(smmu_pcie, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0),
792 GATE(modemif, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0), 792 GATE(modemif, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0),
793 GATE(chipid, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0), 793 GATE(chipid, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0),
794 GATE(sysreg, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0), 794 GATE(sysreg, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0,
795 CLK_IGNORE_UNUSED, 0),
795 GATE(hdmi_cec, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0, 0), 796 GATE(hdmi_cec, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0, 0),
796 GATE(smmu_rotator, "smmu_rotator", "aclk200", 797 GATE(smmu_rotator, "smmu_rotator", "aclk200",
797 E4210_GATE_IP_IMAGE, 4, 0, 0), 798 E4210_GATE_IP_IMAGE, 4, 0, 0),
@@ -819,7 +820,8 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
819 GATE(smmu_mdma, "smmu_mdma", "aclk200", E4X12_GATE_IP_IMAGE, 5, 0, 0), 820 GATE(smmu_mdma, "smmu_mdma", "aclk200", E4X12_GATE_IP_IMAGE, 5, 0, 0),
820 GATE(mipi_hsi, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0), 821 GATE(mipi_hsi, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
821 GATE(chipid, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0), 822 GATE(chipid, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
822 GATE(sysreg, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1, 0, 0), 823 GATE(sysreg, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1,
824 CLK_IGNORE_UNUSED, 0),
823 GATE(hdmi_cec, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0, 0), 825 GATE(hdmi_cec, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0, 0),
824 GATE(sclk_mdnie0, "sclk_mdnie0", "div_mdnie0", 826 GATE(sclk_mdnie0, "sclk_mdnie0", "div_mdnie0",
825 SRC_MASK_LCD0, 4, CLK_SET_RATE_PARENT, 0), 827 SRC_MASK_LCD0, 4, CLK_SET_RATE_PARENT, 0),
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 5c97e75924a8..22d7699e7ced 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -155,7 +155,7 @@ static __initdata unsigned long exynos5250_clk_regs[] = {
155 155
156/* list of all parent clock list */ 156/* list of all parent clock list */
157PNAME(mout_apll_p) = { "fin_pll", "fout_apll", }; 157PNAME(mout_apll_p) = { "fin_pll", "fout_apll", };
158PNAME(mout_cpu_p) = { "mout_apll", "mout_mpll", }; 158PNAME(mout_cpu_p) = { "mout_apll", "sclk_mpll", };
159PNAME(mout_mpll_fout_p) = { "fout_mplldiv2", "fout_mpll" }; 159PNAME(mout_mpll_fout_p) = { "fout_mplldiv2", "fout_mpll" };
160PNAME(mout_mpll_p) = { "fin_pll", "mout_mpll_fout" }; 160PNAME(mout_mpll_p) = { "fin_pll", "mout_mpll_fout" };
161PNAME(mout_bpll_fout_p) = { "fout_bplldiv2", "fout_bpll" }; 161PNAME(mout_bpll_fout_p) = { "fout_bplldiv2", "fout_bpll" };
@@ -208,10 +208,10 @@ struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = {
208}; 208};
209 209
210struct samsung_mux_clock exynos5250_mux_clks[] __initdata = { 210struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
211 MUX(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1), 211 MUX_A(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1, "mout_apll"),
212 MUX(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1), 212 MUX_A(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1, "mout_cpu"),
213 MUX(none, "mout_mpll_fout", mout_mpll_fout_p, PLL_DIV2_SEL, 4, 1), 213 MUX(none, "mout_mpll_fout", mout_mpll_fout_p, PLL_DIV2_SEL, 4, 1),
214 MUX(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1), 214 MUX_A(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1, "mout_mpll"),
215 MUX(none, "mout_bpll_fout", mout_bpll_fout_p, PLL_DIV2_SEL, 0, 1), 215 MUX(none, "mout_bpll_fout", mout_bpll_fout_p, PLL_DIV2_SEL, 0, 1),
216 MUX(none, "sclk_bpll", mout_bpll_p, SRC_CDREX, 0, 1), 216 MUX(none, "sclk_bpll", mout_bpll_p, SRC_CDREX, 0, 1),
217 MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1), 217 MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
@@ -378,7 +378,7 @@ struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
378 GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0), 378 GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0),
379 GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0), 379 GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0),
380 GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0), 380 GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0),
381 GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, 0, 0), 381 GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0),
382 GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0), 382 GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0),
383 GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0), 383 GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0),
384 GATE(tzpc2, "tzpc2", "aclk66", GATE_IP_PERIS, 8, 0, 0), 384 GATE(tzpc2, "tzpc2", "aclk66", GATE_IP_PERIS, 8, 0, 0),
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 89135f6be116..362f12dcd944 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -111,7 +111,8 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
111 unsigned long parent_rate) 111 unsigned long parent_rate)
112{ 112{
113 struct samsung_clk_pll36xx *pll = to_clk_pll36xx(hw); 113 struct samsung_clk_pll36xx *pll = to_clk_pll36xx(hw);
114 u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1; 114 u32 mdiv, pdiv, sdiv, pll_con0, pll_con1;
115 s16 kdiv;
115 u64 fvco = parent_rate; 116 u64 fvco = parent_rate;
116 117
117 pll_con0 = __raw_readl(pll->con_reg); 118 pll_con0 = __raw_readl(pll->con_reg);
@@ -119,7 +120,7 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
119 mdiv = (pll_con0 >> PLL36XX_MDIV_SHIFT) & PLL36XX_MDIV_MASK; 120 mdiv = (pll_con0 >> PLL36XX_MDIV_SHIFT) & PLL36XX_MDIV_MASK;
120 pdiv = (pll_con0 >> PLL36XX_PDIV_SHIFT) & PLL36XX_PDIV_MASK; 121 pdiv = (pll_con0 >> PLL36XX_PDIV_SHIFT) & PLL36XX_PDIV_MASK;
121 sdiv = (pll_con0 >> PLL36XX_SDIV_SHIFT) & PLL36XX_SDIV_MASK; 122 sdiv = (pll_con0 >> PLL36XX_SDIV_SHIFT) & PLL36XX_SDIV_MASK;
122 kdiv = pll_con1 & PLL36XX_KDIV_MASK; 123 kdiv = (s16)(pll_con1 & PLL36XX_KDIV_MASK);
123 124
124 fvco *= (mdiv << 16) + kdiv; 125 fvco *= (mdiv << 16) + kdiv;
125 do_div(fvco, (pdiv << sdiv)); 126 do_div(fvco, (pdiv << sdiv));
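
The clk-pll hunk above exists because the 16-bit K divider of a fractional PLL is signed: kept in a u32 after masking, a negative K reads back as a large positive offset and skews the computed rate. Going through an s16 restores the sign before the multiplication. A short demonstration with made-up register values (the constants below are illustrative, not a real PLL configuration):

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint32_t pll_con1 = 0x0000ff38;    /* low 16 bits hold K = -200 in two's complement */
    uint32_t mdiv = 100;
    uint64_t parent = 24000000;        /* 24 MHz reference */

    /* unsigned read: K becomes 65336 instead of -200 */
    uint32_t kdiv_u = pll_con1 & 0xffff;

    /* signed read: cast the masked field to a signed 16-bit type first */
    int16_t kdiv_s = (int16_t)(pll_con1 & 0xffff);

    uint64_t fvco_u = parent * ((mdiv << 16) + kdiv_u);
    uint64_t fvco_s = parent * (uint64_t)((int64_t)(mdiv << 16) + kdiv_s);

    printf("unsigned K = %u  -> fvco = %" PRIu64 "\n", kdiv_u, fvco_u);
    printf("signed   K = %d -> fvco = %" PRIu64 "\n", kdiv_s, fvco_s);
    return 0;
}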
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index f9ec43fd1320..080c3c5e33f6 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -369,7 +369,7 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
369 clk_register_clkdev(clk, NULL, "60100000.serial"); 369 clk_register_clkdev(clk, NULL, "60100000.serial");
370} 370}
371#else 371#else
372static inline void spear320_clk_init(void) { } 372static inline void spear320_clk_init(void __iomem *soc_config_base) { }
373#endif 373#endif
374 374
375void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base) 375void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base)
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index c6921f538e28..ba99e3844106 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -1598,6 +1598,12 @@ static void __init tegra30_periph_clk_init(void)
1598 clk_register_clkdev(clk, "afi", "tegra-pcie"); 1598 clk_register_clkdev(clk, "afi", "tegra-pcie");
1599 clks[afi] = clk; 1599 clks[afi] = clk;
1600 1600
1601 /* pciex */
1602 clk = tegra_clk_register_periph_gate("pciex", "pll_e", 0, clk_base, 0,
1603 74, &periph_u_regs, periph_clk_enb_refcnt);
1604 clk_register_clkdev(clk, "pciex", "tegra-pcie");
1605 clks[pciex] = clk;
1606
1601 /* kfuse */ 1607 /* kfuse */
1602 clk = tegra_clk_register_periph_gate("kfuse", "clk_m", 1608 clk = tegra_clk_register_periph_gate("kfuse", "clk_m",
1603 TEGRA_PERIPH_ON_APB, 1609 TEGRA_PERIPH_ON_APB,
@@ -1716,11 +1722,6 @@ static void __init tegra30_fixed_clk_init(void)
1716 1, 0, &cml_lock); 1722 1, 0, &cml_lock);
1717 clk_register_clkdev(clk, "cml1", NULL); 1723 clk_register_clkdev(clk, "cml1", NULL);
1718 clks[cml1] = clk; 1724 clks[cml1] = clk;
1719
1720 /* pciex */
1721 clk = clk_register_fixed_rate(NULL, "pciex", "pll_e", 0, 100000000);
1722 clk_register_clkdev(clk, "pciex", NULL);
1723 clks[pciex] = clk;
1724} 1725}
1725 1726
1726static void __init tegra30_osc_clk_init(void) 1727static void __init tegra30_osc_clk_init(void)
diff --git a/drivers/clk/ux500/clk-sysctrl.c b/drivers/clk/ux500/clk-sysctrl.c
index bc7e9bde792b..e364c9d4aa60 100644
--- a/drivers/clk/ux500/clk-sysctrl.c
+++ b/drivers/clk/ux500/clk-sysctrl.c
@@ -145,7 +145,13 @@ static struct clk *clk_reg_sysctrl(struct device *dev,
145 return ERR_PTR(-ENOMEM); 145 return ERR_PTR(-ENOMEM);
146 } 146 }
147 147
148 for (i = 0; i < num_parents; i++) { 148 /* set main clock registers */
149 clk->reg_sel[0] = reg_sel[0];
150 clk->reg_bits[0] = reg_bits[0];
151 clk->reg_mask[0] = reg_mask[0];
152
153 /* handle clocks with more than one parent */
154 for (i = 1; i < num_parents; i++) {
149 clk->reg_sel[i] = reg_sel[i]; 155 clk->reg_sel[i] = reg_sel[i];
150 clk->reg_bits[i] = reg_bits[i]; 156 clk->reg_bits[i] = reg_bits[i];
151 clk->reg_mask[i] = reg_mask[i]; 157 clk->reg_mask[i] = reg_mask[i];
diff --git a/drivers/clk/ux500/u8500_clk.c b/drivers/clk/ux500/u8500_clk.c
index 0b4f35a5ffc2..80069c370a47 100644
--- a/drivers/clk/ux500/u8500_clk.c
+++ b/drivers/clk/ux500/u8500_clk.c
@@ -325,7 +325,7 @@ void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
325 clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", clkrst3_base, 325 clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", clkrst3_base,
326 BIT(0), 0); 326 BIT(0), 0);
327 clk_register_clkdev(clk, "fsmc", NULL); 327 clk_register_clkdev(clk, "fsmc", NULL);
328 clk_register_clkdev(clk, NULL, "smsc911x"); 328 clk_register_clkdev(clk, NULL, "smsc911x.0");
329 329
330 clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", clkrst3_base, 330 clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", clkrst3_base,
331 BIT(1), 0); 331 BIT(1), 0);
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 11b8b4b54ceb..edc089e9d0c4 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -347,11 +347,11 @@ static u32 get_cur_val(const struct cpumask *mask)
347 switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) { 347 switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
348 case SYSTEM_INTEL_MSR_CAPABLE: 348 case SYSTEM_INTEL_MSR_CAPABLE:
349 cmd.type = SYSTEM_INTEL_MSR_CAPABLE; 349 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
350 cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; 350 cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
351 break; 351 break;
352 case SYSTEM_AMD_MSR_CAPABLE: 352 case SYSTEM_AMD_MSR_CAPABLE:
353 cmd.type = SYSTEM_AMD_MSR_CAPABLE; 353 cmd.type = SYSTEM_AMD_MSR_CAPABLE;
354 cmd.addr.msr.reg = MSR_AMD_PERF_STATUS; 354 cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
355 break; 355 break;
356 case SYSTEM_IO_CAPABLE: 356 case SYSTEM_IO_CAPABLE:
357 cmd.type = SYSTEM_IO_CAPABLE; 357 cmd.type = SYSTEM_IO_CAPABLE;
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index a64eb8b70444..ad1fde277661 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -45,7 +45,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
45 struct cpufreq_freqs freqs; 45 struct cpufreq_freqs freqs;
46 struct opp *opp; 46 struct opp *opp;
47 unsigned long volt = 0, volt_old = 0, tol = 0; 47 unsigned long volt = 0, volt_old = 0, tol = 0;
48 long freq_Hz; 48 long freq_Hz, freq_exact;
49 unsigned int index; 49 unsigned int index;
50 int ret; 50 int ret;
51 51
@@ -60,6 +60,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
60 freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000); 60 freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
61 if (freq_Hz < 0) 61 if (freq_Hz < 0)
62 freq_Hz = freq_table[index].frequency * 1000; 62 freq_Hz = freq_table[index].frequency * 1000;
63 freq_exact = freq_Hz;
63 freqs.new = freq_Hz / 1000; 64 freqs.new = freq_Hz / 1000;
64 freqs.old = clk_get_rate(cpu_clk) / 1000; 65 freqs.old = clk_get_rate(cpu_clk) / 1000;
65 66
@@ -98,7 +99,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
98 } 99 }
99 } 100 }
100 101
101 ret = clk_set_rate(cpu_clk, freqs.new * 1000); 102 ret = clk_set_rate(cpu_clk, freq_exact);
102 if (ret) { 103 if (ret) {
103 pr_err("failed to set clock rate: %d\n", ret); 104 pr_err("failed to set clock rate: %d\n", ret);
104 if (cpu_reg) 105 if (cpu_reg)
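
The cpufreq-cpu0 hunks keep the exact rate returned by clk_round_rate() in freq_exact and pass that same value to clk_set_rate(), instead of the kHz value multiplied back up, presumably so the clock framework is asked for exactly the rate it reported rather than one that integer division may have truncated. A minimal sketch of that pattern, with hypothetical names (set_cpu_rate, target_khz), not the driver's actual code:

    #include <linux/clk.h>
    #include <linux/printk.h>

    /*
     * Round first, remember the exact Hz value, and program that exact value.
     * Converting Hz -> kHz -> Hz can drop the sub-kHz remainder and make
     * clk_set_rate() settle on a slightly different rate.
     */
    static int set_cpu_rate(struct clk *cpu_clk, unsigned int target_khz)
    {
        long rounded_hz;

        rounded_hz = clk_round_rate(cpu_clk, target_khz * 1000UL);
        if (rounded_hz <= 0)
            rounded_hz = target_khz * 1000UL;

        pr_debug("cpufreq sketch: %ld kHz reported, %ld Hz programmed\n",
                 rounded_hz / 1000, rounded_hz);

        return clk_set_rate(cpu_clk, rounded_hz);
    }
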
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 5af40ad82d23..dc9b72e25c1a 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -26,6 +26,7 @@
26#include <linux/tick.h> 26#include <linux/tick.h>
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/workqueue.h> 28#include <linux/workqueue.h>
29#include <linux/cpu.h>
29 30
30#include "cpufreq_governor.h" 31#include "cpufreq_governor.h"
31 32
@@ -180,8 +181,10 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
180 if (!all_cpus) { 181 if (!all_cpus) {
181 __gov_queue_work(smp_processor_id(), dbs_data, delay); 182 __gov_queue_work(smp_processor_id(), dbs_data, delay);
182 } else { 183 } else {
184 get_online_cpus();
183 for_each_cpu(i, policy->cpus) 185 for_each_cpu(i, policy->cpus)
184 __gov_queue_work(i, dbs_data, delay); 186 __gov_queue_work(i, dbs_data, delay);
187 put_online_cpus();
185 } 188 }
186} 189}
187EXPORT_SYMBOL_GPL(gov_queue_work); 190EXPORT_SYMBOL_GPL(gov_queue_work);
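
The governor hunk brackets the for_each_cpu() walk with get_online_cpus()/put_online_cpus() (the CPU-hotplug read lock of this kernel generation), so a CPU in policy->cpus cannot be unplugged while work is being queued on it. An illustrative sketch of the same locking pattern around per-CPU work, with a hypothetical sample_work item:

    #include <linux/cpu.h>
    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/workqueue.h>

    static DEFINE_PER_CPU(struct delayed_work, sample_work);

    static void sample_fn(struct work_struct *work)
    {
        /* per-CPU sampling would go here */
    }

    static void init_sample_works(void)
    {
        int cpu;

        for_each_possible_cpu(cpu)
            INIT_DELAYED_WORK(&per_cpu(sample_work, cpu), sample_fn);
    }

    /* Queue work on every CPU in @mask without racing against CPU hotplug. */
    static void queue_on_cpus(const struct cpumask *mask, unsigned long delay)
    {
        int cpu;

        get_online_cpus();          /* block hot-unplug for the duration of the walk */
        for_each_cpu(cpu, mask)
            schedule_delayed_work_on(cpu, &per_cpu(sample_work, cpu), delay);
        put_online_cpus();
    }
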
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 4b9bb5def6f1..93eb5cbcc1f6 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -47,6 +47,8 @@ static struct od_ops od_ops;
47static struct cpufreq_governor cpufreq_gov_ondemand; 47static struct cpufreq_governor cpufreq_gov_ondemand;
48#endif 48#endif
49 49
50static unsigned int default_powersave_bias;
51
50static void ondemand_powersave_bias_init_cpu(int cpu) 52static void ondemand_powersave_bias_init_cpu(int cpu)
51{ 53{
52 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); 54 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
@@ -543,7 +545,7 @@ static int od_init(struct dbs_data *dbs_data)
543 545
544 tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; 546 tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
545 tuners->ignore_nice = 0; 547 tuners->ignore_nice = 0;
546 tuners->powersave_bias = 0; 548 tuners->powersave_bias = default_powersave_bias;
547 tuners->io_is_busy = should_io_be_busy(); 549 tuners->io_is_busy = should_io_be_busy();
548 550
549 dbs_data->tuners = tuners; 551 dbs_data->tuners = tuners;
@@ -585,6 +587,7 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
585 unsigned int cpu; 587 unsigned int cpu;
586 cpumask_t done; 588 cpumask_t done;
587 589
590 default_powersave_bias = powersave_bias;
588 cpumask_clear(&done); 591 cpumask_clear(&done);
589 592
590 get_online_cpus(); 593 get_online_cpus();
@@ -593,11 +596,17 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
593 continue; 596 continue;
594 597
595 policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy; 598 policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
596 dbs_data = policy->governor_data; 599 if (!policy)
597 od_tuners = dbs_data->tuners; 600 continue;
598 od_tuners->powersave_bias = powersave_bias;
599 601
600 cpumask_or(&done, &done, policy->cpus); 602 cpumask_or(&done, &done, policy->cpus);
603
604 if (policy->governor != &cpufreq_gov_ondemand)
605 continue;
606
607 dbs_data = policy->governor_data;
608 od_tuners = dbs_data->tuners;
609 od_tuners->powersave_bias = default_powersave_bias;
601 } 610 }
602 put_online_cpus(); 611 put_online_cpus();
603} 612}
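
Two things change in ondemand: od_set_powersave_bias() stores the requested bias in default_powersave_bias so instances initialised later pick it up in od_init(), and the per-CPU loop now skips CPUs whose policy is NULL or not currently governed by ondemand before dereferencing governor_data. A rough sketch of the remember-a-default-for-late-initialisers idea, with hypothetical names:

    static unsigned int default_bias;   /* inherited by instances created later */

    struct gov_instance {
        unsigned int bias;
    };

    static void instance_init(struct gov_instance *g)
    {
        g->bias = default_bias;         /* was a hard-coded 0 before the fix */
    }

    static void set_bias_everywhere(unsigned int bias,
                                    struct gov_instance **table, int n)
    {
        int i;

        default_bias = bias;
        for (i = 0; i < n; i++) {
            if (!table[i])              /* not set up yet: it will inherit
                                         * default_bias in instance_init() */
                continue;
            table[i]->bias = bias;
        }
    }
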
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index a97bb6c1596c..c3dc1c04a5df 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -863,7 +863,7 @@ static struct of_device_id sahara_dt_ids[] = {
863 { .compatible = "fsl,imx27-sahara" }, 863 { .compatible = "fsl,imx27-sahara" },
864 { /* sentinel */ } 864 { /* sentinel */ }
865}; 865};
866MODULE_DEVICE_TABLE(platform, sahara_dt_ids); 866MODULE_DEVICE_TABLE(of, sahara_dt_ids);
867 867
868static int sahara_probe(struct platform_device *pdev) 868static int sahara_probe(struct platform_device *pdev)
869{ 869{
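
The sahara one-liner fixes the device-table type: the table holds of_device_id entries, and the first argument of MODULE_DEVICE_TABLE() must name that type so the correct module alias is emitted ("platform" is for platform_device_id tables). The generic shape, with a hypothetical compatible string:

    #include <linux/module.h>
    #include <linux/mod_devicetable.h>

    static const struct of_device_id example_dt_ids[] = {
        { .compatible = "vendor,example-ip" },  /* hypothetical */
        { /* sentinel */ }
    };
    MODULE_DEVICE_TABLE(of, example_dt_ids);    /* "of", matching of_device_id */
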
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index d8ce4ecfef18..e88ded2c8d2f 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -716,8 +716,7 @@ static int dmatest_func(void *data)
716 } 716 }
717 dma_async_issue_pending(chan); 717 dma_async_issue_pending(chan);
718 718
719 wait_event_freezable_timeout(done_wait, 719 wait_event_freezable_timeout(done_wait, done.done,
720 done.done || kthread_should_stop(),
721 msecs_to_jiffies(params->timeout)); 720 msecs_to_jiffies(params->timeout));
722 721
723 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); 722 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
@@ -997,7 +996,6 @@ static void stop_threaded_test(struct dmatest_info *info)
997static int __restart_threaded_test(struct dmatest_info *info, bool run) 996static int __restart_threaded_test(struct dmatest_info *info, bool run)
998{ 997{
999 struct dmatest_params *params = &info->params; 998 struct dmatest_params *params = &info->params;
1000 int ret;
1001 999
1002 /* Stop any running test first */ 1000 /* Stop any running test first */
1003 __stop_threaded_test(info); 1001 __stop_threaded_test(info);
@@ -1012,13 +1010,23 @@ static int __restart_threaded_test(struct dmatest_info *info, bool run)
1012 memcpy(params, &info->dbgfs_params, sizeof(*params)); 1010 memcpy(params, &info->dbgfs_params, sizeof(*params));
1013 1011
1014 /* Run test with new parameters */ 1012 /* Run test with new parameters */
1015 ret = __run_threaded_test(info); 1013 return __run_threaded_test(info);
1016 if (ret) { 1014}
1017 __stop_threaded_test(info); 1015
1018 pr_err("dmatest: Can't run test\n"); 1016static bool __is_threaded_test_run(struct dmatest_info *info)
1017{
1018 struct dmatest_chan *dtc;
1019
1020 list_for_each_entry(dtc, &info->channels, node) {
1021 struct dmatest_thread *thread;
1022
1023 list_for_each_entry(thread, &dtc->threads, node) {
1024 if (!thread->done)
1025 return true;
1026 }
1019 } 1027 }
1020 1028
1021 return ret; 1029 return false;
1022} 1030}
1023 1031
1024static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos, 1032static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
@@ -1091,22 +1099,10 @@ static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
1091{ 1099{
1092 struct dmatest_info *info = file->private_data; 1100 struct dmatest_info *info = file->private_data;
1093 char buf[3]; 1101 char buf[3];
1094 struct dmatest_chan *dtc;
1095 bool alive = false;
1096 1102
1097 mutex_lock(&info->lock); 1103 mutex_lock(&info->lock);
1098 list_for_each_entry(dtc, &info->channels, node) {
1099 struct dmatest_thread *thread;
1100
1101 list_for_each_entry(thread, &dtc->threads, node) {
1102 if (!thread->done) {
1103 alive = true;
1104 break;
1105 }
1106 }
1107 }
1108 1104
1109 if (alive) { 1105 if (__is_threaded_test_run(info)) {
1110 buf[0] = 'Y'; 1106 buf[0] = 'Y';
1111 } else { 1107 } else {
1112 __stop_threaded_test(info); 1108 __stop_threaded_test(info);
@@ -1132,7 +1128,12 @@ static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
1132 1128
1133 if (strtobool(buf, &bv) == 0) { 1129 if (strtobool(buf, &bv) == 0) {
1134 mutex_lock(&info->lock); 1130 mutex_lock(&info->lock);
1135 ret = __restart_threaded_test(info, bv); 1131
1132 if (__is_threaded_test_run(info))
1133 ret = -EBUSY;
1134 else
1135 ret = __restart_threaded_test(info, bv);
1136
1136 mutex_unlock(&info->lock); 1137 mutex_unlock(&info->lock);
1137 } 1138 }
1138 1139
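
The dmatest hunks factor the walk over channels and threads into __is_threaded_test_run(), use it both when reporting state through the run file and to refuse a new run with -EBUSY while one is already in flight, and simplify the wait condition now that completion is tracked through thread->done. A compressed sketch of the busy-check-under-mutex pattern, using a hypothetical test_state rather than the driver's structures:

    #include <linux/mutex.h>
    #include <linux/types.h>
    #include <linux/errno.h>

    struct test_state {
        struct mutex lock;      /* assumed initialised with mutex_init() */
        bool running;
    };

    /* Start a new run only if nothing is currently running. */
    static int test_try_start(struct test_state *st)
    {
        int ret = 0;

        mutex_lock(&st->lock);
        if (st->running)
            ret = -EBUSY;       /* refuse instead of restarting underneath a run */
        else
            st->running = true;
        mutex_unlock(&st->lock);

        return ret;
    }
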
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 1734feec47b1..71bf4ec300ea 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1566,10 +1566,12 @@ static void dma_tc_handle(struct d40_chan *d40c)
1566 return; 1566 return;
1567 } 1567 }
1568 1568
1569 if (d40_queue_start(d40c) == NULL) 1569 if (d40_queue_start(d40c) == NULL) {
1570 d40c->busy = false; 1570 d40c->busy = false;
1571 pm_runtime_mark_last_busy(d40c->base->dev); 1571
1572 pm_runtime_put_autosuspend(d40c->base->dev); 1572 pm_runtime_mark_last_busy(d40c->base->dev);
1573 pm_runtime_put_autosuspend(d40c->base->dev);
1574 }
1573 1575
1574 d40_desc_remove(d40d); 1576 d40_desc_remove(d40d);
1575 d40_desc_done(d40c, d40d); 1577 d40_desc_done(d40c, d40d);
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index b623c599e572..8bd1bb6dbe47 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -523,13 +523,11 @@ static void efivar_update_sysfs_entries(struct work_struct *work)
523 struct efivar_entry *entry; 523 struct efivar_entry *entry;
524 int err; 524 int err;
525 525
526 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
527 if (!entry)
528 return;
529
530 /* Add new sysfs entries */ 526 /* Add new sysfs entries */
531 while (1) { 527 while (1) {
532 memset(entry, 0, sizeof(*entry)); 528 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
529 if (!entry)
530 return;
533 531
534 err = efivar_init(efivar_update_sysfs_entry, entry, 532 err = efivar_init(efivar_update_sysfs_entry, entry,
535 true, false, &efivar_sysfs_list); 533 true, false, &efivar_sysfs_list);
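
The efivars change moves the kzalloc() inside the loop: each pass that finds a new variable apparently hands its entry off to the sysfs machinery, so recycling a single buffer with memset() would clobber an entry that is already in use. That reading is an assumption from the surrounding code, not something the hunk states; the general allocate-per-published-object shape looks roughly like this, with a hypothetical producer callback:

    #include <linux/slab.h>
    #include <linux/list.h>
    #include <linux/errno.h>

    struct item {
        struct list_head node;
        int payload;
    };

    /*
     * Each produced object is published on a list and kept there, so a fresh
     * allocation is needed per iteration; reusing one zeroed buffer would
     * corrupt the object published on the previous pass.
     */
    static int produce_all(struct list_head *published, int (*next)(struct item *))
    {
        for (;;) {
            struct item *it = kzalloc(sizeof(*it), GFP_KERNEL);

            if (!it)
                return -ENOMEM;
            if (next(it)) {             /* nothing more to produce */
                kfree(it);
                return 0;
            }
            list_add_tail(&it->node, published);
        }
    }
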
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index d3f7d2db870f..4a430360af5a 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1094,6 +1094,9 @@ static int omap_gpio_probe(struct platform_device *pdev)
1094 const struct omap_gpio_platform_data *pdata; 1094 const struct omap_gpio_platform_data *pdata;
1095 struct resource *res; 1095 struct resource *res;
1096 struct gpio_bank *bank; 1096 struct gpio_bank *bank;
1097#ifdef CONFIG_ARCH_OMAP1
1098 int irq_base;
1099#endif
1097 1100
1098 match = of_match_device(of_match_ptr(omap_gpio_match), dev); 1101 match = of_match_device(of_match_ptr(omap_gpio_match), dev);
1099 1102
@@ -1135,11 +1138,28 @@ static int omap_gpio_probe(struct platform_device *pdev)
1135 pdata->get_context_loss_count; 1138 pdata->get_context_loss_count;
1136 } 1139 }
1137 1140
1141#ifdef CONFIG_ARCH_OMAP1
1142 /*
1143 * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
1144 * irq_alloc_descs() and irq_domain_add_legacy() and just use a
1145 * linear IRQ domain mapping for all OMAP platforms.
1146 */
1147 irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
1148 if (irq_base < 0) {
1149 dev_err(dev, "Couldn't allocate IRQ numbers\n");
1150 return -ENODEV;
1151 }
1138 1152
1153 bank->domain = irq_domain_add_legacy(node, bank->width, irq_base,
1154 0, &irq_domain_simple_ops, NULL);
1155#else
1139 bank->domain = irq_domain_add_linear(node, bank->width, 1156 bank->domain = irq_domain_add_linear(node, bank->width,
1140 &irq_domain_simple_ops, NULL); 1157 &irq_domain_simple_ops, NULL);
1141 if (!bank->domain) 1158#endif
1159 if (!bank->domain) {
1160 dev_err(dev, "Couldn't register an IRQ domain\n");
1142 return -ENODEV; 1161 return -ENODEV;
1162 }
1143 1163
1144 if (bank->regs->set_dataout && bank->regs->clr_dataout) 1164 if (bank->regs->set_dataout && bank->regs->clr_dataout)
1145 bank->set_dataout = _set_gpio_dataout_reg; 1165 bank->set_dataout = _set_gpio_dataout_reg;
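
As the REVISIT comment in the hunk says, OMAP1 does not yet support SPARSE_IRQ, so its probe path keeps the legacy scheme (pre-allocate descriptors with irq_alloc_descs(), then irq_domain_add_legacy()), while everything else uses a linear domain; the hunk also makes a failed domain registration print an error and abort the probe. A sketch of the two domain styles behind a hypothetical config switch:

    #include <linux/irq.h>
    #include <linux/irqdomain.h>
    #include <linux/of.h>

    static struct irq_domain *create_gpio_domain(struct device_node *node,
                                                 unsigned int nr_gpio)
    {
    #ifdef CONFIG_NEED_LEGACY_DOMAIN    /* hypothetical switch; OMAP1 in the hunk */
        int irq_base = irq_alloc_descs(-1, 0, nr_gpio, 0);

        if (irq_base < 0)
            return NULL;
        return irq_domain_add_legacy(node, nr_gpio, irq_base, 0,
                                     &irq_domain_simple_ops, NULL);
    #else
        /* with SPARSE_IRQ, virqs are allocated on demand when mapped */
        return irq_domain_add_linear(node, nr_gpio,
                                     &irq_domain_simple_ops, NULL);
    #endif
    }
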
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index a6a8643a6a77..8bcce7866d36 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1054,7 +1054,7 @@ EXPORT_SYMBOL(drm_vblank_off);
1054 */ 1054 */
1055void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) 1055void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
1056{ 1056{
1057 /* vblank is not initialized (IRQ not installed ?) */ 1057 /* vblank is not initialized (IRQ not installed ?), or has been freed */
1058 if (!dev->num_crtcs) 1058 if (!dev->num_crtcs)
1059 return; 1059 return;
1060 /* 1060 /*
@@ -1076,6 +1076,10 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
1076{ 1076{
1077 unsigned long irqflags; 1077 unsigned long irqflags;
1078 1078
1079 /* vblank is not initialized (IRQ not installed ?), or has been freed */
1080 if (!dev->num_crtcs)
1081 return;
1082
1079 if (dev->vblank_inmodeset[crtc]) { 1083 if (dev->vblank_inmodeset[crtc]) {
1080 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1084 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1081 dev->vblank_disable_allowed = 1; 1085 dev->vblank_disable_allowed = 1;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index dcde35231e25..5b7b9110254b 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -190,8 +190,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
190 if (ret) 190 if (ret)
191 return ERR_PTR(ret); 191 return ERR_PTR(ret);
192 } 192 }
193 return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, 193 return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
194 0600);
195} 194}
196EXPORT_SYMBOL(drm_gem_prime_export); 195EXPORT_SYMBOL(drm_gem_prime_export);
197 196
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 3cfd0931fbfb..82430ad8ba62 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -1462,7 +1462,7 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
1462 size_t addr = 0; 1462 size_t addr = 0;
1463 struct gtt_range *gt; 1463 struct gtt_range *gt;
1464 struct drm_gem_object *obj; 1464 struct drm_gem_object *obj;
1465 int ret; 1465 int ret = 0;
1466 1466
1467 /* if we want to turn of the cursor ignore width and height */ 1467 /* if we want to turn of the cursor ignore width and height */
1468 if (!handle) { 1468 if (!handle) {
@@ -1499,7 +1499,8 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
1499 1499
1500 if (obj->size < width * height * 4) { 1500 if (obj->size < width * height * 4) {
1501 dev_dbg(dev->dev, "buffer is to small\n"); 1501 dev_dbg(dev->dev, "buffer is to small\n");
1502 return -ENOMEM; 1502 ret = -ENOMEM;
1503 goto unref_cursor;
1503 } 1504 }
1504 1505
1505 gt = container_of(obj, struct gtt_range, gem); 1506 gt = container_of(obj, struct gtt_range, gem);
@@ -1508,7 +1509,7 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
1508 ret = psb_gtt_pin(gt); 1509 ret = psb_gtt_pin(gt);
1509 if (ret) { 1510 if (ret) {
1510 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle); 1511 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
1511 return ret; 1512 goto unref_cursor;
1512 } 1513 }
1513 1514
1514 addr = gt->offset; /* Or resource.start ??? */ 1515 addr = gt->offset; /* Or resource.start ??? */
@@ -1532,9 +1533,14 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
1532 struct gtt_range, gem); 1533 struct gtt_range, gem);
1533 psb_gtt_unpin(gt); 1534 psb_gtt_unpin(gt);
1534 drm_gem_object_unreference(psb_intel_crtc->cursor_obj); 1535 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1535 psb_intel_crtc->cursor_obj = obj;
1536 } 1536 }
1537 return 0; 1537
1538 psb_intel_crtc->cursor_obj = obj;
1539 return ret;
1540
1541unref_cursor:
1542 drm_gem_object_unreference(obj);
1543 return ret;
1538} 1544}
1539 1545
1540static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) 1546static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
@@ -1750,6 +1756,19 @@ static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
1750 kfree(psb_intel_crtc); 1756 kfree(psb_intel_crtc);
1751} 1757}
1752 1758
1759static void cdv_intel_crtc_disable(struct drm_crtc *crtc)
1760{
1761 struct gtt_range *gt;
1762 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
1763
1764 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1765
1766 if (crtc->fb) {
1767 gt = to_psb_fb(crtc->fb)->gtt;
1768 psb_gtt_unpin(gt);
1769 }
1770}
1771
1753const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = { 1772const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
1754 .dpms = cdv_intel_crtc_dpms, 1773 .dpms = cdv_intel_crtc_dpms,
1755 .mode_fixup = cdv_intel_crtc_mode_fixup, 1774 .mode_fixup = cdv_intel_crtc_mode_fixup,
@@ -1757,6 +1776,7 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
1757 .mode_set_base = cdv_intel_pipe_set_base, 1776 .mode_set_base = cdv_intel_pipe_set_base,
1758 .prepare = cdv_intel_crtc_prepare, 1777 .prepare = cdv_intel_crtc_prepare,
1759 .commit = cdv_intel_crtc_commit, 1778 .commit = cdv_intel_crtc_commit,
1779 .disable = cdv_intel_crtc_disable,
1760}; 1780};
1761 1781
1762const struct drm_crtc_funcs cdv_intel_crtc_funcs = { 1782const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
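
The cursor_set fix here (and the matching one in psb_intel_display.c below) routes every error path through a single unref_cursor: label so the GEM object looked up at the top of the function always has its reference dropped instead of being leaked, and cursor_obj is only updated on success. The unwind shape in isolation, with stand-in lookup/pin helpers that are not the GEM API:

    #include <linux/errno.h>
    #include <linux/types.h>

    struct cursor_obj { int refs; bool pinned; };   /* stand-in object */

    static struct cursor_obj the_obj;

    static struct cursor_obj *obj_lookup(unsigned int handle)
    {
        the_obj.refs++;             /* lookup takes a reference */
        return &the_obj;
    }

    static void obj_unref(struct cursor_obj *o) { o->refs--; }
    static int obj_pin(struct cursor_obj *o) { o->pinned = true; return 0; }

    static int set_cursor(unsigned int handle, unsigned int w, unsigned int h)
    {
        struct cursor_obj *o = obj_lookup(handle);
        int ret = 0;

        if (w * h == 0) {
            ret = -EINVAL;          /* every error path shares one unwind label */
            goto unref_cursor;
        }

        ret = obj_pin(o);
        if (ret)
            goto unref_cursor;

        return 0;                   /* success keeps the reference */

    unref_cursor:
        obj_unref(o);               /* the point of the fix: never leak the ref */
        return ret;
    }
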
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 1534e220097a..8b1b6d923abe 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -121,8 +121,8 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
121 unsigned long address; 121 unsigned long address;
122 int ret; 122 int ret;
123 unsigned long pfn; 123 unsigned long pfn;
124 /* FIXME: assumes fb at stolen base which may not be true */ 124 unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
125 unsigned long phys_addr = (unsigned long)dev_priv->stolen_base; 125 psbfb->gtt->offset;
126 126
127 page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 127 page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
128 address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT); 128 address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 6e8f42b61ff6..6666493789d1 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -843,7 +843,7 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
843 struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt; 843 struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt;
844 struct drm_gem_object *obj; 844 struct drm_gem_object *obj;
845 void *tmp_dst, *tmp_src; 845 void *tmp_dst, *tmp_src;
846 int ret, i, cursor_pages; 846 int ret = 0, i, cursor_pages;
847 847
848 /* if we want to turn of the cursor ignore width and height */ 848 /* if we want to turn of the cursor ignore width and height */
849 if (!handle) { 849 if (!handle) {
@@ -880,7 +880,8 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
880 880
881 if (obj->size < width * height * 4) { 881 if (obj->size < width * height * 4) {
882 dev_dbg(dev->dev, "buffer is to small\n"); 882 dev_dbg(dev->dev, "buffer is to small\n");
883 return -ENOMEM; 883 ret = -ENOMEM;
884 goto unref_cursor;
884 } 885 }
885 886
886 gt = container_of(obj, struct gtt_range, gem); 887 gt = container_of(obj, struct gtt_range, gem);
@@ -889,13 +890,14 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
889 ret = psb_gtt_pin(gt); 890 ret = psb_gtt_pin(gt);
890 if (ret) { 891 if (ret) {
891 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle); 892 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
892 return ret; 893 goto unref_cursor;
893 } 894 }
894 895
895 if (dev_priv->ops->cursor_needs_phys) { 896 if (dev_priv->ops->cursor_needs_phys) {
896 if (cursor_gt == NULL) { 897 if (cursor_gt == NULL) {
897 dev_err(dev->dev, "No hardware cursor mem available"); 898 dev_err(dev->dev, "No hardware cursor mem available");
898 return -ENOMEM; 899 ret = -ENOMEM;
900 goto unref_cursor;
899 } 901 }
900 902
901 /* Prevent overflow */ 903 /* Prevent overflow */
@@ -936,9 +938,14 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
936 struct gtt_range, gem); 938 struct gtt_range, gem);
937 psb_gtt_unpin(gt); 939 psb_gtt_unpin(gt);
938 drm_gem_object_unreference(psb_intel_crtc->cursor_obj); 940 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
939 psb_intel_crtc->cursor_obj = obj;
940 } 941 }
941 return 0; 942
943 psb_intel_crtc->cursor_obj = obj;
944 return ret;
945
946unref_cursor:
947 drm_gem_object_unreference(obj);
948 return ret;
942} 949}
943 950
944static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) 951static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
@@ -1150,6 +1157,19 @@ static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
1150 kfree(psb_intel_crtc); 1157 kfree(psb_intel_crtc);
1151} 1158}
1152 1159
1160static void psb_intel_crtc_disable(struct drm_crtc *crtc)
1161{
1162 struct gtt_range *gt;
1163 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
1164
1165 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1166
1167 if (crtc->fb) {
1168 gt = to_psb_fb(crtc->fb)->gtt;
1169 psb_gtt_unpin(gt);
1170 }
1171}
1172
1153const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { 1173const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
1154 .dpms = psb_intel_crtc_dpms, 1174 .dpms = psb_intel_crtc_dpms,
1155 .mode_fixup = psb_intel_crtc_mode_fixup, 1175 .mode_fixup = psb_intel_crtc_mode_fixup,
@@ -1157,6 +1177,7 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
1157 .mode_set_base = psb_intel_pipe_set_base, 1177 .mode_set_base = psb_intel_pipe_set_base,
1158 .prepare = psb_intel_crtc_prepare, 1178 .prepare = psb_intel_crtc_prepare,
1159 .commit = psb_intel_crtc_commit, 1179 .commit = psb_intel_crtc_commit,
1180 .disable = psb_intel_crtc_disable,
1160}; 1181};
1161 1182
1162const struct drm_crtc_funcs psb_intel_crtc_funcs = { 1183const struct drm_crtc_funcs psb_intel_crtc_funcs = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b9d00dcf9a2d..9669a0b8b440 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1697,6 +1697,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
1697struct dma_buf *i915_gem_prime_export(struct drm_device *dev, 1697struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
1698 struct drm_gem_object *gem_obj, int flags); 1698 struct drm_gem_object *gem_obj, int flags);
1699 1699
1700void i915_gem_restore_fences(struct drm_device *dev);
1701
1700/* i915_gem_context.c */ 1702/* i915_gem_context.c */
1701void i915_gem_context_init(struct drm_device *dev); 1703void i915_gem_context_init(struct drm_device *dev);
1702void i915_gem_context_fini(struct drm_device *dev); 1704void i915_gem_context_fini(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a6cf8e843973..9e35dafc5807 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -91,14 +91,11 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
91{ 91{
92 int ret; 92 int ret;
93 93
94#define EXIT_COND (!i915_reset_in_progress(error)) 94#define EXIT_COND (!i915_reset_in_progress(error) || \
95 i915_terminally_wedged(error))
95 if (EXIT_COND) 96 if (EXIT_COND)
96 return 0; 97 return 0;
97 98
98 /* GPU is already declared terminally dead, give up. */
99 if (i915_terminally_wedged(error))
100 return -EIO;
101
102 /* 99 /*
103 * Only wait 10 seconds for the gpu reset to complete to avoid hanging 100 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
104 * userspace. If it takes that long something really bad is going on and 101 * userspace. If it takes that long something really bad is going on and
@@ -1804,7 +1801,14 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1804 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; 1801 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1805 gfp &= ~(__GFP_IO | __GFP_WAIT); 1802 gfp &= ~(__GFP_IO | __GFP_WAIT);
1806 } 1803 }
1807 1804#ifdef CONFIG_SWIOTLB
1805 if (swiotlb_nr_tbl()) {
1806 st->nents++;
1807 sg_set_page(sg, page, PAGE_SIZE, 0);
1808 sg = sg_next(sg);
1809 continue;
1810 }
1811#endif
1808 if (!i || page_to_pfn(page) != last_pfn + 1) { 1812 if (!i || page_to_pfn(page) != last_pfn + 1) {
1809 if (i) 1813 if (i)
1810 sg = sg_next(sg); 1814 sg = sg_next(sg);
@@ -1815,8 +1819,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1815 } 1819 }
1816 last_pfn = page_to_pfn(page); 1820 last_pfn = page_to_pfn(page);
1817 } 1821 }
1818 1822#ifdef CONFIG_SWIOTLB
1819 sg_mark_end(sg); 1823 if (!swiotlb_nr_tbl())
1824#endif
1825 sg_mark_end(sg);
1820 obj->pages = st; 1826 obj->pages = st;
1821 1827
1822 if (i915_gem_object_needs_bit17_swizzle(obj)) 1828 if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -2120,25 +2126,15 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2120 } 2126 }
2121} 2127}
2122 2128
2123static void i915_gem_reset_fences(struct drm_device *dev) 2129void i915_gem_restore_fences(struct drm_device *dev)
2124{ 2130{
2125 struct drm_i915_private *dev_priv = dev->dev_private; 2131 struct drm_i915_private *dev_priv = dev->dev_private;
2126 int i; 2132 int i;
2127 2133
2128 for (i = 0; i < dev_priv->num_fence_regs; i++) { 2134 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2129 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 2135 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2130 2136 i915_gem_write_fence(dev, i, reg->obj);
2131 if (reg->obj)
2132 i915_gem_object_fence_lost(reg->obj);
2133
2134 i915_gem_write_fence(dev, i, NULL);
2135
2136 reg->pin_count = 0;
2137 reg->obj = NULL;
2138 INIT_LIST_HEAD(&reg->lru_list);
2139 } 2137 }
2140
2141 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
2142} 2138}
2143 2139
2144void i915_gem_reset(struct drm_device *dev) 2140void i915_gem_reset(struct drm_device *dev)
@@ -2161,8 +2157,7 @@ void i915_gem_reset(struct drm_device *dev)
2161 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; 2157 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2162 } 2158 }
2163 2159
2164 /* The fence registers are invalidated so clear them out */ 2160 i915_gem_restore_fences(dev);
2165 i915_gem_reset_fences(dev);
2166} 2161}
2167 2162
2168/** 2163/**
@@ -3868,8 +3863,6 @@ i915_gem_idle(struct drm_device *dev)
3868 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 3863 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3869 i915_gem_evict_everything(dev); 3864 i915_gem_evict_everything(dev);
3870 3865
3871 i915_gem_reset_fences(dev);
3872
3873 /* Hack! Don't let anybody do execbuf while we don't control the chip. 3866 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3874 * We need to replace this with a semaphore, or something. 3867 * We need to replace this with a semaphore, or something.
3875 * And not confound mm.suspended! 3868 * And not confound mm.suspended!
@@ -4196,7 +4189,8 @@ i915_gem_load(struct drm_device *dev)
4196 dev_priv->num_fence_regs = 8; 4189 dev_priv->num_fence_regs = 8;
4197 4190
4198 /* Initialize fence registers to zero */ 4191 /* Initialize fence registers to zero */
4199 i915_gem_reset_fences(dev); 4192 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4193 i915_gem_restore_fences(dev);
4200 4194
4201 i915_gem_detect_bit_6_swizzle(dev); 4195 i915_gem_detect_bit_6_swizzle(dev);
4202 init_waitqueue_head(&dev_priv->pending_flip_queue); 4196 init_waitqueue_head(&dev_priv->pending_flip_queue);
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 41f0fdecfbdc..369b3d8776ab 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -384,6 +384,7 @@ int i915_restore_state(struct drm_device *dev)
384 384
385 mutex_lock(&dev->struct_mutex); 385 mutex_lock(&dev->struct_mutex);
386 386
387 i915_gem_restore_fences(dev);
387 i915_restore_display(dev); 388 i915_restore_display(dev);
388 389
389 if (!drm_core_check_feature(dev, DRIVER_MODESET)) { 390 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ad1117bebd7e..56746dcac40f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7937,6 +7937,11 @@ intel_modeset_check_state(struct drm_device *dev)
7937 memset(&pipe_config, 0, sizeof(pipe_config)); 7937 memset(&pipe_config, 0, sizeof(pipe_config));
7938 active = dev_priv->display.get_pipe_config(crtc, 7938 active = dev_priv->display.get_pipe_config(crtc,
7939 &pipe_config); 7939 &pipe_config);
7940
7941 /* hw state is inconsistent with the pipe A quirk */
7942 if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
7943 active = crtc->active;
7944
7940 WARN(crtc->active != active, 7945 WARN(crtc->active != active,
7941 "crtc active state doesn't match with hw state " 7946 "crtc active state doesn't match with hw state "
7942 "(expected %i, found %i)\n", crtc->active, active); 7947 "(expected %i, found %i)\n", crtc->active, active);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f36f1baabd5a..29412cc89c7a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -815,10 +815,10 @@ static const struct dmi_system_id intel_no_lvds[] = {
815 }, 815 },
816 { 816 {
817 .callback = intel_no_lvds_dmi_callback, 817 .callback = intel_no_lvds_dmi_callback,
818 .ident = "Hewlett-Packard HP t5740e Thin Client", 818 .ident = "Hewlett-Packard HP t5740",
819 .matches = { 819 .matches = {
820 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), 820 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
821 DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"), 821 DMI_MATCH(DMI_PRODUCT_NAME, " t5740"),
822 }, 822 },
823 }, 823 },
824 { 824 {
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d15428404b9a..d4ea6c265ce1 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1776,11 +1776,14 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1776 * Assume that the preferred modes are 1776 * Assume that the preferred modes are
1777 * arranged in priority order. 1777 * arranged in priority order.
1778 */ 1778 */
1779 intel_ddc_get_modes(connector, intel_sdvo->i2c); 1779 intel_ddc_get_modes(connector, &intel_sdvo->ddc);
1780 if (list_empty(&connector->probed_modes) == false)
1781 goto end;
1782 1780
1783 /* Fetch modes from VBT */ 1781 /*
1782 * Fetch modes from VBT. For SDVO prefer the VBT mode since some
1783 * SDVO->LVDS transcoders can't cope with the EDID mode. Since
1784 * drm_mode_probed_add adds the mode at the head of the list we add it
1785 * last.
1786 */
1784 if (dev_priv->sdvo_lvds_vbt_mode != NULL) { 1787 if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
1785 newmode = drm_mode_duplicate(connector->dev, 1788 newmode = drm_mode_duplicate(connector->dev,
1786 dev_priv->sdvo_lvds_vbt_mode); 1789 dev_priv->sdvo_lvds_vbt_mode);
@@ -1792,7 +1795,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1792 } 1795 }
1793 } 1796 }
1794 1797
1795end:
1796 list_for_each_entry(newmode, &connector->probed_modes, head) { 1798 list_for_each_entry(newmode, &connector->probed_modes, head) {
1797 if (newmode->type & DRM_MODE_TYPE_PREFERRED) { 1799 if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
1798 intel_sdvo->sdvo_lvds_fixed_mode = 1800 intel_sdvo->sdvo_lvds_fixed_mode =
@@ -2790,12 +2792,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2790 SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915; 2792 SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
2791 } 2793 }
2792 2794
2793 /* Only enable the hotplug irq if we need it, to work around noisy
2794 * hotplug lines.
2795 */
2796 if (intel_sdvo->hotplug_active)
2797 intel_encoder->hpd_pin = HPD_SDVO_B ? HPD_SDVO_B : HPD_SDVO_C;
2798
2799 intel_encoder->compute_config = intel_sdvo_compute_config; 2795 intel_encoder->compute_config = intel_sdvo_compute_config;
2800 intel_encoder->disable = intel_disable_sdvo; 2796 intel_encoder->disable = intel_disable_sdvo;
2801 intel_encoder->mode_set = intel_sdvo_mode_set; 2797 intel_encoder->mode_set = intel_sdvo_mode_set;
@@ -2814,6 +2810,14 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2814 goto err_output; 2810 goto err_output;
2815 } 2811 }
2816 2812
2813 /* Only enable the hotplug irq if we need it, to work around noisy
2814 * hotplug lines.
2815 */
2816 if (intel_sdvo->hotplug_active) {
2817 intel_encoder->hpd_pin =
2818 intel_sdvo->is_sdvob ? HPD_SDVO_B : HPD_SDVO_C;
2819 }
2820
2817 /* 2821 /*
2818 * Cloning SDVO with anything is often impossible, since the SDVO 2822 * Cloning SDVO with anything is often impossible, since the SDVO
2819 * encoder can request a special input timing mode. And even if that's 2823 * encoder can request a special input timing mode. And even if that's
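
The SDVO-LVDS hunk's own comment carries the reasoning: the VBT mode should win over EDID because some SDVO-to-LVDS transcoders cannot cope with the EDID mode, and since drm_mode_probed_add() puts a mode at the head of probed_modes, adding the VBT mode last places it first, where the preferred-mode scan below the hunk finds it. The head-insertion property itself is just list_add() semantics:

    #include <linux/list.h>
    #include <linux/bug.h>

    struct mode { struct list_head head; int id; };

    static void ordering_example(struct list_head *probed)
    {
        static struct mode edid = { .id = 1 }, vbt = { .id = 2 };

        INIT_LIST_HEAD(probed);
        list_add(&edid.head, probed);   /* added first */
        list_add(&vbt.head, probed);    /* added last ... */

        /* ... so the last-added entry is walked first */
        BUG_ON(list_first_entry(probed, struct mode, head)->id != 2);
    }
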
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 77b8a45fb10a..ee66badc8bb6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1034,13 +1034,14 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
1034 else 1034 else
1035 hi_pri_lvl = 5; 1035 hi_pri_lvl = 5;
1036 1036
1037 WREG8(0x1fde, 0x06); 1037 WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
1038 WREG8(0x1fdf, hi_pri_lvl); 1038 WREG8(MGAREG_CRTCEXT_DATA, hi_pri_lvl);
1039 } else { 1039 } else {
1040 WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
1040 if (mdev->reg_1e24 >= 0x01) 1041 if (mdev->reg_1e24 >= 0x01)
1041 WREG8(0x1fdf, 0x03); 1042 WREG8(MGAREG_CRTCEXT_DATA, 0x03);
1042 else 1043 else
1043 WREG8(0x1fdf, 0x04); 1044 WREG8(MGAREG_CRTCEXT_DATA, 0x04);
1044 } 1045 }
1045 } 1046 }
1046 return 0; 1047 return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
index d0817d94454c..f02fd9f443ff 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
@@ -50,11 +50,16 @@ nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
50{ 50{
51 const u32 doff = (or * 0x800); 51 const u32 doff = (or * 0x800);
52 int load = -EINVAL; 52 int load = -EINVAL;
53 nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000);
54 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
53 nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval); 55 nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
54 udelay(9500); 56 mdelay(9);
57 udelay(500);
55 nv_wr32(priv, 0x61a00c + doff, 0x80000000); 58 nv_wr32(priv, 0x61a00c + doff, 0x80000000);
56 load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27; 59 load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27;
57 nv_wr32(priv, 0x61a00c + doff, 0x00000000); 60 nv_wr32(priv, 0x61a00c + doff, 0x00000000);
61 nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000);
62 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
58 return load; 63 return load;
59} 64}
60 65
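
Besides the nv_mask()/nv_wait() writes that look like explicit power-state handling around the load sense (a guess from the register pattern, not something the hunk spells out), the delay changes from udelay(9500) to mdelay(9) plus udelay(500): multi-millisecond busy waits are conventionally expressed with mdelay(), since very large udelay() arguments are discouraged. A trivial helper in that spirit:

    #include <linux/delay.h>

    /* Busy-wait for a microsecond count larger than udelay() is meant for. */
    static void busy_wait_us(unsigned int us)
    {
        mdelay(us / 1000);      /* whole milliseconds */
        udelay(us % 1000);      /* sub-millisecond remainder */
    }
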
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
index 0d36bdc51417..7fdade6e604d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
@@ -55,6 +55,10 @@ nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
55 nv_wr32(priv, 0x616510 + hoff, 0x00000000); 55 nv_wr32(priv, 0x616510 + hoff, 0x00000000);
56 nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001); 56 nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
57 57
58 nv_mask(priv, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
59 nv_mask(priv, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
60 nv_mask(priv, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
61
58 /* ??? */ 62 /* ??? */
59 nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */ 63 nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
60 nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */ 64 nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 89bf459d584b..e9b8217d0075 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -40,14 +40,13 @@
40 * FIFO channel objects 40 * FIFO channel objects
41 ******************************************************************************/ 41 ******************************************************************************/
42 42
43void 43static void
44nv50_fifo_playlist_update(struct nv50_fifo_priv *priv) 44nv50_fifo_playlist_update_locked(struct nv50_fifo_priv *priv)
45{ 45{
46 struct nouveau_bar *bar = nouveau_bar(priv); 46 struct nouveau_bar *bar = nouveau_bar(priv);
47 struct nouveau_gpuobj *cur; 47 struct nouveau_gpuobj *cur;
48 int i, p; 48 int i, p;
49 49
50 mutex_lock(&nv_subdev(priv)->mutex);
51 cur = priv->playlist[priv->cur_playlist]; 50 cur = priv->playlist[priv->cur_playlist];
52 priv->cur_playlist = !priv->cur_playlist; 51 priv->cur_playlist = !priv->cur_playlist;
53 52
@@ -61,6 +60,13 @@ nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
61 nv_wr32(priv, 0x0032f4, cur->addr >> 12); 60 nv_wr32(priv, 0x0032f4, cur->addr >> 12);
62 nv_wr32(priv, 0x0032ec, p); 61 nv_wr32(priv, 0x0032ec, p);
63 nv_wr32(priv, 0x002500, 0x00000101); 62 nv_wr32(priv, 0x002500, 0x00000101);
63}
64
65void
66nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
67{
68 mutex_lock(&nv_subdev(priv)->mutex);
69 nv50_fifo_playlist_update_locked(priv);
64 mutex_unlock(&nv_subdev(priv)->mutex); 70 mutex_unlock(&nv_subdev(priv)->mutex);
65} 71}
66 72
@@ -489,7 +495,7 @@ nv50_fifo_init(struct nouveau_object *object)
489 495
490 for (i = 0; i < 128; i++) 496 for (i = 0; i < 128; i++)
491 nv_wr32(priv, 0x002600 + (i * 4), 0x00000000); 497 nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
492 nv50_fifo_playlist_update(priv); 498 nv50_fifo_playlist_update_locked(priv);
493 499
494 nv_wr32(priv, 0x003200, 0x00000001); 500 nv_wr32(priv, 0x003200, 0x00000001);
495 nv_wr32(priv, 0x003250, 0x00000001); 501 nv_wr32(priv, 0x003250, 0x00000001);
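
The fifo change is the usual _locked split: the body that must run under nv_subdev(priv)->mutex becomes nv50_fifo_playlist_update_locked(), the public nv50_fifo_playlist_update() takes the mutex around it, and nv50_fifo_init() calls the _locked variant directly. In outline, with a hypothetical playlist:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(playlist_lock);
    static int cur_playlist;

    /* Caller holds playlist_lock, or is a single-threaded init/teardown path. */
    static void playlist_update_locked(void)
    {
        cur_playlist = !cur_playlist;
        /* ... rebuild and submit the new playlist here ... */
    }

    void playlist_update(void)
    {
        mutex_lock(&playlist_lock);
        playlist_update_locked();
        mutex_unlock(&playlist_lock);
    }
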
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 0a393f7f055f..5a5961b6a6a3 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -218,7 +218,7 @@ struct nv04_display_class {
218#define NV50_DISP_DAC_PWR_STATE 0x00000040 218#define NV50_DISP_DAC_PWR_STATE 0x00000040
219#define NV50_DISP_DAC_PWR_STATE_ON 0x00000000 219#define NV50_DISP_DAC_PWR_STATE_ON 0x00000000
220#define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040 220#define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040
221#define NV50_DISP_DAC_LOAD 0x0002000c 221#define NV50_DISP_DAC_LOAD 0x00020100
222#define NV50_DISP_DAC_LOAD_VALUE 0x00000007 222#define NV50_DISP_DAC_LOAD_VALUE 0x00000007
223 223
224#define NV50_DISP_PIOR_MTHD 0x00030000 224#define NV50_DISP_PIOR_MTHD 0x00030000
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index ebf0a683305e..dd5e01f89f28 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1554,7 +1554,9 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
1554{ 1554{
1555 struct nv50_disp *disp = nv50_disp(encoder->dev); 1555 struct nv50_disp *disp = nv50_disp(encoder->dev);
1556 int ret, or = nouveau_encoder(encoder)->or; 1556 int ret, or = nouveau_encoder(encoder)->or;
1557 u32 load = 0; 1557 u32 load = nouveau_drm(encoder->dev)->vbios.dactestval;
1558 if (load == 0)
1559 load = 340;
1558 1560
1559 ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load)); 1561 ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
1560 if (ret || load != 7) 1562 if (ret || load != 7)
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 9c53c25e5201..826586ffbe83 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -649,6 +649,9 @@ static void pdev_shutdown(struct platform_device *device)
649 649
650static int pdev_probe(struct platform_device *device) 650static int pdev_probe(struct platform_device *device)
651{ 651{
652 if (omapdss_is_initialized() == false)
653 return -EPROBE_DEFER;
654
652 DBG("%s", device->name); 655 DBG("%s", device->name);
653 return drm_platform_init(&omap_drm_driver, device); 656 return drm_platform_init(&omap_drm_driver, device);
654} 657}
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 6db7370373ea..a30f29425c21 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -151,7 +151,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
151 struct qxl_bo *cmd_bo; 151 struct qxl_bo *cmd_bo;
152 int release_type; 152 int release_type;
153 struct drm_qxl_command *commands = 153 struct drm_qxl_command *commands =
154 (struct drm_qxl_command *)execbuffer->commands; 154 (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
155 155
156 if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num], 156 if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
157 sizeof(user_cmd))) 157 sizeof(user_cmd)))
@@ -171,6 +171,11 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
171 if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info)) 171 if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
172 return -EINVAL; 172 return -EINVAL;
173 173
174 if (!access_ok(VERIFY_READ,
175 (void *)(unsigned long)user_cmd.command,
176 user_cmd.command_size))
177 return -EFAULT;
178
174 ret = qxl_alloc_release_reserved(qdev, 179 ret = qxl_alloc_release_reserved(qdev,
175 sizeof(union qxl_release_info) + 180 sizeof(union qxl_release_info) +
176 user_cmd.command_size, 181 user_cmd.command_size,
@@ -193,7 +198,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
193 198
194 for (i = 0 ; i < user_cmd.relocs_num; ++i) { 199 for (i = 0 ; i < user_cmd.relocs_num; ++i) {
195 if (DRM_COPY_FROM_USER(&reloc, 200 if (DRM_COPY_FROM_USER(&reloc,
196 &((struct drm_qxl_reloc *)user_cmd.relocs)[i], 201 &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i],
197 sizeof(reloc))) { 202 sizeof(reloc))) {
198 qxl_bo_list_unreserve(&reloc_list, true); 203 qxl_bo_list_unreserve(&reloc_list, true);
199 qxl_release_unreserve(qdev, release); 204 qxl_release_unreserve(qdev, release);
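
Two independent hardening steps in the execbuffer ioctl: the 64-bit ABI fields that carry user pointers are cast through uintptr_t before being used as pointers (so 32-bit builds do not truncate or warn), and access_ok() checks the command buffer range before the release is allocated and any copy is attempted. A sketch of handling a __u64 user-pointer field, using the three-argument access_ok() of this kernel generation and a hypothetical ioctl argument struct:

    #include <linux/uaccess.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct example_args {       /* hypothetical ioctl payload */
        __u64 commands;         /* user pointer carried in a u64 */
        __u32 command_size;
    };

    static int copy_user_commands(const struct example_args *args, void *dst)
    {
        const void __user *ucmds =
            (const void __user *)(uintptr_t)args->commands;

        /* 3.x-era signature; later kernels drop the VERIFY_READ argument */
        if (!access_ok(VERIFY_READ, ucmds, args->command_size))
            return -EFAULT;

        if (copy_from_user(dst, ucmds, args->command_size))
            return -EFAULT;

        return 0;
    }
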
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 85127ed24cfd..e27ce2a907cf 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -128,12 +128,13 @@ int qxl_device_init(struct qxl_device *qdev,
128 128
129 qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0)); 129 qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
130 qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size); 130 qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size);
131 DRM_DEBUG_KMS("qxl: vram %p-%p(%dM %dk), surface %p-%p(%dM %dk)\n", 131 DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk)\n",
132 (void *)qdev->vram_base, (void *)pci_resource_end(pdev, 0), 132 (unsigned long long)qdev->vram_base,
133 (unsigned long long)pci_resource_end(pdev, 0),
133 (int)pci_resource_len(pdev, 0) / 1024 / 1024, 134 (int)pci_resource_len(pdev, 0) / 1024 / 1024,
134 (int)pci_resource_len(pdev, 0) / 1024, 135 (int)pci_resource_len(pdev, 0) / 1024,
135 (void *)qdev->surfaceram_base, 136 (unsigned long long)qdev->surfaceram_base,
136 (void *)pci_resource_end(pdev, 1), 137 (unsigned long long)pci_resource_end(pdev, 1),
137 (int)qdev->surfaceram_size / 1024 / 1024, 138 (int)qdev->surfaceram_size / 1024 / 1024,
138 (int)qdev->surfaceram_size / 1024); 139 (int)qdev->surfaceram_size / 1024);
139 140
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 44a7da66e081..8406c8251fbf 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -667,6 +667,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
667int 667int
668atombios_get_encoder_mode(struct drm_encoder *encoder) 668atombios_get_encoder_mode(struct drm_encoder *encoder)
669{ 669{
670 struct drm_device *dev = encoder->dev;
671 struct radeon_device *rdev = dev->dev_private;
670 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 672 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
671 struct drm_connector *connector; 673 struct drm_connector *connector;
672 struct radeon_connector *radeon_connector; 674 struct radeon_connector *radeon_connector;
@@ -693,7 +695,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
693 case DRM_MODE_CONNECTOR_DVII: 695 case DRM_MODE_CONNECTOR_DVII:
694 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ 696 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
695 if (drm_detect_hdmi_monitor(radeon_connector->edid) && 697 if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
696 radeon_audio) 698 radeon_audio &&
699 !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
697 return ATOM_ENCODER_MODE_HDMI; 700 return ATOM_ENCODER_MODE_HDMI;
698 else if (radeon_connector->use_digital) 701 else if (radeon_connector->use_digital)
699 return ATOM_ENCODER_MODE_DVI; 702 return ATOM_ENCODER_MODE_DVI;
@@ -704,7 +707,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
704 case DRM_MODE_CONNECTOR_HDMIA: 707 case DRM_MODE_CONNECTOR_HDMIA:
705 default: 708 default:
706 if (drm_detect_hdmi_monitor(radeon_connector->edid) && 709 if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
707 radeon_audio) 710 radeon_audio &&
711 !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
708 return ATOM_ENCODER_MODE_HDMI; 712 return ATOM_ENCODER_MODE_HDMI;
709 else 713 else
710 return ATOM_ENCODER_MODE_DVI; 714 return ATOM_ENCODER_MODE_DVI;
@@ -718,7 +722,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
718 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 722 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
719 return ATOM_ENCODER_MODE_DP; 723 return ATOM_ENCODER_MODE_DP;
720 else if (drm_detect_hdmi_monitor(radeon_connector->edid) && 724 else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
721 radeon_audio) 725 radeon_audio &&
726 !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
722 return ATOM_ENCODER_MODE_HDMI; 727 return ATOM_ENCODER_MODE_HDMI;
723 else 728 else
724 return ATOM_ENCODER_MODE_DVI; 729 return ATOM_ENCODER_MODE_DVI;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 8f9e2d31b255..0f89ce3d02b9 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4754,6 +4754,12 @@ static int evergreen_startup(struct radeon_device *rdev)
4754 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; 4754 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
4755 4755
4756 /* Enable IRQ */ 4756 /* Enable IRQ */
4757 if (!rdev->irq.installed) {
4758 r = radeon_irq_kms_init(rdev);
4759 if (r)
4760 return r;
4761 }
4762
4757 r = r600_irq_init(rdev); 4763 r = r600_irq_init(rdev);
4758 if (r) { 4764 if (r) {
4759 DRM_ERROR("radeon: IH init failed (%d).\n", r); 4765 DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -4923,10 +4929,6 @@ int evergreen_init(struct radeon_device *rdev)
4923 if (r) 4929 if (r)
4924 return r; 4930 return r;
4925 4931
4926 r = radeon_irq_kms_init(rdev);
4927 if (r)
4928 return r;
4929
4930 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; 4932 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
4931 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); 4933 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
4932 4934
@@ -4999,8 +5001,7 @@ void evergreen_fini(struct radeon_device *rdev)
4999 5001
5000void evergreen_pcie_gen2_enable(struct radeon_device *rdev) 5002void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
5001{ 5003{
5002 u32 link_width_cntl, speed_cntl, mask; 5004 u32 link_width_cntl, speed_cntl;
5003 int ret;
5004 5005
5005 if (radeon_pcie_gen2 == 0) 5006 if (radeon_pcie_gen2 == 0)
5006 return; 5007 return;
@@ -5015,11 +5016,8 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
5015 if (ASIC_IS_X2(rdev)) 5016 if (ASIC_IS_X2(rdev))
5016 return; 5017 return;
5017 5018
5018 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask); 5019 if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
5019 if (ret != 0) 5020 (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
5020 return;
5021
5022 if (!(mask & DRM_PCIE_SPEED_50))
5023 return; 5021 return;
5024 5022
5025 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); 5023 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
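
This hunk, and the matching ones in ni.c, r100.c, r300.c, r420.c, r520.c and r600.c below, move radeon_irq_kms_init() out of the one-time *_init() path and into *_startup(), guarded by rdev->irq.installed so it still only runs when the IRQ side has not been set up yet. The guard reduces to a lazy initialise-on-first-startup pattern:

    #include <linux/errno.h>
    #include <linux/types.h>

    struct dev_state {
        bool irq_installed;
    };

    static int irq_init(struct dev_state *d)
    {
        /* request_irq(), program the interrupt controller, ... */
        d->irq_installed = true;
        return 0;
    }

    static int device_startup(struct dev_state *d)
    {
        int r;

        if (!d->irq_installed) {    /* first startup, or after a teardown */
            r = irq_init(d);
            if (r)
                return r;
        }
        /* ring setup, enabling interrupt sources, etc. follows */
        return 0;
    }
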
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 7969c0c8ec20..84583302b081 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2025,6 +2025,12 @@ static int cayman_startup(struct radeon_device *rdev)
2025 } 2025 }
2026 2026
2027 /* Enable IRQ */ 2027 /* Enable IRQ */
2028 if (!rdev->irq.installed) {
2029 r = radeon_irq_kms_init(rdev);
2030 if (r)
2031 return r;
2032 }
2033
2028 r = r600_irq_init(rdev); 2034 r = r600_irq_init(rdev);
2029 if (r) { 2035 if (r) {
2030 DRM_ERROR("radeon: IH init failed (%d).\n", r); 2036 DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -2190,10 +2196,6 @@ int cayman_init(struct radeon_device *rdev)
2190 if (r) 2196 if (r)
2191 return r; 2197 return r;
2192 2198
2193 r = radeon_irq_kms_init(rdev);
2194 if (r)
2195 return r;
2196
2197 ring->ring_obj = NULL; 2199 ring->ring_obj = NULL;
2198 r600_ring_init(rdev, ring, 1024 * 1024); 2200 r600_ring_init(rdev, ring, 1024 * 1024);
2199 2201
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 4973bff37fec..d0314ecbd7c1 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3869,6 +3869,12 @@ static int r100_startup(struct radeon_device *rdev)
3869 } 3869 }
3870 3870
3871 /* Enable IRQ */ 3871 /* Enable IRQ */
3872 if (!rdev->irq.installed) {
3873 r = radeon_irq_kms_init(rdev);
3874 if (r)
3875 return r;
3876 }
3877
3872 r100_irq_set(rdev); 3878 r100_irq_set(rdev);
3873 rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); 3879 rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
3874 /* 1M ring buffer */ 3880 /* 1M ring buffer */
@@ -4024,9 +4030,6 @@ int r100_init(struct radeon_device *rdev)
4024 r = radeon_fence_driver_init(rdev); 4030 r = radeon_fence_driver_init(rdev);
4025 if (r) 4031 if (r)
4026 return r; 4032 return r;
4027 r = radeon_irq_kms_init(rdev);
4028 if (r)
4029 return r;
4030 /* Memory manager */ 4033 /* Memory manager */
4031 r = radeon_bo_init(rdev); 4034 r = radeon_bo_init(rdev);
4032 if (r) 4035 if (r)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index c60350e6872d..b9b776f1e582 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1382,6 +1382,12 @@ static int r300_startup(struct radeon_device *rdev)
1382 } 1382 }
1383 1383
1384 /* Enable IRQ */ 1384 /* Enable IRQ */
1385 if (!rdev->irq.installed) {
1386 r = radeon_irq_kms_init(rdev);
1387 if (r)
1388 return r;
1389 }
1390
1385 r100_irq_set(rdev); 1391 r100_irq_set(rdev);
1386 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); 1392 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
1387 /* 1M ring buffer */ 1393 /* 1M ring buffer */
@@ -1516,9 +1522,6 @@ int r300_init(struct radeon_device *rdev)
1516 r = radeon_fence_driver_init(rdev); 1522 r = radeon_fence_driver_init(rdev);
1517 if (r) 1523 if (r)
1518 return r; 1524 return r;
1519 r = radeon_irq_kms_init(rdev);
1520 if (r)
1521 return r;
1522 /* Memory manager */ 1525 /* Memory manager */
1523 r = radeon_bo_init(rdev); 1526 r = radeon_bo_init(rdev);
1524 if (r) 1527 if (r)
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 6fce2eb4dd16..4e796ecf9ea4 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -265,6 +265,12 @@ static int r420_startup(struct radeon_device *rdev)
265 } 265 }
266 266
267 /* Enable IRQ */ 267 /* Enable IRQ */
268 if (!rdev->irq.installed) {
269 r = radeon_irq_kms_init(rdev);
270 if (r)
271 return r;
272 }
273
268 r100_irq_set(rdev); 274 r100_irq_set(rdev);
269 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); 275 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
270 /* 1M ring buffer */ 276 /* 1M ring buffer */
@@ -411,10 +417,6 @@ int r420_init(struct radeon_device *rdev)
411 if (r) { 417 if (r) {
412 return r; 418 return r;
413 } 419 }
414 r = radeon_irq_kms_init(rdev);
415 if (r) {
416 return r;
417 }
418 /* Memory manager */ 420 /* Memory manager */
419 r = radeon_bo_init(rdev); 421 r = radeon_bo_init(rdev);
420 if (r) { 422 if (r) {
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index f795a4e092cb..e1aece73b370 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -194,6 +194,12 @@ static int r520_startup(struct radeon_device *rdev)
194 } 194 }
195 195
196 /* Enable IRQ */ 196 /* Enable IRQ */
197 if (!rdev->irq.installed) {
198 r = radeon_irq_kms_init(rdev);
199 if (r)
200 return r;
201 }
202
197 rs600_irq_set(rdev); 203 rs600_irq_set(rdev);
198 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); 204 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
199 /* 1M ring buffer */ 205 /* 1M ring buffer */
@@ -297,9 +303,6 @@ int r520_init(struct radeon_device *rdev)
297 r = radeon_fence_driver_init(rdev); 303 r = radeon_fence_driver_init(rdev);
298 if (r) 304 if (r)
299 return r; 305 return r;
300 r = radeon_irq_kms_init(rdev);
301 if (r)
302 return r;
303 /* Memory manager */ 306 /* Memory manager */
304 r = radeon_bo_init(rdev); 307 r = radeon_bo_init(rdev);
305 if (r) 308 if (r)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 1a08008c978b..6948eb88c2b7 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1046,6 +1046,24 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)
1046 return -1; 1046 return -1;
1047} 1047}
1048 1048
1049uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
1050{
1051 uint32_t r;
1052
1053 WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
1054 r = RREG32(R_0028FC_MC_DATA);
1055 WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
1056 return r;
1057}
1058
1059void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1060{
1061 WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
1062 S_0028F8_MC_IND_WR_EN(1));
1063 WREG32(R_0028FC_MC_DATA, v);
1064 WREG32(R_0028F8_MC_INDEX, 0x7F);
1065}
1066
1049static void r600_mc_program(struct radeon_device *rdev) 1067static void r600_mc_program(struct radeon_device *rdev)
1050{ 1068{
1051 struct rv515_mc_save save; 1069 struct rv515_mc_save save;
@@ -1181,6 +1199,8 @@ static int r600_mc_init(struct radeon_device *rdev)
1181{ 1199{
1182 u32 tmp; 1200 u32 tmp;
1183 int chansize, numchan; 1201 int chansize, numchan;
1202 uint32_t h_addr, l_addr;
1203 unsigned long long k8_addr;
1184 1204
1185 /* Get VRAM informations */ 1205 /* Get VRAM informations */
1186 rdev->mc.vram_is_ddr = true; 1206 rdev->mc.vram_is_ddr = true;
@@ -1221,7 +1241,30 @@ static int r600_mc_init(struct radeon_device *rdev)
1221 if (rdev->flags & RADEON_IS_IGP) { 1241 if (rdev->flags & RADEON_IS_IGP) {
1222 rs690_pm_info(rdev); 1242 rs690_pm_info(rdev);
1223 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 1243 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
1244
1245 if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
1246 /* Use K8 direct mapping for fast fb access. */
1247 rdev->fastfb_working = false;
1248 h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
1249 l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
1250 k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
1251#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
1252 if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
1253#endif
1254 {
1255 /* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
1256 * memory is present.
1257 */
1258 if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
1259 DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
1260 (unsigned long long)rdev->mc.aper_base, k8_addr);
1261 rdev->mc.aper_base = (resource_size_t)k8_addr;
1262 rdev->fastfb_working = true;
1263 }
1264 }
1265 }
1224 } 1266 }
1267
1225 radeon_update_bandwidth_info(rdev); 1268 radeon_update_bandwidth_info(rdev);
1226 return 0; 1269 return 0;
1227} 1270}
@@ -2644,6 +2687,9 @@ void r600_uvd_rbc_stop(struct radeon_device *rdev)
2644int r600_uvd_init(struct radeon_device *rdev) 2687int r600_uvd_init(struct radeon_device *rdev)
2645{ 2688{
2646 int i, j, r; 2689 int i, j, r;
2690 /* disable byte swapping */
2691 u32 lmi_swap_cntl = 0;
2692 u32 mp_swap_cntl = 0;
2647 2693
2648 /* raise clocks while booting up the VCPU */ 2694 /* raise clocks while booting up the VCPU */
2649 radeon_set_uvd_clocks(rdev, 53300, 40000); 2695 radeon_set_uvd_clocks(rdev, 53300, 40000);
@@ -2668,9 +2714,13 @@ int r600_uvd_init(struct radeon_device *rdev)
2668 WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | 2714 WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
2669 (1 << 21) | (1 << 9) | (1 << 20)); 2715 (1 << 21) | (1 << 9) | (1 << 20));
2670 2716
2671 /* disable byte swapping */ 2717#ifdef __BIG_ENDIAN
2672 WREG32(UVD_LMI_SWAP_CNTL, 0); 2718 /* swap (8 in 32) RB and IB */
2673 WREG32(UVD_MP_SWAP_CNTL, 0); 2719 lmi_swap_cntl = 0xa;
2720 mp_swap_cntl = 0;
2721#endif
2722 WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
2723 WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
2674 2724
2675 WREG32(UVD_MPC_SET_MUXA0, 0x40c2040); 2725 WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
2676 WREG32(UVD_MPC_SET_MUXA1, 0x0); 2726 WREG32(UVD_MPC_SET_MUXA1, 0x0);
@@ -3202,6 +3252,12 @@ static int r600_startup(struct radeon_device *rdev)
3202 } 3252 }
3203 3253
3204 /* Enable IRQ */ 3254 /* Enable IRQ */
3255 if (!rdev->irq.installed) {
3256 r = radeon_irq_kms_init(rdev);
3257 if (r)
3258 return r;
3259 }
3260
3205 r = r600_irq_init(rdev); 3261 r = r600_irq_init(rdev);
3206 if (r) { 3262 if (r) {
3207 DRM_ERROR("radeon: IH init failed (%d).\n", r); 3263 DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -3356,10 +3412,6 @@ int r600_init(struct radeon_device *rdev)
3356 if (r) 3412 if (r)
3357 return r; 3413 return r;
3358 3414
3359 r = radeon_irq_kms_init(rdev);
3360 if (r)
3361 return r;
3362
3363 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; 3415 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
3364 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); 3416 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
3365 3417
@@ -4631,8 +4683,6 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
4631{ 4683{
4632 u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp; 4684 u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
4633 u16 link_cntl2; 4685 u16 link_cntl2;
4634 u32 mask;
4635 int ret;
4636 4686
4637 if (radeon_pcie_gen2 == 0) 4687 if (radeon_pcie_gen2 == 0)
4638 return; 4688 return;
@@ -4651,11 +4701,8 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
4651 if (rdev->family <= CHIP_R600) 4701 if (rdev->family <= CHIP_R600)
4652 return; 4702 return;
4653 4703
4654 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask); 4704 if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
4655 if (ret != 0) 4705 (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
4656 return;
4657
4658 if (!(mask & DRM_PCIE_SPEED_50))
4659 return; 4706 return;
4660 4707
4661 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); 4708 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index acb146c06973..79df558f8c40 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1342,6 +1342,14 @@
1342#define PACKET3_STRMOUT_BASE_UPDATE 0x72 /* r7xx */ 1342#define PACKET3_STRMOUT_BASE_UPDATE 0x72 /* r7xx */
1343#define PACKET3_SURFACE_BASE_UPDATE 0x73 1343#define PACKET3_SURFACE_BASE_UPDATE 0x73
1344 1344
1345#define R_000011_K8_FB_LOCATION 0x11
1346#define R_000012_MC_MISC_UMA_CNTL 0x12
1347#define G_000012_K8_ADDR_EXT(x) (((x) >> 0) & 0xFF)
1348#define R_0028F8_MC_INDEX 0x28F8
1349#define S_0028F8_MC_IND_ADDR(x) (((x) & 0x1FF) << 0)
1350#define C_0028F8_MC_IND_ADDR 0xFFFFFE00
1351#define S_0028F8_MC_IND_WR_EN(x) (((x) & 0x1) << 9)
1352#define R_0028FC_MC_DATA 0x28FC
1345 1353
1346#define R_008020_GRBM_SOFT_RESET 0x8020 1354#define R_008020_GRBM_SOFT_RESET 0x8020
1347#define S_008020_SOFT_RESET_CP(x) (((x) & 1) << 0) 1355#define S_008020_SOFT_RESET_CP(x) (((x) & 1) << 0)
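The new rs780_mc_rreg()/rs780_mc_wreg() accessors added above use the classic index/data window: write the register number into MC_INDEX (with a write-enable bit set for stores), then move the value through MC_DATA. Below is a minimal userspace sketch of that pattern, not the kernel code; the offsets and bit layout follow the R_0028F8_* defines, while the mmio/mc_regs arrays and helper names are invented for illustration (register offsets are used directly as array indices to keep the model small).

#include <stdint.h>
#include <stdio.h>

#define MC_INDEX	0x28F8
#define MC_DATA		0x28FC
#define MC_IND_ADDR(x)	((x) & 0x1FF)
#define MC_IND_WR_EN	(1u << 9)

static uint32_t mmio[0x3000];	/* pretend MMIO aperture */
static uint32_t mc_regs[0x200];	/* indirect MC register file behind the window */

/* crude model of the hardware's index/data window */
static void wreg32(uint32_t reg, uint32_t v)
{
	mmio[reg] = v;
	if (reg == MC_DATA && (mmio[MC_INDEX] & MC_IND_WR_EN))
		mc_regs[mmio[MC_INDEX] & 0x1FF] = v;
}

static uint32_t rreg32(uint32_t reg)
{
	if (reg == MC_DATA)
		return mc_regs[mmio[MC_INDEX] & 0x1FF];
	return mmio[reg];
}

static uint32_t mc_rreg(uint32_t reg)
{
	wreg32(MC_INDEX, MC_IND_ADDR(reg));	/* select the MC register */
	return rreg32(MC_DATA);			/* read it through the window */
}

static void mc_wreg(uint32_t reg, uint32_t v)
{
	wreg32(MC_INDEX, MC_IND_ADDR(reg) | MC_IND_WR_EN);
	wreg32(MC_DATA, v);			/* write lands in the MC register */
	wreg32(MC_INDEX, 0x7F);			/* park the index, as the driver does */
}

int main(void)
{
	mc_wreg(0x11, 0xdeadbeef);
	printf("0x%08x\n", (unsigned)mc_rreg(0x11));	/* prints 0xdeadbeef */
	return 0;
}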
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 06b8c19ab19e..a2802b47ee95 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -122,6 +122,10 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
122 rdev->mc_rreg = &rs600_mc_rreg; 122 rdev->mc_rreg = &rs600_mc_rreg;
123 rdev->mc_wreg = &rs600_mc_wreg; 123 rdev->mc_wreg = &rs600_mc_wreg;
124 } 124 }
125 if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
126 rdev->mc_rreg = &rs780_mc_rreg;
127 rdev->mc_wreg = &rs780_mc_wreg;
128 }
125 if (rdev->family >= CHIP_R600) { 129 if (rdev->family >= CHIP_R600) {
126 rdev->pciep_rreg = &r600_pciep_rreg; 130 rdev->pciep_rreg = &r600_pciep_rreg;
127 rdev->pciep_wreg = &r600_pciep_wreg; 131 rdev->pciep_wreg = &r600_pciep_wreg;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 2c87365d345f..a72759ede753 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -347,6 +347,8 @@ extern bool r600_gui_idle(struct radeon_device *rdev);
347extern void r600_pm_misc(struct radeon_device *rdev); 347extern void r600_pm_misc(struct radeon_device *rdev);
348extern void r600_pm_init_profile(struct radeon_device *rdev); 348extern void r600_pm_init_profile(struct radeon_device *rdev);
349extern void rs780_pm_init_profile(struct radeon_device *rdev); 349extern void rs780_pm_init_profile(struct radeon_device *rdev);
350extern uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg);
351extern void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
350extern void r600_pm_get_dynpm_state(struct radeon_device *rdev); 352extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
351extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes); 353extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
352extern int r600_get_pcie_lanes(struct radeon_device *rdev); 354extern int r600_get_pcie_lanes(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c2c59fb1ea01..b0dc0b6cb4e0 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -244,16 +244,6 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
244 */ 244 */
245void radeon_wb_disable(struct radeon_device *rdev) 245void radeon_wb_disable(struct radeon_device *rdev)
246{ 246{
247 int r;
248
249 if (rdev->wb.wb_obj) {
250 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
251 if (unlikely(r != 0))
252 return;
253 radeon_bo_kunmap(rdev->wb.wb_obj);
254 radeon_bo_unpin(rdev->wb.wb_obj);
255 radeon_bo_unreserve(rdev->wb.wb_obj);
256 }
257 rdev->wb.enabled = false; 247 rdev->wb.enabled = false;
258} 248}
259 249
@@ -269,6 +259,11 @@ void radeon_wb_fini(struct radeon_device *rdev)
269{ 259{
270 radeon_wb_disable(rdev); 260 radeon_wb_disable(rdev);
271 if (rdev->wb.wb_obj) { 261 if (rdev->wb.wb_obj) {
262 if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
263 radeon_bo_kunmap(rdev->wb.wb_obj);
264 radeon_bo_unpin(rdev->wb.wb_obj);
265 radeon_bo_unreserve(rdev->wb.wb_obj);
266 }
272 radeon_bo_unref(&rdev->wb.wb_obj); 267 radeon_bo_unref(&rdev->wb.wb_obj);
273 rdev->wb.wb = NULL; 268 rdev->wb.wb = NULL;
274 rdev->wb.wb_obj = NULL; 269 rdev->wb.wb_obj = NULL;
@@ -295,26 +290,26 @@ int radeon_wb_init(struct radeon_device *rdev)
295 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); 290 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
296 return r; 291 return r;
297 } 292 }
298 } 293 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
299 r = radeon_bo_reserve(rdev->wb.wb_obj, false); 294 if (unlikely(r != 0)) {
300 if (unlikely(r != 0)) { 295 radeon_wb_fini(rdev);
301 radeon_wb_fini(rdev); 296 return r;
302 return r; 297 }
303 } 298 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
304 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, 299 &rdev->wb.gpu_addr);
305 &rdev->wb.gpu_addr); 300 if (r) {
306 if (r) { 301 radeon_bo_unreserve(rdev->wb.wb_obj);
302 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
303 radeon_wb_fini(rdev);
304 return r;
305 }
306 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
307 radeon_bo_unreserve(rdev->wb.wb_obj); 307 radeon_bo_unreserve(rdev->wb.wb_obj);
308 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); 308 if (r) {
309 radeon_wb_fini(rdev); 309 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
310 return r; 310 radeon_wb_fini(rdev);
311 } 311 return r;
312 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); 312 }
313 radeon_bo_unreserve(rdev->wb.wb_obj);
314 if (r) {
315 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
316 radeon_wb_fini(rdev);
317 return r;
318 } 313 }
319 314
320 /* clear wb memory */ 315 /* clear wb memory */
@@ -467,23 +462,27 @@ bool radeon_card_posted(struct radeon_device *rdev)
467{ 462{
468 uint32_t reg; 463 uint32_t reg;
469 464
465 /* required for EFI mode on macbook2,1 which uses an r5xx asic */
470 if (efi_enabled(EFI_BOOT) && 466 if (efi_enabled(EFI_BOOT) &&
471 rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) 467 (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
468 (rdev->family < CHIP_R600))
472 return false; 469 return false;
473 470
471 if (ASIC_IS_NODCE(rdev))
472 goto check_memsize;
473
474 /* first check CRTCs */ 474 /* first check CRTCs */
475 if (ASIC_IS_DCE41(rdev)) { 475 if (ASIC_IS_DCE4(rdev)) {
476 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | 476 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
477 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); 477 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
478 if (reg & EVERGREEN_CRTC_MASTER_EN) 478 if (rdev->num_crtc >= 4) {
479 return true; 479 reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
480 } else if (ASIC_IS_DCE4(rdev)) { 480 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
481 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | 481 }
482 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | 482 if (rdev->num_crtc >= 6) {
483 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | 483 reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
484 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | 484 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
485 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | 485 }
486 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
487 if (reg & EVERGREEN_CRTC_MASTER_EN) 486 if (reg & EVERGREEN_CRTC_MASTER_EN)
488 return true; 487 return true;
489 } else if (ASIC_IS_AVIVO(rdev)) { 488 } else if (ASIC_IS_AVIVO(rdev)) {
@@ -500,6 +499,7 @@ bool radeon_card_posted(struct radeon_device *rdev)
500 } 499 }
501 } 500 }
502 501
502check_memsize:
503 /* then check MEM_SIZE, in case the crtcs are off */ 503 /* then check MEM_SIZE, in case the crtcs are off */
504 if (rdev->family >= CHIP_R600) 504 if (rdev->family >= CHIP_R600)
505 reg = RREG32(R600_CONFIG_MEMSIZE); 505 reg = RREG32(R600_CONFIG_MEMSIZE);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 5b937dfe6f65..ddb8f8e04eb5 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -63,7 +63,9 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
63{ 63{
64 struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; 64 struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
65 if (likely(rdev->wb.enabled || !drv->scratch_reg)) { 65 if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
66 *drv->cpu_addr = cpu_to_le32(seq); 66 if (drv->cpu_addr) {
67 *drv->cpu_addr = cpu_to_le32(seq);
68 }
67 } else { 69 } else {
68 WREG32(drv->scratch_reg, seq); 70 WREG32(drv->scratch_reg, seq);
69 } 71 }
@@ -84,7 +86,11 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
84 u32 seq = 0; 86 u32 seq = 0;
85 87
86 if (likely(rdev->wb.enabled || !drv->scratch_reg)) { 88 if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
87 seq = le32_to_cpu(*drv->cpu_addr); 89 if (drv->cpu_addr) {
90 seq = le32_to_cpu(*drv->cpu_addr);
91 } else {
92 seq = lower_32_bits(atomic64_read(&drv->last_seq));
93 }
88 } else { 94 } else {
89 seq = RREG32(drv->scratch_reg); 95 seq = RREG32(drv->scratch_reg);
90 } 96 }
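The fence read/write paths above now tolerate a missing writeback mapping: when drv->cpu_addr is NULL the driver falls back to the last sequence number it already tracks instead of dereferencing the pointer. A minimal sketch of that guarded-read fallback, with hypothetical names and no locking, since it is not the kernel code:

#include <stdint.h>
#include <stdio.h>

struct fence_drv {
	volatile uint32_t *cpu_addr;	/* may be NULL while the ring is suspended */
	uint64_t last_seq;		/* last sequence value the driver has seen */
};

static uint32_t fence_read(const struct fence_drv *drv)
{
	if (drv->cpu_addr)
		return *drv->cpu_addr;		/* normal path: read the writeback slot */
	return (uint32_t)drv->last_seq;		/* suspended: report what we already know */
}

int main(void)
{
	uint32_t slot = 7;
	struct fence_drv drv = { .cpu_addr = &slot, .last_seq = 5 };

	printf("%u\n", (unsigned)fence_read(&drv));	/* 7: mapping present */
	drv.cpu_addr = NULL;
	printf("%u\n", (unsigned)fence_read(&drv));	/* 5: falls back to last_seq */
	return 0;
}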
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 2c1341f63dc5..43ec4a401f07 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -1197,11 +1197,13 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1197int radeon_vm_bo_rmv(struct radeon_device *rdev, 1197int radeon_vm_bo_rmv(struct radeon_device *rdev,
1198 struct radeon_bo_va *bo_va) 1198 struct radeon_bo_va *bo_va)
1199{ 1199{
1200 int r; 1200 int r = 0;
1201 1201
1202 mutex_lock(&rdev->vm_manager.lock); 1202 mutex_lock(&rdev->vm_manager.lock);
1203 mutex_lock(&bo_va->vm->mutex); 1203 mutex_lock(&bo_va->vm->mutex);
1204 r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); 1204 if (bo_va->soffset) {
1205 r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
1206 }
1205 mutex_unlock(&rdev->vm_manager.lock); 1207 mutex_unlock(&rdev->vm_manager.lock);
1206 list_del(&bo_va->vm_list); 1208 list_del(&bo_va->vm_list);
1207 mutex_unlock(&bo_va->vm->mutex); 1209 mutex_unlock(&bo_va->vm->mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index e17faa7cf732..82434018cbe8 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -402,6 +402,13 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
402 return -ENOMEM; 402 return -ENOMEM;
403 /* Align requested size with padding so unlock_commit can 403 /* Align requested size with padding so unlock_commit can
404 * pad safely */ 404 * pad safely */
405 radeon_ring_free_size(rdev, ring);
406 if (ring->ring_free_dw == (ring->ring_size / 4)) {
407 /* The ring is empty; update the lockup info to avoid a
408 * false positive.
409 */
410 radeon_ring_lockup_update(ring);
411 }
405 ndw = (ndw + ring->align_mask) & ~ring->align_mask; 412 ndw = (ndw + ring->align_mask) & ~ring->align_mask;
406 while (ndw > (ring->ring_free_dw - 1)) { 413 while (ndw > (ring->ring_free_dw - 1)) {
407 radeon_ring_free_size(rdev, ring); 414 radeon_ring_free_size(rdev, ring);
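The hunk above refreshes the lockup tracking whenever the ring turns out to be completely empty, so an idle ring is never mistaken for a hung one. Because ring_size is counted in bytes and ring_free_dw in 32-bit words, an empty ring has exactly ring_size / 4 free dwords. A small sketch of that emptiness test, assuming a simplified ring structure rather than the driver's real one:

#include <stdbool.h>
#include <stdio.h>

struct ring {
	unsigned ring_size;	/* bytes */
	unsigned ring_free_dw;	/* free 32-bit words */
};

static bool ring_is_empty(const struct ring *r)
{
	/* same test as the new code: all dwords free means nothing is queued */
	return r->ring_free_dw == r->ring_size / 4;
}

int main(void)
{
	struct ring r = { .ring_size = 1024 * 1024, .ring_free_dw = 262144 };
	printf("%d\n", ring_is_empty(&r));	/* 1: nothing queued */
	r.ring_free_dw = 1000;
	printf("%d\n", ring_is_empty(&r));	/* 0: work pending */
	return 0;
}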
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 906e5c0ca3b9..cad735dd02c6 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -159,7 +159,17 @@ int radeon_uvd_suspend(struct radeon_device *rdev)
159 if (!r) { 159 if (!r) {
160 radeon_bo_kunmap(rdev->uvd.vcpu_bo); 160 radeon_bo_kunmap(rdev->uvd.vcpu_bo);
161 radeon_bo_unpin(rdev->uvd.vcpu_bo); 161 radeon_bo_unpin(rdev->uvd.vcpu_bo);
162 rdev->uvd.cpu_addr = NULL;
163 if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
164 radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
165 }
162 radeon_bo_unreserve(rdev->uvd.vcpu_bo); 166 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
167
168 if (rdev->uvd.cpu_addr) {
169 radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
170 } else {
171 rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
172 }
163 } 173 }
164 return r; 174 return r;
165} 175}
@@ -178,6 +188,10 @@ int radeon_uvd_resume(struct radeon_device *rdev)
178 return r; 188 return r;
179 } 189 }
180 190
191 /* The BO was pinned to the CPU domain during suspend; unmap and unpin it first. */
192 radeon_bo_kunmap(rdev->uvd.vcpu_bo);
193 radeon_bo_unpin(rdev->uvd.vcpu_bo);
194
181 r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, 195 r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
182 &rdev->uvd.gpu_addr); 196 &rdev->uvd.gpu_addr);
183 if (r) { 197 if (r) {
@@ -613,19 +627,19 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
613 } 627 }
614 628
615 /* stitch together an UVD create msg */ 629 /* stitch together an UVD create msg */
616 msg[0] = 0x00000de4; 630 msg[0] = cpu_to_le32(0x00000de4);
617 msg[1] = 0x00000000; 631 msg[1] = cpu_to_le32(0x00000000);
618 msg[2] = handle; 632 msg[2] = cpu_to_le32(handle);
619 msg[3] = 0x00000000; 633 msg[3] = cpu_to_le32(0x00000000);
620 msg[4] = 0x00000000; 634 msg[4] = cpu_to_le32(0x00000000);
621 msg[5] = 0x00000000; 635 msg[5] = cpu_to_le32(0x00000000);
622 msg[6] = 0x00000000; 636 msg[6] = cpu_to_le32(0x00000000);
623 msg[7] = 0x00000780; 637 msg[7] = cpu_to_le32(0x00000780);
624 msg[8] = 0x00000440; 638 msg[8] = cpu_to_le32(0x00000440);
625 msg[9] = 0x00000000; 639 msg[9] = cpu_to_le32(0x00000000);
626 msg[10] = 0x01b37000; 640 msg[10] = cpu_to_le32(0x01b37000);
627 for (i = 11; i < 1024; ++i) 641 for (i = 11; i < 1024; ++i)
628 msg[i] = 0x0; 642 msg[i] = cpu_to_le32(0x0);
629 643
630 radeon_bo_kunmap(bo); 644 radeon_bo_kunmap(bo);
631 radeon_bo_unreserve(bo); 645 radeon_bo_unreserve(bo);
@@ -659,12 +673,12 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
659 } 673 }
660 674
661 /* stitch together an UVD destroy msg */ 675 /* stitch together an UVD destroy msg */
662 msg[0] = 0x00000de4; 676 msg[0] = cpu_to_le32(0x00000de4);
663 msg[1] = 0x00000002; 677 msg[1] = cpu_to_le32(0x00000002);
664 msg[2] = handle; 678 msg[2] = cpu_to_le32(handle);
665 msg[3] = 0x00000000; 679 msg[3] = cpu_to_le32(0x00000000);
666 for (i = 4; i < 1024; ++i) 680 for (i = 4; i < 1024; ++i)
667 msg[i] = 0x0; 681 msg[i] = cpu_to_le32(0x0);
668 682
669 radeon_bo_kunmap(bo); 683 radeon_bo_kunmap(bo);
670 radeon_bo_unreserve(bo); 684 radeon_bo_unreserve(bo);
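The create/destroy message writes above switch to cpu_to_le32() because the UVD block consumes little-endian words regardless of host byte order; on a big-endian machine a plain store would lay the bytes out backwards. A userspace sketch of the same idea using htole32() (the message words here are only examples lifted from the destroy message above, not a complete message):

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t msg[4];

	msg[0] = htole32(0x00000de4);	/* message header, always LE in memory */
	msg[1] = htole32(0x00000002);	/* destroy-message type */
	msg[2] = htole32(42);		/* session handle */
	msg[3] = htole32(0);

	/* Dump the raw bytes: the layout is now identical on LE and BE hosts. */
	const uint8_t *p = (const uint8_t *)msg;
	for (size_t i = 0; i < sizeof(msg); i++)
		printf("%02x%s", p[i], (i % 4 == 3) ? "\n" : " ");
	return 0;
}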
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 73051ce3121e..233a9b9fa1f7 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -417,6 +417,12 @@ static int rs400_startup(struct radeon_device *rdev)
417 } 417 }
418 418
419 /* Enable IRQ */ 419 /* Enable IRQ */
420 if (!rdev->irq.installed) {
421 r = radeon_irq_kms_init(rdev);
422 if (r)
423 return r;
424 }
425
420 r100_irq_set(rdev); 426 r100_irq_set(rdev);
421 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); 427 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
422 /* 1M ring buffer */ 428 /* 1M ring buffer */
@@ -535,9 +541,6 @@ int rs400_init(struct radeon_device *rdev)
535 r = radeon_fence_driver_init(rdev); 541 r = radeon_fence_driver_init(rdev);
536 if (r) 542 if (r)
537 return r; 543 return r;
538 r = radeon_irq_kms_init(rdev);
539 if (r)
540 return r;
541 /* Memory manager */ 544 /* Memory manager */
542 r = radeon_bo_init(rdev); 545 r = radeon_bo_init(rdev);
543 if (r) 546 if (r)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 46fa1b07c560..670b555d2ca2 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -923,6 +923,12 @@ static int rs600_startup(struct radeon_device *rdev)
923 } 923 }
924 924
925 /* Enable IRQ */ 925 /* Enable IRQ */
926 if (!rdev->irq.installed) {
927 r = radeon_irq_kms_init(rdev);
928 if (r)
929 return r;
930 }
931
926 rs600_irq_set(rdev); 932 rs600_irq_set(rdev);
927 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); 933 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
928 /* 1M ring buffer */ 934 /* 1M ring buffer */
@@ -1047,9 +1053,6 @@ int rs600_init(struct radeon_device *rdev)
1047 r = radeon_fence_driver_init(rdev); 1053 r = radeon_fence_driver_init(rdev);
1048 if (r) 1054 if (r)
1049 return r; 1055 return r;
1050 r = radeon_irq_kms_init(rdev);
1051 if (r)
1052 return r;
1053 /* Memory manager */ 1056 /* Memory manager */
1054 r = radeon_bo_init(rdev); 1057 r = radeon_bo_init(rdev);
1055 if (r) 1058 if (r)
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index ab4c86cfd552..55880d5962c3 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -651,6 +651,12 @@ static int rs690_startup(struct radeon_device *rdev)
651 } 651 }
652 652
653 /* Enable IRQ */ 653 /* Enable IRQ */
654 if (!rdev->irq.installed) {
655 r = radeon_irq_kms_init(rdev);
656 if (r)
657 return r;
658 }
659
654 rs600_irq_set(rdev); 660 rs600_irq_set(rdev);
655 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); 661 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
656 /* 1M ring buffer */ 662 /* 1M ring buffer */
@@ -776,9 +782,6 @@ int rs690_init(struct radeon_device *rdev)
776 r = radeon_fence_driver_init(rdev); 782 r = radeon_fence_driver_init(rdev);
777 if (r) 783 if (r)
778 return r; 784 return r;
779 r = radeon_irq_kms_init(rdev);
780 if (r)
781 return r;
782 /* Memory manager */ 785 /* Memory manager */
783 r = radeon_bo_init(rdev); 786 r = radeon_bo_init(rdev);
784 if (r) 787 if (r)
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index ffcba730c57c..21c7d7b26e55 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -532,6 +532,12 @@ static int rv515_startup(struct radeon_device *rdev)
532 } 532 }
533 533
534 /* Enable IRQ */ 534 /* Enable IRQ */
535 if (!rdev->irq.installed) {
536 r = radeon_irq_kms_init(rdev);
537 if (r)
538 return r;
539 }
540
535 rs600_irq_set(rdev); 541 rs600_irq_set(rdev);
536 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); 542 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
537 /* 1M ring buffer */ 543 /* 1M ring buffer */
@@ -662,9 +668,6 @@ int rv515_init(struct radeon_device *rdev)
662 r = radeon_fence_driver_init(rdev); 668 r = radeon_fence_driver_init(rdev);
663 if (r) 669 if (r)
664 return r; 670 return r;
665 r = radeon_irq_kms_init(rdev);
666 if (r)
667 return r;
668 /* Memory manager */ 671 /* Memory manager */
669 r = radeon_bo_init(rdev); 672 r = radeon_bo_init(rdev);
670 if (r) 673 if (r)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 83f612a9500b..4a62ad2e5399 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -862,10 +862,8 @@ int rv770_uvd_resume(struct radeon_device *rdev)
862 chip_id = 0x0100000b; 862 chip_id = 0x0100000b;
863 break; 863 break;
864 case CHIP_SUMO: 864 case CHIP_SUMO:
865 chip_id = 0x0100000c;
866 break;
867 case CHIP_SUMO2: 865 case CHIP_SUMO2:
868 chip_id = 0x0100000d; 866 chip_id = 0x0100000c;
869 break; 867 break;
870 case CHIP_PALM: 868 case CHIP_PALM:
871 chip_id = 0x0100000e; 869 chip_id = 0x0100000e;
@@ -1889,6 +1887,12 @@ static int rv770_startup(struct radeon_device *rdev)
1889 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; 1887 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
1890 1888
1891 /* Enable IRQ */ 1889 /* Enable IRQ */
1890 if (!rdev->irq.installed) {
1891 r = radeon_irq_kms_init(rdev);
1892 if (r)
1893 return r;
1894 }
1895
1892 r = r600_irq_init(rdev); 1896 r = r600_irq_init(rdev);
1893 if (r) { 1897 if (r) {
1894 DRM_ERROR("radeon: IH init failed (%d).\n", r); 1898 DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -2047,10 +2051,6 @@ int rv770_init(struct radeon_device *rdev)
2047 if (r) 2051 if (r)
2048 return r; 2052 return r;
2049 2053
2050 r = radeon_irq_kms_init(rdev);
2051 if (r)
2052 return r;
2053
2054 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; 2054 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
2055 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); 2055 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
2056 2056
@@ -2113,8 +2113,6 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
2113{ 2113{
2114 u32 link_width_cntl, lanes, speed_cntl, tmp; 2114 u32 link_width_cntl, lanes, speed_cntl, tmp;
2115 u16 link_cntl2; 2115 u16 link_cntl2;
2116 u32 mask;
2117 int ret;
2118 2116
2119 if (radeon_pcie_gen2 == 0) 2117 if (radeon_pcie_gen2 == 0)
2120 return; 2118 return;
@@ -2129,11 +2127,8 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
2129 if (ASIC_IS_X2(rdev)) 2127 if (ASIC_IS_X2(rdev))
2130 return; 2128 return;
2131 2129
2132 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask); 2130 if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
2133 if (ret != 0) 2131 (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
2134 return;
2135
2136 if (!(mask & DRM_PCIE_SPEED_50))
2137 return; 2132 return;
2138 2133
2139 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); 2134 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
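Both r600_pcie_gen2_enable() and rv770_pcie_gen2_enable() drop drm_pcie_get_speed_cap_mask() in favour of checking the parent bus's advertised maximum link speed directly, bailing out unless the bus is 5.0 GT/s or 8.0 GT/s capable. A sketch of that gate, with a stand-in enum rather than the kernel's pci_bus_speed values:

#include <stdbool.h>
#include <stdio.h>

enum bus_speed { SPEED_2_5GT, SPEED_5_0GT, SPEED_8_0GT, SPEED_UNKNOWN };

static bool pcie_gen2_possible(enum bus_speed max_bus_speed)
{
	/* same shape as the new check: anything other than 5.0/8.0 GT/s -> no gen2 */
	return max_bus_speed == SPEED_5_0GT || max_bus_speed == SPEED_8_0GT;
}

int main(void)
{
	printf("%d\n", pcie_gen2_possible(SPEED_2_5GT));	/* 0 */
	printf("%d\n", pcie_gen2_possible(SPEED_5_0GT));	/* 1 */
	return 0;
}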
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 5ffade69af25..a1b0da6b5808 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2616,7 +2616,7 @@ static void si_gpu_init(struct radeon_device *rdev)
2616 default: 2616 default:
2617 rdev->config.si.max_shader_engines = 1; 2617 rdev->config.si.max_shader_engines = 1;
2618 rdev->config.si.max_tile_pipes = 4; 2618 rdev->config.si.max_tile_pipes = 4;
2619 rdev->config.si.max_cu_per_sh = 2; 2619 rdev->config.si.max_cu_per_sh = 5;
2620 rdev->config.si.max_sh_per_se = 2; 2620 rdev->config.si.max_sh_per_se = 2;
2621 rdev->config.si.max_backends_per_se = 4; 2621 rdev->config.si.max_backends_per_se = 4;
2622 rdev->config.si.max_texture_channel_caches = 4; 2622 rdev->config.si.max_texture_channel_caches = 4;
@@ -5350,6 +5350,12 @@ static int si_startup(struct radeon_device *rdev)
5350 } 5350 }
5351 5351
5352 /* Enable IRQ */ 5352 /* Enable IRQ */
5353 if (!rdev->irq.installed) {
5354 r = radeon_irq_kms_init(rdev);
5355 if (r)
5356 return r;
5357 }
5358
5353 r = si_irq_init(rdev); 5359 r = si_irq_init(rdev);
5354 if (r) { 5360 if (r) {
5355 DRM_ERROR("radeon: IH init failed (%d).\n", r); 5361 DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -5533,10 +5539,6 @@ int si_init(struct radeon_device *rdev)
5533 if (r) 5539 if (r)
5534 return r; 5540 return r;
5535 5541
5536 r = radeon_irq_kms_init(rdev);
5537 if (r)
5538 return r;
5539
5540 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 5542 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
5541 ring->ring_obj = NULL; 5543 ring->ring_obj = NULL;
5542 r600_ring_init(rdev, ring, 1024 * 1024); 5544 r600_ring_init(rdev, ring, 1024 * 1024);
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index e461e9972455..7a4d10106906 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -6,6 +6,7 @@ config DRM_TILCDC
6 select DRM_GEM_CMA_HELPER 6 select DRM_GEM_CMA_HELPER
7 select VIDEOMODE_HELPERS 7 select VIDEOMODE_HELPERS
8 select BACKLIGHT_CLASS_DEVICE 8 select BACKLIGHT_CLASS_DEVICE
9 select BACKLIGHT_LCD_SUPPORT
9 help 10 help
10 Choose this option if you have an TI SoC with LCDC display 11 Choose this option if you have an TI SoC with LCDC display
11 controller, for example AM33xx in beagle-bone, DA8xx, or 12 controller, for example AM33xx in beagle-bone, DA8xx, or
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index dc3ae5c56f56..d39a5cede0b0 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -264,9 +264,12 @@ static struct mt_class mt_classes[] = {
264static void mt_free_input_name(struct hid_input *hi) 264static void mt_free_input_name(struct hid_input *hi)
265{ 265{
266 struct hid_device *hdev = hi->report->device; 266 struct hid_device *hdev = hi->report->device;
267 const char *name = hi->input->name;
267 268
268 if (hi->input->name != hdev->name) 269 if (name != hdev->name) {
269 kfree(hi->input->name); 270 hi->input->name = hdev->name;
271 kfree(name);
272 }
270} 273}
271 274
272static ssize_t mt_show_quirks(struct device *dev, 275static ssize_t mt_show_quirks(struct device *dev,
@@ -1040,11 +1043,11 @@ static void mt_remove(struct hid_device *hdev)
1040 struct hid_input *hi; 1043 struct hid_input *hi;
1041 1044
1042 sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group); 1045 sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
1043 hid_hw_stop(hdev);
1044
1045 list_for_each_entry(hi, &hdev->inputs, list) 1046 list_for_each_entry(hi, &hdev->inputs, list)
1046 mt_free_input_name(hi); 1047 mt_free_input_name(hi);
1047 1048
1049 hid_hw_stop(hdev);
1050
1048 kfree(td); 1051 kfree(td);
1049 hid_set_drvdata(hdev, NULL); 1052 hid_set_drvdata(hdev, NULL);
1050} 1053}
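mt_free_input_name() above now snapshots the name pointer, repoints input->name at the device's own name, and only then frees the old string; together with moving hid_hw_stop() after the loop, this keeps later teardown paths from reading freed memory or freeing the string twice. A standalone sketch of that repoint-before-free pattern, with made-up types in place of the HID structures:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dev {
	const char *default_name;
	const char *name;	/* either default_name or a heap-allocated copy */
};

static void free_custom_name(struct dev *d)
{
	const char *name = d->name;

	if (name != d->default_name) {
		d->name = d->default_name;	/* repoint before freeing */
		free((void *)name);
	}
}

int main(void)
{
	struct dev d = { .default_name = "hid-device" };

	d.name = strdup("hid-device Touchscreen");
	free_custom_name(&d);
	free_custom_name(&d);			/* safe: second call is a no-op */
	printf("%s\n", d.name);			/* prints the default name */
	return 0;
}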
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 7e76922a4ba9..f920619cd6da 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -331,26 +331,68 @@ static int adm1021_detect(struct i2c_client *client,
331 man_id = i2c_smbus_read_byte_data(client, ADM1021_REG_MAN_ID); 331 man_id = i2c_smbus_read_byte_data(client, ADM1021_REG_MAN_ID);
332 dev_id = i2c_smbus_read_byte_data(client, ADM1021_REG_DEV_ID); 332 dev_id = i2c_smbus_read_byte_data(client, ADM1021_REG_DEV_ID);
333 333
334 if (man_id < 0 || dev_id < 0)
335 return -ENODEV;
336
334 if (man_id == 0x4d && dev_id == 0x01) 337 if (man_id == 0x4d && dev_id == 0x01)
335 type_name = "max1617a"; 338 type_name = "max1617a";
336 else if (man_id == 0x41) { 339 else if (man_id == 0x41) {
337 if ((dev_id & 0xF0) == 0x30) 340 if ((dev_id & 0xF0) == 0x30)
338 type_name = "adm1023"; 341 type_name = "adm1023";
339 else 342 else if ((dev_id & 0xF0) == 0x00)
340 type_name = "adm1021"; 343 type_name = "adm1021";
344 else
345 return -ENODEV;
341 } else if (man_id == 0x49) 346 } else if (man_id == 0x49)
342 type_name = "thmc10"; 347 type_name = "thmc10";
343 else if (man_id == 0x23) 348 else if (man_id == 0x23)
344 type_name = "gl523sm"; 349 type_name = "gl523sm";
345 else if (man_id == 0x54) 350 else if (man_id == 0x54)
346 type_name = "mc1066"; 351 type_name = "mc1066";
347 /* LM84 Mfr ID in a different place, and it has more unused bits */ 352 else {
348 else if (conv_rate == 0x00 353 int lte, rte, lhi, rhi, llo, rlo;
349 && (config & 0x7F) == 0x00 354
350 && (status & 0xAB) == 0x00) 355 /* extra checks for LM84 and MAX1617 to avoid misdetections */
351 type_name = "lm84"; 356
352 else 357 llo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(0));
353 type_name = "max1617"; 358 rlo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(1));
359
360 /* fail if any of the additional register reads failed */
361 if (llo < 0 || rlo < 0)
362 return -ENODEV;
363
364 lte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(0));
365 rte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(1));
366 lhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(0));
367 rhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(1));
368
369 /*
370 * Fail for negative temperatures and negative high limits.
371 * This check also catches read errors on the tested registers.
372 */
373 if ((s8)lte < 0 || (s8)rte < 0 || (s8)lhi < 0 || (s8)rhi < 0)
374 return -ENODEV;
375
376 /* fail if all registers hold the same value */
377 if (lte == rte && lte == lhi && lte == rhi && lte == llo
378 && lte == rlo)
379 return -ENODEV;
380
381 /*
382 * LM84 Mfr ID is in a different place,
383 * and it has more unused bits.
384 */
385 if (conv_rate == 0x00
386 && (config & 0x7F) == 0x00
387 && (status & 0xAB) == 0x00) {
388 type_name = "lm84";
389 } else {
390 /* fail if low limits are larger than high limits */
391 if ((s8)llo > lhi || (s8)rlo > rhi)
392 return -ENODEV;
393 type_name = "max1617";
394 }
395 }
354 396
355 pr_debug("Detected chip %s at adapter %d, address 0x%02x.\n", 397 pr_debug("Detected chip %s at adapter %d, address 0x%02x.\n",
356 type_name, i2c_adapter_id(adapter), client->addr); 398 type_name, i2c_adapter_id(adapter), client->addr);
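The extended adm1021_detect() logic above reads several temperature and limit registers and refuses to claim the device when the values are implausible: read errors, negative readings or high limits (spotted by reinterpreting the byte as signed), all registers identical, or low limits above high limits. A compact sketch of those plausibility checks, detached from I2C and with a hypothetical helper name:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* lte/rte: local/remote temperature; lhi/rhi: high limits; llo/rlo: low limits.
 * Negative arguments stand for SMBus read errors, as in the driver. */
static bool plausible(int lte, int rte, int lhi, int rhi, int llo, int rlo)
{
	/* any register read failed? */
	if (lte < 0 || rte < 0 || lhi < 0 || rhi < 0 || llo < 0 || rlo < 0)
		return false;
	/* negative temperatures or high limits are not plausible at detect time */
	if ((int8_t)lte < 0 || (int8_t)rte < 0 || (int8_t)lhi < 0 || (int8_t)rhi < 0)
		return false;
	/* all registers identical usually means "not this chip at all" */
	if (lte == rte && lte == lhi && lte == rhi && lte == llo && lte == rlo)
		return false;
	/* low limits above high limits are equally suspicious */
	if ((int8_t)llo > lhi || (int8_t)rlo > rhi)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", plausible(40, 42, 85, 85, 10, 10));	/* 1: looks like a sensor */
	printf("%d\n", plausible(0xC8, 42, 85, 85, 10, 10));	/* 0: 0xC8 reads as -56 C */
	return 0;
}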
diff --git a/drivers/iio/buffer_cb.c b/drivers/iio/buffer_cb.c
index 9201022945e9..9d19ba74f22b 100644
--- a/drivers/iio/buffer_cb.c
+++ b/drivers/iio/buffer_cb.c
@@ -64,7 +64,7 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
64 while (chan->indio_dev) { 64 while (chan->indio_dev) {
65 if (chan->indio_dev != indio_dev) { 65 if (chan->indio_dev != indio_dev) {
66 ret = -EINVAL; 66 ret = -EINVAL;
67 goto error_release_channels; 67 goto error_free_scan_mask;
68 } 68 }
69 set_bit(chan->channel->scan_index, 69 set_bit(chan->channel->scan_index,
70 cb_buff->buffer.scan_mask); 70 cb_buff->buffer.scan_mask);
@@ -73,6 +73,8 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
73 73
74 return cb_buff; 74 return cb_buff;
75 75
76error_free_scan_mask:
77 kfree(cb_buff->buffer.scan_mask);
76error_release_channels: 78error_release_channels:
77 iio_channel_release_all(cb_buff->channels); 79 iio_channel_release_all(cb_buff->channels);
78error_free_cb_buff: 80error_free_cb_buff:
@@ -100,6 +102,7 @@ EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);
100 102
101void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff) 103void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
102{ 104{
105 kfree(cb_buff->buffer.scan_mask);
103 iio_channel_release_all(cb_buff->channels); 106 iio_channel_release_all(cb_buff->channels);
104 kfree(cb_buff); 107 kfree(cb_buff);
105} 108}
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index a884252ac66b..e76d4ace53ff 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -212,7 +212,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
212 (pdata->r2_user_settings & (ADF4350_REG2_PD_POLARITY_POS | 212 (pdata->r2_user_settings & (ADF4350_REG2_PD_POLARITY_POS |
213 ADF4350_REG2_LDP_6ns | ADF4350_REG2_LDF_INT_N | 213 ADF4350_REG2_LDP_6ns | ADF4350_REG2_LDF_INT_N |
214 ADF4350_REG2_CHARGE_PUMP_CURR_uA(5000) | 214 ADF4350_REG2_CHARGE_PUMP_CURR_uA(5000) |
215 ADF4350_REG2_MUXOUT(0x7) | ADF4350_REG2_NOISE_MODE(0x9))); 215 ADF4350_REG2_MUXOUT(0x7) | ADF4350_REG2_NOISE_MODE(0x3)));
216 216
217 st->regs[ADF4350_REG3] = pdata->r3_user_settings & 217 st->regs[ADF4350_REG3] = pdata->r3_user_settings &
218 (ADF4350_REG3_12BIT_CLKDIV(0xFFF) | 218 (ADF4350_REG3_12BIT_CLKDIV(0xFFF) |
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 795d100b4c36..98ddc323add0 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -124,7 +124,7 @@ static int __of_iio_channel_get(struct iio_channel *channel,
124 channel->indio_dev = indio_dev; 124 channel->indio_dev = indio_dev;
125 index = iiospec.args_count ? iiospec.args[0] : 0; 125 index = iiospec.args_count ? iiospec.args[0] : 0;
126 if (index >= indio_dev->num_channels) { 126 if (index >= indio_dev->num_channels) {
127 return -EINVAL; 127 err = -EINVAL;
128 goto err_put; 128 goto err_put;
129 } 129 }
130 channel->channel = &indio_dev->channels[index]; 130 channel->channel = &indio_dev->channels[index];
@@ -450,7 +450,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
450 s64 raw64 = raw; 450 s64 raw64 = raw;
451 int ret; 451 int ret;
452 452
453 ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_SCALE); 453 ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
454 if (ret == 0) 454 if (ret == 0)
455 raw64 += offset; 455 raw64 += offset;
456 456
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 81c7b73695d2..3b9afccaaade 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -61,7 +61,7 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
61 if (dma_region) { 61 if (dma_region) {
62 struct qib_mregion *tmr; 62 struct qib_mregion *tmr;
63 63
64 tmr = rcu_dereference(dev->dma_mr); 64 tmr = rcu_access_pointer(dev->dma_mr);
65 if (!tmr) { 65 if (!tmr) {
66 qib_get_mr(mr); 66 qib_get_mr(mr);
67 rcu_assign_pointer(dev->dma_mr, mr); 67 rcu_assign_pointer(dev->dma_mr, mr);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index f19b0998a53c..2e84ef859c5b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -5,6 +5,7 @@
5 * Copyright (C) 2004 Alex Aizman 5 * Copyright (C) 2004 Alex Aizman
6 * Copyright (C) 2005 Mike Christie 6 * Copyright (C) 2005 Mike Christie
7 * Copyright (c) 2005, 2006 Voltaire, Inc. All rights reserved. 7 * Copyright (c) 2005, 2006 Voltaire, Inc. All rights reserved.
8 * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
8 * maintained by openib-general@openib.org 9 * maintained by openib-general@openib.org
9 * 10 *
10 * This software is available to you under a choice of one of two 11 * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 06f578cde75b..4f069c0d4c04 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -8,6 +8,7 @@
8 * 8 *
9 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. 9 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
10 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. 10 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
11 * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
11 * 12 *
12 * This software is available to you under a choice of one of two 13 * This software is available to you under a choice of one of two
13 * licenses. You may choose to be licensed under the terms of the GNU 14 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index a00ccd1ca333..b6d81a86c976 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. 2 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 68ebb7fe072a..7827baf455a1 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. 2 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 5278916c3103..2c4941d0656b 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. 2 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. 3 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
4 * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
4 * 5 *
5 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -292,10 +293,10 @@ out_err:
292} 293}
293 294
294/** 295/**
295 * releases the FMR pool, QP and CMA ID objects, returns 0 on success, 296 * releases the FMR pool and QP objects, returns 0 on success,
296 * -1 on failure 297 * -1 on failure
297 */ 298 */
298static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id) 299static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
299{ 300{
300 int cq_index; 301 int cq_index;
301 BUG_ON(ib_conn == NULL); 302 BUG_ON(ib_conn == NULL);
@@ -314,13 +315,9 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
314 315
315 rdma_destroy_qp(ib_conn->cma_id); 316 rdma_destroy_qp(ib_conn->cma_id);
316 } 317 }
317 /* if cma handler context, the caller acts s.t the cma destroy the id */
318 if (ib_conn->cma_id != NULL && can_destroy_id)
319 rdma_destroy_id(ib_conn->cma_id);
320 318
321 ib_conn->fmr_pool = NULL; 319 ib_conn->fmr_pool = NULL;
322 ib_conn->qp = NULL; 320 ib_conn->qp = NULL;
323 ib_conn->cma_id = NULL;
324 kfree(ib_conn->page_vec); 321 kfree(ib_conn->page_vec);
325 322
326 if (ib_conn->login_buf) { 323 if (ib_conn->login_buf) {
@@ -415,11 +412,16 @@ static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id)
415 list_del(&ib_conn->conn_list); 412 list_del(&ib_conn->conn_list);
416 mutex_unlock(&ig.connlist_mutex); 413 mutex_unlock(&ig.connlist_mutex);
417 iser_free_rx_descriptors(ib_conn); 414 iser_free_rx_descriptors(ib_conn);
418 iser_free_ib_conn_res(ib_conn, can_destroy_id); 415 iser_free_ib_conn_res(ib_conn);
419 ib_conn->device = NULL; 416 ib_conn->device = NULL;
420 /* on EVENT_ADDR_ERROR there's no device yet for this conn */ 417 /* on EVENT_ADDR_ERROR there's no device yet for this conn */
421 if (device != NULL) 418 if (device != NULL)
422 iser_device_try_release(device); 419 iser_device_try_release(device);
420 /* if called from the cma handler context, the caller actually destroys the id */
421 if (ib_conn->cma_id != NULL && can_destroy_id) {
422 rdma_destroy_id(ib_conn->cma_id);
423 ib_conn->cma_id = NULL;
424 }
423 iscsi_destroy_endpoint(ib_conn->ep); 425 iscsi_destroy_endpoint(ib_conn->ep);
424} 426}
425 427
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index b08ca7a9f76b..3f3f0416fbdd 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2227,6 +2227,27 @@ static void srpt_close_ch(struct srpt_rdma_ch *ch)
2227} 2227}
2228 2228
2229/** 2229/**
2230 * srpt_shutdown_session() - Whether or not a session may be shut down.
2231 */
2232static int srpt_shutdown_session(struct se_session *se_sess)
2233{
2234 struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
2235 unsigned long flags;
2236
2237 spin_lock_irqsave(&ch->spinlock, flags);
2238 if (ch->in_shutdown) {
2239 spin_unlock_irqrestore(&ch->spinlock, flags);
2240 return true;
2241 }
2242
2243 ch->in_shutdown = true;
2244 target_sess_cmd_list_set_waiting(se_sess);
2245 spin_unlock_irqrestore(&ch->spinlock, flags);
2246
2247 return true;
2248}
2249
2250/**
2230 * srpt_drain_channel() - Drain a channel by resetting the IB queue pair. 2251 * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
2231 * @cm_id: Pointer to the CM ID of the channel to be drained. 2252 * @cm_id: Pointer to the CM ID of the channel to be drained.
2232 * 2253 *
@@ -2264,6 +2285,9 @@ static void srpt_drain_channel(struct ib_cm_id *cm_id)
2264 spin_unlock_irq(&sdev->spinlock); 2285 spin_unlock_irq(&sdev->spinlock);
2265 2286
2266 if (do_reset) { 2287 if (do_reset) {
2288 if (ch->sess)
2289 srpt_shutdown_session(ch->sess);
2290
2267 ret = srpt_ch_qp_err(ch); 2291 ret = srpt_ch_qp_err(ch);
2268 if (ret < 0) 2292 if (ret < 0)
2269 printk(KERN_ERR "Setting queue pair in error state" 2293 printk(KERN_ERR "Setting queue pair in error state"
@@ -2328,7 +2352,7 @@ static void srpt_release_channel_work(struct work_struct *w)
2328 se_sess = ch->sess; 2352 se_sess = ch->sess;
2329 BUG_ON(!se_sess); 2353 BUG_ON(!se_sess);
2330 2354
2331 target_wait_for_sess_cmds(se_sess, 0); 2355 target_wait_for_sess_cmds(se_sess);
2332 2356
2333 transport_deregister_session_configfs(se_sess); 2357 transport_deregister_session_configfs(se_sess);
2334 transport_deregister_session(se_sess); 2358 transport_deregister_session(se_sess);
@@ -3467,14 +3491,6 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
3467} 3491}
3468 3492
3469/** 3493/**
3470 * srpt_shutdown_session() - Whether or not a session may be shut down.
3471 */
3472static int srpt_shutdown_session(struct se_session *se_sess)
3473{
3474 return true;
3475}
3476
3477/**
3478 * srpt_close_session() - Forcibly close a session. 3494 * srpt_close_session() - Forcibly close a session.
3479 * 3495 *
3480 * Callback function invoked by the TCM core to clean up sessions associated 3496 * Callback function invoked by the TCM core to clean up sessions associated
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 4caf55cda7b1..3dae156905de 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -325,6 +325,7 @@ struct srpt_rdma_ch {
325 u8 sess_name[36]; 325 u8 sess_name[36];
326 struct work_struct release_work; 326 struct work_struct release_work;
327 struct completion *release_done; 327 struct completion *release_done;
328 bool in_shutdown;
328}; 329};
329 330
330/** 331/**
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index d6cbfe9df218..fa061d46527f 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -137,7 +137,7 @@ static const struct xpad_device {
137 { 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, 137 { 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
138 { 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX }, 138 { 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX },
139 { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 }, 139 { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
140 { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", XTYPE_XBOX360 }, 140 { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
141 { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, 141 { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
142 { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, 142 { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
143 { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 }, 143 { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 62a2c0e4cc99..7ac9c9818d55 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -431,6 +431,7 @@ config KEYBOARD_TEGRA
431 431
432config KEYBOARD_OPENCORES 432config KEYBOARD_OPENCORES
433 tristate "OpenCores Keyboard Controller" 433 tristate "OpenCores Keyboard Controller"
434 depends on HAS_IOMEM
434 help 435 help
435 Say Y here if you want to use the OpenCores Keyboard Controller 436 Say Y here if you want to use the OpenCores Keyboard Controller
436 http://www.opencores.org/project,keyboardcontroller 437 http://www.opencores.org/project,keyboardcontroller
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 2f78538e09d0..b2420ae19e14 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1379,6 +1379,7 @@ static int synaptics_reconnect(struct psmouse *psmouse)
1379{ 1379{
1380 struct synaptics_data *priv = psmouse->private; 1380 struct synaptics_data *priv = psmouse->private;
1381 struct synaptics_data old_priv = *priv; 1381 struct synaptics_data old_priv = *priv;
1382 unsigned char param[2];
1382 int retry = 0; 1383 int retry = 0;
1383 int error; 1384 int error;
1384 1385
@@ -1394,6 +1395,7 @@ static int synaptics_reconnect(struct psmouse *psmouse)
1394 */ 1395 */
1395 ssleep(1); 1396 ssleep(1);
1396 } 1397 }
1398 ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_GETID);
1397 error = synaptics_detect(psmouse, 0); 1399 error = synaptics_detect(psmouse, 0);
1398 } while (error && ++retry < 3); 1400 } while (error && ++retry < 3);
1399 1401
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index aebfe3ecb945..1bda828f4b55 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -205,6 +205,7 @@ config SERIO_XILINX_XPS_PS2
205 205
206config SERIO_ALTERA_PS2 206config SERIO_ALTERA_PS2
207 tristate "Altera UP PS/2 controller" 207 tristate "Altera UP PS/2 controller"
208 depends on HAS_IOMEM
208 help 209 help
209 Say Y here if you have Altera University Program PS/2 ports. 210 Say Y here if you have Altera University Program PS/2 ports.
210 211
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 5c68e4486845..384fbcd0cee0 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -363,6 +363,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
363 case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */ 363 case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
364 case 0x160802: /* Cintiq 13HD Pro Pen */ 364 case 0x160802: /* Cintiq 13HD Pro Pen */
365 case 0x180802: /* DTH2242 Pen */ 365 case 0x180802: /* DTH2242 Pen */
366 case 0x100802: /* Intuos4/5 13HD/24HD General Pen */
366 wacom->tool[idx] = BTN_TOOL_PEN; 367 wacom->tool[idx] = BTN_TOOL_PEN;
367 break; 368 break;
368 369
@@ -401,6 +402,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
401 case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */ 402 case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
402 case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */ 403 case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
403 case 0x18080a: /* DTH2242 Eraser */ 404 case 0x18080a: /* DTH2242 Eraser */
405 case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
404 wacom->tool[idx] = BTN_TOOL_RUBBER; 406 wacom->tool[idx] = BTN_TOOL_RUBBER;
405 break; 407 break;
406 408
@@ -1966,7 +1968,8 @@ static const struct wacom_features wacom_features_0xF4 =
1966 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; 1968 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
1967static const struct wacom_features wacom_features_0xF8 = 1969static const struct wacom_features wacom_features_0xF8 =
1968 { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, /* Pen */ 1970 { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, /* Pen */
1969 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 }; 1971 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
1972 .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
1970static const struct wacom_features wacom_features_0xF6 = 1973static const struct wacom_features wacom_features_0xF6 =
1971 { "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */ 1974 { "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */
1972 .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10 }; 1975 .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10 };
@@ -2009,7 +2012,8 @@ static const struct wacom_features wacom_features_0xFA =
2009 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; 2012 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
2010static const struct wacom_features wacom_features_0x5B = 2013static const struct wacom_features wacom_features_0x5B =
2011 { "Wacom Cintiq 22HDT", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047, 2014 { "Wacom Cintiq 22HDT", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047,
2012 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e }; 2015 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
2016 .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e };
2013static const struct wacom_features wacom_features_0x5E = 2017static const struct wacom_features wacom_features_0x5E =
2014 { "Wacom Cintiq 22HDT", .type = WACOM_24HDT, 2018 { "Wacom Cintiq 22HDT", .type = WACOM_24HDT,
2015 .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5b, .touch_max = 10 }; 2019 .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5b, .touch_max = 10 };
@@ -2042,7 +2046,7 @@ static const struct wacom_features wacom_features_0xE5 =
2042static const struct wacom_features wacom_features_0xE6 = 2046static const struct wacom_features wacom_features_0xE6 =
2043 { "Wacom ISDv4 E6", WACOM_PKGLEN_TPC2FG, 27760, 15694, 255, 2047 { "Wacom ISDv4 E6", WACOM_PKGLEN_TPC2FG, 27760, 15694, 255,
2044 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 2048 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
2045 .touch_max = 2 }; 2049 .touch_max = 2 };
2046static const struct wacom_features wacom_features_0xEC = 2050static const struct wacom_features wacom_features_0xEC =
2047 { "Wacom ISDv4 EC", WACOM_PKGLEN_GRAPHIRE, 25710, 14500, 255, 2051 { "Wacom ISDv4 EC", WACOM_PKGLEN_GRAPHIRE, 25710, 14500, 255,
2048 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 2052 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index 8e60437ac85b..ae89d2609ab0 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -116,6 +116,15 @@ static int ttsp_send_command(struct cyttsp *ts, u8 cmd)
116 return ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd); 116 return ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd);
117} 117}
118 118
119static int cyttsp_handshake(struct cyttsp *ts)
120{
121 if (ts->pdata->use_hndshk)
122 return ttsp_send_command(ts,
123 ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);
124
125 return 0;
126}
127
119static int cyttsp_load_bl_regs(struct cyttsp *ts) 128static int cyttsp_load_bl_regs(struct cyttsp *ts)
120{ 129{
121 memset(&ts->bl_data, 0, sizeof(ts->bl_data)); 130 memset(&ts->bl_data, 0, sizeof(ts->bl_data));
@@ -133,7 +142,7 @@ static int cyttsp_exit_bl_mode(struct cyttsp *ts)
133 memcpy(bl_cmd, bl_command, sizeof(bl_command)); 142 memcpy(bl_cmd, bl_command, sizeof(bl_command));
134 if (ts->pdata->bl_keys) 143 if (ts->pdata->bl_keys)
135 memcpy(&bl_cmd[sizeof(bl_command) - CY_NUM_BL_KEYS], 144 memcpy(&bl_cmd[sizeof(bl_command) - CY_NUM_BL_KEYS],
136 ts->pdata->bl_keys, sizeof(bl_command)); 145 ts->pdata->bl_keys, CY_NUM_BL_KEYS);
137 146
138 error = ttsp_write_block_data(ts, CY_REG_BASE, 147 error = ttsp_write_block_data(ts, CY_REG_BASE,
139 sizeof(bl_cmd), bl_cmd); 148 sizeof(bl_cmd), bl_cmd);
@@ -167,6 +176,10 @@ static int cyttsp_set_operational_mode(struct cyttsp *ts)
167 if (error) 176 if (error)
168 return error; 177 return error;
169 178
179 error = cyttsp_handshake(ts);
180 if (error)
181 return error;
182
170 return ts->xy_data.act_dist == CY_ACT_DIST_DFLT ? -EIO : 0; 183 return ts->xy_data.act_dist == CY_ACT_DIST_DFLT ? -EIO : 0;
171} 184}
172 185
@@ -188,6 +201,10 @@ static int cyttsp_set_sysinfo_mode(struct cyttsp *ts)
188 if (error) 201 if (error)
189 return error; 202 return error;
190 203
204 error = cyttsp_handshake(ts);
205 if (error)
206 return error;
207
191 if (!ts->sysinfo_data.tts_verh && !ts->sysinfo_data.tts_verl) 208 if (!ts->sysinfo_data.tts_verh && !ts->sysinfo_data.tts_verl)
192 return -EIO; 209 return -EIO;
193 210
@@ -344,12 +361,9 @@ static irqreturn_t cyttsp_irq(int irq, void *handle)
 		goto out;
 
 	/* provide flow control handshake */
-	if (ts->pdata->use_hndshk) {
-		error = ttsp_send_command(ts,
-				ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);
-		if (error)
-			goto out;
-	}
+	error = cyttsp_handshake(ts);
+	if (error)
+		goto out;
 
 	if (unlikely(ts->state == CY_IDLE_STATE))
 		goto out;
diff --git a/drivers/input/touchscreen/cyttsp_core.h b/drivers/input/touchscreen/cyttsp_core.h
index 1aa3c6967e70..f1ebde369f86 100644
--- a/drivers/input/touchscreen/cyttsp_core.h
+++ b/drivers/input/touchscreen/cyttsp_core.h
@@ -67,8 +67,8 @@ struct cyttsp_xydata {
 /* TTSP System Information interface definition */
 struct cyttsp_sysinfo_data {
 	u8 hst_mode;
-	u8 mfg_cmd;
 	u8 mfg_stat;
+	u8 mfg_cmd;
 	u8 cid[3];
 	u8 tt_undef1;
 	u8 uid[8];
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 1760ceb68b7b..19ceaa60e0f4 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -705,7 +705,7 @@ static int gic_irq_domain_xlate(struct irq_domain *d,
 static int __cpuinit gic_secondary_init(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
-	if (action == CPU_STARTING)
+	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
 		gic_cpu_init(&gic_data[0]);
 	return NOTIFY_OK;
 }
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index 29889bbdcc6d..63b3d4eb0ef7 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -76,16 +76,10 @@ asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
 {
 	u32 irqnr;
 
-	do {
-		irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET);
-		if (irqnr != 0x7f) {
-			__raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR);
-			irqnr = irq_find_mapping(icoll_domain, irqnr);
-			handle_IRQ(irqnr, regs);
-			continue;
-		}
-		break;
-	} while (1);
+	irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET);
+	__raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR);
+	irqnr = irq_find_mapping(icoll_domain, irqnr);
+	handle_IRQ(irqnr, regs);
 }
 
 static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq,
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 065b7a31a478..47a52ab580d8 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -119,7 +119,7 @@ static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq,
 
 	/* Skip invalid IRQs, only register handlers for the real ones */
 	if (!(f->valid & BIT(hwirq)))
-		return -ENOTSUPP;
+		return -EPERM;
 	irq_set_chip_data(irq, f);
 	irq_set_chip_and_handler(irq, &f->chip,
 				 handle_level_irq);
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 884d11c7355f..2bbb00404cf5 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -197,7 +197,7 @@ static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq,
 
 	/* Skip invalid IRQs, only register handlers for the real ones */
 	if (!(v->valid_sources & (1 << hwirq)))
-		return -ENOTSUPP;
+		return -EPERM;
 	irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq);
 	irq_set_chip_data(irq, v->base);
 	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index 05c220d05e23..f950c9d29f3e 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -1,7 +1,6 @@
 
 config BCACHE
 	tristate "Block device as cache"
-	select CLOSURES
 	---help---
 	Allows a block device to be used as cache for other devices; uses
 	a btree for indexing and the layout is optimized for SSDs.
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 340146d7c17f..d3e15b42a4ab 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -1241,7 +1241,7 @@ void bch_cache_set_stop(struct cache_set *);
 struct cache_set *bch_cache_set_alloc(struct cache_sb *);
 void bch_btree_cache_free(struct cache_set *);
 int bch_btree_cache_alloc(struct cache_set *);
-void bch_writeback_init_cached_dev(struct cached_dev *);
+void bch_cached_dev_writeback_init(struct cached_dev *);
 void bch_moving_init_cache_set(struct cache_set *);
 
 void bch_cache_allocator_exit(struct cache *ca);
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index 64e679449c2a..b8730e714d69 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -93,24 +93,6 @@ static struct attribute *bch_stats_files[] = {
93}; 93};
94static KTYPE(bch_stats); 94static KTYPE(bch_stats);
95 95
96static void scale_accounting(unsigned long data);
97
98void bch_cache_accounting_init(struct cache_accounting *acc,
99 struct closure *parent)
100{
101 kobject_init(&acc->total.kobj, &bch_stats_ktype);
102 kobject_init(&acc->five_minute.kobj, &bch_stats_ktype);
103 kobject_init(&acc->hour.kobj, &bch_stats_ktype);
104 kobject_init(&acc->day.kobj, &bch_stats_ktype);
105
106 closure_init(&acc->cl, parent);
107 init_timer(&acc->timer);
108 acc->timer.expires = jiffies + accounting_delay;
109 acc->timer.data = (unsigned long) acc;
110 acc->timer.function = scale_accounting;
111 add_timer(&acc->timer);
112}
113
114int bch_cache_accounting_add_kobjs(struct cache_accounting *acc, 96int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
115 struct kobject *parent) 97 struct kobject *parent)
116{ 98{
@@ -244,3 +226,19 @@ void bch_mark_sectors_bypassed(struct search *s, int sectors)
244 atomic_add(sectors, &dc->accounting.collector.sectors_bypassed); 226 atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
245 atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed); 227 atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed);
246} 228}
229
230void bch_cache_accounting_init(struct cache_accounting *acc,
231 struct closure *parent)
232{
233 kobject_init(&acc->total.kobj, &bch_stats_ktype);
234 kobject_init(&acc->five_minute.kobj, &bch_stats_ktype);
235 kobject_init(&acc->hour.kobj, &bch_stats_ktype);
236 kobject_init(&acc->day.kobj, &bch_stats_ktype);
237
238 closure_init(&acc->cl, parent);
239 init_timer(&acc->timer);
240 acc->timer.expires = jiffies + accounting_delay;
241 acc->timer.data = (unsigned long) acc;
242 acc->timer.function = scale_accounting;
243 add_timer(&acc->timer);
244}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index c8046bc4aa57..f88e2b653a3f 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -634,11 +634,10 @@ static int open_dev(struct block_device *b, fmode_t mode)
634 return 0; 634 return 0;
635} 635}
636 636
637static int release_dev(struct gendisk *b, fmode_t mode) 637static void release_dev(struct gendisk *b, fmode_t mode)
638{ 638{
639 struct bcache_device *d = b->private_data; 639 struct bcache_device *d = b->private_data;
640 closure_put(&d->cl); 640 closure_put(&d->cl);
641 return 0;
642} 641}
643 642
644static int ioctl_dev(struct block_device *b, fmode_t mode, 643static int ioctl_dev(struct block_device *b, fmode_t mode,
@@ -732,8 +731,7 @@ static void bcache_device_free(struct bcache_device *d)
732 731
733 if (d->c) 732 if (d->c)
734 bcache_device_detach(d); 733 bcache_device_detach(d);
735 734 if (d->disk && d->disk->flags & GENHD_FL_UP)
736 if (d->disk)
737 del_gendisk(d->disk); 735 del_gendisk(d->disk);
738 if (d->disk && d->disk->queue) 736 if (d->disk && d->disk->queue)
739 blk_cleanup_queue(d->disk->queue); 737 blk_cleanup_queue(d->disk->queue);
@@ -756,12 +754,9 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
756 if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || 754 if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
757 !(d->unaligned_bvec = mempool_create_kmalloc_pool(1, 755 !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
758 sizeof(struct bio_vec) * BIO_MAX_PAGES)) || 756 sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
759 bio_split_pool_init(&d->bio_split_hook)) 757 bio_split_pool_init(&d->bio_split_hook) ||
760 758 !(d->disk = alloc_disk(1)) ||
761 return -ENOMEM; 759 !(q = blk_alloc_queue(GFP_KERNEL)))
762
763 d->disk = alloc_disk(1);
764 if (!d->disk)
765 return -ENOMEM; 760 return -ENOMEM;
766 761
767 snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor); 762 snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);
@@ -771,10 +766,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
771 d->disk->fops = &bcache_ops; 766 d->disk->fops = &bcache_ops;
772 d->disk->private_data = d; 767 d->disk->private_data = d;
773 768
774 q = blk_alloc_queue(GFP_KERNEL);
775 if (!q)
776 return -ENOMEM;
777
778 blk_queue_make_request(q, NULL); 769 blk_queue_make_request(q, NULL);
779 d->disk->queue = q; 770 d->disk->queue = q;
780 q->queuedata = d; 771 q->queuedata = d;
@@ -999,14 +990,17 @@ static void cached_dev_free(struct closure *cl)
999 990
1000 mutex_lock(&bch_register_lock); 991 mutex_lock(&bch_register_lock);
1001 992
1002 bd_unlink_disk_holder(dc->bdev, dc->disk.disk); 993 if (atomic_read(&dc->running))
994 bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
1003 bcache_device_free(&dc->disk); 995 bcache_device_free(&dc->disk);
1004 list_del(&dc->list); 996 list_del(&dc->list);
1005 997
1006 mutex_unlock(&bch_register_lock); 998 mutex_unlock(&bch_register_lock);
1007 999
1008 if (!IS_ERR_OR_NULL(dc->bdev)) { 1000 if (!IS_ERR_OR_NULL(dc->bdev)) {
1009 blk_sync_queue(bdev_get_queue(dc->bdev)); 1001 if (dc->bdev->bd_disk)
1002 blk_sync_queue(bdev_get_queue(dc->bdev));
1003
1010 blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 1004 blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1011 } 1005 }
1012 1006
@@ -1028,73 +1022,67 @@ static void cached_dev_flush(struct closure *cl)
1028 1022
1029static int cached_dev_init(struct cached_dev *dc, unsigned block_size) 1023static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
1030{ 1024{
1031 int err; 1025 int ret;
1032 struct io *io; 1026 struct io *io;
1033 1027 struct request_queue *q = bdev_get_queue(dc->bdev);
1034 closure_init(&dc->disk.cl, NULL);
1035 set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
1036 1028
1037 __module_get(THIS_MODULE); 1029 __module_get(THIS_MODULE);
1038 INIT_LIST_HEAD(&dc->list); 1030 INIT_LIST_HEAD(&dc->list);
1031 closure_init(&dc->disk.cl, NULL);
1032 set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
1039 kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype); 1033 kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
1040
1041 bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
1042
1043 err = bcache_device_init(&dc->disk, block_size);
1044 if (err)
1045 goto err;
1046
1047 spin_lock_init(&dc->io_lock);
1048 closure_init_unlocked(&dc->sb_write);
1049 INIT_WORK(&dc->detach, cached_dev_detach_finish); 1034 INIT_WORK(&dc->detach, cached_dev_detach_finish);
1035 closure_init_unlocked(&dc->sb_write);
1036 INIT_LIST_HEAD(&dc->io_lru);
1037 spin_lock_init(&dc->io_lock);
1038 bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
1050 1039
1051 dc->sequential_merge = true; 1040 dc->sequential_merge = true;
1052 dc->sequential_cutoff = 4 << 20; 1041 dc->sequential_cutoff = 4 << 20;
1053 1042
1054 INIT_LIST_HEAD(&dc->io_lru);
1055 dc->sb_bio.bi_max_vecs = 1;
1056 dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
1057
1058 for (io = dc->io; io < dc->io + RECENT_IO; io++) { 1043 for (io = dc->io; io < dc->io + RECENT_IO; io++) {
1059 list_add(&io->lru, &dc->io_lru); 1044 list_add(&io->lru, &dc->io_lru);
1060 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); 1045 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
1061 } 1046 }
1062 1047
1063 bch_writeback_init_cached_dev(dc); 1048 ret = bcache_device_init(&dc->disk, block_size);
1049 if (ret)
1050 return ret;
1051
1052 set_capacity(dc->disk.disk,
1053 dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
1054
1055 dc->disk.disk->queue->backing_dev_info.ra_pages =
1056 max(dc->disk.disk->queue->backing_dev_info.ra_pages,
1057 q->backing_dev_info.ra_pages);
1058
1059 bch_cached_dev_request_init(dc);
1060 bch_cached_dev_writeback_init(dc);
1064 return 0; 1061 return 0;
1065err:
1066 bcache_device_stop(&dc->disk);
1067 return err;
1068} 1062}
1069 1063
1070/* Cached device - bcache superblock */ 1064/* Cached device - bcache superblock */
1071 1065
1072static const char *register_bdev(struct cache_sb *sb, struct page *sb_page, 1066static void register_bdev(struct cache_sb *sb, struct page *sb_page,
1073 struct block_device *bdev, 1067 struct block_device *bdev,
1074 struct cached_dev *dc) 1068 struct cached_dev *dc)
1075{ 1069{
1076 char name[BDEVNAME_SIZE]; 1070 char name[BDEVNAME_SIZE];
1077 const char *err = "cannot allocate memory"; 1071 const char *err = "cannot allocate memory";
1078 struct gendisk *g;
1079 struct cache_set *c; 1072 struct cache_set *c;
1080 1073
1081 if (!dc || cached_dev_init(dc, sb->block_size << 9) != 0)
1082 return err;
1083
1084 memcpy(&dc->sb, sb, sizeof(struct cache_sb)); 1074 memcpy(&dc->sb, sb, sizeof(struct cache_sb));
1085 dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
1086 dc->bdev = bdev; 1075 dc->bdev = bdev;
1087 dc->bdev->bd_holder = dc; 1076 dc->bdev->bd_holder = dc;
1088 1077
1089 g = dc->disk.disk; 1078 bio_init(&dc->sb_bio);
1090 1079 dc->sb_bio.bi_max_vecs = 1;
1091 set_capacity(g, dc->bdev->bd_part->nr_sects - dc->sb.data_offset); 1080 dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
1092 1081 dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
1093 g->queue->backing_dev_info.ra_pages = 1082 get_page(sb_page);
1094 max(g->queue->backing_dev_info.ra_pages,
1095 bdev->bd_queue->backing_dev_info.ra_pages);
1096 1083
1097 bch_cached_dev_request_init(dc); 1084 if (cached_dev_init(dc, sb->block_size << 9))
1085 goto err;
1098 1086
1099 err = "error creating kobject"; 1087 err = "error creating kobject";
1100 if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj, 1088 if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
@@ -1103,6 +1091,8 @@ static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
1103 if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj)) 1091 if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
1104 goto err; 1092 goto err;
1105 1093
1094 pr_info("registered backing device %s", bdevname(bdev, name));
1095
1106 list_add(&dc->list, &uncached_devices); 1096 list_add(&dc->list, &uncached_devices);
1107 list_for_each_entry(c, &bch_cache_sets, list) 1097 list_for_each_entry(c, &bch_cache_sets, list)
1108 bch_cached_dev_attach(dc, c); 1098 bch_cached_dev_attach(dc, c);
@@ -1111,15 +1101,10 @@ static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
1111 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) 1101 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
1112 bch_cached_dev_run(dc); 1102 bch_cached_dev_run(dc);
1113 1103
1114 return NULL; 1104 return;
1115err: 1105err:
1116 kobject_put(&dc->disk.kobj);
1117 pr_notice("error opening %s: %s", bdevname(bdev, name), err); 1106 pr_notice("error opening %s: %s", bdevname(bdev, name), err);
1118 /* 1107 bcache_device_stop(&dc->disk);
1119 * Return NULL instead of an error because kobject_put() cleans
1120 * everything up
1121 */
1122 return NULL;
1123} 1108}
1124 1109
1125/* Flash only volumes */ 1110/* Flash only volumes */
@@ -1717,20 +1702,11 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
1717 size_t free; 1702 size_t free;
1718 struct bucket *b; 1703 struct bucket *b;
1719 1704
1720 if (!ca)
1721 return -ENOMEM;
1722
1723 __module_get(THIS_MODULE); 1705 __module_get(THIS_MODULE);
1724 kobject_init(&ca->kobj, &bch_cache_ktype); 1706 kobject_init(&ca->kobj, &bch_cache_ktype);
1725 1707
1726 memcpy(&ca->sb, sb, sizeof(struct cache_sb));
1727
1728 INIT_LIST_HEAD(&ca->discards); 1708 INIT_LIST_HEAD(&ca->discards);
1729 1709
1730 bio_init(&ca->sb_bio);
1731 ca->sb_bio.bi_max_vecs = 1;
1732 ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
1733
1734 bio_init(&ca->journal.bio); 1710 bio_init(&ca->journal.bio);
1735 ca->journal.bio.bi_max_vecs = 8; 1711 ca->journal.bio.bi_max_vecs = 8;
1736 ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs; 1712 ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
@@ -1742,18 +1718,17 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
1742 !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || 1718 !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
1743 !init_fifo(&ca->unused, free << 2, GFP_KERNEL) || 1719 !init_fifo(&ca->unused, free << 2, GFP_KERNEL) ||
1744 !init_heap(&ca->heap, free << 3, GFP_KERNEL) || 1720 !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
1745 !(ca->buckets = vmalloc(sizeof(struct bucket) * 1721 !(ca->buckets = vzalloc(sizeof(struct bucket) *
1746 ca->sb.nbuckets)) || 1722 ca->sb.nbuckets)) ||
1747 !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) * 1723 !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
1748 2, GFP_KERNEL)) || 1724 2, GFP_KERNEL)) ||
1749 !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) || 1725 !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
1750 !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) || 1726 !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) ||
1751 bio_split_pool_init(&ca->bio_split_hook)) 1727 bio_split_pool_init(&ca->bio_split_hook))
1752 goto err; 1728 return -ENOMEM;
1753 1729
1754 ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); 1730 ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
1755 1731
1756 memset(ca->buckets, 0, ca->sb.nbuckets * sizeof(struct bucket));
1757 for_each_bucket(b, ca) 1732 for_each_bucket(b, ca)
1758 atomic_set(&b->pin, 0); 1733 atomic_set(&b->pin, 0);
1759 1734
@@ -1766,22 +1741,28 @@ err:
1766 return -ENOMEM; 1741 return -ENOMEM;
1767} 1742}
1768 1743
1769static const char *register_cache(struct cache_sb *sb, struct page *sb_page, 1744static void register_cache(struct cache_sb *sb, struct page *sb_page,
1770 struct block_device *bdev, struct cache *ca) 1745 struct block_device *bdev, struct cache *ca)
1771{ 1746{
1772 char name[BDEVNAME_SIZE]; 1747 char name[BDEVNAME_SIZE];
1773 const char *err = "cannot allocate memory"; 1748 const char *err = "cannot allocate memory";
1774 1749
1775 if (cache_alloc(sb, ca) != 0) 1750 memcpy(&ca->sb, sb, sizeof(struct cache_sb));
1776 return err;
1777
1778 ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
1779 ca->bdev = bdev; 1751 ca->bdev = bdev;
1780 ca->bdev->bd_holder = ca; 1752 ca->bdev->bd_holder = ca;
1781 1753
1754 bio_init(&ca->sb_bio);
1755 ca->sb_bio.bi_max_vecs = 1;
1756 ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
1757 ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
1758 get_page(sb_page);
1759
1782 if (blk_queue_discard(bdev_get_queue(ca->bdev))) 1760 if (blk_queue_discard(bdev_get_queue(ca->bdev)))
1783 ca->discard = CACHE_DISCARD(&ca->sb); 1761 ca->discard = CACHE_DISCARD(&ca->sb);
1784 1762
1763 if (cache_alloc(sb, ca) != 0)
1764 goto err;
1765
1785 err = "error creating kobject"; 1766 err = "error creating kobject";
1786 if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) 1767 if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
1787 goto err; 1768 goto err;
@@ -1791,15 +1772,10 @@ static const char *register_cache(struct cache_sb *sb, struct page *sb_page,
1791 goto err; 1772 goto err;
1792 1773
1793 pr_info("registered cache device %s", bdevname(bdev, name)); 1774 pr_info("registered cache device %s", bdevname(bdev, name));
1794 1775 return;
1795 return NULL;
1796err: 1776err:
1777 pr_notice("error opening %s: %s", bdevname(bdev, name), err);
1797 kobject_put(&ca->kobj); 1778 kobject_put(&ca->kobj);
1798 pr_info("error opening %s: %s", bdevname(bdev, name), err);
1799 /* Return NULL instead of an error because kobject_put() cleans
1800 * everything up
1801 */
1802 return NULL;
1803} 1779}
1804 1780
1805/* Global interfaces/init */ 1781/* Global interfaces/init */
@@ -1833,12 +1809,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
1833 bdev = blkdev_get_by_path(strim(path), 1809 bdev = blkdev_get_by_path(strim(path),
1834 FMODE_READ|FMODE_WRITE|FMODE_EXCL, 1810 FMODE_READ|FMODE_WRITE|FMODE_EXCL,
1835 sb); 1811 sb);
1836 if (bdev == ERR_PTR(-EBUSY)) 1812 if (IS_ERR(bdev)) {
1837 err = "device busy"; 1813 if (bdev == ERR_PTR(-EBUSY))
1838 1814 err = "device busy";
1839 if (IS_ERR(bdev) ||
1840 set_blocksize(bdev, 4096))
1841 goto err; 1815 goto err;
1816 }
1817
1818 err = "failed to set blocksize";
1819 if (set_blocksize(bdev, 4096))
1820 goto err_close;
1842 1821
1843 err = read_super(sb, bdev, &sb_page); 1822 err = read_super(sb, bdev, &sb_page);
1844 if (err) 1823 if (err)
@@ -1846,33 +1825,33 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
1846 1825
1847 if (SB_IS_BDEV(sb)) { 1826 if (SB_IS_BDEV(sb)) {
1848 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); 1827 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
1828 if (!dc)
1829 goto err_close;
1849 1830
1850 err = register_bdev(sb, sb_page, bdev, dc); 1831 register_bdev(sb, sb_page, bdev, dc);
1851 } else { 1832 } else {
1852 struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); 1833 struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
1834 if (!ca)
1835 goto err_close;
1853 1836
1854 err = register_cache(sb, sb_page, bdev, ca); 1837 register_cache(sb, sb_page, bdev, ca);
1855 } 1838 }
1856 1839out:
1857 if (err) { 1840 if (sb_page)
1858 /* register_(bdev|cache) will only return an error if they
1859 * didn't get far enough to create the kobject - if they did,
1860 * the kobject destructor will do this cleanup.
1861 */
1862 put_page(sb_page); 1841 put_page(sb_page);
1863err_close:
1864 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1865err:
1866 if (attr != &ksysfs_register_quiet)
1867 pr_info("error opening %s: %s", path, err);
1868 ret = -EINVAL;
1869 }
1870
1871 kfree(sb); 1842 kfree(sb);
1872 kfree(path); 1843 kfree(path);
1873 mutex_unlock(&bch_register_lock); 1844 mutex_unlock(&bch_register_lock);
1874 module_put(THIS_MODULE); 1845 module_put(THIS_MODULE);
1875 return ret; 1846 return ret;
1847
1848err_close:
1849 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1850err:
1851 if (attr != &ksysfs_register_quiet)
1852 pr_info("error opening %s: %s", path, err);
1853 ret = -EINVAL;
1854 goto out;
1876} 1855}
1877 1856
1878static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) 1857static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 93e7e31a4bd3..2714ed3991d1 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -375,7 +375,7 @@ err:
 	refill_dirty(cl);
 }
 
-void bch_writeback_init_cached_dev(struct cached_dev *dc)
+void bch_cached_dev_writeback_init(struct cached_dev *dc)
 {
 	closure_init_unlocked(&dc->writeback);
 	init_rwsem(&dc->writeback_lock);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 681d1099a2d5..9b82377a833b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5268,8 +5268,8 @@ static void md_clean(struct mddev *mddev)
 
 static void __md_stop_writes(struct mddev *mddev)
 {
+	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 	if (mddev->sync_thread) {
-		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 		md_reap_sync_thread(mddev);
 	}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 55951182af73..6e17f8181c4b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -417,7 +417,17 @@ static void raid1_end_write_request(struct bio *bio, int error)
417 417
418 r1_bio->bios[mirror] = NULL; 418 r1_bio->bios[mirror] = NULL;
419 to_put = bio; 419 to_put = bio;
420 set_bit(R1BIO_Uptodate, &r1_bio->state); 420 /*
421 * Do not set R1BIO_Uptodate if the current device is
422 * rebuilding or Faulty. This is because we cannot use
423 * such device for properly reading the data back (we could
424 * potentially use it, if the current write would have felt
425 * before rdev->recovery_offset, but for simplicity we don't
426 * check this here.
427 */
428 if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
429 !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
430 set_bit(R1BIO_Uptodate, &r1_bio->state);
421 431
422 /* Maybe we can clear some bad blocks. */ 432 /* Maybe we can clear some bad blocks. */
423 if (is_badblock(conf->mirrors[mirror].rdev, 433 if (is_badblock(conf->mirrors[mirror].rdev,
@@ -870,17 +880,17 @@ static void allow_barrier(struct r1conf *conf)
870 wake_up(&conf->wait_barrier); 880 wake_up(&conf->wait_barrier);
871} 881}
872 882
873static void freeze_array(struct r1conf *conf) 883static void freeze_array(struct r1conf *conf, int extra)
874{ 884{
875 /* stop syncio and normal IO and wait for everything to 885 /* stop syncio and normal IO and wait for everything to
876 * go quite. 886 * go quite.
877 * We increment barrier and nr_waiting, and then 887 * We increment barrier and nr_waiting, and then
878 * wait until nr_pending match nr_queued+1 888 * wait until nr_pending match nr_queued+extra
879 * This is called in the context of one normal IO request 889 * This is called in the context of one normal IO request
880 * that has failed. Thus any sync request that might be pending 890 * that has failed. Thus any sync request that might be pending
881 * will be blocked by nr_pending, and we need to wait for 891 * will be blocked by nr_pending, and we need to wait for
882 * pending IO requests to complete or be queued for re-try. 892 * pending IO requests to complete or be queued for re-try.
883 * Thus the number queued (nr_queued) plus this request (1) 893 * Thus the number queued (nr_queued) plus this request (extra)
884 * must match the number of pending IOs (nr_pending) before 894 * must match the number of pending IOs (nr_pending) before
885 * we continue. 895 * we continue.
886 */ 896 */
@@ -888,7 +898,7 @@ static void freeze_array(struct r1conf *conf)
888 conf->barrier++; 898 conf->barrier++;
889 conf->nr_waiting++; 899 conf->nr_waiting++;
890 wait_event_lock_irq_cmd(conf->wait_barrier, 900 wait_event_lock_irq_cmd(conf->wait_barrier,
891 conf->nr_pending == conf->nr_queued+1, 901 conf->nr_pending == conf->nr_queued+extra,
892 conf->resync_lock, 902 conf->resync_lock,
893 flush_pending_writes(conf)); 903 flush_pending_writes(conf));
894 spin_unlock_irq(&conf->resync_lock); 904 spin_unlock_irq(&conf->resync_lock);
@@ -1544,8 +1554,8 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1544 * we wait for all outstanding requests to complete. 1554 * we wait for all outstanding requests to complete.
1545 */ 1555 */
1546 synchronize_sched(); 1556 synchronize_sched();
1547 raise_barrier(conf); 1557 freeze_array(conf, 0);
1548 lower_barrier(conf); 1558 unfreeze_array(conf);
1549 clear_bit(Unmerged, &rdev->flags); 1559 clear_bit(Unmerged, &rdev->flags);
1550 } 1560 }
1551 md_integrity_add_rdev(rdev, mddev); 1561 md_integrity_add_rdev(rdev, mddev);
@@ -1595,11 +1605,11 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1595 */ 1605 */
1596 struct md_rdev *repl = 1606 struct md_rdev *repl =
1597 conf->mirrors[conf->raid_disks + number].rdev; 1607 conf->mirrors[conf->raid_disks + number].rdev;
1598 raise_barrier(conf); 1608 freeze_array(conf, 0);
1599 clear_bit(Replacement, &repl->flags); 1609 clear_bit(Replacement, &repl->flags);
1600 p->rdev = repl; 1610 p->rdev = repl;
1601 conf->mirrors[conf->raid_disks + number].rdev = NULL; 1611 conf->mirrors[conf->raid_disks + number].rdev = NULL;
1602 lower_barrier(conf); 1612 unfreeze_array(conf);
1603 clear_bit(WantReplacement, &rdev->flags); 1613 clear_bit(WantReplacement, &rdev->flags);
1604 } else 1614 } else
1605 clear_bit(WantReplacement, &rdev->flags); 1615 clear_bit(WantReplacement, &rdev->flags);
@@ -2195,7 +2205,7 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2195 * frozen 2205 * frozen
2196 */ 2206 */
2197 if (mddev->ro == 0) { 2207 if (mddev->ro == 0) {
2198 freeze_array(conf); 2208 freeze_array(conf, 1);
2199 fix_read_error(conf, r1_bio->read_disk, 2209 fix_read_error(conf, r1_bio->read_disk,
2200 r1_bio->sector, r1_bio->sectors); 2210 r1_bio->sector, r1_bio->sectors);
2201 unfreeze_array(conf); 2211 unfreeze_array(conf);
@@ -2780,8 +2790,8 @@ static int run(struct mddev *mddev)
2780 return PTR_ERR(conf); 2790 return PTR_ERR(conf);
2781 2791
2782 if (mddev->queue) 2792 if (mddev->queue)
2783 blk_queue_max_write_same_sectors(mddev->queue, 2793 blk_queue_max_write_same_sectors(mddev->queue, 0);
2784 mddev->chunk_sectors); 2794
2785 rdev_for_each(rdev, mddev) { 2795 rdev_for_each(rdev, mddev) {
2786 if (!mddev->gendisk) 2796 if (!mddev->gendisk)
2787 continue; 2797 continue;
@@ -2963,7 +2973,7 @@ static int raid1_reshape(struct mddev *mddev)
2963 return -ENOMEM; 2973 return -ENOMEM;
2964 } 2974 }
2965 2975
2966 raise_barrier(conf); 2976 freeze_array(conf, 0);
2967 2977
2968 /* ok, everything is stopped */ 2978 /* ok, everything is stopped */
2969 oldpool = conf->r1bio_pool; 2979 oldpool = conf->r1bio_pool;
@@ -2994,7 +3004,7 @@ static int raid1_reshape(struct mddev *mddev)
2994 conf->raid_disks = mddev->raid_disks = raid_disks; 3004 conf->raid_disks = mddev->raid_disks = raid_disks;
2995 mddev->delta_disks = 0; 3005 mddev->delta_disks = 0;
2996 3006
2997 lower_barrier(conf); 3007 unfreeze_array(conf);
2998 3008
2999 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3009 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3000 md_wakeup_thread(mddev->thread); 3010 md_wakeup_thread(mddev->thread);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 59d4daa5f4c7..6ddae2501b9a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -490,7 +490,17 @@ static void raid10_end_write_request(struct bio *bio, int error)
490 sector_t first_bad; 490 sector_t first_bad;
491 int bad_sectors; 491 int bad_sectors;
492 492
493 set_bit(R10BIO_Uptodate, &r10_bio->state); 493 /*
494 * Do not set R10BIO_Uptodate if the current device is
495 * rebuilding or Faulty. This is because we cannot use
496 * such device for properly reading the data back (we could
497 * potentially use it, if the current write would have felt
498 * before rdev->recovery_offset, but for simplicity we don't
499 * check this here.
500 */
501 if (test_bit(In_sync, &rdev->flags) &&
502 !test_bit(Faulty, &rdev->flags))
503 set_bit(R10BIO_Uptodate, &r10_bio->state);
494 504
495 /* Maybe we can clear some bad blocks. */ 505 /* Maybe we can clear some bad blocks. */
496 if (is_badblock(rdev, 506 if (is_badblock(rdev,
@@ -1055,17 +1065,17 @@ static void allow_barrier(struct r10conf *conf)
1055 wake_up(&conf->wait_barrier); 1065 wake_up(&conf->wait_barrier);
1056} 1066}
1057 1067
1058static void freeze_array(struct r10conf *conf) 1068static void freeze_array(struct r10conf *conf, int extra)
1059{ 1069{
1060 /* stop syncio and normal IO and wait for everything to 1070 /* stop syncio and normal IO and wait for everything to
1061 * go quiet. 1071 * go quiet.
1062 * We increment barrier and nr_waiting, and then 1072 * We increment barrier and nr_waiting, and then
1063 * wait until nr_pending match nr_queued+1 1073 * wait until nr_pending match nr_queued+extra
1064 * This is called in the context of one normal IO request 1074 * This is called in the context of one normal IO request
1065 * that has failed. Thus any sync request that might be pending 1075 * that has failed. Thus any sync request that might be pending
1066 * will be blocked by nr_pending, and we need to wait for 1076 * will be blocked by nr_pending, and we need to wait for
1067 * pending IO requests to complete or be queued for re-try. 1077 * pending IO requests to complete or be queued for re-try.
1068 * Thus the number queued (nr_queued) plus this request (1) 1078 * Thus the number queued (nr_queued) plus this request (extra)
1069 * must match the number of pending IOs (nr_pending) before 1079 * must match the number of pending IOs (nr_pending) before
1070 * we continue. 1080 * we continue.
1071 */ 1081 */
@@ -1073,7 +1083,7 @@ static void freeze_array(struct r10conf *conf)
1073 conf->barrier++; 1083 conf->barrier++;
1074 conf->nr_waiting++; 1084 conf->nr_waiting++;
1075 wait_event_lock_irq_cmd(conf->wait_barrier, 1085 wait_event_lock_irq_cmd(conf->wait_barrier,
1076 conf->nr_pending == conf->nr_queued+1, 1086 conf->nr_pending == conf->nr_queued+extra,
1077 conf->resync_lock, 1087 conf->resync_lock,
1078 flush_pending_writes(conf)); 1088 flush_pending_writes(conf));
1079 1089
@@ -1837,8 +1847,8 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1837 * we wait for all outstanding requests to complete. 1847 * we wait for all outstanding requests to complete.
1838 */ 1848 */
1839 synchronize_sched(); 1849 synchronize_sched();
1840 raise_barrier(conf, 0); 1850 freeze_array(conf, 0);
1841 lower_barrier(conf); 1851 unfreeze_array(conf);
1842 clear_bit(Unmerged, &rdev->flags); 1852 clear_bit(Unmerged, &rdev->flags);
1843 } 1853 }
1844 md_integrity_add_rdev(rdev, mddev); 1854 md_integrity_add_rdev(rdev, mddev);
@@ -2612,7 +2622,7 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2612 r10_bio->devs[slot].bio = NULL; 2622 r10_bio->devs[slot].bio = NULL;
2613 2623
2614 if (mddev->ro == 0) { 2624 if (mddev->ro == 0) {
2615 freeze_array(conf); 2625 freeze_array(conf, 1);
2616 fix_read_error(conf, mddev, r10_bio); 2626 fix_read_error(conf, mddev, r10_bio);
2617 unfreeze_array(conf); 2627 unfreeze_array(conf);
2618 } else 2628 } else
@@ -3609,8 +3619,7 @@ static int run(struct mddev *mddev)
3609 if (mddev->queue) { 3619 if (mddev->queue) {
3610 blk_queue_max_discard_sectors(mddev->queue, 3620 blk_queue_max_discard_sectors(mddev->queue,
3611 mddev->chunk_sectors); 3621 mddev->chunk_sectors);
3612 blk_queue_max_write_same_sectors(mddev->queue, 3622 blk_queue_max_write_same_sectors(mddev->queue, 0);
3613 mddev->chunk_sectors);
3614 blk_queue_io_min(mddev->queue, chunk_size); 3623 blk_queue_io_min(mddev->queue, chunk_size);
3615 if (conf->geo.raid_disks % conf->geo.near_copies) 3624 if (conf->geo.raid_disks % conf->geo.near_copies)
3616 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); 3625 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9359828ffe26..05e4a105b9c7 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -664,6 +664,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
 				bi->bi_rw |= REQ_FLUSH;
 
+			bi->bi_vcnt = 1;
 			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			bi->bi_io_vec[0].bv_offset = 0;
 			bi->bi_size = STRIPE_SIZE;
@@ -701,6 +702,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			else
 				rbi->bi_sector = (sh->sector
 						  + rrdev->data_offset);
+			rbi->bi_vcnt = 1;
 			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			rbi->bi_io_vec[0].bv_offset = 0;
 			rbi->bi_size = STRIPE_SIZE;
@@ -5464,7 +5466,7 @@ static int run(struct mddev *mddev)
5464 if (mddev->major_version == 0 && 5466 if (mddev->major_version == 0 &&
5465 mddev->minor_version > 90) 5467 mddev->minor_version > 90)
5466 rdev->recovery_offset = reshape_offset; 5468 rdev->recovery_offset = reshape_offset;
5467 5469
5468 if (rdev->recovery_offset < reshape_offset) { 5470 if (rdev->recovery_offset < reshape_offset) {
5469 /* We need to check old and new layout */ 5471 /* We need to check old and new layout */
5470 if (!only_parity(rdev->raid_disk, 5472 if (!only_parity(rdev->raid_disk,
@@ -5587,6 +5589,8 @@ static int run(struct mddev *mddev)
 		 */
 		mddev->queue->limits.discard_zeroes_data = 0;
 
+		blk_queue_max_write_same_sectors(mddev->queue, 0);
+
 		rdev_for_each(rdev, mddev) {
 			disk_stack_limits(mddev->gendisk, rdev->bdev,
 					  rdev->data_offset << 9);
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 7f5a7cac6dc7..8270388e2a0d 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -136,9 +136,9 @@ config DVB_NET
136 136
137# This Kconfig option is used by both PCI and USB drivers 137# This Kconfig option is used by both PCI and USB drivers
138config TTPCI_EEPROM 138config TTPCI_EEPROM
139 tristate 139 tristate
140 depends on I2C 140 depends on I2C
141 default n 141 default n
142 142
143source "drivers/media/dvb-core/Kconfig" 143source "drivers/media/dvb-core/Kconfig"
144 144
@@ -189,6 +189,12 @@ config MEDIA_SUBDRV_AUTOSELECT
 
 	  If unsure say Y.
 
+config MEDIA_ATTACH
+	bool
+	depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT
+	depends on MODULES
+	default MODULES
+
 source "drivers/media/i2c/Kconfig"
 source "drivers/media/tuners/Kconfig"
 source "drivers/media/dvb-frontends/Kconfig"
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index cb52438e53ac..9eac5310942f 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -956,7 +956,7 @@ static int s5c73m3_oif_enum_frame_interval(struct v4l2_subdev *sd,
 
 	if (fie->pad != OIF_SOURCE_PAD)
 		return -EINVAL;
-	if (fie->index > ARRAY_SIZE(s5c73m3_intervals))
+	if (fie->index >= ARRAY_SIZE(s5c73m3_intervals))
 		return -EINVAL;
 
 	mutex_lock(&state->lock);
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index 27d62623274b..aba5b1c649e6 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -615,7 +615,7 @@ static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol,
615 int changed = 0; 615 int changed = 0;
616 u32 old; 616 u32 old;
617 617
618 if (core->board.audio_chip == V4L2_IDENT_WM8775) 618 if (core->sd_wm8775)
619 snd_cx88_wm8775_volume_put(kcontrol, value); 619 snd_cx88_wm8775_volume_put(kcontrol, value);
620 620
621 left = value->value.integer.value[0] & 0x3f; 621 left = value->value.integer.value[0] & 0x3f;
@@ -682,8 +682,7 @@ static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol,
682 vol ^= bit; 682 vol ^= bit;
683 cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol); 683 cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol);
684 /* Pass mute onto any WM8775 */ 684 /* Pass mute onto any WM8775 */
685 if ((core->board.audio_chip == V4L2_IDENT_WM8775) && 685 if (core->sd_wm8775 && ((1<<6) == bit))
686 ((1<<6) == bit))
687 wm8775_s_ctrl(core, V4L2_CID_AUDIO_MUTE, 0 != (vol & bit)); 686 wm8775_s_ctrl(core, V4L2_CID_AUDIO_MUTE, 0 != (vol & bit));
688 ret = 1; 687 ret = 1;
689 } 688 }
@@ -903,7 +902,7 @@ static int cx88_audio_initdev(struct pci_dev *pci,
903 goto error; 902 goto error;
904 903
905 /* If there's a wm8775 then add a Line-In ALC switch */ 904 /* If there's a wm8775 then add a Line-In ALC switch */
906 if (core->board.audio_chip == V4L2_IDENT_WM8775) 905 if (core->sd_wm8775)
907 snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip)); 906 snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip));
908 907
909 strcpy (card->driver, "CX88x"); 908 strcpy (card->driver, "CX88x");
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index 1b00615fd395..c7a9be1065c0 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -385,8 +385,7 @@ int cx88_video_mux(struct cx88_core *core, unsigned int input)
385 /* The wm8775 module has the "2" route hardwired into 385 /* The wm8775 module has the "2" route hardwired into
386 the initialization. Some boards may use different 386 the initialization. Some boards may use different
387 routes for different inputs. HVR-1300 surely does */ 387 routes for different inputs. HVR-1300 surely does */
388 if (core->board.audio_chip && 388 if (core->sd_wm8775) {
389 core->board.audio_chip == V4L2_IDENT_WM8775) {
390 call_all(core, audio, s_routing, 389 call_all(core, audio, s_routing,
391 INPUT(input).audioroute, 0, 0); 390 INPUT(input).audioroute, 0, 0);
392 } 391 }
@@ -771,8 +770,7 @@ static int video_open(struct file *file)
771 cx_write(MO_GP1_IO, core->board.radio.gpio1); 770 cx_write(MO_GP1_IO, core->board.radio.gpio1);
772 cx_write(MO_GP2_IO, core->board.radio.gpio2); 771 cx_write(MO_GP2_IO, core->board.radio.gpio2);
773 if (core->board.radio.audioroute) { 772 if (core->board.radio.audioroute) {
774 if(core->board.audio_chip && 773 if (core->sd_wm8775) {
775 core->board.audio_chip == V4L2_IDENT_WM8775) {
776 call_all(core, audio, s_routing, 774 call_all(core, audio, s_routing,
777 core->board.radio.audioroute, 0, 0); 775 core->board.radio.audioroute, 0, 0);
778 } 776 }
@@ -959,7 +957,7 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl)
959 u32 value,mask; 957 u32 value,mask;
960 958
961 /* Pass changes onto any WM8775 */ 959 /* Pass changes onto any WM8775 */
962 if (core->board.audio_chip == V4L2_IDENT_WM8775) { 960 if (core->sd_wm8775) {
963 switch (ctrl->id) { 961 switch (ctrl->id) {
964 case V4L2_CID_AUDIO_MUTE: 962 case V4L2_CID_AUDIO_MUTE:
965 wm8775_s_ctrl(core, ctrl->id, ctrl->val); 963 wm8775_s_ctrl(core, ctrl->id, ctrl->val);
diff --git a/drivers/media/pci/zoran/zoran.h b/drivers/media/pci/zoran/zoran.h
index ca2754a3cd63..5e040085c2ff 100644
--- a/drivers/media/pci/zoran/zoran.h
+++ b/drivers/media/pci/zoran/zoran.h
@@ -176,7 +176,7 @@ struct zoran_fh;
 
 struct zoran_mapping {
 	struct zoran_fh *fh;
-	int count;
+	atomic_t count;
 };
 
 struct zoran_buffer {
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index 1168a84a737d..d133c30c3fdc 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -2803,8 +2803,7 @@ static void
2803zoran_vm_open (struct vm_area_struct *vma) 2803zoran_vm_open (struct vm_area_struct *vma)
2804{ 2804{
2805 struct zoran_mapping *map = vma->vm_private_data; 2805 struct zoran_mapping *map = vma->vm_private_data;
2806 2806 atomic_inc(&map->count);
2807 map->count++;
2808} 2807}
2809 2808
2810static void 2809static void
@@ -2815,7 +2814,7 @@ zoran_vm_close (struct vm_area_struct *vma)
2815 struct zoran *zr = fh->zr; 2814 struct zoran *zr = fh->zr;
2816 int i; 2815 int i;
2817 2816
2818 if (--map->count > 0) 2817 if (!atomic_dec_and_mutex_lock(&map->count, &zr->resource_lock))
2819 return; 2818 return;
2820 2819
2821 dprintk(3, KERN_INFO "%s: %s - munmap(%s)\n", ZR_DEVNAME(zr), 2820 dprintk(3, KERN_INFO "%s: %s - munmap(%s)\n", ZR_DEVNAME(zr),
@@ -2828,14 +2827,16 @@ zoran_vm_close (struct vm_area_struct *vma)
2828 kfree(map); 2827 kfree(map);
2829 2828
2830 /* Any buffers still mapped? */ 2829 /* Any buffers still mapped? */
2831 for (i = 0; i < fh->buffers.num_buffers; i++) 2830 for (i = 0; i < fh->buffers.num_buffers; i++) {
2832 if (fh->buffers.buffer[i].map) 2831 if (fh->buffers.buffer[i].map) {
2832 mutex_unlock(&zr->resource_lock);
2833 return; 2833 return;
2834 }
2835 }
2834 2836
2835 dprintk(3, KERN_INFO "%s: %s - free %s buffers\n", ZR_DEVNAME(zr), 2837 dprintk(3, KERN_INFO "%s: %s - free %s buffers\n", ZR_DEVNAME(zr),
2836 __func__, mode_name(fh->map_mode)); 2838 __func__, mode_name(fh->map_mode));
2837 2839
2838 mutex_lock(&zr->resource_lock);
2839 2840
2840 if (fh->map_mode == ZORAN_MAP_MODE_RAW) { 2841 if (fh->map_mode == ZORAN_MAP_MODE_RAW) {
2841 if (fh->buffers.active != ZORAN_FREE) { 2842 if (fh->buffers.active != ZORAN_FREE) {
@@ -2939,7 +2940,7 @@ zoran_mmap (struct file *file,
2939 goto mmap_unlock_and_return; 2940 goto mmap_unlock_and_return;
2940 } 2941 }
2941 map->fh = fh; 2942 map->fh = fh;
2942 map->count = 1; 2943 atomic_set(&map->count, 1);
2943 2944
2944 vma->vm_ops = &zoran_vm_ops; 2945 vma->vm_ops = &zoran_vm_ops;
2945 vma->vm_flags |= VM_DONTEXPAND; 2946 vma->vm_flags |= VM_DONTEXPAND;
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index 48b8d7af386d..9d1481a60bd9 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -576,6 +576,14 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
 	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
 }
 
+static int vidioc_create_bufs(struct file *file, void *priv,
+			      struct v4l2_create_buffers *create)
+{
+	struct coda_ctx *ctx = fh_to_ctx(priv);
+
+	return v4l2_m2m_create_bufs(file, ctx->m2m_ctx, create);
+}
+
 static int vidioc_streamon(struct file *file, void *priv,
 			   enum v4l2_buf_type type)
 {
@@ -610,6 +618,7 @@ static const struct v4l2_ioctl_ops coda_ioctl_ops = {
 
 	.vidioc_qbuf		= vidioc_qbuf,
 	.vidioc_dqbuf		= vidioc_dqbuf,
+	.vidioc_create_bufs	= vidioc_create_bufs,
 
 	.vidioc_streamon	= vidioc_streamon,
 	.vidioc_streamoff	= vidioc_streamoff,
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index 1802f11e939f..d0b375cf565f 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -916,6 +916,21 @@ static int vpbe_display_s_fmt(struct file *file, void *priv,
 	   other video window */
 
 	layer->pix_fmt = *pixfmt;
+	if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12) {
+		struct vpbe_layer *otherlayer;
+
+		otherlayer = _vpbe_display_get_other_win_layer(disp_dev, layer);
+		/* if other layer is available, only
+		 * claim it, do not configure it
+		 */
+		ret = osd_device->ops.request_layer(osd_device,
+						    otherlayer->layer_info.id);
+		if (ret < 0) {
+			v4l2_err(&vpbe_dev->v4l2_dev,
+				 "Display Manager failed to allocate layer\n");
+			return -EBUSY;
+		}
+	}
 
 	/* Get osd layer config */
 	osd_device->ops.get_layer_config(osd_device,
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 8c50d3074866..93609091cb23 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -1837,7 +1837,7 @@ static int vpfe_probe(struct platform_device *pdev)
 	if (NULL == ccdc_cfg) {
 		v4l2_err(pdev->dev.driver,
 			"Memory allocation failed for ccdc_cfg\n");
-		goto probe_free_lock;
+		goto probe_free_dev_mem;
 	}
 
 	mutex_lock(&ccdc_lock);
@@ -1991,7 +1991,6 @@ probe_out_release_irq:
 	free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
 probe_free_ccdc_cfg_mem:
 	kfree(ccdc_cfg);
-probe_free_lock:
 	mutex_unlock(&ccdc_lock);
 probe_free_dev_mem:
 	kfree(vpfe_dev);
diff --git a/drivers/media/platform/exynos4-is/fimc-is-regs.c b/drivers/media/platform/exynos4-is/fimc-is-regs.c
index b0ff67bc1b05..d05eaa2c8490 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-regs.c
+++ b/drivers/media/platform/exynos4-is/fimc-is-regs.c
@@ -174,7 +174,7 @@ int fimc_is_hw_change_mode(struct fimc_is *is)
 		HIC_CAPTURE_STILL, HIC_CAPTURE_VIDEO,
 	};
 
-	if (WARN_ON(is->config_index > ARRAY_SIZE(cmd)))
+	if (WARN_ON(is->config_index >= ARRAY_SIZE(cmd)))
 		return -EINVAL;
 
 	mcuctl_write(cmd[is->config_index], is, MCUCTL_REG_ISSR(0));
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 47c6363d04e2..0741945b79ed 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -48,7 +48,6 @@ static char *fimc_is_clocks[ISS_CLKS_MAX] = {
48 [ISS_CLK_LITE0] = "lite0", 48 [ISS_CLK_LITE0] = "lite0",
49 [ISS_CLK_LITE1] = "lite1", 49 [ISS_CLK_LITE1] = "lite1",
50 [ISS_CLK_MPLL] = "mpll", 50 [ISS_CLK_MPLL] = "mpll",
51 [ISS_CLK_SYSREG] = "sysreg",
52 [ISS_CLK_ISP] = "isp", 51 [ISS_CLK_ISP] = "isp",
53 [ISS_CLK_DRC] = "drc", 52 [ISS_CLK_DRC] = "drc",
54 [ISS_CLK_FD] = "fd", 53 [ISS_CLK_FD] = "fd",
@@ -71,7 +70,6 @@ static void fimc_is_put_clocks(struct fimc_is *is)
71 for (i = 0; i < ISS_CLKS_MAX; i++) { 70 for (i = 0; i < ISS_CLKS_MAX; i++) {
72 if (IS_ERR(is->clocks[i])) 71 if (IS_ERR(is->clocks[i]))
73 continue; 72 continue;
74 clk_unprepare(is->clocks[i]);
75 clk_put(is->clocks[i]); 73 clk_put(is->clocks[i]);
76 is->clocks[i] = ERR_PTR(-EINVAL); 74 is->clocks[i] = ERR_PTR(-EINVAL);
77 } 75 }
@@ -90,12 +88,6 @@ static int fimc_is_get_clocks(struct fimc_is *is)
90 ret = PTR_ERR(is->clocks[i]); 88 ret = PTR_ERR(is->clocks[i]);
91 goto err; 89 goto err;
92 } 90 }
93 ret = clk_prepare(is->clocks[i]);
94 if (ret < 0) {
95 clk_put(is->clocks[i]);
96 is->clocks[i] = ERR_PTR(-EINVAL);
97 goto err;
98 }
99 } 91 }
100 92
101 return 0; 93 return 0;
@@ -103,7 +95,7 @@ err:
103 fimc_is_put_clocks(is); 95 fimc_is_put_clocks(is);
104 dev_err(&is->pdev->dev, "failed to get clock: %s\n", 96 dev_err(&is->pdev->dev, "failed to get clock: %s\n",
105 fimc_is_clocks[i]); 97 fimc_is_clocks[i]);
106 return -ENXIO; 98 return ret;
107} 99}
108 100
109static int fimc_is_setup_clocks(struct fimc_is *is) 101static int fimc_is_setup_clocks(struct fimc_is *is)
@@ -144,7 +136,7 @@ int fimc_is_enable_clocks(struct fimc_is *is)
144 for (i = 0; i < ISS_GATE_CLKS_MAX; i++) { 136 for (i = 0; i < ISS_GATE_CLKS_MAX; i++) {
145 if (IS_ERR(is->clocks[i])) 137 if (IS_ERR(is->clocks[i]))
146 continue; 138 continue;
147 ret = clk_enable(is->clocks[i]); 139 ret = clk_prepare_enable(is->clocks[i]);
148 if (ret < 0) { 140 if (ret < 0) {
149 dev_err(&is->pdev->dev, "clock %s enable failed\n", 141 dev_err(&is->pdev->dev, "clock %s enable failed\n",
150 fimc_is_clocks[i]); 142 fimc_is_clocks[i]);
@@ -163,7 +155,7 @@ void fimc_is_disable_clocks(struct fimc_is *is)
163 155
164 for (i = 0; i < ISS_GATE_CLKS_MAX; i++) { 156 for (i = 0; i < ISS_GATE_CLKS_MAX; i++) {
165 if (!IS_ERR(is->clocks[i])) { 157 if (!IS_ERR(is->clocks[i])) {
166 clk_disable(is->clocks[i]); 158 clk_disable_unprepare(is->clocks[i]);
167 pr_debug("disabled clock: %s\n", fimc_is_clocks[i]); 159 pr_debug("disabled clock: %s\n", fimc_is_clocks[i]);
168 } 160 }
169 } 161 }
@@ -326,6 +318,11 @@ int fimc_is_start_firmware(struct fimc_is *is)
326 struct device *dev = &is->pdev->dev; 318 struct device *dev = &is->pdev->dev;
327 int ret; 319 int ret;
328 320
321 if (is->fw.f_w == NULL) {
322 dev_err(dev, "firmware is not loaded\n");
323 return -EINVAL;
324 }
325
329 memcpy(is->memory.vaddr, is->fw.f_w->data, is->fw.f_w->size); 326 memcpy(is->memory.vaddr, is->fw.f_w->data, is->fw.f_w->size);
330 wmb(); 327 wmb();
331 328
@@ -837,23 +834,11 @@ static int fimc_is_probe(struct platform_device *pdev)
837 goto err_clk; 834 goto err_clk;
838 } 835 }
839 pm_runtime_enable(dev); 836 pm_runtime_enable(dev);
840 /*
841 * Enable only the ISP power domain, keep FIMC-IS clocks off until
842 * the whole clock tree is configured. The ISP power domain needs
843 * be active in order to acces any CMU_ISP clock registers.
844 */
845 ret = pm_runtime_get_sync(dev);
846 if (ret < 0)
847 goto err_irq;
848
849 ret = fimc_is_setup_clocks(is);
850 pm_runtime_put_sync(dev);
851 837
838 ret = pm_runtime_get_sync(dev);
852 if (ret < 0) 839 if (ret < 0)
853 goto err_irq; 840 goto err_irq;
854 841
855 is->clk_init = true;
856
857 is->alloc_ctx = vb2_dma_contig_init_ctx(dev); 842 is->alloc_ctx = vb2_dma_contig_init_ctx(dev);
858 if (IS_ERR(is->alloc_ctx)) { 843 if (IS_ERR(is->alloc_ctx)) {
859 ret = PTR_ERR(is->alloc_ctx); 844 ret = PTR_ERR(is->alloc_ctx);
@@ -875,6 +860,8 @@ static int fimc_is_probe(struct platform_device *pdev)
875 if (ret < 0) 860 if (ret < 0)
876 goto err_dfs; 861 goto err_dfs;
877 862
863 pm_runtime_put_sync(dev);
864
878 dev_dbg(dev, "FIMC-IS registered successfully\n"); 865 dev_dbg(dev, "FIMC-IS registered successfully\n");
879 return 0; 866 return 0;
880 867
@@ -894,9 +881,11 @@ err_clk:
894static int fimc_is_runtime_resume(struct device *dev) 881static int fimc_is_runtime_resume(struct device *dev)
895{ 882{
896 struct fimc_is *is = dev_get_drvdata(dev); 883 struct fimc_is *is = dev_get_drvdata(dev);
884 int ret;
897 885
898 if (!is->clk_init) 886 ret = fimc_is_setup_clocks(is);
899 return 0; 887 if (ret)
888 return ret;
900 889
901 return fimc_is_enable_clocks(is); 890 return fimc_is_enable_clocks(is);
902} 891}
@@ -905,9 +894,7 @@ static int fimc_is_runtime_suspend(struct device *dev)
905{ 894{
906 struct fimc_is *is = dev_get_drvdata(dev); 895 struct fimc_is *is = dev_get_drvdata(dev);
907 896
908 if (is->clk_init) 897 fimc_is_disable_clocks(is);
909 fimc_is_disable_clocks(is);
910
911 return 0; 898 return 0;
912} 899}
913 900
@@ -941,7 +928,8 @@ static int fimc_is_remove(struct platform_device *pdev)
941 vb2_dma_contig_cleanup_ctx(is->alloc_ctx); 928 vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
942 fimc_is_put_clocks(is); 929 fimc_is_put_clocks(is);
943 fimc_is_debugfs_remove(is); 930 fimc_is_debugfs_remove(is);
944 release_firmware(is->fw.f_w); 931 if (is->fw.f_w)
932 release_firmware(is->fw.f_w);
945 fimc_is_free_cpu_memory(is); 933 fimc_is_free_cpu_memory(is);
946 934
947 return 0; 935 return 0;
diff --git a/drivers/media/platform/exynos4-is/fimc-is.h b/drivers/media/platform/exynos4-is/fimc-is.h
index f5275a5b0156..d7db133b493f 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.h
+++ b/drivers/media/platform/exynos4-is/fimc-is.h
@@ -73,7 +73,6 @@ enum {
73 ISS_CLK_LITE0, 73 ISS_CLK_LITE0,
74 ISS_CLK_LITE1, 74 ISS_CLK_LITE1,
75 ISS_CLK_MPLL, 75 ISS_CLK_MPLL,
76 ISS_CLK_SYSREG,
77 ISS_CLK_ISP, 76 ISS_CLK_ISP,
78 ISS_CLK_DRC, 77 ISS_CLK_DRC,
79 ISS_CLK_FD, 78 ISS_CLK_FD,
@@ -265,7 +264,6 @@ struct fimc_is {
265 spinlock_t slock; 264 spinlock_t slock;
266 265
267 struct clk *clocks[ISS_CLKS_MAX]; 266 struct clk *clocks[ISS_CLKS_MAX];
268 bool clk_init;
269 void __iomem *regs; 267 void __iomem *regs;
270 void __iomem *pmu_regs; 268 void __iomem *pmu_regs;
271 int irq; 269 int irq;
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
index d63947f7b302..7ede30b5910f 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -138,7 +138,7 @@ static int fimc_isp_subdev_get_fmt(struct v4l2_subdev *sd,
138 return 0; 138 return 0;
139 } 139 }
140 140
141 mf->colorspace = V4L2_COLORSPACE_JPEG; 141 mf->colorspace = V4L2_COLORSPACE_SRGB;
142 142
143 mutex_lock(&isp->subdev_lock); 143 mutex_lock(&isp->subdev_lock);
144 __is_get_frame_size(is, &cur_fmt); 144 __is_get_frame_size(is, &cur_fmt);
@@ -194,7 +194,7 @@ static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd,
194 v4l2_dbg(1, debug, sd, "%s: pad%d: code: 0x%x, %dx%d\n", 194 v4l2_dbg(1, debug, sd, "%s: pad%d: code: 0x%x, %dx%d\n",
195 __func__, fmt->pad, mf->code, mf->width, mf->height); 195 __func__, fmt->pad, mf->code, mf->width, mf->height);
196 196
197 mf->colorspace = V4L2_COLORSPACE_JPEG; 197 mf->colorspace = V4L2_COLORSPACE_SRGB;
198 198
199 mutex_lock(&isp->subdev_lock); 199 mutex_lock(&isp->subdev_lock);
200 __isp_subdev_try_format(isp, fmt); 200 __isp_subdev_try_format(isp, fmt);
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index a2eda9d5ac87..254d70fe762a 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -746,7 +746,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
746 node = v4l2_of_get_next_endpoint(node, NULL); 746 node = v4l2_of_get_next_endpoint(node, NULL);
747 if (!node) { 747 if (!node) {
748 dev_err(&pdev->dev, "No port node at %s\n", 748 dev_err(&pdev->dev, "No port node at %s\n",
749 node->full_name); 749 pdev->dev.of_node->full_name);
750 return -EINVAL; 750 return -EINVAL;
751 } 751 }
752 /* Get port node and validate MIPI-CSI channel id. */ 752 /* Get port node and validate MIPI-CSI channel id. */
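
The one-line fix above stops the error message from dereferencing the endpoint pointer that was just found to be NULL and reports the parent OF node instead. A small sketch of the same rule, using an assumed parse helper rather than the driver's own:

/*
 * Sketch of the defect class: after a lookup returns NULL, the error
 * path must not dereference that same pointer; report the parent node,
 * which is known to be valid.
 */
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_parse_dt(struct platform_device *pdev)
{
	struct device_node *parent = pdev->dev.of_node;
	struct device_node *port;

	port = of_get_child_by_name(parent, "port");
	if (!port) {
		/* use the parent's name here, never port->full_name */
		dev_err(&pdev->dev, "no port node in %s\n", parent->full_name);
		return -EINVAL;
	}
	of_node_put(port);
	return 0;
}
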
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 477268a2415f..d338b19da544 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -2150,6 +2150,9 @@ static int __init omap_vout_probe(struct platform_device *pdev)
2150 struct omap_dss_device *def_display; 2150 struct omap_dss_device *def_display;
2151 struct omap2video_device *vid_dev = NULL; 2151 struct omap2video_device *vid_dev = NULL;
2152 2152
2153 if (omapdss_is_initialized() == false)
2154 return -EPROBE_DEFER;
2155
2153 ret = omapdss_compat_init(); 2156 ret = omapdss_compat_init();
2154 if (ret) { 2157 if (ret) {
2155 dev_err(&pdev->dev, "failed to init dss\n"); 2158 dev_err(&pdev->dev, "failed to init dss\n");
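
The two added lines defer probing until omapdss has initialised. A generic sketch of the -EPROBE_DEFER pattern, with an assumed readiness helper standing in for the real check:

/*
 * Sketch only: if a required framework is not ready yet, returning
 * -EPROBE_DEFER asks the driver core to retry probing later instead of
 * failing permanently.
 */
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/types.h>

static bool example_display_ready(void)
{
	return false;	/* stand-in for the real readiness query */
}

static int example_probe(struct platform_device *pdev)
{
	if (!example_display_ready())
		return -EPROBE_DEFER;	/* driver core will re-probe later */

	/* normal initialisation continues here */
	return 0;
}
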
diff --git a/drivers/media/platform/s3c-camif/camif-core.h b/drivers/media/platform/s3c-camif/camif-core.h
index 261134baa655..35d2fcdc0036 100644
--- a/drivers/media/platform/s3c-camif/camif-core.h
+++ b/drivers/media/platform/s3c-camif/camif-core.h
@@ -229,7 +229,7 @@ struct camif_vp {
229 unsigned int state; 229 unsigned int state;
230 u16 fmt_flags; 230 u16 fmt_flags;
231 u8 id; 231 u8 id;
232 u8 rotation; 232 u16 rotation;
233 u8 hflip; 233 u8 hflip;
234 u8 vflip; 234 u8 vflip;
235 unsigned int offset; 235 unsigned int offset;
diff --git a/drivers/media/platform/s5p-jpeg/Makefile b/drivers/media/platform/s5p-jpeg/Makefile
index ddc2900d88a2..d18cb5edd2d5 100644
--- a/drivers/media/platform/s5p-jpeg/Makefile
+++ b/drivers/media/platform/s5p-jpeg/Makefile
@@ -1,2 +1,2 @@
1s5p-jpeg-objs := jpeg-core.o 1s5p-jpeg-objs := jpeg-core.o
2obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) := s5p-jpeg.o 2obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg.o
diff --git a/drivers/media/platform/s5p-mfc/Makefile b/drivers/media/platform/s5p-mfc/Makefile
index 379008c6d09a..15f59b324fef 100644
--- a/drivers/media/platform/s5p-mfc/Makefile
+++ b/drivers/media/platform/s5p-mfc/Makefile
@@ -1,4 +1,4 @@
1obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) := s5p-mfc.o 1obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc.o
2s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o 2s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o
3s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o 3s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o
4s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_pm.o 4s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_pm.o
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 01f9ae0dadb0..d12faa691af8 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -397,7 +397,7 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
397leave_handle_frame: 397leave_handle_frame:
398 spin_unlock_irqrestore(&dev->irqlock, flags); 398 spin_unlock_irqrestore(&dev->irqlock, flags);
399 if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING) 399 if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
400 || ctx->dst_queue_cnt < ctx->dpb_count) 400 || ctx->dst_queue_cnt < ctx->pb_count)
401 clear_work_bit(ctx); 401 clear_work_bit(ctx);
402 s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); 402 s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
403 wake_up_ctx(ctx, reason, err); 403 wake_up_ctx(ctx, reason, err);
@@ -473,7 +473,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
473 473
474 s5p_mfc_hw_call(dev->mfc_ops, dec_calc_dpb_size, ctx); 474 s5p_mfc_hw_call(dev->mfc_ops, dec_calc_dpb_size, ctx);
475 475
476 ctx->dpb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count, 476 ctx->pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count,
477 dev); 477 dev);
478 ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count, 478 ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count,
479 dev); 479 dev);
@@ -562,7 +562,7 @@ static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx,
562 struct s5p_mfc_dev *dev = ctx->dev; 562 struct s5p_mfc_dev *dev = ctx->dev;
563 struct s5p_mfc_buf *mb_entry; 563 struct s5p_mfc_buf *mb_entry;
564 564
565 mfc_debug(2, "Stream completed"); 565 mfc_debug(2, "Stream completed\n");
566 566
567 s5p_mfc_clear_int_flags(dev); 567 s5p_mfc_clear_int_flags(dev);
568 ctx->int_type = reason; 568 ctx->int_type = reason;
@@ -1362,7 +1362,6 @@ static struct s5p_mfc_variant mfc_drvdata_v5 = {
1362 .port_num = MFC_NUM_PORTS, 1362 .port_num = MFC_NUM_PORTS,
1363 .buf_size = &buf_size_v5, 1363 .buf_size = &buf_size_v5,
1364 .buf_align = &mfc_buf_align_v5, 1364 .buf_align = &mfc_buf_align_v5,
1365 .mclk_name = "sclk_mfc",
1366 .fw_name = "s5p-mfc.fw", 1365 .fw_name = "s5p-mfc.fw",
1367}; 1366};
1368 1367
@@ -1389,7 +1388,6 @@ static struct s5p_mfc_variant mfc_drvdata_v6 = {
1389 .port_num = MFC_NUM_PORTS_V6, 1388 .port_num = MFC_NUM_PORTS_V6,
1390 .buf_size = &buf_size_v6, 1389 .buf_size = &buf_size_v6,
1391 .buf_align = &mfc_buf_align_v6, 1390 .buf_align = &mfc_buf_align_v6,
1392 .mclk_name = "aclk_333",
1393 .fw_name = "s5p-mfc-v6.fw", 1391 .fw_name = "s5p-mfc-v6.fw",
1394}; 1392};
1395 1393
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index 202d1d7a37a8..ef4074cd5316 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -138,6 +138,7 @@ enum s5p_mfc_inst_state {
138 MFCINST_INIT = 100, 138 MFCINST_INIT = 100,
139 MFCINST_GOT_INST, 139 MFCINST_GOT_INST,
140 MFCINST_HEAD_PARSED, 140 MFCINST_HEAD_PARSED,
141 MFCINST_HEAD_PRODUCED,
141 MFCINST_BUFS_SET, 142 MFCINST_BUFS_SET,
142 MFCINST_RUNNING, 143 MFCINST_RUNNING,
143 MFCINST_FINISHING, 144 MFCINST_FINISHING,
@@ -231,7 +232,6 @@ struct s5p_mfc_variant {
231 unsigned int port_num; 232 unsigned int port_num;
232 struct s5p_mfc_buf_size *buf_size; 233 struct s5p_mfc_buf_size *buf_size;
233 struct s5p_mfc_buf_align *buf_align; 234 struct s5p_mfc_buf_align *buf_align;
234 char *mclk_name;
235 char *fw_name; 235 char *fw_name;
236}; 236};
237 237
@@ -438,7 +438,7 @@ struct s5p_mfc_enc_params {
438 u32 rc_framerate_num; 438 u32 rc_framerate_num;
439 u32 rc_framerate_denom; 439 u32 rc_framerate_denom;
440 440
441 union { 441 struct {
442 struct s5p_mfc_h264_enc_params h264; 442 struct s5p_mfc_h264_enc_params h264;
443 struct s5p_mfc_mpeg4_enc_params mpeg4; 443 struct s5p_mfc_mpeg4_enc_params mpeg4;
444 } codec; 444 } codec;
@@ -602,7 +602,7 @@ struct s5p_mfc_ctx {
602 int after_packed_pb; 602 int after_packed_pb;
603 int sei_fp_parse; 603 int sei_fp_parse;
604 604
605 int dpb_count; 605 int pb_count;
606 int total_dpb_count; 606 int total_dpb_count;
607 int mv_count; 607 int mv_count;
608 /* Buffers */ 608 /* Buffers */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
index 2e5f30b40dea..dc1fc94a488d 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -38,7 +38,7 @@ int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev)
38 dev->fw_virt_addr = dma_alloc_coherent(dev->mem_dev_l, dev->fw_size, 38 dev->fw_virt_addr = dma_alloc_coherent(dev->mem_dev_l, dev->fw_size,
39 &dev->bank1, GFP_KERNEL); 39 &dev->bank1, GFP_KERNEL);
40 40
41 if (IS_ERR(dev->fw_virt_addr)) { 41 if (IS_ERR_OR_NULL(dev->fw_virt_addr)) {
42 dev->fw_virt_addr = NULL; 42 dev->fw_virt_addr = NULL;
43 mfc_err("Allocating bitprocessor buffer failed\n"); 43 mfc_err("Allocating bitprocessor buffer failed\n");
44 return -ENOMEM; 44 return -ENOMEM;
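
The check above is widened because dma_alloc_coherent() signals failure by returning NULL, not an ERR_PTR() value, so an IS_ERR()-only test never fires. A minimal sketch of the expected check, with illustrative names:

/*
 * Sketch: dma_alloc_coherent() returns NULL on failure, so test for
 * NULL (or use IS_ERR_OR_NULL() defensively, as the patch does).
 */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *example_alloc_fw_buffer(struct device *dev, size_t size,
				     dma_addr_t *dma)
{
	void *vaddr = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

	if (!vaddr)		/* NULL, not ERR_PTR(), signals failure */
		return NULL;

	return vaddr;
}
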
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h b/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
index bd5cd4ae993c..8e608f5aa0d7 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
@@ -30,8 +30,8 @@ extern int debug;
30#define mfc_debug(level, fmt, args...) 30#define mfc_debug(level, fmt, args...)
31#endif 31#endif
32 32
33#define mfc_debug_enter() mfc_debug(5, "enter") 33#define mfc_debug_enter() mfc_debug(5, "enter\n")
34#define mfc_debug_leave() mfc_debug(5, "leave") 34#define mfc_debug_leave() mfc_debug(5, "leave\n")
35 35
36#define mfc_err(fmt, args...) \ 36#define mfc_err(fmt, args...) \
37 do { \ 37 do { \
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 4af53bd2f182..00b07032f4f0 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -210,11 +210,11 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
210 /* Context is to decode a frame */ 210 /* Context is to decode a frame */
211 if (ctx->src_queue_cnt >= 1 && 211 if (ctx->src_queue_cnt >= 1 &&
212 ctx->state == MFCINST_RUNNING && 212 ctx->state == MFCINST_RUNNING &&
213 ctx->dst_queue_cnt >= ctx->dpb_count) 213 ctx->dst_queue_cnt >= ctx->pb_count)
214 return 1; 214 return 1;
215 /* Context is to return last frame */ 215 /* Context is to return last frame */
216 if (ctx->state == MFCINST_FINISHING && 216 if (ctx->state == MFCINST_FINISHING &&
217 ctx->dst_queue_cnt >= ctx->dpb_count) 217 ctx->dst_queue_cnt >= ctx->pb_count)
218 return 1; 218 return 1;
219 /* Context is to set buffers */ 219 /* Context is to set buffers */
220 if (ctx->src_queue_cnt >= 1 && 220 if (ctx->src_queue_cnt >= 1 &&
@@ -224,7 +224,7 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
224 /* Resolution change */ 224 /* Resolution change */
225 if ((ctx->state == MFCINST_RES_CHANGE_INIT || 225 if ((ctx->state == MFCINST_RES_CHANGE_INIT ||
226 ctx->state == MFCINST_RES_CHANGE_FLUSH) && 226 ctx->state == MFCINST_RES_CHANGE_FLUSH) &&
227 ctx->dst_queue_cnt >= ctx->dpb_count) 227 ctx->dst_queue_cnt >= ctx->pb_count)
228 return 1; 228 return 1;
229 if (ctx->state == MFCINST_RES_CHANGE_END && 229 if (ctx->state == MFCINST_RES_CHANGE_END &&
230 ctx->src_queue_cnt >= 1) 230 ctx->src_queue_cnt >= 1)
@@ -537,7 +537,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
537 mfc_err("vb2_reqbufs on capture failed\n"); 537 mfc_err("vb2_reqbufs on capture failed\n");
538 return ret; 538 return ret;
539 } 539 }
540 if (reqbufs->count < ctx->dpb_count) { 540 if (reqbufs->count < ctx->pb_count) {
541 mfc_err("Not enough buffers allocated\n"); 541 mfc_err("Not enough buffers allocated\n");
542 reqbufs->count = 0; 542 reqbufs->count = 0;
543 s5p_mfc_clock_on(); 543 s5p_mfc_clock_on();
@@ -751,7 +751,7 @@ static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl)
751 case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: 751 case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
752 if (ctx->state >= MFCINST_HEAD_PARSED && 752 if (ctx->state >= MFCINST_HEAD_PARSED &&
753 ctx->state < MFCINST_ABORT) { 753 ctx->state < MFCINST_ABORT) {
754 ctrl->val = ctx->dpb_count; 754 ctrl->val = ctx->pb_count;
755 break; 755 break;
756 } else if (ctx->state != MFCINST_INIT) { 756 } else if (ctx->state != MFCINST_INIT) {
757 v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n"); 757 v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
@@ -763,7 +763,7 @@ static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl)
763 S5P_MFC_R2H_CMD_SEQ_DONE_RET, 0); 763 S5P_MFC_R2H_CMD_SEQ_DONE_RET, 0);
764 if (ctx->state >= MFCINST_HEAD_PARSED && 764 if (ctx->state >= MFCINST_HEAD_PARSED &&
765 ctx->state < MFCINST_ABORT) { 765 ctx->state < MFCINST_ABORT) {
766 ctrl->val = ctx->dpb_count; 766 ctrl->val = ctx->pb_count;
767 } else { 767 } else {
768 v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n"); 768 v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
769 return -EINVAL; 769 return -EINVAL;
@@ -924,10 +924,10 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
924 /* Output plane count is 2 - one for Y and one for CbCr */ 924 /* Output plane count is 2 - one for Y and one for CbCr */
925 *plane_count = 2; 925 *plane_count = 2;
926 /* Setup buffer count */ 926 /* Setup buffer count */
927 if (*buf_count < ctx->dpb_count) 927 if (*buf_count < ctx->pb_count)
928 *buf_count = ctx->dpb_count; 928 *buf_count = ctx->pb_count;
929 if (*buf_count > ctx->dpb_count + MFC_MAX_EXTRA_DPB) 929 if (*buf_count > ctx->pb_count + MFC_MAX_EXTRA_DPB)
930 *buf_count = ctx->dpb_count + MFC_MAX_EXTRA_DPB; 930 *buf_count = ctx->pb_count + MFC_MAX_EXTRA_DPB;
931 if (*buf_count > MFC_MAX_BUFFERS) 931 if (*buf_count > MFC_MAX_BUFFERS)
932 *buf_count = MFC_MAX_BUFFERS; 932 *buf_count = MFC_MAX_BUFFERS;
933 } else { 933 } else {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 4f6b553c4b2d..2549967b2f85 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -592,7 +592,7 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
592 return 1; 592 return 1;
593 /* context is ready to encode a frame */ 593 /* context is ready to encode a frame */
594 if ((ctx->state == MFCINST_RUNNING || 594 if ((ctx->state == MFCINST_RUNNING ||
595 ctx->state == MFCINST_HEAD_PARSED) && 595 ctx->state == MFCINST_HEAD_PRODUCED) &&
596 ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1) 596 ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1)
597 return 1; 597 return 1;
598 /* context is ready to encode remaining frames */ 598 /* context is ready to encode remaining frames */
@@ -649,6 +649,7 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx)
649 struct s5p_mfc_enc_params *p = &ctx->enc_params; 649 struct s5p_mfc_enc_params *p = &ctx->enc_params;
650 struct s5p_mfc_buf *dst_mb; 650 struct s5p_mfc_buf *dst_mb;
651 unsigned long flags; 651 unsigned long flags;
652 unsigned int enc_pb_count;
652 653
653 if (p->seq_hdr_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) { 654 if (p->seq_hdr_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) {
654 spin_lock_irqsave(&dev->irqlock, flags); 655 spin_lock_irqsave(&dev->irqlock, flags);
@@ -661,18 +662,19 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx)
661 vb2_buffer_done(dst_mb->b, VB2_BUF_STATE_DONE); 662 vb2_buffer_done(dst_mb->b, VB2_BUF_STATE_DONE);
662 spin_unlock_irqrestore(&dev->irqlock, flags); 663 spin_unlock_irqrestore(&dev->irqlock, flags);
663 } 664 }
664 if (IS_MFCV6(dev)) { 665
665 ctx->state = MFCINST_HEAD_PARSED; /* for INIT_BUFFER cmd */ 666 if (!IS_MFCV6(dev)) {
666 } else {
667 ctx->state = MFCINST_RUNNING; 667 ctx->state = MFCINST_RUNNING;
668 if (s5p_mfc_ctx_ready(ctx)) 668 if (s5p_mfc_ctx_ready(ctx))
669 set_work_bit_irqsave(ctx); 669 set_work_bit_irqsave(ctx);
670 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); 670 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
671 } 671 } else {
672 672 enc_pb_count = s5p_mfc_hw_call(dev->mfc_ops,
673 if (IS_MFCV6(dev))
674 ctx->dpb_count = s5p_mfc_hw_call(dev->mfc_ops,
675 get_enc_dpb_count, dev); 673 get_enc_dpb_count, dev);
674 if (ctx->pb_count < enc_pb_count)
675 ctx->pb_count = enc_pb_count;
676 ctx->state = MFCINST_HEAD_PRODUCED;
677 }
676 678
677 return 0; 679 return 0;
678} 680}
@@ -717,9 +719,9 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
717 719
718 slice_type = s5p_mfc_hw_call(dev->mfc_ops, get_enc_slice_type, dev); 720 slice_type = s5p_mfc_hw_call(dev->mfc_ops, get_enc_slice_type, dev);
719 strm_size = s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev); 721 strm_size = s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev);
720 mfc_debug(2, "Encoded slice type: %d", slice_type); 722 mfc_debug(2, "Encoded slice type: %d\n", slice_type);
721 mfc_debug(2, "Encoded stream size: %d", strm_size); 723 mfc_debug(2, "Encoded stream size: %d\n", strm_size);
722 mfc_debug(2, "Display order: %d", 724 mfc_debug(2, "Display order: %d\n",
723 mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT)); 725 mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT));
724 spin_lock_irqsave(&dev->irqlock, flags); 726 spin_lock_irqsave(&dev->irqlock, flags);
725 if (slice_type >= 0) { 727 if (slice_type >= 0) {
@@ -1055,15 +1057,13 @@ static int vidioc_reqbufs(struct file *file, void *priv,
1055 } 1057 }
1056 ctx->capture_state = QUEUE_BUFS_REQUESTED; 1058 ctx->capture_state = QUEUE_BUFS_REQUESTED;
1057 1059
1058 if (!IS_MFCV6(dev)) { 1060 ret = s5p_mfc_hw_call(ctx->dev->mfc_ops,
1059 ret = s5p_mfc_hw_call(ctx->dev->mfc_ops, 1061 alloc_codec_buffers, ctx);
1060 alloc_codec_buffers, ctx); 1062 if (ret) {
1061 if (ret) { 1063 mfc_err("Failed to allocate encoding buffers\n");
1062 mfc_err("Failed to allocate encoding buffers\n"); 1064 reqbufs->count = 0;
1063 reqbufs->count = 0; 1065 ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
1064 ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); 1066 return -ENOMEM;
1065 return -ENOMEM;
1066 }
1067 } 1067 }
1068 } else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { 1068 } else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
1069 if (ctx->output_state != QUEUE_FREE) { 1069 if (ctx->output_state != QUEUE_FREE) {
@@ -1071,6 +1071,19 @@ static int vidioc_reqbufs(struct file *file, void *priv,
1071 ctx->output_state); 1071 ctx->output_state);
1072 return -EINVAL; 1072 return -EINVAL;
1073 } 1073 }
1074
1075 if (IS_MFCV6(dev)) {
1076 /* Check for min encoder buffers */
1077 if (ctx->pb_count &&
1078 (reqbufs->count < ctx->pb_count)) {
1079 reqbufs->count = ctx->pb_count;
1080 mfc_debug(2, "Minimum %d output buffers needed\n",
1081 ctx->pb_count);
1082 } else {
1083 ctx->pb_count = reqbufs->count;
1084 }
1085 }
1086
1074 ret = vb2_reqbufs(&ctx->vq_src, reqbufs); 1087 ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
1075 if (ret != 0) { 1088 if (ret != 0) {
1076 mfc_err("error in vb2_reqbufs() for E(S)\n"); 1089 mfc_err("error in vb2_reqbufs() for E(S)\n");
@@ -1533,14 +1546,14 @@ int vidioc_encoder_cmd(struct file *file, void *priv,
1533 1546
1534 spin_lock_irqsave(&dev->irqlock, flags); 1547 spin_lock_irqsave(&dev->irqlock, flags);
1535 if (list_empty(&ctx->src_queue)) { 1548 if (list_empty(&ctx->src_queue)) {
1536 mfc_debug(2, "EOS: empty src queue, entering finishing state"); 1549 mfc_debug(2, "EOS: empty src queue, entering finishing state\n");
1537 ctx->state = MFCINST_FINISHING; 1550 ctx->state = MFCINST_FINISHING;
1538 if (s5p_mfc_ctx_ready(ctx)) 1551 if (s5p_mfc_ctx_ready(ctx))
1539 set_work_bit_irqsave(ctx); 1552 set_work_bit_irqsave(ctx);
1540 spin_unlock_irqrestore(&dev->irqlock, flags); 1553 spin_unlock_irqrestore(&dev->irqlock, flags);
1541 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); 1554 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
1542 } else { 1555 } else {
1543 mfc_debug(2, "EOS: marking last buffer of stream"); 1556 mfc_debug(2, "EOS: marking last buffer of stream\n");
1544 buf = list_entry(ctx->src_queue.prev, 1557 buf = list_entry(ctx->src_queue.prev,
1545 struct s5p_mfc_buf, list); 1558 struct s5p_mfc_buf, list);
1546 if (buf->flags & MFC_BUF_FLAG_USED) 1559 if (buf->flags & MFC_BUF_FLAG_USED)
@@ -1609,9 +1622,9 @@ static int check_vb_with_fmt(struct s5p_mfc_fmt *fmt, struct vb2_buffer *vb)
1609 mfc_err("failed to get plane cookie\n"); 1622 mfc_err("failed to get plane cookie\n");
1610 return -EINVAL; 1623 return -EINVAL;
1611 } 1624 }
1612 mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx", 1625 mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx\n",
1613 vb->v4l2_buf.index, i, 1626 vb->v4l2_buf.index, i,
1614 vb2_dma_contig_plane_dma_addr(vb, i)); 1627 vb2_dma_contig_plane_dma_addr(vb, i));
1615 } 1628 }
1616 return 0; 1629 return 0;
1617} 1630}
@@ -1760,11 +1773,27 @@ static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
1760 struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv); 1773 struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
1761 struct s5p_mfc_dev *dev = ctx->dev; 1774 struct s5p_mfc_dev *dev = ctx->dev;
1762 1775
1763 v4l2_ctrl_handler_setup(&ctx->ctrl_handler); 1776 if (IS_MFCV6(dev) && (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) {
1777
1778 if ((ctx->state == MFCINST_GOT_INST) &&
1779 (dev->curr_ctx == ctx->num) && dev->hw_lock) {
1780 s5p_mfc_wait_for_done_ctx(ctx,
1781 S5P_MFC_R2H_CMD_SEQ_DONE_RET,
1782 0);
1783 }
1784
1785 if (ctx->src_bufs_cnt < ctx->pb_count) {
1786 mfc_err("Need minimum %d OUTPUT buffers\n",
1787 ctx->pb_count);
1788 return -EINVAL;
1789 }
1790 }
1791
1764 /* If context is ready then dev = work->data;schedule it to run */ 1792 /* If context is ready then dev = work->data;schedule it to run */
1765 if (s5p_mfc_ctx_ready(ctx)) 1793 if (s5p_mfc_ctx_ready(ctx))
1766 set_work_bit_irqsave(ctx); 1794 set_work_bit_irqsave(ctx);
1767 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); 1795 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
1796
1768 return 0; 1797 return 0;
1769} 1798}
1770 1799
@@ -1920,6 +1949,7 @@ int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx)
1920 if (controls[i].is_volatile && ctx->ctrls[i]) 1949 if (controls[i].is_volatile && ctx->ctrls[i])
1921 ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_VOLATILE; 1950 ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_VOLATILE;
1922 } 1951 }
1952 v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
1923 return 0; 1953 return 0;
1924} 1954}
1925 1955
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index 0af05a2d1cd4..368582b091bf 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -1275,8 +1275,8 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
1275 spin_unlock_irqrestore(&dev->irqlock, flags); 1275 spin_unlock_irqrestore(&dev->irqlock, flags);
1276 dev->curr_ctx = ctx->num; 1276 dev->curr_ctx = ctx->num;
1277 s5p_mfc_clean_ctx_int_flags(ctx); 1277 s5p_mfc_clean_ctx_int_flags(ctx);
1278 mfc_debug(2, "encoding buffer with index=%d state=%d", 1278 mfc_debug(2, "encoding buffer with index=%d state=%d\n",
1279 src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state); 1279 src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state);
1280 s5p_mfc_encode_one_frame_v5(ctx); 1280 s5p_mfc_encode_one_frame_v5(ctx);
1281 return 0; 1281 return 0;
1282} 1282}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index 7e76fce2e524..66f0d042357f 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -62,12 +62,6 @@ static void s5p_mfc_release_dec_desc_buffer_v6(struct s5p_mfc_ctx *ctx)
62 /* NOP */ 62 /* NOP */
63} 63}
64 64
65static int s5p_mfc_get_dec_status_v6(struct s5p_mfc_dev *dev)
66{
67 /* NOP */
68 return -1;
69}
70
71/* Allocate codec buffers */ 65/* Allocate codec buffers */
72static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx) 66static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
73{ 67{
@@ -167,7 +161,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
167 S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6); 161 S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
168 ctx->bank1.size = 162 ctx->bank1.size =
169 ctx->scratch_buf_size + ctx->tmv_buffer_size + 163 ctx->scratch_buf_size + ctx->tmv_buffer_size +
170 (ctx->dpb_count * (ctx->luma_dpb_size + 164 (ctx->pb_count * (ctx->luma_dpb_size +
171 ctx->chroma_dpb_size + ctx->me_buffer_size)); 165 ctx->chroma_dpb_size + ctx->me_buffer_size));
172 ctx->bank2.size = 0; 166 ctx->bank2.size = 0;
173 break; 167 break;
@@ -181,7 +175,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
181 S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6); 175 S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
182 ctx->bank1.size = 176 ctx->bank1.size =
183 ctx->scratch_buf_size + ctx->tmv_buffer_size + 177 ctx->scratch_buf_size + ctx->tmv_buffer_size +
184 (ctx->dpb_count * (ctx->luma_dpb_size + 178 (ctx->pb_count * (ctx->luma_dpb_size +
185 ctx->chroma_dpb_size + ctx->me_buffer_size)); 179 ctx->chroma_dpb_size + ctx->me_buffer_size));
186 ctx->bank2.size = 0; 180 ctx->bank2.size = 0;
187 break; 181 break;
@@ -198,7 +192,6 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
198 } 192 }
199 BUG_ON(ctx->bank1.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1)); 193 BUG_ON(ctx->bank1.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
200 } 194 }
201
202 return 0; 195 return 0;
203} 196}
204 197
@@ -449,8 +442,8 @@ static int s5p_mfc_set_enc_stream_buffer_v6(struct s5p_mfc_ctx *ctx,
449 WRITEL(addr, S5P_FIMV_E_STREAM_BUFFER_ADDR_V6); /* 16B align */ 442 WRITEL(addr, S5P_FIMV_E_STREAM_BUFFER_ADDR_V6); /* 16B align */
450 WRITEL(size, S5P_FIMV_E_STREAM_BUFFER_SIZE_V6); 443 WRITEL(size, S5P_FIMV_E_STREAM_BUFFER_SIZE_V6);
451 444
452 mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%d", 445 mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%d\n",
453 addr, size); 446 addr, size);
454 447
455 return 0; 448 return 0;
456} 449}
@@ -463,8 +456,8 @@ static void s5p_mfc_set_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
463 WRITEL(y_addr, S5P_FIMV_E_SOURCE_LUMA_ADDR_V6); /* 256B align */ 456 WRITEL(y_addr, S5P_FIMV_E_SOURCE_LUMA_ADDR_V6); /* 256B align */
464 WRITEL(c_addr, S5P_FIMV_E_SOURCE_CHROMA_ADDR_V6); 457 WRITEL(c_addr, S5P_FIMV_E_SOURCE_CHROMA_ADDR_V6);
465 458
466 mfc_debug(2, "enc src y buf addr: 0x%08lx", y_addr); 459 mfc_debug(2, "enc src y buf addr: 0x%08lx\n", y_addr);
467 mfc_debug(2, "enc src c buf addr: 0x%08lx", c_addr); 460 mfc_debug(2, "enc src c buf addr: 0x%08lx\n", c_addr);
468} 461}
469 462
470static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx, 463static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
@@ -479,8 +472,8 @@ static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
479 enc_recon_y_addr = READL(S5P_FIMV_E_RECON_LUMA_DPB_ADDR_V6); 472 enc_recon_y_addr = READL(S5P_FIMV_E_RECON_LUMA_DPB_ADDR_V6);
480 enc_recon_c_addr = READL(S5P_FIMV_E_RECON_CHROMA_DPB_ADDR_V6); 473 enc_recon_c_addr = READL(S5P_FIMV_E_RECON_CHROMA_DPB_ADDR_V6);
481 474
482 mfc_debug(2, "recon y addr: 0x%08lx", enc_recon_y_addr); 475 mfc_debug(2, "recon y addr: 0x%08lx\n", enc_recon_y_addr);
483 mfc_debug(2, "recon c addr: 0x%08lx", enc_recon_c_addr); 476 mfc_debug(2, "recon c addr: 0x%08lx\n", enc_recon_c_addr);
484} 477}
485 478
486/* Set encoding ref & codec buffer */ 479/* Set encoding ref & codec buffer */
@@ -497,7 +490,7 @@ static int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx)
497 490
498 mfc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1); 491 mfc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1);
499 492
500 for (i = 0; i < ctx->dpb_count; i++) { 493 for (i = 0; i < ctx->pb_count; i++) {
501 WRITEL(buf_addr1, S5P_FIMV_E_LUMA_DPB_V6 + (4 * i)); 494 WRITEL(buf_addr1, S5P_FIMV_E_LUMA_DPB_V6 + (4 * i));
502 buf_addr1 += ctx->luma_dpb_size; 495 buf_addr1 += ctx->luma_dpb_size;
503 WRITEL(buf_addr1, S5P_FIMV_E_CHROMA_DPB_V6 + (4 * i)); 496 WRITEL(buf_addr1, S5P_FIMV_E_CHROMA_DPB_V6 + (4 * i));
@@ -520,7 +513,7 @@ static int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx)
520 buf_size1 -= ctx->tmv_buffer_size; 513 buf_size1 -= ctx->tmv_buffer_size;
521 514
522 mfc_debug(2, "Buf1: %u, buf_size1: %d (ref frames %d)\n", 515 mfc_debug(2, "Buf1: %u, buf_size1: %d (ref frames %d)\n",
523 buf_addr1, buf_size1, ctx->dpb_count); 516 buf_addr1, buf_size1, ctx->pb_count);
524 if (buf_size1 < 0) { 517 if (buf_size1 < 0) {
525 mfc_debug(2, "Not enough memory has been allocated.\n"); 518 mfc_debug(2, "Not enough memory has been allocated.\n");
526 return -ENOMEM; 519 return -ENOMEM;
@@ -1431,8 +1424,8 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
1431 src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0); 1424 src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0);
1432 src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1); 1425 src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1);
1433 1426
1434 mfc_debug(2, "enc src y addr: 0x%08lx", src_y_addr); 1427 mfc_debug(2, "enc src y addr: 0x%08lx\n", src_y_addr);
1435 mfc_debug(2, "enc src c addr: 0x%08lx", src_c_addr); 1428 mfc_debug(2, "enc src c addr: 0x%08lx\n", src_c_addr);
1436 1429
1437 s5p_mfc_set_enc_frame_buffer_v6(ctx, src_y_addr, src_c_addr); 1430 s5p_mfc_set_enc_frame_buffer_v6(ctx, src_y_addr, src_c_addr);
1438 1431
@@ -1522,22 +1515,6 @@ static inline int s5p_mfc_run_init_enc_buffers(struct s5p_mfc_ctx *ctx)
1522 struct s5p_mfc_dev *dev = ctx->dev; 1515 struct s5p_mfc_dev *dev = ctx->dev;
1523 int ret; 1516 int ret;
1524 1517
1525 ret = s5p_mfc_alloc_codec_buffers_v6(ctx);
1526 if (ret) {
1527 mfc_err("Failed to allocate encoding buffers.\n");
1528 return -ENOMEM;
1529 }
1530
1531 /* Header was generated now starting processing
1532 * First set the reference frame buffers
1533 */
1534 if (ctx->capture_state != QUEUE_BUFS_REQUESTED) {
1535 mfc_err("It seems that destionation buffers were not\n"
1536 "requested.MFC requires that header should be generated\n"
1537 "before allocating codec buffer.\n");
1538 return -EAGAIN;
1539 }
1540
1541 dev->curr_ctx = ctx->num; 1518 dev->curr_ctx = ctx->num;
1542 s5p_mfc_clean_ctx_int_flags(ctx); 1519 s5p_mfc_clean_ctx_int_flags(ctx);
1543 ret = s5p_mfc_set_enc_ref_buffer_v6(ctx); 1520 ret = s5p_mfc_set_enc_ref_buffer_v6(ctx);
@@ -1582,7 +1559,7 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev)
1582 mfc_debug(1, "Seting new context to %p\n", ctx); 1559 mfc_debug(1, "Seting new context to %p\n", ctx);
1583 /* Got context to run in ctx */ 1560 /* Got context to run in ctx */
1584 mfc_debug(1, "ctx->dst_queue_cnt=%d ctx->dpb_count=%d ctx->src_queue_cnt=%d\n", 1561 mfc_debug(1, "ctx->dst_queue_cnt=%d ctx->dpb_count=%d ctx->src_queue_cnt=%d\n",
1585 ctx->dst_queue_cnt, ctx->dpb_count, ctx->src_queue_cnt); 1562 ctx->dst_queue_cnt, ctx->pb_count, ctx->src_queue_cnt);
1586 mfc_debug(1, "ctx->state=%d\n", ctx->state); 1563 mfc_debug(1, "ctx->state=%d\n", ctx->state);
1587 /* Last frame has already been sent to MFC 1564 /* Last frame has already been sent to MFC
1588 * Now obtaining frames from MFC buffer */ 1565 * Now obtaining frames from MFC buffer */
@@ -1647,7 +1624,7 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev)
1647 case MFCINST_GOT_INST: 1624 case MFCINST_GOT_INST:
1648 s5p_mfc_run_init_enc(ctx); 1625 s5p_mfc_run_init_enc(ctx);
1649 break; 1626 break;
1650 case MFCINST_HEAD_PARSED: /* Only for MFC6.x */ 1627 case MFCINST_HEAD_PRODUCED:
1651 ret = s5p_mfc_run_init_enc_buffers(ctx); 1628 ret = s5p_mfc_run_init_enc_buffers(ctx);
1652 break; 1629 break;
1653 default: 1630 default:
@@ -1730,7 +1707,7 @@ static int s5p_mfc_get_dspl_status_v6(struct s5p_mfc_dev *dev)
1730 return mfc_read(dev, S5P_FIMV_D_DISPLAY_STATUS_V6); 1707 return mfc_read(dev, S5P_FIMV_D_DISPLAY_STATUS_V6);
1731} 1708}
1732 1709
1733static int s5p_mfc_get_decoded_status_v6(struct s5p_mfc_dev *dev) 1710static int s5p_mfc_get_dec_status_v6(struct s5p_mfc_dev *dev)
1734{ 1711{
1735 return mfc_read(dev, S5P_FIMV_D_DECODED_STATUS_V6); 1712 return mfc_read(dev, S5P_FIMV_D_DECODED_STATUS_V6);
1736} 1713}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
index 6aa38a56aaf2..11d5f1dada32 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -50,19 +50,6 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
50 goto err_p_ip_clk; 50 goto err_p_ip_clk;
51 } 51 }
52 52
53 pm->clock = clk_get(&dev->plat_dev->dev, dev->variant->mclk_name);
54 if (IS_ERR(pm->clock)) {
55 mfc_err("Failed to get MFC clock\n");
56 ret = PTR_ERR(pm->clock);
57 goto err_g_ip_clk_2;
58 }
59
60 ret = clk_prepare(pm->clock);
61 if (ret) {
62 mfc_err("Failed to prepare MFC clock\n");
63 goto err_p_ip_clk_2;
64 }
65
66 atomic_set(&pm->power, 0); 53 atomic_set(&pm->power, 0);
67#ifdef CONFIG_PM_RUNTIME 54#ifdef CONFIG_PM_RUNTIME
68 pm->device = &dev->plat_dev->dev; 55 pm->device = &dev->plat_dev->dev;
@@ -72,10 +59,6 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
72 atomic_set(&clk_ref, 0); 59 atomic_set(&clk_ref, 0);
73#endif 60#endif
74 return 0; 61 return 0;
75err_p_ip_clk_2:
76 clk_put(pm->clock);
77err_g_ip_clk_2:
78 clk_unprepare(pm->clock_gate);
79err_p_ip_clk: 62err_p_ip_clk:
80 clk_put(pm->clock_gate); 63 clk_put(pm->clock_gate);
81err_g_ip_clk: 64err_g_ip_clk:
@@ -86,8 +69,6 @@ void s5p_mfc_final_pm(struct s5p_mfc_dev *dev)
86{ 69{
87 clk_unprepare(pm->clock_gate); 70 clk_unprepare(pm->clock_gate);
88 clk_put(pm->clock_gate); 71 clk_put(pm->clock_gate);
89 clk_unprepare(pm->clock);
90 clk_put(pm->clock);
91#ifdef CONFIG_PM_RUNTIME 72#ifdef CONFIG_PM_RUNTIME
92 pm_runtime_disable(pm->device); 73 pm_runtime_disable(pm->device);
93#endif 74#endif
@@ -98,7 +79,7 @@ int s5p_mfc_clock_on(void)
98 int ret; 79 int ret;
99#ifdef CLK_DEBUG 80#ifdef CLK_DEBUG
100 atomic_inc(&clk_ref); 81 atomic_inc(&clk_ref);
101 mfc_debug(3, "+ %d", atomic_read(&clk_ref)); 82 mfc_debug(3, "+ %d\n", atomic_read(&clk_ref));
102#endif 83#endif
103 ret = clk_enable(pm->clock_gate); 84 ret = clk_enable(pm->clock_gate);
104 return ret; 85 return ret;
@@ -108,7 +89,7 @@ void s5p_mfc_clock_off(void)
108{ 89{
109#ifdef CLK_DEBUG 90#ifdef CLK_DEBUG
110 atomic_dec(&clk_ref); 91 atomic_dec(&clk_ref);
111 mfc_debug(3, "- %d", atomic_read(&clk_ref)); 92 mfc_debug(3, "- %d\n", atomic_read(&clk_ref));
112#endif 93#endif
113 clk_disable(pm->clock_gate); 94 clk_disable(pm->clock_gate);
114} 95}
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index 0b32cc3f6a47..59a9deefb242 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -905,11 +905,11 @@ static int sh_veu_queue_setup(struct vb2_queue *vq,
905 if (ftmp.fmt.pix.width != pix->width || 905 if (ftmp.fmt.pix.width != pix->width ||
906 ftmp.fmt.pix.height != pix->height) 906 ftmp.fmt.pix.height != pix->height)
907 return -EINVAL; 907 return -EINVAL;
908 size = pix->bytesperline ? pix->bytesperline * pix->height : 908 size = pix->bytesperline ? pix->bytesperline * pix->height * fmt->depth / fmt->ydepth :
909 pix->width * pix->height * fmt->depth >> 3; 909 pix->width * pix->height * fmt->depth / fmt->ydepth;
910 } else { 910 } else {
911 vfmt = sh_veu_get_vfmt(veu, vq->type); 911 vfmt = sh_veu_get_vfmt(veu, vq->type);
912 size = vfmt->bytesperline * vfmt->frame.height; 912 size = vfmt->bytesperline * vfmt->frame.height * vfmt->fmt->depth / vfmt->fmt->ydepth;
913 } 913 }
914 914
915 if (count < 2) 915 if (count < 2)
@@ -1033,8 +1033,6 @@ static int sh_veu_release(struct file *file)
1033 1033
1034 dev_dbg(veu->dev, "Releasing instance %p\n", veu_file); 1034 dev_dbg(veu->dev, "Releasing instance %p\n", veu_file);
1035 1035
1036 pm_runtime_put(veu->dev);
1037
1038 if (veu_file == veu->capture) { 1036 if (veu_file == veu->capture) {
1039 veu->capture = NULL; 1037 veu->capture = NULL;
1040 vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE)); 1038 vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE));
@@ -1050,6 +1048,8 @@ static int sh_veu_release(struct file *file)
1050 veu->m2m_ctx = NULL; 1048 veu->m2m_ctx = NULL;
1051 } 1049 }
1052 1050
1051 pm_runtime_put(veu->dev);
1052
1053 kfree(veu_file); 1053 kfree(veu_file);
1054 1054
1055 return 0; 1055 return 0;
@@ -1138,10 +1138,7 @@ static irqreturn_t sh_veu_isr(int irq, void *dev_id)
1138 1138
1139 veu->xaction++; 1139 veu->xaction++;
1140 1140
1141 if (!veu->aborting) 1141 return IRQ_WAKE_THREAD;
1142 return IRQ_WAKE_THREAD;
1143
1144 return IRQ_HANDLED;
1145} 1142}
1146 1143
1147static int sh_veu_probe(struct platform_device *pdev) 1144static int sh_veu_probe(struct platform_device *pdev)
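
The sh_veu interrupt handler above now always returns IRQ_WAKE_THREAD from the hard handler. A sketch of the threaded-IRQ split that relies on, with made-up names:

/*
 * Sketch only: keep the hard handler short and non-sleeping, and push
 * the heavier completion work into the threaded handler.
 */
#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t example_hardirq(int irq, void *dev_id)
{
	/* acknowledge the hardware; no sleeping allowed here */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	/* heavier completion work, may sleep */
	return IRQ_HANDLED;
}

static int example_request_irq(struct device *dev, int irq, void *ctx)
{
	return devm_request_threaded_irq(dev, irq, example_hardirq,
					 example_thread_fn, 0, "example", ctx);
}
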
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index eea832c5fd01..3a4efbdc7668 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -643,9 +643,9 @@ static int soc_camera_close(struct file *file)
643 643
644 if (ici->ops->init_videobuf2) 644 if (ici->ops->init_videobuf2)
645 vb2_queue_release(&icd->vb2_vidq); 645 vb2_queue_release(&icd->vb2_vidq);
646 ici->ops->remove(icd);
647
648 __soc_camera_power_off(icd); 646 __soc_camera_power_off(icd);
647
648 ici->ops->remove(icd);
649 } 649 }
650 650
651 if (icd->streamer == file) 651 if (icd->streamer == file)
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index c0beee2fa37c..d529ba788f41 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -22,6 +22,7 @@ config RADIO_SI476X
22 tristate "Silicon Laboratories Si476x I2C FM Radio" 22 tristate "Silicon Laboratories Si476x I2C FM Radio"
23 depends on I2C && VIDEO_V4L2 23 depends on I2C && VIDEO_V4L2
24 depends on MFD_SI476X_CORE 24 depends on MFD_SI476X_CORE
25 depends on SND_SOC
25 select SND_SOC_SI476X 26 select SND_SOC_SI476X
26 ---help--- 27 ---help---
27 Choose Y here if you have this FM radio chip. 28 Choose Y here if you have this FM radio chip.
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
index 9430c6a29937..9dc8bafe6486 100644
--- a/drivers/media/radio/radio-si476x.c
+++ b/drivers/media/radio/radio-si476x.c
@@ -44,7 +44,7 @@
44 44
45#define FREQ_MUL (10000000 / 625) 45#define FREQ_MUL (10000000 / 625)
46 46
47#define SI476X_PHDIV_STATUS_LINK_LOCKED(status) (0b10000000 & (status)) 47#define SI476X_PHDIV_STATUS_LINK_LOCKED(status) (0x80 & (status))
48 48
49#define DRIVER_NAME "si476x-radio" 49#define DRIVER_NAME "si476x-radio"
50#define DRIVER_CARD "SI476x AM/FM Receiver" 50#define DRIVER_CARD "SI476x AM/FM Receiver"
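
The mask rewrite above drops the 0b... binary literal, which is a GCC extension rather than standard C. An equivalent, hedged one-liner using hex or BIT():

/* Illustrative macro name; 0b10000000 == 0x80 == BIT(7). */
#include <linux/bitops.h>

#define EXAMPLE_STATUS_LINK_LOCKED(status)	(BIT(7) & (status))
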
diff --git a/drivers/media/tuners/Kconfig b/drivers/media/tuners/Kconfig
index f6768cad001a..15665debc572 100644
--- a/drivers/media/tuners/Kconfig
+++ b/drivers/media/tuners/Kconfig
@@ -1,23 +1,3 @@
1config MEDIA_ATTACH
2 bool "Load and attach frontend and tuner driver modules as needed"
3 depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT
4 depends on MODULES
5 default y if !EXPERT
6 help
7 Remove the static dependency of DVB card drivers on all
8 frontend modules for all possible card variants. Instead,
9 allow the card drivers to only load the frontend modules
10 they require.
11
12 Also, tuner module will automatically load a tuner driver
13 when needed, for analog mode.
14
15 This saves several KBytes of memory.
16
17 Note: You will need module-init-tools v3.2 or later for this feature.
18
19 If unsure say Y.
20
21# Analog TV tuners, auto-loaded via tuner.ko 1# Analog TV tuners, auto-loaded via tuner.ko
22config MEDIA_TUNER 2config MEDIA_TUNER
23 tristate 3 tristate
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 22015fe1a0f3..2cc8ec70e3b6 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -376,7 +376,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
376 struct rtl28xxu_req req_mxl5007t = {0xd9c0, CMD_I2C_RD, 1, buf}; 376 struct rtl28xxu_req req_mxl5007t = {0xd9c0, CMD_I2C_RD, 1, buf};
377 struct rtl28xxu_req req_e4000 = {0x02c8, CMD_I2C_RD, 1, buf}; 377 struct rtl28xxu_req req_e4000 = {0x02c8, CMD_I2C_RD, 1, buf};
378 struct rtl28xxu_req req_tda18272 = {0x00c0, CMD_I2C_RD, 2, buf}; 378 struct rtl28xxu_req req_tda18272 = {0x00c0, CMD_I2C_RD, 2, buf};
379 struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 5, buf}; 379 struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 1, buf};
380 380
381 dev_dbg(&d->udev->dev, "%s:\n", __func__); 381 dev_dbg(&d->udev->dev, "%s:\n", __func__);
382 382
@@ -481,9 +481,9 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
481 goto found; 481 goto found;
482 } 482 }
483 483
484 /* check R820T by reading tuner stats at I2C addr 0x1a */ 484 /* check R820T ID register; reg=00 val=69 */
485 ret = rtl28xxu_ctrl_msg(d, &req_r820t); 485 ret = rtl28xxu_ctrl_msg(d, &req_r820t);
486 if (ret == 0) { 486 if (ret == 0 && buf[0] == 0x69) {
487 priv->tuner = TUNER_RTL2832_R820T; 487 priv->tuner = TUNER_RTL2832_R820T;
488 priv->tuner_name = "R820T"; 488 priv->tuner_name = "R820T";
489 goto found; 489 goto found;
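
The R820T detection above now requires the ID register (reg 0x00) to read back 0x69 instead of treating any successful transfer as a match. A sketch of that ID-probe shape with a stand-in register-read helper, not the driver's own API:

/*
 * Sketch: read a known ID register and require the documented value
 * before claiming the tuner is present.
 */
#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_ID_REG		0x00
#define EXAMPLE_ID_EXPECTED	0x69

static int example_read_reg(void *bus, u8 reg, u8 *val)
{
	return -ENODEV;		/* replace with the real bus transfer */
}

static bool example_tuner_present(void *bus)
{
	u8 id;

	if (example_read_reg(bus, EXAMPLE_ID_REG, &id))
		return false;	/* transfer failed: draw no conclusion */

	return id == EXAMPLE_ID_EXPECTED;
}
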
diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
index 3fe207e038c7..d7ff3b9687c5 100644
--- a/drivers/media/usb/gspca/sonixb.c
+++ b/drivers/media/usb/gspca/sonixb.c
@@ -1159,6 +1159,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
1159 regs[0x01] = 0x44; /* Select 24 Mhz clock */ 1159 regs[0x01] = 0x44; /* Select 24 Mhz clock */
1160 regs[0x12] = 0x02; /* Set hstart to 2 */ 1160 regs[0x12] = 0x02; /* Set hstart to 2 */
1161 } 1161 }
1162 break;
1163 case SENSOR_PAS202:
1164 /* For some unknown reason we need to increase hstart by 1 on
1165 the sn9c103, otherwise we get wrong colors (bayer shift). */
1166 if (sd->bridge == BRIDGE_103)
1167 regs[0x12] += 1;
1168 break;
1162 } 1169 }
1163 /* Disable compression when the raw bayer format has been selected */ 1170 /* Disable compression when the raw bayer format has been selected */
1164 if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_RAW) 1171 if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_RAW)
diff --git a/drivers/media/usb/pwc/pwc.h b/drivers/media/usb/pwc/pwc.h
index 7a6a0d39c2c6..81b017a554bc 100644
--- a/drivers/media/usb/pwc/pwc.h
+++ b/drivers/media/usb/pwc/pwc.h
@@ -226,7 +226,7 @@ struct pwc_device
226 struct list_head queued_bufs; 226 struct list_head queued_bufs;
227 spinlock_t queued_bufs_lock; /* Protects queued_bufs */ 227 spinlock_t queued_bufs_lock; /* Protects queued_bufs */
228 228
229 /* Note if taking both locks v4l2_lock must always be locked first! */ 229 /* If taking both locks vb_queue_lock must always be locked first! */
230 struct mutex v4l2_lock; /* Protects everything else */ 230 struct mutex v4l2_lock; /* Protects everything else */
231 struct mutex vb_queue_lock; /* Protects vb_queue and capt_file */ 231 struct mutex vb_queue_lock; /* Protects vb_queue and capt_file */
232 232
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index ebb8e48619a2..fccd08b66d1a 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1835,6 +1835,8 @@ bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl)
1835{ 1835{
1836 if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_TX) 1836 if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_TX)
1837 return true; 1837 return true;
1838 if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_RX)
1839 return true;
1838 switch (ctrl->id) { 1840 switch (ctrl->id) {
1839 case V4L2_CID_AUDIO_MUTE: 1841 case V4L2_CID_AUDIO_MUTE:
1840 case V4L2_CID_AUDIO_VOLUME: 1842 case V4L2_CID_AUDIO_VOLUME:
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index f81bda1a48ec..7658586fe5f4 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -243,7 +243,6 @@ static void v4l_print_format(const void *arg, bool write_only)
243 const struct v4l2_vbi_format *vbi; 243 const struct v4l2_vbi_format *vbi;
244 const struct v4l2_sliced_vbi_format *sliced; 244 const struct v4l2_sliced_vbi_format *sliced;
245 const struct v4l2_window *win; 245 const struct v4l2_window *win;
246 const struct v4l2_clip *clip;
247 unsigned i; 246 unsigned i;
248 247
249 pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); 248 pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
@@ -253,7 +252,7 @@ static void v4l_print_format(const void *arg, bool write_only)
253 pix = &p->fmt.pix; 252 pix = &p->fmt.pix;
254 pr_cont(", width=%u, height=%u, " 253 pr_cont(", width=%u, height=%u, "
255 "pixelformat=%c%c%c%c, field=%s, " 254 "pixelformat=%c%c%c%c, field=%s, "
256 "bytesperline=%u sizeimage=%u, colorspace=%d\n", 255 "bytesperline=%u, sizeimage=%u, colorspace=%d\n",
257 pix->width, pix->height, 256 pix->width, pix->height,
258 (pix->pixelformat & 0xff), 257 (pix->pixelformat & 0xff),
259 (pix->pixelformat >> 8) & 0xff, 258 (pix->pixelformat >> 8) & 0xff,
@@ -284,20 +283,14 @@ static void v4l_print_format(const void *arg, bool write_only)
284 case V4L2_BUF_TYPE_VIDEO_OVERLAY: 283 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
285 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: 284 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
286 win = &p->fmt.win; 285 win = &p->fmt.win;
287 pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, " 286 /* Note: we can't print the clip list here since the clips
288 "chromakey=0x%08x, bitmap=%p, " 287 * pointer is a userspace pointer, not a kernelspace
289 "global_alpha=0x%02x\n", 288 * pointer. */
290 win->w.width, win->w.height, 289 pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, chromakey=0x%08x, clipcount=%u, clips=%p, bitmap=%p, global_alpha=0x%02x\n",
291 win->w.left, win->w.top, 290 win->w.width, win->w.height, win->w.left, win->w.top,
292 prt_names(win->field, v4l2_field_names), 291 prt_names(win->field, v4l2_field_names),
293 win->chromakey, win->bitmap, win->global_alpha); 292 win->chromakey, win->clipcount, win->clips,
294 clip = win->clips; 293 win->bitmap, win->global_alpha);
295 for (i = 0; i < win->clipcount; i++) {
296 printk(KERN_DEBUG "clip %u: wxh=%dx%d, x,y=%d,%d\n",
297 i, clip->c.width, clip->c.height,
298 clip->c.left, clip->c.top);
299 clip = clip->next;
300 }
301 break; 294 break;
302 case V4L2_BUF_TYPE_VBI_CAPTURE: 295 case V4L2_BUF_TYPE_VBI_CAPTURE:
303 case V4L2_BUF_TYPE_VBI_OUTPUT: 296 case V4L2_BUF_TYPE_VBI_OUTPUT:
@@ -332,7 +325,7 @@ static void v4l_print_framebuffer(const void *arg, bool write_only)
332 325
333 pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, " 326 pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, "
334 "height=%u, pixelformat=%c%c%c%c, " 327 "height=%u, pixelformat=%c%c%c%c, "
335 "bytesperline=%u sizeimage=%u, colorspace=%d\n", 328 "bytesperline=%u, sizeimage=%u, colorspace=%d\n",
336 p->capability, p->flags, p->base, 329 p->capability, p->flags, p->base,
337 p->fmt.width, p->fmt.height, 330 p->fmt.width, p->fmt.height,
338 (p->fmt.pixelformat & 0xff), 331 (p->fmt.pixelformat & 0xff),
@@ -353,7 +346,7 @@ static void v4l_print_modulator(const void *arg, bool write_only)
353 const struct v4l2_modulator *p = arg; 346 const struct v4l2_modulator *p = arg;
354 347
355 if (write_only) 348 if (write_only)
356 pr_cont("index=%u, txsubchans=0x%x", p->index, p->txsubchans); 349 pr_cont("index=%u, txsubchans=0x%x\n", p->index, p->txsubchans);
357 else 350 else
358 pr_cont("index=%u, name=%.*s, capability=0x%x, " 351 pr_cont("index=%u, name=%.*s, capability=0x%x, "
359 "rangelow=%u, rangehigh=%u, txsubchans=0x%x\n", 352 "rangelow=%u, rangehigh=%u, txsubchans=0x%x\n",
@@ -445,13 +438,13 @@ static void v4l_print_buffer(const void *arg, bool write_only)
445 for (i = 0; i < p->length; ++i) { 438 for (i = 0; i < p->length; ++i) {
446 plane = &p->m.planes[i]; 439 plane = &p->m.planes[i];
447 printk(KERN_DEBUG 440 printk(KERN_DEBUG
448 "plane %d: bytesused=%d, data_offset=0x%08x " 441 "plane %d: bytesused=%d, data_offset=0x%08x, "
449 "offset/userptr=0x%lx, length=%d\n", 442 "offset/userptr=0x%lx, length=%d\n",
450 i, plane->bytesused, plane->data_offset, 443 i, plane->bytesused, plane->data_offset,
451 plane->m.userptr, plane->length); 444 plane->m.userptr, plane->length);
452 } 445 }
453 } else { 446 } else {
454 pr_cont("bytesused=%d, offset/userptr=0x%lx, length=%d\n", 447 pr_cont(", bytesused=%d, offset/userptr=0x%lx, length=%d\n",
455 p->bytesused, p->m.userptr, p->length); 448 p->bytesused, p->m.userptr, p->length);
456 } 449 }
457 450
@@ -504,6 +497,8 @@ static void v4l_print_streamparm(const void *arg, bool write_only)
504 c->capability, c->outputmode, 497 c->capability, c->outputmode,
505 c->timeperframe.numerator, c->timeperframe.denominator, 498 c->timeperframe.numerator, c->timeperframe.denominator,
506 c->extendedmode, c->writebuffers); 499 c->extendedmode, c->writebuffers);
500 } else {
501 pr_cont("\n");
507 } 502 }
508} 503}
509 504
@@ -734,11 +729,11 @@ static void v4l_print_frmsizeenum(const void *arg, bool write_only)
734 p->type); 729 p->type);
735 switch (p->type) { 730 switch (p->type) {
736 case V4L2_FRMSIZE_TYPE_DISCRETE: 731 case V4L2_FRMSIZE_TYPE_DISCRETE:
737 pr_cont(" wxh=%ux%u\n", 732 pr_cont(", wxh=%ux%u\n",
738 p->discrete.width, p->discrete.height); 733 p->discrete.width, p->discrete.height);
739 break; 734 break;
740 case V4L2_FRMSIZE_TYPE_STEPWISE: 735 case V4L2_FRMSIZE_TYPE_STEPWISE:
741 pr_cont(" min=%ux%u, max=%ux%u, step=%ux%u\n", 736 pr_cont(", min=%ux%u, max=%ux%u, step=%ux%u\n",
742 p->stepwise.min_width, p->stepwise.min_height, 737 p->stepwise.min_width, p->stepwise.min_height,
743 p->stepwise.step_width, p->stepwise.step_height, 738 p->stepwise.step_width, p->stepwise.step_height,
744 p->stepwise.max_width, p->stepwise.max_height); 739 p->stepwise.max_width, p->stepwise.max_height);
@@ -764,12 +759,12 @@ static void v4l_print_frmivalenum(const void *arg, bool write_only)
764 p->width, p->height, p->type); 759 p->width, p->height, p->type);
765 switch (p->type) { 760 switch (p->type) {
766 case V4L2_FRMIVAL_TYPE_DISCRETE: 761 case V4L2_FRMIVAL_TYPE_DISCRETE:
767 pr_cont(" fps=%d/%d\n", 762 pr_cont(", fps=%d/%d\n",
768 p->discrete.numerator, 763 p->discrete.numerator,
769 p->discrete.denominator); 764 p->discrete.denominator);
770 break; 765 break;
771 case V4L2_FRMIVAL_TYPE_STEPWISE: 766 case V4L2_FRMIVAL_TYPE_STEPWISE:
772 pr_cont(" min=%d/%d, max=%d/%d, step=%d/%d\n", 767 pr_cont(", min=%d/%d, max=%d/%d, step=%d/%d\n",
773 p->stepwise.min.numerator, 768 p->stepwise.min.numerator,
774 p->stepwise.min.denominator, 769 p->stepwise.min.denominator,
775 p->stepwise.max.numerator, 770 p->stepwise.max.numerator,
@@ -807,8 +802,8 @@ static void v4l_print_event(const void *arg, bool write_only)
807 pr_cont("value64=%lld, ", c->value64); 802 pr_cont("value64=%lld, ", c->value64);
808 else 803 else
809 pr_cont("value=%d, ", c->value); 804 pr_cont("value=%d, ", c->value);
810 pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d," 805 pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, "
811 " default_value=%d\n", 806 "default_value=%d\n",
812 c->flags, c->minimum, c->maximum, 807 c->flags, c->minimum, c->maximum,
813 c->step, c->default_value); 808 c->step, c->default_value);
814 break; 809 break;
@@ -845,7 +840,7 @@ static void v4l_print_freq_band(const void *arg, bool write_only)
845 const struct v4l2_frequency_band *p = arg; 840 const struct v4l2_frequency_band *p = arg;
846 841
847 pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, " 842 pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, "
848 "rangelow=%u, rangehigh=%u, modulation=0x%x\n", 843 "rangelow=%u, rangehigh=%u, modulation=0x%x\n",
849 p->tuner, p->type, p->index, 844 p->tuner, p->type, p->index,
850 p->capability, p->rangelow, 845 p->capability, p->rangelow,
851 p->rangehigh, p->modulation); 846 p->rangehigh, p->modulation);
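
The rewritten overlay printout above only logs the clip pointer and count because win->clips is a userspace pointer that kernel code must not dereference directly. A hedged sketch of how such a list would be accessed if the data were actually needed (illustrative only; v4l_print_format() does not do this):

/*
 * Sketch: userspace pointers are copied in with copy_from_user(),
 * never dereferenced from kernel context.
 */
#include <linux/errno.h>
#include <linux/uaccess.h>
#include <linux/videodev2.h>

static int example_read_first_clip(const struct v4l2_window *win,
				   struct v4l2_clip *out)
{
	if (!win->clipcount)
		return -ENOENT;

	if (copy_from_user(out, win->clips, sizeof(*out)))
		return -EFAULT;

	return 0;
}
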
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 66f599fcb829..e96497f7c3ed 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -205,7 +205,7 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
205static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) 205static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
206{ 206{
207 struct v4l2_m2m_dev *m2m_dev; 207 struct v4l2_m2m_dev *m2m_dev;
208 unsigned long flags_job, flags; 208 unsigned long flags_job, flags_out, flags_cap;
209 209
210 m2m_dev = m2m_ctx->m2m_dev; 210 m2m_dev = m2m_ctx->m2m_dev;
211 dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx); 211 dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
@@ -223,23 +223,26 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
223 return; 223 return;
224 } 224 }
225 225
226 spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); 226 spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
227 if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) { 227 if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
228 spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); 228 spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
229 flags_out);
229 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); 230 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
230 dprintk("No input buffers available\n"); 231 dprintk("No input buffers available\n");
231 return; 232 return;
232 } 233 }
233 spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); 234 spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
234 if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) { 235 if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
235 spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); 236 spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
236 spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); 237 flags_cap);
238 spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
239 flags_out);
237 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); 240 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
238 dprintk("No output buffers available\n"); 241 dprintk("No output buffers available\n");
239 return; 242 return;
240 } 243 }
241 spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); 244 spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
242 spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); 245 spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
243 246
244 if (m2m_dev->m2m_ops->job_ready 247 if (m2m_dev->m2m_ops->job_ready
245 && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) { 248 && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
@@ -372,6 +375,20 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
372EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf); 375EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
373 376
374/** 377/**
378 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
379 * on the type
380 */
381int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
382 struct v4l2_create_buffers *create)
383{
384 struct vb2_queue *vq;
385
386 vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
387 return vb2_create_bufs(vq, create);
388}
389EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);
390
391/**
375 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on 392 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
376 * the type 393 * the type
377 */ 394 */
@@ -486,8 +503,10 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
486 if (m2m_ctx->m2m_dev->m2m_ops->unlock) 503 if (m2m_ctx->m2m_dev->m2m_ops->unlock)
487 m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv); 504 m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
488 505
489 poll_wait(file, &src_q->done_wq, wait); 506 if (list_empty(&src_q->done_list))
490 poll_wait(file, &dst_q->done_wq, wait); 507 poll_wait(file, &src_q->done_wq, wait);
508 if (list_empty(&dst_q->done_list))
509 poll_wait(file, &dst_q->done_wq, wait);
491 510
492 if (m2m_ctx->m2m_dev->m2m_ops->lock) 511 if (m2m_ctx->m2m_dev->m2m_ops->lock)
493 m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv); 512 m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
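The v4l2-mem2mem hunk above gives each ready-queue spinlock its own flags variable (flags_out, flags_cap) instead of sharing one. A minimal sketch of that nested-irqsave pattern follows; the lock names and both_queues_ready() are illustrative only, not part of the driver.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Illustrative locks standing in for the two rdy_spinlocks. */
static DEFINE_SPINLOCK(out_lock);
static DEFINE_SPINLOCK(cap_lock);

/*
 * Each spin_lock_irqsave() level gets its own flags variable: reusing one
 * variable lets the inner save (taken with IRQs already off) overwrite the
 * outer one, so the final restore would leave interrupts disabled.
 */
static bool both_queues_ready(struct list_head *out_q, struct list_head *cap_q)
{
	unsigned long flags_out, flags_cap;
	bool ready;

	spin_lock_irqsave(&out_lock, flags_out);
	spin_lock_irqsave(&cap_lock, flags_cap);
	ready = !list_empty(out_q) && !list_empty(cap_q);
	spin_unlock_irqrestore(&cap_lock, flags_cap);
	spin_unlock_irqrestore(&out_lock, flags_out);

	return ready;
}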
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 7d833eefaf4e..e3bdc3be91e1 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -2014,7 +2014,8 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
2014 if (list_empty(&q->queued_list)) 2014 if (list_empty(&q->queued_list))
2015 return res | POLLERR; 2015 return res | POLLERR;
2016 2016
2017 poll_wait(file, &q->done_wq, wait); 2017 if (list_empty(&q->done_list))
2018 poll_wait(file, &q->done_wq, wait);
2018 2019
2019 /* 2020 /*
2020 * Take first buffer available for dequeuing. 2021 * Take first buffer available for dequeuing.
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 721b9186a5d1..4b93ed4d5cd6 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -107,7 +107,7 @@ static struct mfd_cell tps6586x_cell[] = {
107 .name = "tps6586x-gpio", 107 .name = "tps6586x-gpio",
108 }, 108 },
109 { 109 {
110 .name = "tps6586x-pmic", 110 .name = "tps6586x-regulator",
111 }, 111 },
112 { 112 {
113 .name = "tps6586x-rtc", 113 .name = "tps6586x-rtc",
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 713d89fedc46..f580d30bb784 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -197,6 +197,8 @@ void mei_stop(struct mei_device *dev)
197{ 197{
198 dev_dbg(&dev->pdev->dev, "stopping the device.\n"); 198 dev_dbg(&dev->pdev->dev, "stopping the device.\n");
199 199
200 flush_scheduled_work();
201
200 mutex_lock(&dev->device_lock); 202 mutex_lock(&dev->device_lock);
201 203
202 cancel_delayed_work(&dev->timer_work); 204 cancel_delayed_work(&dev->timer_work);
@@ -210,8 +212,6 @@ void mei_stop(struct mei_device *dev)
210 212
211 mutex_unlock(&dev->device_lock); 213 mutex_unlock(&dev->device_lock);
212 214
213 flush_scheduled_work();
214
215 mei_watchdog_unregister(dev); 215 mei_watchdog_unregister(dev);
216} 216}
217EXPORT_SYMBOL_GPL(mei_stop); 217EXPORT_SYMBOL_GPL(mei_stop);
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index 3adf8a70f26e..d0c6907dfd92 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -142,6 +142,8 @@ static void mei_nfc_free(struct mei_nfc_dev *ndev)
142 mei_cl_unlink(ndev->cl_info); 142 mei_cl_unlink(ndev->cl_info);
143 kfree(ndev->cl_info); 143 kfree(ndev->cl_info);
144 } 144 }
145
146 memset(ndev, 0, sizeof(struct mei_nfc_dev));
145} 147}
146 148
147static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev) 149static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index a727464e9c3f..0f268329bd3a 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -325,6 +325,7 @@ static int mei_me_pci_resume(struct device *device)
325 325
326 mutex_lock(&dev->device_lock); 326 mutex_lock(&dev->device_lock);
327 dev->dev_state = MEI_DEV_POWER_UP; 327 dev->dev_state = MEI_DEV_POWER_UP;
328 mei_clear_interrupts(dev);
328 mei_reset(dev, 1); 329 mei_reset(dev, 1);
329 mutex_unlock(&dev->device_lock); 330 mutex_unlock(&dev->device_lock);
330 331
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 44d273c5e19d..0535d1e0bc78 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -172,6 +172,7 @@ static long gru_get_config_info(unsigned long arg)
172 nodesperblade = 2; 172 nodesperblade = 2;
173 else 173 else
174 nodesperblade = 1; 174 nodesperblade = 1;
175 memset(&info, 0, sizeof(info));
175 info.cpus = num_online_cpus(); 176 info.cpus = num_online_cpus();
176 info.nodes = num_online_nodes(); 177 info.nodes = num_online_nodes();
177 info.blades = info.nodes / nodesperblade; 178 info.blades = info.nodes / nodesperblade;
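The sgi-gru hunk adds a memset() before the partially filled stack struct is later copied to user space, so padding and unset fields cannot leak kernel stack contents. A small sketch of that pattern, assuming a made-up struct and handler:

#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_info {
	u32 cpus;
	u32 nodes;
	u8  flags;
	/* the 3 trailing padding bytes would otherwise stay uninitialized */
};

static long demo_get_info(void __user *arg)
{
	struct demo_info info;

	memset(&info, 0, sizeof(info));	/* clear padding and unset fields */
	info.cpus = 4;
	info.nodes = 1;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}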
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index e75774f72606..aca59d93d5a9 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -2230,10 +2230,15 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
2230 mmc_free_host(slot->mmc); 2230 mmc_free_host(slot->mmc);
2231} 2231}
2232 2232
2233static bool atmci_filter(struct dma_chan *chan, void *slave) 2233static bool atmci_filter(struct dma_chan *chan, void *pdata)
2234{ 2234{
2235 struct mci_dma_data *sl = slave; 2235 struct mci_platform_data *sl_pdata = pdata;
2236 struct mci_dma_data *sl;
2236 2237
2238 if (!sl_pdata)
2239 return false;
2240
2241 sl = sl_pdata->dma_slave;
2237 if (sl && find_slave_dev(sl) == chan->device->dev) { 2242 if (sl && find_slave_dev(sl) == chan->device->dev) {
2238 chan->private = slave_data_ptr(sl); 2243 chan->private = slave_data_ptr(sl);
2239 return true; 2244 return true;
@@ -2245,24 +2250,18 @@ static bool atmci_filter(struct dma_chan *chan, void *slave)
2245static bool atmci_configure_dma(struct atmel_mci *host) 2250static bool atmci_configure_dma(struct atmel_mci *host)
2246{ 2251{
2247 struct mci_platform_data *pdata; 2252 struct mci_platform_data *pdata;
2253 dma_cap_mask_t mask;
2248 2254
2249 if (host == NULL) 2255 if (host == NULL)
2250 return false; 2256 return false;
2251 2257
2252 pdata = host->pdev->dev.platform_data; 2258 pdata = host->pdev->dev.platform_data;
2253 2259
2254 if (!pdata) 2260 dma_cap_zero(mask);
2255 return false; 2261 dma_cap_set(DMA_SLAVE, mask);
2256 2262
2257 if (pdata->dma_slave && find_slave_dev(pdata->dma_slave)) { 2263 host->dma.chan = dma_request_slave_channel_compat(mask, atmci_filter, pdata,
2258 dma_cap_mask_t mask; 2264 &host->pdev->dev, "rxtx");
2259
2260 /* Try to grab a DMA channel */
2261 dma_cap_zero(mask);
2262 dma_cap_set(DMA_SLAVE, mask);
2263 host->dma.chan =
2264 dma_request_channel(mask, atmci_filter, pdata->dma_slave);
2265 }
2266 if (!host->dma.chan) { 2265 if (!host->dma.chan) {
2267 dev_warn(&host->pdev->dev, "no DMA channel available\n"); 2266 dev_warn(&host->pdev->dev, "no DMA channel available\n");
2268 return false; 2267 return false;
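The atmel-mci hunk above (and the omap_hsmmc one that follows) replaces dma_request_channel() with dma_request_slave_channel_compat(), which tries the channel described in the device tree first and falls back to the legacy filter-function lookup. A sketch of that usage, assuming placeholder filter and helper names and a hypothetical "rxtx" channel name:

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/types.h>

static bool my_dma_filter(struct dma_chan *chan, void *param)
{
	/* legacy path: param is the DMA controller device we want */
	return chan->device->dev == param;
}

static struct dma_chan *my_request_rxtx_chan(struct device *dev,
					     struct device *dma_ctrl)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * Prefer the channel named "rxtx" in the device tree; fall back to
	 * scanning all DMA_SLAVE channels with the filter when the device
	 * has no DT description.
	 */
	return dma_request_slave_channel_compat(mask, my_dma_filter, dma_ctrl,
						dev, "rxtx");
}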
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 6e44025acf01..eccedc7d06a4 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -161,6 +161,7 @@ struct omap_hsmmc_host {
161 */ 161 */
162 struct regulator *vcc; 162 struct regulator *vcc;
163 struct regulator *vcc_aux; 163 struct regulator *vcc_aux;
164 int pbias_disable;
164 void __iomem *base; 165 void __iomem *base;
165 resource_size_t mapbase; 166 resource_size_t mapbase;
166 spinlock_t irq_lock; /* Prevent races with irq handler */ 167 spinlock_t irq_lock; /* Prevent races with irq handler */
@@ -255,11 +256,11 @@ static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on,
255 if (!host->vcc) 256 if (!host->vcc)
256 return 0; 257 return 0;
257 /* 258 /*
258 * With DT, never turn OFF the regulator. This is because 259 * With DT, never turn OFF the regulator for MMC1. This is because
259 * the pbias cell programming support is still missing when 260 * the pbias cell programming support is still missing when
260 * booting with Device tree 261 * booting with Device tree
261 */ 262 */
262 if (dev->of_node && !vdd) 263 if (host->pbias_disable && !vdd)
263 return 0; 264 return 0;
264 265
265 if (mmc_slot(host).before_set_reg) 266 if (mmc_slot(host).before_set_reg)
@@ -1520,10 +1521,10 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1520 (ios->vdd == DUAL_VOLT_OCR_BIT) && 1521 (ios->vdd == DUAL_VOLT_OCR_BIT) &&
1521 /* 1522 /*
1522 * With pbias cell programming missing, this 1523 * With pbias cell programming missing, this
1523 * can't be allowed when booting with device 1524 * can't be allowed on MMC1 when booting with device
1524 * tree. 1525 * tree.
1525 */ 1526 */
1526 !host->dev->of_node) { 1527 !host->pbias_disable) {
1527 /* 1528 /*
1528 * The mmc_select_voltage fn of the core does 1529 * The mmc_select_voltage fn of the core does
1529 * not seem to set the power_mode to 1530 * not seem to set the power_mode to
@@ -1871,6 +1872,10 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
1871 1872
1872 omap_hsmmc_context_save(host); 1873 omap_hsmmc_context_save(host);
1873 1874
1875 /* This can be removed once we support PBIAS with DT */
1876 if (host->dev->of_node && host->mapbase == 0x4809c000)
1877 host->pbias_disable = 1;
1878
1874 host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck"); 1879 host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
1875 /* 1880 /*
1876 * MMC can still work without debounce clock. 1881 * MMC can still work without debounce clock.
@@ -1906,33 +1911,41 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
1906 1911
1907 omap_hsmmc_conf_bus_power(host); 1912 omap_hsmmc_conf_bus_power(host);
1908 1913
1909 res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx"); 1914 if (!pdev->dev.of_node) {
1910 if (!res) { 1915 res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
1911 dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n"); 1916 if (!res) {
1912 ret = -ENXIO; 1917 dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
1913 goto err_irq; 1918 ret = -ENXIO;
1914 } 1919 goto err_irq;
1915 tx_req = res->start; 1920 }
1921 tx_req = res->start;
1916 1922
1917 res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx"); 1923 res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
1918 if (!res) { 1924 if (!res) {
1919 dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n"); 1925 dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
1920 ret = -ENXIO; 1926 ret = -ENXIO;
1921 goto err_irq; 1927 goto err_irq;
1928 }
1929 rx_req = res->start;
1922 } 1930 }
1923 rx_req = res->start;
1924 1931
1925 dma_cap_zero(mask); 1932 dma_cap_zero(mask);
1926 dma_cap_set(DMA_SLAVE, mask); 1933 dma_cap_set(DMA_SLAVE, mask);
1927 1934
1928 host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req); 1935 host->rx_chan =
1936 dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
1937 &rx_req, &pdev->dev, "rx");
1938
1929 if (!host->rx_chan) { 1939 if (!host->rx_chan) {
1930 dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req); 1940 dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
1931 ret = -ENXIO; 1941 ret = -ENXIO;
1932 goto err_irq; 1942 goto err_irq;
1933 } 1943 }
1934 1944
1935 host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req); 1945 host->tx_chan =
1946 dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
1947 &tx_req, &pdev->dev, "tx");
1948
1936 if (!host->tx_chan) { 1949 if (!host->tx_chan) {
1937 dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req); 1950 dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
1938 ret = -ENXIO; 1951 ret = -ENXIO;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 7bcf74b1a5cd..706d9cb1a49e 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -87,6 +87,12 @@ static const struct sdhci_ops sdhci_acpi_ops_dflt = {
87 .enable_dma = sdhci_acpi_enable_dma, 87 .enable_dma = sdhci_acpi_enable_dma,
88}; 88};
89 89
90static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
91 .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
92 .caps2 = MMC_CAP2_HC_ERASE_SZ,
93 .flags = SDHCI_ACPI_RUNTIME_PM,
94};
95
90static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = { 96static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
91 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, 97 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
92 .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD, 98 .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD,
@@ -94,23 +100,67 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
94 .pm_caps = MMC_PM_KEEP_POWER, 100 .pm_caps = MMC_PM_KEEP_POWER,
95}; 101};
96 102
103static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
104};
105
106struct sdhci_acpi_uid_slot {
107 const char *hid;
108 const char *uid;
109 const struct sdhci_acpi_slot *slot;
110};
111
112static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
113 { "80860F14" , "1" , &sdhci_acpi_slot_int_emmc },
114 { "80860F14" , "3" , &sdhci_acpi_slot_int_sd },
115 { "INT33BB" , "2" , &sdhci_acpi_slot_int_sdio },
116 { "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio },
117 { "PNP0D40" },
118 { },
119};
120
97static const struct acpi_device_id sdhci_acpi_ids[] = { 121static const struct acpi_device_id sdhci_acpi_ids[] = {
98 { "INT33C6", (kernel_ulong_t)&sdhci_acpi_slot_int_sdio }, 122 { "80860F14" },
99 { "PNP0D40" }, 123 { "INT33BB" },
124 { "INT33C6" },
125 { "PNP0D40" },
100 { }, 126 { },
101}; 127};
102MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids); 128MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
103 129
104static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid) 130static const struct sdhci_acpi_slot *sdhci_acpi_get_slot_by_ids(const char *hid,
131 const char *uid)
105{ 132{
106 const struct acpi_device_id *id; 133 const struct sdhci_acpi_uid_slot *u;
107 134
108 for (id = sdhci_acpi_ids; id->id[0]; id++) 135 for (u = sdhci_acpi_uids; u->hid; u++) {
109 if (!strcmp(id->id, hid)) 136 if (strcmp(u->hid, hid))
110 return (const struct sdhci_acpi_slot *)id->driver_data; 137 continue;
138 if (!u->uid)
139 return u->slot;
140 if (uid && !strcmp(u->uid, uid))
141 return u->slot;
142 }
111 return NULL; 143 return NULL;
112} 144}
113 145
146static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(acpi_handle handle,
147 const char *hid)
148{
149 const struct sdhci_acpi_slot *slot;
150 struct acpi_device_info *info;
151 const char *uid = NULL;
152 acpi_status status;
153
154 status = acpi_get_object_info(handle, &info);
155 if (!ACPI_FAILURE(status) && (info->valid & ACPI_VALID_UID))
156 uid = info->unique_id.string;
157
158 slot = sdhci_acpi_get_slot_by_ids(hid, uid);
159
160 kfree(info);
161 return slot;
162}
163
114static int sdhci_acpi_probe(struct platform_device *pdev) 164static int sdhci_acpi_probe(struct platform_device *pdev)
115{ 165{
116 struct device *dev = &pdev->dev; 166 struct device *dev = &pdev->dev;
@@ -148,7 +198,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
148 198
149 c = sdhci_priv(host); 199 c = sdhci_priv(host);
150 c->host = host; 200 c->host = host;
151 c->slot = sdhci_acpi_get_slot(hid); 201 c->slot = sdhci_acpi_get_slot(handle, hid);
152 c->pdev = pdev; 202 c->pdev = pdev;
153 c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM); 203 c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM);
154 204
@@ -202,6 +252,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
202 goto err_free; 252 goto err_free;
203 253
204 if (c->use_runtime_pm) { 254 if (c->use_runtime_pm) {
255 pm_runtime_set_active(dev);
205 pm_suspend_ignore_children(dev, 1); 256 pm_suspend_ignore_children(dev, 1);
206 pm_runtime_set_autosuspend_delay(dev, 50); 257 pm_runtime_set_autosuspend_delay(dev, 50);
207 pm_runtime_use_autosuspend(dev); 258 pm_runtime_use_autosuspend(dev);
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 67d6dde2ff19..d5f0d59e1310 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -85,6 +85,12 @@ struct pltfm_imx_data {
85 struct clk *clk_ipg; 85 struct clk *clk_ipg;
86 struct clk *clk_ahb; 86 struct clk *clk_ahb;
87 struct clk *clk_per; 87 struct clk *clk_per;
88 enum {
89 NO_CMD_PENDING, /* no multiblock command pending*/
90 MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */
91 WAIT_FOR_INT, /* sent CMD12, waiting for response INT */
92 } multiblock_status;
93
88}; 94};
89 95
90static struct platform_device_id imx_esdhc_devtype[] = { 96static struct platform_device_id imx_esdhc_devtype[] = {
@@ -154,6 +160,8 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i
154 160
155static u32 esdhc_readl_le(struct sdhci_host *host, int reg) 161static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
156{ 162{
163 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
164 struct pltfm_imx_data *imx_data = pltfm_host->priv;
157 u32 val = readl(host->ioaddr + reg); 165 u32 val = readl(host->ioaddr + reg);
158 166
159 if (unlikely(reg == SDHCI_CAPABILITIES)) { 167 if (unlikely(reg == SDHCI_CAPABILITIES)) {
@@ -175,6 +183,18 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
175 val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR; 183 val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR;
176 val |= SDHCI_INT_ADMA_ERROR; 184 val |= SDHCI_INT_ADMA_ERROR;
177 } 185 }
186
187 /*
188 * mask off the interrupt we get in response to the manually
189 * sent CMD12
190 */
191 if ((imx_data->multiblock_status == WAIT_FOR_INT) &&
192 ((val & SDHCI_INT_RESPONSE) == SDHCI_INT_RESPONSE)) {
193 val &= ~SDHCI_INT_RESPONSE;
194 writel(SDHCI_INT_RESPONSE, host->ioaddr +
195 SDHCI_INT_STATUS);
196 imx_data->multiblock_status = NO_CMD_PENDING;
197 }
178 } 198 }
179 199
180 return val; 200 return val;
@@ -211,6 +231,15 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
211 v = readl(host->ioaddr + ESDHC_VENDOR_SPEC); 231 v = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
212 v &= ~ESDHC_VENDOR_SPEC_SDIO_QUIRK; 232 v &= ~ESDHC_VENDOR_SPEC_SDIO_QUIRK;
213 writel(v, host->ioaddr + ESDHC_VENDOR_SPEC); 233 writel(v, host->ioaddr + ESDHC_VENDOR_SPEC);
234
235 if (imx_data->multiblock_status == MULTIBLK_IN_PROCESS)
236 {
237 /* send a manual CMD12 with RESPTYP=none */
238 data = MMC_STOP_TRANSMISSION << 24 |
239 SDHCI_CMD_ABORTCMD << 16;
240 writel(data, host->ioaddr + SDHCI_TRANSFER_MODE);
241 imx_data->multiblock_status = WAIT_FOR_INT;
242 }
214 } 243 }
215 244
216 if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) { 245 if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
@@ -277,11 +306,13 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
277 } 306 }
278 return; 307 return;
279 case SDHCI_COMMAND: 308 case SDHCI_COMMAND:
280 if ((host->cmd->opcode == MMC_STOP_TRANSMISSION || 309 if (host->cmd->opcode == MMC_STOP_TRANSMISSION)
281 host->cmd->opcode == MMC_SET_BLOCK_COUNT) &&
282 (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
283 val |= SDHCI_CMD_ABORTCMD; 310 val |= SDHCI_CMD_ABORTCMD;
284 311
312 if ((host->cmd->opcode == MMC_SET_BLOCK_COUNT) &&
313 (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
314 imx_data->multiblock_status = MULTIBLK_IN_PROCESS;
315
285 if (is_imx6q_usdhc(imx_data)) 316 if (is_imx6q_usdhc(imx_data))
286 writel(val << 16, 317 writel(val << 16,
287 host->ioaddr + SDHCI_TRANSFER_MODE); 318 host->ioaddr + SDHCI_TRANSFER_MODE);
@@ -324,8 +355,10 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
324 /* 355 /*
325 * Do not touch buswidth bits here. This is done in 356 * Do not touch buswidth bits here. This is done in
326 * esdhc_pltfm_bus_width. 357 * esdhc_pltfm_bus_width.
358 * Do not touch the D3CD bit either which is used for the
359 * SDIO interrupt errata workaround.
327 */ 360 */
328 mask = 0xffff & ~ESDHC_CTRL_BUSWIDTH_MASK; 361 mask = 0xffff & ~(ESDHC_CTRL_BUSWIDTH_MASK | ESDHC_CTRL_D3CD);
329 362
330 esdhc_clrset_le(host, mask, new_val, reg); 363 esdhc_clrset_le(host, mask, new_val, reg);
331 return; 364 return;
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 0012d3fdc999..701d06d0e1fb 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -33,6 +33,9 @@
33 */ 33 */
34#define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809 34#define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809
35#define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a 35#define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a
36#define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14
37#define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15
38#define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16
36 39
37/* 40/*
38 * PCI registers 41 * PCI registers
@@ -304,6 +307,33 @@ static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
304 .probe_slot = pch_hc_probe_slot, 307 .probe_slot = pch_hc_probe_slot,
305}; 308};
306 309
310static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
311{
312 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
313 slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
314 return 0;
315}
316
317static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
318{
319 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
320 return 0;
321}
322
323static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
324 .allow_runtime_pm = true,
325 .probe_slot = byt_emmc_probe_slot,
326};
327
328static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
329 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
330 .allow_runtime_pm = true,
331 .probe_slot = byt_sdio_probe_slot,
332};
333
334static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
335};
336
307/* O2Micro extra registers */ 337/* O2Micro extra registers */
308#define O2_SD_LOCK_WP 0xD3 338#define O2_SD_LOCK_WP 0xD3
309#define O2_SD_MULTI_VCC3V 0xEE 339#define O2_SD_MULTI_VCC3V 0xEE
@@ -856,6 +886,30 @@ static const struct pci_device_id pci_ids[] = {
856 }, 886 },
857 887
858 { 888 {
889 .vendor = PCI_VENDOR_ID_INTEL,
890 .device = PCI_DEVICE_ID_INTEL_BYT_EMMC,
891 .subvendor = PCI_ANY_ID,
892 .subdevice = PCI_ANY_ID,
893 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
894 },
895
896 {
897 .vendor = PCI_VENDOR_ID_INTEL,
898 .device = PCI_DEVICE_ID_INTEL_BYT_SDIO,
899 .subvendor = PCI_ANY_ID,
900 .subdevice = PCI_ANY_ID,
901 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
902 },
903
904 {
905 .vendor = PCI_VENDOR_ID_INTEL,
906 .device = PCI_DEVICE_ID_INTEL_BYT_SD,
907 .subvendor = PCI_ANY_ID,
908 .subdevice = PCI_ANY_ID,
909 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
910 },
911
912 {
859 .vendor = PCI_VENDOR_ID_O2, 913 .vendor = PCI_VENDOR_ID_O2,
860 .device = PCI_DEVICE_ID_O2_8120, 914 .device = PCI_DEVICE_ID_O2_8120,
861 .subvendor = PCI_ANY_ID, 915 .subvendor = PCI_ANY_ID,
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 29b846cbfb48..f97569613526 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -764,8 +764,8 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
764 struct net_device *bond_dev, *vlan_dev, *upper_dev; 764 struct net_device *bond_dev, *vlan_dev, *upper_dev;
765 struct vlan_entry *vlan; 765 struct vlan_entry *vlan;
766 766
767 rcu_read_lock();
768 read_lock(&bond->lock); 767 read_lock(&bond->lock);
768 rcu_read_lock();
769 769
770 bond_dev = bond->dev; 770 bond_dev = bond->dev;
771 771
@@ -787,12 +787,19 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
787 if (vlan_dev) 787 if (vlan_dev)
788 __bond_resend_igmp_join_requests(vlan_dev); 788 __bond_resend_igmp_join_requests(vlan_dev);
789 } 789 }
790 rcu_read_unlock();
790 791
791 if (--bond->igmp_retrans > 0) 792 /* We use curr_slave_lock to protect against concurrent access to
793 * igmp_retrans from multiple running instances of this function and
794 * bond_change_active_slave
795 */
796 write_lock_bh(&bond->curr_slave_lock);
797 if (bond->igmp_retrans > 1) {
798 bond->igmp_retrans--;
792 queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); 799 queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
793 800 }
801 write_unlock_bh(&bond->curr_slave_lock);
794 read_unlock(&bond->lock); 802 read_unlock(&bond->lock);
795 rcu_read_unlock();
796} 803}
797 804
798static void bond_resend_igmp_join_requests_delayed(struct work_struct *work) 805static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
@@ -1957,6 +1964,10 @@ err_free:
1957 1964
1958err_undo_flags: 1965err_undo_flags:
1959 bond_compute_features(bond); 1966 bond_compute_features(bond);
1967 /* Enslave of first slave has failed and we need to fix master's mac */
1968 if (bond->slave_cnt == 0 &&
1969 ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
1970 eth_hw_addr_random(bond_dev);
1960 1971
1961 return res; 1972 return res;
1962} 1973}
@@ -2402,7 +2413,8 @@ static void bond_miimon_commit(struct bonding *bond)
2402 2413
2403 pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n", 2414 pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
2404 bond->dev->name, slave->dev->name, 2415 bond->dev->name, slave->dev->name,
2405 slave->speed, slave->duplex ? "full" : "half"); 2416 slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
2417 slave->duplex ? "full" : "half");
2406 2418
2407 /* notify ad that the link status has changed */ 2419 /* notify ad that the link status has changed */
2408 if (bond->params.mode == BOND_MODE_8023AD) 2420 if (bond->params.mode == BOND_MODE_8023AD)
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 2baec24388b1..f989e1529a29 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -225,7 +225,7 @@ struct bonding {
225 rwlock_t curr_slave_lock; 225 rwlock_t curr_slave_lock;
226 u8 send_peer_notif; 226 u8 send_peer_notif;
227 s8 setup_by_slave; 227 s8 setup_by_slave;
228 s8 igmp_retrans; 228 u8 igmp_retrans;
229#ifdef CONFIG_PROC_FS 229#ifdef CONFIG_PROC_FS
230 struct proc_dir_entry *proc_entry; 230 struct proc_dir_entry *proc_entry;
231 char proc_file_name[IFNAMSIZ]; 231 char proc_file_name[IFNAMSIZ];
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 9b74d1e3ad44..6aa7b3266c80 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -612,9 +612,15 @@ static int esd_usb2_start(struct esd_usb2_net_priv *priv)
612{ 612{
613 struct esd_usb2 *dev = priv->usb2; 613 struct esd_usb2 *dev = priv->usb2;
614 struct net_device *netdev = priv->netdev; 614 struct net_device *netdev = priv->netdev;
615 struct esd_usb2_msg msg; 615 struct esd_usb2_msg *msg;
616 int err, i; 616 int err, i;
617 617
618 msg = kmalloc(sizeof(*msg), GFP_KERNEL);
619 if (!msg) {
620 err = -ENOMEM;
621 goto out;
622 }
623
618 /* 624 /*
619 * Enable all IDs 625 * Enable all IDs
620 * The IDADD message takes up to 64 32 bit bitmasks (2048 bits). 626 * The IDADD message takes up to 64 32 bit bitmasks (2048 bits).
@@ -628,33 +634,32 @@ static int esd_usb2_start(struct esd_usb2_net_priv *priv)
628 * the number of the starting bitmask (0..64) to the filter.option 634 * the number of the starting bitmask (0..64) to the filter.option
629 * field followed by only some bitmasks. 635 * field followed by only some bitmasks.
630 */ 636 */
631 msg.msg.hdr.cmd = CMD_IDADD; 637 msg->msg.hdr.cmd = CMD_IDADD;
632 msg.msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT; 638 msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
633 msg.msg.filter.net = priv->index; 639 msg->msg.filter.net = priv->index;
634 msg.msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */ 640 msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
635 for (i = 0; i < ESD_MAX_ID_SEGMENT; i++) 641 for (i = 0; i < ESD_MAX_ID_SEGMENT; i++)
636 msg.msg.filter.mask[i] = cpu_to_le32(0xffffffff); 642 msg->msg.filter.mask[i] = cpu_to_le32(0xffffffff);
637 /* enable 29bit extended IDs */ 643 /* enable 29bit extended IDs */
638 msg.msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001); 644 msg->msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001);
639 645
640 err = esd_usb2_send_msg(dev, &msg); 646 err = esd_usb2_send_msg(dev, msg);
641 if (err) 647 if (err)
642 goto failed; 648 goto out;
643 649
644 err = esd_usb2_setup_rx_urbs(dev); 650 err = esd_usb2_setup_rx_urbs(dev);
645 if (err) 651 if (err)
646 goto failed; 652 goto out;
647 653
648 priv->can.state = CAN_STATE_ERROR_ACTIVE; 654 priv->can.state = CAN_STATE_ERROR_ACTIVE;
649 655
650 return 0; 656out:
651
652failed:
653 if (err == -ENODEV) 657 if (err == -ENODEV)
654 netif_device_detach(netdev); 658 netif_device_detach(netdev);
659 if (err)
660 netdev_err(netdev, "couldn't start device: %d\n", err);
655 661
656 netdev_err(netdev, "couldn't start device: %d\n", err); 662 kfree(msg);
657
658 return err; 663 return err;
659} 664}
660 665
@@ -833,26 +838,30 @@ nourbmem:
833static int esd_usb2_close(struct net_device *netdev) 838static int esd_usb2_close(struct net_device *netdev)
834{ 839{
835 struct esd_usb2_net_priv *priv = netdev_priv(netdev); 840 struct esd_usb2_net_priv *priv = netdev_priv(netdev);
836 struct esd_usb2_msg msg; 841 struct esd_usb2_msg *msg;
837 int i; 842 int i;
838 843
844 msg = kmalloc(sizeof(*msg), GFP_KERNEL);
845 if (!msg)
846 return -ENOMEM;
847
839 /* Disable all IDs (see esd_usb2_start()) */ 848 /* Disable all IDs (see esd_usb2_start()) */
840 msg.msg.hdr.cmd = CMD_IDADD; 849 msg->msg.hdr.cmd = CMD_IDADD;
841 msg.msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT; 850 msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
842 msg.msg.filter.net = priv->index; 851 msg->msg.filter.net = priv->index;
843 msg.msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */ 852 msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
844 for (i = 0; i <= ESD_MAX_ID_SEGMENT; i++) 853 for (i = 0; i <= ESD_MAX_ID_SEGMENT; i++)
845 msg.msg.filter.mask[i] = 0; 854 msg->msg.filter.mask[i] = 0;
846 if (esd_usb2_send_msg(priv->usb2, &msg) < 0) 855 if (esd_usb2_send_msg(priv->usb2, msg) < 0)
847 netdev_err(netdev, "sending idadd message failed\n"); 856 netdev_err(netdev, "sending idadd message failed\n");
848 857
849 /* set CAN controller to reset mode */ 858 /* set CAN controller to reset mode */
850 msg.msg.hdr.len = 2; 859 msg->msg.hdr.len = 2;
851 msg.msg.hdr.cmd = CMD_SETBAUD; 860 msg->msg.hdr.cmd = CMD_SETBAUD;
852 msg.msg.setbaud.net = priv->index; 861 msg->msg.setbaud.net = priv->index;
853 msg.msg.setbaud.rsvd = 0; 862 msg->msg.setbaud.rsvd = 0;
854 msg.msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE); 863 msg->msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE);
855 if (esd_usb2_send_msg(priv->usb2, &msg) < 0) 864 if (esd_usb2_send_msg(priv->usb2, msg) < 0)
856 netdev_err(netdev, "sending setbaud message failed\n"); 865 netdev_err(netdev, "sending setbaud message failed\n");
857 866
858 priv->can.state = CAN_STATE_STOPPED; 867 priv->can.state = CAN_STATE_STOPPED;
@@ -861,6 +870,8 @@ static int esd_usb2_close(struct net_device *netdev)
861 870
862 close_candev(netdev); 871 close_candev(netdev);
863 872
873 kfree(msg);
874
864 return 0; 875 return 0;
865} 876}
866 877
@@ -886,7 +897,8 @@ static int esd_usb2_set_bittiming(struct net_device *netdev)
886{ 897{
887 struct esd_usb2_net_priv *priv = netdev_priv(netdev); 898 struct esd_usb2_net_priv *priv = netdev_priv(netdev);
888 struct can_bittiming *bt = &priv->can.bittiming; 899 struct can_bittiming *bt = &priv->can.bittiming;
889 struct esd_usb2_msg msg; 900 struct esd_usb2_msg *msg;
901 int err;
890 u32 canbtr; 902 u32 canbtr;
891 int sjw_shift; 903 int sjw_shift;
892 904
@@ -912,15 +924,22 @@ static int esd_usb2_set_bittiming(struct net_device *netdev)
912 if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) 924 if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
913 canbtr |= ESD_USB2_3_SAMPLES; 925 canbtr |= ESD_USB2_3_SAMPLES;
914 926
915 msg.msg.hdr.len = 2; 927 msg = kmalloc(sizeof(*msg), GFP_KERNEL);
916 msg.msg.hdr.cmd = CMD_SETBAUD; 928 if (!msg)
917 msg.msg.setbaud.net = priv->index; 929 return -ENOMEM;
918 msg.msg.setbaud.rsvd = 0; 930
919 msg.msg.setbaud.baud = cpu_to_le32(canbtr); 931 msg->msg.hdr.len = 2;
932 msg->msg.hdr.cmd = CMD_SETBAUD;
933 msg->msg.setbaud.net = priv->index;
934 msg->msg.setbaud.rsvd = 0;
935 msg->msg.setbaud.baud = cpu_to_le32(canbtr);
920 936
921 netdev_info(netdev, "setting BTR=%#x\n", canbtr); 937 netdev_info(netdev, "setting BTR=%#x\n", canbtr);
922 938
923 return esd_usb2_send_msg(priv->usb2, &msg); 939 err = esd_usb2_send_msg(priv->usb2, msg);
940
941 kfree(msg);
942 return err;
924} 943}
925 944
926static int esd_usb2_get_berr_counter(const struct net_device *netdev, 945static int esd_usb2_get_berr_counter(const struct net_device *netdev,
@@ -1022,7 +1041,7 @@ static int esd_usb2_probe(struct usb_interface *intf,
1022 const struct usb_device_id *id) 1041 const struct usb_device_id *id)
1023{ 1042{
1024 struct esd_usb2 *dev; 1043 struct esd_usb2 *dev;
1025 struct esd_usb2_msg msg; 1044 struct esd_usb2_msg *msg;
1026 int i, err; 1045 int i, err;
1027 1046
1028 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1047 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1037,27 +1056,33 @@ static int esd_usb2_probe(struct usb_interface *intf,
1037 1056
1038 usb_set_intfdata(intf, dev); 1057 usb_set_intfdata(intf, dev);
1039 1058
1059 msg = kmalloc(sizeof(*msg), GFP_KERNEL);
1060 if (!msg) {
1061 err = -ENOMEM;
1062 goto free_msg;
1063 }
1064
1040 /* query number of CAN interfaces (nets) */ 1065 /* query number of CAN interfaces (nets) */
1041 msg.msg.hdr.cmd = CMD_VERSION; 1066 msg->msg.hdr.cmd = CMD_VERSION;
1042 msg.msg.hdr.len = 2; 1067 msg->msg.hdr.len = 2;
1043 msg.msg.version.rsvd = 0; 1068 msg->msg.version.rsvd = 0;
1044 msg.msg.version.flags = 0; 1069 msg->msg.version.flags = 0;
1045 msg.msg.version.drv_version = 0; 1070 msg->msg.version.drv_version = 0;
1046 1071
1047 err = esd_usb2_send_msg(dev, &msg); 1072 err = esd_usb2_send_msg(dev, msg);
1048 if (err < 0) { 1073 if (err < 0) {
1049 dev_err(&intf->dev, "sending version message failed\n"); 1074 dev_err(&intf->dev, "sending version message failed\n");
1050 goto free_dev; 1075 goto free_msg;
1051 } 1076 }
1052 1077
1053 err = esd_usb2_wait_msg(dev, &msg); 1078 err = esd_usb2_wait_msg(dev, msg);
1054 if (err < 0) { 1079 if (err < 0) {
1055 dev_err(&intf->dev, "no version message answer\n"); 1080 dev_err(&intf->dev, "no version message answer\n");
1056 goto free_dev; 1081 goto free_msg;
1057 } 1082 }
1058 1083
1059 dev->net_count = (int)msg.msg.version_reply.nets; 1084 dev->net_count = (int)msg->msg.version_reply.nets;
1060 dev->version = le32_to_cpu(msg.msg.version_reply.version); 1085 dev->version = le32_to_cpu(msg->msg.version_reply.version);
1061 1086
1062 if (device_create_file(&intf->dev, &dev_attr_firmware)) 1087 if (device_create_file(&intf->dev, &dev_attr_firmware))
1063 dev_err(&intf->dev, 1088 dev_err(&intf->dev,
@@ -1075,10 +1100,10 @@ static int esd_usb2_probe(struct usb_interface *intf,
1075 for (i = 0; i < dev->net_count; i++) 1100 for (i = 0; i < dev->net_count; i++)
1076 esd_usb2_probe_one_net(intf, i); 1101 esd_usb2_probe_one_net(intf, i);
1077 1102
1078 return 0; 1103free_msg:
1079 1104 kfree(msg);
1080free_dev: 1105 if (err)
1081 kfree(dev); 1106 kfree(dev);
1082done: 1107done:
1083 return err; 1108 return err;
1084} 1109}
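The esd_usb2 changes above move struct esd_usb2_msg off the stack and onto the heap: buffers handed to the USB core must be DMA-able, so on-stack arrays are not allowed as transfer buffers. A minimal sketch of the pattern, with a hypothetical command layout and endpoint:

#include <linux/slab.h>
#include <linux/usb.h>

static int send_cmd(struct usb_device *udev, u8 cmd)
{
	u8 *buf;
	int actual = 0, err;

	buf = kmalloc(8, GFP_KERNEL);	/* DMA-able, unlike a stack array */
	if (!buf)
		return -ENOMEM;

	buf[0] = cmd;	/* hypothetical one-byte command, zero padding */
	err = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 2), buf, 8,
			   &actual, 1000);
	kfree(buf);
	return err;
}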
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 45cb9f3c1324..3b9546588240 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -136,6 +136,9 @@
136#define KVASER_CTRL_MODE_SELFRECEPTION 3 136#define KVASER_CTRL_MODE_SELFRECEPTION 3
137#define KVASER_CTRL_MODE_OFF 4 137#define KVASER_CTRL_MODE_OFF 4
138 138
139/* log message */
140#define KVASER_EXTENDED_FRAME BIT(31)
141
139struct kvaser_msg_simple { 142struct kvaser_msg_simple {
140 u8 tid; 143 u8 tid;
141 u8 channel; 144 u8 channel;
@@ -817,8 +820,13 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
817 priv = dev->nets[channel]; 820 priv = dev->nets[channel];
818 stats = &priv->netdev->stats; 821 stats = &priv->netdev->stats;
819 822
820 if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME | MSG_FLAG_NERR | 823 if ((msg->u.rx_can.flag & MSG_FLAG_ERROR_FRAME) &&
821 MSG_FLAG_OVERRUN)) { 824 (msg->id == CMD_LOG_MESSAGE)) {
825 kvaser_usb_rx_error(dev, msg);
826 return;
827 } else if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME |
828 MSG_FLAG_NERR |
829 MSG_FLAG_OVERRUN)) {
822 kvaser_usb_rx_can_err(priv, msg); 830 kvaser_usb_rx_can_err(priv, msg);
823 return; 831 return;
824 } else if (msg->u.rx_can.flag & ~MSG_FLAG_REMOTE_FRAME) { 832 } else if (msg->u.rx_can.flag & ~MSG_FLAG_REMOTE_FRAME) {
@@ -834,22 +842,40 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
834 return; 842 return;
835 } 843 }
836 844
837 cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) | 845 if (msg->id == CMD_LOG_MESSAGE) {
838 (msg->u.rx_can.msg[1] & 0x3f); 846 cf->can_id = le32_to_cpu(msg->u.log_message.id);
839 cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]); 847 if (cf->can_id & KVASER_EXTENDED_FRAME)
848 cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
849 else
850 cf->can_id &= CAN_SFF_MASK;
840 851
841 if (msg->id == CMD_RX_EXT_MESSAGE) { 852 cf->can_dlc = get_can_dlc(msg->u.log_message.dlc);
842 cf->can_id <<= 18;
843 cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) |
844 ((msg->u.rx_can.msg[3] & 0xff) << 6) |
845 (msg->u.rx_can.msg[4] & 0x3f);
846 cf->can_id |= CAN_EFF_FLAG;
847 }
848 853
849 if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME) 854 if (msg->u.log_message.flags & MSG_FLAG_REMOTE_FRAME)
850 cf->can_id |= CAN_RTR_FLAG; 855 cf->can_id |= CAN_RTR_FLAG;
851 else 856 else
852 memcpy(cf->data, &msg->u.rx_can.msg[6], cf->can_dlc); 857 memcpy(cf->data, &msg->u.log_message.data,
858 cf->can_dlc);
859 } else {
860 cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) |
861 (msg->u.rx_can.msg[1] & 0x3f);
862
863 if (msg->id == CMD_RX_EXT_MESSAGE) {
864 cf->can_id <<= 18;
865 cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) |
866 ((msg->u.rx_can.msg[3] & 0xff) << 6) |
867 (msg->u.rx_can.msg[4] & 0x3f);
868 cf->can_id |= CAN_EFF_FLAG;
869 }
870
871 cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]);
872
873 if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME)
874 cf->can_id |= CAN_RTR_FLAG;
875 else
876 memcpy(cf->data, &msg->u.rx_can.msg[6],
877 cf->can_dlc);
878 }
853 879
854 netif_rx(skb); 880 netif_rx(skb);
855 881
@@ -911,6 +937,7 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
911 937
912 case CMD_RX_STD_MESSAGE: 938 case CMD_RX_STD_MESSAGE:
913 case CMD_RX_EXT_MESSAGE: 939 case CMD_RX_EXT_MESSAGE:
940 case CMD_LOG_MESSAGE:
914 kvaser_usb_rx_can_msg(dev, msg); 941 kvaser_usb_rx_can_msg(dev, msg);
915 break; 942 break;
916 943
@@ -919,11 +946,6 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
919 kvaser_usb_rx_error(dev, msg); 946 kvaser_usb_rx_error(dev, msg);
920 break; 947 break;
921 948
922 case CMD_LOG_MESSAGE:
923 if (msg->u.log_message.flags & MSG_FLAG_ERROR_FRAME)
924 kvaser_usb_rx_error(dev, msg);
925 break;
926
927 case CMD_TX_ACKNOWLEDGE: 949 case CMD_TX_ACKNOWLEDGE:
928 kvaser_usb_tx_acknowledge(dev, msg); 950 kvaser_usb_tx_acknowledge(dev, msg);
929 break; 951 break;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 30d79bfa5b10..8ee9d1556e6e 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -504,15 +504,24 @@ static int pcan_usb_pro_restart_async(struct peak_usb_device *dev,
504 return usb_submit_urb(urb, GFP_ATOMIC); 504 return usb_submit_urb(urb, GFP_ATOMIC);
505} 505}
506 506
507static void pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded) 507static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
508{ 508{
509 u8 buffer[16]; 509 u8 *buffer;
510 int err;
511
512 buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
513 if (!buffer)
514 return -ENOMEM;
510 515
511 buffer[0] = 0; 516 buffer[0] = 0;
512 buffer[1] = !!loaded; 517 buffer[1] = !!loaded;
513 518
514 pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_FCT, 519 err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_FCT,
515 PCAN_USBPRO_FCT_DRVLD, buffer, sizeof(buffer)); 520 PCAN_USBPRO_FCT_DRVLD, buffer,
521 PCAN_USBPRO_FCT_DRVLD_REQ_LEN);
522 kfree(buffer);
523
524 return err;
516} 525}
517 526
518static inline 527static inline
@@ -851,21 +860,24 @@ static int pcan_usb_pro_stop(struct peak_usb_device *dev)
851 */ 860 */
852static int pcan_usb_pro_init(struct peak_usb_device *dev) 861static int pcan_usb_pro_init(struct peak_usb_device *dev)
853{ 862{
854 struct pcan_usb_pro_interface *usb_if;
855 struct pcan_usb_pro_device *pdev = 863 struct pcan_usb_pro_device *pdev =
856 container_of(dev, struct pcan_usb_pro_device, dev); 864 container_of(dev, struct pcan_usb_pro_device, dev);
865 struct pcan_usb_pro_interface *usb_if = NULL;
866 struct pcan_usb_pro_fwinfo *fi = NULL;
867 struct pcan_usb_pro_blinfo *bi = NULL;
868 int err;
857 869
858 /* do this for 1st channel only */ 870 /* do this for 1st channel only */
859 if (!dev->prev_siblings) { 871 if (!dev->prev_siblings) {
860 struct pcan_usb_pro_fwinfo fi;
861 struct pcan_usb_pro_blinfo bi;
862 int err;
863
864 /* allocate netdevices common structure attached to first one */ 872 /* allocate netdevices common structure attached to first one */
865 usb_if = kzalloc(sizeof(struct pcan_usb_pro_interface), 873 usb_if = kzalloc(sizeof(struct pcan_usb_pro_interface),
866 GFP_KERNEL); 874 GFP_KERNEL);
867 if (!usb_if) 875 fi = kmalloc(sizeof(struct pcan_usb_pro_fwinfo), GFP_KERNEL);
868 return -ENOMEM; 876 bi = kmalloc(sizeof(struct pcan_usb_pro_blinfo), GFP_KERNEL);
877 if (!usb_if || !fi || !bi) {
878 err = -ENOMEM;
879 goto err_out;
880 }
869 881
870 /* number of ts msgs to ignore before taking one into account */ 882 /* number of ts msgs to ignore before taking one into account */
871 usb_if->cm_ignore_count = 5; 883 usb_if->cm_ignore_count = 5;
@@ -877,34 +889,34 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
877 */ 889 */
878 err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO, 890 err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
879 PCAN_USBPRO_INFO_FW, 891 PCAN_USBPRO_INFO_FW,
880 &fi, sizeof(fi)); 892 fi, sizeof(*fi));
881 if (err) { 893 if (err) {
882 kfree(usb_if);
883 dev_err(dev->netdev->dev.parent, 894 dev_err(dev->netdev->dev.parent,
884 "unable to read %s firmware info (err %d)\n", 895 "unable to read %s firmware info (err %d)\n",
885 pcan_usb_pro.name, err); 896 pcan_usb_pro.name, err);
886 return err; 897 goto err_out;
887 } 898 }
888 899
889 err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO, 900 err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
890 PCAN_USBPRO_INFO_BL, 901 PCAN_USBPRO_INFO_BL,
891 &bi, sizeof(bi)); 902 bi, sizeof(*bi));
892 if (err) { 903 if (err) {
893 kfree(usb_if);
894 dev_err(dev->netdev->dev.parent, 904 dev_err(dev->netdev->dev.parent,
895 "unable to read %s bootloader info (err %d)\n", 905 "unable to read %s bootloader info (err %d)\n",
896 pcan_usb_pro.name, err); 906 pcan_usb_pro.name, err);
897 return err; 907 goto err_out;
898 } 908 }
899 909
910 /* tell the device the can driver is running */
911 err = pcan_usb_pro_drv_loaded(dev, 1);
912 if (err)
913 goto err_out;
914
900 dev_info(dev->netdev->dev.parent, 915 dev_info(dev->netdev->dev.parent,
901 "PEAK-System %s hwrev %u serial %08X.%08X (%u channels)\n", 916 "PEAK-System %s hwrev %u serial %08X.%08X (%u channels)\n",
902 pcan_usb_pro.name, 917 pcan_usb_pro.name,
903 bi.hw_rev, bi.serial_num_hi, bi.serial_num_lo, 918 bi->hw_rev, bi->serial_num_hi, bi->serial_num_lo,
904 pcan_usb_pro.ctrl_count); 919 pcan_usb_pro.ctrl_count);
905
906 /* tell the device the can driver is running */
907 pcan_usb_pro_drv_loaded(dev, 1);
908 } else { 920 } else {
909 usb_if = pcan_usb_pro_dev_if(dev->prev_siblings); 921 usb_if = pcan_usb_pro_dev_if(dev->prev_siblings);
910 } 922 }
@@ -916,6 +928,13 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
916 pcan_usb_pro_set_led(dev, 0, 1); 928 pcan_usb_pro_set_led(dev, 0, 1);
917 929
918 return 0; 930 return 0;
931
932 err_out:
933 kfree(bi);
934 kfree(fi);
935 kfree(usb_if);
936
937 return err;
919} 938}
920 939
921static void pcan_usb_pro_exit(struct peak_usb_device *dev) 940static void pcan_usb_pro_exit(struct peak_usb_device *dev)
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
index a869918c5620..32275af547e0 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
@@ -29,6 +29,7 @@
29 29
30/* Vendor Request value for XXX_FCT */ 30/* Vendor Request value for XXX_FCT */
31#define PCAN_USBPRO_FCT_DRVLD 5 /* tell device driver is loaded */ 31#define PCAN_USBPRO_FCT_DRVLD 5 /* tell device driver is loaded */
32#define PCAN_USBPRO_FCT_DRVLD_REQ_LEN 16
32 33
33/* PCAN_USBPRO_INFO_BL vendor request record type */ 34/* PCAN_USBPRO_INFO_BL vendor request record type */
34struct __packed pcan_usb_pro_blinfo { 35struct __packed pcan_usb_pro_blinfo {
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 6e15ef08f301..cbd388eea682 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -977,7 +977,7 @@ static int usb_8dev_probe(struct usb_interface *intf,
977 err = usb_8dev_cmd_version(priv, &version); 977 err = usb_8dev_cmd_version(priv, &version);
978 if (err) { 978 if (err) {
979 netdev_err(netdev, "can't get firmware version\n"); 979 netdev_err(netdev, "can't get firmware version\n");
980 goto cleanup_cmd_msg_buffer; 980 goto cleanup_unregister_candev;
981 } else { 981 } else {
982 netdev_info(netdev, 982 netdev_info(netdev,
983 "firmware: %d.%d, hardware: %d.%d\n", 983 "firmware: %d.%d, hardware: %d.%d\n",
@@ -989,6 +989,9 @@ static int usb_8dev_probe(struct usb_interface *intf,
989 989
990 return 0; 990 return 0;
991 991
992cleanup_unregister_candev:
993 unregister_netdev(priv->netdev);
994
992cleanup_cmd_msg_buffer: 995cleanup_cmd_msg_buffer:
993 kfree(priv->cmd_msg_buffer); 996 kfree(priv->cmd_msg_buffer);
994 997
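The usb_8dev fix above adds a cleanup_unregister_candev label so a failure after register_candev() unwinds in reverse order: unregister the netdev first, then free the command buffer. A generic sketch of that goto unwind ladder, with placeholder acquire/release helpers standing in for whatever a real probe() grabs:

#include <linux/errno.h>

static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return -ENOMEM; }
static void release_a(void) { }
static void release_b(void) { }

static int example_probe(void)
{
	int err;

	err = acquire_a();
	if (err)
		goto out;

	err = acquire_b();
	if (err)
		goto err_a;

	err = acquire_c();
	if (err)
		goto err_b;	/* failed after b: undo b, then a */

	return 0;

err_b:
	release_b();
err_a:
	release_a();
out:
	return err;
}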
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 36d6abd1cfff..ad6aa1e98348 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -67,4 +67,22 @@ config ATL1C
67 To compile this driver as a module, choose M here. The module 67 To compile this driver as a module, choose M here. The module
68 will be called atl1c. 68 will be called atl1c.
69 69
70config ALX
71 tristate "Qualcomm Atheros AR816x/AR817x support"
72 depends on PCI
73 select CRC32
74 select NET_CORE
75 select MDIO
76 help
77 This driver supports the Qualcomm Atheros L1F ethernet adapter,
78 i.e. the following chipsets:
79
80 1969:1091 - AR8161 Gigabit Ethernet
81 1969:1090 - AR8162 Fast Ethernet
82 1969:10A1 - AR8171 Gigabit Ethernet
83 1969:10A0 - AR8172 Fast Ethernet
84
85 To compile this driver as a module, choose M here. The module
86 will be called alx.
87
70endif # NET_VENDOR_ATHEROS 88endif # NET_VENDOR_ATHEROS
diff --git a/drivers/net/ethernet/atheros/Makefile b/drivers/net/ethernet/atheros/Makefile
index e7e76fb576ff..5cf1c65bbce9 100644
--- a/drivers/net/ethernet/atheros/Makefile
+++ b/drivers/net/ethernet/atheros/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_ATL1) += atlx/
6obj-$(CONFIG_ATL2) += atlx/ 6obj-$(CONFIG_ATL2) += atlx/
7obj-$(CONFIG_ATL1E) += atl1e/ 7obj-$(CONFIG_ATL1E) += atl1e/
8obj-$(CONFIG_ATL1C) += atl1c/ 8obj-$(CONFIG_ATL1C) += atl1c/
9obj-$(CONFIG_ALX) += alx/
diff --git a/drivers/net/ethernet/atheros/alx/Makefile b/drivers/net/ethernet/atheros/alx/Makefile
new file mode 100644
index 000000000000..5901fa407d52
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_ALX) += alx.o
2alx-objs := main.o ethtool.o hw.o
3ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
new file mode 100644
index 000000000000..50b3ae2b143d
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -0,0 +1,114 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35#ifndef _ALX_H_
36#define _ALX_H_
37
38#include <linux/types.h>
39#include <linux/etherdevice.h>
40#include <linux/dma-mapping.h>
41#include <linux/spinlock.h>
42#include "hw.h"
43
44#define ALX_WATCHDOG_TIME (5 * HZ)
45
46struct alx_buffer {
47 struct sk_buff *skb;
48 DEFINE_DMA_UNMAP_ADDR(dma);
49 DEFINE_DMA_UNMAP_LEN(size);
50};
51
52struct alx_rx_queue {
53 struct alx_rrd *rrd;
54 dma_addr_t rrd_dma;
55
56 struct alx_rfd *rfd;
57 dma_addr_t rfd_dma;
58
59 struct alx_buffer *bufs;
60
61 u16 write_idx, read_idx;
62 u16 rrd_read_idx;
63};
64#define ALX_RX_ALLOC_THRESH 32
65
66struct alx_tx_queue {
67 struct alx_txd *tpd;
68 dma_addr_t tpd_dma;
69 struct alx_buffer *bufs;
70 u16 write_idx, read_idx;
71};
72
73#define ALX_DEFAULT_TX_WORK 128
74
75enum alx_device_quirks {
76 ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG = BIT(0),
77};
78
79struct alx_priv {
80 struct net_device *dev;
81
82 struct alx_hw hw;
83
84 /* all descriptor memory */
85 struct {
86 dma_addr_t dma;
87 void *virt;
88 int size;
89 } descmem;
90
91 /* protect int_mask updates */
92 spinlock_t irq_lock;
93 u32 int_mask;
94
95 int tx_ringsz;
96 int rx_ringsz;
97 int rxbuf_size;
98
99 struct napi_struct napi;
100 struct alx_tx_queue txq;
101 struct alx_rx_queue rxq;
102
103 struct work_struct link_check_wk;
104 struct work_struct reset_wk;
105
106 u16 msg_enable;
107
108 bool msi;
109};
110
111extern const struct ethtool_ops alx_ethtool_ops;
112extern const char alx_drv_name[];
113
114#endif
diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
new file mode 100644
index 000000000000..6fa2aec2bc81
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/ethtool.c
@@ -0,0 +1,272 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35#include <linux/pci.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/netdevice.h>
39#include <linux/etherdevice.h>
40#include <linux/ethtool.h>
41#include <linux/mdio.h>
42#include <linux/interrupt.h>
43#include <asm/byteorder.h>
44
45#include "alx.h"
46#include "reg.h"
47#include "hw.h"
48
49
50static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
51{
52 struct alx_priv *alx = netdev_priv(netdev);
53 struct alx_hw *hw = &alx->hw;
54
55 ecmd->supported = SUPPORTED_10baseT_Half |
56 SUPPORTED_10baseT_Full |
57 SUPPORTED_100baseT_Half |
58 SUPPORTED_100baseT_Full |
59 SUPPORTED_Autoneg |
60 SUPPORTED_TP |
61 SUPPORTED_Pause;
62 if (alx_hw_giga(hw))
63 ecmd->supported |= SUPPORTED_1000baseT_Full;
64
65 ecmd->advertising = ADVERTISED_TP;
66 if (hw->adv_cfg & ADVERTISED_Autoneg)
67 ecmd->advertising |= hw->adv_cfg;
68
69 ecmd->port = PORT_TP;
70 ecmd->phy_address = 0;
71 if (hw->adv_cfg & ADVERTISED_Autoneg)
72 ecmd->autoneg = AUTONEG_ENABLE;
73 else
74 ecmd->autoneg = AUTONEG_DISABLE;
75 ecmd->transceiver = XCVR_INTERNAL;
76
77 if (hw->flowctrl & ALX_FC_ANEG && hw->adv_cfg & ADVERTISED_Autoneg) {
78 if (hw->flowctrl & ALX_FC_RX) {
79 ecmd->advertising |= ADVERTISED_Pause;
80
81 if (!(hw->flowctrl & ALX_FC_TX))
82 ecmd->advertising |= ADVERTISED_Asym_Pause;
83 } else if (hw->flowctrl & ALX_FC_TX) {
84 ecmd->advertising |= ADVERTISED_Asym_Pause;
85 }
86 }
87
88 if (hw->link_speed != SPEED_UNKNOWN) {
89 ethtool_cmd_speed_set(ecmd,
90 hw->link_speed - hw->link_speed % 10);
91 ecmd->duplex = hw->link_speed % 10;
92 } else {
93 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
94 ecmd->duplex = DUPLEX_UNKNOWN;
95 }
96
97 return 0;
98}
99
100static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
101{
102 struct alx_priv *alx = netdev_priv(netdev);
103 struct alx_hw *hw = &alx->hw;
104 u32 adv_cfg;
105
106 ASSERT_RTNL();
107
108 if (ecmd->autoneg == AUTONEG_ENABLE) {
109 if (ecmd->advertising & ADVERTISED_1000baseT_Half)
110 return -EINVAL;
111 adv_cfg = ecmd->advertising | ADVERTISED_Autoneg;
112 } else {
113 int speed = ethtool_cmd_speed(ecmd);
114
115 switch (speed + ecmd->duplex) {
116 case SPEED_10 + DUPLEX_HALF:
117 adv_cfg = ADVERTISED_10baseT_Half;
118 break;
119 case SPEED_10 + DUPLEX_FULL:
120 adv_cfg = ADVERTISED_10baseT_Full;
121 break;
122 case SPEED_100 + DUPLEX_HALF:
123 adv_cfg = ADVERTISED_100baseT_Half;
124 break;
125 case SPEED_100 + DUPLEX_FULL:
126 adv_cfg = ADVERTISED_100baseT_Full;
127 break;
128 default:
129 return -EINVAL;
130 }
131 }
132
133 hw->adv_cfg = adv_cfg;
134 return alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl);
135}
136
137static void alx_get_pauseparam(struct net_device *netdev,
138 struct ethtool_pauseparam *pause)
139{
140 struct alx_priv *alx = netdev_priv(netdev);
141 struct alx_hw *hw = &alx->hw;
142
143 if (hw->flowctrl & ALX_FC_ANEG &&
144 hw->adv_cfg & ADVERTISED_Autoneg)
145 pause->autoneg = AUTONEG_ENABLE;
146 else
147 pause->autoneg = AUTONEG_DISABLE;
148
149 if (hw->flowctrl & ALX_FC_TX)
150 pause->tx_pause = 1;
151 else
152 pause->tx_pause = 0;
153
154 if (hw->flowctrl & ALX_FC_RX)
155 pause->rx_pause = 1;
156 else
157 pause->rx_pause = 0;
158}
159
160
161static int alx_set_pauseparam(struct net_device *netdev,
162 struct ethtool_pauseparam *pause)
163{
164 struct alx_priv *alx = netdev_priv(netdev);
165 struct alx_hw *hw = &alx->hw;
166 int err = 0;
167 bool reconfig_phy = false;
168 u8 fc = 0;
169
170 if (pause->tx_pause)
171 fc |= ALX_FC_TX;
172 if (pause->rx_pause)
173 fc |= ALX_FC_RX;
174 if (pause->autoneg)
175 fc |= ALX_FC_ANEG;
176
177 ASSERT_RTNL();
178
179 /* restart auto-neg for auto-mode */
180 if (hw->adv_cfg & ADVERTISED_Autoneg) {
181 if (!((fc ^ hw->flowctrl) & ALX_FC_ANEG))
182 reconfig_phy = true;
183 if (fc & hw->flowctrl & ALX_FC_ANEG &&
184 (fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
185 reconfig_phy = true;
186 }
187
188 if (reconfig_phy) {
189 err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc);
190 return err;
191 }
192
193 /* flow control on mac */
194 if ((fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
195 alx_cfg_mac_flowcontrol(hw, fc);
196
197 hw->flowctrl = fc;
198
199 return 0;
200}
201
202static u32 alx_get_msglevel(struct net_device *netdev)
203{
204 struct alx_priv *alx = netdev_priv(netdev);
205
206 return alx->msg_enable;
207}
208
209static void alx_set_msglevel(struct net_device *netdev, u32 data)
210{
211 struct alx_priv *alx = netdev_priv(netdev);
212
213 alx->msg_enable = data;
214}
215
216static void alx_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
217{
218 struct alx_priv *alx = netdev_priv(netdev);
219 struct alx_hw *hw = &alx->hw;
220
221 wol->supported = WAKE_MAGIC | WAKE_PHY;
222 wol->wolopts = 0;
223
224 if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
225 wol->wolopts |= WAKE_MAGIC;
226 if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY)
227 wol->wolopts |= WAKE_PHY;
228}
229
230static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
231{
232 struct alx_priv *alx = netdev_priv(netdev);
233 struct alx_hw *hw = &alx->hw;
234
235 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
236 WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
237 return -EOPNOTSUPP;
238
239 hw->sleep_ctrl = 0;
240
241 if (wol->wolopts & WAKE_MAGIC)
242 hw->sleep_ctrl |= ALX_SLEEP_WOL_MAGIC;
243 if (wol->wolopts & WAKE_PHY)
244 hw->sleep_ctrl |= ALX_SLEEP_WOL_PHY;
245
246 device_set_wakeup_enable(&alx->hw.pdev->dev, hw->sleep_ctrl);
247
248 return 0;
249}
250
251static void alx_get_drvinfo(struct net_device *netdev,
252 struct ethtool_drvinfo *drvinfo)
253{
254 struct alx_priv *alx = netdev_priv(netdev);
255
256 strlcpy(drvinfo->driver, alx_drv_name, sizeof(drvinfo->driver));
257 strlcpy(drvinfo->bus_info, pci_name(alx->hw.pdev),
258 sizeof(drvinfo->bus_info));
259}
260
261const struct ethtool_ops alx_ethtool_ops = {
262 .get_settings = alx_get_settings,
263 .set_settings = alx_set_settings,
264 .get_pauseparam = alx_get_pauseparam,
265 .set_pauseparam = alx_set_pauseparam,
266 .get_drvinfo = alx_get_drvinfo,
267 .get_msglevel = alx_get_msglevel,
268 .set_msglevel = alx_set_msglevel,
269 .get_wol = alx_get_wol,
270 .set_wol = alx_set_wol,
271 .get_link = ethtool_op_get_link,
272};
diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c
new file mode 100644
index 000000000000..220a16ad0e49
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/hw.c
@@ -0,0 +1,1226 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34#include <linux/etherdevice.h>
35#include <linux/delay.h>
36#include <linux/pci.h>
37#include <linux/mdio.h>
38#include "reg.h"
39#include "hw.h"
40
41static inline bool alx_is_rev_a(u8 rev)
42{
43 return rev == ALX_REV_A0 || rev == ALX_REV_A1;
44}
45
46static int alx_wait_mdio_idle(struct alx_hw *hw)
47{
48 u32 val;
49 int i;
50
51 for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) {
52 val = alx_read_mem32(hw, ALX_MDIO);
53 if (!(val & ALX_MDIO_BUSY))
54 return 0;
55 udelay(10);
56 }
57
58 return -ETIMEDOUT;
59}
60
61static int alx_read_phy_core(struct alx_hw *hw, bool ext, u8 dev,
62 u16 reg, u16 *phy_data)
63{
64 u32 val, clk_sel;
65 int err;
66
67 *phy_data = 0;
68
69 /* use slow clock when it's in hibernation status */
70 clk_sel = hw->link_speed != SPEED_UNKNOWN ?
71 ALX_MDIO_CLK_SEL_25MD4 :
72 ALX_MDIO_CLK_SEL_25MD128;
73
74 if (ext) {
75 val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
76 reg << ALX_MDIO_EXTN_REG_SHIFT;
77 alx_write_mem32(hw, ALX_MDIO_EXTN, val);
78
79 val = ALX_MDIO_SPRES_PRMBL | ALX_MDIO_START |
80 ALX_MDIO_MODE_EXT | ALX_MDIO_OP_READ |
81 clk_sel << ALX_MDIO_CLK_SEL_SHIFT;
82 } else {
83 val = ALX_MDIO_SPRES_PRMBL |
84 clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
85 reg << ALX_MDIO_REG_SHIFT |
86 ALX_MDIO_START | ALX_MDIO_OP_READ;
87 }
88 alx_write_mem32(hw, ALX_MDIO, val);
89
90 err = alx_wait_mdio_idle(hw);
91 if (err)
92 return err;
93 val = alx_read_mem32(hw, ALX_MDIO);
94 *phy_data = ALX_GET_FIELD(val, ALX_MDIO_DATA);
95 return 0;
96}
97
98static int alx_write_phy_core(struct alx_hw *hw, bool ext, u8 dev,
99 u16 reg, u16 phy_data)
100{
101 u32 val, clk_sel;
102
103 /* use slow clock when it's in hibernation status */
104 clk_sel = hw->link_speed != SPEED_UNKNOWN ?
105 ALX_MDIO_CLK_SEL_25MD4 :
106 ALX_MDIO_CLK_SEL_25MD128;
107
108 if (ext) {
109 val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
110 reg << ALX_MDIO_EXTN_REG_SHIFT;
111 alx_write_mem32(hw, ALX_MDIO_EXTN, val);
112
113 val = ALX_MDIO_SPRES_PRMBL |
114 clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
115 phy_data << ALX_MDIO_DATA_SHIFT |
116 ALX_MDIO_START | ALX_MDIO_MODE_EXT;
117 } else {
118 val = ALX_MDIO_SPRES_PRMBL |
119 clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
120 reg << ALX_MDIO_REG_SHIFT |
121 phy_data << ALX_MDIO_DATA_SHIFT |
122 ALX_MDIO_START;
123 }
124 alx_write_mem32(hw, ALX_MDIO, val);
125
126 return alx_wait_mdio_idle(hw);
127}
128
129static int __alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
130{
131 return alx_read_phy_core(hw, false, 0, reg, phy_data);
132}
133
134static int __alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
135{
136 return alx_write_phy_core(hw, false, 0, reg, phy_data);
137}
138
139static int __alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
140{
141 return alx_read_phy_core(hw, true, dev, reg, pdata);
142}
143
144static int __alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
145{
146 return alx_write_phy_core(hw, true, dev, reg, data);
147}
148
149static int __alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
150{
151 int err;
152
153 err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
154 if (err)
155 return err;
156
157 return __alx_read_phy_reg(hw, ALX_MII_DBG_DATA, pdata);
158}
159
160static int __alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
161{
162 int err;
163
164 err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
165 if (err)
166 return err;
167
168 return __alx_write_phy_reg(hw, ALX_MII_DBG_DATA, data);
169}
170
171int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
172{
173 int err;
174
175 spin_lock(&hw->mdio_lock);
176 err = __alx_read_phy_reg(hw, reg, phy_data);
177 spin_unlock(&hw->mdio_lock);
178
179 return err;
180}
181
182int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
183{
184 int err;
185
186 spin_lock(&hw->mdio_lock);
187 err = __alx_write_phy_reg(hw, reg, phy_data);
188 spin_unlock(&hw->mdio_lock);
189
190 return err;
191}
192
193int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
194{
195 int err;
196
197 spin_lock(&hw->mdio_lock);
198 err = __alx_read_phy_ext(hw, dev, reg, pdata);
199 spin_unlock(&hw->mdio_lock);
200
201 return err;
202}
203
204int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
205{
206 int err;
207
208 spin_lock(&hw->mdio_lock);
209 err = __alx_write_phy_ext(hw, dev, reg, data);
210 spin_unlock(&hw->mdio_lock);
211
212 return err;
213}
214
215static int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
216{
217 int err;
218
219 spin_lock(&hw->mdio_lock);
220 err = __alx_read_phy_dbg(hw, reg, pdata);
221 spin_unlock(&hw->mdio_lock);
222
223 return err;
224}
225
226static int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
227{
228 int err;
229
230 spin_lock(&hw->mdio_lock);
231 err = __alx_write_phy_dbg(hw, reg, data);
232 spin_unlock(&hw->mdio_lock);
233
234 return err;
235}
236
237static u16 alx_get_phy_config(struct alx_hw *hw)
238{
239 u32 val;
240 u16 phy_val;
241
242 val = alx_read_mem32(hw, ALX_PHY_CTRL);
243 /* phy in reset */
244 if ((val & ALX_PHY_CTRL_DSPRST_OUT) == 0)
245 return ALX_DRV_PHY_UNKNOWN;
246
247 val = alx_read_mem32(hw, ALX_DRV);
248 val = ALX_GET_FIELD(val, ALX_DRV_PHY);
249 if (ALX_DRV_PHY_UNKNOWN == val)
250 return ALX_DRV_PHY_UNKNOWN;
251
252 alx_read_phy_reg(hw, ALX_MII_DBG_ADDR, &phy_val);
253 if (ALX_PHY_INITED == phy_val)
254 return val;
255
256 return ALX_DRV_PHY_UNKNOWN;
257}
258
259static bool alx_wait_reg(struct alx_hw *hw, u32 reg, u32 wait, u32 *val)
260{
261 u32 read;
262 int i;
263
264 for (i = 0; i < ALX_SLD_MAX_TO; i++) {
265 read = alx_read_mem32(hw, reg);
266 if ((read & wait) == 0) {
267 if (val)
268 *val = read;
269 return true;
270 }
271 mdelay(1);
272 }
273
274 return false;
275}
276
277static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr)
278{
279 u32 mac0, mac1;
280
281 mac0 = alx_read_mem32(hw, ALX_STAD0);
282 mac1 = alx_read_mem32(hw, ALX_STAD1);
283
284 /* addr should be big-endian */
285 *(__be32 *)(addr + 2) = cpu_to_be32(mac0);
286 *(__be16 *)addr = cpu_to_be16(mac1);
287
288 return is_valid_ether_addr(addr);
289}
290
291int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr)
292{
293 u32 val;
294
295 /* try to get it from register first */
296 if (alx_read_macaddr(hw, addr))
297 return 0;
298
299 /* try to load from efuse */
300 if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_STAT | ALX_SLD_START, &val))
301 return -EIO;
302 alx_write_mem32(hw, ALX_SLD, val | ALX_SLD_START);
303 if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_START, NULL))
304 return -EIO;
305 if (alx_read_macaddr(hw, addr))
306 return 0;
307
308 /* try to load from flash/eeprom (if present) */
309 val = alx_read_mem32(hw, ALX_EFLD);
310 if (val & (ALX_EFLD_F_EXIST | ALX_EFLD_E_EXIST)) {
311 if (!alx_wait_reg(hw, ALX_EFLD,
312 ALX_EFLD_STAT | ALX_EFLD_START, &val))
313 return -EIO;
314 alx_write_mem32(hw, ALX_EFLD, val | ALX_EFLD_START);
315 if (!alx_wait_reg(hw, ALX_EFLD, ALX_EFLD_START, NULL))
316 return -EIO;
317 if (alx_read_macaddr(hw, addr))
318 return 0;
319 }
320
321 return -EIO;
322}
323
324void alx_set_macaddr(struct alx_hw *hw, const u8 *addr)
325{
326 u32 val;
327
328	/* for example: 00-0B-6A-F6-00-DC => STAD0=6AF600DC, STAD1=000B */
329 val = be32_to_cpu(*(__be32 *)(addr + 2));
330 alx_write_mem32(hw, ALX_STAD0, val);
331 val = be16_to_cpu(*(__be16 *)addr);
332 alx_write_mem32(hw, ALX_STAD1, val);
333}
334
335static void alx_enable_osc(struct alx_hw *hw)
336{
337 u32 val;
338
339 /* rising edge */
340 val = alx_read_mem32(hw, ALX_MISC);
341 alx_write_mem32(hw, ALX_MISC, val & ~ALX_MISC_INTNLOSC_OPEN);
342 alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
343}
344
345static void alx_reset_osc(struct alx_hw *hw, u8 rev)
346{
347 u32 val, val2;
348
349 /* clear Internal OSC settings, switching OSC by hw itself */
350 val = alx_read_mem32(hw, ALX_MISC3);
351 alx_write_mem32(hw, ALX_MISC3,
352 (val & ~ALX_MISC3_25M_BY_SW) |
353 ALX_MISC3_25M_NOTO_INTNL);
354
355	/* the 25M clk from the chipset may be unstable for 1s after PERST
356	 * de-assert; the driver must re-calibrate before entering sleep for WoL
357	 */
358 val = alx_read_mem32(hw, ALX_MISC);
359 if (rev >= ALX_REV_B0) {
360 /* restore over current protection def-val,
361 * this val could be reset by MAC-RST
362 */
363 ALX_SET_FIELD(val, ALX_MISC_PSW_OCP, ALX_MISC_PSW_OCP_DEF);
364 /* a 0->1 change will update the internal val of osc */
365 val &= ~ALX_MISC_INTNLOSC_OPEN;
366 alx_write_mem32(hw, ALX_MISC, val);
367 alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
368		/* hw automatically disables the OSC after calibration */
369 val2 = alx_read_mem32(hw, ALX_MSIC2);
370 val2 &= ~ALX_MSIC2_CALB_START;
371 alx_write_mem32(hw, ALX_MSIC2, val2);
372 alx_write_mem32(hw, ALX_MSIC2, val2 | ALX_MSIC2_CALB_START);
373 } else {
374 val &= ~ALX_MISC_INTNLOSC_OPEN;
375 /* disable isolate for rev A devices */
376 if (alx_is_rev_a(rev))
377 val &= ~ALX_MISC_ISO_EN;
378
379 alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
380 alx_write_mem32(hw, ALX_MISC, val);
381 }
382
383 udelay(20);
384}
385
386static int alx_stop_mac(struct alx_hw *hw)
387{
388 u32 rxq, txq, val;
389 u16 i;
390
391 rxq = alx_read_mem32(hw, ALX_RXQ0);
392 alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN);
393 txq = alx_read_mem32(hw, ALX_TXQ0);
394 alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN);
395
396 udelay(40);
397
398 hw->rx_ctrl &= ~(ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
399 alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
400
401 for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
402 val = alx_read_mem32(hw, ALX_MAC_STS);
403 if (!(val & ALX_MAC_STS_IDLE))
404 return 0;
405 udelay(10);
406 }
407
408 return -ETIMEDOUT;
409}
410
411int alx_reset_mac(struct alx_hw *hw)
412{
413 u32 val, pmctrl;
414 int i, ret;
415 u8 rev;
416 bool a_cr;
417
418 pmctrl = 0;
419 rev = alx_hw_revision(hw);
420 a_cr = alx_is_rev_a(rev) && alx_hw_with_cr(hw);
421
422 /* disable all interrupts, RXQ/TXQ */
423 alx_write_mem32(hw, ALX_MSIX_MASK, 0xFFFFFFFF);
424 alx_write_mem32(hw, ALX_IMR, 0);
425 alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
426
427 ret = alx_stop_mac(hw);
428 if (ret)
429 return ret;
430
431	/* mac reset workaround */
432 alx_write_mem32(hw, ALX_RFD_PIDX, 1);
433
434 /* dis l0s/l1 before mac reset */
435 if (a_cr) {
436 pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
437 if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
438 alx_write_mem32(hw, ALX_PMCTRL,
439 pmctrl & ~(ALX_PMCTRL_L1_EN |
440 ALX_PMCTRL_L0S_EN));
441 }
442
443 /* reset whole mac safely */
444 val = alx_read_mem32(hw, ALX_MASTER);
445 alx_write_mem32(hw, ALX_MASTER,
446 val | ALX_MASTER_DMA_MAC_RST | ALX_MASTER_OOB_DIS);
447
448 /* make sure it's real idle */
449 udelay(10);
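	/* the two polls below share one ALX_DMA_MAC_RST_TO budget: the loop
	 * index carries over from the RFD_PIDX poll into the MAC_RST poll
	 */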
450 for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
451 val = alx_read_mem32(hw, ALX_RFD_PIDX);
452 if (val == 0)
453 break;
454 udelay(10);
455 }
456 for (; i < ALX_DMA_MAC_RST_TO; i++) {
457 val = alx_read_mem32(hw, ALX_MASTER);
458 if ((val & ALX_MASTER_DMA_MAC_RST) == 0)
459 break;
460 udelay(10);
461 }
462 if (i == ALX_DMA_MAC_RST_TO)
463 return -EIO;
464 udelay(10);
465
466 if (a_cr) {
467 alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS);
468 /* restore l0s / l1 */
469 if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
470 alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
471 }
472
473 alx_reset_osc(hw, rev);
474
475 /* clear Internal OSC settings, switching OSC by hw itself,
476 * disable isolate for rev A devices
477 */
478 val = alx_read_mem32(hw, ALX_MISC3);
479 alx_write_mem32(hw, ALX_MISC3,
480 (val & ~ALX_MISC3_25M_BY_SW) |
481 ALX_MISC3_25M_NOTO_INTNL);
482 val = alx_read_mem32(hw, ALX_MISC);
483 val &= ~ALX_MISC_INTNLOSC_OPEN;
484 if (alx_is_rev_a(rev))
485 val &= ~ALX_MISC_ISO_EN;
486 alx_write_mem32(hw, ALX_MISC, val);
487 udelay(20);
488
489 /* driver control speed/duplex, hash-alg */
490 alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
491
492 val = alx_read_mem32(hw, ALX_SERDES);
493 alx_write_mem32(hw, ALX_SERDES,
494 val | ALX_SERDES_MACCLK_SLWDWN |
495 ALX_SERDES_PHYCLK_SLWDWN);
496
497 return 0;
498}
499
500void alx_reset_phy(struct alx_hw *hw)
501{
502 int i;
503 u32 val;
504 u16 phy_val;
505
506 /* (DSP)reset PHY core */
507 val = alx_read_mem32(hw, ALX_PHY_CTRL);
508 val &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_IDDQ |
509 ALX_PHY_CTRL_GATE_25M | ALX_PHY_CTRL_POWER_DOWN |
510 ALX_PHY_CTRL_CLS);
511 val |= ALX_PHY_CTRL_RST_ANALOG;
512
513 val |= (ALX_PHY_CTRL_HIB_PULSE | ALX_PHY_CTRL_HIB_EN);
514 alx_write_mem32(hw, ALX_PHY_CTRL, val);
515 udelay(10);
516 alx_write_mem32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_DSPRST_OUT);
517
518 for (i = 0; i < ALX_PHY_CTRL_DSPRST_TO; i++)
519 udelay(10);
520
521 /* phy power saving & hib */
522 alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, ALX_LEGCYPS_DEF);
523 alx_write_phy_dbg(hw, ALX_MIIDBG_SYSMODCTRL,
524 ALX_SYSMODCTRL_IECHOADJ_DEF);
525 alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_VDRVBIAS,
526 ALX_VDRVBIAS_DEF);
527
528 /* EEE advertisement */
529 val = alx_read_mem32(hw, ALX_LPI_CTRL);
530 alx_write_mem32(hw, ALX_LPI_CTRL, val & ~ALX_LPI_CTRL_EN);
531 alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_LOCAL_EEEADV, 0);
532
533 /* phy power saving */
534 alx_write_phy_dbg(hw, ALX_MIIDBG_TST10BTCFG, ALX_TST10BTCFG_DEF);
535 alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, ALX_SRDSYSMOD_DEF);
536 alx_write_phy_dbg(hw, ALX_MIIDBG_TST100BTCFG, ALX_TST100BTCFG_DEF);
537 alx_write_phy_dbg(hw, ALX_MIIDBG_ANACTRL, ALX_ANACTRL_DEF);
538 alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
539 alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
540 phy_val & ~ALX_GREENCFG2_GATE_DFSE_EN);
541 /* rtl8139c, 120m issue */
542 alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_NLP78,
543 ALX_MIIEXT_NLP78_120M_DEF);
544 alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_S3DIG10,
545 ALX_MIIEXT_S3DIG10_DEF);
546
547 if (hw->lnk_patch) {
548 /* Turn off half amplitude */
549 alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
550 &phy_val);
551 alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
552 phy_val | ALX_CLDCTRL3_BP_CABLE1TH_DET_GT);
553 /* Turn off Green feature */
554 alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
555 alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
556 phy_val | ALX_GREENCFG2_BP_GREEN);
557 /* Turn off half Bias */
558 alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
559 &phy_val);
560 alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
561 phy_val | ALX_CLDCTRL5_BP_VD_HLFBIAS);
562 }
563
564 /* set phy interrupt mask */
565 alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP | ALX_IER_LINK_DOWN);
566}
567
568#define ALX_PCI_CMD (PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
569
570void alx_reset_pcie(struct alx_hw *hw)
571{
572 u8 rev = alx_hw_revision(hw);
573 u32 val;
574 u16 val16;
575
576 /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
577 pci_read_config_word(hw->pdev, PCI_COMMAND, &val16);
578 if (!(val16 & ALX_PCI_CMD) || (val16 & PCI_COMMAND_INTX_DISABLE)) {
579 val16 = (val16 | ALX_PCI_CMD) & ~PCI_COMMAND_INTX_DISABLE;
580 pci_write_config_word(hw->pdev, PCI_COMMAND, val16);
581 }
582
583 /* clear WoL setting/status */
584 val = alx_read_mem32(hw, ALX_WOL0);
585 alx_write_mem32(hw, ALX_WOL0, 0);
586
587 val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
588 alx_write_mem32(hw, ALX_PDLL_TRNS1, val & ~ALX_PDLL_TRNS1_D3PLLOFF_EN);
589
590 /* mask some pcie error bits */
591 val = alx_read_mem32(hw, ALX_UE_SVRT);
592 val &= ~(ALX_UE_SVRT_DLPROTERR | ALX_UE_SVRT_FCPROTERR);
593 alx_write_mem32(hw, ALX_UE_SVRT, val);
594
595 /* wol 25M & pclk */
596 val = alx_read_mem32(hw, ALX_MASTER);
597 if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) {
598 if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
599 (val & ALX_MASTER_PCLKSEL_SRDS) == 0)
600 alx_write_mem32(hw, ALX_MASTER,
601 val | ALX_MASTER_PCLKSEL_SRDS |
602 ALX_MASTER_WAKEN_25M);
603 } else {
604 if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
605 (val & ALX_MASTER_PCLKSEL_SRDS) != 0)
606 alx_write_mem32(hw, ALX_MASTER,
607 (val & ~ALX_MASTER_PCLKSEL_SRDS) |
608 ALX_MASTER_WAKEN_25M);
609 }
610
611 /* ASPM setting */
612 alx_enable_aspm(hw, true, true);
613
614 udelay(10);
615}
616
617void alx_start_mac(struct alx_hw *hw)
618{
619 u32 mac, txq, rxq;
620
621 rxq = alx_read_mem32(hw, ALX_RXQ0);
622 alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN);
623 txq = alx_read_mem32(hw, ALX_TXQ0);
624 alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN);
625
626 mac = hw->rx_ctrl;
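	/* link_speed % 10 is the DUPLEX_* part of the encoded speed/duplex */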
627 if (hw->link_speed % 10 == DUPLEX_FULL)
628 mac |= ALX_MAC_CTRL_FULLD;
629 else
630 mac &= ~ALX_MAC_CTRL_FULLD;
631 ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
632 hw->link_speed >= SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 :
633 ALX_MAC_CTRL_SPEED_10_100);
634 mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN;
635 hw->rx_ctrl = mac;
636 alx_write_mem32(hw, ALX_MAC_CTRL, mac);
637}
638
639void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc)
640{
641 if (fc & ALX_FC_RX)
642 hw->rx_ctrl |= ALX_MAC_CTRL_RXFC_EN;
643 else
644 hw->rx_ctrl &= ~ALX_MAC_CTRL_RXFC_EN;
645
646 if (fc & ALX_FC_TX)
647 hw->rx_ctrl |= ALX_MAC_CTRL_TXFC_EN;
648 else
649 hw->rx_ctrl &= ~ALX_MAC_CTRL_TXFC_EN;
650
651 alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
652}
653
654void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en)
655{
656 u32 pmctrl;
657 u8 rev = alx_hw_revision(hw);
658
659 pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
660
661 ALX_SET_FIELD(pmctrl, ALX_PMCTRL_LCKDET_TIMER,
662 ALX_PMCTRL_LCKDET_TIMER_DEF);
663 pmctrl |= ALX_PMCTRL_RCVR_WT_1US |
664 ALX_PMCTRL_L1_CLKSW_EN |
665 ALX_PMCTRL_L1_SRDSRX_PWD;
666 ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1REQ_TO, ALX_PMCTRL_L1REG_TO_DEF);
667 ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1_TIMER, ALX_PMCTRL_L1_TIMER_16US);
668 pmctrl &= ~(ALX_PMCTRL_L1_SRDS_EN |
669 ALX_PMCTRL_L1_SRDSPLL_EN |
670 ALX_PMCTRL_L1_BUFSRX_EN |
671 ALX_PMCTRL_SADLY_EN |
672 ALX_PMCTRL_HOTRST_WTEN|
673 ALX_PMCTRL_L0S_EN |
674 ALX_PMCTRL_L1_EN |
675 ALX_PMCTRL_ASPM_FCEN |
676 ALX_PMCTRL_TXL1_AFTER_L0S |
677 ALX_PMCTRL_RXL1_AFTER_L0S);
678 if (alx_is_rev_a(rev) && alx_hw_with_cr(hw))
679 pmctrl |= ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN;
680
681 if (l0s_en)
682 pmctrl |= (ALX_PMCTRL_L0S_EN | ALX_PMCTRL_ASPM_FCEN);
683 if (l1_en)
684 pmctrl |= (ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN);
685
686 alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
687}
688
689
690static u32 ethadv_to_hw_cfg(struct alx_hw *hw, u32 ethadv_cfg)
691{
692 u32 cfg = 0;
693
694 if (ethadv_cfg & ADVERTISED_Autoneg) {
695 cfg |= ALX_DRV_PHY_AUTO;
696 if (ethadv_cfg & ADVERTISED_10baseT_Half)
697 cfg |= ALX_DRV_PHY_10;
698 if (ethadv_cfg & ADVERTISED_10baseT_Full)
699 cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
700 if (ethadv_cfg & ADVERTISED_100baseT_Half)
701 cfg |= ALX_DRV_PHY_100;
702 if (ethadv_cfg & ADVERTISED_100baseT_Full)
703 cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
704 if (ethadv_cfg & ADVERTISED_1000baseT_Half)
705 cfg |= ALX_DRV_PHY_1000;
706 if (ethadv_cfg & ADVERTISED_1000baseT_Full)
707			cfg |= ALX_DRV_PHY_1000 | ALX_DRV_PHY_DUPLEX;
708 if (ethadv_cfg & ADVERTISED_Pause)
709 cfg |= ADVERTISE_PAUSE_CAP;
710 if (ethadv_cfg & ADVERTISED_Asym_Pause)
711 cfg |= ADVERTISE_PAUSE_ASYM;
712 } else {
713 switch (ethadv_cfg) {
714 case ADVERTISED_10baseT_Half:
715 cfg |= ALX_DRV_PHY_10;
716 break;
717 case ADVERTISED_100baseT_Half:
718 cfg |= ALX_DRV_PHY_100;
719 break;
720 case ADVERTISED_10baseT_Full:
721 cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
722 break;
723 case ADVERTISED_100baseT_Full:
724 cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
725 break;
726 }
727 }
728
729 return cfg;
730}
731
732int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl)
733{
734 u16 adv, giga, cr;
735 u32 val;
736 int err = 0;
737
738 alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, 0);
739 val = alx_read_mem32(hw, ALX_DRV);
740 ALX_SET_FIELD(val, ALX_DRV_PHY, 0);
741
742 if (ethadv & ADVERTISED_Autoneg) {
743 adv = ADVERTISE_CSMA;
744 adv |= ethtool_adv_to_mii_adv_t(ethadv);
745
746 if (flowctrl & ALX_FC_ANEG) {
747 if (flowctrl & ALX_FC_RX) {
748 adv |= ADVERTISED_Pause;
749 if (!(flowctrl & ALX_FC_TX))
750 adv |= ADVERTISED_Asym_Pause;
751 } else if (flowctrl & ALX_FC_TX) {
752 adv |= ADVERTISED_Asym_Pause;
753 }
754 }
755 giga = 0;
756 if (alx_hw_giga(hw))
757 giga = ethtool_adv_to_mii_ctrl1000_t(ethadv);
758
759 cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
760
761 if (alx_write_phy_reg(hw, MII_ADVERTISE, adv) ||
762 alx_write_phy_reg(hw, MII_CTRL1000, giga) ||
763 alx_write_phy_reg(hw, MII_BMCR, cr))
764 err = -EBUSY;
765 } else {
766 cr = BMCR_RESET;
767 if (ethadv == ADVERTISED_100baseT_Half ||
768 ethadv == ADVERTISED_100baseT_Full)
769 cr |= BMCR_SPEED100;
770 if (ethadv == ADVERTISED_10baseT_Full ||
771 ethadv == ADVERTISED_100baseT_Full)
772 cr |= BMCR_FULLDPLX;
773
774 err = alx_write_phy_reg(hw, MII_BMCR, cr);
775 }
776
777 if (!err) {
778 alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, ALX_PHY_INITED);
779 val |= ethadv_to_hw_cfg(hw, ethadv);
780 }
781
782 alx_write_mem32(hw, ALX_DRV, val);
783
784 return err;
785}
786
787
788void alx_post_phy_link(struct alx_hw *hw)
789{
790 u16 phy_val, len, agc;
791 u8 revid = alx_hw_revision(hw);
792 bool adj_th = revid == ALX_REV_B0;
793 int speed;
794
795 if (hw->link_speed == SPEED_UNKNOWN)
796 speed = SPEED_UNKNOWN;
797 else
798 speed = hw->link_speed - hw->link_speed % 10;
799
800 if (revid != ALX_REV_B0 && !alx_is_rev_a(revid))
801 return;
802
803 /* 1000BT/AZ, wrong cable length */
804 if (speed != SPEED_UNKNOWN) {
805 alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6,
806 &phy_val);
807 len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN);
808 alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val);
809 agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA);
810
811 if ((speed == SPEED_1000 &&
812 (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G ||
813 (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) ||
814 (speed == SPEED_100 &&
815 (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M ||
816 (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) {
817 alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
818 ALX_AZ_ANADECT_LONG);
819 alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
820 &phy_val);
821 alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
822 phy_val | ALX_AFE_10BT_100M_TH);
823 } else {
824 alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
825 ALX_AZ_ANADECT_DEF);
826 alx_read_phy_ext(hw, ALX_MIIEXT_ANEG,
827 ALX_MIIEXT_AFE, &phy_val);
828 alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
829 phy_val & ~ALX_AFE_10BT_100M_TH);
830 }
831
832 /* threshold adjust */
833 if (adj_th && hw->lnk_patch) {
834 if (speed == SPEED_100) {
835 alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
836 ALX_MSE16DB_UP);
837 } else if (speed == SPEED_1000) {
838 /*
839				 * Giga link threshold: raise the noise
840				 * tolerance by 50%
841 */
842 alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
843 &phy_val);
844 ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
845 ALX_MSE20DB_TH_HI);
846 alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
847 phy_val);
848 }
849 }
850 } else {
851 alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
852 &phy_val);
853 alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
854 phy_val & ~ALX_AFE_10BT_100M_TH);
855
856 if (adj_th && hw->lnk_patch) {
857 alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
858 ALX_MSE16DB_DOWN);
859 alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val);
860 ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
861 ALX_MSE20DB_TH_DEF);
862 alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val);
863 }
864 }
865}
866
867
868/* NOTE:
869 * 1. phy link must be established before calling this function
870 * 2. WoL options (pattern, magic, link, etc.) are configured before calling it.
871 */
872int alx_pre_suspend(struct alx_hw *hw, int speed)
873{
874 u32 master, mac, phy, val;
875 int err = 0;
876
877 master = alx_read_mem32(hw, ALX_MASTER);
878 master &= ~ALX_MASTER_PCLKSEL_SRDS;
879 mac = hw->rx_ctrl;
880 /* 10/100 half */
881 ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, ALX_MAC_CTRL_SPEED_10_100);
882 mac &= ~(ALX_MAC_CTRL_FULLD | ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
883
884 phy = alx_read_mem32(hw, ALX_PHY_CTRL);
885 phy &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_CLS);
886 phy |= ALX_PHY_CTRL_RST_ANALOG | ALX_PHY_CTRL_HIB_PULSE |
887 ALX_PHY_CTRL_HIB_EN;
888
889 /* without any activity */
890 if (!(hw->sleep_ctrl & ALX_SLEEP_ACTIVE)) {
891 err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
892 if (err)
893 return err;
894 phy |= ALX_PHY_CTRL_IDDQ | ALX_PHY_CTRL_POWER_DOWN;
895 } else {
896 if (hw->sleep_ctrl & (ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_CIFS))
897 mac |= ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_BRD_EN;
898 if (hw->sleep_ctrl & ALX_SLEEP_CIFS)
899 mac |= ALX_MAC_CTRL_TX_EN;
900 if (speed % 10 == DUPLEX_FULL)
901 mac |= ALX_MAC_CTRL_FULLD;
902 if (speed >= SPEED_1000)
903 ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
904 ALX_MAC_CTRL_SPEED_1000);
905 phy |= ALX_PHY_CTRL_DSPRST_OUT;
906 err = alx_write_phy_ext(hw, ALX_MIIEXT_ANEG,
907 ALX_MIIEXT_S3DIG10,
908 ALX_MIIEXT_S3DIG10_SL);
909 if (err)
910 return err;
911 }
912
913 alx_enable_osc(hw);
914 hw->rx_ctrl = mac;
915 alx_write_mem32(hw, ALX_MASTER, master);
916 alx_write_mem32(hw, ALX_MAC_CTRL, mac);
917 alx_write_mem32(hw, ALX_PHY_CTRL, phy);
918
919 /* set val of PDLL D3PLLOFF */
920 val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
921 val |= ALX_PDLL_TRNS1_D3PLLOFF_EN;
922 alx_write_mem32(hw, ALX_PDLL_TRNS1, val);
923
924 return 0;
925}
926
927bool alx_phy_configured(struct alx_hw *hw)
928{
929 u32 cfg, hw_cfg;
930
931 cfg = ethadv_to_hw_cfg(hw, hw->adv_cfg);
932 cfg = ALX_GET_FIELD(cfg, ALX_DRV_PHY);
933 hw_cfg = alx_get_phy_config(hw);
934
935 if (hw_cfg == ALX_DRV_PHY_UNKNOWN)
936 return false;
937
938 return cfg == hw_cfg;
939}
940
941int alx_get_phy_link(struct alx_hw *hw, int *speed)
942{
943 struct pci_dev *pdev = hw->pdev;
944 u16 bmsr, giga;
945 int err;
946
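	/* BMSR link status is latched-low; read it twice so the second read
	 * reflects the current link state rather than a stale link-down event
	 */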
947 err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
948 if (err)
949 return err;
950
951 err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
952 if (err)
953 return err;
954
955 if (!(bmsr & BMSR_LSTATUS)) {
956 *speed = SPEED_UNKNOWN;
957 return 0;
958 }
959
960 /* speed/duplex result is saved in PHY Specific Status Register */
961 err = alx_read_phy_reg(hw, ALX_MII_GIGA_PSSR, &giga);
962 if (err)
963 return err;
964
965 if (!(giga & ALX_GIGA_PSSR_SPD_DPLX_RESOLVED))
966 goto wrong_speed;
967
968 switch (giga & ALX_GIGA_PSSR_SPEED) {
969 case ALX_GIGA_PSSR_1000MBS:
970 *speed = SPEED_1000;
971 break;
972 case ALX_GIGA_PSSR_100MBS:
973 *speed = SPEED_100;
974 break;
975 case ALX_GIGA_PSSR_10MBS:
976 *speed = SPEED_10;
977 break;
978 default:
979 goto wrong_speed;
980 }
981
982 *speed += (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF;
983 return 1;
984
985wrong_speed:
986 dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga);
987 return -EINVAL;
988}
989
990int alx_clear_phy_intr(struct alx_hw *hw)
991{
992 u16 isr;
993
994 /* clear interrupt status by reading it */
995 return alx_read_phy_reg(hw, ALX_MII_ISR, &isr);
996}
997
998int alx_config_wol(struct alx_hw *hw)
999{
1000 u32 wol = 0;
1001 int err = 0;
1002
1003 /* turn on magic packet event */
1004 if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
1005 wol |= ALX_WOL0_MAGIC_EN | ALX_WOL0_PME_MAGIC_EN;
1006
1007 /* turn on link up event */
1008 if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) {
1009 wol |= ALX_WOL0_LINK_EN | ALX_WOL0_PME_LINK;
1010 /* only link up can wake up */
1011 err = alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP);
1012 }
1013 alx_write_mem32(hw, ALX_WOL0, wol);
1014
1015 return err;
1016}
1017
1018void alx_disable_rss(struct alx_hw *hw)
1019{
1020 u32 ctrl = alx_read_mem32(hw, ALX_RXQ0);
1021
1022 ctrl &= ~ALX_RXQ0_RSS_HASH_EN;
1023 alx_write_mem32(hw, ALX_RXQ0, ctrl);
1024}
1025
1026void alx_configure_basic(struct alx_hw *hw)
1027{
1028 u32 val, raw_mtu, max_payload;
1029 u16 val16;
1030 u8 chip_rev = alx_hw_revision(hw);
1031
1032 alx_set_macaddr(hw, hw->mac_addr);
1033
1034 alx_write_mem32(hw, ALX_CLK_GATE, ALX_CLK_GATE_ALL);
1035
1036 /* idle timeout to switch clk_125M */
1037 if (chip_rev >= ALX_REV_B0)
1038 alx_write_mem32(hw, ALX_IDLE_DECISN_TIMER,
1039 ALX_IDLE_DECISN_TIMER_DEF);
1040
1041 alx_write_mem32(hw, ALX_SMB_TIMER, hw->smb_timer * 500UL);
1042
1043 val = alx_read_mem32(hw, ALX_MASTER);
1044 val |= ALX_MASTER_IRQMOD2_EN |
1045 ALX_MASTER_IRQMOD1_EN |
1046 ALX_MASTER_SYSALVTIMER_EN;
1047 alx_write_mem32(hw, ALX_MASTER, val);
1048 alx_write_mem32(hw, ALX_IRQ_MODU_TIMER,
1049 (hw->imt >> 1) << ALX_IRQ_MODU_TIMER1_SHIFT);
1050 /* intr re-trig timeout */
1051 alx_write_mem32(hw, ALX_INT_RETRIG, ALX_INT_RETRIG_TO);
1052 /* tpd threshold to trig int */
1053 alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd);
1054 alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt);
1055
1056 raw_mtu = hw->mtu + ETH_HLEN;
1057 alx_write_mem32(hw, ALX_MTU, raw_mtu + 8);
1058 if (raw_mtu > ALX_MTU_JUMBO_TH)
1059 hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE;
1060
1061 if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH)
1062 val = (raw_mtu + 8 + 7) >> 3;
1063 else
1064 val = ALX_TXQ1_JUMBO_TSO_TH >> 3;
1065 alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN);
1066
1067 max_payload = pcie_get_readrq(hw->pdev) >> 8;
1068 /*
1069	 * if the BIOS has changed the default DMA read max length,
1070	 * restore it to the default value
1071 */
1072 if (max_payload < ALX_DEV_CTRL_MAXRRS_MIN)
1073 pcie_set_readrq(hw->pdev, 128 << ALX_DEV_CTRL_MAXRRS_MIN);
1074
1075 val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_TXQ0_TPD_BURSTPREF_SHIFT |
1076 ALX_TXQ0_MODE_ENHANCE | ALX_TXQ0_LSO_8023_EN |
1077 ALX_TXQ0_SUPT_IPOPT |
1078 ALX_TXQ_TXF_BURST_PREF_DEF << ALX_TXQ0_TXF_BURST_PREF_SHIFT;
1079 alx_write_mem32(hw, ALX_TXQ0, val);
1080 val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q1_NUMPREF_SHIFT |
1081 ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q2_NUMPREF_SHIFT |
1082 ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q3_NUMPREF_SHIFT |
1083 ALX_HQTPD_BURST_EN;
1084 alx_write_mem32(hw, ALX_HQTPD, val);
1085
1086 /* rxq, flow control */
1087 val = alx_read_mem32(hw, ALX_SRAM5);
1088 val = ALX_GET_FIELD(val, ALX_SRAM_RXF_LEN) << 3;
1089 if (val > ALX_SRAM_RXF_LEN_8K) {
1090 val16 = ALX_MTU_STD_ALGN >> 3;
1091 val = (val - ALX_RXQ2_RXF_FLOW_CTRL_RSVD) >> 3;
1092 } else {
1093 val16 = ALX_MTU_STD_ALGN >> 3;
1094 val = (val - ALX_MTU_STD_ALGN) >> 3;
1095 }
1096 alx_write_mem32(hw, ALX_RXQ2,
1097 val16 << ALX_RXQ2_RXF_XOFF_THRESH_SHIFT |
1098 val << ALX_RXQ2_RXF_XON_THRESH_SHIFT);
1099 val = ALX_RXQ0_NUM_RFD_PREF_DEF << ALX_RXQ0_NUM_RFD_PREF_SHIFT |
1100 ALX_RXQ0_RSS_MODE_DIS << ALX_RXQ0_RSS_MODE_SHIFT |
1101 ALX_RXQ0_IDT_TBL_SIZE_DEF << ALX_RXQ0_IDT_TBL_SIZE_SHIFT |
1102 ALX_RXQ0_RSS_HSTYP_ALL | ALX_RXQ0_RSS_HASH_EN |
1103 ALX_RXQ0_IPV6_PARSE_EN;
1104
1105 if (alx_hw_giga(hw))
1106 ALX_SET_FIELD(val, ALX_RXQ0_ASPM_THRESH,
1107 ALX_RXQ0_ASPM_THRESH_100M);
1108
1109 alx_write_mem32(hw, ALX_RXQ0, val);
1110
1111 val = alx_read_mem32(hw, ALX_DMA);
1112 val = ALX_DMA_RORDER_MODE_OUT << ALX_DMA_RORDER_MODE_SHIFT |
1113 ALX_DMA_RREQ_PRI_DATA |
1114 max_payload << ALX_DMA_RREQ_BLEN_SHIFT |
1115 ALX_DMA_WDLY_CNT_DEF << ALX_DMA_WDLY_CNT_SHIFT |
1116 ALX_DMA_RDLY_CNT_DEF << ALX_DMA_RDLY_CNT_SHIFT |
1117 (hw->dma_chnl - 1) << ALX_DMA_RCHNL_SEL_SHIFT;
1118 alx_write_mem32(hw, ALX_DMA, val);
1119
1120 /* default multi-tx-q weights */
1121 val = ALX_WRR_PRI_RESTRICT_NONE << ALX_WRR_PRI_SHIFT |
1122 4 << ALX_WRR_PRI0_SHIFT |
1123 4 << ALX_WRR_PRI1_SHIFT |
1124 4 << ALX_WRR_PRI2_SHIFT |
1125 4 << ALX_WRR_PRI3_SHIFT;
1126 alx_write_mem32(hw, ALX_WRR, val);
1127}
1128
1129static inline u32 alx_speed_to_ethadv(int speed)
1130{
1131 switch (speed) {
1132 case SPEED_1000 + DUPLEX_FULL:
1133 return ADVERTISED_1000baseT_Full;
1134 case SPEED_100 + DUPLEX_FULL:
1135 return ADVERTISED_100baseT_Full;
1136 case SPEED_100 + DUPLEX_HALF:
1137		return ADVERTISED_100baseT_Half;
1138 case SPEED_10 + DUPLEX_FULL:
1139 return ADVERTISED_10baseT_Full;
1140 case SPEED_10 + DUPLEX_HALF:
1141 return ADVERTISED_10baseT_Half;
1142 default:
1143 return 0;
1144 }
1145}
1146
1147int alx_select_powersaving_speed(struct alx_hw *hw, int *speed)
1148{
1149 int i, err, spd;
1150 u16 lpa;
1151
1152 err = alx_get_phy_link(hw, &spd);
1153 if (err < 0)
1154 return err;
1155
1156 if (spd == SPEED_UNKNOWN)
1157 return 0;
1158
1159 err = alx_read_phy_reg(hw, MII_LPA, &lpa);
1160 if (err)
1161 return err;
1162
1163 if (!(lpa & LPA_LPACK)) {
1164 *speed = spd;
1165 return 0;
1166 }
1167
1168 if (lpa & LPA_10FULL)
1169 *speed = SPEED_10 + DUPLEX_FULL;
1170 else if (lpa & LPA_10HALF)
1171 *speed = SPEED_10 + DUPLEX_HALF;
1172 else if (lpa & LPA_100FULL)
1173 *speed = SPEED_100 + DUPLEX_FULL;
1174 else
1175 *speed = SPEED_100 + DUPLEX_HALF;
1176
1177 if (*speed != spd) {
1178 err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
1179 if (err)
1180 return err;
1181 err = alx_setup_speed_duplex(hw,
1182 alx_speed_to_ethadv(*speed) |
1183 ADVERTISED_Autoneg,
1184 ALX_FC_ANEG | ALX_FC_RX |
1185 ALX_FC_TX);
1186 if (err)
1187 return err;
1188
1189 /* wait for linkup */
1190 for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) {
1191 int speed2;
1192
1193 msleep(100);
1194
1195 err = alx_get_phy_link(hw, &speed2);
1196 if (err < 0)
1197 return err;
1198 if (speed2 != SPEED_UNKNOWN)
1199 break;
1200 }
1201 if (i == ALX_MAX_SETUP_LNK_CYCLE)
1202 return -ETIMEDOUT;
1203 }
1204
1205 return 0;
1206}
1207
1208bool alx_get_phy_info(struct alx_hw *hw)
1209{
1210 u16 devs1, devs2;
1211
1212 if (alx_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id[0]) ||
1213 alx_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id[1]))
1214 return false;
1215
1216	/* since there is no PMA/PMD status2 register, we can't use the
1217	 * mdio45_probe function for prtad and mmds;
1218	 * use a fixed MMD3 to get the mmds.
1219 */
1220 if (alx_read_phy_ext(hw, 3, MDIO_DEVS1, &devs1) ||
1221 alx_read_phy_ext(hw, 3, MDIO_DEVS2, &devs2))
1222 return false;
1223 hw->mdio.mmds = devs1 | devs2 << 16;
1224
1225 return true;
1226}
diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h
new file mode 100644
index 000000000000..65e723d2172a
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/hw.h
@@ -0,0 +1,499 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35#ifndef ALX_HW_H_
36#define ALX_HW_H_
37#include <linux/types.h>
38#include <linux/mdio.h>
39#include <linux/pci.h>
40#include "reg.h"
41
42/* Transmit Packet Descriptor, contains 4 32-bit words.
43 *
44 * 31 16 0
45 * +----------------+----------------+
46 * | vlan-tag | buf length |
47 * +----------------+----------------+
48 * | Word 1 |
49 * +----------------+----------------+
50 * | Word 2: buf addr lo |
51 * +----------------+----------------+
52 * | Word 3: buf addr hi |
53 * +----------------+----------------+
54 *
55 * Word 2 and 3 combine to form a 64-bit buffer address
56 *
57 * Word 1 has three forms, depending on the state of bits 8/12/13:
58 * if bit8 == '1', the definition is just for custom checksum offload.
59 * if bit8 == '0' && bit12 == '1' && bit13 == '1', the *FIRST* descriptor
60 * for the skb is special for LSO V2: Word 2 becomes the total skb length
61 * and Word 3 is meaningless.
62 * in all other cases, the definition is for a general skb or ip/tcp/udp
63 * checksum or LSO (TSO) offload.
64 *
65 * Here is the depiction:
66 *
67 * 0-+ 0-+
68 * 1 | 1 |
69 * 2 | 2 |
70 * 3 | Payload offset 3 | L4 header offset
71 * 4 | (7:0) 4 | (7:0)
72 * 5 | 5 |
73 * 6 | 6 |
74 * 7-+ 7-+
75 * 8 Custom csum enable = 1 8 Custom csum enable = 0
76 * 9 General IPv4 checksum 9 General IPv4 checksum
77 * 10 General TCP checksum 10 General TCP checksum
78 * 11 General UDP checksum 11 General UDP checksum
79 * 12 Large Send Segment enable 12 Large Send Segment enable
80 * 13 Large Send Segment type 13 Large Send Segment type
81 * 14 VLAN tagged 14 VLAN tagged
82 * 15 Insert VLAN tag 15 Insert VLAN tag
83 * 16 IPv4 packet 16 IPv4 packet
84 * 17 Ethernet frame type 17 Ethernet frame type
85 * 18-+ 18-+
86 * 19 | 19 |
87 * 20 | 20 |
88 * 21 | Custom csum offset 21 |
89 * 22 | (25:18) 22 |
90 * 23 | 23 | MSS (30:18)
91 * 24 | 24 |
92 * 25-+ 25 |
93 * 26-+ 26 |
94 * 27 | 27 |
95 * 28 | Reserved 28 |
96 * 29 | 29 |
97 * 30-+ 30-+
98 * 31 End of packet 31 End of packet
99 */
100struct alx_txd {
101 __le16 len;
102 __le16 vlan_tag;
103 __le32 word1;
104 union {
105 __le64 addr;
106 struct {
107 __le32 pkt_len;
108 __le32 resvd;
109 } l;
110 } adrl;
111} __packed;
112
113/* tpd word 1 */
114#define TPD_CXSUMSTART_MASK 0x00FF
115#define TPD_CXSUMSTART_SHIFT 0
116#define TPD_L4HDROFFSET_MASK 0x00FF
117#define TPD_L4HDROFFSET_SHIFT 0
118#define TPD_CXSUM_EN_MASK 0x0001
119#define TPD_CXSUM_EN_SHIFT 8
120#define TPD_IP_XSUM_MASK 0x0001
121#define TPD_IP_XSUM_SHIFT 9
122#define TPD_TCP_XSUM_MASK 0x0001
123#define TPD_TCP_XSUM_SHIFT 10
124#define TPD_UDP_XSUM_MASK 0x0001
125#define TPD_UDP_XSUM_SHIFT 11
126#define TPD_LSO_EN_MASK 0x0001
127#define TPD_LSO_EN_SHIFT 12
128#define TPD_LSO_V2_MASK 0x0001
129#define TPD_LSO_V2_SHIFT 13
130#define TPD_VLTAGGED_MASK 0x0001
131#define TPD_VLTAGGED_SHIFT 14
132#define TPD_INS_VLTAG_MASK 0x0001
133#define TPD_INS_VLTAG_SHIFT 15
134#define TPD_IPV4_MASK 0x0001
135#define TPD_IPV4_SHIFT 16
136#define TPD_ETHTYPE_MASK 0x0001
137#define TPD_ETHTYPE_SHIFT 17
138#define TPD_CXSUMOFFSET_MASK 0x00FF
139#define TPD_CXSUMOFFSET_SHIFT 18
140#define TPD_MSS_MASK 0x1FFF
141#define TPD_MSS_SHIFT 18
142#define TPD_EOP_MASK 0x0001
143#define TPD_EOP_SHIFT 31
144
145#define DESC_GET(_x, _name) ((_x) >> _name##SHIFT & _name##MASK)
146
147/* Receive Free Descriptor */
148struct alx_rfd {
149 __le64 addr; /* data buffer address, length is
150 * declared in register --- every
151 * buffer has the same size
152 */
153} __packed;
154
155/* Receive Return Descriptor, contains 4 32-bit words.
156 *
157 * 31 16 0
158 * +----------------+----------------+
159 * | Word 0 |
160 * +----------------+----------------+
161 * | Word 1: RSS Hash value |
162 * +----------------+----------------+
163 * | Word 2 |
164 * +----------------+----------------+
165 * | Word 3 |
166 * +----------------+----------------+
167 *
168 * Word 0 depiction & Word 2 depiction:
169 *
170 * 0--+ 0--+
171 * 1 | 1 |
172 * 2 | 2 |
173 * 3 | 3 |
174 * 4 | 4 |
175 * 5 | 5 |
176 * 6 | 6 |
177 * 7 | IP payload checksum 7 | VLAN tag
178 * 8 | (15:0) 8 | (15:0)
179 * 9 | 9 |
180 * 10 | 10 |
181 * 11 | 11 |
182 * 12 | 12 |
183 * 13 | 13 |
184 * 14 | 14 |
185 * 15-+ 15-+
186 * 16-+ 16-+
187 * 17 | Number of RFDs 17 |
188 * 18 | (19:16) 18 |
189 * 19-+ 19 | Protocol ID
190 * 20-+ 20 | (23:16)
191 * 21 | 21 |
192 * 22 | 22 |
193 * 23 | 23-+
194 * 24 | 24 | Reserved
195 * 25 | Start index of RFD-ring 25-+
196 * 26 | (31:20) 26 | RSS Q-num (27:25)
197 * 27 | 27-+
198 * 28 | 28-+
199 * 29 | 29 | RSS Hash algorithm
200 * 30 | 30 | (31:28)
201 * 31-+ 31-+
202 *
203 * Word 3 depiction:
204 *
205 * 0--+
206 * 1 |
207 * 2 |
208 * 3 |
209 * 4 |
210 * 5 |
211 * 6 |
212 * 7 | Packet length (include FCS)
213 * 8 | (13:0)
214 * 9 |
215 * 10 |
216 * 11 |
217 * 12 |
218 * 13-+
219 * 14 L4 Header checksum error
220 * 15 IPv4 checksum error
221 * 16 VLAN tagged
222 * 17-+
223 * 18 | Protocol ID (19:17)
224 * 19-+
225 * 20 Receive error summary
226 * 21 FCS(CRC) error
227 * 22 Frame alignment error
228 * 23 Truncated packet
229 * 24 Runt packet
230 * 25 Incomplete packet due to insufficient rx-desc
231 * 26 Broadcast packet
232 * 27 Multicast packet
233 * 28 Ethernet type (EII or 802.3)
234 * 29 FIFO overflow
235 * 30 Length error (for 802.3, length field mismatch with actual len)
236 * 31 Updated, indicate to driver that this RRD is refreshed.
237 */
238struct alx_rrd {
239 __le32 word0;
240 __le32 rss_hash;
241 __le32 word2;
242 __le32 word3;
243} __packed;
244
245/* rrd word 0 */
246#define RRD_XSUM_MASK 0xFFFF
247#define RRD_XSUM_SHIFT 0
248#define RRD_NOR_MASK 0x000F
249#define RRD_NOR_SHIFT 16
250#define RRD_SI_MASK 0x0FFF
251#define RRD_SI_SHIFT 20
252
253/* rrd word 2 */
254#define RRD_VLTAG_MASK 0xFFFF
255#define RRD_VLTAG_SHIFT 0
256#define RRD_PID_MASK 0x00FF
257#define RRD_PID_SHIFT 16
258/* non-ip packet */
259#define RRD_PID_NONIP 0
260/* ipv4(only) */
261#define RRD_PID_IPV4 1
262/* tcp/ipv6 */
263#define RRD_PID_IPV6TCP 2
264/* tcp/ipv4 */
265#define RRD_PID_IPV4TCP 3
266/* udp/ipv6 */
267#define RRD_PID_IPV6UDP 4
268/* udp/ipv4 */
269#define RRD_PID_IPV4UDP 5
270/* ipv6(only) */
271#define RRD_PID_IPV6 6
272/* LLDP packet */
273#define RRD_PID_LLDP 7
274/* 1588 packet */
275#define RRD_PID_1588 8
276#define RRD_RSSQ_MASK 0x0007
277#define RRD_RSSQ_SHIFT 25
278#define RRD_RSSALG_MASK 0x000F
279#define RRD_RSSALG_SHIFT 28
280#define RRD_RSSALG_TCPV6 0x1
281#define RRD_RSSALG_IPV6 0x2
282#define RRD_RSSALG_TCPV4 0x4
283#define RRD_RSSALG_IPV4 0x8
284
285/* rrd word 3 */
286#define RRD_PKTLEN_MASK 0x3FFF
287#define RRD_PKTLEN_SHIFT 0
288#define RRD_ERR_L4_MASK 0x0001
289#define RRD_ERR_L4_SHIFT 14
290#define RRD_ERR_IPV4_MASK 0x0001
291#define RRD_ERR_IPV4_SHIFT 15
292#define RRD_VLTAGGED_MASK 0x0001
293#define RRD_VLTAGGED_SHIFT 16
294#define RRD_OLD_PID_MASK 0x0007
295#define RRD_OLD_PID_SHIFT 17
296#define RRD_ERR_RES_MASK 0x0001
297#define RRD_ERR_RES_SHIFT 20
298#define RRD_ERR_FCS_MASK 0x0001
299#define RRD_ERR_FCS_SHIFT 21
300#define RRD_ERR_FAE_MASK 0x0001
301#define RRD_ERR_FAE_SHIFT 22
302#define RRD_ERR_TRUNC_MASK 0x0001
303#define RRD_ERR_TRUNC_SHIFT 23
304#define RRD_ERR_RUNT_MASK 0x0001
305#define RRD_ERR_RUNT_SHIFT 24
306#define RRD_ERR_ICMP_MASK 0x0001
307#define RRD_ERR_ICMP_SHIFT 25
308#define RRD_BCAST_MASK 0x0001
309#define RRD_BCAST_SHIFT 26
310#define RRD_MCAST_MASK 0x0001
311#define RRD_MCAST_SHIFT 27
312#define RRD_ETHTYPE_MASK 0x0001
313#define RRD_ETHTYPE_SHIFT 28
314#define RRD_ERR_FIFOV_MASK 0x0001
315#define RRD_ERR_FIFOV_SHIFT 29
316#define RRD_ERR_LEN_MASK 0x0001
317#define RRD_ERR_LEN_SHIFT 30
318#define RRD_UPDATED_MASK 0x0001
319#define RRD_UPDATED_SHIFT 31
320
321
322#define ALX_MAX_SETUP_LNK_CYCLE 50
323
324/* for FlowControl */
325#define ALX_FC_RX 0x01
326#define ALX_FC_TX 0x02
327#define ALX_FC_ANEG 0x04
328
329/* for sleep control */
330#define ALX_SLEEP_WOL_PHY 0x00000001
331#define ALX_SLEEP_WOL_MAGIC 0x00000002
332#define ALX_SLEEP_CIFS 0x00000004
333#define ALX_SLEEP_ACTIVE (ALX_SLEEP_WOL_PHY | \
334 ALX_SLEEP_WOL_MAGIC | \
335 ALX_SLEEP_CIFS)
336
337/* for RSS hash type */
338#define ALX_RSS_HASH_TYPE_IPV4 0x1
339#define ALX_RSS_HASH_TYPE_IPV4_TCP 0x2
340#define ALX_RSS_HASH_TYPE_IPV6 0x4
341#define ALX_RSS_HASH_TYPE_IPV6_TCP 0x8
342#define ALX_RSS_HASH_TYPE_ALL (ALX_RSS_HASH_TYPE_IPV4 | \
343 ALX_RSS_HASH_TYPE_IPV4_TCP | \
344 ALX_RSS_HASH_TYPE_IPV6 | \
345 ALX_RSS_HASH_TYPE_IPV6_TCP)
346#define ALX_DEF_RXBUF_SIZE 1536
347#define ALX_MAX_JUMBO_PKT_SIZE (9*1024)
348#define ALX_MAX_TSO_PKT_SIZE (7*1024)
349#define ALX_MAX_FRAME_SIZE ALX_MAX_JUMBO_PKT_SIZE
350#define ALX_MIN_FRAME_SIZE 68
351#define ALX_RAW_MTU(_mtu) (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
352
353#define ALX_MAX_RX_QUEUES 8
354#define ALX_MAX_TX_QUEUES 4
355#define ALX_MAX_HANDLED_INTRS 5
356
357#define ALX_ISR_MISC (ALX_ISR_PCIE_LNKDOWN | \
358 ALX_ISR_DMAW | \
359 ALX_ISR_DMAR | \
360 ALX_ISR_SMB | \
361 ALX_ISR_MANU | \
362 ALX_ISR_TIMER)
363
364#define ALX_ISR_FATAL (ALX_ISR_PCIE_LNKDOWN | \
365 ALX_ISR_DMAW | ALX_ISR_DMAR)
366
367#define ALX_ISR_ALERT (ALX_ISR_RXF_OV | \
368 ALX_ISR_TXF_UR | \
369 ALX_ISR_RFD_UR)
370
371#define ALX_ISR_ALL_QUEUES (ALX_ISR_TX_Q0 | \
372 ALX_ISR_TX_Q1 | \
373 ALX_ISR_TX_Q2 | \
374 ALX_ISR_TX_Q3 | \
375 ALX_ISR_RX_Q0 | \
376 ALX_ISR_RX_Q1 | \
377 ALX_ISR_RX_Q2 | \
378 ALX_ISR_RX_Q3 | \
379 ALX_ISR_RX_Q4 | \
380 ALX_ISR_RX_Q5 | \
381 ALX_ISR_RX_Q6 | \
382 ALX_ISR_RX_Q7)
383
384/* maximum interrupt vectors for msix */
385#define ALX_MAX_MSIX_INTRS 16
386
387#define ALX_GET_FIELD(_data, _field) \
388 (((_data) >> _field ## _SHIFT) & _field ## _MASK)
389
390#define ALX_SET_FIELD(_data, _field, _value) do { \
391 (_data) &= ~(_field ## _MASK << _field ## _SHIFT); \
392 (_data) |= ((_value) & _field ## _MASK) << _field ## _SHIFT;\
393 } while (0)
394
395struct alx_hw {
396 struct pci_dev *pdev;
397 u8 __iomem *hw_addr;
398
399 /* current & permanent mac addr */
400 u8 mac_addr[ETH_ALEN];
401 u8 perm_addr[ETH_ALEN];
402
403 u16 mtu;
404 u16 imt;
405 u8 dma_chnl;
406 u8 max_dma_chnl;
407 /* tpd threshold to trig INT */
408 u32 ith_tpd;
409 u32 rx_ctrl;
410 u32 mc_hash[2];
411
412 u32 smb_timer;
413 /* SPEED_* + DUPLEX_*, SPEED_UNKNOWN if link is down */
414 int link_speed;
415
416 /* auto-neg advertisement or force mode config */
417 u32 adv_cfg;
418 u8 flowctrl;
419
420 u32 sleep_ctrl;
421
422 spinlock_t mdio_lock;
423 struct mdio_if_info mdio;
424 u16 phy_id[2];
425
426 /* PHY link patch flag */
427 bool lnk_patch;
428};
429
430static inline int alx_hw_revision(struct alx_hw *hw)
431{
432 return hw->pdev->revision >> ALX_PCI_REVID_SHIFT;
433}
434
435static inline bool alx_hw_with_cr(struct alx_hw *hw)
436{
437 return hw->pdev->revision & 1;
438}
439
440static inline bool alx_hw_giga(struct alx_hw *hw)
441{
442 return hw->pdev->device & 1;
443}
444
445static inline void alx_write_mem8(struct alx_hw *hw, u32 reg, u8 val)
446{
447 writeb(val, hw->hw_addr + reg);
448}
449
450static inline void alx_write_mem16(struct alx_hw *hw, u32 reg, u16 val)
451{
452 writew(val, hw->hw_addr + reg);
453}
454
455static inline u16 alx_read_mem16(struct alx_hw *hw, u32 reg)
456{
457 return readw(hw->hw_addr + reg);
458}
459
460static inline void alx_write_mem32(struct alx_hw *hw, u32 reg, u32 val)
461{
462 writel(val, hw->hw_addr + reg);
463}
464
465static inline u32 alx_read_mem32(struct alx_hw *hw, u32 reg)
466{
467 return readl(hw->hw_addr + reg);
468}
469
470static inline void alx_post_write(struct alx_hw *hw)
471{
472 readl(hw->hw_addr);
473}
474
475int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr);
476void alx_reset_phy(struct alx_hw *hw);
477void alx_reset_pcie(struct alx_hw *hw);
478void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en);
479int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl);
480void alx_post_phy_link(struct alx_hw *hw);
481int alx_pre_suspend(struct alx_hw *hw, int speed);
482int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data);
483int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data);
484int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata);
485int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data);
486int alx_get_phy_link(struct alx_hw *hw, int *speed);
487int alx_clear_phy_intr(struct alx_hw *hw);
488int alx_config_wol(struct alx_hw *hw);
489void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc);
490void alx_start_mac(struct alx_hw *hw);
491int alx_reset_mac(struct alx_hw *hw);
492void alx_set_macaddr(struct alx_hw *hw, const u8 *addr);
493bool alx_phy_configured(struct alx_hw *hw);
494void alx_configure_basic(struct alx_hw *hw);
495void alx_disable_rss(struct alx_hw *hw);
496int alx_select_powersaving_speed(struct alx_hw *hw, int *speed);
497bool alx_get_phy_info(struct alx_hw *hw);
498
499#endif
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
new file mode 100644
index 000000000000..418de8b13165
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -0,0 +1,1625 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35#include <linux/module.h>
36#include <linux/pci.h>
37#include <linux/interrupt.h>
38#include <linux/ip.h>
39#include <linux/ipv6.h>
40#include <linux/if_vlan.h>
41#include <linux/mdio.h>
42#include <linux/aer.h>
43#include <linux/bitops.h>
44#include <linux/netdevice.h>
45#include <linux/etherdevice.h>
46#include <net/ip6_checksum.h>
47#include <linux/crc32.h>
48#include "alx.h"
49#include "hw.h"
50#include "reg.h"
51
52const char alx_drv_name[] = "alx";
53
54
55static void alx_free_txbuf(struct alx_priv *alx, int entry)
56{
57 struct alx_buffer *txb = &alx->txq.bufs[entry];
58
59 if (dma_unmap_len(txb, size)) {
60 dma_unmap_single(&alx->hw.pdev->dev,
61 dma_unmap_addr(txb, dma),
62 dma_unmap_len(txb, size),
63 DMA_TO_DEVICE);
64 dma_unmap_len_set(txb, size, 0);
65 }
66
67 if (txb->skb) {
68 dev_kfree_skb_any(txb->skb);
69 txb->skb = NULL;
70 }
71}
72
73static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
74{
75 struct alx_rx_queue *rxq = &alx->rxq;
76 struct sk_buff *skb;
77 struct alx_buffer *cur_buf;
78 dma_addr_t dma;
79 u16 cur, next, count = 0;
80
81 next = cur = rxq->write_idx;
82 if (++next == alx->rx_ringsz)
83 next = 0;
84 cur_buf = &rxq->bufs[cur];
85
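	/* stop one slot short of read_idx so a completely filled ring is
	 * never mistaken for an empty one (write_idx == read_idx means empty)
	 */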
86 while (!cur_buf->skb && next != rxq->read_idx) {
87 struct alx_rfd *rfd = &rxq->rfd[cur];
88
89 skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
90 if (!skb)
91 break;
92 dma = dma_map_single(&alx->hw.pdev->dev,
93 skb->data, alx->rxbuf_size,
94 DMA_FROM_DEVICE);
95 if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
96 dev_kfree_skb(skb);
97 break;
98 }
99
100 /* Unfortunately, RX descriptor buffers must be 4-byte
101 * aligned, so we can't use IP alignment.
102 */
103 if (WARN_ON(dma & 3)) {
104 dev_kfree_skb(skb);
105 break;
106 }
107
108 cur_buf->skb = skb;
109 dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
110 dma_unmap_addr_set(cur_buf, dma, dma);
111 rfd->addr = cpu_to_le64(dma);
112
113 cur = next;
114 if (++next == alx->rx_ringsz)
115 next = 0;
116 cur_buf = &rxq->bufs[cur];
117 count++;
118 }
119
120 if (count) {
121 /* flush all updates before updating hardware */
122 wmb();
123 rxq->write_idx = cur;
124 alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
125 }
126
127 return count;
128}
129
130static inline int alx_tpd_avail(struct alx_priv *alx)
131{
132 struct alx_tx_queue *txq = &alx->txq;
133
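	/* the "- 1" keeps one TPD unused so write_idx == read_idx always
	 * means the ring is empty, never full
	 */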
134 if (txq->write_idx >= txq->read_idx)
135 return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1;
136 return txq->read_idx - txq->write_idx - 1;
137}
138
139static bool alx_clean_tx_irq(struct alx_priv *alx)
140{
141 struct alx_tx_queue *txq = &alx->txq;
142 u16 hw_read_idx, sw_read_idx;
143 unsigned int total_bytes = 0, total_packets = 0;
144 int budget = ALX_DEFAULT_TX_WORK;
145
146 sw_read_idx = txq->read_idx;
147 hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX);
148
149 if (sw_read_idx != hw_read_idx) {
150 while (sw_read_idx != hw_read_idx && budget > 0) {
151 struct sk_buff *skb;
152
153 skb = txq->bufs[sw_read_idx].skb;
154 if (skb) {
155 total_bytes += skb->len;
156 total_packets++;
157 budget--;
158 }
159
160 alx_free_txbuf(alx, sw_read_idx);
161
162 if (++sw_read_idx == alx->tx_ringsz)
163 sw_read_idx = 0;
164 }
165 txq->read_idx = sw_read_idx;
166
167 netdev_completed_queue(alx->dev, total_packets, total_bytes);
168 }
169
170 if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) &&
171 alx_tpd_avail(alx) > alx->tx_ringsz/4)
172 netif_wake_queue(alx->dev);
173
174 return sw_read_idx == hw_read_idx;
175}
176
177static void alx_schedule_link_check(struct alx_priv *alx)
178{
179 schedule_work(&alx->link_check_wk);
180}
181
182static void alx_schedule_reset(struct alx_priv *alx)
183{
184 schedule_work(&alx->reset_wk);
185}
186
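/*
 * Receive path: each RX return descriptor (RRD) written back by the
 * hardware refers to exactly one RX free descriptor (RFD).  If the
 * RRD's start index or descriptor count disagrees with the software
 * state, the rings are treated as corrupted and a full reset is
 * scheduled.  Good frames are passed up through GRO, and the RFD ring
 * is refilled in batches of ALX_RX_ALLOC_THRESH.  Returns true if the
 * budget was not exhausted.
 */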
187static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
188{
189 struct alx_rx_queue *rxq = &alx->rxq;
190 struct alx_rrd *rrd;
191 struct alx_buffer *rxb;
192 struct sk_buff *skb;
193 u16 length, rfd_cleaned = 0;
194
195 while (budget > 0) {
196 rrd = &rxq->rrd[rxq->rrd_read_idx];
197 if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
198 break;
199 rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);
200
201 if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
202 RRD_SI) != rxq->read_idx ||
203 ALX_GET_FIELD(le32_to_cpu(rrd->word0),
204 RRD_NOR) != 1) {
205 alx_schedule_reset(alx);
206 return 0;
207 }
208
209 rxb = &rxq->bufs[rxq->read_idx];
210 dma_unmap_single(&alx->hw.pdev->dev,
211 dma_unmap_addr(rxb, dma),
212 dma_unmap_len(rxb, size),
213 DMA_FROM_DEVICE);
214 dma_unmap_len_set(rxb, size, 0);
215 skb = rxb->skb;
216 rxb->skb = NULL;
217
218 if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
219 rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
220 rrd->word3 = 0;
221 dev_kfree_skb_any(skb);
222 goto next_pkt;
223 }
224
225 length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
226 RRD_PKTLEN) - ETH_FCS_LEN;
227 skb_put(skb, length);
228 skb->protocol = eth_type_trans(skb, alx->dev);
229
230 skb_checksum_none_assert(skb);
231 if (alx->dev->features & NETIF_F_RXCSUM &&
232 !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
233 cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
234 switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
235 RRD_PID)) {
236 case RRD_PID_IPV6UDP:
237 case RRD_PID_IPV4UDP:
238 case RRD_PID_IPV4TCP:
239 case RRD_PID_IPV6TCP:
240 skb->ip_summed = CHECKSUM_UNNECESSARY;
241 break;
242 }
243 }
244
245 napi_gro_receive(&alx->napi, skb);
246 budget--;
247
248next_pkt:
249 if (++rxq->read_idx == alx->rx_ringsz)
250 rxq->read_idx = 0;
251 if (++rxq->rrd_read_idx == alx->rx_ringsz)
252 rxq->rrd_read_idx = 0;
253
254 if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
255 rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
256 }
257
258 if (rfd_cleaned)
259 alx_refill_rx_ring(alx, GFP_ATOMIC);
260
261 return budget > 0;
262}
263
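/*
 * NAPI poll: clean both the TX and RX rings; only when both report
 * completion is NAPI signalled as done and the per-queue interrupt
 * bits re-enabled in ALX_IMR under irq_lock.
 */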
264static int alx_poll(struct napi_struct *napi, int budget)
265{
266 struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
267 struct alx_hw *hw = &alx->hw;
268 bool complete = true;
269 unsigned long flags;
270
271 complete = alx_clean_tx_irq(alx) &&
272 alx_clean_rx_irq(alx, budget);
273
274 if (!complete)
275 return 1;
276
277 napi_complete(&alx->napi);
278
279 /* enable interrupt */
280 spin_lock_irqsave(&alx->irq_lock, flags);
281 alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
282 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
283 spin_unlock_irqrestore(&alx->irq_lock, flags);
284
285 alx_post_write(hw);
286
287 return 0;
288}
289
290static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
291{
292 struct alx_hw *hw = &alx->hw;
293 bool write_int_mask = false;
294
295 spin_lock(&alx->irq_lock);
296
297 /* ACK interrupt */
298 alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
299 intr &= alx->int_mask;
300
301 if (intr & ALX_ISR_FATAL) {
302 netif_warn(alx, hw, alx->dev,
303 "fatal interrupt 0x%x, resetting\n", intr);
304 alx_schedule_reset(alx);
305 goto out;
306 }
307
308 if (intr & ALX_ISR_ALERT)
309 netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);
310
311 if (intr & ALX_ISR_PHY) {
312		/* mask the PHY interrupt: its source is internal to the
313		 * PHY, and the interrupt status can only be cleared after
314		 * the link-check worker clears the PHY's internal status.
315		 */
316 alx->int_mask &= ~ALX_ISR_PHY;
317 write_int_mask = true;
318 alx_schedule_link_check(alx);
319 }
320
321 if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
322 napi_schedule(&alx->napi);
323 /* mask rx/tx interrupt, enable them when napi complete */
324 alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
325 write_int_mask = true;
326 }
327
328 if (write_int_mask)
329 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
330
331 alx_write_mem32(hw, ALX_ISR, 0);
332
333 out:
334 spin_unlock(&alx->irq_lock);
335 return IRQ_HANDLED;
336}
337
338static irqreturn_t alx_intr_msi(int irq, void *data)
339{
340 struct alx_priv *alx = data;
341
342 return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
343}
344
345static irqreturn_t alx_intr_legacy(int irq, void *data)
346{
347 struct alx_priv *alx = data;
348 struct alx_hw *hw = &alx->hw;
349 u32 intr;
350
351 intr = alx_read_mem32(hw, ALX_ISR);
352
353 if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
354 return IRQ_NONE;
355
356 return alx_intr_handle(alx, intr);
357}
358
359static void alx_init_ring_ptrs(struct alx_priv *alx)
360{
361 struct alx_hw *hw = &alx->hw;
362 u32 addr_hi = ((u64)alx->descmem.dma) >> 32;
363
364 alx->rxq.read_idx = 0;
365 alx->rxq.write_idx = 0;
366 alx->rxq.rrd_read_idx = 0;
367 alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
368 alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma);
369 alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
370 alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma);
371 alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
372 alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);
373
374 alx->txq.read_idx = 0;
375 alx->txq.write_idx = 0;
376 alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
377 alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma);
378 alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);
379
380 /* load these pointers into the chip */
381 alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
382}
383
384static void alx_free_txring_buf(struct alx_priv *alx)
385{
386 struct alx_tx_queue *txq = &alx->txq;
387 int i;
388
389 if (!txq->bufs)
390 return;
391
392 for (i = 0; i < alx->tx_ringsz; i++)
393 alx_free_txbuf(alx, i);
394
395 memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer));
396 memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd));
397 txq->write_idx = 0;
398 txq->read_idx = 0;
399
400 netdev_reset_queue(alx->dev);
401}
402
403static void alx_free_rxring_buf(struct alx_priv *alx)
404{
405 struct alx_rx_queue *rxq = &alx->rxq;
406 struct alx_buffer *cur_buf;
407 u16 i;
408
409 if (rxq == NULL)
410 return;
411
412 for (i = 0; i < alx->rx_ringsz; i++) {
413 cur_buf = rxq->bufs + i;
414 if (cur_buf->skb) {
415 dma_unmap_single(&alx->hw.pdev->dev,
416 dma_unmap_addr(cur_buf, dma),
417 dma_unmap_len(cur_buf, size),
418 DMA_FROM_DEVICE);
419 dev_kfree_skb(cur_buf->skb);
420 cur_buf->skb = NULL;
421 dma_unmap_len_set(cur_buf, size, 0);
422 dma_unmap_addr_set(cur_buf, dma, 0);
423 }
424 }
425
426 rxq->write_idx = 0;
427 rxq->read_idx = 0;
428 rxq->rrd_read_idx = 0;
429}
430
431static void alx_free_buffers(struct alx_priv *alx)
432{
433 alx_free_txring_buf(alx);
434 alx_free_rxring_buf(alx);
435}
436
437static int alx_reinit_rings(struct alx_priv *alx)
438{
439 alx_free_buffers(alx);
440
441 alx_init_ring_ptrs(alx);
442
443 if (!alx_refill_rx_ring(alx, GFP_KERNEL))
444 return -ENOMEM;
445
446 return 0;
447}
448
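/*
 * Multicast filtering uses a 64-bit hash spread over the two 32-bit
 * registers ALX_HASH_TBL0/1: bit 31 of the CRC-32 of the address
 * selects the register, bits 30..26 select the bit within it.
 */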
449static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
450{
451 u32 crc32, bit, reg;
452
453 crc32 = ether_crc(ETH_ALEN, addr);
454 reg = (crc32 >> 31) & 0x1;
455 bit = (crc32 >> 26) & 0x1F;
456
457 mc_hash[reg] |= BIT(bit);
458}
459
460static void __alx_set_rx_mode(struct net_device *netdev)
461{
462 struct alx_priv *alx = netdev_priv(netdev);
463 struct alx_hw *hw = &alx->hw;
464 struct netdev_hw_addr *ha;
465 u32 mc_hash[2] = {};
466
467 if (!(netdev->flags & IFF_ALLMULTI)) {
468 netdev_for_each_mc_addr(ha, netdev)
469 alx_add_mc_addr(hw, ha->addr, mc_hash);
470
471 alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
472 alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
473 }
474
475 hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
476 if (netdev->flags & IFF_PROMISC)
477 hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
478 if (netdev->flags & IFF_ALLMULTI)
479 hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;
480
481 alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
482}
483
484static void alx_set_rx_mode(struct net_device *netdev)
485{
486 __alx_set_rx_mode(netdev);
487}
488
489static int alx_set_mac_address(struct net_device *netdev, void *data)
490{
491 struct alx_priv *alx = netdev_priv(netdev);
492 struct alx_hw *hw = &alx->hw;
493 struct sockaddr *addr = data;
494
495 if (!is_valid_ether_addr(addr->sa_data))
496 return -EADDRNOTAVAIL;
497
498 if (netdev->addr_assign_type & NET_ADDR_RANDOM)
499 netdev->addr_assign_type ^= NET_ADDR_RANDOM;
500
501 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
502 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
503 alx_set_macaddr(hw, hw->mac_addr);
504
505 return 0;
506}
507
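/*
 * Layout of the single coherent descriptor allocation (all rings must
 * share the same upper 32 address bits, programmed once through
 * ALX_TX/RX_BASE_ADDR_HI in alx_init_ring_ptrs()):
 *
 *   descmem.dma -> +---------------------------+
 *                  | TPD ring (tx_ringsz TPDs) |
 *                  +---------------------------+
 *                  | RRD ring (rx_ringsz RRDs) |
 *                  +---------------------------+
 *                  | RFD ring (rx_ringsz RFDs) |
 *                  +---------------------------+
 */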
508static int alx_alloc_descriptors(struct alx_priv *alx)
509{
510 alx->txq.bufs = kcalloc(alx->tx_ringsz,
511 sizeof(struct alx_buffer),
512 GFP_KERNEL);
513 if (!alx->txq.bufs)
514 return -ENOMEM;
515
516 alx->rxq.bufs = kcalloc(alx->rx_ringsz,
517 sizeof(struct alx_buffer),
518 GFP_KERNEL);
519 if (!alx->rxq.bufs)
520 goto out_free;
521
522 /* physical tx/rx ring descriptors
523 *
524 * Allocate them as a single chunk because they must not cross a
525 * 4G boundary (hardware has a single register for high 32 bits
526 * of addresses only)
527 */
528 alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz +
529 sizeof(struct alx_rrd) * alx->rx_ringsz +
530 sizeof(struct alx_rfd) * alx->rx_ringsz;
531 alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
532 alx->descmem.size,
533 &alx->descmem.dma,
534 GFP_KERNEL);
535 if (!alx->descmem.virt)
536 goto out_free;
537
538 alx->txq.tpd = (void *)alx->descmem.virt;
539 alx->txq.tpd_dma = alx->descmem.dma;
540
541 /* alignment requirement for next block */
542 BUILD_BUG_ON(sizeof(struct alx_txd) % 8);
543
544 alx->rxq.rrd =
545 (void *)((u8 *)alx->descmem.virt +
546 sizeof(struct alx_txd) * alx->tx_ringsz);
547 alx->rxq.rrd_dma = alx->descmem.dma +
548 sizeof(struct alx_txd) * alx->tx_ringsz;
549
550 /* alignment requirement for next block */
551 BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);
552
553 alx->rxq.rfd =
554 (void *)((u8 *)alx->descmem.virt +
555 sizeof(struct alx_txd) * alx->tx_ringsz +
556 sizeof(struct alx_rrd) * alx->rx_ringsz);
557 alx->rxq.rfd_dma = alx->descmem.dma +
558 sizeof(struct alx_txd) * alx->tx_ringsz +
559 sizeof(struct alx_rrd) * alx->rx_ringsz;
560
561 return 0;
562out_free:
563 kfree(alx->txq.bufs);
564 kfree(alx->rxq.bufs);
565 return -ENOMEM;
566}
567
568static int alx_alloc_rings(struct alx_priv *alx)
569{
570 int err;
571
572 err = alx_alloc_descriptors(alx);
573 if (err)
574 return err;
575
576 alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
577 alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
579
580 netif_napi_add(alx->dev, &alx->napi, alx_poll, 64);
581
582 alx_reinit_rings(alx);
583 return 0;
584}
585
586static void alx_free_rings(struct alx_priv *alx)
587{
588 netif_napi_del(&alx->napi);
589 alx_free_buffers(alx);
590
591 kfree(alx->txq.bufs);
592 kfree(alx->rxq.bufs);
593
594 dma_free_coherent(&alx->hw.pdev->dev,
595 alx->descmem.size,
596 alx->descmem.virt,
597 alx->descmem.dma);
598}
599
600static void alx_config_vector_mapping(struct alx_priv *alx)
601{
602 struct alx_hw *hw = &alx->hw;
603
604 alx_write_mem32(hw, ALX_MSI_MAP_TBL1, 0);
605 alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
606 alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
607}
608
609static void alx_irq_enable(struct alx_priv *alx)
610{
611 struct alx_hw *hw = &alx->hw;
612
613 /* level-1 interrupt switch */
614 alx_write_mem32(hw, ALX_ISR, 0);
615 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
616 alx_post_write(hw);
617}
618
619static void alx_irq_disable(struct alx_priv *alx)
620{
621 struct alx_hw *hw = &alx->hw;
622
623 alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
624 alx_write_mem32(hw, ALX_IMR, 0);
625 alx_post_write(hw);
626
627 synchronize_irq(alx->hw.pdev->irq);
628}
629
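/*
 * Interrupt setup: MSI is tried first, with the MSI retransmission
 * timer set to half the interrupt moderation value (hw->imt); if MSI
 * cannot be enabled or request_irq() fails, fall back to a shared
 * legacy INTx line.  The MSI vector mapping tables are zeroed in
 * either case.
 */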
630static int alx_request_irq(struct alx_priv *alx)
631{
632 struct pci_dev *pdev = alx->hw.pdev;
633 struct alx_hw *hw = &alx->hw;
634 int err;
635 u32 msi_ctrl;
636
637 msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;
638
639 if (!pci_enable_msi(alx->hw.pdev)) {
640 alx->msi = true;
641
642 alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
643 msi_ctrl | ALX_MSI_MASK_SEL_LINE);
644 err = request_irq(pdev->irq, alx_intr_msi, 0,
645 alx->dev->name, alx);
646 if (!err)
647 goto out;
648 /* fall back to legacy interrupt */
649 pci_disable_msi(alx->hw.pdev);
650 }
651
652 alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
653 err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
654 alx->dev->name, alx);
655out:
656 if (!err)
657 alx_config_vector_mapping(alx);
658 return err;
659}
660
661static void alx_free_irq(struct alx_priv *alx)
662{
663 struct pci_dev *pdev = alx->hw.pdev;
664
665 free_irq(pdev->irq, alx);
666
667 if (alx->msi) {
668 pci_disable_msi(alx->hw.pdev);
669 alx->msi = false;
670 }
671}
672
673static int alx_identify_hw(struct alx_priv *alx)
674{
675 struct alx_hw *hw = &alx->hw;
676 int rev = alx_hw_revision(hw);
677
678 if (rev > ALX_REV_C0)
679 return -EINVAL;
680
681 hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;
682
683 return 0;
684}
685
686static int alx_init_sw(struct alx_priv *alx)
687{
688 struct pci_dev *pdev = alx->hw.pdev;
689 struct alx_hw *hw = &alx->hw;
690 int err;
691
692 err = alx_identify_hw(alx);
693 if (err) {
694 dev_err(&pdev->dev, "unrecognized chip, aborting\n");
695 return err;
696 }
697
698 alx->hw.lnk_patch =
699 pdev->device == ALX_DEV_ID_AR8161 &&
700 pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
701 pdev->subsystem_device == 0x0091 &&
702 pdev->revision == 0;
703
704 hw->smb_timer = 400;
705 hw->mtu = alx->dev->mtu;
706 alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8);
707 alx->tx_ringsz = 256;
708 alx->rx_ringsz = 512;
709 hw->sleep_ctrl = ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_WOL_PHY;
710 hw->imt = 200;
711 alx->int_mask = ALX_ISR_MISC;
712 hw->dma_chnl = hw->max_dma_chnl;
713 hw->ith_tpd = alx->tx_ringsz / 3;
714 hw->link_speed = SPEED_UNKNOWN;
715 hw->adv_cfg = ADVERTISED_Autoneg |
716 ADVERTISED_10baseT_Half |
717 ADVERTISED_10baseT_Full |
718 ADVERTISED_100baseT_Full |
719 ADVERTISED_100baseT_Half |
720 ADVERTISED_1000baseT_Full;
721 hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;
722
723 hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
724 ALX_MAC_CTRL_MHASH_ALG_HI5B |
725 ALX_MAC_CTRL_BRD_EN |
726 ALX_MAC_CTRL_PCRCE |
727 ALX_MAC_CTRL_CRCE |
728 ALX_MAC_CTRL_RXFC_EN |
729 ALX_MAC_CTRL_TXFC_EN |
730 7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;
731
732 return err;
733}
734
735
736static netdev_features_t alx_fix_features(struct net_device *netdev,
737 netdev_features_t features)
738{
739 if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
740 features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
741
742 return features;
743}
744
745static void alx_netif_stop(struct alx_priv *alx)
746{
747 alx->dev->trans_start = jiffies;
748 if (netif_carrier_ok(alx->dev)) {
749 netif_carrier_off(alx->dev);
750 netif_tx_disable(alx->dev);
751 napi_disable(&alx->napi);
752 }
753}
754
755static void alx_halt(struct alx_priv *alx)
756{
757 struct alx_hw *hw = &alx->hw;
758
759 alx_netif_stop(alx);
760 hw->link_speed = SPEED_UNKNOWN;
761
762 alx_reset_mac(hw);
763
764 /* disable l0s/l1 */
765 alx_enable_aspm(hw, false, false);
766 alx_irq_disable(alx);
767 alx_free_buffers(alx);
768}
769
770static void alx_configure(struct alx_priv *alx)
771{
772 struct alx_hw *hw = &alx->hw;
773
774 alx_configure_basic(hw);
775 alx_disable_rss(hw);
776 __alx_set_rx_mode(alx->dev);
777
778 alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
779}
780
781static void alx_activate(struct alx_priv *alx)
782{
783 /* hardware setting lost, restore it */
784 alx_reinit_rings(alx);
785 alx_configure(alx);
786
787 /* clear old interrupts */
788 alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
789
790 alx_irq_enable(alx);
791
792 alx_schedule_link_check(alx);
793}
794
795static void alx_reinit(struct alx_priv *alx)
796{
797 ASSERT_RTNL();
798
799 alx_halt(alx);
800 alx_activate(alx);
801}
802
803static int alx_change_mtu(struct net_device *netdev, int mtu)
804{
805 struct alx_priv *alx = netdev_priv(netdev);
806 int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
807
808 if ((max_frame < ALX_MIN_FRAME_SIZE) ||
809 (max_frame > ALX_MAX_FRAME_SIZE))
810 return -EINVAL;
811
812 if (netdev->mtu == mtu)
813 return 0;
814
815 netdev->mtu = mtu;
816 alx->hw.mtu = mtu;
817 alx->rxbuf_size = mtu > ALX_DEF_RXBUF_SIZE ?
818 ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE;
819 netdev_update_features(netdev);
820 if (netif_running(netdev))
821 alx_reinit(alx);
822 return 0;
823}
824
825static void alx_netif_start(struct alx_priv *alx)
826{
827 netif_tx_wake_all_queues(alx->dev);
828 napi_enable(&alx->napi);
829 netif_carrier_on(alx->dev);
830}
831
832static int __alx_open(struct alx_priv *alx, bool resume)
833{
834 int err;
835
836 if (!resume)
837 netif_carrier_off(alx->dev);
838
839 err = alx_alloc_rings(alx);
840 if (err)
841 return err;
842
843 alx_configure(alx);
844
845 err = alx_request_irq(alx);
846 if (err)
847 goto out_free_rings;
848
849 /* clear old interrupts */
850 alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
851
852 alx_irq_enable(alx);
853
854 if (!resume)
855 netif_tx_start_all_queues(alx->dev);
856
857 alx_schedule_link_check(alx);
858 return 0;
859
860out_free_rings:
861 alx_free_rings(alx);
862 return err;
863}
864
865static void __alx_stop(struct alx_priv *alx)
866{
867 alx_halt(alx);
868 alx_free_irq(alx);
869 alx_free_rings(alx);
870}
871
872static const char *alx_speed_desc(u16 speed)
873{
874 switch (speed) {
875 case SPEED_1000 + DUPLEX_FULL:
876 return "1 Gbps Full";
877 case SPEED_100 + DUPLEX_FULL:
878 return "100 Mbps Full";
879 case SPEED_100 + DUPLEX_HALF:
880 return "100 Mbps Half";
881 case SPEED_10 + DUPLEX_FULL:
882 return "10 Mbps Full";
883 case SPEED_10 + DUPLEX_HALF:
884 return "10 Mbps Half";
885 default:
886 return "Unknown speed";
887 }
888}
889
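/*
 * Link-check worker: clear the PHY's internal interrupt status first
 * (otherwise the top-level PHY interrupt stays asserted), re-enable
 * ALX_ISR_PHY, then act on the new link state -- start the MAC and
 * netif queues on link-up, or stop, reset and reconfigure the MAC on
 * link-down.  Any failure escalates to a full adapter reset.
 */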
890static void alx_check_link(struct alx_priv *alx)
891{
892 struct alx_hw *hw = &alx->hw;
893 unsigned long flags;
894 int speed, old_speed;
895 int err;
896
897 /* clear PHY internal interrupt status, otherwise the main
898 * interrupt status will be asserted forever
899 */
900 alx_clear_phy_intr(hw);
901
902 err = alx_get_phy_link(hw, &speed);
903 if (err < 0)
904 goto reset;
905
906 spin_lock_irqsave(&alx->irq_lock, flags);
907 alx->int_mask |= ALX_ISR_PHY;
908 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
909 spin_unlock_irqrestore(&alx->irq_lock, flags);
910
911 old_speed = hw->link_speed;
912
913 if (old_speed == speed)
914 return;
915 hw->link_speed = speed;
916
917 if (speed != SPEED_UNKNOWN) {
918 netif_info(alx, link, alx->dev,
919 "NIC Up: %s\n", alx_speed_desc(speed));
920 alx_post_phy_link(hw);
921 alx_enable_aspm(hw, true, true);
922 alx_start_mac(hw);
923
924 if (old_speed == SPEED_UNKNOWN)
925 alx_netif_start(alx);
926 } else {
927 /* link is now down */
928 alx_netif_stop(alx);
929 netif_info(alx, link, alx->dev, "Link Down\n");
930 err = alx_reset_mac(hw);
931 if (err)
932 goto reset;
933 alx_irq_disable(alx);
934
935 /* MAC reset causes all HW settings to be lost, restore all */
936 err = alx_reinit_rings(alx);
937 if (err)
938 goto reset;
939 alx_configure(alx);
940 alx_enable_aspm(hw, false, true);
941 alx_post_phy_link(hw);
942 alx_irq_enable(alx);
943 }
944
945 return;
946
947reset:
948 alx_schedule_reset(alx);
949}
950
951static int alx_open(struct net_device *netdev)
952{
953 return __alx_open(netdev_priv(netdev), false);
954}
955
956static int alx_stop(struct net_device *netdev)
957{
958 __alx_stop(netdev_priv(netdev));
959 return 0;
960}
961
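/*
 * Common shutdown/suspend path: detach and stop the interface, select
 * a power-saving link speed, program wake-on-LAN, and report through
 * *wol_en whether the device should stay armed for wakeup before the
 * PCI device is disabled.
 */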
962static int __alx_shutdown(struct pci_dev *pdev, bool *wol_en)
963{
964 struct alx_priv *alx = pci_get_drvdata(pdev);
965 struct net_device *netdev = alx->dev;
966 struct alx_hw *hw = &alx->hw;
967 int err, speed;
968
969 netif_device_detach(netdev);
970
971 if (netif_running(netdev))
972 __alx_stop(alx);
973
974#ifdef CONFIG_PM_SLEEP
975 err = pci_save_state(pdev);
976 if (err)
977 return err;
978#endif
979
980 err = alx_select_powersaving_speed(hw, &speed);
981 if (err)
982 return err;
983 err = alx_clear_phy_intr(hw);
984 if (err)
985 return err;
986 err = alx_pre_suspend(hw, speed);
987 if (err)
988 return err;
989 err = alx_config_wol(hw);
990 if (err)
991 return err;
992
993 *wol_en = false;
994 if (hw->sleep_ctrl & ALX_SLEEP_ACTIVE) {
995 netif_info(alx, wol, netdev,
996 "wol: ctrl=%X, speed=%X\n",
997 hw->sleep_ctrl, speed);
998 device_set_wakeup_enable(&pdev->dev, true);
999 *wol_en = true;
1000 }
1001
1002 pci_disable_device(pdev);
1003
1004 return 0;
1005}
1006
1007static void alx_shutdown(struct pci_dev *pdev)
1008{
1009 int err;
1010 bool wol_en;
1011
1012 err = __alx_shutdown(pdev, &wol_en);
1013 if (!err) {
1014 pci_wake_from_d3(pdev, wol_en);
1015 pci_set_power_state(pdev, PCI_D3hot);
1016 } else {
1017 dev_err(&pdev->dev, "shutdown fail %d\n", err);
1018 }
1019}
1020
1021static void alx_link_check(struct work_struct *work)
1022{
1023 struct alx_priv *alx;
1024
1025 alx = container_of(work, struct alx_priv, link_check_wk);
1026
1027 rtnl_lock();
1028 alx_check_link(alx);
1029 rtnl_unlock();
1030}
1031
1032static void alx_reset(struct work_struct *work)
1033{
1034 struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);
1035
1036 rtnl_lock();
1037 alx_reinit(alx);
1038 rtnl_unlock();
1039}
1040
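/*
 * Checksum offload for CHECKSUM_PARTIAL skbs: the hardware takes the
 * checksum start and result offsets in 16-bit-word units, which is why
 * an odd start offset is rejected and both offsets are shifted right
 * by one before being encoded into the first TPD.
 */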
1041static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
1042{
1043 u8 cso, css;
1044
1045 if (skb->ip_summed != CHECKSUM_PARTIAL)
1046 return 0;
1047
1048 cso = skb_checksum_start_offset(skb);
1049 if (cso & 1)
1050 return -EINVAL;
1051
1052 css = cso + skb->csum_offset;
1053 first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
1054 first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
1055 first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);
1056
1057 return 0;
1058}
1059
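/*
 * Map an skb onto the TX ring: the linear head fills the first TPD and
 * each page fragment a following one; the end-of-packet bit is set on
 * the last TPD and the skb pointer is stored with the last buffer so
 * TX completion can free everything in one place.  On a DMA mapping
 * error every descriptor written so far is unwound.
 */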
1060static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
1061{
1062 struct alx_tx_queue *txq = &alx->txq;
1063 struct alx_txd *tpd, *first_tpd;
1064 dma_addr_t dma;
1065 int maplen, f, first_idx = txq->write_idx;
1066
1067 first_tpd = &txq->tpd[txq->write_idx];
1068 tpd = first_tpd;
1069
1070 maplen = skb_headlen(skb);
1071 dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen,
1072 DMA_TO_DEVICE);
1073 if (dma_mapping_error(&alx->hw.pdev->dev, dma))
1074 goto err_dma;
1075
1076 dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
1077 dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
1078
1079 tpd->adrl.addr = cpu_to_le64(dma);
1080 tpd->len = cpu_to_le16(maplen);
1081
1082 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
1083 struct skb_frag_struct *frag;
1084
1085 frag = &skb_shinfo(skb)->frags[f];
1086
1087 if (++txq->write_idx == alx->tx_ringsz)
1088 txq->write_idx = 0;
1089 tpd = &txq->tpd[txq->write_idx];
1090
1091 tpd->word1 = first_tpd->word1;
1092
1093 maplen = skb_frag_size(frag);
1094 dma = skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0,
1095 maplen, DMA_TO_DEVICE);
1096 if (dma_mapping_error(&alx->hw.pdev->dev, dma))
1097 goto err_dma;
1098 dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
1099 dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
1100
1101 tpd->adrl.addr = cpu_to_le64(dma);
1102 tpd->len = cpu_to_le16(maplen);
1103 }
1104
1105 /* last TPD, set EOP flag and store skb */
1106 tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
1107 txq->bufs[txq->write_idx].skb = skb;
1108
1109 if (++txq->write_idx == alx->tx_ringsz)
1110 txq->write_idx = 0;
1111
1112 return 0;
1113
1114err_dma:
1115 f = first_idx;
1116 while (f != txq->write_idx) {
1117 alx_free_txbuf(alx, f);
1118 if (++f == alx->tx_ringsz)
1119 f = 0;
1120 }
1121 return -ENOMEM;
1122}
1123
1124static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
1125 struct net_device *netdev)
1126{
1127 struct alx_priv *alx = netdev_priv(netdev);
1128 struct alx_tx_queue *txq = &alx->txq;
1129 struct alx_txd *first;
1130 int tpdreq = skb_shinfo(skb)->nr_frags + 1;
1131
1132 if (alx_tpd_avail(alx) < tpdreq) {
1133 netif_stop_queue(alx->dev);
1134 goto drop;
1135 }
1136
1137 first = &txq->tpd[txq->write_idx];
1138 memset(first, 0, sizeof(*first));
1139
1140 if (alx_tx_csum(skb, first))
1141 goto drop;
1142
1143 if (alx_map_tx_skb(alx, skb) < 0)
1144 goto drop;
1145
1146 netdev_sent_queue(alx->dev, skb->len);
1147
1148 /* flush updates before updating hardware */
1149 wmb();
1150 alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx);
1151
1152 if (alx_tpd_avail(alx) < alx->tx_ringsz/8)
1153 netif_stop_queue(alx->dev);
1154
1155 return NETDEV_TX_OK;
1156
1157drop:
1158 dev_kfree_skb(skb);
1159 return NETDEV_TX_OK;
1160}
1161
1162static void alx_tx_timeout(struct net_device *dev)
1163{
1164 struct alx_priv *alx = netdev_priv(dev);
1165
1166 alx_schedule_reset(alx);
1167}
1168
1169static int alx_mdio_read(struct net_device *netdev,
1170 int prtad, int devad, u16 addr)
1171{
1172 struct alx_priv *alx = netdev_priv(netdev);
1173 struct alx_hw *hw = &alx->hw;
1174 u16 val;
1175 int err;
1176
1177 if (prtad != hw->mdio.prtad)
1178 return -EINVAL;
1179
1180 if (devad == MDIO_DEVAD_NONE)
1181 err = alx_read_phy_reg(hw, addr, &val);
1182 else
1183 err = alx_read_phy_ext(hw, devad, addr, &val);
1184
1185 if (err)
1186 return err;
1187 return val;
1188}
1189
1190static int alx_mdio_write(struct net_device *netdev,
1191 int prtad, int devad, u16 addr, u16 val)
1192{
1193 struct alx_priv *alx = netdev_priv(netdev);
1194 struct alx_hw *hw = &alx->hw;
1195
1196 if (prtad != hw->mdio.prtad)
1197 return -EINVAL;
1198
1199 if (devad == MDIO_DEVAD_NONE)
1200 return alx_write_phy_reg(hw, addr, val);
1201
1202 return alx_write_phy_ext(hw, devad, addr, val);
1203}
1204
1205static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1206{
1207 struct alx_priv *alx = netdev_priv(netdev);
1208
1209 if (!netif_running(netdev))
1210 return -EAGAIN;
1211
1212 return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
1213}
1214
1215#ifdef CONFIG_NET_POLL_CONTROLLER
1216static void alx_poll_controller(struct net_device *netdev)
1217{
1218 struct alx_priv *alx = netdev_priv(netdev);
1219
1220 if (alx->msi)
1221 alx_intr_msi(0, alx);
1222 else
1223 alx_intr_legacy(0, alx);
1224}
1225#endif
1226
1227static const struct net_device_ops alx_netdev_ops = {
1228 .ndo_open = alx_open,
1229 .ndo_stop = alx_stop,
1230 .ndo_start_xmit = alx_start_xmit,
1231 .ndo_set_rx_mode = alx_set_rx_mode,
1232 .ndo_validate_addr = eth_validate_addr,
1233 .ndo_set_mac_address = alx_set_mac_address,
1234 .ndo_change_mtu = alx_change_mtu,
1235 .ndo_do_ioctl = alx_ioctl,
1236 .ndo_tx_timeout = alx_tx_timeout,
1237 .ndo_fix_features = alx_fix_features,
1238#ifdef CONFIG_NET_POLL_CONTROLLER
1239 .ndo_poll_controller = alx_poll_controller,
1240#endif
1241};
1242
1243static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1244{
1245 struct net_device *netdev;
1246 struct alx_priv *alx;
1247 struct alx_hw *hw;
1248 bool phy_configured;
1249 int bars, pm_cap, err;
1250
1251 err = pci_enable_device_mem(pdev);
1252 if (err)
1253 return err;
1254
1255 /* The alx chip can DMA to 64-bit addresses, but it uses a single
1256 * shared register for the high 32 bits, so only a single, aligned,
1257 * 4 GB physical address range can be used for descriptors.
1258 */
1259 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
1260 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
1261 dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
1262 } else {
1263 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1264 if (err) {
1265 err = dma_set_coherent_mask(&pdev->dev,
1266 DMA_BIT_MASK(32));
1267 if (err) {
1268 dev_err(&pdev->dev,
1269 "No usable DMA config, aborting\n");
1270 goto out_pci_disable;
1271 }
1272 }
1273 }
1274
1275 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1276 err = pci_request_selected_regions(pdev, bars, alx_drv_name);
1277 if (err) {
1278 dev_err(&pdev->dev,
1279 "pci_request_selected_regions failed(bars:%d)\n", bars);
1280 goto out_pci_disable;
1281 }
1282
1283 pci_enable_pcie_error_reporting(pdev);
1284 pci_set_master(pdev);
1285
1286 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
1287 if (pm_cap == 0) {
1288 dev_err(&pdev->dev,
1289 "Can't find power management capability, aborting\n");
1290 err = -EIO;
1291 goto out_pci_release;
1292 }
1293
1294 err = pci_set_power_state(pdev, PCI_D0);
1295 if (err)
1296 goto out_pci_release;
1297
1298 netdev = alloc_etherdev(sizeof(*alx));
1299 if (!netdev) {
1300 err = -ENOMEM;
1301 goto out_pci_release;
1302 }
1303
1304 SET_NETDEV_DEV(netdev, &pdev->dev);
1305 alx = netdev_priv(netdev);
1306 alx->dev = netdev;
1307 alx->hw.pdev = pdev;
1308 alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
1309 NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
1310 hw = &alx->hw;
1311 pci_set_drvdata(pdev, alx);
1312
1313 hw->hw_addr = pci_ioremap_bar(pdev, 0);
1314 if (!hw->hw_addr) {
1315 dev_err(&pdev->dev, "cannot map device registers\n");
1316 err = -EIO;
1317 goto out_free_netdev;
1318 }
1319
1320 netdev->netdev_ops = &alx_netdev_ops;
1321 SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops);
1322 netdev->irq = pdev->irq;
1323 netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
1324
1325 if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
1326 pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
1327
1328 err = alx_init_sw(alx);
1329 if (err) {
1330 dev_err(&pdev->dev, "net device private data init failed\n");
1331 goto out_unmap;
1332 }
1333
1334 alx_reset_pcie(hw);
1335
1336 phy_configured = alx_phy_configured(hw);
1337
1338 if (!phy_configured)
1339 alx_reset_phy(hw);
1340
1341 err = alx_reset_mac(hw);
1342 if (err) {
1343 dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
1344 goto out_unmap;
1345 }
1346
1347 /* setup link to put it in a known good starting state */
1348 if (!phy_configured) {
1349 err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
1350 if (err) {
1351 dev_err(&pdev->dev,
1352 "failed to configure PHY speed/duplex (err=%d)\n",
1353 err);
1354 goto out_unmap;
1355 }
1356 }
1357
1358 netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
1359
1360 if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
1361 dev_warn(&pdev->dev,
1362 "Invalid permanent address programmed, using random one\n");
1363 eth_hw_addr_random(netdev);
1364 memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
1365 }
1366
1367 memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
1368 memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
1369 memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);
1370
1371 hw->mdio.prtad = 0;
1372 hw->mdio.mmds = 0;
1373 hw->mdio.dev = netdev;
1374 hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
1375 MDIO_SUPPORTS_C22 |
1376 MDIO_EMULATE_C22;
1377 hw->mdio.mdio_read = alx_mdio_read;
1378 hw->mdio.mdio_write = alx_mdio_write;
1379
1380 if (!alx_get_phy_info(hw)) {
1381 dev_err(&pdev->dev, "failed to identify PHY\n");
1382 err = -EIO;
1383 goto out_unmap;
1384 }
1385
1386 INIT_WORK(&alx->link_check_wk, alx_link_check);
1387 INIT_WORK(&alx->reset_wk, alx_reset);
1388 spin_lock_init(&alx->hw.mdio_lock);
1389 spin_lock_init(&alx->irq_lock);
1390
1391 netif_carrier_off(netdev);
1392
1393 err = register_netdev(netdev);
1394 if (err) {
1395 dev_err(&pdev->dev, "register netdevice failed\n");
1396 goto out_unmap;
1397 }
1398
1399 device_set_wakeup_enable(&pdev->dev, hw->sleep_ctrl);
1400
1401 netdev_info(netdev,
1402 "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
1403 netdev->dev_addr);
1404
1405 return 0;
1406
1407out_unmap:
1408 iounmap(hw->hw_addr);
1409out_free_netdev:
1410 free_netdev(netdev);
1411out_pci_release:
1412 pci_release_selected_regions(pdev, bars);
1413out_pci_disable:
1414 pci_disable_device(pdev);
1415 return err;
1416}
1417
1418static void alx_remove(struct pci_dev *pdev)
1419{
1420 struct alx_priv *alx = pci_get_drvdata(pdev);
1421 struct alx_hw *hw = &alx->hw;
1422
1423 cancel_work_sync(&alx->link_check_wk);
1424 cancel_work_sync(&alx->reset_wk);
1425
1426 /* restore permanent mac address */
1427 alx_set_macaddr(hw, hw->perm_addr);
1428
1429 unregister_netdev(alx->dev);
1430 iounmap(hw->hw_addr);
1431 pci_release_selected_regions(pdev,
1432 pci_select_bars(pdev, IORESOURCE_MEM));
1433
1434 pci_disable_pcie_error_reporting(pdev);
1435 pci_disable_device(pdev);
1436 pci_set_drvdata(pdev, NULL);
1437
1438 free_netdev(alx->dev);
1439}
1440
1441#ifdef CONFIG_PM_SLEEP
1442static int alx_suspend(struct device *dev)
1443{
1444 struct pci_dev *pdev = to_pci_dev(dev);
1445 int err;
1446 bool wol_en;
1447
1448 err = __alx_shutdown(pdev, &wol_en);
1449 if (err) {
1450 dev_err(&pdev->dev, "shutdown fail in suspend %d\n", err);
1451 return err;
1452 }
1453
1454 if (wol_en) {
1455 pci_prepare_to_sleep(pdev);
1456 } else {
1457 pci_wake_from_d3(pdev, false);
1458 pci_set_power_state(pdev, PCI_D3hot);
1459 }
1460
1461 return 0;
1462}
1463
1464static int alx_resume(struct device *dev)
1465{
1466 struct pci_dev *pdev = to_pci_dev(dev);
1467 struct alx_priv *alx = pci_get_drvdata(pdev);
1468 struct net_device *netdev = alx->dev;
1469 struct alx_hw *hw = &alx->hw;
1470 int err;
1471
1472 pci_set_power_state(pdev, PCI_D0);
1473 pci_restore_state(pdev);
1474 pci_save_state(pdev);
1475
1476 pci_enable_wake(pdev, PCI_D3hot, 0);
1477 pci_enable_wake(pdev, PCI_D3cold, 0);
1478
1479 hw->link_speed = SPEED_UNKNOWN;
1480 alx->int_mask = ALX_ISR_MISC;
1481
1482 alx_reset_pcie(hw);
1483 alx_reset_phy(hw);
1484
1485 err = alx_reset_mac(hw);
1486 if (err) {
1487 netif_err(alx, hw, alx->dev,
1488 "resume:reset_mac fail %d\n", err);
1489 return -EIO;
1490 }
1491
1492 err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
1493 if (err) {
1494 netif_err(alx, hw, alx->dev,
1495 "resume:setup_speed_duplex fail %d\n", err);
1496 return -EIO;
1497 }
1498
1499 if (netif_running(netdev)) {
1500 err = __alx_open(alx, true);
1501 if (err)
1502 return err;
1503 }
1504
1505 netif_device_attach(netdev);
1506
1507 return err;
1508}
1509#endif
1510
1511static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
1512 pci_channel_state_t state)
1513{
1514 struct alx_priv *alx = pci_get_drvdata(pdev);
1515 struct net_device *netdev = alx->dev;
1516 pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;
1517
1518 dev_info(&pdev->dev, "pci error detected\n");
1519
1520 rtnl_lock();
1521
1522 if (netif_running(netdev)) {
1523 netif_device_detach(netdev);
1524 alx_halt(alx);
1525 }
1526
1527 if (state == pci_channel_io_perm_failure)
1528 rc = PCI_ERS_RESULT_DISCONNECT;
1529 else
1530 pci_disable_device(pdev);
1531
1532 rtnl_unlock();
1533
1534 return rc;
1535}
1536
1537static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
1538{
1539 struct alx_priv *alx = pci_get_drvdata(pdev);
1540 struct alx_hw *hw = &alx->hw;
1541 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
1542
1543 dev_info(&pdev->dev, "pci error slot reset\n");
1544
1545 rtnl_lock();
1546
1547 if (pci_enable_device(pdev)) {
1548 dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
1549 goto out;
1550 }
1551
1552 pci_set_master(pdev);
1553 pci_enable_wake(pdev, PCI_D3hot, 0);
1554 pci_enable_wake(pdev, PCI_D3cold, 0);
1555
1556 alx_reset_pcie(hw);
1557 if (!alx_reset_mac(hw))
1558 rc = PCI_ERS_RESULT_RECOVERED;
1559out:
1560 pci_cleanup_aer_uncorrect_error_status(pdev);
1561
1562 rtnl_unlock();
1563
1564 return rc;
1565}
1566
1567static void alx_pci_error_resume(struct pci_dev *pdev)
1568{
1569 struct alx_priv *alx = pci_get_drvdata(pdev);
1570 struct net_device *netdev = alx->dev;
1571
1572 dev_info(&pdev->dev, "pci error resume\n");
1573
1574 rtnl_lock();
1575
1576 if (netif_running(netdev)) {
1577 alx_activate(alx);
1578 netif_device_attach(netdev);
1579 }
1580
1581 rtnl_unlock();
1582}
1583
1584static const struct pci_error_handlers alx_err_handlers = {
1585 .error_detected = alx_pci_error_detected,
1586 .slot_reset = alx_pci_error_slot_reset,
1587 .resume = alx_pci_error_resume,
1588};
1589
1590#ifdef CONFIG_PM_SLEEP
1591static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
1592#define ALX_PM_OPS (&alx_pm_ops)
1593#else
1594#define ALX_PM_OPS NULL
1595#endif
1596
1597static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = {
1598 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
1599 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
1600 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
1601 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
1602 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
1603 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
1604 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
1605 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
1606 {}
1607};
1608
1609static struct pci_driver alx_driver = {
1610 .name = alx_drv_name,
1611 .id_table = alx_pci_tbl,
1612 .probe = alx_probe,
1613 .remove = alx_remove,
1614 .shutdown = alx_shutdown,
1615 .err_handler = &alx_err_handlers,
1616 .driver.pm = ALX_PM_OPS,
1617};
1618
1619module_pci_driver(alx_driver);
1620MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
1621MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
1622MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
1623MODULE_DESCRIPTION(
1624 "Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
1625MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
new file mode 100644
index 000000000000..e4358c98bc4e
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -0,0 +1,810 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35#ifndef ALX_REG_H
36#define ALX_REG_H
37
38#define ALX_DEV_ID_AR8161 0x1091
39#define ALX_DEV_ID_E2200 0xe091
40#define ALX_DEV_ID_AR8162 0x1090
41#define ALX_DEV_ID_AR8171 0x10A1
42#define ALX_DEV_ID_AR8172 0x10A0
43
44/* rev definition,
45 * bit(0): with xD support
46 * bit(1): with Card Reader function
47 * bit(7:2): real revision
48 */
49#define ALX_PCI_REVID_SHIFT 3
50#define ALX_REV_A0 0
51#define ALX_REV_A1 1
52#define ALX_REV_B0 2
53#define ALX_REV_C0 3
54
55#define ALX_DEV_CTRL 0x0060
56#define ALX_DEV_CTRL_MAXRRS_MIN 2
57
58#define ALX_MSIX_MASK 0x0090
59
60#define ALX_UE_SVRT 0x010C
61#define ALX_UE_SVRT_FCPROTERR BIT(13)
62#define ALX_UE_SVRT_DLPROTERR BIT(4)
63
64/* eeprom & flash load register */
65#define ALX_EFLD 0x0204
66#define ALX_EFLD_F_EXIST BIT(10)
67#define ALX_EFLD_E_EXIST BIT(9)
68#define ALX_EFLD_STAT BIT(5)
69#define ALX_EFLD_START BIT(0)
70
71/* eFuse load register */
72#define ALX_SLD 0x0218
73#define ALX_SLD_STAT BIT(12)
74#define ALX_SLD_START BIT(11)
75#define ALX_SLD_MAX_TO 100
76
77#define ALX_PDLL_TRNS1 0x1104
78#define ALX_PDLL_TRNS1_D3PLLOFF_EN BIT(11)
79
80#define ALX_PMCTRL 0x12F8
81#define ALX_PMCTRL_HOTRST_WTEN BIT(31)
82/* bit30: L0s/L1 controlled by MAC based on throughput (setting in 15A0) */
83#define ALX_PMCTRL_ASPM_FCEN BIT(30)
84#define ALX_PMCTRL_SADLY_EN BIT(29)
85#define ALX_PMCTRL_LCKDET_TIMER_MASK 0xF
86#define ALX_PMCTRL_LCKDET_TIMER_SHIFT 24
87#define ALX_PMCTRL_LCKDET_TIMER_DEF 0xC
88/* bit[23:20] if pm_request_l1 time > @, then enter L0s not L1 */
89#define ALX_PMCTRL_L1REQ_TO_MASK 0xF
90#define ALX_PMCTRL_L1REQ_TO_SHIFT 20
91#define ALX_PMCTRL_L1REG_TO_DEF 0xF
92#define ALX_PMCTRL_TXL1_AFTER_L0S BIT(19)
93#define ALX_PMCTRL_L1_TIMER_MASK 0x7
94#define ALX_PMCTRL_L1_TIMER_SHIFT 16
95#define ALX_PMCTRL_L1_TIMER_16US 4
96#define ALX_PMCTRL_RCVR_WT_1US BIT(15)
97/* bit13: enable pcie clk switch in L1 state */
98#define ALX_PMCTRL_L1_CLKSW_EN BIT(13)
99#define ALX_PMCTRL_L0S_EN BIT(12)
100#define ALX_PMCTRL_RXL1_AFTER_L0S BIT(11)
101#define ALX_PMCTRL_L1_BUFSRX_EN BIT(7)
102/* bit6: power down serdes RX */
103#define ALX_PMCTRL_L1_SRDSRX_PWD BIT(6)
104#define ALX_PMCTRL_L1_SRDSPLL_EN BIT(5)
105#define ALX_PMCTRL_L1_SRDS_EN BIT(4)
106#define ALX_PMCTRL_L1_EN BIT(3)
107
108/*******************************************************/
109/* following registers are mapped only to memory space */
110/*******************************************************/
111
112#define ALX_MASTER 0x1400
113/* bit12: 1: always select pclk from serdes, do not switch to 25M */
114#define ALX_MASTER_PCLKSEL_SRDS BIT(12)
115/* bit11: irq moderation for rx */
116#define ALX_MASTER_IRQMOD2_EN BIT(11)
117/* bit10: irq moduration for tx/rx */
118#define ALX_MASTER_IRQMOD1_EN BIT(10)
119#define ALX_MASTER_SYSALVTIMER_EN BIT(7)
120#define ALX_MASTER_OOB_DIS BIT(6)
121/* bit5: wakeup without pcie clk */
122#define ALX_MASTER_WAKEN_25M BIT(5)
123/* bit0: MAC & DMA reset */
124#define ALX_MASTER_DMA_MAC_RST BIT(0)
125#define ALX_DMA_MAC_RST_TO 50
126
127#define ALX_IRQ_MODU_TIMER 0x1408
128#define ALX_IRQ_MODU_TIMER1_MASK 0xFFFF
129#define ALX_IRQ_MODU_TIMER1_SHIFT 0
130
131#define ALX_PHY_CTRL 0x140C
132#define ALX_PHY_CTRL_100AB_EN BIT(17)
133/* bit14: affects MAC & PHY, puts them into low-power state */
134#define ALX_PHY_CTRL_POWER_DOWN BIT(14)
135/* bit13: 1: pll always on, 0: pll can be switched off in low power */
136#define ALX_PHY_CTRL_PLL_ON BIT(13)
137#define ALX_PHY_CTRL_RST_ANALOG BIT(12)
138#define ALX_PHY_CTRL_HIB_PULSE BIT(11)
139#define ALX_PHY_CTRL_HIB_EN BIT(10)
140#define ALX_PHY_CTRL_IDDQ BIT(7)
141#define ALX_PHY_CTRL_GATE_25M BIT(5)
142#define ALX_PHY_CTRL_LED_MODE BIT(2)
143/* bit0: out of dsp RST state */
144#define ALX_PHY_CTRL_DSPRST_OUT BIT(0)
145#define ALX_PHY_CTRL_DSPRST_TO 80
146#define ALX_PHY_CTRL_CLS (ALX_PHY_CTRL_LED_MODE | \
147 ALX_PHY_CTRL_100AB_EN | \
148 ALX_PHY_CTRL_PLL_ON)
149
150#define ALX_MAC_STS 0x1410
151#define ALX_MAC_STS_TXQ_BUSY BIT(3)
152#define ALX_MAC_STS_RXQ_BUSY BIT(2)
153#define ALX_MAC_STS_TXMAC_BUSY BIT(1)
154#define ALX_MAC_STS_RXMAC_BUSY BIT(0)
155#define ALX_MAC_STS_IDLE (ALX_MAC_STS_TXQ_BUSY | \
156 ALX_MAC_STS_RXQ_BUSY | \
157 ALX_MAC_STS_TXMAC_BUSY | \
158 ALX_MAC_STS_RXMAC_BUSY)
159
160#define ALX_MDIO 0x1414
161#define ALX_MDIO_MODE_EXT BIT(30)
162#define ALX_MDIO_BUSY BIT(27)
163#define ALX_MDIO_CLK_SEL_MASK 0x7
164#define ALX_MDIO_CLK_SEL_SHIFT 24
165#define ALX_MDIO_CLK_SEL_25MD4 0
166#define ALX_MDIO_CLK_SEL_25MD128 7
167#define ALX_MDIO_START BIT(23)
168#define ALX_MDIO_SPRES_PRMBL BIT(22)
169/* bit21: 1: read, 0: write */
170#define ALX_MDIO_OP_READ BIT(21)
171#define ALX_MDIO_REG_MASK 0x1F
172#define ALX_MDIO_REG_SHIFT 16
173#define ALX_MDIO_DATA_MASK 0xFFFF
174#define ALX_MDIO_DATA_SHIFT 0
175#define ALX_MDIO_MAX_AC_TO 120
176
177#define ALX_MDIO_EXTN 0x1448
178#define ALX_MDIO_EXTN_DEVAD_MASK 0x1F
179#define ALX_MDIO_EXTN_DEVAD_SHIFT 16
180#define ALX_MDIO_EXTN_REG_MASK 0xFFFF
181#define ALX_MDIO_EXTN_REG_SHIFT 0
182
183#define ALX_SERDES 0x1424
184#define ALX_SERDES_PHYCLK_SLWDWN BIT(18)
185#define ALX_SERDES_MACCLK_SLWDWN BIT(17)
186
187#define ALX_LPI_CTRL 0x1440
188#define ALX_LPI_CTRL_EN BIT(0)
189
190/* for B0+, bit[13..] for C0+ */
191#define ALX_HRTBT_EXT_CTRL 0x1AD0
192#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_MASK 0x3F
193#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_SHIFT 24
194#define L1F_HRTBT_EXT_CTRL_SWOI_STARTUP_PKT_EN BIT(23)
195#define L1F_HRTBT_EXT_CTRL_IOAC_2_FRAGMENTED BIT(22)
196#define L1F_HRTBT_EXT_CTRL_IOAC_1_FRAGMENTED BIT(21)
197#define L1F_HRTBT_EXT_CTRL_IOAC_1_KEEPALIVE_EN BIT(20)
198#define L1F_HRTBT_EXT_CTRL_IOAC_1_HAS_VLAN BIT(19)
199#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_8023 BIT(18)
200#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_IPV6 BIT(17)
201#define L1F_HRTBT_EXT_CTRL_IOAC_2_KEEPALIVE_EN BIT(16)
202#define L1F_HRTBT_EXT_CTRL_IOAC_2_HAS_VLAN BIT(15)
203#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_8023 BIT(14)
204#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_IPV6 BIT(13)
205#define ALX_HRTBT_EXT_CTRL_NS_EN BIT(12)
206#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_MASK 0xFF
207#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_SHIFT 4
208#define ALX_HRTBT_EXT_CTRL_IS_8023 BIT(3)
209#define ALX_HRTBT_EXT_CTRL_IS_IPV6 BIT(2)
210#define ALX_HRTBT_EXT_CTRL_WAKEUP_EN BIT(1)
211#define ALX_HRTBT_EXT_CTRL_ARP_EN BIT(0)
212
213#define ALX_HRTBT_REM_IPV4_ADDR 0x1AD4
214#define ALX_HRTBT_HOST_IPV4_ADDR 0x1478
215#define ALX_HRTBT_REM_IPV6_ADDR3 0x1AD8
216#define ALX_HRTBT_REM_IPV6_ADDR2 0x1ADC
217#define ALX_HRTBT_REM_IPV6_ADDR1 0x1AE0
218#define ALX_HRTBT_REM_IPV6_ADDR0 0x1AE4
219
220/* 1B8C ~ 1B94 for C0+ */
221#define ALX_SWOI_ACER_CTRL 0x1B8C
222#define ALX_SWOI_ORIG_ACK_NAK_EN BIT(20)
223#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_MASK 0XFF
224#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_SHIFT 12
225#define ALX_SWOI_ORIG_ACK_ADDR_MASK 0XFFF
226#define ALX_SWOI_ORIG_ACK_ADDR_SHIFT 0
227
228#define ALX_SWOI_IOAC_CTRL_2 0x1B90
229#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_MASK 0xFF
230#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_SHIFT 24
231#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_MASK 0xFFF
232#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_SHIFT 12
233#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_MASK 0xFFF
234#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_SHIFT 0
235
236#define ALX_SWOI_IOAC_CTRL_3 0x1B94
237#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_MASK 0xFF
238#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_SHIFT 24
239#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_MASK 0xFFF
240#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_SHIFT 12
241#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_MASK 0xFFF
242#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_SHIFT 0
243
244/* for B0 */
245#define ALX_IDLE_DECISN_TIMER 0x1474
246/* 1ms */
247#define ALX_IDLE_DECISN_TIMER_DEF 0x400
248
249#define ALX_MAC_CTRL 0x1480
250#define ALX_MAC_CTRL_FAST_PAUSE BIT(31)
251#define ALX_MAC_CTRL_WOLSPED_SWEN BIT(30)
252/* bit29: 1: legacy (high 5 bits), 0: marvell (low 5 bits) */
253#define ALX_MAC_CTRL_MHASH_ALG_HI5B BIT(29)
254#define ALX_MAC_CTRL_BRD_EN BIT(26)
255#define ALX_MAC_CTRL_MULTIALL_EN BIT(25)
256#define ALX_MAC_CTRL_SPEED_MASK 0x3
257#define ALX_MAC_CTRL_SPEED_SHIFT 20
258#define ALX_MAC_CTRL_SPEED_10_100 1
259#define ALX_MAC_CTRL_SPEED_1000 2
260#define ALX_MAC_CTRL_PROMISC_EN BIT(15)
261#define ALX_MAC_CTRL_VLANSTRIP BIT(14)
262#define ALX_MAC_CTRL_PRMBLEN_MASK 0xF
263#define ALX_MAC_CTRL_PRMBLEN_SHIFT 10
264#define ALX_MAC_CTRL_PCRCE BIT(7)
265#define ALX_MAC_CTRL_CRCE BIT(6)
266#define ALX_MAC_CTRL_FULLD BIT(5)
267#define ALX_MAC_CTRL_RXFC_EN BIT(3)
268#define ALX_MAC_CTRL_TXFC_EN BIT(2)
269#define ALX_MAC_CTRL_RX_EN BIT(1)
270#define ALX_MAC_CTRL_TX_EN BIT(0)
271
272#define ALX_STAD0 0x1488
273#define ALX_STAD1 0x148C
274
275#define ALX_HASH_TBL0 0x1490
276#define ALX_HASH_TBL1 0x1494
277
278#define ALX_MTU 0x149C
279#define ALX_MTU_JUMBO_TH 1514
280#define ALX_MTU_STD_ALGN 1536
281
282#define ALX_SRAM5 0x1524
283#define ALX_SRAM_RXF_LEN_MASK 0xFFF
284#define ALX_SRAM_RXF_LEN_SHIFT 0
285#define ALX_SRAM_RXF_LEN_8K (8*1024)
286
287#define ALX_SRAM9 0x1534
288#define ALX_SRAM_LOAD_PTR BIT(0)
289
290#define ALX_RX_BASE_ADDR_HI 0x1540
291
292#define ALX_TX_BASE_ADDR_HI 0x1544
293
294#define ALX_RFD_ADDR_LO 0x1550
295#define ALX_RFD_RING_SZ 0x1560
296#define ALX_RFD_BUF_SZ 0x1564
297
298#define ALX_RRD_ADDR_LO 0x1568
299#define ALX_RRD_RING_SZ 0x1578
300
301/* pri3: highest, pri0: lowest */
302#define ALX_TPD_PRI3_ADDR_LO 0x14E4
303#define ALX_TPD_PRI2_ADDR_LO 0x14E0
304#define ALX_TPD_PRI1_ADDR_LO 0x157C
305#define ALX_TPD_PRI0_ADDR_LO 0x1580
306
307/* producer index is 16bit */
308#define ALX_TPD_PRI3_PIDX 0x1618
309#define ALX_TPD_PRI2_PIDX 0x161A
310#define ALX_TPD_PRI1_PIDX 0x15F0
311#define ALX_TPD_PRI0_PIDX 0x15F2
312
313/* consumer index is 16bit */
314#define ALX_TPD_PRI3_CIDX 0x161C
315#define ALX_TPD_PRI2_CIDX 0x161E
316#define ALX_TPD_PRI1_CIDX 0x15F4
317#define ALX_TPD_PRI0_CIDX 0x15F6
318
319#define ALX_TPD_RING_SZ 0x1584
320
321#define ALX_TXQ0 0x1590
322#define ALX_TXQ0_TXF_BURST_PREF_MASK 0xFFFF
323#define ALX_TXQ0_TXF_BURST_PREF_SHIFT 16
324#define ALX_TXQ_TXF_BURST_PREF_DEF 0x200
325#define ALX_TXQ0_LSO_8023_EN BIT(7)
326#define ALX_TXQ0_MODE_ENHANCE BIT(6)
327#define ALX_TXQ0_EN BIT(5)
328#define ALX_TXQ0_SUPT_IPOPT BIT(4)
329#define ALX_TXQ0_TPD_BURSTPREF_MASK 0xF
330#define ALX_TXQ0_TPD_BURSTPREF_SHIFT 0
331#define ALX_TXQ_TPD_BURSTPREF_DEF 5
332
333#define ALX_TXQ1 0x1594
334/* bit11: drop packets larger than the RFD buffer size */
335#define ALX_TXQ1_ERRLGPKT_DROP_EN BIT(11)
336#define ALX_TXQ1_JUMBO_TSO_TH (7*1024)
337
338#define ALX_RXQ0 0x15A0
339#define ALX_RXQ0_EN BIT(31)
340#define ALX_RXQ0_RSS_HASH_EN BIT(29)
341#define ALX_RXQ0_RSS_MODE_MASK 0x3
342#define ALX_RXQ0_RSS_MODE_SHIFT 26
343#define ALX_RXQ0_RSS_MODE_DIS 0
344#define ALX_RXQ0_RSS_MODE_MQMI 3
345#define ALX_RXQ0_NUM_RFD_PREF_MASK 0x3F
346#define ALX_RXQ0_NUM_RFD_PREF_SHIFT 20
347#define ALX_RXQ0_NUM_RFD_PREF_DEF 8
348#define ALX_RXQ0_IDT_TBL_SIZE_MASK 0x1FF
349#define ALX_RXQ0_IDT_TBL_SIZE_SHIFT 8
350#define ALX_RXQ0_IDT_TBL_SIZE_DEF 0x100
351#define ALX_RXQ0_IDT_TBL_SIZE_NORMAL 128
352#define ALX_RXQ0_IPV6_PARSE_EN BIT(7)
353#define ALX_RXQ0_RSS_HSTYP_MASK 0xF
354#define ALX_RXQ0_RSS_HSTYP_SHIFT 2
355#define ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN BIT(5)
356#define ALX_RXQ0_RSS_HSTYP_IPV6_EN BIT(4)
357#define ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN BIT(3)
358#define ALX_RXQ0_RSS_HSTYP_IPV4_EN BIT(2)
359#define ALX_RXQ0_RSS_HSTYP_ALL (ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN | \
360 ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN | \
361 ALX_RXQ0_RSS_HSTYP_IPV6_EN | \
362 ALX_RXQ0_RSS_HSTYP_IPV4_EN)
363#define ALX_RXQ0_ASPM_THRESH_MASK 0x3
364#define ALX_RXQ0_ASPM_THRESH_SHIFT 0
365#define ALX_RXQ0_ASPM_THRESH_100M 3
366
367#define ALX_RXQ2 0x15A8
368#define ALX_RXQ2_RXF_XOFF_THRESH_MASK 0xFFF
369#define ALX_RXQ2_RXF_XOFF_THRESH_SHIFT 16
370#define ALX_RXQ2_RXF_XON_THRESH_MASK 0xFFF
371#define ALX_RXQ2_RXF_XON_THRESH_SHIFT 0
372/* Size = tx-packet(1522) + IPG(12) + SOF(8) + 64(Pause) + IPG(12) + SOF(8) +
373 * rx-packet(1522) + delay-of-link(64)
374 * = 3212.
375 */
376#define ALX_RXQ2_RXF_FLOW_CTRL_RSVD 3212
377
378#define ALX_DMA 0x15C0
379#define ALX_DMA_RCHNL_SEL_MASK 0x3
380#define ALX_DMA_RCHNL_SEL_SHIFT 26
381#define ALX_DMA_WDLY_CNT_MASK 0xF
382#define ALX_DMA_WDLY_CNT_SHIFT 16
383#define ALX_DMA_WDLY_CNT_DEF 4
384#define ALX_DMA_RDLY_CNT_MASK 0x1F
385#define ALX_DMA_RDLY_CNT_SHIFT 11
386#define ALX_DMA_RDLY_CNT_DEF 15
387/* bit10: 0:tpd with pri, 1: data */
388#define ALX_DMA_RREQ_PRI_DATA BIT(10)
389#define ALX_DMA_RREQ_BLEN_MASK 0x7
390#define ALX_DMA_RREQ_BLEN_SHIFT 4
391#define ALX_DMA_RORDER_MODE_MASK 0x7
392#define ALX_DMA_RORDER_MODE_SHIFT 0
393#define ALX_DMA_RORDER_MODE_OUT 4
394
395#define ALX_WOL0 0x14A0
396#define ALX_WOL0_PME_LINK BIT(5)
397#define ALX_WOL0_LINK_EN BIT(4)
398#define ALX_WOL0_PME_MAGIC_EN BIT(3)
399#define ALX_WOL0_MAGIC_EN BIT(2)
400
401#define ALX_RFD_PIDX 0x15E0
402
403#define ALX_RFD_CIDX 0x15F8
404
405/* MIB */
406#define ALX_MIB_BASE 0x1700
407#define ALX_MIB_RX_OK (ALX_MIB_BASE + 0)
408#define ALX_MIB_RX_ERRADDR (ALX_MIB_BASE + 92)
409#define ALX_MIB_TX_OK (ALX_MIB_BASE + 96)
410#define ALX_MIB_TX_MCCNT (ALX_MIB_BASE + 192)
411
412#define ALX_RX_STATS_BIN ALX_MIB_RX_OK
413#define ALX_RX_STATS_END ALX_MIB_RX_ERRADDR
414#define ALX_TX_STATS_BIN ALX_MIB_TX_OK
415#define ALX_TX_STATS_END ALX_MIB_TX_MCCNT
416
417#define ALX_ISR 0x1600
418#define ALX_ISR_DIS BIT(31)
419#define ALX_ISR_RX_Q7 BIT(30)
420#define ALX_ISR_RX_Q6 BIT(29)
421#define ALX_ISR_RX_Q5 BIT(28)
422#define ALX_ISR_RX_Q4 BIT(27)
423#define ALX_ISR_PCIE_LNKDOWN BIT(26)
424#define ALX_ISR_RX_Q3 BIT(19)
425#define ALX_ISR_RX_Q2 BIT(18)
426#define ALX_ISR_RX_Q1 BIT(17)
427#define ALX_ISR_RX_Q0 BIT(16)
428#define ALX_ISR_TX_Q0 BIT(15)
429#define ALX_ISR_PHY BIT(12)
430#define ALX_ISR_DMAW BIT(10)
431#define ALX_ISR_DMAR BIT(9)
432#define ALX_ISR_TXF_UR BIT(8)
433#define ALX_ISR_TX_Q3 BIT(7)
434#define ALX_ISR_TX_Q2 BIT(6)
435#define ALX_ISR_TX_Q1 BIT(5)
436#define ALX_ISR_RFD_UR BIT(4)
437#define ALX_ISR_RXF_OV BIT(3)
438#define ALX_ISR_MANU BIT(2)
439#define ALX_ISR_TIMER BIT(1)
440#define ALX_ISR_SMB BIT(0)
441
442#define ALX_IMR 0x1604
443
444/* re-send assert message if SW does not respond */
445#define ALX_INT_RETRIG 0x1608
446/* 40ms */
447#define ALX_INT_RETRIG_TO 20000
448
449#define ALX_SMB_TIMER 0x15C4
450
451#define ALX_TINT_TPD_THRSHLD 0x15C8
452
453#define ALX_TINT_TIMER 0x15CC
454
455#define ALX_CLK_GATE 0x1814
456#define ALX_CLK_GATE_RXMAC BIT(5)
457#define ALX_CLK_GATE_TXMAC BIT(4)
458#define ALX_CLK_GATE_RXQ BIT(3)
459#define ALX_CLK_GATE_TXQ BIT(2)
460#define ALX_CLK_GATE_DMAR BIT(1)
461#define ALX_CLK_GATE_DMAW BIT(0)
462#define ALX_CLK_GATE_ALL (ALX_CLK_GATE_RXMAC | \
463 ALX_CLK_GATE_TXMAC | \
464 ALX_CLK_GATE_RXQ | \
465 ALX_CLK_GATE_TXQ | \
466 ALX_CLK_GATE_DMAR | \
467 ALX_CLK_GATE_DMAW)
468
469/* interop between drivers */
470#define ALX_DRV 0x1804
471#define ALX_DRV_PHY_AUTO BIT(28)
472#define ALX_DRV_PHY_1000 BIT(27)
473#define ALX_DRV_PHY_100 BIT(26)
474#define ALX_DRV_PHY_10 BIT(25)
475#define ALX_DRV_PHY_DUPLEX BIT(24)
476/* bit23: adv Pause */
477#define ALX_DRV_PHY_PAUSE BIT(23)
478/* bit22: adv Asym Pause */
479#define ALX_DRV_PHY_MASK 0xFF
480#define ALX_DRV_PHY_SHIFT 21
481#define ALX_DRV_PHY_UNKNOWN 0
482
483/* flag: PHY has been initialized */
484#define ALX_PHY_INITED 0x003F
485
486/* reg 1830 ~ 186C for C0+, 16 bit map patterns and wake packet detection */
487#define ALX_WOL_CTRL2 0x1830
488#define ALX_WOL_CTRL2_DATA_STORE BIT(3)
489#define ALX_WOL_CTRL2_PTRN_EVT BIT(2)
490#define ALX_WOL_CTRL2_PME_PTRN_EN BIT(1)
491#define ALX_WOL_CTRL2_PTRN_EN BIT(0)
492
493#define ALX_WOL_CTRL3 0x1834
494#define ALX_WOL_CTRL3_PTRN_ADDR_MASK 0xFFFFF
495#define ALX_WOL_CTRL3_PTRN_ADDR_SHIFT 0
496
497#define ALX_WOL_CTRL4 0x1838
498#define ALX_WOL_CTRL4_PT15_MATCH BIT(31)
499#define ALX_WOL_CTRL4_PT14_MATCH BIT(30)
500#define ALX_WOL_CTRL4_PT13_MATCH BIT(29)
501#define ALX_WOL_CTRL4_PT12_MATCH BIT(28)
502#define ALX_WOL_CTRL4_PT11_MATCH BIT(27)
503#define ALX_WOL_CTRL4_PT10_MATCH BIT(26)
504#define ALX_WOL_CTRL4_PT9_MATCH BIT(25)
505#define ALX_WOL_CTRL4_PT8_MATCH BIT(24)
506#define ALX_WOL_CTRL4_PT7_MATCH BIT(23)
507#define ALX_WOL_CTRL4_PT6_MATCH BIT(22)
508#define ALX_WOL_CTRL4_PT5_MATCH BIT(21)
509#define ALX_WOL_CTRL4_PT4_MATCH BIT(20)
510#define ALX_WOL_CTRL4_PT3_MATCH BIT(19)
511#define ALX_WOL_CTRL4_PT2_MATCH BIT(18)
512#define ALX_WOL_CTRL4_PT1_MATCH BIT(17)
513#define ALX_WOL_CTRL4_PT0_MATCH BIT(16)
514#define ALX_WOL_CTRL4_PT15_EN BIT(15)
515#define ALX_WOL_CTRL4_PT14_EN BIT(14)
516#define ALX_WOL_CTRL4_PT13_EN BIT(13)
517#define ALX_WOL_CTRL4_PT12_EN BIT(12)
518#define ALX_WOL_CTRL4_PT11_EN BIT(11)
519#define ALX_WOL_CTRL4_PT10_EN BIT(10)
520#define ALX_WOL_CTRL4_PT9_EN BIT(9)
521#define ALX_WOL_CTRL4_PT8_EN BIT(8)
522#define ALX_WOL_CTRL4_PT7_EN BIT(7)
523#define ALX_WOL_CTRL4_PT6_EN BIT(6)
524#define ALX_WOL_CTRL4_PT5_EN BIT(5)
525#define ALX_WOL_CTRL4_PT4_EN BIT(4)
526#define ALX_WOL_CTRL4_PT3_EN BIT(3)
527#define ALX_WOL_CTRL4_PT2_EN BIT(2)
528#define ALX_WOL_CTRL4_PT1_EN BIT(1)
529#define ALX_WOL_CTRL4_PT0_EN BIT(0)
530
531#define ALX_WOL_CTRL5 0x183C
532#define ALX_WOL_CTRL5_PT3_LEN_MASK 0xFF
533#define ALX_WOL_CTRL5_PT3_LEN_SHIFT 24
534#define ALX_WOL_CTRL5_PT2_LEN_MASK 0xFF
535#define ALX_WOL_CTRL5_PT2_LEN_SHIFT 16
536#define ALX_WOL_CTRL5_PT1_LEN_MASK 0xFF
537#define ALX_WOL_CTRL5_PT1_LEN_SHIFT 8
538#define ALX_WOL_CTRL5_PT0_LEN_MASK 0xFF
539#define ALX_WOL_CTRL5_PT0_LEN_SHIFT 0
540
541#define ALX_WOL_CTRL6 0x1840
542#define ALX_WOL_CTRL5_PT7_LEN_MASK 0xFF
543#define ALX_WOL_CTRL5_PT7_LEN_SHIFT 24
544#define ALX_WOL_CTRL5_PT6_LEN_MASK 0xFF
545#define ALX_WOL_CTRL5_PT6_LEN_SHIFT 16
546#define ALX_WOL_CTRL5_PT5_LEN_MASK 0xFF
547#define ALX_WOL_CTRL5_PT5_LEN_SHIFT 8
548#define ALX_WOL_CTRL5_PT4_LEN_MASK 0xFF
549#define ALX_WOL_CTRL5_PT4_LEN_SHIFT 0
550
551#define ALX_WOL_CTRL7 0x1844
552#define ALX_WOL_CTRL5_PT11_LEN_MASK 0xFF
553#define ALX_WOL_CTRL5_PT11_LEN_SHIFT 24
554#define ALX_WOL_CTRL5_PT10_LEN_MASK 0xFF
555#define ALX_WOL_CTRL5_PT10_LEN_SHIFT 16
556#define ALX_WOL_CTRL5_PT9_LEN_MASK 0xFF
557#define ALX_WOL_CTRL5_PT9_LEN_SHIFT 8
558#define ALX_WOL_CTRL5_PT8_LEN_MASK 0xFF
559#define ALX_WOL_CTRL5_PT8_LEN_SHIFT 0
560
561#define ALX_WOL_CTRL8 0x1848
562#define ALX_WOL_CTRL5_PT15_LEN_MASK 0xFF
563#define ALX_WOL_CTRL5_PT15_LEN_SHIFT 24
564#define ALX_WOL_CTRL5_PT14_LEN_MASK 0xFF
565#define ALX_WOL_CTRL5_PT14_LEN_SHIFT 16
566#define ALX_WOL_CTRL5_PT13_LEN_MASK 0xFF
567#define ALX_WOL_CTRL5_PT13_LEN_SHIFT 8
568#define ALX_WOL_CTRL5_PT12_LEN_MASK 0xFF
569#define ALX_WOL_CTRL5_PT12_LEN_SHIFT 0
570
571#define ALX_ACER_FIXED_PTN0 0x1850
572#define ALX_ACER_FIXED_PTN0_MASK 0xFFFFFFFF
573#define ALX_ACER_FIXED_PTN0_SHIFT 0
574
575#define ALX_ACER_FIXED_PTN1 0x1854
576#define ALX_ACER_FIXED_PTN1_MASK 0xFFFF
577#define ALX_ACER_FIXED_PTN1_SHIFT 0
578
579#define ALX_ACER_RANDOM_NUM0 0x1858
580#define ALX_ACER_RANDOM_NUM0_MASK 0xFFFFFFFF
581#define ALX_ACER_RANDOM_NUM0_SHIFT 0
582
583#define ALX_ACER_RANDOM_NUM1 0x185C
584#define ALX_ACER_RANDOM_NUM1_MASK 0xFFFFFFFF
585#define ALX_ACER_RANDOM_NUM1_SHIFT 0
586
587#define ALX_ACER_RANDOM_NUM2 0x1860
588#define ALX_ACER_RANDOM_NUM2_MASK 0xFFFFFFFF
589#define ALX_ACER_RANDOM_NUM2_SHIFT 0
590
591#define ALX_ACER_RANDOM_NUM3 0x1864
592#define ALX_ACER_RANDOM_NUM3_MASK 0xFFFFFFFF
593#define ALX_ACER_RANDOM_NUM3_SHIFT 0
594
595#define ALX_ACER_MAGIC 0x1868
596#define ALX_ACER_MAGIC_EN BIT(31)
597#define ALX_ACER_MAGIC_PME_EN BIT(30)
598#define ALX_ACER_MAGIC_MATCH BIT(29)
599#define ALX_ACER_MAGIC_FF_CHECK BIT(10)
600#define ALX_ACER_MAGIC_RAN_LEN_MASK 0x1F
601#define ALX_ACER_MAGIC_RAN_LEN_SHIFT 5
602#define ALX_ACER_MAGIC_FIX_LEN_MASK 0x1F
603#define ALX_ACER_MAGIC_FIX_LEN_SHIFT 0
604
605#define ALX_ACER_TIMER 0x186C
606#define ALX_ACER_TIMER_EN BIT(31)
607#define ALX_ACER_TIMER_PME_EN BIT(30)
608#define ALX_ACER_TIMER_MATCH BIT(29)
609#define ALX_ACER_TIMER_THRES_MASK 0x1FFFF
610#define ALX_ACER_TIMER_THRES_SHIFT 0
611#define ALX_ACER_TIMER_THRES_DEF 1
612
613/* RSS definitions */
614#define ALX_RSS_KEY0 0x14B0
615#define ALX_RSS_KEY1 0x14B4
616#define ALX_RSS_KEY2 0x14B8
617#define ALX_RSS_KEY3 0x14BC
618#define ALX_RSS_KEY4 0x14C0
619#define ALX_RSS_KEY5 0x14C4
620#define ALX_RSS_KEY6 0x14C8
621#define ALX_RSS_KEY7 0x14CC
622#define ALX_RSS_KEY8 0x14D0
623#define ALX_RSS_KEY9 0x14D4
624
625#define ALX_RSS_IDT_TBL0 0x1B00
626
627#define ALX_MSI_MAP_TBL1 0x15D0
628#define ALX_MSI_MAP_TBL1_TXQ1_SHIFT 20
629#define ALX_MSI_MAP_TBL1_TXQ0_SHIFT 16
630#define ALX_MSI_MAP_TBL1_RXQ3_SHIFT 12
631#define ALX_MSI_MAP_TBL1_RXQ2_SHIFT 8
632#define ALX_MSI_MAP_TBL1_RXQ1_SHIFT 4
633#define ALX_MSI_MAP_TBL1_RXQ0_SHIFT 0
634
635#define ALX_MSI_MAP_TBL2 0x15D8
636#define ALX_MSI_MAP_TBL2_TXQ3_SHIFT 20
637#define ALX_MSI_MAP_TBL2_TXQ2_SHIFT 16
638#define ALX_MSI_MAP_TBL2_RXQ7_SHIFT 12
639#define ALX_MSI_MAP_TBL2_RXQ6_SHIFT 8
640#define ALX_MSI_MAP_TBL2_RXQ5_SHIFT 4
641#define ALX_MSI_MAP_TBL2_RXQ4_SHIFT 0
642
643#define ALX_MSI_ID_MAP 0x15D4
644
645#define ALX_MSI_RETRANS_TIMER 0x1920
646/* bit16: 1:line,0:standard */
647#define ALX_MSI_MASK_SEL_LINE BIT(16)
648#define ALX_MSI_RETRANS_TM_MASK 0xFFFF
649#define ALX_MSI_RETRANS_TM_SHIFT 0
650
651/* CR DMA ctrl */
652
653/* TX QoS */
654#define ALX_WRR 0x1938
655#define ALX_WRR_PRI_MASK 0x3
656#define ALX_WRR_PRI_SHIFT 29
657#define ALX_WRR_PRI_RESTRICT_NONE 3
658#define ALX_WRR_PRI3_MASK 0x1F
659#define ALX_WRR_PRI3_SHIFT 24
660#define ALX_WRR_PRI2_MASK 0x1F
661#define ALX_WRR_PRI2_SHIFT 16
662#define ALX_WRR_PRI1_MASK 0x1F
663#define ALX_WRR_PRI1_SHIFT 8
664#define ALX_WRR_PRI0_MASK 0x1F
665#define ALX_WRR_PRI0_SHIFT 0
666
667#define ALX_HQTPD 0x193C
668#define ALX_HQTPD_BURST_EN BIT(31)
669#define ALX_HQTPD_Q3_NUMPREF_MASK 0xF
670#define ALX_HQTPD_Q3_NUMPREF_SHIFT 8
671#define ALX_HQTPD_Q2_NUMPREF_MASK 0xF
672#define ALX_HQTPD_Q2_NUMPREF_SHIFT 4
673#define ALX_HQTPD_Q1_NUMPREF_MASK 0xF
674#define ALX_HQTPD_Q1_NUMPREF_SHIFT 0
675
676#define ALX_MISC 0x19C0
677#define ALX_MISC_PSW_OCP_MASK 0x7
678#define ALX_MISC_PSW_OCP_SHIFT 21
679#define ALX_MISC_PSW_OCP_DEF 0x7
680#define ALX_MISC_ISO_EN BIT(12)
681#define ALX_MISC_INTNLOSC_OPEN BIT(3)
682
683#define ALX_MSIC2 0x19C8
684#define ALX_MSIC2_CALB_START BIT(0)
685
686#define ALX_MISC3 0x19CC
687/* bit1: 1 = software controls the 25M clock */
688#define ALX_MISC3_25M_BY_SW BIT(1)
689/* bit0: switch the 25M clock to the internal oscillator */
690#define ALX_MISC3_25M_NOTO_INTNL BIT(0)
691
692/* MSIX tbl in memory space */
693#define ALX_MSIX_ENTRY_BASE 0x2000
694
695/********************* PHY regs definition ***************************/
696
697/* PHY Specific Status Register */
698#define ALX_MII_GIGA_PSSR 0x11
699#define ALX_GIGA_PSSR_SPD_DPLX_RESOLVED 0x0800
700#define ALX_GIGA_PSSR_DPLX 0x2000
701#define ALX_GIGA_PSSR_SPEED 0xC000
702#define ALX_GIGA_PSSR_10MBS 0x0000
703#define ALX_GIGA_PSSR_100MBS 0x4000
704#define ALX_GIGA_PSSR_1000MBS 0x8000
705
706/* PHY Interrupt Enable Register */
707#define ALX_MII_IER 0x12
708#define ALX_IER_LINK_UP 0x0400
709#define ALX_IER_LINK_DOWN 0x0800
710
711/* PHY Interrupt Status Register */
712#define ALX_MII_ISR 0x13
713
714#define ALX_MII_DBG_ADDR 0x1D
715#define ALX_MII_DBG_DATA 0x1E
716
717/***************************** debug port *************************************/
718
719#define ALX_MIIDBG_ANACTRL 0x00
720#define ALX_ANACTRL_DEF 0x02EF
721
722#define ALX_MIIDBG_SYSMODCTRL 0x04
723/* enable half bias */
724#define ALX_SYSMODCTRL_IECHOADJ_DEF 0xBB8B
725
726#define ALX_MIIDBG_SRDSYSMOD 0x05
727#define ALX_SRDSYSMOD_DEEMP_EN 0x0040
728#define ALX_SRDSYSMOD_DEF 0x2C46
729
730#define ALX_MIIDBG_HIBNEG 0x0B
731#define ALX_HIBNEG_PSHIB_EN 0x8000
732#define ALX_HIBNEG_HIB_PSE 0x1000
733#define ALX_HIBNEG_DEF 0xBC40
734#define ALX_HIBNEG_NOHIB (ALX_HIBNEG_DEF & \
735 ~(ALX_HIBNEG_PSHIB_EN | ALX_HIBNEG_HIB_PSE))
736
737#define ALX_MIIDBG_TST10BTCFG 0x12
738#define ALX_TST10BTCFG_DEF 0x4C04
739
740#define ALX_MIIDBG_AZ_ANADECT 0x15
741#define ALX_AZ_ANADECT_DEF 0x3220
742#define ALX_AZ_ANADECT_LONG 0x3210
743
744#define ALX_MIIDBG_MSE16DB 0x18
745#define ALX_MSE16DB_UP 0x05EA
746#define ALX_MSE16DB_DOWN 0x02EA
747
748#define ALX_MIIDBG_MSE20DB 0x1C
749#define ALX_MSE20DB_TH_MASK 0x7F
750#define ALX_MSE20DB_TH_SHIFT 2
751#define ALX_MSE20DB_TH_DEF 0x2E
752#define ALX_MSE20DB_TH_HI 0x54
753
754#define ALX_MIIDBG_AGC 0x23
755#define ALX_AGC_2_VGA_MASK 0x3FU
756#define ALX_AGC_2_VGA_SHIFT 8
757#define ALX_AGC_LONG1G_LIMT 40
758#define ALX_AGC_LONG100M_LIMT 44
759
760#define ALX_MIIDBG_LEGCYPS 0x29
761#define ALX_LEGCYPS_EN 0x8000
762#define ALX_LEGCYPS_DEF 0x129D
763
764#define ALX_MIIDBG_TST100BTCFG 0x36
765#define ALX_TST100BTCFG_DEF 0xE12C
766
767#define ALX_MIIDBG_GREENCFG 0x3B
768#define ALX_GREENCFG_DEF 0x7078
769
770#define ALX_MIIDBG_GREENCFG2 0x3D
771#define ALX_GREENCFG2_BP_GREEN 0x8000
772#define ALX_GREENCFG2_GATE_DFSE_EN 0x0080
773
774/******* dev 3 *********/
775#define ALX_MIIEXT_PCS 3
776
777#define ALX_MIIEXT_CLDCTRL3 0x8003
778#define ALX_CLDCTRL3_BP_CABLE1TH_DET_GT 0x8000
779
780#define ALX_MIIEXT_CLDCTRL5 0x8005
781#define ALX_CLDCTRL5_BP_VD_HLFBIAS 0x4000
782
783#define ALX_MIIEXT_CLDCTRL6 0x8006
784#define ALX_CLDCTRL6_CAB_LEN_MASK 0xFF
785#define ALX_CLDCTRL6_CAB_LEN_SHIFT 0
786#define ALX_CLDCTRL6_CAB_LEN_SHORT1G 116
787#define ALX_CLDCTRL6_CAB_LEN_SHORT100M 152
788
789#define ALX_MIIEXT_VDRVBIAS 0x8062
790#define ALX_VDRVBIAS_DEF 0x3
791
792/********* dev 7 **********/
793#define ALX_MIIEXT_ANEG 7
794
795#define ALX_MIIEXT_LOCAL_EEEADV 0x3C
796#define ALX_LOCAL_EEEADV_1000BT 0x0004
797#define ALX_LOCAL_EEEADV_100BT 0x0002
798
799#define ALX_MIIEXT_AFE 0x801A
800#define ALX_AFE_10BT_100M_TH 0x0040
801
802#define ALX_MIIEXT_S3DIG10 0x8023
803/* bit0: 1 = bypass the 10BASE-T RX FIFO, 0 = original 10BASE-T RX path */
804#define ALX_MIIEXT_S3DIG10_SL 0x0001
805#define ALX_MIIEXT_S3DIG10_DEF 0
806
807#define ALX_MIIEXT_NLP78 0x8027
808#define ALX_MIIEXT_NLP78_120M_DEF 0x8A05
809
810#endif
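
The register map above follows the usual mask/shift convention: a field value is ANDed with its *_MASK and shifted left by its *_SHIFT before being written into the 32-bit register. As a minimal sketch only (this helper is illustrative and not part of the driver), packing the four per-pattern length fields of ALX_WOL_CTRL5 would look like this:

/* Illustrative only: build the ALX_WOL_CTRL5 value from four pattern
 * lengths, using the mask/shift pairs defined above.
 */
static u32 alx_pack_wol_ctrl5(u8 len0, u8 len1, u8 len2, u8 len3)
{
	u32 val = 0;

	val |= (len0 & ALX_WOL_CTRL5_PT0_LEN_MASK) << ALX_WOL_CTRL5_PT0_LEN_SHIFT;
	val |= (len1 & ALX_WOL_CTRL5_PT1_LEN_MASK) << ALX_WOL_CTRL5_PT1_LEN_SHIFT;
	val |= (len2 & ALX_WOL_CTRL5_PT2_LEN_MASK) << ALX_WOL_CTRL5_PT2_LEN_SHIFT;
	val |= (len3 & ALX_WOL_CTRL5_PT3_LEN_MASK) << ALX_WOL_CTRL5_PT3_LEN_SHIFT;

	return val;
}
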
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index be59ec4b2c30..638e55435b04 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3192,11 +3192,11 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3192 rc |= XMIT_CSUM_TCP; 3192 rc |= XMIT_CSUM_TCP;
3193 3193
3194 if (skb_is_gso_v6(skb)) { 3194 if (skb_is_gso_v6(skb)) {
3195 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6); 3195 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3196 if (rc & XMIT_CSUM_ENC) 3196 if (rc & XMIT_CSUM_ENC)
3197 rc |= XMIT_GSO_ENC_V6; 3197 rc |= XMIT_GSO_ENC_V6;
3198 } else if (skb_is_gso(skb)) { 3198 } else if (skb_is_gso(skb)) {
3199 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP); 3199 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3200 if (rc & XMIT_CSUM_ENC) 3200 if (rc & XMIT_CSUM_ENC)
3201 rc |= XMIT_GSO_ENC_V4; 3201 rc |= XMIT_GSO_ENC_V4;
3202 } 3202 }
@@ -3483,19 +3483,18 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3483{ 3483{
3484 u16 hlen_w = 0; 3484 u16 hlen_w = 0;
3485 u8 outerip_off, outerip_len = 0; 3485 u8 outerip_off, outerip_len = 0;
3486
3486 /* from outer IP to transport */ 3487 /* from outer IP to transport */
3487 hlen_w = (skb_inner_transport_header(skb) - 3488 hlen_w = (skb_inner_transport_header(skb) -
3488 skb_network_header(skb)) >> 1; 3489 skb_network_header(skb)) >> 1;
3489 3490
3490 /* transport len */ 3491 /* transport len */
3491 if (xmit_type & XMIT_CSUM_TCP) 3492 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3492 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3493 else
3494 hlen_w += sizeof(struct udphdr) >> 1;
3495 3493
3496 pbd2->fw_ip_hdr_to_payload_w = hlen_w; 3494 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3497 3495
3498 if (xmit_type & XMIT_CSUM_ENC_V4) { 3496 /* outer IP header info */
3497 if (xmit_type & XMIT_CSUM_V4) {
3499 struct iphdr *iph = ip_hdr(skb); 3498 struct iphdr *iph = ip_hdr(skb);
3500 pbd2->fw_ip_csum_wo_len_flags_frag = 3499 pbd2->fw_ip_csum_wo_len_flags_frag =
3501 bswab16(csum_fold((~iph->check) - 3500 bswab16(csum_fold((~iph->check) -
@@ -3818,8 +3817,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3818 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, 3817 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3819 xmit_type); 3818 xmit_type);
3820 else 3819 else
3821 bnx2x_set_pbd_gso(skb, pbd_e1x, tx_start_bd, 3820 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
3822 xmit_type);
3823 } 3821 }
3824 3822
3825 /* Set the PBD's parsing_data field if not zero 3823 /* Set the PBD's parsing_data field if not zero
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 1f2dd928888a..a13463e8a2c3 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -744,6 +744,9 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
744 status = tg3_ape_read32(tp, gnt + off); 744 status = tg3_ape_read32(tp, gnt + off);
745 if (status == bit) 745 if (status == bit)
746 break; 746 break;
747 if (pci_channel_offline(tp->pdev))
748 break;
749
747 udelay(10); 750 udelay(10);
748 } 751 }
749 752
@@ -1635,6 +1638,9 @@ static void tg3_wait_for_event_ack(struct tg3 *tp)
1635 for (i = 0; i < delay_cnt; i++) { 1638 for (i = 0; i < delay_cnt; i++) {
1636 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) 1639 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1637 break; 1640 break;
1641 if (pci_channel_offline(tp->pdev))
1642 break;
1643
1638 udelay(8); 1644 udelay(8);
1639 } 1645 }
1640} 1646}
@@ -1800,6 +1806,9 @@ static int tg3_poll_fw(struct tg3 *tp)
1800 int i; 1806 int i;
1801 u32 val; 1807 u32 val;
1802 1808
1809 if (tg3_flag(tp, NO_FWARE_REPORTED))
1810 return 0;
1811
1803 if (tg3_flag(tp, IS_SSB_CORE)) { 1812 if (tg3_flag(tp, IS_SSB_CORE)) {
1804 /* We don't use firmware. */ 1813 /* We don't use firmware. */
1805 return 0; 1814 return 0;
@@ -1810,6 +1819,9 @@ static int tg3_poll_fw(struct tg3 *tp)
1810 for (i = 0; i < 200; i++) { 1819 for (i = 0; i < 200; i++) {
1811 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) 1820 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1812 return 0; 1821 return 0;
1822 if (pci_channel_offline(tp->pdev))
1823 return -ENODEV;
1824
1813 udelay(100); 1825 udelay(100);
1814 } 1826 }
1815 return -ENODEV; 1827 return -ENODEV;
@@ -1820,6 +1832,15 @@ static int tg3_poll_fw(struct tg3 *tp)
1820 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); 1832 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1821 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) 1833 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1822 break; 1834 break;
1835 if (pci_channel_offline(tp->pdev)) {
1836 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1837 tg3_flag_set(tp, NO_FWARE_REPORTED);
1838 netdev_info(tp->dev, "No firmware running\n");
1839 }
1840
1841 break;
1842 }
1843
1823 udelay(10); 1844 udelay(10);
1824 } 1845 }
1825 1846
@@ -3517,6 +3538,8 @@ static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3517 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); 3538 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3518 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT) 3539 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3519 break; 3540 break;
3541 if (pci_channel_offline(tp->pdev))
3542 return -EBUSY;
3520 } 3543 }
3521 3544
3522 return (i == iters) ? -EBUSY : 0; 3545 return (i == iters) ? -EBUSY : 0;
@@ -8586,6 +8609,14 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, boo
8586 tw32_f(ofs, val); 8609 tw32_f(ofs, val);
8587 8610
8588 for (i = 0; i < MAX_WAIT_CNT; i++) { 8611 for (i = 0; i < MAX_WAIT_CNT; i++) {
8612 if (pci_channel_offline(tp->pdev)) {
8613 dev_err(&tp->pdev->dev,
8614 "tg3_stop_block device offline, "
8615 "ofs=%lx enable_bit=%x\n",
8616 ofs, enable_bit);
8617 return -ENODEV;
8618 }
8619
8589 udelay(100); 8620 udelay(100);
8590 val = tr32(ofs); 8621 val = tr32(ofs);
8591 if ((val & enable_bit) == 0) 8622 if ((val & enable_bit) == 0)
@@ -8609,6 +8640,13 @@ static int tg3_abort_hw(struct tg3 *tp, bool silent)
8609 8640
8610 tg3_disable_ints(tp); 8641 tg3_disable_ints(tp);
8611 8642
8643 if (pci_channel_offline(tp->pdev)) {
8644 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8645 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8646 err = -ENODEV;
8647 goto err_no_dev;
8648 }
8649
8612 tp->rx_mode &= ~RX_MODE_ENABLE; 8650 tp->rx_mode &= ~RX_MODE_ENABLE;
8613 tw32_f(MAC_RX_MODE, tp->rx_mode); 8651 tw32_f(MAC_RX_MODE, tp->rx_mode);
8614 udelay(10); 8652 udelay(10);
@@ -8657,6 +8695,7 @@ static int tg3_abort_hw(struct tg3 *tp, bool silent)
8657 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); 8695 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8658 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); 8696 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8659 8697
8698err_no_dev:
8660 for (i = 0; i < tp->irq_cnt; i++) { 8699 for (i = 0; i < tp->irq_cnt; i++) {
8661 struct tg3_napi *tnapi = &tp->napi[i]; 8700 struct tg3_napi *tnapi = &tp->napi[i];
8662 if (tnapi->hw_status) 8701 if (tnapi->hw_status)
@@ -9468,6 +9507,14 @@ static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9468 } 9507 }
9469} 9508}
9470 9509
9510static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9511{
9512 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9513 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9514 else
9515 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9516}
9517
9471/* tp->lock is held. */ 9518/* tp->lock is held. */
9472static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) 9519static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9473{ 9520{
@@ -10153,16 +10200,17 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
10153 tw32_f(RDMAC_MODE, rdmac_mode); 10200 tw32_f(RDMAC_MODE, rdmac_mode);
10154 udelay(40); 10201 udelay(40);
10155 10202
10156 if (tg3_asic_rev(tp) == ASIC_REV_5719) { 10203 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10204 tg3_asic_rev(tp) == ASIC_REV_5720) {
10157 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) { 10205 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10158 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp)) 10206 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10159 break; 10207 break;
10160 } 10208 }
10161 if (i < TG3_NUM_RDMA_CHANNELS) { 10209 if (i < TG3_NUM_RDMA_CHANNELS) {
10162 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10210 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10163 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA; 10211 val |= tg3_lso_rd_dma_workaround_bit(tp);
10164 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10212 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10165 tg3_flag_set(tp, 5719_RDMA_BUG); 10213 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10166 } 10214 }
10167 } 10215 }
10168 10216
@@ -10395,6 +10443,13 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
10395 */ 10443 */
10396static int tg3_init_hw(struct tg3 *tp, bool reset_phy) 10444static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10397{ 10445{
10446 /* Chip may have been just powered on. If so, the boot code may still
10447 * be running initialization. Wait for it to finish to avoid races in
10448 * accessing the hardware.
10449 */
10450 tg3_enable_register_access(tp);
10451 tg3_poll_fw(tp);
10452
10398 tg3_switch_clocks(tp); 10453 tg3_switch_clocks(tp);
10399 10454
10400 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 10455 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
@@ -10526,15 +10581,15 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
10526 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); 10581 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10527 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); 10582 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10528 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); 10583 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10529 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) && 10584 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10530 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low + 10585 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10531 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) { 10586 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10532 u32 val; 10587 u32 val;
10533 10588
10534 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10589 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10535 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA; 10590 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10536 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10591 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10537 tg3_flag_clear(tp, 5719_RDMA_BUG); 10592 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10538 } 10593 }
10539 10594
10540 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); 10595 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
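
The tg3 hunks above add the same defensive step to several register-polling loops: check pci_channel_offline() on each iteration so the driver stops waiting as soon as the PCI channel is dead (for example after an EEH error), instead of spinning until the timeout expires. A minimal sketch of the pattern, with a hypothetical ready() callback standing in for the driver's real register reads:

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Poll a hardware condition, but bail out early if the PCI channel died. */
static int poll_hw_ready(struct pci_dev *pdev, bool (*ready)(void *arg), void *arg)
{
	int i;

	for (i = 0; i < 200; i++) {
		if (ready(arg))
			return 0;
		if (pci_channel_offline(pdev))
			return -ENODEV;		/* device unreachable, stop waiting */
		udelay(100);
	}

	return -ETIMEDOUT;
}
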
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 9b2d3ac2474a..ff6e30eeae35 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1422,7 +1422,8 @@
1422#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910 1422#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910
1423#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K 0x00030000 1423#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K 0x00030000
1424#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K 0x000c0000 1424#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K 0x000c0000
1425#define TG3_LSO_RD_DMA_TX_LENGTH_WA 0x02000000 1425#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5719 0x02000000
1426#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5720 0x00200000
1426/* 0x4914 --> 0x4be0 unused */ 1427/* 0x4914 --> 0x4be0 unused */
1427 1428
1428#define TG3_NUM_RDMA_CHANNELS 4 1429#define TG3_NUM_RDMA_CHANNELS 4
@@ -3059,7 +3060,7 @@ enum TG3_FLAGS {
3059 TG3_FLAG_APE_HAS_NCSI, 3060 TG3_FLAG_APE_HAS_NCSI,
3060 TG3_FLAG_TX_TSTAMP_EN, 3061 TG3_FLAG_TX_TSTAMP_EN,
3061 TG3_FLAG_4K_FIFO_LIMIT, 3062 TG3_FLAG_4K_FIFO_LIMIT,
3062 TG3_FLAG_5719_RDMA_BUG, 3063 TG3_FLAG_5719_5720_RDMA_BUG,
3063 TG3_FLAG_RESET_TASK_PENDING, 3064 TG3_FLAG_RESET_TASK_PENDING,
3064 TG3_FLAG_PTP_CAPABLE, 3065 TG3_FLAG_PTP_CAPABLE,
3065 TG3_FLAG_5705_PLUS, 3066 TG3_FLAG_5705_PLUS,
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 6e8bc9d88c41..94d957d203a6 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -244,7 +244,7 @@ bnad_debugfs_lseek(struct file *file, loff_t offset, int orig)
244 file->f_pos += offset; 244 file->f_pos += offset;
245 break; 245 break;
246 case 2: 246 case 2:
247 file->f_pos = debug->buffer_len - offset; 247 file->f_pos = debug->buffer_len + offset;
248 break; 248 break;
249 default: 249 default:
250 return -EINVAL; 250 return -EINVAL;
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index 28a5e425fecf..92306b320840 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -76,6 +76,12 @@ int tulip_refill_rx(struct net_device *dev)
76 76
77 mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ, 77 mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
78 PCI_DMA_FROMDEVICE); 78 PCI_DMA_FROMDEVICE);
79 if (dma_mapping_error(&tp->pdev->dev, mapping)) {
80 dev_kfree_skb(skb);
81 tp->rx_buffers[entry].skb = NULL;
82 break;
83 }
84
79 tp->rx_buffers[entry].mapping = mapping; 85 tp->rx_buffers[entry].mapping = mapping;
80 86
81 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping); 87 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
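
The tulip change above reflects a general DMA-API rule: a streaming mapping returned by pci_map_single()/dma_map_single() can fail and must be checked with dma_mapping_error() before the address is handed to hardware. A hedged, generic sketch of the pattern (names are illustrative, not taken from the tulip driver):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Map an rx buffer for device writes; free the skb if the mapping fails. */
static int map_rx_skb(struct device *dev, struct sk_buff *skb,
		      unsigned int len, dma_addr_t *mapping)
{
	*mapping = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *mapping)) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	return 0;
}
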
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index f544b297c9ab..0a510684e468 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -262,6 +262,7 @@ struct be_rx_compl_info {
262 u8 ipv6; 262 u8 ipv6;
263 u8 vtm; 263 u8 vtm;
264 u8 pkt_type; 264 u8 pkt_type;
265 u8 ip_frag;
265}; 266};
266 267
267struct be_rx_obj { 268struct be_rx_obj {
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index a236ecd27cf3..1db2df61b8af 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -562,7 +562,7 @@ int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
562 562
563 resource_error = lancer_provisioning_error(adapter); 563 resource_error = lancer_provisioning_error(adapter);
564 if (resource_error) 564 if (resource_error)
565 return -1; 565 return -EAGAIN;
566 566
567 status = lancer_wait_ready(adapter); 567 status = lancer_wait_ready(adapter);
568 if (!status) { 568 if (!status) {
@@ -590,8 +590,8 @@ int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
590 * when PF provisions resources. 590 * when PF provisions resources.
591 */ 591 */
592 resource_error = lancer_provisioning_error(adapter); 592 resource_error = lancer_provisioning_error(adapter);
593 if (status == -1 && !resource_error) 593 if (resource_error)
594 adapter->eeh_error = true; 594 status = -EAGAIN;
595 595
596 return status; 596 return status;
597} 597}
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 3c1099b47f2a..8780183c6d1c 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -356,7 +356,7 @@ struct amap_eth_rx_compl_v0 {
356 u8 ip_version; /* dword 1 */ 356 u8 ip_version; /* dword 1 */
357 u8 macdst[6]; /* dword 1 */ 357 u8 macdst[6]; /* dword 1 */
358 u8 vtp; /* dword 1 */ 358 u8 vtp; /* dword 1 */
359 u8 rsvd0; /* dword 1 */ 359 u8 ip_frag; /* dword 1 */
360 u8 fragndx[10]; /* dword 1 */ 360 u8 fragndx[10]; /* dword 1 */
361 u8 ct[2]; /* dword 1 */ 361 u8 ct[2]; /* dword 1 */
362 u8 sw; /* dword 1 */ 362 u8 sw; /* dword 1 */
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index ca2967b0f18b..a0b4be51f0d1 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1599,6 +1599,8 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1599 compl); 1599 compl);
1600 } 1600 }
1601 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl); 1601 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1602 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1603 ip_frag, compl);
1602} 1604}
1603 1605
1604static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) 1606static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
@@ -1620,6 +1622,9 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1620 else 1622 else
1621 be_parse_rx_compl_v0(compl, rxcp); 1623 be_parse_rx_compl_v0(compl, rxcp);
1622 1624
1625 if (rxcp->ip_frag)
1626 rxcp->l4_csum = 0;
1627
1623 if (rxcp->vlanf) { 1628 if (rxcp->vlanf) {
1624 /* vlanf could be wrongly set in some cards. 1629 /* vlanf could be wrongly set in some cards.
1625 * ignore if vtm is not set */ 1630 * ignore if vtm is not set */
@@ -2168,7 +2173,7 @@ static irqreturn_t be_msix(int irq, void *dev)
2168 2173
2169static inline bool do_gro(struct be_rx_compl_info *rxcp) 2174static inline bool do_gro(struct be_rx_compl_info *rxcp)
2170{ 2175{
2171 return (rxcp->tcpf && !rxcp->err) ? true : false; 2176 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2172} 2177}
2173 2178
2174static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, 2179static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
@@ -4093,6 +4098,7 @@ static int be_get_initial_config(struct be_adapter *adapter)
4093 4098
4094static int lancer_recover_func(struct be_adapter *adapter) 4099static int lancer_recover_func(struct be_adapter *adapter)
4095{ 4100{
4101 struct device *dev = &adapter->pdev->dev;
4096 int status; 4102 int status;
4097 4103
4098 status = lancer_test_and_set_rdy_state(adapter); 4104 status = lancer_test_and_set_rdy_state(adapter);
@@ -4104,8 +4110,7 @@ static int lancer_recover_func(struct be_adapter *adapter)
4104 4110
4105 be_clear(adapter); 4111 be_clear(adapter);
4106 4112
4107 adapter->hw_error = false; 4113 be_clear_all_error(adapter);
4108 adapter->fw_timeout = false;
4109 4114
4110 status = be_setup(adapter); 4115 status = be_setup(adapter);
4111 if (status) 4116 if (status)
@@ -4117,13 +4122,13 @@ static int lancer_recover_func(struct be_adapter *adapter)
4117 goto err; 4122 goto err;
4118 } 4123 }
4119 4124
4120 dev_err(&adapter->pdev->dev, 4125 dev_err(dev, "Error recovery successful\n");
4121 "Adapter SLIPORT recovery succeeded\n");
4122 return 0; 4126 return 0;
4123err: 4127err:
4124 if (adapter->eeh_error) 4128 if (status == -EAGAIN)
4125 dev_err(&adapter->pdev->dev, 4129 dev_err(dev, "Waiting for resource provisioning\n");
4126 "Adapter SLIPORT recovery failed\n"); 4130 else
4131 dev_err(dev, "Error recovery failed\n");
4127 4132
4128 return status; 4133 return status;
4129} 4134}
@@ -4132,28 +4137,27 @@ static void be_func_recovery_task(struct work_struct *work)
4132{ 4137{
4133 struct be_adapter *adapter = 4138 struct be_adapter *adapter =
4134 container_of(work, struct be_adapter, func_recovery_work.work); 4139 container_of(work, struct be_adapter, func_recovery_work.work);
4135 int status; 4140 int status = 0;
4136 4141
4137 be_detect_error(adapter); 4142 be_detect_error(adapter);
4138 4143
4139 if (adapter->hw_error && lancer_chip(adapter)) { 4144 if (adapter->hw_error && lancer_chip(adapter)) {
4140 4145
4141 if (adapter->eeh_error)
4142 goto out;
4143
4144 rtnl_lock(); 4146 rtnl_lock();
4145 netif_device_detach(adapter->netdev); 4147 netif_device_detach(adapter->netdev);
4146 rtnl_unlock(); 4148 rtnl_unlock();
4147 4149
4148 status = lancer_recover_func(adapter); 4150 status = lancer_recover_func(adapter);
4149
4150 if (!status) 4151 if (!status)
4151 netif_device_attach(adapter->netdev); 4152 netif_device_attach(adapter->netdev);
4152 } 4153 }
4153 4154
4154out: 4155 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4155 schedule_delayed_work(&adapter->func_recovery_work, 4156 * no need to attempt further recovery.
4156 msecs_to_jiffies(1000)); 4157 */
4158 if (!status || status == -EAGAIN)
4159 schedule_delayed_work(&adapter->func_recovery_work,
4160 msecs_to_jiffies(1000));
4157} 4161}
4158 4162
4159static void be_worker(struct work_struct *work) 4163static void be_worker(struct work_struct *work)
@@ -4258,6 +4262,9 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4258 netdev->features |= NETIF_F_HIGHDMA; 4262 netdev->features |= NETIF_F_HIGHDMA;
4259 } else { 4263 } else {
4260 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 4264 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4265 if (!status)
4266 status = dma_set_coherent_mask(&pdev->dev,
4267 DMA_BIT_MASK(32));
4261 if (status) { 4268 if (status) {
4262 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); 4269 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4263 goto free_netdev; 4270 goto free_netdev;
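
The be2net probe fix above makes the 32-bit fallback set the coherent DMA mask as well as the streaming mask; otherwise coherent allocations could still be handed addresses above 4 GB. A generic sketch of the usual fallback sequence (a standalone helper, not the driver's code):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int set_dma_masks(struct device *dev)
{
	/* Prefer 64-bit DMA; fall back to 32 bits for both the streaming
	 * and the coherent mask.
	 */
	if (!dma_set_mask(dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(dev, DMA_BIT_MASK(64)))
		return 0;

	if (!dma_set_mask(dev, DMA_BIT_MASK(32)) &&
	    !dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
		return 0;

	return -EIO;
}
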
@@ -4436,20 +4443,19 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4436 4443
4437 dev_err(&adapter->pdev->dev, "EEH error detected\n"); 4444 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4438 4445
4439 adapter->eeh_error = true; 4446 if (!adapter->eeh_error) {
4447 adapter->eeh_error = true;
4440 4448
4441 cancel_delayed_work_sync(&adapter->func_recovery_work); 4449 cancel_delayed_work_sync(&adapter->func_recovery_work);
4442
4443 rtnl_lock();
4444 netif_device_detach(netdev);
4445 rtnl_unlock();
4446 4450
4447 if (netif_running(netdev)) {
4448 rtnl_lock(); 4451 rtnl_lock();
4449 be_close(netdev); 4452 netif_device_detach(netdev);
4453 if (netif_running(netdev))
4454 be_close(netdev);
4450 rtnl_unlock(); 4455 rtnl_unlock();
4456
4457 be_clear(adapter);
4451 } 4458 }
4452 be_clear(adapter);
4453 4459
4454 if (state == pci_channel_io_perm_failure) 4460 if (state == pci_channel_io_perm_failure)
4455 return PCI_ERS_RESULT_DISCONNECT; 4461 return PCI_ERS_RESULT_DISCONNECT;
@@ -4474,7 +4480,6 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4474 int status; 4480 int status;
4475 4481
4476 dev_info(&adapter->pdev->dev, "EEH reset\n"); 4482 dev_info(&adapter->pdev->dev, "EEH reset\n");
4477 be_clear_all_error(adapter);
4478 4483
4479 status = pci_enable_device(pdev); 4484 status = pci_enable_device(pdev);
4480 if (status) 4485 if (status)
@@ -4492,6 +4497,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4492 return PCI_ERS_RESULT_DISCONNECT; 4497 return PCI_ERS_RESULT_DISCONNECT;
4493 4498
4494 pci_cleanup_aer_uncorrect_error_status(pdev); 4499 pci_cleanup_aer_uncorrect_error_status(pdev);
4500 be_clear_all_error(adapter);
4495 return PCI_ERS_RESULT_RECOVERED; 4501 return PCI_ERS_RESULT_RECOVERED;
4496} 4502}
4497 4503
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 85a06037b242..d48099f03b7f 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -516,6 +516,7 @@ fec_restart(struct net_device *ndev, int duplex)
516 /* Set MII speed */ 516 /* Set MII speed */
517 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 517 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
518 518
519#if !defined(CONFIG_M5272)
519 /* set RX checksum */ 520 /* set RX checksum */
520 val = readl(fep->hwp + FEC_RACC); 521 val = readl(fep->hwp + FEC_RACC);
521 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) 522 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
@@ -523,6 +524,7 @@ fec_restart(struct net_device *ndev, int duplex)
523 else 524 else
524 val &= ~FEC_RACC_OPTIONS; 525 val &= ~FEC_RACC_OPTIONS;
525 writel(val, fep->hwp + FEC_RACC); 526 writel(val, fep->hwp + FEC_RACC);
527#endif
526 528
527 /* 529 /*
528 * The phy interface and speed need to get configured 530 * The phy interface and speed need to get configured
@@ -575,6 +577,7 @@ fec_restart(struct net_device *ndev, int duplex)
575#endif 577#endif
576 } 578 }
577 579
580#if !defined(CONFIG_M5272)
578 /* enable pause frame*/ 581 /* enable pause frame*/
579 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || 582 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
580 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && 583 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
@@ -592,6 +595,7 @@ fec_restart(struct net_device *ndev, int duplex)
592 } else { 595 } else {
593 rcntl &= ~FEC_ENET_FCE; 596 rcntl &= ~FEC_ENET_FCE;
594 } 597 }
598#endif /* !defined(CONFIG_M5272) */
595 599
596 writel(rcntl, fep->hwp + FEC_R_CNTRL); 600 writel(rcntl, fep->hwp + FEC_R_CNTRL);
597 601
@@ -1038,6 +1042,18 @@ static void fec_get_mac(struct net_device *ndev)
1038 iap = &tmpaddr[0]; 1042 iap = &tmpaddr[0];
1039 } 1043 }
1040 1044
1045 /*
1046 * 5) random mac address
1047 */
1048 if (!is_valid_ether_addr(iap)) {
1049 /* Report it and use a random ethernet address instead */
1050 netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
1051 eth_hw_addr_random(ndev);
1052 netdev_info(ndev, "Using random MAC address: %pM\n",
1053 ndev->dev_addr);
1054 return;
1055 }
1056
1041 memcpy(ndev->dev_addr, iap, ETH_ALEN); 1057 memcpy(ndev->dev_addr, iap, ETH_ALEN);
1042 1058
1043 /* Adjust MAC if using macaddr */ 1059 /* Adjust MAC if using macaddr */
@@ -1193,7 +1209,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
1193 /* mask with MAC supported features */ 1209 /* mask with MAC supported features */
1194 if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) { 1210 if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
1195 phy_dev->supported &= PHY_GBIT_FEATURES; 1211 phy_dev->supported &= PHY_GBIT_FEATURES;
1212#if !defined(CONFIG_M5272)
1196 phy_dev->supported |= SUPPORTED_Pause; 1213 phy_dev->supported |= SUPPORTED_Pause;
1214#endif
1197 } 1215 }
1198 else 1216 else
1199 phy_dev->supported &= PHY_BASIC_FEATURES; 1217 phy_dev->supported &= PHY_BASIC_FEATURES;
@@ -1378,6 +1396,8 @@ static int fec_enet_get_ts_info(struct net_device *ndev,
1378 } 1396 }
1379} 1397}
1380 1398
1399#if !defined(CONFIG_M5272)
1400
1381static void fec_enet_get_pauseparam(struct net_device *ndev, 1401static void fec_enet_get_pauseparam(struct net_device *ndev,
1382 struct ethtool_pauseparam *pause) 1402 struct ethtool_pauseparam *pause)
1383{ 1403{
@@ -1424,9 +1444,13 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
1424 return 0; 1444 return 0;
1425} 1445}
1426 1446
1447#endif /* !defined(CONFIG_M5272) */
1448
1427static const struct ethtool_ops fec_enet_ethtool_ops = { 1449static const struct ethtool_ops fec_enet_ethtool_ops = {
1450#if !defined(CONFIG_M5272)
1428 .get_pauseparam = fec_enet_get_pauseparam, 1451 .get_pauseparam = fec_enet_get_pauseparam,
1429 .set_pauseparam = fec_enet_set_pauseparam, 1452 .set_pauseparam = fec_enet_set_pauseparam,
1453#endif
1430 .get_settings = fec_enet_get_settings, 1454 .get_settings = fec_enet_get_settings,
1431 .set_settings = fec_enet_set_settings, 1455 .set_settings = fec_enet_set_settings,
1432 .get_drvinfo = fec_enet_get_drvinfo, 1456 .get_drvinfo = fec_enet_get_drvinfo,
@@ -1862,10 +1886,12 @@ fec_probe(struct platform_device *pdev)
1862 /* setup board info structure */ 1886 /* setup board info structure */
1863 fep = netdev_priv(ndev); 1887 fep = netdev_priv(ndev);
1864 1888
1889#if !defined(CONFIG_M5272)
1865 /* default enable pause frame auto negotiation */ 1890 /* default enable pause frame auto negotiation */
1866 if (pdev->id_entry && 1891 if (pdev->id_entry &&
1867 (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT)) 1892 (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
1868 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; 1893 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
1894#endif
1869 1895
1870 fep->hwp = devm_request_and_ioremap(&pdev->dev, r); 1896 fep->hwp = devm_request_and_ioremap(&pdev->dev, r);
1871 fep->pdev = pdev; 1897 fep->pdev = pdev;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 2ad1494efbb3..d1cbfb12c1ca 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1757,7 +1757,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
1757 memset(rxq->rx_desc_area, 0, size); 1757 memset(rxq->rx_desc_area, 0, size);
1758 1758
1759 rxq->rx_desc_area_size = size; 1759 rxq->rx_desc_area_size = size;
1760 rxq->rx_skb = kmalloc_array(rxq->rx_ring_size, sizeof(*rxq->rx_skb), 1760 rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
1761 GFP_KERNEL); 1761 GFP_KERNEL);
1762 if (rxq->rx_skb == NULL) 1762 if (rxq->rx_skb == NULL)
1763 goto out_free; 1763 goto out_free;
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 339bb323cb0c..1c8af8ba08d9 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1015,7 +1015,7 @@ static int rxq_init(struct net_device *dev)
1015 int rx_desc_num = pep->rx_ring_size; 1015 int rx_desc_num = pep->rx_ring_size;
1016 1016
1017 /* Allocate RX skb rings */ 1017 /* Allocate RX skb rings */
1018 pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size, 1018 pep->rx_skb = kzalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
1019 GFP_KERNEL); 1019 GFP_KERNEL);
1020 if (!pep->rx_skb) 1020 if (!pep->rx_skb)
1021 return -ENOMEM; 1021 return -ENOMEM;
@@ -1076,7 +1076,7 @@ static int txq_init(struct net_device *dev)
1076 int size = 0, i = 0; 1076 int size = 0, i = 0;
1077 int tx_desc_num = pep->tx_ring_size; 1077 int tx_desc_num = pep->tx_ring_size;
1078 1078
1079 pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size, 1079 pep->tx_skb = kzalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
1080 GFP_KERNEL); 1080 GFP_KERNEL);
1081 if (!pep->tx_skb) 1081 if (!pep->tx_skb)
1082 return -ENOMEM; 1082 return -ENOMEM;
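
Both ring-allocation fixes above (mv643xx_eth and pxa168_eth) switch to a zeroing allocator so that slots which never receive an skb read back as NULL pointers during cleanup rather than uninitialized memory. A minimal sketch of the preferred form (the helper name is illustrative):

#include <linux/slab.h>
#include <linux/skbuff.h>

static struct sk_buff **alloc_skb_ring(unsigned int ring_size)
{
	/* kcalloc() zeroes the array and also checks ring_size * size
	 * for multiplication overflow.
	 */
	return kcalloc(ring_size, sizeof(struct sk_buff *), GFP_KERNEL);
}
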
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 1df56cc50ee9..0e572a527154 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -222,8 +222,6 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
222 * FLR process. The only non-zero result in the RESET command 222 * FLR process. The only non-zero result in the RESET command
223 * is MLX4_DELAY_RESET_SLAVE*/ 223 * is MLX4_DELAY_RESET_SLAVE*/
224 if ((MLX4_COMM_CMD_RESET == cmd)) { 224 if ((MLX4_COMM_CMD_RESET == cmd)) {
225 mlx4_warn(dev, "Got slave FLRed from Communication"
226 " channel (ret:0x%x)\n", ret_from_pending);
227 err = MLX4_DELAY_RESET_SLAVE; 225 err = MLX4_DELAY_RESET_SLAVE;
228 } else { 226 } else {
229 mlx4_warn(dev, "Communication channel timed out\n"); 227 mlx4_warn(dev, "Communication channel timed out\n");
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index b35f94700093..89c47ea84b50 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1323,6 +1323,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
1323 priv->last_moder_time[ring] = moder_time; 1323 priv->last_moder_time[ring] = moder_time;
1324 cq = &priv->rx_cq[ring]; 1324 cq = &priv->rx_cq[ring];
1325 cq->moder_time = moder_time; 1325 cq->moder_time = moder_time;
1326 cq->moder_cnt = priv->rx_frames;
1326 err = mlx4_en_set_cq_moder(priv, cq); 1327 err = mlx4_en_set_cq_moder(priv, cq);
1327 if (err) 1328 if (err)
1328 en_err(priv, "Failed modifying moderation for cq:%d\n", 1329 en_err(priv, "Failed modifying moderation for cq:%d\n",
@@ -2118,6 +2119,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2118 struct mlx4_en_priv *priv; 2119 struct mlx4_en_priv *priv;
2119 int i; 2120 int i;
2120 int err; 2121 int err;
2122 u64 mac_u64;
2121 2123
2122 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), 2124 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
2123 MAX_TX_RINGS, MAX_RX_RINGS); 2125 MAX_TX_RINGS, MAX_RX_RINGS);
@@ -2191,10 +2193,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2191 dev->addr_len = ETH_ALEN; 2193 dev->addr_len = ETH_ALEN;
2192 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); 2194 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
2193 if (!is_valid_ether_addr(dev->dev_addr)) { 2195 if (!is_valid_ether_addr(dev->dev_addr)) {
2194 en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", 2196 if (mlx4_is_slave(priv->mdev->dev)) {
2195 priv->port, dev->dev_addr); 2197 eth_hw_addr_random(dev);
2196 err = -EINVAL; 2198 en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
2197 goto out; 2199 mac_u64 = mlx4_en_mac_to_u64(dev->dev_addr);
2200 mdev->dev->caps.def_mac[priv->port] = mac_u64;
2201 } else {
 2202 en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
2203 priv->port, dev->dev_addr);
2204 err = -EINVAL;
2205 goto out;
2206 }
2198 } 2207 }
2199 2208
2200 memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac)); 2209 memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 58a8e535d698..2c97901c6a6d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -840,12 +840,16 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
840 MLX4_CMD_NATIVE); 840 MLX4_CMD_NATIVE);
841 841
842 if (!err && dev->caps.function != slave) { 842 if (!err && dev->caps.function != slave) {
843 /* set slave default_mac address */
844 MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
845 def_mac += slave << 8;
846 /* if config MAC in DB use it */ 843 /* if config MAC in DB use it */
847 if (priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac) 844 if (priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac)
848 def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac; 845 def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
846 else {
847 /* set slave default_mac address */
848 MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
849 def_mac += slave << 8;
850 priv->mfunc.master.vf_admin[slave].vport[vhcr->in_modifier].mac = def_mac;
851 }
852
849 MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET); 853 MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
850 854
851 /* get port type - currently only eth is enabled */ 855 /* get port type - currently only eth is enabled */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 0d32a82458bf..8a434997a0df 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -632,6 +632,9 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
632 dev->caps.cqe_size = 32; 632 dev->caps.cqe_size = 32;
633 } 633 }
634 634
635 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
636 mlx4_warn(dev, "Timestamping is not supported in slave mode.\n");
637
635 slave_adjust_steering_mode(dev, &dev_cap, &hca_param); 638 slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
636 639
637 return 0; 640 return 0;
@@ -1290,7 +1293,6 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
1290{ 1293{
1291 struct mlx4_priv *priv = mlx4_priv(dev); 1294 struct mlx4_priv *priv = mlx4_priv(dev);
1292 u64 dma = (u64) priv->mfunc.vhcr_dma; 1295 u64 dma = (u64) priv->mfunc.vhcr_dma;
1293 int num_of_reset_retries = NUM_OF_RESET_RETRIES;
1294 int ret_from_reset = 0; 1296 int ret_from_reset = 0;
1295 u32 slave_read; 1297 u32 slave_read;
1296 u32 cmd_channel_ver; 1298 u32 cmd_channel_ver;
@@ -1304,18 +1306,10 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
1304 * NUM_OF_RESET_RETRIES times before leaving.*/ 1306 * NUM_OF_RESET_RETRIES times before leaving.*/
1305 if (ret_from_reset) { 1307 if (ret_from_reset) {
1306 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { 1308 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
1307 msleep(SLEEP_TIME_IN_RESET); 1309 mlx4_warn(dev, "slave is currently in the "
1308 while (ret_from_reset && num_of_reset_retries) { 1310 "middle of FLR. Deferring probe.\n");
1309 mlx4_warn(dev, "slave is currently in the" 1311 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1310 "middle of FLR. retrying..." 1312 return -EPROBE_DEFER;
1311 "(try num:%d)\n",
1312 (NUM_OF_RESET_RETRIES -
1313 num_of_reset_retries + 1));
1314 ret_from_reset =
1315 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
1316 0, MLX4_COMM_TIME);
1317 num_of_reset_retries = num_of_reset_retries - 1;
1318 }
1319 } else 1313 } else
1320 goto err; 1314 goto err;
1321 } 1315 }
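
Returning -EPROBE_DEFER here replaces the driver's private retry loop: the driver core re-queues the probe and tries again later, once the FLR mentioned in the warning has completed. A hedged, generic sketch of that pattern (resource_ready() is a hypothetical check, not an mlx4 function):

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	if (!resource_ready(pdev))	/* hypothetical: dependency not up yet */
		return -EPROBE_DEFER;	/* driver core will retry probe later */

	/* ... normal initialization ... */
	return 0;
}
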
@@ -1526,7 +1520,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1526 } else { 1520 } else {
1527 err = mlx4_init_slave(dev); 1521 err = mlx4_init_slave(dev);
1528 if (err) { 1522 if (err) {
1529 mlx4_err(dev, "Failed to initialize slave\n"); 1523 if (err != -EPROBE_DEFER)
1524 mlx4_err(dev, "Failed to initialize slave\n");
1530 return err; 1525 return err;
1531 } 1526 }
1532 1527
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 921729f9c85c..91a8a5d28037 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -46,17 +46,25 @@
46union mgmt_port_ring_entry { 46union mgmt_port_ring_entry {
47 u64 d64; 47 u64 d64;
48 struct { 48 struct {
49 u64 reserved_62_63:2; 49#define RING_ENTRY_CODE_DONE 0xf
50#define RING_ENTRY_CODE_MORE 0x10
51#ifdef __BIG_ENDIAN_BITFIELD
52 u64 reserved_62_63:2;
50 /* Length of the buffer/packet in bytes */ 53 /* Length of the buffer/packet in bytes */
51 u64 len:14; 54 u64 len:14;
52 /* For TX, signals that the packet should be timestamped */ 55 /* For TX, signals that the packet should be timestamped */
53 u64 tstamp:1; 56 u64 tstamp:1;
54 /* The RX error code */ 57 /* The RX error code */
55 u64 code:7; 58 u64 code:7;
56#define RING_ENTRY_CODE_DONE 0xf
57#define RING_ENTRY_CODE_MORE 0x10
58 /* Physical address of the buffer */ 59 /* Physical address of the buffer */
59 u64 addr:40; 60 u64 addr:40;
61#else
62 u64 addr:40;
63 u64 code:7;
64 u64 tstamp:1;
65 u64 len:14;
66 u64 reserved_62_63:2;
67#endif
60 } s; 68 } s;
61}; 69};
62 70
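
The union above now declares the descriptor bitfields in both orders: on big-endian targets the most-significant field comes first, on little-endian the declarations are reversed, so the struct overlays the 64-bit word d64 identically either way. A stripped-down sketch of the idiom (field names here are generic, not the octeon_mgmt ones):

#include <asm/byteorder.h>
#include <linux/types.h>

union ring_entry {
	u64 d64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u64 reserved:2;
		u64 len:14;
		u64 flags:8;
		u64 addr:40;
#else	/* little-endian: same fields, reverse declaration order */
		u64 addr:40;
		u64 flags:8;
		u64 len:14;
		u64 reserved:2;
#endif
	} s;
};
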
@@ -1141,10 +1149,13 @@ static int octeon_mgmt_open(struct net_device *netdev)
1141 /* For compensation state to lock. */ 1149 /* For compensation state to lock. */
1142 ndelay(1040 * NS_PER_PHY_CLK); 1150 ndelay(1040 * NS_PER_PHY_CLK);
1143 1151
1144 /* Some Ethernet switches cannot handle standard 1152 /* Default Interframe Gaps are too small. Recommended
1145 * Interframe Gap, increase to 16 bytes. 1153 * workaround is.
1154 *
1155 * AGL_GMX_TX_IFG[IFG1]=14
1156 * AGL_GMX_TX_IFG[IFG2]=10
1146 */ 1157 */
1147 cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0x88); 1158 cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
1148 } 1159 }
1149 1160
1150 octeon_mgmt_rx_fill_ring(netdev); 1161 octeon_mgmt_rx_fill_ring(netdev);
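
For reference, the new IFG value matches the figures quoted in the comment: assuming IFG2 occupies the upper nibble and IFG1 the lower nibble of the register (inferred from the values used, not from Octeon documentation), 0xae encodes IFG2 = 0xa = 10 and IFG1 = 0xe = 14, whereas the old 0x88 encoded 8 and 8, the standard 16-byte total gap.
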
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 43562c256379..6acf82b9f018 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -642,7 +642,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
642 qlcnic_83xx_config_intrpt(adapter, 0); 642 qlcnic_83xx_config_intrpt(adapter, 0);
643 } 643 }
644 /* Allow dma queues to drain after context reset */ 644 /* Allow dma queues to drain after context reset */
645 msleep(20); 645 mdelay(20);
646 } 646 }
647} 647}
648 648
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 50235d201592..f87cc216045b 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4717,6 +4717,7 @@ static int qlge_probe(struct pci_dev *pdev,
4717 dev_err(&pdev->dev, "net device registration failed.\n"); 4717 dev_err(&pdev->dev, "net device registration failed.\n");
4718 ql_release_all(pdev); 4718 ql_release_all(pdev);
4719 pci_disable_device(pdev); 4719 pci_disable_device(pdev);
4720 free_netdev(ndev);
4720 return err; 4721 return err;
4721 } 4722 }
4722 /* Start up the timer to trigger EEH if 4723 /* Start up the timer to trigger EEH if
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 42e9dd05c936..e29fe8dbd226 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -380,8 +380,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
380 .eesipr_value = 0x01ff009f, 380 .eesipr_value = 0x01ff009f,
381 381
382 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 382 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
383 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | 383 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
384 EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, 384 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
385 EESR_ECI,
385 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, 386 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
386 387
387 .apr = 1, 388 .apr = 1,
@@ -427,8 +428,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
427 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f, 428 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,
428 429
429 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 430 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
430 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | 431 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
431 EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, 432 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
433 EESR_ECI,
432 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, 434 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
433 435
434 .apr = 1, 436 .apr = 1,
@@ -478,8 +480,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
478 .rmcr_value = 0x00000001, 480 .rmcr_value = 0x00000001,
479 481
480 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 482 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
481 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | 483 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
482 EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, 484 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
485 EESR_ECI,
483 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, 486 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
484 487
485 .apr = 1, 488 .apr = 1,
@@ -592,9 +595,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
592 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 595 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
593 596
594 .tx_check = EESR_TC1 | EESR_FTC, 597 .tx_check = EESR_TC1 | EESR_FTC,
595 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ 598 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
596 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ 599 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
597 EESR_ECI, 600 EESR_TDE | EESR_ECI,
598 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ 601 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
599 EESR_TFE, 602 EESR_TFE,
600 .fdr_value = 0x0000072f, 603 .fdr_value = 0x0000072f,
@@ -674,9 +677,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
674 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 677 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
675 678
676 .tx_check = EESR_TC1 | EESR_FTC, 679 .tx_check = EESR_TC1 | EESR_FTC,
677 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ 680 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
678 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ 681 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
679 EESR_ECI, 682 EESR_TDE | EESR_ECI,
680 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ 683 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
681 EESR_TFE, 684 EESR_TFE,
682 685
@@ -811,9 +814,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
811 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 814 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
812 815
813 .tx_check = EESR_TC1 | EESR_FTC, 816 .tx_check = EESR_TC1 | EESR_FTC,
814 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ 817 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
815 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ 818 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
816 EESR_ECI, 819 EESR_TDE | EESR_ECI,
817 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ 820 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
818 EESR_TFE, 821 EESR_TFE,
819 822
@@ -897,8 +900,8 @@ static int sh_eth_check_reset(struct net_device *ndev)
897 mdelay(1); 900 mdelay(1);
898 cnt--; 901 cnt--;
899 } 902 }
900 if (cnt < 0) { 903 if (cnt <= 0) {
901 pr_err("Device reset fail\n"); 904 pr_err("Device reset failed\n");
902 ret = -ETIMEDOUT; 905 ret = -ETIMEDOUT;
903 } 906 }
904 return ret; 907 return ret;
@@ -1401,16 +1404,23 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
1401 desc_status = edmac_to_cpu(mdp, rxdesc->status); 1404 desc_status = edmac_to_cpu(mdp, rxdesc->status);
1402 pkt_len = rxdesc->frame_length; 1405 pkt_len = rxdesc->frame_length;
1403 1406
1404#if defined(CONFIG_ARCH_R8A7740)
1405 desc_status >>= 16;
1406#endif
1407
1408 if (--boguscnt < 0) 1407 if (--boguscnt < 0)
1409 break; 1408 break;
1410 1409
1411 if (!(desc_status & RDFEND)) 1410 if (!(desc_status & RDFEND))
1412 ndev->stats.rx_length_errors++; 1411 ndev->stats.rx_length_errors++;
1413 1412
1413#if defined(CONFIG_ARCH_R8A7740)
1414 /*
1415 * In case of almost all GETHER/ETHERs, the Receive Frame State
1416 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
1417 * bit 0. However, in case of the R8A7740's GETHER, the RFS
1418 * bits are from bit 25 to bit 16. So, the driver needs right
1419 * shifting by 16.
1420 */
1421 desc_status >>= 16;
1422#endif
1423
1414 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | 1424 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1415 RD_RFS5 | RD_RFS6 | RD_RFS10)) { 1425 RD_RFS5 | RD_RFS6 | RD_RFS10)) {
1416 ndev->stats.rx_errors++; 1426 ndev->stats.rx_errors++;
@@ -1542,11 +1552,12 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
1542 1552
1543ignore_link: 1553ignore_link:
1544 if (intr_status & EESR_TWB) { 1554 if (intr_status & EESR_TWB) {
1545 /* Write buck end. unused write back interrupt */ 1555 /* Unused write back interrupt */
1546 if (intr_status & EESR_TABT) /* Transmit Abort int */ 1556 if (intr_status & EESR_TABT) { /* Transmit Abort int */
1547 ndev->stats.tx_aborted_errors++; 1557 ndev->stats.tx_aborted_errors++;
1548 if (netif_msg_tx_err(mdp)) 1558 if (netif_msg_tx_err(mdp))
1549 dev_err(&ndev->dev, "Transmit Abort\n"); 1559 dev_err(&ndev->dev, "Transmit Abort\n");
1560 }
1550 } 1561 }
1551 1562
1552 if (intr_status & EESR_RABT) { 1563 if (intr_status & EESR_RABT) {
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 1ddc9f235bcb..62689a5823be 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -253,7 +253,7 @@ enum EESR_BIT {
253 253
254#define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \ 254#define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \
255 EESR_RTO) 255 EESR_RTO)
256#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | \ 256#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \
257 EESR_RDE | EESR_RFRMER | EESR_ADE | \ 257 EESR_RDE | EESR_RFRMER | EESR_ADE | \
258 EESR_TFE | EESR_TDE | EESR_ECI) 258 EESR_TFE | EESR_TDE | EESR_ECI)
259#define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \ 259#define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 39e4cb39de29..4a14a940c65e 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2139,7 +2139,7 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
2139 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2139 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2140 return sprintf(buf, "%d\n", efx->phy_type); 2140 return sprintf(buf, "%d\n", efx->phy_type);
2141} 2141}
2142static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL); 2142static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
2143 2143
2144static int efx_register_netdev(struct efx_nic *efx) 2144static int efx_register_netdev(struct efx_nic *efx)
2145{ 2145{
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 7788fbe44f0a..95176979b2d2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -297,8 +297,8 @@ struct dma_features {
297#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */ 297#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
298 298
299/* Default LPI timers */ 299/* Default LPI timers */
300#define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8 300#define STMMAC_DEFAULT_LIT_LS 0x3E8
301#define STMMAC_DEFAULT_TWT_LS_TIMER 0x0 301#define STMMAC_DEFAULT_TWT_LS 0x0
302 302
303#define STMMAC_CHAIN_MODE 0x1 303#define STMMAC_CHAIN_MODE 0x1
304#define STMMAC_RING_MODE 0x2 304#define STMMAC_RING_MODE 0x2
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 618446ae1ec1..e9eab29db7be 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -130,7 +130,7 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
130static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; 130static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
131module_param(eee_timer, int, S_IRUGO | S_IWUSR); 131module_param(eee_timer, int, S_IRUGO | S_IWUSR);
132MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); 132MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
133#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x)) 133#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
134 134
135/* By default the driver will use the ring mode to manage tx and rx descriptors 135/* By default the driver will use the ring mode to manage tx and rx descriptors
136 * but passing this value so user can force to use the chain instead of the ring 136 * but passing this value so user can force to use the chain instead of the ring
@@ -288,7 +288,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
288 struct stmmac_priv *priv = (struct stmmac_priv *)arg; 288 struct stmmac_priv *priv = (struct stmmac_priv *)arg;
289 289
290 stmmac_enable_eee_mode(priv); 290 stmmac_enable_eee_mode(priv);
291 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer)); 291 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
292} 292}
293 293
294/** 294/**
@@ -304,22 +304,34 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
304{ 304{
305 bool ret = false; 305 bool ret = false;
306 306
307 /* Using PCS we cannot dial with the phy registers at this stage
308 * so we do not support extra feature like EEE.
309 */
310 if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
311 (priv->pcs == STMMAC_PCS_RTBI))
312 goto out;
313
307 /* MAC core supports the EEE feature. */ 314 /* MAC core supports the EEE feature. */
308 if (priv->dma_cap.eee) { 315 if (priv->dma_cap.eee) {
309 /* Check if the PHY supports EEE */ 316 /* Check if the PHY supports EEE */
310 if (phy_init_eee(priv->phydev, 1)) 317 if (phy_init_eee(priv->phydev, 1))
311 goto out; 318 goto out;
312 319
313 priv->eee_active = 1; 320 if (!priv->eee_active) {
314 init_timer(&priv->eee_ctrl_timer); 321 priv->eee_active = 1;
315 priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer; 322 init_timer(&priv->eee_ctrl_timer);
316 priv->eee_ctrl_timer.data = (unsigned long)priv; 323 priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
317 priv->eee_ctrl_timer.expires = STMMAC_LPI_TIMER(eee_timer); 324 priv->eee_ctrl_timer.data = (unsigned long)priv;
318 add_timer(&priv->eee_ctrl_timer); 325 priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
319 326 add_timer(&priv->eee_ctrl_timer);
320 priv->hw->mac->set_eee_timer(priv->ioaddr, 327
321 STMMAC_DEFAULT_LIT_LS_TIMER, 328 priv->hw->mac->set_eee_timer(priv->ioaddr,
322 priv->tx_lpi_timer); 329 STMMAC_DEFAULT_LIT_LS,
330 priv->tx_lpi_timer);
331 } else
332 /* Set HW EEE according to the speed */
333 priv->hw->mac->set_eee_pls(priv->ioaddr,
334 priv->phydev->link);
323 335
324 pr_info("stmmac: Energy-Efficient Ethernet initialized\n"); 336 pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
325 337
@@ -329,20 +341,6 @@ out:
329 return ret; 341 return ret;
330} 342}
331 343
332/**
333 * stmmac_eee_adjust: adjust HW EEE according to the speed
334 * @priv: driver private structure
335 * Description:
336 * When the EEE has been already initialised we have to
337 * modify the PLS bit in the LPI ctrl & status reg according
338 * to the PHY link status. For this reason.
339 */
340static void stmmac_eee_adjust(struct stmmac_priv *priv)
341{
342 if (priv->eee_enabled)
343 priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
344}
345
346/* stmmac_get_tx_hwtstamp: get HW TX timestamps 344/* stmmac_get_tx_hwtstamp: get HW TX timestamps
347 * @priv: driver private structure 345 * @priv: driver private structure
348 * @entry : descriptor index to be used. 346 * @entry : descriptor index to be used.
@@ -769,7 +767,10 @@ static void stmmac_adjust_link(struct net_device *dev)
769 if (new_state && netif_msg_link(priv)) 767 if (new_state && netif_msg_link(priv))
770 phy_print_status(phydev); 768 phy_print_status(phydev);
771 769
772 stmmac_eee_adjust(priv); 770 /* At this stage, it could be needed to setup the EEE or adjust some
771 * MAC related HW registers.
772 */
773 priv->eee_enabled = stmmac_eee_init(priv);
773 774
774 spin_unlock_irqrestore(&priv->lock, flags); 775 spin_unlock_irqrestore(&priv->lock, flags);
775 776
@@ -1277,7 +1278,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
1277 1278
1278 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { 1279 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1279 stmmac_enable_eee_mode(priv); 1280 stmmac_enable_eee_mode(priv);
1280 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer)); 1281 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1281 } 1282 }
1282 spin_unlock(&priv->tx_lock); 1283 spin_unlock(&priv->tx_lock);
1283} 1284}
@@ -1671,14 +1672,9 @@ static int stmmac_open(struct net_device *dev)
1671 if (priv->phydev) 1672 if (priv->phydev)
1672 phy_start(priv->phydev); 1673 phy_start(priv->phydev);
1673 1674
1674 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER; 1675 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
1675 1676
1676 /* Using PCS we cannot dial with the phy registers at this stage 1677 priv->eee_enabled = stmmac_eee_init(priv);
1677 * so we do not support extra feature like EEE.
1678 */
1679 if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
1680 priv->pcs != STMMAC_PCS_RTBI)
1681 priv->eee_enabled = stmmac_eee_init(priv);
1682 1678
1683 stmmac_init_tx_coalesce(priv); 1679 stmmac_init_tx_coalesce(priv);
1684 1680
@@ -1899,7 +1895,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1899 1895
1900#ifdef STMMAC_XMIT_DEBUG 1896#ifdef STMMAC_XMIT_DEBUG
1901 if (netif_msg_pktdata(priv)) { 1897 if (netif_msg_pktdata(priv)) {
1902 pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d" 1898 pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d",
1903 __func__, (priv->cur_tx % txsize), 1899 __func__, (priv->cur_tx % txsize),
1904 (priv->dirty_tx % txsize), entry, first, nfrags); 1900 (priv->dirty_tx % txsize), entry, first, nfrags);
1905 if (priv->extend_desc) 1901 if (priv->extend_desc)
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 21a5b291b4b3..d1a769f35f9d 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1679,7 +1679,7 @@ static int cpsw_probe(struct platform_device *pdev)
1679 priv->rx_packet_max = max(rx_packet_max, 128); 1679 priv->rx_packet_max = max(rx_packet_max, 128);
1680 priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL); 1680 priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
1681 priv->irq_enabled = true; 1681 priv->irq_enabled = true;
1682 if (!ndev) { 1682 if (!priv->cpts) {
1683 pr_err("error allocating cpts\n"); 1683 pr_err("error allocating cpts\n");
1684 goto clean_ndev_ret; 1684 goto clean_ndev_ret;
1685 } 1685 }
@@ -1973,9 +1973,12 @@ static int cpsw_suspend(struct device *dev)
1973{ 1973{
1974 struct platform_device *pdev = to_platform_device(dev); 1974 struct platform_device *pdev = to_platform_device(dev);
1975 struct net_device *ndev = platform_get_drvdata(pdev); 1975 struct net_device *ndev = platform_get_drvdata(pdev);
1976 struct cpsw_priv *priv = netdev_priv(ndev);
1976 1977
1977 if (netif_running(ndev)) 1978 if (netif_running(ndev))
1978 cpsw_ndo_stop(ndev); 1979 cpsw_ndo_stop(ndev);
1980 soft_reset("sliver 0", &priv->slaves[0].sliver->soft_reset);
1981 soft_reset("sliver 1", &priv->slaves[1].sliver->soft_reset);
1979 pm_runtime_put_sync(&pdev->dev); 1982 pm_runtime_put_sync(&pdev->dev);
1980 1983
1981 return 0; 1984 return 0;
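[cpsw.c note] The cpsw_probe() hunk above fixes a copy-and-paste error: the NULL check after devm_kzalloc() tested ndev instead of the buffer that had just been allocated, so an allocation failure went unnoticed. A minimal sketch of the intended pattern (names taken from the hunk; the unwind label is as shown above):

    priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
    if (!priv->cpts) {              /* test the pointer just allocated, not ndev */
            pr_err("error allocating cpts\n");
            goto clean_ndev_ret;    /* unwind as in the hunk above */
    }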
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 49dfd592ac1e..053c84fd0853 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -705,6 +705,13 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
705 } 705 }
706 706
707 buffer = dma_map_single(ctlr->dev, data, len, chan->dir); 707 buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
708 ret = dma_mapping_error(ctlr->dev, buffer);
709 if (ret) {
710 cpdma_desc_free(ctlr->pool, desc, 1);
711 ret = -EINVAL;
712 goto unlock_ret;
713 }
714
708 mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP; 715 mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
709 cpdma_desc_to_port(chan, mode, directed); 716 cpdma_desc_to_port(chan, mode, directed);
710 717
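[davinci_cpdma.c note] The cpdma_chan_submit() hunk adds the check that the DMA API requires after every dma_map_single(): the returned handle must be validated with dma_mapping_error() before it is handed to hardware, and the descriptor must be released on failure. A short sketch of the pattern, assuming a hypothetical submit helper with a device pointer dev (the descriptor cleanup stays with the caller, as in the hunk):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Sketch only: map a buffer for transmit and bail out cleanly if the
     * IOMMU/SWIOTLB could not provide a mapping. */
    static int example_map_for_tx(struct device *dev, void *data, size_t len,
                                  dma_addr_t *handle)
    {
            dma_addr_t buffer;

            buffer = dma_map_single(dev, data, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, buffer))
                    return -EINVAL;   /* caller frees its descriptor, as above */

            *handle = buffer;
            return 0;
    }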
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 12aec173564c..c47f0dbcebb5 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -449,10 +449,9 @@ static int davinci_mdio_suspend(struct device *dev)
449 __raw_writel(ctrl, &data->regs->control); 449 __raw_writel(ctrl, &data->regs->control);
450 wait_for_idle(data); 450 wait_for_idle(data);
451 451
452 pm_runtime_put_sync(data->dev);
453
454 data->suspended = true; 452 data->suspended = true;
455 spin_unlock(&data->lock); 453 spin_unlock(&data->lock);
454 pm_runtime_put_sync(data->dev);
456 455
457 return 0; 456 return 0;
458} 457}
@@ -460,15 +459,12 @@ static int davinci_mdio_suspend(struct device *dev)
460static int davinci_mdio_resume(struct device *dev) 459static int davinci_mdio_resume(struct device *dev)
461{ 460{
462 struct davinci_mdio_data *data = dev_get_drvdata(dev); 461 struct davinci_mdio_data *data = dev_get_drvdata(dev);
463 u32 ctrl;
464 462
465 spin_lock(&data->lock);
466 pm_runtime_get_sync(data->dev); 463 pm_runtime_get_sync(data->dev);
467 464
465 spin_lock(&data->lock);
468 /* restart the scan state machine */ 466 /* restart the scan state machine */
469 ctrl = __raw_readl(&data->regs->control); 467 __davinci_mdio_reset(data);
470 ctrl |= CONTROL_ENABLE;
471 __raw_writel(ctrl, &data->regs->control);
472 468
473 data->suspended = false; 469 data->suspended = false;
474 spin_unlock(&data->lock); 470 spin_unlock(&data->lock);
@@ -477,8 +473,8 @@ static int davinci_mdio_resume(struct device *dev)
477} 473}
478 474
479static const struct dev_pm_ops davinci_mdio_pm_ops = { 475static const struct dev_pm_ops davinci_mdio_pm_ops = {
480 .suspend = davinci_mdio_suspend, 476 .suspend_late = davinci_mdio_suspend,
481 .resume = davinci_mdio_resume, 477 .resume_early = davinci_mdio_resume,
482}; 478};
483 479
484static const struct of_device_id davinci_mdio_of_mtable[] = { 480static const struct of_device_id davinci_mdio_of_mtable[] = {
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 919b983114e9..b7268b3dae77 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -946,7 +946,8 @@ static int xemaclite_open(struct net_device *dev)
946 phy_write(lp->phy_dev, MII_CTRL1000, 0); 946 phy_write(lp->phy_dev, MII_CTRL1000, 0);
947 947
948 /* Advertise only 10 and 100mbps full/half duplex speeds */ 948 /* Advertise only 10 and 100mbps full/half duplex speeds */
949 phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL); 949 phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL |
950 ADVERTISE_CSMA);
950 951
951 /* Restart auto negotiation */ 952 /* Restart auto negotiation */
952 bmcr = phy_read(lp->phy_dev, MII_BMCR); 953 bmcr = phy_read(lp->phy_dev, MII_BMCR);
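[xilinx_emaclite.c note] The xemaclite_open() change touches a subtle MII detail: the selector bit ADVERTISE_CSMA is mandatory in the advertisement register, and writing MII_ADVERTISE with just ADVERTISE_ALL clears it, which can confuse some link partners. A small sketch of the corrected sequence (phy_dev naming follows the hunk; the restart step is the usual BMCR idiom, assumed here rather than copied from the driver):

    #include <linux/mii.h>
    #include <linux/phy.h>

    /* Sketch: advertise 10/100 full/half duplex while keeping the mandatory
     * IEEE 802.3 selector bit, then restart autonegotiation. */
    static void example_advertise_10_100(struct phy_device *phy_dev)
    {
            int bmcr;

            phy_write(phy_dev, MII_ADVERTISE, ADVERTISE_ALL | ADVERTISE_CSMA);

            bmcr = phy_read(phy_dev, MII_BMCR);
            phy_write(phy_dev, MII_BMCR, bmcr | BMCR_ANENABLE | BMCR_ANRESTART);
    }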
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 088c55496191..4dccead586be 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -31,6 +31,7 @@
31#include <linux/inetdevice.h> 31#include <linux/inetdevice.h>
32#include <linux/etherdevice.h> 32#include <linux/etherdevice.h>
33#include <linux/skbuff.h> 33#include <linux/skbuff.h>
34#include <linux/if_vlan.h>
34#include <linux/in.h> 35#include <linux/in.h>
35#include <linux/slab.h> 36#include <linux/slab.h>
36#include <net/arp.h> 37#include <net/arp.h>
@@ -284,7 +285,9 @@ int netvsc_recv_callback(struct hv_device *device_obj,
284 285
285 skb->protocol = eth_type_trans(skb, net); 286 skb->protocol = eth_type_trans(skb, net);
286 skb->ip_summed = CHECKSUM_NONE; 287 skb->ip_summed = CHECKSUM_NONE;
287 skb->vlan_tci = packet->vlan_tci; 288 if (packet->vlan_tci & VLAN_TAG_PRESENT)
289 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
290 packet->vlan_tci);
288 291
289 net->stats.rx_packets++; 292 net->stats.rx_packets++;
290 net->stats.rx_bytes += packet->total_data_buflen; 293 net->stats.rx_bytes += packet->total_data_buflen;
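[netvsc_drv.c note] The receive hunk stops copying the host-supplied VLAN tag into skb->vlan_tci unconditionally: the tag is only meaningful when VLAN_TAG_PRESENT is set, and the accel helper records both the tag and the 802.1Q protocol. A minimal sketch of the receive-side pattern (vlan_tci as delivered by the lower layer, per the hunk):

    #include <linux/if_vlan.h>
    #include <linux/if_ether.h>
    #include <linux/skbuff.h>

    /* Sketch: attach a hardware-accelerated VLAN tag to a received skb only
     * when the lower layer actually flagged one as present. */
    static void example_set_rx_vlan(struct sk_buff *skb, u16 vlan_tci)
    {
            if (vlan_tci & VLAN_TAG_PRESENT)
                    __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
    }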
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 1c502bb0c916..6e91931a1c2c 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -853,18 +853,24 @@ static int macvlan_changelink(struct net_device *dev,
853 struct nlattr *tb[], struct nlattr *data[]) 853 struct nlattr *tb[], struct nlattr *data[])
854{ 854{
855 struct macvlan_dev *vlan = netdev_priv(dev); 855 struct macvlan_dev *vlan = netdev_priv(dev);
856 if (data && data[IFLA_MACVLAN_MODE]) 856
857 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
858 if (data && data[IFLA_MACVLAN_FLAGS]) { 857 if (data && data[IFLA_MACVLAN_FLAGS]) {
859 __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); 858 __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
860 bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC; 859 bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
861 860 if (vlan->port->passthru && promisc) {
862 if (promisc && (flags & MACVLAN_FLAG_NOPROMISC)) 861 int err;
863 dev_set_promiscuity(vlan->lowerdev, -1); 862
864 else if (promisc && !(flags & MACVLAN_FLAG_NOPROMISC)) 863 if (flags & MACVLAN_FLAG_NOPROMISC)
865 dev_set_promiscuity(vlan->lowerdev, 1); 864 err = dev_set_promiscuity(vlan->lowerdev, -1);
865 else
866 err = dev_set_promiscuity(vlan->lowerdev, 1);
867 if (err < 0)
868 return err;
869 }
866 vlan->flags = flags; 870 vlan->flags = flags;
867 } 871 }
872 if (data && data[IFLA_MACVLAN_MODE])
873 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
868 return 0; 874 return 0;
869} 875}
870 876
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 59e9605de316..b6dd6a75919a 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -524,8 +524,10 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
524 return -EMSGSIZE; 524 return -EMSGSIZE;
525 num_pages = get_user_pages_fast(base, size, 0, &page[i]); 525 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
526 if (num_pages != size) { 526 if (num_pages != size) {
527 for (i = 0; i < num_pages; i++) 527 int j;
528 put_page(page[i]); 528
529 for (j = 0; j < num_pages; j++)
530 put_page(page[i + j]);
529 return -EFAULT; 531 return -EFAULT;
530 } 532 }
531 truesize = size * PAGE_SIZE; 533 truesize = size * PAGE_SIZE;
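[macvtap.c note] The zerocopy fix here (mirrored in tun.c further down) addresses a classic partial-failure bug: when get_user_pages_fast() pins fewer pages than requested, the cleanup loop reused the outer index i and therefore released the wrong entries. A standalone sketch of the corrected cleanup, with hypothetical variable names:

    #include <linux/mm.h>
    #include <linux/errno.h>

    /* Sketch: pin 'n' user pages starting at 'base' into pages[first..];
     * on partial success release exactly the pages that were pinned and
     * report the fault. */
    static int example_pin_pages(unsigned long base, int n,
                                 struct page **pages, int first)
    {
            int pinned = get_user_pages_fast(base, n, 0, &pages[first]);

            if (pinned != n) {
                    int j;

                    for (j = 0; j < pinned; j++)   /* only what was pinned */
                            put_page(pages[first + j]);
                    return -EFAULT;
            }
            return 0;
    }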
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index c14f14741b3f..38f0b312ff85 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1044,7 +1044,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1044 adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); 1044 adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
1045 lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); 1045 lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
1046 idx = phy_find_setting(phydev->speed, phydev->duplex); 1046 idx = phy_find_setting(phydev->speed, phydev->duplex);
1047 if ((lp & adv & settings[idx].setting)) 1047 if (!(lp & adv & settings[idx].setting))
1048 goto eee_exit; 1048 goto eee_exit;
1049 1049
1050 if (clk_stop_enable) { 1050 if (clk_stop_enable) {
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 7c43261975bd..b3051052f3ad 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1092,8 +1092,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
1092 } 1092 }
1093 1093
1094 port->index = -1; 1094 port->index = -1;
1095 team_port_enable(team, port);
1096 list_add_tail_rcu(&port->list, &team->port_list); 1095 list_add_tail_rcu(&port->list, &team->port_list);
1096 team_port_enable(team, port);
1097 __team_compute_features(team); 1097 __team_compute_features(team);
1098 __team_port_change_port_added(port, !!netif_carrier_ok(port_dev)); 1098 __team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
1099 __team_options_change_check(team); 1099 __team_options_change_check(team);
@@ -2374,7 +2374,8 @@ static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
2374 bool incomplete; 2374 bool incomplete;
2375 int i; 2375 int i;
2376 2376
2377 port = list_first_entry(&team->port_list, struct team_port, list); 2377 port = list_first_entry_or_null(&team->port_list,
2378 struct team_port, list);
2378 2379
2379start_again: 2380start_again:
2380 err = __send_and_alloc_skb(&skb, team, portid, send_func); 2381 err = __send_and_alloc_skb(&skb, team, portid, send_func);
@@ -2402,8 +2403,8 @@ start_again:
2402 err = team_nl_fill_one_port_get(skb, one_port); 2403 err = team_nl_fill_one_port_get(skb, one_port);
2403 if (err) 2404 if (err)
2404 goto errout; 2405 goto errout;
2405 } else { 2406 } else if (port) {
2406 list_for_each_entry(port, &team->port_list, list) { 2407 list_for_each_entry_from(port, &team->port_list, list) {
2407 err = team_nl_fill_one_port_get(skb, port); 2408 err = team_nl_fill_one_port_get(skb, port);
2408 if (err) { 2409 if (err) {
2409 if (err == -EMSGSIZE) { 2410 if (err == -EMSGSIZE) {
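[team.c note] The team_nl_send_port_list_get() hunk guards against an empty port list: list_first_entry() on an empty list yields a bogus pointer, while list_first_entry_or_null() returns NULL and the dump can then resume from a known entry with list_for_each_entry_from(). A small sketch of that idiom with a hypothetical item type:

    #include <linux/list.h>

    struct example_item {
            struct list_head list;
            int value;
    };

    /* Sketch: walk a list that may be empty, resuming from the first entry
     * (or doing nothing at all) instead of dereferencing the list head. */
    static int example_sum(struct list_head *head)
    {
            struct example_item *item;
            int sum = 0;

            item = list_first_entry_or_null(head, struct example_item, list);
            if (item)
                    list_for_each_entry_from(item, head, list)
                            sum += item->value;

            return sum;
    }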
diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c
index 5ca14d463ba7..7f032e211343 100644
--- a/drivers/net/team/team_mode_random.c
+++ b/drivers/net/team/team_mode_random.c
@@ -28,6 +28,8 @@ static bool rnd_transmit(struct team *team, struct sk_buff *skb)
28 28
29 port_index = random_N(team->en_port_count); 29 port_index = random_N(team->en_port_count);
30 port = team_get_port_by_index_rcu(team, port_index); 30 port = team_get_port_by_index_rcu(team, port_index);
31 if (unlikely(!port))
32 goto drop;
31 port = team_get_first_port_txable_rcu(team, port); 33 port = team_get_first_port_txable_rcu(team, port);
32 if (unlikely(!port)) 34 if (unlikely(!port))
33 goto drop; 35 goto drop;
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index d268e4de781b..472623f8ce3d 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -32,6 +32,8 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb)
32 32
33 port_index = rr_priv(team)->sent_packets++ % team->en_port_count; 33 port_index = rr_priv(team)->sent_packets++ % team->en_port_count;
34 port = team_get_port_by_index_rcu(team, port_index); 34 port = team_get_port_by_index_rcu(team, port_index);
35 if (unlikely(!port))
36 goto drop;
35 port = team_get_first_port_txable_rcu(team, port); 37 port = team_get_first_port_txable_rcu(team, port);
36 if (unlikely(!port)) 38 if (unlikely(!port))
37 goto drop; 39 goto drop;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index f042b0373e5d..9c61f8734a40 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -352,7 +352,7 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
352 u32 numqueues = 0; 352 u32 numqueues = 0;
353 353
354 rcu_read_lock(); 354 rcu_read_lock();
355 numqueues = tun->numqueues; 355 numqueues = ACCESS_ONCE(tun->numqueues);
356 356
357 txq = skb_get_rxhash(skb); 357 txq = skb_get_rxhash(skb);
358 if (txq) { 358 if (txq) {
@@ -1010,8 +1010,10 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
1010 return -EMSGSIZE; 1010 return -EMSGSIZE;
1011 num_pages = get_user_pages_fast(base, size, 0, &page[i]); 1011 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
1012 if (num_pages != size) { 1012 if (num_pages != size) {
1013 for (i = 0; i < num_pages; i++) 1013 int j;
1014 put_page(page[i]); 1014
1015 for (j = 0; j < num_pages; j++)
1016 put_page(page[i + j]);
1015 return -EFAULT; 1017 return -EFAULT;
1016 } 1018 }
1017 truesize = size * PAGE_SIZE; 1019 truesize = size * PAGE_SIZE;
@@ -1585,6 +1587,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1585 else 1587 else
1586 return -EINVAL; 1588 return -EINVAL;
1587 1589
1590 if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
1591 !!(tun->flags & TUN_TAP_MQ))
1592 return -EINVAL;
1593
1588 if (tun_not_capable(tun)) 1594 if (tun_not_capable(tun))
1589 return -EPERM; 1595 return -EPERM;
1590 err = security_tun_dev_open(tun->security); 1596 err = security_tun_dev_open(tun->security);
@@ -2155,6 +2161,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
2155 set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags); 2161 set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
2156 INIT_LIST_HEAD(&tfile->next); 2162 INIT_LIST_HEAD(&tfile->next);
2157 2163
2164 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
2165
2158 return 0; 2166 return 0;
2159} 2167}
2160 2168
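[tun.c note] The tun_select_queue() change reads tun->numqueues through ACCESS_ONCE() because the value can change concurrently (queues attach and detach under RCU) and the queue index is later computed modulo it; forcing a single load keeps the zero check and the use consistent. A tiny sketch of the idiom (ACCESS_ONCE() is the 3.10-era spelling; later kernels use READ_ONCE()):

    #include <linux/compiler.h>
    #include <linux/types.h>

    /* Sketch: take one snapshot of a concurrently-updated counter so the
     * zero check and the modulo operate on the same value. */
    static unsigned int example_pick_queue(const unsigned int *numqueues, u32 hash)
    {
            unsigned int n = ACCESS_ONCE(*numqueues);

            return n ? hash % n : 0;
    }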
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 078795fe6e31..04ee044dde51 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -627,6 +627,12 @@ static const struct usb_device_id products [] = {
627 .driver_info = 0, 627 .driver_info = 0,
628}, 628},
629 629
630/* Huawei E1820 - handled by qmi_wwan */
631{
632 USB_DEVICE_INTERFACE_NUMBER(HUAWEI_VENDOR_ID, 0x14ac, 1),
633 .driver_info = 0,
634},
635
630/* Realtek RTL8152 Based USB 2.0 Ethernet Adapters */ 636/* Realtek RTL8152 Based USB 2.0 Ethernet Adapters */
631#if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE) 637#if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE)
632{ 638{
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 86adfa0a912e..56459215a22b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -519,6 +519,7 @@ static const struct usb_device_id products[] = {
519 /* 3. Combined interface devices matching on interface number */ 519 /* 3. Combined interface devices matching on interface number */
520 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ 520 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
521 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 521 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
522 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
522 {QMI_FIXED_INTF(0x19d2, 0x0002, 1)}, 523 {QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
523 {QMI_FIXED_INTF(0x19d2, 0x0012, 1)}, 524 {QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
524 {QMI_FIXED_INTF(0x19d2, 0x0017, 3)}, 525 {QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
@@ -589,7 +590,13 @@ static const struct usb_device_id products[] = {
589 {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ 590 {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
590 {QMI_GOBI1K_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */ 591 {QMI_GOBI1K_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
591 {QMI_GOBI1K_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */ 592 {QMI_GOBI1K_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */
592 {QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */ 593 {QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel/Verizon USB-1000 */
594 {QMI_GOBI1K_DEVICE(0x1410, 0xa002)}, /* Novatel Gobi Modem device */
595 {QMI_GOBI1K_DEVICE(0x1410, 0xa003)}, /* Novatel Gobi Modem device */
596 {QMI_GOBI1K_DEVICE(0x1410, 0xa004)}, /* Novatel Gobi Modem device */
597 {QMI_GOBI1K_DEVICE(0x1410, 0xa005)}, /* Novatel Gobi Modem device */
598 {QMI_GOBI1K_DEVICE(0x1410, 0xa006)}, /* Novatel Gobi Modem device */
599 {QMI_GOBI1K_DEVICE(0x1410, 0xa007)}, /* Novatel Gobi Modem device */
593 {QMI_GOBI1K_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */ 600 {QMI_GOBI1K_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
594 {QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */ 601 {QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
595 {QMI_GOBI1K_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */ 602 {QMI_GOBI1K_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 3b1d2ee7156b..57325f356d4f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -565,18 +565,22 @@ skip:
565 565
566/* Watch incoming packets to learn mapping between Ethernet address 566/* Watch incoming packets to learn mapping between Ethernet address
567 * and Tunnel endpoint. 567 * and Tunnel endpoint.
 568 * Return true if packet is bogus and should be dropped.
568 */ 569 */
569static void vxlan_snoop(struct net_device *dev, 570static bool vxlan_snoop(struct net_device *dev,
570 __be32 src_ip, const u8 *src_mac) 571 __be32 src_ip, const u8 *src_mac)
571{ 572{
572 struct vxlan_dev *vxlan = netdev_priv(dev); 573 struct vxlan_dev *vxlan = netdev_priv(dev);
573 struct vxlan_fdb *f; 574 struct vxlan_fdb *f;
574 int err;
575 575
576 f = vxlan_find_mac(vxlan, src_mac); 576 f = vxlan_find_mac(vxlan, src_mac);
577 if (likely(f)) { 577 if (likely(f)) {
578 if (likely(f->remote.remote_ip == src_ip)) 578 if (likely(f->remote.remote_ip == src_ip))
579 return; 579 return false;
580
581 /* Don't migrate static entries, drop packets */
582 if (f->state & NUD_NOARP)
583 return true;
580 584
581 if (net_ratelimit()) 585 if (net_ratelimit())
582 netdev_info(dev, 586 netdev_info(dev,
@@ -588,14 +592,19 @@ static void vxlan_snoop(struct net_device *dev,
588 } else { 592 } else {
589 /* learned new entry */ 593 /* learned new entry */
590 spin_lock(&vxlan->hash_lock); 594 spin_lock(&vxlan->hash_lock);
591 err = vxlan_fdb_create(vxlan, src_mac, src_ip, 595
592 NUD_REACHABLE, 596 /* close off race between vxlan_flush and incoming packets */
593 NLM_F_EXCL|NLM_F_CREATE, 597 if (netif_running(dev))
594 vxlan->dst_port, 598 vxlan_fdb_create(vxlan, src_mac, src_ip,
595 vxlan->default_dst.remote_vni, 599 NUD_REACHABLE,
596 0, NTF_SELF); 600 NLM_F_EXCL|NLM_F_CREATE,
601 vxlan->dst_port,
602 vxlan->default_dst.remote_vni,
603 0, NTF_SELF);
597 spin_unlock(&vxlan->hash_lock); 604 spin_unlock(&vxlan->hash_lock);
598 } 605 }
606
607 return false;
599} 608}
600 609
601 610
@@ -727,8 +736,9 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
727 vxlan->dev->dev_addr) == 0) 736 vxlan->dev->dev_addr) == 0)
728 goto drop; 737 goto drop;
729 738
730 if (vxlan->flags & VXLAN_F_LEARN) 739 if ((vxlan->flags & VXLAN_F_LEARN) &&
731 vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source); 740 vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source))
741 goto drop;
732 742
733 __skb_tunnel_rx(skb, vxlan->dev); 743 __skb_tunnel_rx(skb, vxlan->dev);
734 skb_reset_network_header(skb); 744 skb_reset_network_header(skb);
@@ -1151,9 +1161,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
1151 struct sk_buff *skb1; 1161 struct sk_buff *skb1;
1152 1162
1153 skb1 = skb_clone(skb, GFP_ATOMIC); 1163 skb1 = skb_clone(skb, GFP_ATOMIC);
1154 rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc); 1164 if (skb1) {
1155 if (rc == NETDEV_TX_OK) 1165 rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
1156 rc = rc1; 1166 if (rc == NETDEV_TX_OK)
1167 rc = rc1;
1168 }
1157 } 1169 }
1158 1170
1159 rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc); 1171 rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc);
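[vxlan.c note] Besides teaching vxlan_snoop() to drop packets whose source MAC collides with a static FDB entry, the vxlan_xmit() hunk adds the missing NULL check on skb_clone(): under memory pressure the clone can fail, and the old code handed the NULL straight to the transmit path. A condensed restatement of the fixed fan-out step, not a standalone unit (rc, rc1, dev, rdst and did_rsc come from the surrounding loop in the hunk above):

    skb1 = skb_clone(skb, GFP_ATOMIC);
    if (skb1) {                        /* clone may fail under memory pressure */
            rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
            if (rc == NETDEV_TX_OK)
                    rc = rc1;
    }
    /* the original skb is still sent to the last destination afterwards */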
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 147614ed86aa..6a8a382c5f4c 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -384,21 +384,37 @@ static int dlci_del(struct dlci_add *dlci)
384 struct frad_local *flp; 384 struct frad_local *flp;
385 struct net_device *master, *slave; 385 struct net_device *master, *slave;
386 int err; 386 int err;
387 bool found = false;
388
389 rtnl_lock();
387 390
388 /* validate slave device */ 391 /* validate slave device */
389 master = __dev_get_by_name(&init_net, dlci->devname); 392 master = __dev_get_by_name(&init_net, dlci->devname);
390 if (!master) 393 if (!master) {
391 return -ENODEV; 394 err = -ENODEV;
395 goto out;
396 }
397
398 list_for_each_entry(dlp, &dlci_devs, list) {
399 if (dlp->master == master) {
400 found = true;
401 break;
402 }
403 }
404 if (!found) {
405 err = -ENODEV;
406 goto out;
407 }
392 408
393 if (netif_running(master)) { 409 if (netif_running(master)) {
394 return -EBUSY; 410 err = -EBUSY;
411 goto out;
395 } 412 }
396 413
397 dlp = netdev_priv(master); 414 dlp = netdev_priv(master);
398 slave = dlp->slave; 415 slave = dlp->slave;
399 flp = netdev_priv(slave); 416 flp = netdev_priv(slave);
400 417
401 rtnl_lock();
402 err = (*flp->deassoc)(slave, master); 418 err = (*flp->deassoc)(slave, master);
403 if (!err) { 419 if (!err) {
404 list_del(&dlp->list); 420 list_del(&dlp->list);
@@ -407,8 +423,8 @@ static int dlci_del(struct dlci_add *dlci)
407 423
408 dev_put(slave); 424 dev_put(slave);
409 } 425 }
426out:
410 rtnl_unlock(); 427 rtnl_unlock();
411
412 return err; 428 return err;
413} 429}
414 430
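[dlci.c note] The dlci_del() rework is a locking cleanup: take rtnl_lock() before any validation so the device cannot disappear between lookup and use, confirm the device really belongs to this driver by walking dlci_devs, and funnel every failure through a single out: label that drops the lock. A condensed, self-contained sketch of the resulting shape (frame-relay teardown specifics elided; the helper name is hypothetical):

    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>
    #include <linux/errno.h>
    #include <net/net_namespace.h>

    /* Sketch: validate and tear down a device entirely under the RTNL,
     * with one exit path that releases the lock. */
    static int example_del_by_name(const char *devname)
    {
            struct net_device *master;
            int err = 0;

            rtnl_lock();
            master = __dev_get_by_name(&init_net, devname);
            if (!master) {
                    err = -ENODEV;
                    goto out;
            }
            if (netif_running(master)) {
                    err = -EBUSY;
                    goto out;
            }
            /* ... device-specific teardown while still holding the RTNL ... */
    out:
            rtnl_unlock();
            return err;
    }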
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index f3dc124c60c7..3c2cbc9d6295 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -92,13 +92,17 @@ config ATH9K_MAC_DEBUG
92 This option enables collection of statistics for Rx/Tx status 92 This option enables collection of statistics for Rx/Tx status
93 data and some other MAC related statistics 93 data and some other MAC related statistics
94 94
95config ATH9K_RATE_CONTROL 95config ATH9K_LEGACY_RATE_CONTROL
96 bool "Atheros ath9k rate control" 96 bool "Atheros ath9k rate control"
97 depends on ATH9K 97 depends on ATH9K
98 default y 98 default n
99 ---help--- 99 ---help---
100 Say Y, if you want to use the ath9k specific rate control 100 Say Y, if you want to use the ath9k specific rate control
101 module instead of minstrel_ht. 101 module instead of minstrel_ht. Be warned that there are various
102 issues with the ath9k RC and minstrel is a more robust algorithm.
103 Note that even if this option is selected, "ath9k_rate_control"
104 has to be passed to mac80211 using the module parameter,
105 ieee80211_default_rc_algo.
102 106
103config ATH9K_HTC 107config ATH9K_HTC
104 tristate "Atheros HTC based wireless cards support" 108 tristate "Atheros HTC based wireless cards support"
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 2ad8f9474ba1..75ee9e7704ce 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -8,7 +8,7 @@ ath9k-y += beacon.o \
8 antenna.o 8 antenna.o
9 9
10ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o 10ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
11ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o 11ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o
12ath9k-$(CONFIG_ATH9K_PCI) += pci.o 12ath9k-$(CONFIG_ATH9K_PCI) += pci.o
13ath9k-$(CONFIG_ATH9K_AHB) += ahb.o 13ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
14ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o 14ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index db5ffada2217..7546b9a7dcbf 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -958,11 +958,11 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
958 {0x0000a074, 0x00000000}, 958 {0x0000a074, 0x00000000},
959 {0x0000a078, 0x00000000}, 959 {0x0000a078, 0x00000000},
960 {0x0000a07c, 0x00000000}, 960 {0x0000a07c, 0x00000000},
961 {0x0000a080, 0x1a1a1a1a}, 961 {0x0000a080, 0x22222229},
962 {0x0000a084, 0x1a1a1a1a}, 962 {0x0000a084, 0x1d1d1d1d},
963 {0x0000a088, 0x1a1a1a1a}, 963 {0x0000a088, 0x1d1d1d1d},
964 {0x0000a08c, 0x1a1a1a1a}, 964 {0x0000a08c, 0x1d1d1d1d},
965 {0x0000a090, 0x171a1a1a}, 965 {0x0000a090, 0x171d1d1d},
966 {0x0000a094, 0x11111717}, 966 {0x0000a094, 0x11111717},
967 {0x0000a098, 0x00030311}, 967 {0x0000a098, 0x00030311},
968 {0x0000a09c, 0x00000000}, 968 {0x0000a09c, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 54ba42f4108a..874f6570bd1c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -68,13 +68,16 @@
68#define AR9300_BASE_ADDR 0x3ff 68#define AR9300_BASE_ADDR 0x3ff
69#define AR9300_BASE_ADDR_512 0x1ff 69#define AR9300_BASE_ADDR_512 0x1ff
70 70
71#define AR9300_OTP_BASE (AR_SREV_9340(ah) ? 0x30000 : 0x14000) 71#define AR9300_OTP_BASE \
72#define AR9300_OTP_STATUS (AR_SREV_9340(ah) ? 0x30018 : 0x15f18) 72 ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000)
73#define AR9300_OTP_STATUS \
74 ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30018 : 0x15f18)
73#define AR9300_OTP_STATUS_TYPE 0x7 75#define AR9300_OTP_STATUS_TYPE 0x7
74#define AR9300_OTP_STATUS_VALID 0x4 76#define AR9300_OTP_STATUS_VALID 0x4
75#define AR9300_OTP_STATUS_ACCESS_BUSY 0x2 77#define AR9300_OTP_STATUS_ACCESS_BUSY 0x2
76#define AR9300_OTP_STATUS_SM_BUSY 0x1 78#define AR9300_OTP_STATUS_SM_BUSY 0x1
77#define AR9300_OTP_READ_DATA (AR_SREV_9340(ah) ? 0x3001c : 0x15f1c) 79#define AR9300_OTP_READ_DATA \
80 ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3001c : 0x15f1c)
78 81
79enum targetPowerHTRates { 82enum targetPowerHTRates {
80 HT_TARGET_RATE_0_8_16, 83 HT_TARGET_RATE_0_8_16,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 2bf6548dd143..e1714d7c9eeb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -334,7 +334,8 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
334 REG_RMW_FIELD(ah, AR_PHY_SPUR_REG, 334 REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
335 AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 1); 335 AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 1);
336 336
337 if (REG_READ_FIELD(ah, AR_PHY_MODE, 337 if (!AR_SREV_9340(ah) &&
338 REG_READ_FIELD(ah, AR_PHY_MODE,
338 AR_PHY_MODE_DYNAMIC) == 0x1) 339 AR_PHY_MODE_DYNAMIC) == 0x1)
339 REG_RMW_FIELD(ah, AR_PHY_SPUR_REG, 340 REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
340 AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 1); 341 AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 1);
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 366002f266f8..42b03dc39d14 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -251,10 +251,9 @@ struct ath_atx_tid {
251 int tidno; 251 int tidno;
252 int baw_head; /* first un-acked tx buffer */ 252 int baw_head; /* first un-acked tx buffer */
253 int baw_tail; /* next unused tx buffer slot */ 253 int baw_tail; /* next unused tx buffer slot */
254 int sched; 254 bool sched;
255 int paused; 255 bool paused;
256 u8 state; 256 bool active;
257 bool stop_cb;
258}; 257};
259 258
260struct ath_node { 259struct ath_node {
@@ -275,10 +274,6 @@ struct ath_node {
275#endif 274#endif
276}; 275};
277 276
278#define AGGR_CLEANUP BIT(1)
279#define AGGR_ADDBA_COMPLETE BIT(2)
280#define AGGR_ADDBA_PROGRESS BIT(3)
281
282struct ath_tx_control { 277struct ath_tx_control {
283 struct ath_txq *txq; 278 struct ath_txq *txq;
284 struct ath_node *an; 279 struct ath_node *an;
@@ -352,8 +347,7 @@ void ath_tx_tasklet(struct ath_softc *sc);
352void ath_tx_edma_tasklet(struct ath_softc *sc); 347void ath_tx_edma_tasklet(struct ath_softc *sc);
353int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, 348int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
354 u16 tid, u16 *ssn); 349 u16 tid, u16 *ssn);
355bool ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid, 350void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
356 bool flush);
357void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid); 351void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
358 352
359void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an); 353void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 0743a47cef8f..62f1b7636c92 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1174,7 +1174,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1174 mutex_lock(&priv->htc_pm_lock); 1174 mutex_lock(&priv->htc_pm_lock);
1175 1175
1176 priv->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE); 1176 priv->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
1177 if (priv->ps_idle) 1177 if (!priv->ps_idle)
1178 chip_reset = true; 1178 chip_reset = true;
1179 1179
1180 mutex_unlock(&priv->htc_pm_lock); 1180 mutex_unlock(&priv->htc_pm_lock);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 7f25da8444fe..15dfefcf2d0f 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1172,6 +1172,7 @@ u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan)
1172static inline void ath9k_hw_set_dma(struct ath_hw *ah) 1172static inline void ath9k_hw_set_dma(struct ath_hw *ah)
1173{ 1173{
1174 struct ath_common *common = ath9k_hw_common(ah); 1174 struct ath_common *common = ath9k_hw_common(ah);
1175 int txbuf_size;
1175 1176
1176 ENABLE_REGWRITE_BUFFER(ah); 1177 ENABLE_REGWRITE_BUFFER(ah);
1177 1178
@@ -1225,13 +1226,17 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
1225 * So set the usable tx buf size also to half to 1226 * So set the usable tx buf size also to half to
1226 * avoid data/delimiter underruns 1227 * avoid data/delimiter underruns
1227 */ 1228 */
1228 REG_WRITE(ah, AR_PCU_TXBUF_CTRL, 1229 txbuf_size = AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE;
1229 AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE); 1230 } else if (AR_SREV_9340_13_OR_LATER(ah)) {
1230 } else if (!AR_SREV_9271(ah)) { 1231 /* Uses fewer entries for AR934x v1.3+ to prevent rx overruns */
1231 REG_WRITE(ah, AR_PCU_TXBUF_CTRL, 1232 txbuf_size = AR_9340_PCU_TXBUF_CTRL_USABLE_SIZE;
1232 AR_PCU_TXBUF_CTRL_USABLE_SIZE); 1233 } else {
1234 txbuf_size = AR_PCU_TXBUF_CTRL_USABLE_SIZE;
1233 } 1235 }
1234 1236
1237 if (!AR_SREV_9271(ah))
1238 REG_WRITE(ah, AR_PCU_TXBUF_CTRL, txbuf_size);
1239
1235 REGWRITE_BUFFER_FLUSH(ah); 1240 REGWRITE_BUFFER_FLUSH(ah);
1236 1241
1237 if (AR_SREV_9300_20_OR_LATER(ah)) 1242 if (AR_SREV_9300_20_OR_LATER(ah))
@@ -1306,9 +1311,13 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1306 AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET; 1311 AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET;
1307 } else { 1312 } else {
1308 tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE); 1313 tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE);
1309 if (tmpReg & 1314 if (AR_SREV_9340(ah))
1310 (AR_INTR_SYNC_LOCAL_TIMEOUT | 1315 tmpReg &= AR9340_INTR_SYNC_LOCAL_TIMEOUT;
1311 AR_INTR_SYNC_RADM_CPL_TIMEOUT)) { 1316 else
1317 tmpReg &= AR_INTR_SYNC_LOCAL_TIMEOUT |
1318 AR_INTR_SYNC_RADM_CPL_TIMEOUT;
1319
1320 if (tmpReg) {
1312 u32 val; 1321 u32 val;
1313 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0); 1322 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
1314 1323
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index aba415103f94..2ba494567777 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -787,8 +787,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
787 hw->wiphy->iface_combinations = if_comb; 787 hw->wiphy->iface_combinations = if_comb;
788 hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); 788 hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
789 789
790 if (AR_SREV_5416(sc->sc_ah)) 790 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
791 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
792 791
793 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 792 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
794 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; 793 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
@@ -830,10 +829,6 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
830 sc->ant_rx = hw->wiphy->available_antennas_rx; 829 sc->ant_rx = hw->wiphy->available_antennas_rx;
831 sc->ant_tx = hw->wiphy->available_antennas_tx; 830 sc->ant_tx = hw->wiphy->available_antennas_tx;
832 831
833#ifdef CONFIG_ATH9K_RATE_CONTROL
834 hw->rate_control_algorithm = "ath9k_rate_control";
835#endif
836
837 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) 832 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
838 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 833 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
839 &sc->sbands[IEEE80211_BAND_2GHZ]; 834 &sc->sbands[IEEE80211_BAND_2GHZ];
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 498fee04afa0..566109a40fb3 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -410,7 +410,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
410 410
411 REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ); 411 REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
412 412
413 if (AR_SREV_9340(ah)) 413 if (AR_SREV_9340(ah) && !AR_SREV_9340_13_OR_LATER(ah))
414 REG_WRITE(ah, AR_DMISC(q), 414 REG_WRITE(ah, AR_DMISC(q),
415 AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1); 415 AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1);
416 else 416 else
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 2382d1262e7f..5092ecae7706 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1709,7 +1709,8 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
1709 flush = true; 1709 flush = true;
1710 case IEEE80211_AMPDU_TX_STOP_CONT: 1710 case IEEE80211_AMPDU_TX_STOP_CONT:
1711 ath9k_ps_wakeup(sc); 1711 ath9k_ps_wakeup(sc);
1712 if (ath_tx_aggr_stop(sc, sta, tid, flush)) 1712 ath_tx_aggr_stop(sc, sta, tid);
1713 if (!flush)
1713 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 1714 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1714 ath9k_ps_restore(sc); 1715 ath9k_ps_restore(sc);
1715 break; 1716 break;
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index aa4d368d8d3d..7eb1f4b458e4 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1227,10 +1227,7 @@ static bool ath_tx_aggr_check(struct ath_softc *sc, struct ieee80211_sta *sta,
1227 return false; 1227 return false;
1228 1228
1229 txtid = ATH_AN_2_TID(an, tidno); 1229 txtid = ATH_AN_2_TID(an, tidno);
1230 1230 return !txtid->active;
1231 if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
1232 return true;
1233 return false;
1234} 1231}
1235 1232
1236 1233
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 267dbfcfaa96..b9a87383cb43 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -231,7 +231,7 @@ static inline void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
231} 231}
232#endif 232#endif
233 233
234#ifdef CONFIG_ATH9K_RATE_CONTROL 234#ifdef CONFIG_ATH9K_LEGACY_RATE_CONTROL
235int ath_rate_control_register(void); 235int ath_rate_control_register(void);
236void ath_rate_control_unregister(void); 236void ath_rate_control_unregister(void);
237#else 237#else
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 5c4ab5026dca..f7c90cc58d56 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -798,6 +798,10 @@
798#define AR_SREV_REVISION_9485_10 0 798#define AR_SREV_REVISION_9485_10 0
799#define AR_SREV_REVISION_9485_11 1 799#define AR_SREV_REVISION_9485_11 1
800#define AR_SREV_VERSION_9340 0x300 800#define AR_SREV_VERSION_9340 0x300
801#define AR_SREV_REVISION_9340_10 0
802#define AR_SREV_REVISION_9340_11 1
803#define AR_SREV_REVISION_9340_12 2
804#define AR_SREV_REVISION_9340_13 3
801#define AR_SREV_VERSION_9580 0x1C0 805#define AR_SREV_VERSION_9580 0x1C0
802#define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */ 806#define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */
803#define AR_SREV_VERSION_9462 0x280 807#define AR_SREV_VERSION_9462 0x280
@@ -897,6 +901,10 @@
897#define AR_SREV_9340(_ah) \ 901#define AR_SREV_9340(_ah) \
898 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9340)) 902 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9340))
899 903
904#define AR_SREV_9340_13_OR_LATER(_ah) \
905 (AR_SREV_9340((_ah)) && \
906 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9340_13))
907
900#define AR_SREV_9285E_20(_ah) \ 908#define AR_SREV_9285E_20(_ah) \
901 (AR_SREV_9285_12_OR_LATER(_ah) && \ 909 (AR_SREV_9285_12_OR_LATER(_ah) && \
902 ((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1)) 910 ((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1))
@@ -1007,6 +1015,8 @@ enum {
1007 AR_INTR_SYNC_LOCAL_TIMEOUT | 1015 AR_INTR_SYNC_LOCAL_TIMEOUT |
1008 AR_INTR_SYNC_MAC_SLEEP_ACCESS), 1016 AR_INTR_SYNC_MAC_SLEEP_ACCESS),
1009 1017
1018 AR9340_INTR_SYNC_LOCAL_TIMEOUT = 0x00000010,
1019
1010 AR_INTR_SYNC_SPURIOUS = 0xFFFFFFFF, 1020 AR_INTR_SYNC_SPURIOUS = 0xFFFFFFFF,
1011 1021
1012}; 1022};
@@ -1881,6 +1891,7 @@ enum {
1881#define AR_PCU_TXBUF_CTRL_SIZE_MASK 0x7FF 1891#define AR_PCU_TXBUF_CTRL_SIZE_MASK 0x7FF
1882#define AR_PCU_TXBUF_CTRL_USABLE_SIZE 0x700 1892#define AR_PCU_TXBUF_CTRL_USABLE_SIZE 0x700
1883#define AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE 0x380 1893#define AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE 0x380
1894#define AR_9340_PCU_TXBUF_CTRL_USABLE_SIZE 0x500
1884 1895
1885#define AR_PCU_MISC_MODE2 0x8344 1896#define AR_PCU_MISC_MODE2 0x8344
1886#define AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE 0x00000002 1897#define AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE 0x00000002
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 14bb3354ea64..83ab6be3fe6d 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -125,24 +125,6 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
125 list_add_tail(&ac->list, &txq->axq_acq); 125 list_add_tail(&ac->list, &txq->axq_acq);
126} 126}
127 127
128static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
129{
130 struct ath_txq *txq = tid->ac->txq;
131
132 WARN_ON(!tid->paused);
133
134 ath_txq_lock(sc, txq);
135 tid->paused = false;
136
137 if (skb_queue_empty(&tid->buf_q))
138 goto unlock;
139
140 ath_tx_queue_tid(txq, tid);
141 ath_txq_schedule(sc, txq);
142unlock:
143 ath_txq_unlock_complete(sc, txq);
144}
145
146static struct ath_frame_info *get_frame_info(struct sk_buff *skb) 128static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
147{ 129{
148 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 130 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -164,20 +146,7 @@ static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
164 ARRAY_SIZE(bf->rates)); 146 ARRAY_SIZE(bf->rates));
165} 147}
166 148
167static void ath_tx_clear_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 149static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
168{
169 tid->state &= ~AGGR_ADDBA_COMPLETE;
170 tid->state &= ~AGGR_CLEANUP;
171 if (!tid->stop_cb)
172 return;
173
174 ieee80211_start_tx_ba_cb_irqsafe(tid->an->vif, tid->an->sta->addr,
175 tid->tidno);
176 tid->stop_cb = false;
177}
178
179static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid,
180 bool flush_packets)
181{ 150{
182 struct ath_txq *txq = tid->ac->txq; 151 struct ath_txq *txq = tid->ac->txq;
183 struct sk_buff *skb; 152 struct sk_buff *skb;
@@ -194,15 +163,16 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid,
194 while ((skb = __skb_dequeue(&tid->buf_q))) { 163 while ((skb = __skb_dequeue(&tid->buf_q))) {
195 fi = get_frame_info(skb); 164 fi = get_frame_info(skb);
196 bf = fi->bf; 165 bf = fi->bf;
197 if (!bf && !flush_packets)
198 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
199 166
200 if (!bf) { 167 if (!bf) {
201 ieee80211_free_txskb(sc->hw, skb); 168 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
202 continue; 169 if (!bf) {
170 ieee80211_free_txskb(sc->hw, skb);
171 continue;
172 }
203 } 173 }
204 174
205 if (fi->retries || flush_packets) { 175 if (fi->retries) {
206 list_add_tail(&bf->list, &bf_head); 176 list_add_tail(&bf->list, &bf_head);
207 ath_tx_update_baw(sc, tid, bf->bf_state.seqno); 177 ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
208 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); 178 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
@@ -213,10 +183,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid,
213 } 183 }
214 } 184 }
215 185
216 if (tid->baw_head == tid->baw_tail) 186 if (sendbar) {
217 ath_tx_clear_tid(sc, tid);
218
219 if (sendbar && !flush_packets) {
220 ath_txq_unlock(sc, txq); 187 ath_txq_unlock(sc, txq);
221 ath_send_bar(tid, tid->seq_start); 188 ath_send_bar(tid, tid->seq_start);
222 ath_txq_lock(sc, txq); 189 ath_txq_lock(sc, txq);
@@ -499,19 +466,19 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
499 tx_info = IEEE80211_SKB_CB(skb); 466 tx_info = IEEE80211_SKB_CB(skb);
500 fi = get_frame_info(skb); 467 fi = get_frame_info(skb);
501 468
502 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) { 469 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
470 /*
471 * Outside of the current BlockAck window,
472 * maybe part of a previous session
473 */
474 txfail = 1;
475 } else if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
503 /* transmit completion, subframe is 476 /* transmit completion, subframe is
504 * acked by block ack */ 477 * acked by block ack */
505 acked_cnt++; 478 acked_cnt++;
506 } else if (!isaggr && txok) { 479 } else if (!isaggr && txok) {
507 /* transmit completion */ 480 /* transmit completion */
508 acked_cnt++; 481 acked_cnt++;
509 } else if (tid->state & AGGR_CLEANUP) {
510 /*
511 * cleanup in progress, just fail
512 * the un-acked sub-frames
513 */
514 txfail = 1;
515 } else if (flush) { 482 } else if (flush) {
516 txpending = 1; 483 txpending = 1;
517 } else if (fi->retries < ATH_MAX_SW_RETRIES) { 484 } else if (fi->retries < ATH_MAX_SW_RETRIES) {
@@ -535,7 +502,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
535 if (bf_next != NULL || !bf_last->bf_stale) 502 if (bf_next != NULL || !bf_last->bf_stale)
536 list_move_tail(&bf->list, &bf_head); 503 list_move_tail(&bf->list, &bf_head);
537 504
538 if (!txpending || (tid->state & AGGR_CLEANUP)) { 505 if (!txpending) {
539 /* 506 /*
540 * complete the acked-ones/xretried ones; update 507 * complete the acked-ones/xretried ones; update
541 * block-ack window 508 * block-ack window
@@ -609,9 +576,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
609 ath_txq_lock(sc, txq); 576 ath_txq_lock(sc, txq);
610 } 577 }
611 578
612 if (tid->state & AGGR_CLEANUP)
613 ath_tx_flush_tid(sc, tid, false);
614
615 rcu_read_unlock(); 579 rcu_read_unlock();
616 580
617 if (needreset) 581 if (needreset)
@@ -1244,9 +1208,6 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1244 an = (struct ath_node *)sta->drv_priv; 1208 an = (struct ath_node *)sta->drv_priv;
1245 txtid = ATH_AN_2_TID(an, tid); 1209 txtid = ATH_AN_2_TID(an, tid);
1246 1210
1247 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
1248 return -EAGAIN;
1249
1250 /* update ampdu factor/density, they may have changed. This may happen 1211 /* update ampdu factor/density, they may have changed. This may happen
1251 * in HT IBSS when a beacon with HT-info is received after the station 1212 * in HT IBSS when a beacon with HT-info is received after the station
1252 * has already been added. 1213 * has already been added.
@@ -1258,7 +1219,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1258 an->mpdudensity = density; 1219 an->mpdudensity = density;
1259 } 1220 }
1260 1221
1261 txtid->state |= AGGR_ADDBA_PROGRESS; 1222 txtid->active = true;
1262 txtid->paused = true; 1223 txtid->paused = true;
1263 *ssn = txtid->seq_start = txtid->seq_next; 1224 *ssn = txtid->seq_start = txtid->seq_next;
1264 txtid->bar_index = -1; 1225 txtid->bar_index = -1;
@@ -1269,45 +1230,17 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1269 return 0; 1230 return 0;
1270} 1231}
1271 1232
1272bool ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid, 1233void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1273 bool flush)
1274{ 1234{
1275 struct ath_node *an = (struct ath_node *)sta->drv_priv; 1235 struct ath_node *an = (struct ath_node *)sta->drv_priv;
1276 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); 1236 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
1277 struct ath_txq *txq = txtid->ac->txq; 1237 struct ath_txq *txq = txtid->ac->txq;
1278 bool ret = !flush;
1279
1280 if (flush)
1281 txtid->stop_cb = false;
1282
1283 if (txtid->state & AGGR_CLEANUP)
1284 return false;
1285
1286 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
1287 txtid->state &= ~AGGR_ADDBA_PROGRESS;
1288 return ret;
1289 }
1290 1238
1291 ath_txq_lock(sc, txq); 1239 ath_txq_lock(sc, txq);
1240 txtid->active = false;
1292 txtid->paused = true; 1241 txtid->paused = true;
1293 1242 ath_tx_flush_tid(sc, txtid);
1294 /*
1295 * If frames are still being transmitted for this TID, they will be
1296 * cleaned up during tx completion. To prevent race conditions, this
1297 * TID can only be reused after all in-progress subframes have been
1298 * completed.
1299 */
1300 if (txtid->baw_head != txtid->baw_tail) {
1301 txtid->state |= AGGR_CLEANUP;
1302 ret = false;
1303 txtid->stop_cb = !flush;
1304 } else {
1305 txtid->state &= ~AGGR_ADDBA_COMPLETE;
1306 }
1307
1308 ath_tx_flush_tid(sc, txtid, flush);
1309 ath_txq_unlock_complete(sc, txq); 1243 ath_txq_unlock_complete(sc, txq);
1310 return ret;
1311} 1244}
1312 1245
1313void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, 1246void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
@@ -1371,18 +1304,28 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
1371 } 1304 }
1372} 1305}
1373 1306
1374void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) 1307void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
1308 u16 tidno)
1375{ 1309{
1376 struct ath_atx_tid *txtid; 1310 struct ath_atx_tid *tid;
1377 struct ath_node *an; 1311 struct ath_node *an;
1312 struct ath_txq *txq;
1378 1313
1379 an = (struct ath_node *)sta->drv_priv; 1314 an = (struct ath_node *)sta->drv_priv;
1315 tid = ATH_AN_2_TID(an, tidno);
1316 txq = tid->ac->txq;
1380 1317
1381 txtid = ATH_AN_2_TID(an, tid); 1318 ath_txq_lock(sc, txq);
1382 txtid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; 1319
1383 txtid->state |= AGGR_ADDBA_COMPLETE; 1320 tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
1384 txtid->state &= ~AGGR_ADDBA_PROGRESS; 1321 tid->paused = false;
1385 ath_tx_resume_tid(sc, txtid); 1322
1323 if (!skb_queue_empty(&tid->buf_q)) {
1324 ath_tx_queue_tid(txq, tid);
1325 ath_txq_schedule(sc, txq);
1326 }
1327
1328 ath_txq_unlock_complete(sc, txq);
1386} 1329}
1387 1330
1388/********************/ 1331/********************/
@@ -1627,6 +1570,8 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1627 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) 1570 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1628 return; 1571 return;
1629 1572
1573 rcu_read_lock();
1574
1630 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); 1575 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
1631 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list); 1576 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
1632 1577
@@ -1665,8 +1610,10 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1665 1610
1666 if (ac == last_ac || 1611 if (ac == last_ac ||
1667 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) 1612 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1668 return; 1613 break;
1669 } 1614 }
1615
1616 rcu_read_unlock();
1670} 1617}
1671 1618
1672/***********/ 1619/***********/
@@ -2431,13 +2378,10 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2431 tid->baw_head = tid->baw_tail = 0; 2378 tid->baw_head = tid->baw_tail = 0;
2432 tid->sched = false; 2379 tid->sched = false;
2433 tid->paused = false; 2380 tid->paused = false;
2434 tid->state &= ~AGGR_CLEANUP; 2381 tid->active = false;
2435 __skb_queue_head_init(&tid->buf_q); 2382 __skb_queue_head_init(&tid->buf_q);
2436 acno = TID_TO_WME_AC(tidno); 2383 acno = TID_TO_WME_AC(tidno);
2437 tid->ac = &an->ac[acno]; 2384 tid->ac = &an->ac[acno];
2438 tid->state &= ~AGGR_ADDBA_COMPLETE;
2439 tid->state &= ~AGGR_ADDBA_PROGRESS;
2440 tid->stop_cb = false;
2441 } 2385 }
2442 2386
2443 for (acno = 0, ac = &an->ac[acno]; 2387 for (acno = 0, ac = &an->ac[acno];
@@ -2474,7 +2418,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2474 } 2418 }
2475 2419
2476 ath_tid_drain(sc, txq, tid); 2420 ath_tid_drain(sc, txq, tid);
2477 ath_tx_clear_tid(sc, tid); 2421 tid->active = false;
2478 2422
2479 ath_txq_unlock(sc, txq); 2423 ath_txq_unlock(sc, txq);
2480 } 2424 }
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 830bb1d1f957..b827d51c30a3 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -1624,7 +1624,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
1624 1624
1625 netif_carrier_off(dev); 1625 netif_carrier_off(dev);
1626 1626
1627 if (!proc_create_data("driver/atmel", 0, NULL, &atmel_proc_fops, priv)); 1627 if (!proc_create_data("driver/atmel", 0, NULL, &atmel_proc_fops, priv))
1628 printk(KERN_WARNING "atmel: unable to create /proc entry.\n"); 1628 printk(KERN_WARNING "atmel: unable to create /proc entry.\n");
1629 1629
1630 printk(KERN_INFO "%s: Atmel at76c50x. Version %d.%d. MAC %pM\n", 1630 printk(KERN_INFO "%s: Atmel at76c50x. Version %d.%d. MAC %pM\n",
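
The atmel.c hunk above removes a stray semicolon after the if condition, which had made the warning printk unconditional. A minimal standalone sketch of the same pitfall, with illustrative values rather than driver code:

#include <stdio.h>

int main(void)
{
	int ok = 1;

	/* Buggy form: the trailing ';' is the entire if body, so the
	 * warning below is no longer conditional and always prints. */
	if (!ok);
		printf("warning: unable to create /proc entry (always printed)\n");

	/* Fixed form: the statement is guarded by the condition. */
	if (!ok)
		printf("warning: unable to create /proc entry\n");

	return 0;
}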
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 6dd07e2ec595..a95b77ab360e 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2458,7 +2458,7 @@ static void b43_request_firmware(struct work_struct *work)
2458 for (i = 0; i < B43_NR_FWTYPES; i++) { 2458 for (i = 0; i < B43_NR_FWTYPES; i++) {
2459 errmsg = ctx->errors[i]; 2459 errmsg = ctx->errors[i];
2460 if (strlen(errmsg)) 2460 if (strlen(errmsg))
2461 b43err(dev->wl, errmsg); 2461 b43err(dev->wl, "%s", errmsg);
2462 } 2462 }
2463 b43_print_fw_helptext(dev->wl, 1); 2463 b43_print_fw_helptext(dev->wl, 1);
2464 goto out; 2464 goto out;
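
The b43 hunk above passes the error text through a "%s" conversion instead of using it directly as the format string; if the message ever contained a '%' sequence, printf-style formatting would read arguments that were never passed. A small userspace sketch of the difference, with a hypothetical message:

#include <stdio.h>

int main(void)
{
	/* An error string that happens to contain a conversion specifier. */
	const char *errmsg = "firmware b43/ucode5%s.fw not found";

	/* Unsafe pattern (left commented out): errmsg becomes the format
	 * string, so "%s" would read an argument that was never passed.
	 * printf(errmsg);
	 */

	/* Safe pattern, as in the fix: the message is printed as plain data. */
	printf("%s\n", errmsg);

	return 0;
}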
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index be0787cab24f..9431af2465f3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -27,7 +27,6 @@
27#include "tracepoint.h" 27#include "tracepoint.h"
28 28
29#define PKTFILTER_BUF_SIZE 128 29#define PKTFILTER_BUF_SIZE 128
30#define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */
31#define BRCMF_DEFAULT_BCN_TIMEOUT 3 30#define BRCMF_DEFAULT_BCN_TIMEOUT 3
32#define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40 31#define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40
33#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40 32#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40
@@ -338,23 +337,6 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
338 goto done; 337 goto done;
339 } 338 }
340 339
341 /* Try to set and enable ARP offload feature, this may fail */
342 err = brcmf_fil_iovar_int_set(ifp, "arp_ol", BRCMF_ARPOL_MODE);
343 if (err) {
344 brcmf_dbg(TRACE, "failed to set ARP offload mode to 0x%x, err = %d\n",
345 BRCMF_ARPOL_MODE, err);
346 err = 0;
347 } else {
348 err = brcmf_fil_iovar_int_set(ifp, "arpoe", 1);
349 if (err) {
350 brcmf_dbg(TRACE, "failed to enable ARP offload err = %d\n",
351 err);
352 err = 0;
353 } else
354 brcmf_dbg(TRACE, "successfully enabled ARP offload to 0x%x\n",
355 BRCMF_ARPOL_MODE);
356 }
357
358 /* Setup packet filter */ 340 /* Setup packet filter */
359 brcmf_c_pktfilter_offload_set(ifp, BRCMF_DEFAULT_PACKET_FILTER); 341 brcmf_c_pktfilter_offload_set(ifp, BRCMF_DEFAULT_PACKET_FILTER);
360 brcmf_c_pktfilter_offload_enable(ifp, BRCMF_DEFAULT_PACKET_FILTER, 342 brcmf_c_pktfilter_offload_enable(ifp, BRCMF_DEFAULT_PACKET_FILTER,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 59c25463e428..2c593570497c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -653,10 +653,13 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
653 653
654 brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name); 654 brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
655 655
656 ndev->destructor = free_netdev;
656 return 0; 657 return 0;
657 658
658fail: 659fail:
660 drvr->iflist[ifp->bssidx] = NULL;
659 ndev->netdev_ops = NULL; 661 ndev->netdev_ops = NULL;
662 free_netdev(ndev);
660 return -EBADE; 663 return -EBADE;
661} 664}
662 665
@@ -720,6 +723,9 @@ static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
720 return 0; 723 return 0;
721 724
722fail: 725fail:
726 ifp->drvr->iflist[ifp->bssidx] = NULL;
727 ndev->netdev_ops = NULL;
728 free_netdev(ndev);
723 return -EBADE; 729 return -EBADE;
724} 730}
725 731
@@ -788,6 +794,7 @@ void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
788 struct brcmf_if *ifp; 794 struct brcmf_if *ifp;
789 795
790 ifp = drvr->iflist[bssidx]; 796 ifp = drvr->iflist[bssidx];
797 drvr->iflist[bssidx] = NULL;
791 if (!ifp) { 798 if (!ifp) {
792 brcmf_err("Null interface, idx=%d\n", bssidx); 799 brcmf_err("Null interface, idx=%d\n", bssidx);
793 return; 800 return;
@@ -808,15 +815,13 @@ void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
808 cancel_work_sync(&ifp->setmacaddr_work); 815 cancel_work_sync(&ifp->setmacaddr_work);
809 cancel_work_sync(&ifp->multicast_work); 816 cancel_work_sync(&ifp->multicast_work);
810 } 817 }
811 818 /* unregister will take care of freeing it */
812 unregister_netdev(ifp->ndev); 819 unregister_netdev(ifp->ndev);
813 if (bssidx == 0) 820 if (bssidx == 0)
814 brcmf_cfg80211_detach(drvr->config); 821 brcmf_cfg80211_detach(drvr->config);
815 free_netdev(ifp->ndev);
816 } else { 822 } else {
817 kfree(ifp); 823 kfree(ifp);
818 } 824 }
819 drvr->iflist[bssidx] = NULL;
820} 825}
821 826
822int brcmf_attach(uint bus_hdrlen, struct device *dev) 827int brcmf_attach(uint bus_hdrlen, struct device *dev)
@@ -925,8 +930,10 @@ fail:
925 brcmf_fws_del_interface(ifp); 930 brcmf_fws_del_interface(ifp);
926 brcmf_fws_deinit(drvr); 931 brcmf_fws_deinit(drvr);
927 } 932 }
928 free_netdev(ifp->ndev); 933 if (drvr->iflist[0]) {
929 drvr->iflist[0] = NULL; 934 free_netdev(ifp->ndev);
935 drvr->iflist[0] = NULL;
936 }
930 if (p2p_ifp) { 937 if (p2p_ifp) {
931 free_netdev(p2p_ifp->ndev); 938 free_netdev(p2p_ifp->ndev);
932 drvr->iflist[1] = NULL; 939 drvr->iflist[1] = NULL;
@@ -934,7 +941,8 @@ fail:
934 return ret; 941 return ret;
935 } 942 }
936 if ((brcmf_p2p_enable) && (p2p_ifp)) 943 if ((brcmf_p2p_enable) && (p2p_ifp))
937 brcmf_net_p2p_attach(p2p_ifp); 944 if (brcmf_net_p2p_attach(p2p_ifp) < 0)
945 brcmf_p2p_enable = 0;
938 946
939 return 0; 947 return 0;
940} 948}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
index 5a64280e6485..83ee53a7c76e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
@@ -202,7 +202,8 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
202 return; 202 return;
203 brcmf_fws_add_interface(ifp); 203 brcmf_fws_add_interface(ifp);
204 if (!drvr->fweh.evt_handler[BRCMF_E_IF]) 204 if (!drvr->fweh.evt_handler[BRCMF_E_IF])
205 err = brcmf_net_attach(ifp, false); 205 if (brcmf_net_attach(ifp, false) < 0)
206 return;
206 } 207 }
207 208
208 if (ifevent->action == BRCMF_E_IF_CHANGE) 209 if (ifevent->action == BRCMF_E_IF_CHANGE)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index 0f2c83bc95dc..665ef69e974b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -23,6 +23,12 @@
23 23
24#define BRCMF_FIL_ACTION_FRAME_SIZE 1800 24#define BRCMF_FIL_ACTION_FRAME_SIZE 1800
25 25
26/* ARP Offload feature flags for arp_ol iovar */
27#define BRCMF_ARP_OL_AGENT 0x00000001
28#define BRCMF_ARP_OL_SNOOP 0x00000002
29#define BRCMF_ARP_OL_HOST_AUTO_REPLY 0x00000004
30#define BRCMF_ARP_OL_PEER_AUTO_REPLY 0x00000008
31
26 32
27enum brcmf_fil_p2p_if_types { 33enum brcmf_fil_p2p_if_types {
28 BRCMF_FIL_P2P_IF_CLIENT, 34 BRCMF_FIL_P2P_IF_CLIENT,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index e7a1a4770996..79555f006d53 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -47,6 +47,7 @@
47#define IS_P2P_SOCIAL_CHANNEL(channel) ((channel == SOCIAL_CHAN_1) || \ 47#define IS_P2P_SOCIAL_CHANNEL(channel) ((channel == SOCIAL_CHAN_1) || \
48 (channel == SOCIAL_CHAN_2) || \ 48 (channel == SOCIAL_CHAN_2) || \
49 (channel == SOCIAL_CHAN_3)) 49 (channel == SOCIAL_CHAN_3))
50#define BRCMF_P2P_TEMP_CHAN SOCIAL_CHAN_3
50#define SOCIAL_CHAN_CNT 3 51#define SOCIAL_CHAN_CNT 3
51#define AF_PEER_SEARCH_CNT 2 52#define AF_PEER_SEARCH_CNT 2
52 53
@@ -1954,21 +1955,21 @@ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg)
1954 err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1); 1955 err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1);
1955 if (err < 0) { 1956 if (err < 0) {
1956 brcmf_err("set p2p_disc error\n"); 1957 brcmf_err("set p2p_disc error\n");
1957 brcmf_free_vif(p2p_vif); 1958 brcmf_free_vif(cfg, p2p_vif);
1958 goto exit; 1959 goto exit;
1959 } 1960 }
1960 /* obtain bsscfg index for P2P discovery */ 1961 /* obtain bsscfg index for P2P discovery */
1961 err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx); 1962 err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx);
1962 if (err < 0) { 1963 if (err < 0) {
1963 brcmf_err("retrieving discover bsscfg index failed\n"); 1964 brcmf_err("retrieving discover bsscfg index failed\n");
1964 brcmf_free_vif(p2p_vif); 1965 brcmf_free_vif(cfg, p2p_vif);
1965 goto exit; 1966 goto exit;
1966 } 1967 }
1967 /* Verify that firmware uses same bssidx as driver !! */ 1968 /* Verify that firmware uses same bssidx as driver !! */
1968 if (p2p_ifp->bssidx != bssidx) { 1969 if (p2p_ifp->bssidx != bssidx) {
1969 brcmf_err("Incorrect bssidx=%d, compared to p2p_ifp->bssidx=%d\n", 1970 brcmf_err("Incorrect bssidx=%d, compared to p2p_ifp->bssidx=%d\n",
1970 bssidx, p2p_ifp->bssidx); 1971 bssidx, p2p_ifp->bssidx);
1971 brcmf_free_vif(p2p_vif); 1972 brcmf_free_vif(cfg, p2p_vif);
1972 goto exit; 1973 goto exit;
1973 } 1974 }
1974 1975
@@ -1996,7 +1997,7 @@ void brcmf_p2p_detach(struct brcmf_p2p_info *p2p)
1996 brcmf_p2p_cancel_remain_on_channel(vif->ifp); 1997 brcmf_p2p_cancel_remain_on_channel(vif->ifp);
1997 brcmf_p2p_deinit_discovery(p2p); 1998 brcmf_p2p_deinit_discovery(p2p);
1998 /* remove discovery interface */ 1999 /* remove discovery interface */
1999 brcmf_free_vif(vif); 2000 brcmf_free_vif(p2p->cfg, vif);
2000 p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL; 2001 p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
2001 } 2002 }
2002 /* just set it all to zero */ 2003 /* just set it all to zero */
@@ -2013,17 +2014,30 @@ static void brcmf_p2p_get_current_chanspec(struct brcmf_p2p_info *p2p,
2013 u16 *chanspec) 2014 u16 *chanspec)
2014{ 2015{
2015 struct brcmf_if *ifp; 2016 struct brcmf_if *ifp;
2016 struct brcmf_fil_chan_info_le ci; 2017 u8 mac_addr[ETH_ALEN];
2017 struct brcmu_chan ch; 2018 struct brcmu_chan ch;
2018 s32 err; 2019 struct brcmf_bss_info_le *bi;
2020 u8 *buf;
2019 2021
2020 ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; 2022 ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
2021 2023
2022 ch.chnum = 11; 2024 if (brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSSID, mac_addr,
2023 2025 ETH_ALEN) == 0) {
2024 err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_CHANNEL, &ci, sizeof(ci)); 2026 buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
2025 if (!err) 2027 if (buf != NULL) {
2026 ch.chnum = le32_to_cpu(ci.hw_channel); 2028 *(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX);
2029 if (brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO,
2030 buf, WL_BSS_INFO_MAX) == 0) {
2031 bi = (struct brcmf_bss_info_le *)(buf + 4);
2032 *chanspec = le16_to_cpu(bi->chanspec);
2033 kfree(buf);
2034 return;
2035 }
2036 kfree(buf);
2037 }
2038 }
2039 /* Use default channel for P2P */
2040 ch.chnum = BRCMF_P2P_TEMP_CHAN;
2027 ch.bw = BRCMU_CHAN_BW_20; 2041 ch.bw = BRCMU_CHAN_BW_20;
2028 p2p->cfg->d11inf.encchspec(&ch); 2042 p2p->cfg->d11inf.encchspec(&ch);
2029 *chanspec = ch.chspec; 2043 *chanspec = ch.chspec;
@@ -2208,7 +2222,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
2208 return &p2p_vif->wdev; 2222 return &p2p_vif->wdev;
2209 2223
2210fail: 2224fail:
2211 brcmf_free_vif(p2p_vif); 2225 brcmf_free_vif(p2p->cfg, p2p_vif);
2212 return ERR_PTR(err); 2226 return ERR_PTR(err);
2213} 2227}
2214 2228
@@ -2217,13 +2231,31 @@ fail:
2217 * 2231 *
2218 * @vif: virtual interface object to delete. 2232 * @vif: virtual interface object to delete.
2219 */ 2233 */
2220static void brcmf_p2p_delete_p2pdev(struct brcmf_cfg80211_vif *vif) 2234static void brcmf_p2p_delete_p2pdev(struct brcmf_cfg80211_info *cfg,
2235 struct brcmf_cfg80211_vif *vif)
2221{ 2236{
2222 struct brcmf_p2p_info *p2p = &vif->ifp->drvr->config->p2p;
2223
2224 cfg80211_unregister_wdev(&vif->wdev); 2237 cfg80211_unregister_wdev(&vif->wdev);
2225 p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL; 2238 cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
2226 brcmf_free_vif(vif); 2239 brcmf_free_vif(cfg, vif);
2240}
2241
2242/**
2243 * brcmf_p2p_free_p2p_if() - free up net device related data.
2244 *
2245 * @ndev: net device that needs to be freed.
2246 */
2247static void brcmf_p2p_free_p2p_if(struct net_device *ndev)
2248{
2249 struct brcmf_cfg80211_info *cfg;
2250 struct brcmf_cfg80211_vif *vif;
2251 struct brcmf_if *ifp;
2252
2253 ifp = netdev_priv(ndev);
2254 cfg = ifp->drvr->config;
2255 vif = ifp->vif;
2256
2257 brcmf_free_vif(cfg, vif);
2258 free_netdev(ifp->ndev);
2227} 2259}
2228 2260
2229/** 2261/**
@@ -2303,6 +2335,9 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
2303 brcmf_err("Registering netdevice failed\n"); 2335 brcmf_err("Registering netdevice failed\n");
2304 goto fail; 2336 goto fail;
2305 } 2337 }
2338 /* override destructor */
2339 ifp->ndev->destructor = brcmf_p2p_free_p2p_if;
2340
2306 cfg->p2p.bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = vif; 2341 cfg->p2p.bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = vif;
2307 /* Disable firmware roaming for P2P interface */ 2342 /* Disable firmware roaming for P2P interface */
2308 brcmf_fil_iovar_int_set(ifp, "roam_off", 1); 2343 brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
@@ -2314,7 +2349,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
2314 return &ifp->vif->wdev; 2349 return &ifp->vif->wdev;
2315 2350
2316fail: 2351fail:
2317 brcmf_free_vif(vif); 2352 brcmf_free_vif(cfg, vif);
2318 return ERR_PTR(err); 2353 return ERR_PTR(err);
2319} 2354}
2320 2355
@@ -2350,7 +2385,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
2350 break; 2385 break;
2351 2386
2352 case NL80211_IFTYPE_P2P_DEVICE: 2387 case NL80211_IFTYPE_P2P_DEVICE:
2353 brcmf_p2p_delete_p2pdev(vif); 2388 brcmf_p2p_delete_p2pdev(cfg, vif);
2354 return 0; 2389 return 0;
2355 default: 2390 default:
2356 return -ENOTSUPP; 2391 return -ENOTSUPP;
@@ -2378,7 +2413,6 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
2378 err = 0; 2413 err = 0;
2379 } 2414 }
2380 brcmf_cfg80211_arm_vif_event(cfg, NULL); 2415 brcmf_cfg80211_arm_vif_event(cfg, NULL);
2381 brcmf_free_vif(vif);
2382 p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL; 2416 p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
2383 2417
2384 return err; 2418 return err;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 761f501959a9..301e572e8923 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -459,6 +459,38 @@ send_key_to_dongle(struct net_device *ndev, struct brcmf_wsec_key *key)
459 return err; 459 return err;
460} 460}
461 461
462static s32
463brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable)
464{
465 s32 err;
466 u32 mode;
467
468 if (enable)
469 mode = BRCMF_ARP_OL_AGENT | BRCMF_ARP_OL_PEER_AUTO_REPLY;
470 else
471 mode = 0;
472
473 /* Try to set and enable ARP offload feature, this may fail, then it */
474 /* is simply not supported and err 0 will be returned */
475 err = brcmf_fil_iovar_int_set(ifp, "arp_ol", mode);
476 if (err) {
477 brcmf_dbg(TRACE, "failed to set ARP offload mode to 0x%x, err = %d\n",
478 mode, err);
479 err = 0;
480 } else {
481 err = brcmf_fil_iovar_int_set(ifp, "arpoe", enable);
482 if (err) {
483 brcmf_dbg(TRACE, "failed to configure (%d) ARP offload err = %d\n",
484 enable, err);
485 err = 0;
486 } else
487 brcmf_dbg(TRACE, "successfully configured (%d) ARP offload to 0x%x\n",
488 enable, mode);
489 }
490
491 return err;
492}
493
462static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy, 494static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
463 const char *name, 495 const char *name,
464 enum nl80211_iftype type, 496 enum nl80211_iftype type,
@@ -2216,6 +2248,11 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
2216 } 2248 }
2217 2249
2218 pm = enabled ? PM_FAST : PM_OFF; 2250 pm = enabled ? PM_FAST : PM_OFF;
2251 /* Do not enable the power save after assoc if it is a p2p interface */
2252 if (ifp->vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) {
2253 brcmf_dbg(INFO, "Do not enable power save for P2P clients\n");
2254 pm = PM_OFF;
2255 }
2219 brcmf_dbg(INFO, "power save %s\n", (pm ? "enabled" : "disabled")); 2256 brcmf_dbg(INFO, "power save %s\n", (pm ? "enabled" : "disabled"));
2220 2257
2221 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, pm); 2258 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, pm);
@@ -3640,10 +3677,28 @@ brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif,
3640} 3677}
3641 3678
3642static s32 3679static s32
3680brcmf_cfg80211_set_channel(struct brcmf_cfg80211_info *cfg,
3681 struct brcmf_if *ifp,
3682 struct ieee80211_channel *channel)
3683{
3684 u16 chanspec;
3685 s32 err;
3686
3687 brcmf_dbg(TRACE, "band=%d, center_freq=%d\n", channel->band,
3688 channel->center_freq);
3689
3690 chanspec = channel_to_chanspec(&cfg->d11inf, channel);
3691 err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
3692
3693 return err;
3694}
3695
3696static s32
3643brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, 3697brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
3644 struct cfg80211_ap_settings *settings) 3698 struct cfg80211_ap_settings *settings)
3645{ 3699{
3646 s32 ie_offset; 3700 s32 ie_offset;
3701 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
3647 struct brcmf_if *ifp = netdev_priv(ndev); 3702 struct brcmf_if *ifp = netdev_priv(ndev);
3648 struct brcmf_tlv *ssid_ie; 3703 struct brcmf_tlv *ssid_ie;
3649 struct brcmf_ssid_le ssid_le; 3704 struct brcmf_ssid_le ssid_le;
@@ -3683,6 +3738,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
3683 } 3738 }
3684 3739
3685 brcmf_set_mpc(ifp, 0); 3740 brcmf_set_mpc(ifp, 0);
3741 brcmf_configure_arp_offload(ifp, false);
3686 3742
3687 /* find the RSN_IE */ 3743 /* find the RSN_IE */
3688 rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail, 3744 rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
@@ -3713,6 +3769,12 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
3713 3769
3714 brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon); 3770 brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
3715 3771
3772 err = brcmf_cfg80211_set_channel(cfg, ifp, settings->chandef.chan);
3773 if (err < 0) {
3774 brcmf_err("Set Channel failed, %d\n", err);
3775 goto exit;
3776 }
3777
3716 if (settings->beacon_interval) { 3778 if (settings->beacon_interval) {
3717 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD, 3779 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD,
3718 settings->beacon_interval); 3780 settings->beacon_interval);
@@ -3789,8 +3851,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
3789 set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); 3851 set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
3790 3852
3791exit: 3853exit:
3792 if (err) 3854 if (err) {
3793 brcmf_set_mpc(ifp, 1); 3855 brcmf_set_mpc(ifp, 1);
3856 brcmf_configure_arp_offload(ifp, true);
3857 }
3794 return err; 3858 return err;
3795} 3859}
3796 3860
@@ -3831,6 +3895,7 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
3831 brcmf_err("bss_enable config failed %d\n", err); 3895 brcmf_err("bss_enable config failed %d\n", err);
3832 } 3896 }
3833 brcmf_set_mpc(ifp, 1); 3897 brcmf_set_mpc(ifp, 1);
3898 brcmf_configure_arp_offload(ifp, true);
3834 set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state); 3899 set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
3835 clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); 3900 clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
3836 3901
@@ -4148,7 +4213,7 @@ static const struct ieee80211_iface_limit brcmf_iface_limits[] = {
4148static const struct ieee80211_iface_combination brcmf_iface_combos[] = { 4213static const struct ieee80211_iface_combination brcmf_iface_combos[] = {
4149 { 4214 {
4150 .max_interfaces = BRCMF_IFACE_MAX_CNT, 4215 .max_interfaces = BRCMF_IFACE_MAX_CNT,
4151 .num_different_channels = 1, /* no multi-channel for now */ 4216 .num_different_channels = 2,
4152 .n_limits = ARRAY_SIZE(brcmf_iface_limits), 4217 .n_limits = ARRAY_SIZE(brcmf_iface_limits),
4153 .limits = brcmf_iface_limits 4218 .limits = brcmf_iface_limits
4154 } 4219 }
@@ -4256,20 +4321,16 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
4256 return vif; 4321 return vif;
4257} 4322}
4258 4323
4259void brcmf_free_vif(struct brcmf_cfg80211_vif *vif) 4324void brcmf_free_vif(struct brcmf_cfg80211_info *cfg,
4325 struct brcmf_cfg80211_vif *vif)
4260{ 4326{
4261 struct brcmf_cfg80211_info *cfg;
4262 struct wiphy *wiphy;
4263
4264 wiphy = vif->wdev.wiphy;
4265 cfg = wiphy_priv(wiphy);
4266 list_del(&vif->list); 4327 list_del(&vif->list);
4267 cfg->vif_cnt--; 4328 cfg->vif_cnt--;
4268 4329
4269 kfree(vif); 4330 kfree(vif);
4270 if (!cfg->vif_cnt) { 4331 if (!cfg->vif_cnt) {
4271 wiphy_unregister(wiphy); 4332 wiphy_unregister(cfg->wiphy);
4272 wiphy_free(wiphy); 4333 wiphy_free(cfg->wiphy);
4273 } 4334 }
4274} 4335}
4275 4336
@@ -4646,7 +4707,6 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
4646 return 0; 4707 return 0;
4647 4708
4648 case BRCMF_E_IF_DEL: 4709 case BRCMF_E_IF_DEL:
4649 ifp->vif = NULL;
4650 mutex_unlock(&event->vif_event_lock); 4710 mutex_unlock(&event->vif_event_lock);
4651 /* event may not be upon user request */ 4711 /* event may not be upon user request */
4652 if (brcmf_cfg80211_vif_event_armed(cfg)) 4712 if (brcmf_cfg80211_vif_event_armed(cfg))
@@ -4852,8 +4912,7 @@ cfg80211_p2p_attach_out:
4852 wl_deinit_priv(cfg); 4912 wl_deinit_priv(cfg);
4853 4913
4854cfg80211_attach_out: 4914cfg80211_attach_out:
4855 brcmf_free_vif(vif); 4915 brcmf_free_vif(cfg, vif);
4856 wiphy_free(wiphy);
4857 return NULL; 4916 return NULL;
4858} 4917}
4859 4918
@@ -4865,7 +4924,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
4865 wl_deinit_priv(cfg); 4924 wl_deinit_priv(cfg);
4866 brcmf_btcoex_detach(cfg); 4925 brcmf_btcoex_detach(cfg);
4867 list_for_each_entry_safe(vif, tmp, &cfg->vif_list, list) { 4926 list_for_each_entry_safe(vif, tmp, &cfg->vif_list, list) {
4868 brcmf_free_vif(vif); 4927 brcmf_free_vif(cfg, vif);
4869 } 4928 }
4870} 4929}
4871 4930
@@ -5229,6 +5288,8 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
5229 if (err) 5288 if (err)
5230 goto default_conf_out; 5289 goto default_conf_out;
5231 5290
5291 brcmf_configure_arp_offload(ifp, true);
5292
5232 cfg->dongle_up = true; 5293 cfg->dongle_up = true;
5233default_conf_out: 5294default_conf_out:
5234 5295
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index a71cff84cdcf..d9bdaf9a72d0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -487,7 +487,8 @@ enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp);
487struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, 487struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
488 enum nl80211_iftype type, 488 enum nl80211_iftype type,
489 bool pm_block); 489 bool pm_block);
490void brcmf_free_vif(struct brcmf_cfg80211_vif *vif); 490void brcmf_free_vif(struct brcmf_cfg80211_info *cfg,
491 struct brcmf_cfg80211_vif *vif);
491 492
492s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag, 493s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
493 const u8 *vndr_ie_buf, u32 vndr_ie_len); 494 const u8 *vndr_ie_buf, u32 vndr_ie_len);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 28e7aeedd184..9fd6f2fef11b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -3074,21 +3074,8 @@ static void brcms_b_antsel_set(struct brcms_hardware *wlc_hw, u32 antsel_avail)
3074 */ 3074 */
3075static bool brcms_c_ps_allowed(struct brcms_c_info *wlc) 3075static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
3076{ 3076{
3077 /* disallow PS when one of the following global conditions meets */ 3077 /* not supporting PS so always return false for now */
3078 if (!wlc->pub->associated) 3078 return false;
3079 return false;
3080
3081 /* disallow PS when one of these meets when not scanning */
3082 if (wlc->filter_flags & FIF_PROMISC_IN_BSS)
3083 return false;
3084
3085 if (wlc->bsscfg->type == BRCMS_TYPE_AP)
3086 return false;
3087
3088 if (wlc->bsscfg->type == BRCMS_TYPE_ADHOC)
3089 return false;
3090
3091 return true;
3092} 3079}
3093 3080
3094static void brcms_c_statsupd(struct brcms_c_info *wlc) 3081static void brcms_c_statsupd(struct brcms_c_info *wlc)
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index c9f197d9ca1e..fe31590a51b2 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -816,6 +816,7 @@ out:
816 rs_sta->last_txrate_idx = idx; 816 rs_sta->last_txrate_idx = idx;
817 info->control.rates[0].idx = rs_sta->last_txrate_idx; 817 info->control.rates[0].idx = rs_sta->last_txrate_idx;
818 } 818 }
819 info->control.rates[0].count = 1;
819 820
820 D_RATE("leave: %d\n", idx); 821 D_RATE("leave: %d\n", idx);
821} 822}
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index 1fc0b227e120..ed3c42a63a43 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -2268,7 +2268,7 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
2268 info->control.rates[0].flags = 0; 2268 info->control.rates[0].flags = 0;
2269 } 2269 }
2270 info->control.rates[0].idx = rate_idx; 2270 info->control.rates[0].idx = rate_idx;
2271 2271 info->control.rates[0].count = 1;
2272} 2272}
2273 2273
2274static void * 2274static void *
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index f8246f2d88f9..4caaf52986a4 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1832,16 +1832,16 @@ u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval);
1832__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon, 1832__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
1833 u32 beacon_interval); 1833 u32 beacon_interval);
1834 1834
1835#ifdef CONFIG_PM 1835#ifdef CONFIG_PM_SLEEP
1836extern const struct dev_pm_ops il_pm_ops; 1836extern const struct dev_pm_ops il_pm_ops;
1837 1837
1838#define IL_LEGACY_PM_OPS (&il_pm_ops) 1838#define IL_LEGACY_PM_OPS (&il_pm_ops)
1839 1839
1840#else /* !CONFIG_PM */ 1840#else /* !CONFIG_PM_SLEEP */
1841 1841
1842#define IL_LEGACY_PM_OPS NULL 1842#define IL_LEGACY_PM_OPS NULL
1843 1843
1844#endif /* !CONFIG_PM */ 1844#endif /* !CONFIG_PM_SLEEP */
1845 1845
1846/***************************************************** 1846/*****************************************************
1847* Error Handling Debugging 1847* Error Handling Debugging
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 907bd6e50aad..10fbb176cc8e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -2799,7 +2799,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
2799 info->control.rates[0].flags = 0; 2799 info->control.rates[0].flags = 0;
2800 } 2800 }
2801 info->control.rates[0].idx = rate_idx; 2801 info->control.rates[0].idx = rate_idx;
2802 2802 info->control.rates[0].count = 1;
2803} 2803}
2804 2804
2805static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta, 2805static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 707446fa00bd..cd1ad0019185 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1378,7 +1378,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
1378 struct iwl_chain_noise_data *data = &priv->chain_noise_data; 1378 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
1379 int ret; 1379 int ret;
1380 1380
1381 if (!(priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)) 1381 if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
1382 return; 1382 return;
1383 1383
1384 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && 1384 if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index db183b44e038..c3c13ce96eb0 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -735,7 +735,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
735 memcpy(&lq, priv->stations[i].lq, 735 memcpy(&lq, priv->stations[i].lq,
736 sizeof(struct iwl_link_quality_cmd)); 736 sizeof(struct iwl_link_quality_cmd));
737 737
738 if (!memcmp(&lq, &zero_lq, sizeof(lq))) 738 if (memcmp(&lq, &zero_lq, sizeof(lq)))
739 send_lq = true; 739 send_lq = true;
740 } 740 }
741 spin_unlock_bh(&priv->sta_lock); 741 spin_unlock_bh(&priv->sta_lock);
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 39aad9893e0b..40fed1f511e2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1000,10 +1000,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
1000 */ 1000 */
1001 if (load_module) { 1001 if (load_module) {
1002 err = request_module("%s", op->name); 1002 err = request_module("%s", op->name);
1003#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
1003 if (err) 1004 if (err)
1004 IWL_ERR(drv, 1005 IWL_ERR(drv,
1005 "failed to load module %s (error %d), is dynamic loading enabled?\n", 1006 "failed to load module %s (error %d), is dynamic loading enabled?\n",
1006 op->name, err); 1007 op->name, err);
1008#endif
1007 } 1009 }
1008 return; 1010 return;
1009 1011
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 55334d542e26..b99fe3163866 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -2546,6 +2546,7 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
2546 info->control.rates[0].flags = 0; 2546 info->control.rates[0].flags = 0;
2547 } 2547 }
2548 info->control.rates[0].idx = rate_idx; 2548 info->control.rates[0].idx = rate_idx;
2549 info->control.rates[0].count = 1;
2549} 2550}
2550 2551
2551static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, 2552static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index f212f16502ff..48c1891e3df6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -180,7 +180,8 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
180 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); 180 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
181 return; 181 return;
182 } else if (ieee80211_is_back_req(fc)) { 182 } else if (ieee80211_is_back_req(fc)) {
183 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); 183 tx_cmd->tx_flags |=
184 cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
184 } 185 }
185 186
186 /* HT rate doesn't make sense for a non data frame */ 187 /* HT rate doesn't make sense for a non data frame */
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 753b5682d53f..a5f9875cfd6e 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -26,10 +26,17 @@
26static struct dentry *mwifiex_dfs_dir; 26static struct dentry *mwifiex_dfs_dir;
27 27
28static char *bss_modes[] = { 28static char *bss_modes[] = {
29 "Unknown", 29 "UNSPECIFIED",
30 "Ad-hoc", 30 "ADHOC",
31 "Managed", 31 "STATION",
32 "Auto" 32 "AP",
33 "AP_VLAN",
34 "WDS",
35 "MONITOR",
36 "MESH_POINT",
37 "P2P_CLIENT",
38 "P2P_GO",
39 "P2P_DEVICE",
33}; 40};
34 41
35/* size/addr for mwifiex_debug_info */ 42/* size/addr for mwifiex_debug_info */
@@ -200,7 +207,12 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
200 p += sprintf(p, "driver_version = %s", fmt); 207 p += sprintf(p, "driver_version = %s", fmt);
201 p += sprintf(p, "\nverext = %s", priv->version_str); 208 p += sprintf(p, "\nverext = %s", priv->version_str);
202 p += sprintf(p, "\ninterface_name=\"%s\"\n", netdev->name); 209 p += sprintf(p, "\ninterface_name=\"%s\"\n", netdev->name);
203 p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]); 210
211 if (info.bss_mode >= ARRAY_SIZE(bss_modes))
212 p += sprintf(p, "bss_mode=\"%d\"\n", info.bss_mode);
213 else
214 p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]);
215
204 p += sprintf(p, "media_state=\"%s\"\n", 216 p += sprintf(p, "media_state=\"%s\"\n",
205 (!priv->media_connected ? "Disconnected" : "Connected")); 217 (!priv->media_connected ? "Disconnected" : "Connected"));
206 p += sprintf(p, "mac_address=\"%pM\"\n", netdev->dev_addr); 218 p += sprintf(p, "mac_address=\"%pM\"\n", netdev->dev_addr);
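
The mwifiex hunk above extends bss_modes to cover the nl80211 interface types and guards the lookup with ARRAY_SIZE, so an unexpected mode prints its numeric value instead of indexing past the table. The same bounds-check pattern in a standalone sketch with a hypothetical table:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char * const modes[] = { "UNSPECIFIED", "ADHOC", "STATION" };

static void print_mode(unsigned int mode)
{
	if (mode >= ARRAY_SIZE(modes))
		printf("bss_mode=\"%u\"\n", mode);	/* fall back to the raw number */
	else
		printf("bss_mode=\"%s\"\n", modes[mode]);
}

int main(void)
{
	print_mode(1);	/* known entry */
	print_mode(9);	/* out of range, printed numerically */
	return 0;
}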
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index b52d70c75e1a..72f32e5caa4d 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -3027,19 +3027,26 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
3027 * TODO: we do not use +6 dBm option to do not increase power beyond 3027 * TODO: we do not use +6 dBm option to do not increase power beyond
3028 * regulatory limit, however this could be utilized for devices with 3028 * regulatory limit, however this could be utilized for devices with
3029 * CAPABILITY_POWER_LIMIT. 3029 * CAPABILITY_POWER_LIMIT.
3030 *
3031 * TODO: add different temperature compensation code for RT3290 & RT5390
3032 * to allow to use BBP_R1 for those chips.
3030 */ 3033 */
3031 rt2800_bbp_read(rt2x00dev, 1, &r1); 3034 if (!rt2x00_rt(rt2x00dev, RT3290) &&
3032 if (delta <= -12) { 3035 !rt2x00_rt(rt2x00dev, RT5390)) {
3033 power_ctrl = 2; 3036 rt2800_bbp_read(rt2x00dev, 1, &r1);
3034 delta += 12; 3037 if (delta <= -12) {
3035 } else if (delta <= -6) { 3038 power_ctrl = 2;
3036 power_ctrl = 1; 3039 delta += 12;
3037 delta += 6; 3040 } else if (delta <= -6) {
3038 } else { 3041 power_ctrl = 1;
3039 power_ctrl = 0; 3042 delta += 6;
3043 } else {
3044 power_ctrl = 0;
3045 }
3046 rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
3047 rt2800_bbp_write(rt2x00dev, 1, r1);
3040 } 3048 }
3041 rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl); 3049
3042 rt2800_bbp_write(rt2x00dev, 1, r1);
3043 offset = TX_PWR_CFG_0; 3050 offset = TX_PWR_CFG_0;
3044 3051
3045 for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) { 3052 for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) {
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 999ffc12578b..c97e9d327331 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -764,6 +764,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
764 "can't alloc skb for rx\n"); 764 "can't alloc skb for rx\n");
765 goto done; 765 goto done;
766 } 766 }
767 kmemleak_not_leak(new_skb);
767 768
768 pci_unmap_single(rtlpci->pdev, 769 pci_unmap_single(rtlpci->pdev,
769 *((dma_addr_t *) skb->cb), 770 *((dma_addr_t *) skb->cb),
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 3d0498e69c8c..189ba124a8c6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1973,26 +1973,35 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1973 } 1973 }
1974} 1974}
1975 1975
1976void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw, 1976static void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
1977 struct ieee80211_sta *sta, 1977 struct ieee80211_sta *sta)
1978 u8 rssi_level)
1979{ 1978{
1980 struct rtl_priv *rtlpriv = rtl_priv(hw); 1979 struct rtl_priv *rtlpriv = rtl_priv(hw);
1981 struct rtl_phy *rtlphy = &(rtlpriv->phy); 1980 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1982 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1981 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1983 u32 ratr_value = (u32) mac->basic_rates; 1982 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1984 u8 *mcsrate = mac->mcs; 1983 u32 ratr_value;
1985 u8 ratr_index = 0; 1984 u8 ratr_index = 0;
1986 u8 nmode = mac->ht_enable; 1985 u8 nmode = mac->ht_enable;
1987 u8 mimo_ps = 1; 1986 u8 mimo_ps = IEEE80211_SMPS_OFF;
1988 u16 shortgi_rate = 0; 1987 u16 shortgi_rate;
1989 u32 tmp_ratr_value = 0; 1988 u32 tmp_ratr_value;
1990 u8 curtxbw_40mhz = mac->bw_40; 1989 u8 curtxbw_40mhz = mac->bw_40;
1991 u8 curshortgi_40mhz = mac->sgi_40; 1990 u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1992 u8 curshortgi_20mhz = mac->sgi_20; 1991 1 : 0;
1992 u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1993 1 : 0;
1993 enum wireless_mode wirelessmode = mac->mode; 1994 enum wireless_mode wirelessmode = mac->mode;
1994 1995
1995 ratr_value |= ((*(u16 *) (mcsrate))) << 12; 1996 if (rtlhal->current_bandtype == BAND_ON_5G)
1997 ratr_value = sta->supp_rates[1] << 4;
1998 else
1999 ratr_value = sta->supp_rates[0];
2000 if (mac->opmode == NL80211_IFTYPE_ADHOC)
2001 ratr_value = 0xfff;
2002
2003 ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
2004 sta->ht_cap.mcs.rx_mask[0] << 12);
1996 switch (wirelessmode) { 2005 switch (wirelessmode) {
1997 case WIRELESS_MODE_B: 2006 case WIRELESS_MODE_B:
1998 if (ratr_value & 0x0000000c) 2007 if (ratr_value & 0x0000000c)
@@ -2006,7 +2015,7 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
2006 case WIRELESS_MODE_N_24G: 2015 case WIRELESS_MODE_N_24G:
2007 case WIRELESS_MODE_N_5G: 2016 case WIRELESS_MODE_N_5G:
2008 nmode = 1; 2017 nmode = 1;
2009 if (mimo_ps == 0) { 2018 if (mimo_ps == IEEE80211_SMPS_STATIC) {
2010 ratr_value &= 0x0007F005; 2019 ratr_value &= 0x0007F005;
2011 } else { 2020 } else {
2012 u32 ratr_mask; 2021 u32 ratr_mask;
@@ -2016,8 +2025,7 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
2016 ratr_mask = 0x000ff005; 2025 ratr_mask = 0x000ff005;
2017 else 2026 else
2018 ratr_mask = 0x0f0ff005; 2027 ratr_mask = 0x0f0ff005;
2019 if (curtxbw_40mhz) 2028
2020 ratr_mask |= 0x00000010;
2021 ratr_value &= ratr_mask; 2029 ratr_value &= ratr_mask;
2022 } 2030 }
2023 break; 2031 break;
@@ -2026,41 +2034,74 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
2026 ratr_value &= 0x000ff0ff; 2034 ratr_value &= 0x000ff0ff;
2027 else 2035 else
2028 ratr_value &= 0x0f0ff0ff; 2036 ratr_value &= 0x0f0ff0ff;
2037
2029 break; 2038 break;
2030 } 2039 }
2040
2031 ratr_value &= 0x0FFFFFFF; 2041 ratr_value &= 0x0FFFFFFF;
2032 if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) || 2042
2033 (!curtxbw_40mhz && curshortgi_20mhz))) { 2043 if (nmode && ((curtxbw_40mhz &&
2044 curshortgi_40mhz) || (!curtxbw_40mhz &&
2045 curshortgi_20mhz))) {
2046
2034 ratr_value |= 0x10000000; 2047 ratr_value |= 0x10000000;
2035 tmp_ratr_value = (ratr_value >> 12); 2048 tmp_ratr_value = (ratr_value >> 12);
2049
2036 for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) { 2050 for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
2037 if ((1 << shortgi_rate) & tmp_ratr_value) 2051 if ((1 << shortgi_rate) & tmp_ratr_value)
2038 break; 2052 break;
2039 } 2053 }
2054
2040 shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) | 2055 shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
2041 (shortgi_rate << 4) | (shortgi_rate); 2056 (shortgi_rate << 4) | (shortgi_rate);
2042 } 2057 }
2058
2043 rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); 2059 rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
2060
2061 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
2062 rtl_read_dword(rtlpriv, REG_ARFR0));
2044} 2063}
2045 2064
2046void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) 2065static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw,
2066 struct ieee80211_sta *sta,
2067 u8 rssi_level)
2047{ 2068{
2048 struct rtl_priv *rtlpriv = rtl_priv(hw); 2069 struct rtl_priv *rtlpriv = rtl_priv(hw);
2049 struct rtl_phy *rtlphy = &(rtlpriv->phy); 2070 struct rtl_phy *rtlphy = &(rtlpriv->phy);
2050 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 2071 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2051 u32 ratr_bitmap = (u32) mac->basic_rates; 2072 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
2052 u8 *p_mcsrate = mac->mcs; 2073 struct rtl_sta_info *sta_entry = NULL;
2053 u8 ratr_index = 0; 2074 u32 ratr_bitmap;
2054 u8 curtxbw_40mhz = mac->bw_40; 2075 u8 ratr_index;
2055 u8 curshortgi_40mhz = mac->sgi_40; 2076 u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
2056 u8 curshortgi_20mhz = mac->sgi_20; 2077 u8 curshortgi_40mhz = curtxbw_40mhz &&
2057 enum wireless_mode wirelessmode = mac->mode; 2078 (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
2079 1 : 0;
2080 u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
2081 1 : 0;
2082 enum wireless_mode wirelessmode = 0;
2058 bool shortgi = false; 2083 bool shortgi = false;
2059 u8 rate_mask[5]; 2084 u8 rate_mask[5];
2060 u8 macid = 0; 2085 u8 macid = 0;
2061 u8 mimops = 1; 2086 u8 mimo_ps = IEEE80211_SMPS_OFF;
2062 2087
2063 ratr_bitmap |= (p_mcsrate[1] << 20) | (p_mcsrate[0] << 12); 2088 sta_entry = (struct rtl_sta_info *) sta->drv_priv;
2089 wirelessmode = sta_entry->wireless_mode;
2090 if (mac->opmode == NL80211_IFTYPE_STATION ||
2091 mac->opmode == NL80211_IFTYPE_MESH_POINT)
2092 curtxbw_40mhz = mac->bw_40;
2093 else if (mac->opmode == NL80211_IFTYPE_AP ||
2094 mac->opmode == NL80211_IFTYPE_ADHOC)
2095 macid = sta->aid + 1;
2096
2097 if (rtlhal->current_bandtype == BAND_ON_5G)
2098 ratr_bitmap = sta->supp_rates[1] << 4;
2099 else
2100 ratr_bitmap = sta->supp_rates[0];
2101 if (mac->opmode == NL80211_IFTYPE_ADHOC)
2102 ratr_bitmap = 0xfff;
2103 ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
2104 sta->ht_cap.mcs.rx_mask[0] << 12);
2064 switch (wirelessmode) { 2105 switch (wirelessmode) {
2065 case WIRELESS_MODE_B: 2106 case WIRELESS_MODE_B:
2066 ratr_index = RATR_INX_WIRELESS_B; 2107 ratr_index = RATR_INX_WIRELESS_B;
@@ -2071,6 +2112,7 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
2071 break; 2112 break;
2072 case WIRELESS_MODE_G: 2113 case WIRELESS_MODE_G:
2073 ratr_index = RATR_INX_WIRELESS_GB; 2114 ratr_index = RATR_INX_WIRELESS_GB;
2115
2074 if (rssi_level == 1) 2116 if (rssi_level == 1)
2075 ratr_bitmap &= 0x00000f00; 2117 ratr_bitmap &= 0x00000f00;
2076 else if (rssi_level == 2) 2118 else if (rssi_level == 2)
@@ -2085,7 +2127,8 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
2085 case WIRELESS_MODE_N_24G: 2127 case WIRELESS_MODE_N_24G:
2086 case WIRELESS_MODE_N_5G: 2128 case WIRELESS_MODE_N_5G:
2087 ratr_index = RATR_INX_WIRELESS_NGB; 2129 ratr_index = RATR_INX_WIRELESS_NGB;
2088 if (mimops == 0) { 2130
2131 if (mimo_ps == IEEE80211_SMPS_STATIC) {
2089 if (rssi_level == 1) 2132 if (rssi_level == 1)
2090 ratr_bitmap &= 0x00070000; 2133 ratr_bitmap &= 0x00070000;
2091 else if (rssi_level == 2) 2134 else if (rssi_level == 2)
@@ -2128,8 +2171,10 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
2128 } 2171 }
2129 } 2172 }
2130 } 2173 }
2174
2131 if ((curtxbw_40mhz && curshortgi_40mhz) || 2175 if ((curtxbw_40mhz && curshortgi_40mhz) ||
2132 (!curtxbw_40mhz && curshortgi_20mhz)) { 2176 (!curtxbw_40mhz && curshortgi_20mhz)) {
2177
2133 if (macid == 0) 2178 if (macid == 0)
2134 shortgi = true; 2179 shortgi = true;
2135 else if (macid == 1) 2180 else if (macid == 1)
@@ -2138,21 +2183,42 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
2138 break; 2183 break;
2139 default: 2184 default:
2140 ratr_index = RATR_INX_WIRELESS_NGB; 2185 ratr_index = RATR_INX_WIRELESS_NGB;
2186
2141 if (rtlphy->rf_type == RF_1T2R) 2187 if (rtlphy->rf_type == RF_1T2R)
2142 ratr_bitmap &= 0x000ff0ff; 2188 ratr_bitmap &= 0x000ff0ff;
2143 else 2189 else
2144 ratr_bitmap &= 0x0f0ff0ff; 2190 ratr_bitmap &= 0x0f0ff0ff;
2145 break; 2191 break;
2146 } 2192 }
2147 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "ratr_bitmap :%x\n", 2193 sta_entry->ratr_index = ratr_index;
2148 ratr_bitmap); 2194
2149 *(u32 *)&rate_mask = ((ratr_bitmap & 0x0fffffff) | 2195 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
2150 ratr_index << 28); 2196 "ratr_bitmap :%x\n", ratr_bitmap);
2197 *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
2198 (ratr_index << 28);
2151 rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80; 2199 rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
2152 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, 2200 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
2153 "Rate_index:%x, ratr_val:%x, %5phC\n", 2201 "Rate_index:%x, ratr_val:%x, %5phC\n",
2154 ratr_index, ratr_bitmap, rate_mask); 2202 ratr_index, ratr_bitmap, rate_mask);
2155 rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask); 2203 memcpy(rtlpriv->rate_mask, rate_mask, 5);
2204 /* rtl92c_fill_h2c_cmd() does USB I/O and will result in a
2205 * "scheduled while atomic" if called directly */
2206 schedule_work(&rtlpriv->works.fill_h2c_cmd);
2207
2208 if (macid != 0)
2209 sta_entry->ratr_index = ratr_index;
2210}
2211
2212void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw,
2213 struct ieee80211_sta *sta,
2214 u8 rssi_level)
2215{
2216 struct rtl_priv *rtlpriv = rtl_priv(hw);
2217
2218 if (rtlpriv->dm.useramask)
2219 rtl92cu_update_hal_rate_mask(hw, sta, rssi_level);
2220 else
2221 rtl92cu_update_hal_rate_table(hw, sta);
2156} 2222}
2157 2223
2158void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw) 2224void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
index f41a3aa4a26f..8e3ec1e25644 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
@@ -98,10 +98,6 @@ void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw,
98 u32 add_msr, u32 rm_msr); 98 u32 add_msr, u32 rm_msr);
99void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); 99void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
100void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); 100void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
101void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
102 struct ieee80211_sta *sta,
103 u8 rssi_level);
104void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level);
105 101
106void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw); 102void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw);
107bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid); 103bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 85b6bdb163c0..da4f587199ee 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -289,14 +289,30 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
289 macaddr = cam_const_broad; 289 macaddr = cam_const_broad;
290 entry_id = key_index; 290 entry_id = key_index;
291 } else { 291 } else {
292 if (mac->opmode == NL80211_IFTYPE_AP ||
293 mac->opmode == NL80211_IFTYPE_MESH_POINT) {
294 entry_id = rtl_cam_get_free_entry(hw,
295 p_macaddr);
296 if (entry_id >= TOTAL_CAM_ENTRY) {
297 RT_TRACE(rtlpriv, COMP_SEC,
298 DBG_EMERG,
299 "Can not find free hw security cam entry\n");
300 return;
301 }
302 } else {
303 entry_id = CAM_PAIRWISE_KEY_POSITION;
304 }
305
292 key_index = PAIRWISE_KEYIDX; 306 key_index = PAIRWISE_KEYIDX;
293 entry_id = CAM_PAIRWISE_KEY_POSITION;
294 is_pairwise = true; 307 is_pairwise = true;
295 } 308 }
296 } 309 }
297 if (rtlpriv->sec.key_len[key_index] == 0) { 310 if (rtlpriv->sec.key_len[key_index] == 0) {
298 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 311 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
299 "delete one entry\n"); 312 "delete one entry\n");
313 if (mac->opmode == NL80211_IFTYPE_AP ||
314 mac->opmode == NL80211_IFTYPE_MESH_POINT)
315 rtl_cam_del_entry(hw, p_macaddr);
300 rtl_cam_delete_one_entry(hw, p_macaddr, entry_id); 316 rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
301 } else { 317 } else {
302 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, 318 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 938b1e670b93..826f085c29dd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -106,8 +106,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
106 .update_interrupt_mask = rtl92cu_update_interrupt_mask, 106 .update_interrupt_mask = rtl92cu_update_interrupt_mask,
107 .get_hw_reg = rtl92cu_get_hw_reg, 107 .get_hw_reg = rtl92cu_get_hw_reg,
108 .set_hw_reg = rtl92cu_set_hw_reg, 108 .set_hw_reg = rtl92cu_set_hw_reg,
109 .update_rate_tbl = rtl92cu_update_hal_rate_table, 109 .update_rate_tbl = rtl92cu_update_hal_rate_tbl,
110 .update_rate_mask = rtl92cu_update_hal_rate_mask,
111 .fill_tx_desc = rtl92cu_tx_fill_desc, 110 .fill_tx_desc = rtl92cu_tx_fill_desc,
112 .fill_fake_txdesc = rtl92cu_fill_fake_txdesc, 111 .fill_fake_txdesc = rtl92cu_fill_fake_txdesc,
113 .fill_tx_cmddesc = rtl92cu_tx_fill_cmddesc, 112 .fill_tx_cmddesc = rtl92cu_tx_fill_cmddesc,
@@ -137,6 +136,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
137 .phy_lc_calibrate = _rtl92cu_phy_lc_calibrate, 136 .phy_lc_calibrate = _rtl92cu_phy_lc_calibrate,
138 .phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback, 137 .phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback,
139 .dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower, 138 .dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower,
139 .fill_h2c_cmd = rtl92c_fill_h2c_cmd,
140}; 140};
141 141
142static struct rtl_mod_params rtl92cu_mod_params = { 142static struct rtl_mod_params rtl92cu_mod_params = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
index a1310abd0d54..262e1e4c6e5b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
@@ -49,5 +49,8 @@ bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
49u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw, 49u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
50 enum radio_path rfpath, u32 regaddr, u32 bitmask); 50 enum radio_path rfpath, u32 regaddr, u32 bitmask);
51void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw); 51void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
52void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw,
53 struct ieee80211_sta *sta,
54 u8 rssi_level);
52 55
53#endif 56#endif
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 76732b0cd221..a3532e077871 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -824,6 +824,7 @@ static void rtl_usb_stop(struct ieee80211_hw *hw)
824 824
825 /* should after adapter start and interrupt enable. */ 825 /* should after adapter start and interrupt enable. */
826 set_hal_stop(rtlhal); 826 set_hal_stop(rtlhal);
827 cancel_work_sync(&rtlpriv->works.fill_h2c_cmd);
827 /* Enable software */ 828 /* Enable software */
828 SET_USB_STOP(rtlusb); 829 SET_USB_STOP(rtlusb);
829 rtl_usb_deinit(hw); 830 rtl_usb_deinit(hw);
@@ -1026,6 +1027,16 @@ static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw,
1026 return false; 1027 return false;
1027} 1028}
1028 1029
1030static void rtl_fill_h2c_cmd_work_callback(struct work_struct *work)
1031{
1032 struct rtl_works *rtlworks =
1033 container_of(work, struct rtl_works, fill_h2c_cmd);
1034 struct ieee80211_hw *hw = rtlworks->hw;
1035 struct rtl_priv *rtlpriv = rtl_priv(hw);
1036
1037 rtlpriv->cfg->ops->fill_h2c_cmd(hw, H2C_RA_MASK, 5, rtlpriv->rate_mask);
1038}
1039
1029static struct rtl_intf_ops rtl_usb_ops = { 1040static struct rtl_intf_ops rtl_usb_ops = {
1030 .adapter_start = rtl_usb_start, 1041 .adapter_start = rtl_usb_start,
1031 .adapter_stop = rtl_usb_stop, 1042 .adapter_stop = rtl_usb_stop,
@@ -1057,6 +1068,8 @@ int rtl_usb_probe(struct usb_interface *intf,
1057 1068
1058 /* this spin lock must be initialized early */ 1069 /* this spin lock must be initialized early */
1059 spin_lock_init(&rtlpriv->locks.usb_lock); 1070 spin_lock_init(&rtlpriv->locks.usb_lock);
1071 INIT_WORK(&rtlpriv->works.fill_h2c_cmd,
1072 rtl_fill_h2c_cmd_work_callback);
1060 1073
1061 rtlpriv->usb_data_index = 0; 1074 rtlpriv->usb_data_index = 0;
1062 init_completion(&rtlpriv->firmware_loading_complete); 1075 init_completion(&rtlpriv->firmware_loading_complete);
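
The rtlwifi changes above defer the H2C rate-mask command to a work item because the command path does USB I/O and may sleep, which the comment in hw.c notes is not allowed from the atomic context that computes the mask. A minimal kernel-style sketch of the defer-to-workqueue pattern, using a hypothetical module rather than the rtlwifi structures:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/string.h>

struct demo_priv {
	struct work_struct fill_cmd_work;
	u8 rate_mask[5];
};

static struct demo_priv demo;

/* Runs in process context, where sleeping (e.g. USB control transfers)
 * is allowed. */
static void demo_fill_cmd_work_cb(struct work_struct *work)
{
	struct demo_priv *priv = container_of(work, struct demo_priv,
					      fill_cmd_work);

	pr_info("sending deferred command, first byte 0x%02x\n",
		priv->rate_mask[0]);
}

/* May be called from atomic context: just record the data and schedule. */
static void demo_update_rate_mask(const u8 *mask)
{
	memcpy(demo.rate_mask, mask, sizeof(demo.rate_mask));
	schedule_work(&demo.fill_cmd_work);
}

static int __init demo_init(void)
{
	static const u8 mask[5] = { 0x05, 0xf0, 0xff, 0x0f, 0x80 };

	INIT_WORK(&demo.fill_cmd_work, demo_fill_cmd_work_cb);
	demo_update_rate_mask(mask);
	return 0;
}

static void __exit demo_exit(void)
{
	/* As in rtl_usb_stop() above: make sure the work is not left running. */
	cancel_work_sync(&demo.fill_cmd_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");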
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 44328baa6389..cc03e7c87cbe 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1736,6 +1736,8 @@ struct rtl_hal_ops {
1736 void (*bt_wifi_media_status_notify) (struct ieee80211_hw *hw, 1736 void (*bt_wifi_media_status_notify) (struct ieee80211_hw *hw,
1737 bool mstate); 1737 bool mstate);
1738 void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw); 1738 void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw);
1739 void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id,
1740 u32 cmd_len, u8 *p_cmdbuffer);
1739}; 1741};
1740 1742
1741struct rtl_intf_ops { 1743struct rtl_intf_ops {
@@ -1869,6 +1871,7 @@ struct rtl_works {
1869 struct delayed_work fwevt_wq; 1871 struct delayed_work fwevt_wq;
1870 1872
1871 struct work_struct lps_change_work; 1873 struct work_struct lps_change_work;
1874 struct work_struct fill_h2c_cmd;
1872}; 1875};
1873 1876
1874struct rtl_debug { 1877struct rtl_debug {
@@ -2048,6 +2051,7 @@ struct rtl_priv {
2048 }; 2051 };
2049 }; 2052 };
2050 bool enter_ps; /* true when entering PS */ 2053 bool enter_ps; /* true when entering PS */
2054 u8 rate_mask[5];
2051 2055
2052 /*This must be the last item so 2056 /*This must be the last item so
2053 that it points to the data allocated 2057 that it points to the data allocated
diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c
index affdb3ec6225..4a0bbb13806b 100644
--- a/drivers/net/wireless/ti/wl12xx/scan.c
+++ b/drivers/net/wireless/ti/wl12xx/scan.c
@@ -310,7 +310,7 @@ static void wl12xx_adjust_channels(struct wl1271_cmd_sched_scan_config *cmd,
310 memcpy(cmd->channels_2, cmd_channels->channels_2, 310 memcpy(cmd->channels_2, cmd_channels->channels_2,
311 sizeof(cmd->channels_2)); 311 sizeof(cmd->channels_2));
312 memcpy(cmd->channels_5, cmd_channels->channels_5, 312 memcpy(cmd->channels_5, cmd_channels->channels_5,
313 sizeof(cmd->channels_2)); 313 sizeof(cmd->channels_5));
314 /* channels_4 are not supported, so no need to copy them */ 314 /* channels_4 are not supported, so no need to copy them */
315} 315}
316 316
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
index 222d03540200..9e5484a73667 100644
--- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -36,12 +36,12 @@
36#define WL127X_IFTYPE_SR_VER 3 36#define WL127X_IFTYPE_SR_VER 3
37#define WL127X_MAJOR_SR_VER 10 37#define WL127X_MAJOR_SR_VER 10
38#define WL127X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE 38#define WL127X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE
39#define WL127X_MINOR_SR_VER 115 39#define WL127X_MINOR_SR_VER 133
40/* minimum multi-role FW version for wl127x */ 40/* minimum multi-role FW version for wl127x */
41#define WL127X_IFTYPE_MR_VER 5 41#define WL127X_IFTYPE_MR_VER 5
42#define WL127X_MAJOR_MR_VER 7 42#define WL127X_MAJOR_MR_VER 7
43#define WL127X_SUBTYPE_MR_VER WLCORE_FW_VER_IGNORE 43#define WL127X_SUBTYPE_MR_VER WLCORE_FW_VER_IGNORE
44#define WL127X_MINOR_MR_VER 115 44#define WL127X_MINOR_MR_VER 42
45 45
46/* FW chip version for wl128x */ 46/* FW chip version for wl128x */
47#define WL128X_CHIP_VER 7 47#define WL128X_CHIP_VER 7
@@ -49,7 +49,7 @@
49#define WL128X_IFTYPE_SR_VER 3 49#define WL128X_IFTYPE_SR_VER 3
50#define WL128X_MAJOR_SR_VER 10 50#define WL128X_MAJOR_SR_VER 10
51#define WL128X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE 51#define WL128X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE
52#define WL128X_MINOR_SR_VER 115 52#define WL128X_MINOR_SR_VER 133
53/* minimum multi-role FW version for wl128x */ 53/* minimum multi-role FW version for wl128x */
54#define WL128X_IFTYPE_MR_VER 5 54#define WL128X_IFTYPE_MR_VER 5
55#define WL128X_MAJOR_MR_VER 7 55#define WL128X_MAJOR_MR_VER 7
diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c
index 09d944505ac0..2b642f8c9266 100644
--- a/drivers/net/wireless/ti/wl18xx/scan.c
+++ b/drivers/net/wireless/ti/wl18xx/scan.c
@@ -34,7 +34,7 @@ static void wl18xx_adjust_channels(struct wl18xx_cmd_scan_params *cmd,
34 memcpy(cmd->channels_2, cmd_channels->channels_2, 34 memcpy(cmd->channels_2, cmd_channels->channels_2,
35 sizeof(cmd->channels_2)); 35 sizeof(cmd->channels_2));
36 memcpy(cmd->channels_5, cmd_channels->channels_5, 36 memcpy(cmd->channels_5, cmd_channels->channels_5,
37 sizeof(cmd->channels_2)); 37 sizeof(cmd->channels_5));
38 /* channels_4 are not supported, so no need to copy them */ 38 /* channels_4 are not supported, so no need to copy them */
39} 39}
40 40
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 37984e6d4e99..8c20935d72c9 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -662,7 +662,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
662{ 662{
663 struct xenvif *vif = NULL, *tmp; 663 struct xenvif *vif = NULL, *tmp;
664 s8 status; 664 s8 status;
665 u16 irq, flags; 665 u16 flags;
666 struct xen_netif_rx_response *resp; 666 struct xen_netif_rx_response *resp;
667 struct sk_buff_head rxq; 667 struct sk_buff_head rxq;
668 struct sk_buff *skb; 668 struct sk_buff *skb;
@@ -771,13 +771,13 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
771 sco->meta_slots_used); 771 sco->meta_slots_used);
772 772
773 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret); 773 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
774 irq = vif->irq;
775 if (ret && list_empty(&vif->notify_list))
776 list_add_tail(&vif->notify_list, &notify);
777 774
778 xenvif_notify_tx_completion(vif); 775 xenvif_notify_tx_completion(vif);
779 776
780 xenvif_put(vif); 777 if (ret && list_empty(&vif->notify_list))
778 list_add_tail(&vif->notify_list, &notify);
779 else
780 xenvif_put(vif);
781 npo.meta_cons += sco->meta_slots_used; 781 npo.meta_cons += sco->meta_slots_used;
782 dev_kfree_skb(skb); 782 dev_kfree_skb(skb);
783 } 783 }
@@ -785,6 +785,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
785 list_for_each_entry_safe(vif, tmp, &notify, notify_list) { 785 list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
786 notify_remote_via_irq(vif->irq); 786 notify_remote_via_irq(vif->irq);
787 list_del_init(&vif->notify_list); 787 list_del_init(&vif->notify_list);
788 xenvif_put(vif);
788 } 789 }
789 790
790 /* More work to do? */ 791 /* More work to do? */
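The xen-netback fix above keeps the vif reference alive while the vif sits on the notify list and only drops it after notify_remote_via_irq(), instead of caching the irq number and putting the reference early. A rough sketch of that keep-a-reference-while-queued idea, with made-up names (struct item, process_pending), assuming linux/kref.h and linux/list.h:

	#include <linux/kref.h>
	#include <linux/list.h>
	#include <linux/slab.h>

	struct item {
		struct kref ref;
		struct list_head pending;
	};

	static void item_release(struct kref *ref)
	{
		kfree(container_of(ref, struct item, ref));
	}

	/* producer: either hand the reference over to the pending list or drop it now */
	static void queue_or_put(struct item *it, struct list_head *pending_list,
				 bool needs_work)
	{
		if (needs_work && list_empty(&it->pending))
			list_add_tail(&it->pending, pending_list);	/* list owns the ref */
		else
			kref_put(&it->ref, item_release);
	}

	/* consumer: the reference is dropped only after the item has been handled */
	static void process_pending(struct list_head *pending_list)
	{
		struct item *it, *tmp;

		list_for_each_entry_safe(it, tmp, pending_list, pending) {
			/* do the work that required the object to stay alive */
			list_del_init(&it->pending);
			kref_put(&it->ref, item_release);
		}
	}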
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 4775d4e61b88..74a852e4e41f 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -28,7 +28,7 @@ config NFC_WILINK
28 28
29config NFC_MEI_PHY 29config NFC_MEI_PHY
30 tristate "MEI bus NFC device support" 30 tristate "MEI bus NFC device support"
31 depends on INTEL_MEI_BUS_NFC && NFC_HCI 31 depends on INTEL_MEI && NFC_HCI
32 help 32 help
33 This adds support to use an mei bus nfc device. Select this if you 33 This adds support to use an mei bus nfc device. Select this if you
34 will use an HCI NFC driver for an NFC chip connected behind an 34 will use an HCI NFC driver for an NFC chip connected behind an
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index b8f8abc422f0..1201bdbfb791 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -64,6 +64,15 @@ int nfc_mei_phy_enable(void *phy_id)
64 return r; 64 return r;
65 } 65 }
66 66
67 r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy);
68 if (r) {
69 pr_err("MEY_PHY: Event cb registration failed\n");
70 mei_cl_disable_device(phy->device);
71 phy->powered = 0;
72
73 return r;
74 }
75
67 phy->powered = 1; 76 phy->powered = 1;
68 77
69 return 0; 78 return 0;
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index 1ad044dce7b6..cdf1bc53b257 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -43,24 +43,16 @@ static int microread_mei_probe(struct mei_cl_device *device,
43 return -ENOMEM; 43 return -ENOMEM;
44 } 44 }
45 45
46 r = mei_cl_register_event_cb(device, nfc_mei_event_cb, phy);
47 if (r) {
48 pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n");
49 goto err_out;
50 }
51
52 r = microread_probe(phy, &mei_phy_ops, LLC_NOP_NAME, 46 r = microread_probe(phy, &mei_phy_ops, LLC_NOP_NAME,
53 MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD, 47 MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD,
54 &phy->hdev); 48 &phy->hdev);
55 if (r < 0) 49 if (r < 0) {
56 goto err_out; 50 nfc_mei_phy_free(phy);
57
58 return 0;
59 51
60err_out: 52 return r;
61 nfc_mei_phy_free(phy); 53 }
62 54
63 return r; 55 return 0;
64} 56}
65 57
66static int microread_mei_remove(struct mei_cl_device *device) 58static int microread_mei_remove(struct mei_cl_device *device)
@@ -71,8 +63,6 @@ static int microread_mei_remove(struct mei_cl_device *device)
71 63
72 microread_remove(phy->hdev); 64 microread_remove(phy->hdev);
73 65
74 nfc_mei_phy_disable(phy);
75
76 nfc_mei_phy_free(phy); 66 nfc_mei_phy_free(phy);
77 67
78 return 0; 68 return 0;
diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c
index 1eb48848a35a..b5d3d18179eb 100644
--- a/drivers/nfc/pn544/mei.c
+++ b/drivers/nfc/pn544/mei.c
@@ -43,24 +43,16 @@ static int pn544_mei_probe(struct mei_cl_device *device,
43 return -ENOMEM; 43 return -ENOMEM;
44 } 44 }
45 45
46 r = mei_cl_register_event_cb(device, nfc_mei_event_cb, phy);
47 if (r) {
48 pr_err(PN544_DRIVER_NAME ": event cb registration failed\n");
49 goto err_out;
50 }
51
52 r = pn544_hci_probe(phy, &mei_phy_ops, LLC_NOP_NAME, 46 r = pn544_hci_probe(phy, &mei_phy_ops, LLC_NOP_NAME,
53 MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD, 47 MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD,
54 &phy->hdev); 48 &phy->hdev);
55 if (r < 0) 49 if (r < 0) {
56 goto err_out; 50 nfc_mei_phy_free(phy);
57
58 return 0;
59 51
60err_out: 52 return r;
61 nfc_mei_phy_free(phy); 53 }
62 54
63 return r; 55 return 0;
64} 56}
65 57
66static int pn544_mei_remove(struct mei_cl_device *device) 58static int pn544_mei_remove(struct mei_cl_device *device)
@@ -71,8 +63,6 @@ static int pn544_mei_remove(struct mei_cl_device *device)
71 63
72 pn544_hci_remove(phy->hdev); 64 pn544_hci_remove(phy->hdev);
73 65
74 nfc_mei_phy_disable(phy);
75
76 nfc_mei_phy_free(phy); 66 nfc_mei_phy_free(phy);
77 67
78 return 0; 68 return 0;
diff --git a/drivers/of/base.c b/drivers/of/base.c
index f53b992f060a..a6f584a7f4a1 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -192,14 +192,15 @@ EXPORT_SYMBOL(of_find_property);
192struct device_node *of_find_all_nodes(struct device_node *prev) 192struct device_node *of_find_all_nodes(struct device_node *prev)
193{ 193{
194 struct device_node *np; 194 struct device_node *np;
195 unsigned long flags;
195 196
196 raw_spin_lock(&devtree_lock); 197 raw_spin_lock_irqsave(&devtree_lock, flags);
197 np = prev ? prev->allnext : of_allnodes; 198 np = prev ? prev->allnext : of_allnodes;
198 for (; np != NULL; np = np->allnext) 199 for (; np != NULL; np = np->allnext)
199 if (of_node_get(np)) 200 if (of_node_get(np))
200 break; 201 break;
201 of_node_put(prev); 202 of_node_put(prev);
202 raw_spin_unlock(&devtree_lock); 203 raw_spin_unlock_irqrestore(&devtree_lock, flags);
203 return np; 204 return np;
204} 205}
205EXPORT_SYMBOL(of_find_all_nodes); 206EXPORT_SYMBOL(of_find_all_nodes);
@@ -421,8 +422,9 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
421 struct device_node *prev) 422 struct device_node *prev)
422{ 423{
423 struct device_node *next; 424 struct device_node *next;
425 unsigned long flags;
424 426
425 raw_spin_lock(&devtree_lock); 427 raw_spin_lock_irqsave(&devtree_lock, flags);
426 next = prev ? prev->sibling : node->child; 428 next = prev ? prev->sibling : node->child;
427 for (; next; next = next->sibling) { 429 for (; next; next = next->sibling) {
428 if (!__of_device_is_available(next)) 430 if (!__of_device_is_available(next))
@@ -431,7 +433,7 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
431 break; 433 break;
432 } 434 }
433 of_node_put(prev); 435 of_node_put(prev);
434 raw_spin_unlock(&devtree_lock); 436 raw_spin_unlock_irqrestore(&devtree_lock, flags);
435 return next; 437 return next;
436} 438}
437EXPORT_SYMBOL(of_get_next_available_child); 439EXPORT_SYMBOL(of_get_next_available_child);
@@ -735,13 +737,14 @@ EXPORT_SYMBOL_GPL(of_modalias_node);
735struct device_node *of_find_node_by_phandle(phandle handle) 737struct device_node *of_find_node_by_phandle(phandle handle)
736{ 738{
737 struct device_node *np; 739 struct device_node *np;
740 unsigned long flags;
738 741
739 raw_spin_lock(&devtree_lock); 742 raw_spin_lock_irqsave(&devtree_lock, flags);
740 for (np = of_allnodes; np; np = np->allnext) 743 for (np = of_allnodes; np; np = np->allnext)
741 if (np->phandle == handle) 744 if (np->phandle == handle)
742 break; 745 break;
743 of_node_get(np); 746 of_node_get(np);
744 raw_spin_unlock(&devtree_lock); 747 raw_spin_unlock_irqrestore(&devtree_lock, flags);
745 return np; 748 return np;
746} 749}
747EXPORT_SYMBOL(of_find_node_by_phandle); 750EXPORT_SYMBOL(of_find_node_by_phandle);
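The drivers/of hunks above convert plain raw_spin_lock() calls on devtree_lock to the _irqsave variants, so the lock can also be taken from interrupt context without risking a deadlock against an interrupted holder. The pattern in isolation, as a small sketch with placeholder names (my_lock, my_count):

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(my_lock);
	static int my_count;

	/* safe whether called from process context or from an interrupt handler */
	static int my_get_count(void)
	{
		unsigned long flags;
		int val;

		raw_spin_lock_irqsave(&my_lock, flags);		/* disables local IRQs, saves state */
		val = my_count;
		raw_spin_unlock_irqrestore(&my_lock, flags);	/* restores the previous IRQ state */

		return val;
	}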
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 9544cdc0d1af..e79e006eb9ab 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -811,6 +811,70 @@ int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev)
811 return pcidev->irq; 811 return pcidev->irq;
812} 812}
813 813
814static struct iosapic_info *first_isi = NULL;
815
816#ifdef CONFIG_64BIT
817int iosapic_serial_irq(int num)
818{
819 struct iosapic_info *isi = first_isi;
820 struct irt_entry *irte = NULL; /* only used if PAT PDC */
821 struct vector_info *vi;
822 int isi_line; /* line used by device */
823
824 /* lookup IRT entry for isi/slot/pin set */
825 irte = &irt_cell[num];
826
827 DBG_IRT("iosapic_serial_irq(): irte %p %x %x %x %x %x %x %x %x\n",
828 irte,
829 irte->entry_type,
830 irte->entry_length,
831 irte->polarity_trigger,
832 irte->src_bus_irq_devno,
833 irte->src_bus_id,
834 irte->src_seg_id,
835 irte->dest_iosapic_intin,
836 (u32) irte->dest_iosapic_addr);
837 isi_line = irte->dest_iosapic_intin;
838
839 /* get vector info for this input line */
840 vi = isi->isi_vector + isi_line;
841 DBG_IRT("iosapic_serial_irq: line %d vi 0x%p\n", isi_line, vi);
842
843 /* If this IRQ line has already been setup, skip it */
844 if (vi->irte)
845 goto out;
846
847 vi->irte = irte;
848
849 /*
850 * Allocate processor IRQ
851 *
852 * XXX/FIXME The txn_alloc_irq() code and related code should be
853 * moved to enable_irq(). That way we only allocate processor IRQ
854 * bits for devices that actually have drivers claiming them.
855 * Right now we assign an IRQ to every PCI device present,
856 * regardless of whether it's used or not.
857 */
858 vi->txn_irq = txn_alloc_irq(8);
859
860 if (vi->txn_irq < 0)
861 panic("I/O sapic: couldn't get TXN IRQ\n");
862
863 /* enable_irq() will use txn_* to program IRdT */
864 vi->txn_addr = txn_alloc_addr(vi->txn_irq);
865 vi->txn_data = txn_alloc_data(vi->txn_irq);
866
867 vi->eoi_addr = isi->addr + IOSAPIC_REG_EOI;
868 vi->eoi_data = cpu_to_le32(vi->txn_data);
869
870 cpu_claim_irq(vi->txn_irq, &iosapic_interrupt_type, vi);
871
872 out:
873
874 return vi->txn_irq;
875}
876#endif
877
814 878
815/* 879/*
816** squirrel away the I/O Sapic Version 880** squirrel away the I/O Sapic Version
@@ -877,6 +941,8 @@ void *iosapic_register(unsigned long hpa)
877 vip->irqline = (unsigned char) cnt; 941 vip->irqline = (unsigned char) cnt;
878 vip->iosapic = isi; 942 vip->iosapic = isi;
879 } 943 }
944 if (!first_isi)
945 first_isi = isi;
880 return isi; 946 return isi;
881} 947}
882 948
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 2ef7103270bb..1f05913ae677 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -668,7 +668,7 @@ lba_fixup_bus(struct pci_bus *bus)
668 BUG(); 668 BUG();
669 } 669 }
670 670
671 if (ldev->hba.elmmio_space.start) { 671 if (ldev->hba.elmmio_space.flags) {
672 err = request_resource(&iomem_resource, 672 err = request_resource(&iomem_resource,
673 &(ldev->hba.elmmio_space)); 673 &(ldev->hba.elmmio_space));
674 if (err < 0) { 674 if (err < 0) {
@@ -993,7 +993,7 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
993 993
994 case PAT_LMMIO: 994 case PAT_LMMIO:
995 /* used to fix up pre-initialized MEM BARs */ 995 /* used to fix up pre-initialized MEM BARs */
996 if (!lba_dev->hba.lmmio_space.start) { 996 if (!lba_dev->hba.lmmio_space.flags) {
997 sprintf(lba_dev->hba.lmmio_name, 997 sprintf(lba_dev->hba.lmmio_name,
998 "PCI%02x LMMIO", 998 "PCI%02x LMMIO",
999 (int)lba_dev->hba.bus_num.start); 999 (int)lba_dev->hba.bus_num.start);
@@ -1001,7 +1001,7 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
1001 io->start; 1001 io->start;
1002 r = &lba_dev->hba.lmmio_space; 1002 r = &lba_dev->hba.lmmio_space;
1003 r->name = lba_dev->hba.lmmio_name; 1003 r->name = lba_dev->hba.lmmio_name;
1004 } else if (!lba_dev->hba.elmmio_space.start) { 1004 } else if (!lba_dev->hba.elmmio_space.flags) {
1005 sprintf(lba_dev->hba.elmmio_name, 1005 sprintf(lba_dev->hba.elmmio_name,
1006 "PCI%02x ELMMIO", 1006 "PCI%02x ELMMIO",
1007 (int)lba_dev->hba.bus_num.start); 1007 (int)lba_dev->hba.bus_num.start);
@@ -1096,6 +1096,7 @@ lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
1096 r->name = "LBA PCI Busses"; 1096 r->name = "LBA PCI Busses";
1097 r->start = lba_num & 0xff; 1097 r->start = lba_num & 0xff;
1098 r->end = (lba_num>>8) & 0xff; 1098 r->end = (lba_num>>8) & 0xff;
1099 r->flags = IORESOURCE_BUS;
1099 1100
1100 /* Set up local PCI Bus resources - we don't need them for 1101 /* Set up local PCI Bus resources - we don't need them for
1101 ** Legacy boxes but it's nice to see in /proc/iomem. 1102 ** Legacy boxes but it's nice to see in /proc/iomem.
@@ -1494,7 +1495,7 @@ lba_driver_probe(struct parisc_device *dev)
1494 1495
1495 pci_add_resource_offset(&resources, &lba_dev->hba.io_space, 1496 pci_add_resource_offset(&resources, &lba_dev->hba.io_space,
1496 HBA_PORT_BASE(lba_dev->hba.hba_num)); 1497 HBA_PORT_BASE(lba_dev->hba.hba_num));
1497 if (lba_dev->hba.elmmio_space.start) 1498 if (lba_dev->hba.elmmio_space.flags)
1498 pci_add_resource_offset(&resources, &lba_dev->hba.elmmio_space, 1499 pci_add_resource_offset(&resources, &lba_dev->hba.elmmio_space,
1499 lba_dev->hba.lmmio_space_offset); 1500 lba_dev->hba.lmmio_space_offset);
1500 if (lba_dev->hba.lmmio_space.flags) 1501 if (lba_dev->hba.lmmio_space.flags)
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 24e12d4d1769..a50576081b34 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -71,7 +71,7 @@ config PARPORT_PC_FIFO
71 71
72config PARPORT_PC_SUPERIO 72config PARPORT_PC_SUPERIO
73 bool "SuperIO chipset support" 73 bool "SuperIO chipset support"
74 depends on PARPORT_PC 74 depends on PARPORT_PC && !PARISC
75 help 75 help
76 Saying Y here enables some probes for Super-IO chipsets in order to 76 Saying Y here enables some probes for Super-IO chipsets in order to
77 find out things like base addresses, IRQ lines and DMA channels. It 77 find out things like base addresses, IRQ lines and DMA channels. It
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index a5251cb5fb0c..6e3a60c78873 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -234,7 +234,7 @@ static int parport_PS2_supported(struct parport *pb)
234 234
235struct parport *parport_gsc_probe_port(unsigned long base, 235struct parport *parport_gsc_probe_port(unsigned long base,
236 unsigned long base_hi, int irq, 236 unsigned long base_hi, int irq,
237 int dma, struct pci_dev *dev) 237 int dma, struct parisc_device *padev)
238{ 238{
239 struct parport_gsc_private *priv; 239 struct parport_gsc_private *priv;
240 struct parport_operations *ops; 240 struct parport_operations *ops;
@@ -258,7 +258,6 @@ struct parport *parport_gsc_probe_port(unsigned long base,
258 priv->ctr_writable = 0xff; 258 priv->ctr_writable = 0xff;
259 priv->dma_buf = 0; 259 priv->dma_buf = 0;
260 priv->dma_handle = 0; 260 priv->dma_handle = 0;
261 priv->dev = dev;
262 p->base = base; 261 p->base = base;
263 p->base_hi = base_hi; 262 p->base_hi = base_hi;
264 p->irq = irq; 263 p->irq = irq;
@@ -282,6 +281,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
282 return NULL; 281 return NULL;
283 } 282 }
284 283
284 p->dev = &padev->dev;
285 p->base_hi = base_hi; 285 p->base_hi = base_hi;
286 p->modes = tmp.modes; 286 p->modes = tmp.modes;
287 p->size = (p->modes & PARPORT_MODE_EPP)?8:3; 287 p->size = (p->modes & PARPORT_MODE_EPP)?8:3;
@@ -373,7 +373,7 @@ static int parport_init_chip(struct parisc_device *dev)
373 } 373 }
374 374
375 p = parport_gsc_probe_port(port, 0, dev->irq, 375 p = parport_gsc_probe_port(port, 0, dev->irq,
376 /* PARPORT_IRQ_NONE */ PARPORT_DMA_NONE, NULL); 376 /* PARPORT_IRQ_NONE */ PARPORT_DMA_NONE, dev);
377 if (p) 377 if (p)
378 parport_count++; 378 parport_count++;
379 dev_set_drvdata(&dev->dev, p); 379 dev_set_drvdata(&dev->dev, p);
diff --git a/drivers/parport/parport_gsc.h b/drivers/parport/parport_gsc.h
index fc9c37c54022..812214768d27 100644
--- a/drivers/parport/parport_gsc.h
+++ b/drivers/parport/parport_gsc.h
@@ -217,6 +217,6 @@ extern void parport_gsc_dec_use_count(void);
217extern struct parport *parport_gsc_probe_port(unsigned long base, 217extern struct parport *parport_gsc_probe_port(unsigned long base,
218 unsigned long base_hi, 218 unsigned long base_hi,
219 int irq, int dma, 219 int irq, int dma,
220 struct pci_dev *dev); 220 struct parisc_device *padev);
221 221
222#endif /* __DRIVERS_PARPORT_PARPORT_GSC_H */ 222#endif /* __DRIVERS_PARPORT_PARPORT_GSC_H */
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 716aa93fff76..59df8575a48c 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -61,6 +61,7 @@ static DEFINE_MUTEX(bridge_mutex);
61static void handle_hotplug_event_bridge (acpi_handle, u32, void *); 61static void handle_hotplug_event_bridge (acpi_handle, u32, void *);
62static void acpiphp_sanitize_bus(struct pci_bus *bus); 62static void acpiphp_sanitize_bus(struct pci_bus *bus);
63static void acpiphp_set_hpp_values(struct pci_bus *bus); 63static void acpiphp_set_hpp_values(struct pci_bus *bus);
64static void hotplug_event_func(acpi_handle handle, u32 type, void *context);
64static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context); 65static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context);
65static void free_bridge(struct kref *kref); 66static void free_bridge(struct kref *kref);
66 67
@@ -147,7 +148,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
147 148
148 149
149static const struct acpi_dock_ops acpiphp_dock_ops = { 150static const struct acpi_dock_ops acpiphp_dock_ops = {
150 .handler = handle_hotplug_event_func, 151 .handler = hotplug_event_func,
151}; 152};
152 153
153/* Check whether the PCI device is managed by native PCIe hotplug driver */ 154/* Check whether the PCI device is managed by native PCIe hotplug driver */
@@ -179,6 +180,20 @@ static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
179 return true; 180 return true;
180} 181}
181 182
183static void acpiphp_dock_init(void *data)
184{
185 struct acpiphp_func *func = data;
186
187 get_bridge(func->slot->bridge);
188}
189
190static void acpiphp_dock_release(void *data)
191{
192 struct acpiphp_func *func = data;
193
194 put_bridge(func->slot->bridge);
195}
196
182/* callback routine to register each ACPI PCI slot object */ 197/* callback routine to register each ACPI PCI slot object */
183static acpi_status 198static acpi_status
184register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) 199register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
@@ -298,7 +313,8 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
298 */ 313 */
299 newfunc->flags &= ~FUNC_HAS_EJ0; 314 newfunc->flags &= ~FUNC_HAS_EJ0;
300 if (register_hotplug_dock_device(handle, 315 if (register_hotplug_dock_device(handle,
301 &acpiphp_dock_ops, newfunc)) 316 &acpiphp_dock_ops, newfunc,
317 acpiphp_dock_init, acpiphp_dock_release))
302 dbg("failed to register dock device\n"); 318 dbg("failed to register dock device\n");
303 319
304 /* we need to be notified when dock events happen 320 /* we need to be notified when dock events happen
@@ -670,6 +686,7 @@ static int __ref enable_device(struct acpiphp_slot *slot)
670 struct pci_bus *bus = slot->bridge->pci_bus; 686 struct pci_bus *bus = slot->bridge->pci_bus;
671 struct acpiphp_func *func; 687 struct acpiphp_func *func;
672 int num, max, pass; 688 int num, max, pass;
689 LIST_HEAD(add_list);
673 690
674 if (slot->flags & SLOT_ENABLED) 691 if (slot->flags & SLOT_ENABLED)
675 goto err_exit; 692 goto err_exit;
@@ -694,13 +711,15 @@ static int __ref enable_device(struct acpiphp_slot *slot)
694 max = pci_scan_bridge(bus, dev, max, pass); 711 max = pci_scan_bridge(bus, dev, max, pass);
695 if (pass && dev->subordinate) { 712 if (pass && dev->subordinate) {
696 check_hotplug_bridge(slot, dev); 713 check_hotplug_bridge(slot, dev);
697 pci_bus_size_bridges(dev->subordinate); 714 pcibios_resource_survey_bus(dev->subordinate);
715 __pci_bus_size_bridges(dev->subordinate,
716 &add_list);
698 } 717 }
699 } 718 }
700 } 719 }
701 } 720 }
702 721
703 pci_bus_assign_resources(bus); 722 __pci_bus_assign_resources(bus, &add_list, NULL);
704 acpiphp_sanitize_bus(bus); 723 acpiphp_sanitize_bus(bus);
705 acpiphp_set_hpp_values(bus); 724 acpiphp_set_hpp_values(bus);
706 acpiphp_set_acpi_region(slot); 725 acpiphp_set_acpi_region(slot);
@@ -1065,22 +1084,12 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type,
1065 alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_bridge); 1084 alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_bridge);
1066} 1085}
1067 1086
1068static void _handle_hotplug_event_func(struct work_struct *work) 1087static void hotplug_event_func(acpi_handle handle, u32 type, void *context)
1069{ 1088{
1070 struct acpiphp_func *func; 1089 struct acpiphp_func *func = context;
1071 char objname[64]; 1090 char objname[64];
1072 struct acpi_buffer buffer = { .length = sizeof(objname), 1091 struct acpi_buffer buffer = { .length = sizeof(objname),
1073 .pointer = objname }; 1092 .pointer = objname };
1074 struct acpi_hp_work *hp_work;
1075 acpi_handle handle;
1076 u32 type;
1077
1078 hp_work = container_of(work, struct acpi_hp_work, work);
1079 handle = hp_work->handle;
1080 type = hp_work->type;
1081 func = (struct acpiphp_func *)hp_work->context;
1082
1083 acpi_scan_lock_acquire();
1084 1093
1085 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); 1094 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
1086 1095
@@ -1113,6 +1122,18 @@ static void _handle_hotplug_event_func(struct work_struct *work)
1113 warn("notify_handler: unknown event type 0x%x for %s\n", type, objname); 1122 warn("notify_handler: unknown event type 0x%x for %s\n", type, objname);
1114 break; 1123 break;
1115 } 1124 }
1125}
1126
1127static void _handle_hotplug_event_func(struct work_struct *work)
1128{
1129 struct acpi_hp_work *hp_work;
1130 struct acpiphp_func *func;
1131
1132 hp_work = container_of(work, struct acpi_hp_work, work);
1133 func = hp_work->context;
1134 acpi_scan_lock_acquire();
1135
1136 hotplug_event_func(hp_work->handle, hp_work->type, func);
1116 1137
1117 acpi_scan_lock_release(); 1138 acpi_scan_lock_release();
1118 kfree(hp_work); /* allocated in handle_hotplug_event_func */ 1139 kfree(hp_work); /* allocated in handle_hotplug_event_func */
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 68678ed76b0d..d1182c4a754e 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -202,6 +202,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
202 struct resource *res, unsigned int reg); 202 struct resource *res, unsigned int reg);
203int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type); 203int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
204void pci_configure_ari(struct pci_dev *dev); 204void pci_configure_ari(struct pci_dev *dev);
205void __ref __pci_bus_size_bridges(struct pci_bus *bus,
206 struct list_head *realloc_head);
207void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
208 struct list_head *realloc_head,
209 struct list_head *fail_head);
205 210
206/** 211/**
207 * pci_ari_enabled - query ARI forwarding status 212 * pci_ari_enabled - query ARI forwarding status
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 8ec8b4f48560..0f4554e48cc5 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -580,6 +580,7 @@ struct aer_recover_entry
580 u8 devfn; 580 u8 devfn;
581 u16 domain; 581 u16 domain;
582 int severity; 582 int severity;
583 struct aer_capability_regs *regs;
583}; 584};
584 585
585static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry, 586static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
@@ -593,7 +594,7 @@ static DEFINE_SPINLOCK(aer_recover_ring_lock);
593static DECLARE_WORK(aer_recover_work, aer_recover_work_func); 594static DECLARE_WORK(aer_recover_work, aer_recover_work_func);
594 595
595void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, 596void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
596 int severity) 597 int severity, struct aer_capability_regs *aer_regs)
597{ 598{
598 unsigned long flags; 599 unsigned long flags;
599 struct aer_recover_entry entry = { 600 struct aer_recover_entry entry = {
@@ -601,6 +602,7 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
601 .devfn = devfn, 602 .devfn = devfn,
602 .domain = domain, 603 .domain = domain,
603 .severity = severity, 604 .severity = severity,
605 .regs = aer_regs,
604 }; 606 };
605 607
606 spin_lock_irqsave(&aer_recover_ring_lock, flags); 608 spin_lock_irqsave(&aer_recover_ring_lock, flags);
@@ -627,6 +629,7 @@ static void aer_recover_work_func(struct work_struct *work)
627 PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn)); 629 PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
628 continue; 630 continue;
629 } 631 }
632 cper_print_aer(pdev, entry.severity, entry.regs);
630 do_recovery(pdev, entry.severity); 633 do_recovery(pdev, entry.severity);
631 pci_dev_put(pdev); 634 pci_dev_put(pdev);
632 } 635 }
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 5ab14251839d..2c7c9f5f592c 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -220,7 +220,7 @@ int cper_severity_to_aer(int cper_severity)
220} 220}
221EXPORT_SYMBOL_GPL(cper_severity_to_aer); 221EXPORT_SYMBOL_GPL(cper_severity_to_aer);
222 222
223void cper_print_aer(const char *prefix, struct pci_dev *dev, int cper_severity, 223void cper_print_aer(struct pci_dev *dev, int cper_severity,
224 struct aer_capability_regs *aer) 224 struct aer_capability_regs *aer)
225{ 225{
226 int aer_severity, layer, agent, status_strs_size, tlp_header_valid = 0; 226 int aer_severity, layer, agent, status_strs_size, tlp_header_valid = 0;
@@ -244,7 +244,7 @@ void cper_print_aer(const char *prefix, struct pci_dev *dev, int cper_severity,
244 agent = AER_GET_AGENT(aer_severity, status); 244 agent = AER_GET_AGENT(aer_severity, status);
245 dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", 245 dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n",
246 status, mask); 246 status, mask);
247 cper_print_bits(prefix, status, status_strs, status_strs_size); 247 cper_print_bits("", status, status_strs, status_strs_size);
248 dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n", 248 dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n",
249 aer_error_layer[layer], aer_agent_string[agent]); 249 aer_error_layer[layer], aer_agent_string[agent]);
250 if (aer_severity != AER_CORRECTABLE) 250 if (aer_severity != AER_CORRECTABLE)
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 16abaaa1f83c..d254e2379533 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1044,7 +1044,7 @@ handle_done:
1044 ; 1044 ;
1045} 1045}
1046 1046
1047static void __ref __pci_bus_size_bridges(struct pci_bus *bus, 1047void __ref __pci_bus_size_bridges(struct pci_bus *bus,
1048 struct list_head *realloc_head) 1048 struct list_head *realloc_head)
1049{ 1049{
1050 struct pci_dev *dev; 1050 struct pci_dev *dev;
@@ -1115,9 +1115,9 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
1115} 1115}
1116EXPORT_SYMBOL(pci_bus_size_bridges); 1116EXPORT_SYMBOL(pci_bus_size_bridges);
1117 1117
1118static void __ref __pci_bus_assign_resources(const struct pci_bus *bus, 1118void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
1119 struct list_head *realloc_head, 1119 struct list_head *realloc_head,
1120 struct list_head *fail_head) 1120 struct list_head *fail_head)
1121{ 1121{
1122 struct pci_bus *b; 1122 struct pci_bus *b;
1123 struct pci_dev *dev; 1123 struct pci_dev *dev;
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
index 791a6719d8a9..8cd90e7e945a 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
@@ -2357,27 +2357,48 @@ static const unsigned int sdhi3_wp_mux[] = {
2357}; 2357};
2358/* - USB0 ------------------------------------------------------------------- */ 2358/* - USB0 ------------------------------------------------------------------- */
2359static const unsigned int usb0_pins[] = { 2359static const unsigned int usb0_pins[] = {
2360 /* OVC */ 2360 /* PENC */
2361 150, 154, 2361 154,
2362}; 2362};
2363static const unsigned int usb0_mux[] = { 2363static const unsigned int usb0_mux[] = {
2364 USB_OVC0_MARK, USB_PENC0_MARK, 2364 USB_PENC0_MARK,
2365};
2366static const unsigned int usb0_ovc_pins[] = {
2367 /* USB_OVC */
2368 150
2369};
2370static const unsigned int usb0_ovc_mux[] = {
2371 USB_OVC0_MARK,
2365}; 2372};
2366/* - USB1 ------------------------------------------------------------------- */ 2373/* - USB1 ------------------------------------------------------------------- */
2367static const unsigned int usb1_pins[] = { 2374static const unsigned int usb1_pins[] = {
2368 /* OVC */ 2375 /* PENC */
2369 152, 155, 2376 155,
2370}; 2377};
2371static const unsigned int usb1_mux[] = { 2378static const unsigned int usb1_mux[] = {
2372 USB_OVC1_MARK, USB_PENC1_MARK, 2379 USB_PENC1_MARK,
2380};
2381static const unsigned int usb1_ovc_pins[] = {
2382 /* USB_OVC */
2383 152,
2384};
2385static const unsigned int usb1_ovc_mux[] = {
2386 USB_OVC1_MARK,
2373}; 2387};
2374/* - USB2 ------------------------------------------------------------------- */ 2388/* - USB2 ------------------------------------------------------------------- */
2375static const unsigned int usb2_pins[] = { 2389static const unsigned int usb2_pins[] = {
2376 /* OVC, PENC */ 2390 /* PENC */
2377 125, 156, 2391 156,
2378}; 2392};
2379static const unsigned int usb2_mux[] = { 2393static const unsigned int usb2_mux[] = {
2380 USB_OVC2_MARK, USB_PENC2_MARK, 2394 USB_PENC2_MARK,
2395};
2396static const unsigned int usb2_ovc_pins[] = {
2397 /* USB_OVC */
2398 125,
2399};
2400static const unsigned int usb2_ovc_mux[] = {
2401 USB_OVC2_MARK,
2381}; 2402};
2382 2403
2383static const struct sh_pfc_pin_group pinmux_groups[] = { 2404static const struct sh_pfc_pin_group pinmux_groups[] = {
@@ -2501,8 +2522,11 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
2501 SH_PFC_PIN_GROUP(sdhi3_cd), 2522 SH_PFC_PIN_GROUP(sdhi3_cd),
2502 SH_PFC_PIN_GROUP(sdhi3_wp), 2523 SH_PFC_PIN_GROUP(sdhi3_wp),
2503 SH_PFC_PIN_GROUP(usb0), 2524 SH_PFC_PIN_GROUP(usb0),
2525 SH_PFC_PIN_GROUP(usb0_ovc),
2504 SH_PFC_PIN_GROUP(usb1), 2526 SH_PFC_PIN_GROUP(usb1),
2527 SH_PFC_PIN_GROUP(usb1_ovc),
2505 SH_PFC_PIN_GROUP(usb2), 2528 SH_PFC_PIN_GROUP(usb2),
2529 SH_PFC_PIN_GROUP(usb2_ovc),
2506}; 2530};
2507 2531
2508static const char * const du0_groups[] = { 2532static const char * const du0_groups[] = {
@@ -2683,14 +2707,17 @@ static const char * const sdhi3_groups[] = {
2683 2707
2684static const char * const usb0_groups[] = { 2708static const char * const usb0_groups[] = {
2685 "usb0", 2709 "usb0",
2710 "usb0_ovc",
2686}; 2711};
2687 2712
2688static const char * const usb1_groups[] = { 2713static const char * const usb1_groups[] = {
2689 "usb1", 2714 "usb1",
2715 "usb1_ovc",
2690}; 2716};
2691 2717
2692static const char * const usb2_groups[] = { 2718static const char * const usb2_groups[] = {
2693 "usb2", 2719 "usb2",
2720 "usb2_ovc",
2694}; 2721};
2695 2722
2696static const struct sh_pfc_function pinmux_functions[] = { 2723static const struct sh_pfc_function pinmux_functions[] = {
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 8df0c5a21be2..d111c8687f9b 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -703,7 +703,7 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
703 } 703 }
704 rfkill_init_sw_state(gps_rfkill, 704 rfkill_init_sw_state(gps_rfkill,
705 hp_wmi_get_sw_state(HPWMI_GPS)); 705 hp_wmi_get_sw_state(HPWMI_GPS));
706 rfkill_set_hw_state(bluetooth_rfkill, 706 rfkill_set_hw_state(gps_rfkill,
707 hp_wmi_get_hw_state(HPWMI_GPS)); 707 hp_wmi_get_hw_state(HPWMI_GPS));
708 err = rfkill_register(gps_rfkill); 708 err = rfkill_register(gps_rfkill);
709 if (err) 709 if (err)
diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
index bea94510ad2d..71a2559278d7 100644
--- a/drivers/ptp/ptp_pch.c
+++ b/drivers/ptp/ptp_pch.c
@@ -628,9 +628,10 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
628 628
629 chip->caps = ptp_pch_caps; 629 chip->caps = ptp_pch_caps;
630 chip->ptp_clock = ptp_clock_register(&chip->caps, &pdev->dev); 630 chip->ptp_clock = ptp_clock_register(&chip->caps, &pdev->dev);
631 631 if (IS_ERR(chip->ptp_clock)) {
632 if (IS_ERR(chip->ptp_clock)) 632 ret = PTR_ERR(chip->ptp_clock);
633 return PTR_ERR(chip->ptp_clock); 633 goto err_ptp_clock_reg;
634 }
634 635
635 spin_lock_init(&chip->register_lock); 636 spin_lock_init(&chip->register_lock);
636 637
@@ -669,6 +670,7 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
669 670
670err_req_irq: 671err_req_irq:
671 ptp_clock_unregister(chip->ptp_clock); 672 ptp_clock_unregister(chip->ptp_clock);
673err_ptp_clock_reg:
672 iounmap(chip->regs); 674 iounmap(chip->regs);
673 chip->regs = NULL; 675 chip->regs = NULL;
674 676
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 6e5017841582..815d6df8bd5f 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1539,7 +1539,10 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev)
1539} 1539}
1540 1540
1541/** 1541/**
1542 * Balance enable_count of each GPIO and actual GPIO pin control. 1542 * regulator_ena_gpio_ctrl - balance enable_count of each GPIO and actual GPIO pin control
1543 * @rdev: regulator_dev structure
1544 * @enable: enable GPIO at initial use?
1545 *
1543 * GPIO is enabled in case of initial use. (enable_count is 0) 1546 * GPIO is enabled in case of initial use. (enable_count is 0)
1544 * GPIO is disabled when it is not shared any more. (enable_count <= 1) 1547 * GPIO is disabled when it is not shared any more. (enable_count <= 1)
1545 */ 1548 */
@@ -2702,7 +2705,7 @@ EXPORT_SYMBOL_GPL(regulator_get_voltage);
2702/** 2705/**
2703 * regulator_set_current_limit - set regulator output current limit 2706 * regulator_set_current_limit - set regulator output current limit
2704 * @regulator: regulator source 2707 * @regulator: regulator source
2705 * @min_uA: Minimuum supported current in uA 2708 * @min_uA: Minimum supported current in uA
2706 * @max_uA: Maximum supported current in uA 2709 * @max_uA: Maximum supported current in uA
2707 * 2710 *
2708 * Sets current sink to the desired output current. This can be set during 2711 * Sets current sink to the desired output current. This can be set during
diff --git a/drivers/regulator/dbx500-prcmu.c b/drivers/regulator/dbx500-prcmu.c
index 89bd2faaef8c..ce89f7848a57 100644
--- a/drivers/regulator/dbx500-prcmu.c
+++ b/drivers/regulator/dbx500-prcmu.c
@@ -24,18 +24,6 @@
24static int power_state_active_cnt; /* will initialize to zero */ 24static int power_state_active_cnt; /* will initialize to zero */
25static DEFINE_SPINLOCK(power_state_active_lock); 25static DEFINE_SPINLOCK(power_state_active_lock);
26 26
27int power_state_active_get(void)
28{
29 unsigned long flags;
30 int cnt;
31
32 spin_lock_irqsave(&power_state_active_lock, flags);
33 cnt = power_state_active_cnt;
34 spin_unlock_irqrestore(&power_state_active_lock, flags);
35
36 return cnt;
37}
38
39void power_state_active_enable(void) 27void power_state_active_enable(void)
40{ 28{
41 unsigned long flags; 29 unsigned long flags;
@@ -65,6 +53,18 @@ out:
65 53
66#ifdef CONFIG_REGULATOR_DEBUG 54#ifdef CONFIG_REGULATOR_DEBUG
67 55
56static int power_state_active_get(void)
57{
58 unsigned long flags;
59 int cnt;
60
61 spin_lock_irqsave(&power_state_active_lock, flags);
62 cnt = power_state_active_cnt;
63 spin_unlock_irqrestore(&power_state_active_lock, flags);
64
65 return cnt;
66}
67
68static struct ux500_regulator_debug { 68static struct ux500_regulator_debug {
69 struct dentry *dir; 69 struct dentry *dir;
70 struct dentry *status_file; 70 struct dentry *status_file;
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 92ceed0fc65e..3ae44ac12a94 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -840,7 +840,7 @@ static int palmas_regulators_probe(struct platform_device *pdev)
840 break; 840 break;
841 } 841 }
842 842
843 if ((id == PALMAS_REG_SMPS6) && (id == PALMAS_REG_SMPS8)) 843 if ((id == PALMAS_REG_SMPS6) || (id == PALMAS_REG_SMPS8))
844 ramp_delay_support = true; 844 ramp_delay_support = true;
845 845
846 if (ramp_delay_support) { 846 if (ramp_delay_support) {
@@ -878,7 +878,7 @@ static int palmas_regulators_probe(struct platform_device *pdev)
878 pmic->desc[id].vsel_mask = SMPS10_VSEL; 878 pmic->desc[id].vsel_mask = SMPS10_VSEL;
879 pmic->desc[id].enable_reg = 879 pmic->desc[id].enable_reg =
880 PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, 880 PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
881 PALMAS_SMPS10_STATUS); 881 PALMAS_SMPS10_CTRL);
882 pmic->desc[id].enable_mask = SMPS10_BOOST_EN; 882 pmic->desc[id].enable_mask = SMPS10_BOOST_EN;
883 pmic->desc[id].min_uV = 3750000; 883 pmic->desc[id].min_uV = 3750000;
884 pmic->desc[id].uV_step = 1250000; 884 pmic->desc[id].uV_step = 1250000;
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index d8fa37d5c734..2c9155b66f09 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -439,7 +439,7 @@ static int tps6586x_regulator_remove(struct platform_device *pdev)
439 439
440static struct platform_driver tps6586x_regulator_driver = { 440static struct platform_driver tps6586x_regulator_driver = {
441 .driver = { 441 .driver = {
442 .name = "tps6586x-pmic", 442 .name = "tps6586x-regulator",
443 .owner = THIS_MODULE, 443 .owner = THIS_MODULE,
444 }, 444 },
445 .probe = tps6586x_regulator_probe, 445 .probe = tps6586x_regulator_probe,
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 0eab77b22340..f296f3f7db9b 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -25,6 +25,7 @@
25#include <linux/rtc.h> 25#include <linux/rtc.h>
26#include <linux/bcd.h> 26#include <linux/bcd.h>
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28#include <linux/spinlock.h>
28#include <linux/ioctl.h> 29#include <linux/ioctl.h>
29#include <linux/completion.h> 30#include <linux/completion.h>
30#include <linux/io.h> 31#include <linux/io.h>
@@ -42,10 +43,65 @@
42 43
43#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */ 44#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */
44 45
46struct at91_rtc_config {
47 bool use_shadow_imr;
48};
49
50static const struct at91_rtc_config *at91_rtc_config;
45static DECLARE_COMPLETION(at91_rtc_updated); 51static DECLARE_COMPLETION(at91_rtc_updated);
46static unsigned int at91_alarm_year = AT91_RTC_EPOCH; 52static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
47static void __iomem *at91_rtc_regs; 53static void __iomem *at91_rtc_regs;
48static int irq; 54static int irq;
55static DEFINE_SPINLOCK(at91_rtc_lock);
56static u32 at91_rtc_shadow_imr;
57
58static void at91_rtc_write_ier(u32 mask)
59{
60 unsigned long flags;
61
62 spin_lock_irqsave(&at91_rtc_lock, flags);
63 at91_rtc_shadow_imr |= mask;
64 at91_rtc_write(AT91_RTC_IER, mask);
65 spin_unlock_irqrestore(&at91_rtc_lock, flags);
66}
67
68static void at91_rtc_write_idr(u32 mask)
69{
70 unsigned long flags;
71
72 spin_lock_irqsave(&at91_rtc_lock, flags);
73 at91_rtc_write(AT91_RTC_IDR, mask);
74 /*
75 * Register read back (of any RTC-register) needed to make sure
76 * IDR-register write has reached the peripheral before updating
77 * shadow mask.
78 *
79 * Note that there is still a possibility that the mask is updated
80 * before interrupts have actually been disabled in hardware. The only
81 * way to be certain would be to poll the IMR-register, which is is
82 * the very register we are trying to emulate. The register read back
83 * is a reasonable heuristic.
84 */
85 at91_rtc_read(AT91_RTC_SR);
86 at91_rtc_shadow_imr &= ~mask;
87 spin_unlock_irqrestore(&at91_rtc_lock, flags);
88}
89
90static u32 at91_rtc_read_imr(void)
91{
92 unsigned long flags;
93 u32 mask;
94
95 if (at91_rtc_config->use_shadow_imr) {
96 spin_lock_irqsave(&at91_rtc_lock, flags);
97 mask = at91_rtc_shadow_imr;
98 spin_unlock_irqrestore(&at91_rtc_lock, flags);
99 } else {
100 mask = at91_rtc_read(AT91_RTC_IMR);
101 }
102
103 return mask;
104}
49 105
50/* 106/*
51 * Decode time/date into rtc_time structure 107 * Decode time/date into rtc_time structure
@@ -110,9 +166,9 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
110 cr = at91_rtc_read(AT91_RTC_CR); 166 cr = at91_rtc_read(AT91_RTC_CR);
111 at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM); 167 at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
112 168
113 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD); 169 at91_rtc_write_ier(AT91_RTC_ACKUPD);
114 wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */ 170 wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */
115 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD); 171 at91_rtc_write_idr(AT91_RTC_ACKUPD);
116 172
117 at91_rtc_write(AT91_RTC_TIMR, 173 at91_rtc_write(AT91_RTC_TIMR,
118 bin2bcd(tm->tm_sec) << 0 174 bin2bcd(tm->tm_sec) << 0
@@ -144,7 +200,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
144 tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); 200 tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
145 tm->tm_year = at91_alarm_year - 1900; 201 tm->tm_year = at91_alarm_year - 1900;
146 202
147 alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM) 203 alrm->enabled = (at91_rtc_read_imr() & AT91_RTC_ALARM)
148 ? 1 : 0; 204 ? 1 : 0;
149 205
150 dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, 206 dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -169,7 +225,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
169 tm.tm_min = alrm->time.tm_min; 225 tm.tm_min = alrm->time.tm_min;
170 tm.tm_sec = alrm->time.tm_sec; 226 tm.tm_sec = alrm->time.tm_sec;
171 227
172 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); 228 at91_rtc_write_idr(AT91_RTC_ALARM);
173 at91_rtc_write(AT91_RTC_TIMALR, 229 at91_rtc_write(AT91_RTC_TIMALR,
174 bin2bcd(tm.tm_sec) << 0 230 bin2bcd(tm.tm_sec) << 0
175 | bin2bcd(tm.tm_min) << 8 231 | bin2bcd(tm.tm_min) << 8
@@ -182,7 +238,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
182 238
183 if (alrm->enabled) { 239 if (alrm->enabled) {
184 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); 240 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
185 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); 241 at91_rtc_write_ier(AT91_RTC_ALARM);
186 } 242 }
187 243
188 dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, 244 dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -198,9 +254,9 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
198 254
199 if (enabled) { 255 if (enabled) {
200 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); 256 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
201 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); 257 at91_rtc_write_ier(AT91_RTC_ALARM);
202 } else 258 } else
203 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); 259 at91_rtc_write_idr(AT91_RTC_ALARM);
204 260
205 return 0; 261 return 0;
206} 262}
@@ -209,7 +265,7 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
209 */ 265 */
210static int at91_rtc_proc(struct device *dev, struct seq_file *seq) 266static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
211{ 267{
212 unsigned long imr = at91_rtc_read(AT91_RTC_IMR); 268 unsigned long imr = at91_rtc_read_imr();
213 269
214 seq_printf(seq, "update_IRQ\t: %s\n", 270 seq_printf(seq, "update_IRQ\t: %s\n",
215 (imr & AT91_RTC_ACKUPD) ? "yes" : "no"); 271 (imr & AT91_RTC_ACKUPD) ? "yes" : "no");
@@ -229,7 +285,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
229 unsigned int rtsr; 285 unsigned int rtsr;
230 unsigned long events = 0; 286 unsigned long events = 0;
231 287
232 rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR); 288 rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr();
233 if (rtsr) { /* this interrupt is shared! Is it ours? */ 289 if (rtsr) { /* this interrupt is shared! Is it ours? */
234 if (rtsr & AT91_RTC_ALARM) 290 if (rtsr & AT91_RTC_ALARM)
235 events |= (RTC_AF | RTC_IRQF); 291 events |= (RTC_AF | RTC_IRQF);
@@ -250,6 +306,43 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
250 return IRQ_NONE; /* not handled */ 306 return IRQ_NONE; /* not handled */
251} 307}
252 308
309static const struct at91_rtc_config at91rm9200_config = {
310};
311
312static const struct at91_rtc_config at91sam9x5_config = {
313 .use_shadow_imr = true,
314};
315
316#ifdef CONFIG_OF
317static const struct of_device_id at91_rtc_dt_ids[] = {
318 {
319 .compatible = "atmel,at91rm9200-rtc",
320 .data = &at91rm9200_config,
321 }, {
322 .compatible = "atmel,at91sam9x5-rtc",
323 .data = &at91sam9x5_config,
324 }, {
325 /* sentinel */
326 }
327};
328MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids);
329#endif
330
331static const struct at91_rtc_config *
332at91_rtc_get_config(struct platform_device *pdev)
333{
334 const struct of_device_id *match;
335
336 if (pdev->dev.of_node) {
337 match = of_match_node(at91_rtc_dt_ids, pdev->dev.of_node);
338 if (!match)
339 return NULL;
340 return (const struct at91_rtc_config *)match->data;
341 }
342
343 return &at91rm9200_config;
344}
345
253static const struct rtc_class_ops at91_rtc_ops = { 346static const struct rtc_class_ops at91_rtc_ops = {
254 .read_time = at91_rtc_readtime, 347 .read_time = at91_rtc_readtime,
255 .set_time = at91_rtc_settime, 348 .set_time = at91_rtc_settime,
@@ -268,6 +361,10 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
268 struct resource *regs; 361 struct resource *regs;
269 int ret = 0; 362 int ret = 0;
270 363
364 at91_rtc_config = at91_rtc_get_config(pdev);
365 if (!at91_rtc_config)
366 return -ENODEV;
367
271 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 368 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
272 if (!regs) { 369 if (!regs) {
273 dev_err(&pdev->dev, "no mmio resource defined\n"); 370 dev_err(&pdev->dev, "no mmio resource defined\n");
@@ -290,7 +387,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
290 at91_rtc_write(AT91_RTC_MR, 0); /* 24 hour mode */ 387 at91_rtc_write(AT91_RTC_MR, 0); /* 24 hour mode */
291 388
292 /* Disable all interrupts */ 389 /* Disable all interrupts */
293 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | 390 at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM |
294 AT91_RTC_SECEV | AT91_RTC_TIMEV | 391 AT91_RTC_SECEV | AT91_RTC_TIMEV |
295 AT91_RTC_CALEV); 392 AT91_RTC_CALEV);
296 393
@@ -335,7 +432,7 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
335 struct rtc_device *rtc = platform_get_drvdata(pdev); 432 struct rtc_device *rtc = platform_get_drvdata(pdev);
336 433
337 /* Disable all interrupts */ 434 /* Disable all interrupts */
338 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | 435 at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM |
339 AT91_RTC_SECEV | AT91_RTC_TIMEV | 436 AT91_RTC_SECEV | AT91_RTC_TIMEV |
340 AT91_RTC_CALEV); 437 AT91_RTC_CALEV);
341 free_irq(irq, pdev); 438 free_irq(irq, pdev);
@@ -358,13 +455,13 @@ static int at91_rtc_suspend(struct device *dev)
358 /* this IRQ is shared with DBGU and other hardware which isn't 455 /* this IRQ is shared with DBGU and other hardware which isn't
359 * necessarily doing PM like we are... 456 * necessarily doing PM like we are...
360 */ 457 */
361 at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR) 458 at91_rtc_imr = at91_rtc_read_imr()
362 & (AT91_RTC_ALARM|AT91_RTC_SECEV); 459 & (AT91_RTC_ALARM|AT91_RTC_SECEV);
363 if (at91_rtc_imr) { 460 if (at91_rtc_imr) {
364 if (device_may_wakeup(dev)) 461 if (device_may_wakeup(dev))
365 enable_irq_wake(irq); 462 enable_irq_wake(irq);
366 else 463 else
367 at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr); 464 at91_rtc_write_idr(at91_rtc_imr);
368 } 465 }
369 return 0; 466 return 0;
370} 467}
@@ -375,7 +472,7 @@ static int at91_rtc_resume(struct device *dev)
375 if (device_may_wakeup(dev)) 472 if (device_may_wakeup(dev))
376 disable_irq_wake(irq); 473 disable_irq_wake(irq);
377 else 474 else
378 at91_rtc_write(AT91_RTC_IER, at91_rtc_imr); 475 at91_rtc_write_ier(at91_rtc_imr);
379 } 476 }
380 return 0; 477 return 0;
381} 478}
@@ -383,12 +480,6 @@ static int at91_rtc_resume(struct device *dev)
383 480
384static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume); 481static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume);
385 482
386static const struct of_device_id at91_rtc_dt_ids[] = {
387 { .compatible = "atmel,at91rm9200-rtc" },
388 { /* sentinel */ }
389};
390MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids);
391
392static struct platform_driver at91_rtc_driver = { 483static struct platform_driver at91_rtc_driver = {
393 .remove = __exit_p(at91_rtc_remove), 484 .remove = __exit_p(at91_rtc_remove),
394 .driver = { 485 .driver = {
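The at91 RTC change above selects per-SoC behaviour (the shadow IMR workaround) by attaching a config structure to each of_device_id entry and resolving it with of_match_node() at probe time. A condensed sketch of that lookup, using hypothetical names (my_config, my_dt_ids) and assuming linux/of.h:

	#include <linux/of.h>
	#include <linux/platform_device.h>

	struct my_config {
		bool use_shadow_imr;
	};

	static const struct my_config plain_config = { };
	static const struct my_config shadow_config = { .use_shadow_imr = true };

	static const struct of_device_id my_dt_ids[] = {
		{ .compatible = "vendor,plain-rtc",  .data = &plain_config },
		{ .compatible = "vendor,shadow-rtc", .data = &shadow_config },
		{ /* sentinel */ }
	};

	static const struct my_config *my_get_config(struct platform_device *pdev)
	{
		const struct of_device_id *match;

		if (!pdev->dev.of_node)
			return &plain_config;	/* non-DT boards get the default config */

		match = of_match_node(my_dt_ids, pdev->dev.of_node);
		if (!match)
			return NULL;		/* unknown compatible: let probe bail out */

		return match->data;
	}

Returning NULL for an unmatched node lets the probe routine fail with -ENODEV, which is exactly what the patch does when no configuration can be determined.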
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index cc5bea9c4b1c..f1cb706445c7 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -854,6 +854,9 @@ static int cmos_resume(struct device *dev)
854 } 854 }
855 855
856 spin_lock_irq(&rtc_lock); 856 spin_lock_irq(&rtc_lock);
857 if (device_may_wakeup(dev))
858 hpet_rtc_timer_init();
859
857 do { 860 do {
858 CMOS_WRITE(tmp, RTC_CONTROL); 861 CMOS_WRITE(tmp, RTC_CONTROL);
859 hpet_set_rtc_irq_bit(tmp & RTC_IRQMASK); 862 hpet_set_rtc_irq_bit(tmp & RTC_IRQMASK);
@@ -869,7 +872,6 @@ static int cmos_resume(struct device *dev)
869 rtc_update_irq(cmos->rtc, 1, mask); 872 rtc_update_irq(cmos->rtc, 1, mask);
870 tmp &= ~RTC_AIE; 873 tmp &= ~RTC_AIE;
871 hpet_mask_rtc_irq_bit(RTC_AIE); 874 hpet_mask_rtc_irq_bit(RTC_AIE);
872 hpet_rtc_timer_init();
873 } while (mask & RTC_AIE); 875 } while (mask & RTC_AIE);
874 spin_unlock_irq(&rtc_lock); 876 spin_unlock_irq(&rtc_lock);
875 } 877 }
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index 459c2ffc95a6..426901cef14f 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -273,6 +273,8 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
273 return ret; 273 return ret;
274 } 274 }
275 275
276 device_init_wakeup(&pdev->dev, 1);
277
276 platform_set_drvdata(pdev, rtc); 278 platform_set_drvdata(pdev, rtc);
277 rtc->rtc = devm_rtc_device_register(&pdev->dev, dev_name(&pdev->dev), 279 rtc->rtc = devm_rtc_device_register(&pdev->dev, dev_name(&pdev->dev),
278 &tps6586x_rtc_ops, THIS_MODULE); 280 &tps6586x_rtc_ops, THIS_MODULE);
@@ -292,7 +294,6 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
292 goto fail_rtc_register; 294 goto fail_rtc_register;
293 } 295 }
294 disable_irq(rtc->irq); 296 disable_irq(rtc->irq);
295 device_set_wakeup_capable(&pdev->dev, 1);
296 return 0; 297 return 0;
297 298
298fail_rtc_register: 299fail_rtc_register:
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 8751a5240c99..b2eab34f38d9 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -524,6 +524,7 @@ static int twl_rtc_probe(struct platform_device *pdev)
524 } 524 }
525 525
526 platform_set_drvdata(pdev, rtc); 526 platform_set_drvdata(pdev, rtc);
527 device_init_wakeup(&pdev->dev, 1);
527 return 0; 528 return 0;
528 529
529out2: 530out2:
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 4361d9772c42..d72a9216ee2e 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3440,8 +3440,16 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
3440 device->path_data.opm &= ~eventlpm; 3440 device->path_data.opm &= ~eventlpm;
3441 device->path_data.ppm &= ~eventlpm; 3441 device->path_data.ppm &= ~eventlpm;
3442 device->path_data.npm &= ~eventlpm; 3442 device->path_data.npm &= ~eventlpm;
3443 if (oldopm && !device->path_data.opm) 3443 if (oldopm && !device->path_data.opm) {
3444 dasd_generic_last_path_gone(device); 3444 dev_warn(&device->cdev->dev,
3445 "No verified channel paths remain "
3446 "for the device\n");
3447 DBF_DEV_EVENT(DBF_WARNING, device,
3448 "%s", "last verified path gone");
3449 dasd_eer_write(device, NULL, DASD_EER_NOPATH);
3450 dasd_device_set_stop_bits(device,
3451 DASD_STOPPED_DC_WAIT);
3452 }
3445 } 3453 }
3446 if (path_event[chp] & PE_PATH_AVAILABLE) { 3454 if (path_event[chp] & PE_PATH_AVAILABLE) {
3447 device->path_data.opm &= ~eventlpm; 3455 device->path_data.opm &= ~eventlpm;
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 4ffa66c87ea5..9ca3996f65b2 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -2040,6 +2040,7 @@ static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
2040 netiucv_setup_netdevice); 2040 netiucv_setup_netdevice);
2041 if (!dev) 2041 if (!dev)
2042 return NULL; 2042 return NULL;
2043 rtnl_lock();
2043 if (dev_alloc_name(dev, dev->name) < 0) 2044 if (dev_alloc_name(dev, dev->name) < 0)
2044 goto out_netdev; 2045 goto out_netdev;
2045 2046
@@ -2061,6 +2062,7 @@ static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
2061out_fsm: 2062out_fsm:
2062 kfree_fsm(privptr->fsm); 2063 kfree_fsm(privptr->fsm);
2063out_netdev: 2064out_netdev:
2065 rtnl_unlock();
2064 free_netdev(dev); 2066 free_netdev(dev);
2065 return NULL; 2067 return NULL;
2066} 2068}
@@ -2100,6 +2102,7 @@ static ssize_t conn_write(struct device_driver *drv,
2100 2102
2101 rc = netiucv_register_device(dev); 2103 rc = netiucv_register_device(dev);
2102 if (rc) { 2104 if (rc) {
2105 rtnl_unlock();
2103 IUCV_DBF_TEXT_(setup, 2, 2106 IUCV_DBF_TEXT_(setup, 2,
2104 "ret %d from netiucv_register_device\n", rc); 2107 "ret %d from netiucv_register_device\n", rc);
2105 goto out_free_ndev; 2108 goto out_free_ndev;
@@ -2109,7 +2112,8 @@ static ssize_t conn_write(struct device_driver *drv,
2109 priv = netdev_priv(dev); 2112 priv = netdev_priv(dev);
2110 SET_NETDEV_DEV(dev, priv->dev); 2113 SET_NETDEV_DEV(dev, priv->dev);
2111 2114
2112 rc = register_netdev(dev); 2115 rc = register_netdevice(dev);
2116 rtnl_unlock();
2113 if (rc) 2117 if (rc)
2114 goto out_unreg; 2118 goto out_unreg;
2115 2119
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 439c012be763..b63d534192e3 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -186,7 +186,7 @@ bfad_debugfs_lseek(struct file *file, loff_t offset, int orig)
186 file->f_pos += offset; 186 file->f_pos += offset;
187 break; 187 break;
188 case 2: 188 case 2:
189 file->f_pos = debug->buffer_len - offset; 189 file->f_pos = debug->buffer_len + offset;
190 break; 190 break;
191 default: 191 default:
192 return -EINVAL; 192 return -EINVAL;
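
Note: this hunk (and the matching fnic and lpfc debugfs hunks further down) changes the SEEK_END case from "buffer_len - offset" to "buffer_len + offset". lseek() semantics make the end-relative offset additive, with a negative offset seeking backwards. A minimal userspace sketch of the arithmetic, not the driver code:

/* Sketch of the whence handling these hunks fix; buffer_len stands in
 * for the debugfs buffer size, and bounds checking mirrors the driver. */
#include <stdio.h>

static long debug_lseek(long cur_pos, long buffer_len, long offset, int whence)
{
	long pos;

	switch (whence) {
	case SEEK_SET:
		pos = offset;
		break;
	case SEEK_CUR:
		pos = cur_pos + offset;
		break;
	case SEEK_END:
		/* End-relative: a negative offset seeks backwards, so the
		 * new position is len + offset, not len - offset. */
		pos = buffer_len + offset;
		break;
	default:
		return -1;
	}
	return (pos < 0 || pos > buffer_len) ? -1 : pos;
}

int main(void)
{
	/* Seek 10 bytes back from the end of a 100-byte buffer: expect 90. */
	printf("%ld\n", debug_lseek(0, 100, -10, SEEK_END));
	return 0;
}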
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 292b24f9bf93..32ae6c67ea3a 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1656,9 +1656,12 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1656 1656
1657 if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN && 1657 if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
1658 fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) { 1658 fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) {
1659 skb->vlan_tci = VLAN_TAG_PRESENT | 1659 /* must set skb->dev before calling vlan_put_tag */
1660 vlan_dev_vlan_id(fcoe->netdev);
1661 skb->dev = fcoe->realdev; 1660 skb->dev = fcoe->realdev;
1661 skb = __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1662 vlan_dev_vlan_id(fcoe->netdev));
1663 if (!skb)
1664 return -ENOMEM;
1662 } else 1665 } else
1663 skb->dev = fcoe->netdev; 1666 skb->dev = fcoe->netdev;
1664 1667
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index cd743c545ce9..795843dde8ec 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1548,9 +1548,6 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
1548{ 1548{
1549 struct fcoe_fcf *fcf; 1549 struct fcoe_fcf *fcf;
1550 struct fcoe_fcf *best = fip->sel_fcf; 1550 struct fcoe_fcf *best = fip->sel_fcf;
1551 struct fcoe_fcf *first;
1552
1553 first = list_first_entry(&fip->fcfs, struct fcoe_fcf, list);
1554 1551
1555 list_for_each_entry(fcf, &fip->fcfs, list) { 1552 list_for_each_entry(fcf, &fip->fcfs, list) {
1556 LIBFCOE_FIP_DBG(fip, "consider FCF fab %16.16llx " 1553 LIBFCOE_FIP_DBG(fip, "consider FCF fab %16.16llx "
@@ -1568,17 +1565,15 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
1568 "" : "un"); 1565 "" : "un");
1569 continue; 1566 continue;
1570 } 1567 }
1571 if (fcf->fabric_name != first->fabric_name || 1568 if (!best || fcf->pri < best->pri || best->flogi_sent)
1572 fcf->vfid != first->vfid || 1569 best = fcf;
1573 fcf->fc_map != first->fc_map) { 1570 if (fcf->fabric_name != best->fabric_name ||
1571 fcf->vfid != best->vfid ||
1572 fcf->fc_map != best->fc_map) {
1574 LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, " 1573 LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
1575 "or FC-MAP\n"); 1574 "or FC-MAP\n");
1576 return NULL; 1575 return NULL;
1577 } 1576 }
1578 if (fcf->flogi_sent)
1579 continue;
1580 if (!best || fcf->pri < best->pri || best->flogi_sent)
1581 best = fcf;
1582 } 1577 }
1583 fip->sel_fcf = best; 1578 fip->sel_fcf = best;
1584 if (best) { 1579 if (best) {
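
Note: the fcoe_ctlr_select() hunk above switches the loop from comparing every FCF against the first list entry to tracking a running "best" candidate and checking for fabric conflicts against it. A hedged, self-contained sketch of that selection pattern; the struct and fields below are illustrative, not the libfcoe types:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct fcf_entry {
	unsigned long long fabric_name;
	int pri;		/* lower value = preferred */
	bool flogi_sent;	/* login already attempted on this FCF */
};

static const struct fcf_entry *select_fcf(const struct fcf_entry *fcfs, size_t n)
{
	const struct fcf_entry *best = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		const struct fcf_entry *fcf = &fcfs[i];

		/* Take the first usable entry, anything with a lower
		 * priority, or anything over a candidate we already
		 * tried to log in to. */
		if (!best || fcf->pri < best->pri || best->flogi_sent)
			best = fcf;

		/* Every entry must agree with the current best; a
		 * conflicting fabric means no safe selection at all. */
		if (fcf->fabric_name != best->fabric_name) {
			fprintf(stderr, "conflicting fabric, aborting selection\n");
			return NULL;
		}
	}
	return best;
}

int main(void)
{
	const struct fcf_entry fcfs[] = {
		{ .fabric_name = 0x10, .pri = 128, .flogi_sent = true  },
		{ .fabric_name = 0x10, .pri =  64, .flogi_sent = false },
	};
	const struct fcf_entry *best = select_fcf(fcfs, 2);

	if (best)
		printf("selected FCF with pri %d\n", best->pri);
	return 0;
}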
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index adc1f7f471f5..85e1ffd0e5c5 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -174,7 +174,7 @@ static loff_t fnic_trace_debugfs_lseek(struct file *file,
174 pos = file->f_pos + offset; 174 pos = file->f_pos + offset;
175 break; 175 break;
176 case 2: 176 case 2:
177 pos = fnic_dbg_prt->buffer_len - offset; 177 pos = fnic_dbg_prt->buffer_len + offset;
178 } 178 }
179 return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ? 179 return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ?
180 -EINVAL : (file->f_pos = pos); 180 -EINVAL : (file->f_pos = pos);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 82a3c1ec8706..6c4cedb44c07 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -8980,19 +8980,6 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8980 if (!ioa_cfg->res_entries) 8980 if (!ioa_cfg->res_entries)
8981 goto out; 8981 goto out;
8982 8982
8983 if (ioa_cfg->sis64) {
8984 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8985 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8986 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8987 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8988 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8989 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8990
8991 if (!ioa_cfg->target_ids || !ioa_cfg->array_ids
8992 || !ioa_cfg->vset_ids)
8993 goto out_free_res_entries;
8994 }
8995
8996 for (i = 0; i < ioa_cfg->max_devs_supported; i++) { 8983 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8997 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); 8984 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8998 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; 8985 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
@@ -9089,9 +9076,6 @@ out_free_vpd_cbs:
9089 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); 9076 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9090out_free_res_entries: 9077out_free_res_entries:
9091 kfree(ioa_cfg->res_entries); 9078 kfree(ioa_cfg->res_entries);
9092 kfree(ioa_cfg->target_ids);
9093 kfree(ioa_cfg->array_ids);
9094 kfree(ioa_cfg->vset_ids);
9095 goto out; 9079 goto out;
9096} 9080}
9097 9081
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index a1fb840596ef..07a85ce41782 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1440,9 +1440,9 @@ struct ipr_ioa_cfg {
1440 /* 1440 /*
1441 * Bitmaps for SIS64 generated target values 1441 * Bitmaps for SIS64 generated target values
1442 */ 1442 */
1443 unsigned long *target_ids; 1443 unsigned long target_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)];
1444 unsigned long *array_ids; 1444 unsigned long array_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)];
1445 unsigned long *vset_ids; 1445 unsigned long vset_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)];
1446 1446
1447 u16 type; /* CCIN of the card */ 1447 u16 type; /* CCIN of the card */
1448 1448
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index c772d8d27159..8b928c67e4b9 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -463,13 +463,7 @@ static void fc_exch_delete(struct fc_exch *ep)
463 fc_exch_release(ep); /* drop hold for exch in mp */ 463 fc_exch_release(ep); /* drop hold for exch in mp */
464} 464}
465 465
466/** 466static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
467 * fc_seq_send() - Send a frame using existing sequence/exchange pair
468 * @lport: The local port that the exchange will be sent on
469 * @sp: The sequence to be sent
470 * @fp: The frame to be sent on the exchange
471 */
472static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
473 struct fc_frame *fp) 467 struct fc_frame *fp)
474{ 468{
475 struct fc_exch *ep; 469 struct fc_exch *ep;
@@ -479,7 +473,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
479 u8 fh_type = fh->fh_type; 473 u8 fh_type = fh->fh_type;
480 474
481 ep = fc_seq_exch(sp); 475 ep = fc_seq_exch(sp);
482 WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT); 476 WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));
483 477
484 f_ctl = ntoh24(fh->fh_f_ctl); 478 f_ctl = ntoh24(fh->fh_f_ctl);
485 fc_exch_setup_hdr(ep, fp, f_ctl); 479 fc_exch_setup_hdr(ep, fp, f_ctl);
@@ -502,17 +496,34 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
502 error = lport->tt.frame_send(lport, fp); 496 error = lport->tt.frame_send(lport, fp);
503 497
504 if (fh_type == FC_TYPE_BLS) 498 if (fh_type == FC_TYPE_BLS)
505 return error; 499 goto out;
506 500
507 /* 501 /*
508 * Update the exchange and sequence flags, 502 * Update the exchange and sequence flags,
509 * assuming all frames for the sequence have been sent. 503 * assuming all frames for the sequence have been sent.
510 * We can only be called to send once for each sequence. 504 * We can only be called to send once for each sequence.
511 */ 505 */
512 spin_lock_bh(&ep->ex_lock);
513 ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */ 506 ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */
514 if (f_ctl & FC_FC_SEQ_INIT) 507 if (f_ctl & FC_FC_SEQ_INIT)
515 ep->esb_stat &= ~ESB_ST_SEQ_INIT; 508 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
509out:
510 return error;
511}
512
513/**
514 * fc_seq_send() - Send a frame using existing sequence/exchange pair
515 * @lport: The local port that the exchange will be sent on
516 * @sp: The sequence to be sent
517 * @fp: The frame to be sent on the exchange
518 */
519static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
520 struct fc_frame *fp)
521{
522 struct fc_exch *ep;
523 int error;
524 ep = fc_seq_exch(sp);
525 spin_lock_bh(&ep->ex_lock);
526 error = fc_seq_send_locked(lport, sp, fp);
516 spin_unlock_bh(&ep->ex_lock); 527 spin_unlock_bh(&ep->ex_lock);
517 return error; 528 return error;
518} 529}
@@ -629,7 +640,7 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
629 if (fp) { 640 if (fp) {
630 fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid, 641 fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
631 FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 642 FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
632 error = fc_seq_send(ep->lp, sp, fp); 643 error = fc_seq_send_locked(ep->lp, sp, fp);
633 } else 644 } else
634 error = -ENOBUFS; 645 error = -ENOBUFS;
635 return error; 646 return error;
@@ -1132,7 +1143,7 @@ static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
1132 f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT; 1143 f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1133 f_ctl |= ep->f_ctl; 1144 f_ctl |= ep->f_ctl;
1134 fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0); 1145 fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
1135 fc_seq_send(ep->lp, sp, fp); 1146 fc_seq_send_locked(ep->lp, sp, fp);
1136} 1147}
1137 1148
1138/** 1149/**
@@ -1307,8 +1318,8 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
1307 ap->ba_low_seq_cnt = htons(sp->cnt); 1318 ap->ba_low_seq_cnt = htons(sp->cnt);
1308 } 1319 }
1309 sp = fc_seq_start_next_locked(sp); 1320 sp = fc_seq_start_next_locked(sp);
1310 spin_unlock_bh(&ep->ex_lock);
1311 fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS); 1321 fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
1322 spin_unlock_bh(&ep->ex_lock);
1312 fc_frame_free(rx_fp); 1323 fc_frame_free(rx_fp);
1313 return; 1324 return;
1314 1325
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index d518d17e940f..6bbb9447b75d 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1962,7 +1962,7 @@ static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len,
1962 rdata->flags |= FC_RP_FLAGS_RETRY; 1962 rdata->flags |= FC_RP_FLAGS_RETRY;
1963 rdata->supported_classes = FC_COS_CLASS3; 1963 rdata->supported_classes = FC_COS_CLASS3;
1964 1964
1965 if (!(lport->service_params & FC_RPORT_ROLE_FCP_INITIATOR)) 1965 if (!(lport->service_params & FCP_SPPF_INIT_FCN))
1966 return 0; 1966 return 0;
1967 1967
1968 spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR; 1968 spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index f63f5ff7f274..f525ecb7a9c6 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1178,7 +1178,7 @@ lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
1178 pos = file->f_pos + off; 1178 pos = file->f_pos + off;
1179 break; 1179 break;
1180 case 2: 1180 case 2:
1181 pos = debug->len - off; 1181 pos = debug->len + off;
1182 } 1182 }
1183 return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos); 1183 return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
1184} 1184}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 98ab921070d2..0a5c8951cebb 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -278,3 +278,14 @@ qla2x00_do_host_ramp_up(scsi_qla_host_t *vha)
278 278
279 set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags); 279 set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags);
280} 280}
281
282static inline void
283qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
284{
285 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
286 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
287 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
288 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
289 complete(&ha->mbx_intr_comp);
290 }
291}
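
Note: the new qla2x00_handle_mbx_completion() helper above replaces the open-coded "wake the waiting mailbox command" block that the following qla_isr.c, qla_mr.c and qla_nx.c hunks delete from each interrupt handler, and it is now called before the hardware lock is dropped. A hedged userspace analog of that consolidation; the flag bits, lock and "completion" below are stand-ins, not the qla2xxx types:

#include <stdio.h>
#include <pthread.h>

#define MBX_INTR_WAIT	0x1	/* a caller is sleeping on the mailbox */
#define MBX_INTERRUPT	0x2	/* firmware reported mailbox completion */

struct hw_data {
	pthread_mutex_t hardware_lock;
	unsigned int mbx_cmd_flags;
	int mbx_done;		/* stands in for complete(&ha->mbx_intr_comp) */
};

/* Single helper shared by all interrupt handlers, called under the lock. */
static void handle_mbx_completion(struct hw_data *ha, int status)
{
	if ((ha->mbx_cmd_flags & MBX_INTR_WAIT) && (status & MBX_INTERRUPT)) {
		ha->mbx_cmd_flags |= MBX_INTERRUPT;
		ha->mbx_cmd_flags &= ~MBX_INTR_WAIT;
		ha->mbx_done = 1;	/* wake the waiter */
	}
}

static void intr_handler(struct hw_data *ha, int status)
{
	pthread_mutex_lock(&ha->hardware_lock);
	/* ... per-controller interrupt decoding would go here ... */
	handle_mbx_completion(ha, status);	/* one call instead of a copy */
	pthread_mutex_unlock(&ha->hardware_lock);
}

int main(void)
{
	struct hw_data ha = {
		.hardware_lock = PTHREAD_MUTEX_INITIALIZER,
		.mbx_cmd_flags = MBX_INTR_WAIT,
	};

	intr_handler(&ha, MBX_INTERRUPT);
	printf("mbx_done=%d flags=0x%x\n", ha.mbx_done, ha.mbx_cmd_flags);
	return 0;
}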
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 259d9205d876..d2a4c75e5b8f 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -104,14 +104,9 @@ qla2100_intr_handler(int irq, void *dev_id)
104 RD_REG_WORD(&reg->hccr); 104 RD_REG_WORD(&reg->hccr);
105 } 105 }
106 } 106 }
107 qla2x00_handle_mbx_completion(ha, status);
107 spin_unlock_irqrestore(&ha->hardware_lock, flags); 108 spin_unlock_irqrestore(&ha->hardware_lock, flags);
108 109
109 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
110 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
111 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
112 complete(&ha->mbx_intr_comp);
113 }
114
115 return (IRQ_HANDLED); 110 return (IRQ_HANDLED);
116} 111}
117 112
@@ -221,14 +216,9 @@ qla2300_intr_handler(int irq, void *dev_id)
221 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 216 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
222 RD_REG_WORD_RELAXED(&reg->hccr); 217 RD_REG_WORD_RELAXED(&reg->hccr);
223 } 218 }
219 qla2x00_handle_mbx_completion(ha, status);
224 spin_unlock_irqrestore(&ha->hardware_lock, flags); 220 spin_unlock_irqrestore(&ha->hardware_lock, flags);
225 221
226 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
227 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
228 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
229 complete(&ha->mbx_intr_comp);
230 }
231
232 return (IRQ_HANDLED); 222 return (IRQ_HANDLED);
233} 223}
234 224
@@ -2613,14 +2603,9 @@ qla24xx_intr_handler(int irq, void *dev_id)
2613 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) 2603 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
2614 ndelay(3500); 2604 ndelay(3500);
2615 } 2605 }
2606 qla2x00_handle_mbx_completion(ha, status);
2616 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2607 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2617 2608
2618 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2619 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2620 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2621 complete(&ha->mbx_intr_comp);
2622 }
2623
2624 return IRQ_HANDLED; 2609 return IRQ_HANDLED;
2625} 2610}
2626 2611
@@ -2763,13 +2748,9 @@ qla24xx_msix_default(int irq, void *dev_id)
2763 } 2748 }
2764 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2749 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2765 } while (0); 2750 } while (0);
2751 qla2x00_handle_mbx_completion(ha, status);
2766 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2752 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2767 2753
2768 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2769 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2770 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2771 complete(&ha->mbx_intr_comp);
2772 }
2773 return IRQ_HANDLED; 2754 return IRQ_HANDLED;
2774} 2755}
2775 2756
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 9e5d89db7272..3587ec267fa6 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -179,8 +179,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
179 179
180 wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ); 180 wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
181 181
182 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
183
184 } else { 182 } else {
185 ql_dbg(ql_dbg_mbx, vha, 0x1011, 183 ql_dbg(ql_dbg_mbx, vha, 0x1011,
186 "Cmd=%x Polling Mode.\n", command); 184 "Cmd=%x Polling Mode.\n", command);
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 937fed8cb038..a6df55838365 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -148,9 +148,6 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
148 spin_unlock_irqrestore(&ha->hardware_lock, flags); 148 spin_unlock_irqrestore(&ha->hardware_lock, flags);
149 149
150 wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ); 150 wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
151
152 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
153
154 } else { 151 } else {
155 ql_dbg(ql_dbg_mbx, vha, 0x112c, 152 ql_dbg(ql_dbg_mbx, vha, 0x112c,
156 "Cmd=%x Polling Mode.\n", command); 153 "Cmd=%x Polling Mode.\n", command);
@@ -2934,13 +2931,10 @@ qlafx00_intr_handler(int irq, void *dev_id)
2934 QLAFX00_CLR_INTR_REG(ha, clr_intr); 2931 QLAFX00_CLR_INTR_REG(ha, clr_intr);
2935 QLAFX00_RD_INTR_REG(ha); 2932 QLAFX00_RD_INTR_REG(ha);
2936 } 2933 }
2934
2935 qla2x00_handle_mbx_completion(ha, status);
2937 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2936 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2938 2937
2939 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2940 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2941 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2942 complete(&ha->mbx_intr_comp);
2943 }
2944 return IRQ_HANDLED; 2938 return IRQ_HANDLED;
2945} 2939}
2946 2940
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 10754f518303..cce0cd0d7ec4 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -2074,9 +2074,6 @@ qla82xx_intr_handler(int irq, void *dev_id)
2074 } 2074 }
2075 WRT_REG_DWORD(&reg->host_int, 0); 2075 WRT_REG_DWORD(&reg->host_int, 0);
2076 } 2076 }
2077 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2078 if (!ha->flags.msi_enabled)
2079 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2080 2077
2081#ifdef QL_DEBUG_LEVEL_17 2078#ifdef QL_DEBUG_LEVEL_17
2082 if (!irq && ha->flags.eeh_busy) 2079 if (!irq && ha->flags.eeh_busy)
@@ -2085,11 +2082,12 @@ qla82xx_intr_handler(int irq, void *dev_id)
2085 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); 2082 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2086#endif 2083#endif
2087 2084
2088 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2085 qla2x00_handle_mbx_completion(ha, status);
2089 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 2086 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2090 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 2087
2091 complete(&ha->mbx_intr_comp); 2088 if (!ha->flags.msi_enabled)
2092 } 2089 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2090
2093 return IRQ_HANDLED; 2091 return IRQ_HANDLED;
2094} 2092}
2095 2093
@@ -2149,8 +2147,6 @@ qla82xx_msix_default(int irq, void *dev_id)
2149 WRT_REG_DWORD(&reg->host_int, 0); 2147 WRT_REG_DWORD(&reg->host_int, 0);
2150 } while (0); 2148 } while (0);
2151 2149
2152 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2153
2154#ifdef QL_DEBUG_LEVEL_17 2150#ifdef QL_DEBUG_LEVEL_17
2155 if (!irq && ha->flags.eeh_busy) 2151 if (!irq && ha->flags.eeh_busy)
2156 ql_log(ql_log_warn, vha, 0x5044, 2152 ql_log(ql_log_warn, vha, 0x5044,
@@ -2158,11 +2154,9 @@ qla82xx_msix_default(int irq, void *dev_id)
2158 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); 2154 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2159#endif 2155#endif
2160 2156
2161 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2157 qla2x00_handle_mbx_completion(ha, status);
2162 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 2158 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2163 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 2159
2164 complete(&ha->mbx_intr_comp);
2165 }
2166 return IRQ_HANDLED; 2160 return IRQ_HANDLED;
2167} 2161}
2168 2162
@@ -3345,7 +3339,7 @@ void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
3345 ha->flags.mbox_busy = 0; 3339 ha->flags.mbox_busy = 0;
3346 ql_log(ql_log_warn, vha, 0x6010, 3340 ql_log(ql_log_warn, vha, 0x6010,
3347 "Doing premature completion of mbx command.\n"); 3341 "Doing premature completion of mbx command.\n");
3348 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) 3342 if (test_and_clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
3349 complete(&ha->mbx_intr_comp); 3343 complete(&ha->mbx_intr_comp);
3350 } 3344 }
3351} 3345}
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index d182c96e17ea..66b0b26a1381 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -688,8 +688,12 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
688 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen 688 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
689 * for qla_tgt_xmit_response LLD code 689 * for qla_tgt_xmit_response LLD code
690 */ 690 */
691 if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
692 se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT;
693 se_cmd->residual_count = 0;
694 }
691 se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 695 se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
692 se_cmd->residual_count = se_cmd->data_length; 696 se_cmd->residual_count += se_cmd->data_length;
693 697
694 cmd->bufflen = 0; 698 cmd->bufflen = 0;
695 } 699 }
@@ -1370,7 +1374,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
1370 dump_stack(); 1374 dump_stack();
1371 return; 1375 return;
1372 } 1376 }
1373 target_wait_for_sess_cmds(se_sess, 0); 1377 target_wait_for_sess_cmds(se_sess);
1374 1378
1375 transport_deregister_session_configfs(sess->se_sess); 1379 transport_deregister_session_configfs(sess->se_sess);
1376 transport_deregister_session(sess->se_sess); 1380 transport_deregister_session(sess->se_sess);
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index db66357211ed..86f0c5d5c116 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -84,6 +84,7 @@ static int proc_scsi_host_open(struct inode *inode, struct file *file)
84 84
85static const struct file_operations proc_scsi_fops = { 85static const struct file_operations proc_scsi_fops = {
86 .open = proc_scsi_host_open, 86 .open = proc_scsi_host_open,
87 .release = single_release,
87 .read = seq_read, 88 .read = seq_read,
88 .llseek = seq_lseek, 89 .llseek = seq_lseek,
89 .write = proc_scsi_host_write 90 .write = proc_scsi_host_write
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index c735c5a008a2..6427600b5bbe 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -59,7 +59,7 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
59 int ret; 59 int ret;
60 60
61 sg_free_table(sgt); 61 sg_free_table(sgt);
62 ret = sg_alloc_table(sgt, nents, GFP_KERNEL); 62 ret = sg_alloc_table(sgt, nents, GFP_ATOMIC);
63 if (ret) 63 if (ret)
64 return ret; 64 return ret;
65 } 65 }
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index f5d84d6f8222..48b396fced0a 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1075,7 +1075,7 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
1075 acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev)) 1075 acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
1076 return NULL; 1076 return NULL;
1077 1077
1078 pdata = devm_kzalloc(&pdev->dev, sizeof(*ssp), GFP_KERNEL); 1078 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1079 if (!pdata) { 1079 if (!pdata) {
1080 dev_err(&pdev->dev, 1080 dev_err(&pdev->dev,
1081 "failed to allocate memory for platform data\n"); 1081 "failed to allocate memory for platform data\n");
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 5000586cb98d..71cc3e6ef47c 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -444,7 +444,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
444 } 444 }
445 445
446 ret = pm_runtime_get_sync(&sdd->pdev->dev); 446 ret = pm_runtime_get_sync(&sdd->pdev->dev);
447 if (ret != 0) { 447 if (ret < 0) {
448 dev_err(dev, "Failed to enable device: %d\n", ret); 448 dev_err(dev, "Failed to enable device: %d\n", ret);
449 goto out_tx; 449 goto out_tx;
450 } 450 }
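
Note: the check above changes from "ret != 0" to "ret < 0" because pm_runtime_get_sync() reports success with 0 or a positive value (for example when the device was already active) and failure with a negative errno. A hedged stub demonstrating why only negative returns are errors; it is not the runtime-PM implementation:

#include <stdio.h>

static int fake_pm_runtime_get_sync(int already_active)
{
	return already_active ? 1 : 0;	/* negative errno only on failure */
}

int main(void)
{
	int ret = fake_pm_runtime_get_sync(1);

	if (ret != 0)
		printf("old check: ret=%d wrongly treated as an error\n", ret);
	if (ret < 0)
		printf("new check: error\n");
	else
		printf("new check: ret=%d accepted as success\n", ret);
	return 0;
}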
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 60cfae51c713..eab593eaaafa 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -89,7 +89,7 @@ static int hspi_status_check_timeout(struct hspi_priv *hspi, u32 mask, u32 val)
89 if ((mask & hspi_read(hspi, SPSR)) == val) 89 if ((mask & hspi_read(hspi, SPSR)) == val)
90 return 0; 90 return 0;
91 91
92 msleep(20); 92 udelay(10);
93 } 93 }
94 94
95 dev_err(hspi->dev, "timeout\n"); 95 dev_err(hspi->dev, "timeout\n");
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 35f60bd252dd..637d728fbeb5 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1487,7 +1487,7 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
1487 return 0; 1487 return 0;
1488 1488
1489err_spi_register_master: 1489err_spi_register_master:
1490 free_irq(board_dat->pdev->irq, board_dat); 1490 free_irq(board_dat->pdev->irq, data);
1491err_request_irq: 1491err_request_irq:
1492 pch_spi_free_resources(board_dat, data); 1492 pch_spi_free_resources(board_dat, data);
1493err_spi_get_resources: 1493err_spi_get_resources:
@@ -1667,6 +1667,7 @@ static int pch_spi_probe(struct pci_dev *pdev,
1667 pd_dev = platform_device_alloc("pch-spi", i); 1667 pd_dev = platform_device_alloc("pch-spi", i);
1668 if (!pd_dev) { 1668 if (!pd_dev) {
1669 dev_err(&pdev->dev, "platform_device_alloc failed\n"); 1669 dev_err(&pdev->dev, "platform_device_alloc failed\n");
1670 retval = -ENOMEM;
1670 goto err_platform_device; 1671 goto err_platform_device;
1671 } 1672 }
1672 pd_dev_save->pd_save[i] = pd_dev; 1673 pd_dev_save->pd_save[i] = pd_dev;
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index e1d769607425..34d18dcfa0db 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -267,7 +267,6 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
267{ 267{
268 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); 268 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
269 u32 ipif_ier; 269 u32 ipif_ier;
270 u16 cr;
271 270
272 /* We get here with transmitter inhibited */ 271 /* We get here with transmitter inhibited */
273 272
@@ -276,7 +275,6 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
276 xspi->remaining_bytes = t->len; 275 xspi->remaining_bytes = t->len;
277 INIT_COMPLETION(xspi->done); 276 INIT_COMPLETION(xspi->done);
278 277
279 xilinx_spi_fill_tx_fifo(xspi);
280 278
281 /* Enable the transmit empty interrupt, which we use to determine 279 /* Enable the transmit empty interrupt, which we use to determine
282 * progress on the transmission. 280 * progress on the transmission.
@@ -285,12 +283,41 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
285 xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY, 283 xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY,
286 xspi->regs + XIPIF_V123B_IIER_OFFSET); 284 xspi->regs + XIPIF_V123B_IIER_OFFSET);
287 285
288 /* Start the transfer by not inhibiting the transmitter any longer */ 286 for (;;) {
289 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & 287 u16 cr;
290 ~XSPI_CR_TRANS_INHIBIT; 288 u8 sr;
291 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); 289
290 xilinx_spi_fill_tx_fifo(xspi);
291
292 /* Start the transfer by not inhibiting the transmitter any
293 * longer
294 */
295 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) &
296 ~XSPI_CR_TRANS_INHIBIT;
297 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
298
299 wait_for_completion(&xspi->done);
300
301 /* A transmit has just completed. Process received data and
302 * check for more data to transmit. Always inhibit the
303 * transmitter while the Isr refills the transmit register/FIFO,
304 * or make sure it is stopped if we're done.
305 */
306 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
307 xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
308 xspi->regs + XSPI_CR_OFFSET);
309
310 /* Read out all the data from the Rx FIFO */
311 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
312 while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
313 xspi->rx_fn(xspi);
314 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
315 }
292 316
293 wait_for_completion(&xspi->done); 317 /* See if there is more data to send */
318 if (!xspi->remaining_bytes > 0)
319 break;
320 }
294 321
295 /* Disable the transmit empty interrupt */ 322 /* Disable the transmit empty interrupt */
296 xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET); 323 xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET);
@@ -314,38 +341,7 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
314 xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET); 341 xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET);
315 342
316 if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */ 343 if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */
317 u16 cr; 344 complete(&xspi->done);
318 u8 sr;
319
320 /* A transmit has just completed. Process received data and
321 * check for more data to transmit. Always inhibit the
322 * transmitter while the Isr refills the transmit register/FIFO,
323 * or make sure it is stopped if we're done.
324 */
325 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
326 xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
327 xspi->regs + XSPI_CR_OFFSET);
328
329 /* Read out all the data from the Rx FIFO */
330 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
331 while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
332 xspi->rx_fn(xspi);
333 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
334 }
335
336 /* See if there is more data to send */
337 if (xspi->remaining_bytes > 0) {
338 xilinx_spi_fill_tx_fifo(xspi);
339 /* Start the transfer by not inhibiting the
340 * transmitter any longer
341 */
342 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
343 } else {
344 /* No more data to send.
345 * Indicate the transfer is completed.
346 */
347 complete(&xspi->done);
348 }
349 } 345 }
350 346
351 return IRQ_HANDLED; 347 return IRQ_HANDLED;
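
Note on the loop-exit test added in the xilinx_spi hunk above: "!" binds tighter than ">", so "!xspi->remaining_bytes > 0" parses as "(!remaining_bytes) > 0" and is true exactly when remaining_bytes reaches zero. A tiny demo of the precedence only, not the driver code:

#include <stdio.h>

int main(void)
{
	int remaining_bytes;

	for (remaining_bytes = 3; ; remaining_bytes--) {
		printf("remaining_bytes=%d\n", remaining_bytes);
		if (!remaining_bytes > 0)	/* == ((!remaining_bytes) > 0) */
			break;
	}
	printf("loop exited at %d\n", remaining_bytes);
	return 0;
}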
diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c
index ceb1c643753d..6dc27dac679d 100644
--- a/drivers/staging/android/alarm-dev.c
+++ b/drivers/staging/android/alarm-dev.c
@@ -264,6 +264,8 @@ static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
264 } 264 }
265 265
266 rv = alarm_do_ioctl(file, cmd, &ts); 266 rv = alarm_do_ioctl(file, cmd, &ts);
267 if (rv)
268 return rv;
267 269
268 switch (ANDROID_ALARM_BASE_CMD(cmd)) { 270 switch (ANDROID_ALARM_BASE_CMD(cmd)) {
269 case ANDROID_ALARM_GET_TIME(0): 271 case ANDROID_ALARM_GET_TIME(0):
@@ -272,7 +274,7 @@ static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
272 break; 274 break;
273 } 275 }
274 276
275 return rv; 277 return 0;
276} 278}
277#ifdef CONFIG_COMPAT 279#ifdef CONFIG_COMPAT
278static long alarm_compat_ioctl(struct file *file, unsigned int cmd, 280static long alarm_compat_ioctl(struct file *file, unsigned int cmd,
@@ -295,6 +297,8 @@ static long alarm_compat_ioctl(struct file *file, unsigned int cmd,
295 } 297 }
296 298
297 rv = alarm_do_ioctl(file, cmd, &ts); 299 rv = alarm_do_ioctl(file, cmd, &ts);
300 if (rv)
301 return rv;
298 302
299 switch (ANDROID_ALARM_BASE_CMD(cmd)) { 303 switch (ANDROID_ALARM_BASE_CMD(cmd)) {
300 case ANDROID_ALARM_GET_TIME(0): /* NOTE: we modified cmd above */ 304 case ANDROID_ALARM_GET_TIME(0): /* NOTE: we modified cmd above */
@@ -303,7 +307,7 @@ static long alarm_compat_ioctl(struct file *file, unsigned int cmd,
303 break; 307 break;
304 } 308 }
305 309
306 return rv; 310 return 0;
307} 311}
308#endif 312#endif
309 313
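
Note: both alarm ioctl hunks above add an early "if (rv) return rv;" so a failed alarm_do_ioctl() never falls through to the copy-out switch, and the success path can simply return 0. A hedged sketch of that control-flow shape; the helpers are invented for illustration:

#include <stdio.h>
#include <string.h>
#include <errno.h>

struct timespec_like { long sec, nsec; };

static int do_get_time(struct timespec_like *ts, int should_fail)
{
	if (should_fail)
		return -EINVAL;
	ts->sec = 123;
	ts->nsec = 456;
	return 0;
}

static long ioctl_like(struct timespec_like *user_buf, int should_fail)
{
	struct timespec_like ts;
	int rv = do_get_time(&ts, should_fail);

	if (rv)
		return rv;		/* never copy stale data out on failure */

	memcpy(user_buf, &ts, sizeof(ts));	/* stands in for copy_to_user() */
	return 0;
}

int main(void)
{
	struct timespec_like out = { 0, 0 };

	printf("fail path: %ld (out untouched: %ld)\n", ioctl_like(&out, 1), out.sec);
	printf("ok path  : %ld (out.sec=%ld)\n", ioctl_like(&out, 0), out.sec);
	return 0;
}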
diff --git a/drivers/staging/dwc2/hcd.c b/drivers/staging/dwc2/hcd.c
index 827ab781ae9b..8551ccedf037 100644
--- a/drivers/staging/dwc2/hcd.c
+++ b/drivers/staging/dwc2/hcd.c
@@ -2804,9 +2804,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq,
2804 2804
2805 /* Set device flags indicating whether the HCD supports DMA */ 2805 /* Set device flags indicating whether the HCD supports DMA */
2806 if (hsotg->core_params->dma_enable > 0) { 2806 if (hsotg->core_params->dma_enable > 0) {
2807 if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(31)) < 0) 2807 if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
2808 dev_warn(hsotg->dev, 2808 dev_warn(hsotg->dev, "can't set DMA mask\n");
2809 "can't enable workaround for >2GB RAM\n");
2810 if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(31)) < 0) 2809 if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(31)) < 0)
2811 dev_warn(hsotg->dev, 2810 dev_warn(hsotg->dev,
2812 "can't enable workaround for >2GB RAM\n"); 2811 "can't enable workaround for >2GB RAM\n");
diff --git a/drivers/staging/media/davinci_vpfe/Kconfig b/drivers/staging/media/davinci_vpfe/Kconfig
index 2e4a28b018e8..12f321dd2399 100644
--- a/drivers/staging/media/davinci_vpfe/Kconfig
+++ b/drivers/staging/media/davinci_vpfe/Kconfig
@@ -1,6 +1,6 @@
1config VIDEO_DM365_VPFE 1config VIDEO_DM365_VPFE
2 tristate "DM365 VPFE Media Controller Capture Driver" 2 tristate "DM365 VPFE Media Controller Capture Driver"
3 depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_VPFE_CAPTURE 3 depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF
4 select VIDEOBUF2_DMA_CONTIG 4 select VIDEOBUF2_DMA_CONTIG
5 help 5 help
6 Support for DM365 VPFE based Media Controller Capture driver. 6 Support for DM365 VPFE based Media Controller Capture driver.
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
index b88e1ddce229..d8ce20d2fbda 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
@@ -639,7 +639,8 @@ static int vpfe_probe(struct platform_device *pdev)
639 if (ret) 639 if (ret)
640 goto probe_free_dev_mem; 640 goto probe_free_dev_mem;
641 641
642 if (vpfe_initialize_modules(vpfe_dev, pdev)) 642 ret = vpfe_initialize_modules(vpfe_dev, pdev);
643 if (ret)
643 goto probe_disable_clock; 644 goto probe_disable_clock;
644 645
645 vpfe_dev->media_dev.dev = vpfe_dev->pdev; 646 vpfe_dev->media_dev.dev = vpfe_dev->pdev;
@@ -663,7 +664,8 @@ static int vpfe_probe(struct platform_device *pdev)
663 /* set the driver data in platform device */ 664 /* set the driver data in platform device */
664 platform_set_drvdata(pdev, vpfe_dev); 665 platform_set_drvdata(pdev, vpfe_dev);
665 /* register subdevs/entities */ 666 /* register subdevs/entities */
666 if (vpfe_register_entities(vpfe_dev)) 667 ret = vpfe_register_entities(vpfe_dev);
668 if (ret)
667 goto probe_out_v4l2_unregister; 669 goto probe_out_v4l2_unregister;
668 670
669 ret = vpfe_attach_irq(vpfe_dev); 671 ret = vpfe_attach_irq(vpfe_dev);
diff --git a/drivers/staging/media/solo6x10/Kconfig b/drivers/staging/media/solo6x10/Kconfig
index df6569b997b8..34f3b6d02d2a 100644
--- a/drivers/staging/media/solo6x10/Kconfig
+++ b/drivers/staging/media/solo6x10/Kconfig
@@ -5,6 +5,7 @@ config SOLO6X10
5 select VIDEOBUF2_DMA_SG 5 select VIDEOBUF2_DMA_SG
6 select VIDEOBUF2_DMA_CONTIG 6 select VIDEOBUF2_DMA_CONTIG
7 select SND_PCM 7 select SND_PCM
8 select FONT_8x16
8 ---help--- 9 ---help---
9 This driver supports the Softlogic based MPEG-4 and h.264 codec 10 This driver supports the Softlogic based MPEG-4 and h.264 codec
10 cards. 11 cards.
diff --git a/drivers/staging/zcache/ramster.h b/drivers/staging/zcache/ramster.h
index e1f91d5a0f6a..a858666eae68 100644
--- a/drivers/staging/zcache/ramster.h
+++ b/drivers/staging/zcache/ramster.h
@@ -11,10 +11,6 @@
11#ifndef _ZCACHE_RAMSTER_H_ 11#ifndef _ZCACHE_RAMSTER_H_
12#define _ZCACHE_RAMSTER_H_ 12#define _ZCACHE_RAMSTER_H_
13 13
14#ifdef CONFIG_RAMSTER_MODULE
15#define CONFIG_RAMSTER
16#endif
17
18#ifdef CONFIG_RAMSTER 14#ifdef CONFIG_RAMSTER
19#include "ramster/ramster.h" 15#include "ramster/ramster.h"
20#else 16#else
diff --git a/drivers/staging/zcache/ramster/debug.c b/drivers/staging/zcache/ramster/debug.c
index 327e4f0d98e1..5b26ee977c2f 100644
--- a/drivers/staging/zcache/ramster/debug.c
+++ b/drivers/staging/zcache/ramster/debug.c
@@ -1,6 +1,8 @@
1#include <linux/atomic.h> 1#include <linux/atomic.h>
2#include "debug.h" 2#include "debug.h"
3 3
4ssize_t ramster_foreign_eph_pages;
5ssize_t ramster_foreign_pers_pages;
4#ifdef CONFIG_DEBUG_FS 6#ifdef CONFIG_DEBUG_FS
5#include <linux/debugfs.h> 7#include <linux/debugfs.h>
6 8
diff --git a/drivers/staging/zcache/ramster/ramster.c b/drivers/staging/zcache/ramster/ramster.c
index b18b887db79f..a937ce1fa27a 100644
--- a/drivers/staging/zcache/ramster/ramster.c
+++ b/drivers/staging/zcache/ramster/ramster.c
@@ -66,8 +66,6 @@ static int ramster_remote_target_nodenum __read_mostly = -1;
66 66
67/* Used by this code. */ 67/* Used by this code. */
68long ramster_flnodes; 68long ramster_flnodes;
69ssize_t ramster_foreign_eph_pages;
70ssize_t ramster_foreign_pers_pages;
71/* FIXME frontswap selfshrinking knobs in debugfs? */ 69/* FIXME frontswap selfshrinking knobs in debugfs? */
72 70
73static LIST_HEAD(ramster_rem_op_list); 71static LIST_HEAD(ramster_rem_op_list);
@@ -399,14 +397,18 @@ void ramster_count_foreign_pages(bool eph, int count)
399 inc_ramster_foreign_eph_pages(); 397 inc_ramster_foreign_eph_pages();
400 } else { 398 } else {
401 dec_ramster_foreign_eph_pages(); 399 dec_ramster_foreign_eph_pages();
400#ifdef CONFIG_RAMSTER_DEBUG
402 WARN_ON_ONCE(ramster_foreign_eph_pages < 0); 401 WARN_ON_ONCE(ramster_foreign_eph_pages < 0);
402#endif
403 } 403 }
404 } else { 404 } else {
405 if (count > 0) { 405 if (count > 0) {
406 inc_ramster_foreign_pers_pages(); 406 inc_ramster_foreign_pers_pages();
407 } else { 407 } else {
408 dec_ramster_foreign_pers_pages(); 408 dec_ramster_foreign_pers_pages();
409#ifdef CONFIG_RAMSTER_DEBUG
409 WARN_ON_ONCE(ramster_foreign_pers_pages < 0); 410 WARN_ON_ONCE(ramster_foreign_pers_pages < 0);
411#endif
410 } 412 }
411 } 413 }
412} 414}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 262ef1f23b38..d7705e5824fb 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -651,7 +651,7 @@ static int iscsit_add_reject(
651 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL); 651 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
652 if (!cmd->buf_ptr) { 652 if (!cmd->buf_ptr) {
653 pr_err("Unable to allocate memory for cmd->buf_ptr\n"); 653 pr_err("Unable to allocate memory for cmd->buf_ptr\n");
654 iscsit_release_cmd(cmd); 654 iscsit_free_cmd(cmd, false);
655 return -1; 655 return -1;
656 } 656 }
657 657
@@ -697,7 +697,7 @@ int iscsit_add_reject_from_cmd(
697 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL); 697 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
698 if (!cmd->buf_ptr) { 698 if (!cmd->buf_ptr) {
699 pr_err("Unable to allocate memory for cmd->buf_ptr\n"); 699 pr_err("Unable to allocate memory for cmd->buf_ptr\n");
700 iscsit_release_cmd(cmd); 700 iscsit_free_cmd(cmd, false);
701 return -1; 701 return -1;
702 } 702 }
703 703
@@ -1743,7 +1743,7 @@ int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1743 return 0; 1743 return 0;
1744out: 1744out:
1745 if (cmd) 1745 if (cmd)
1746 iscsit_release_cmd(cmd); 1746 iscsit_free_cmd(cmd, false);
1747ping_out: 1747ping_out:
1748 kfree(ping_data); 1748 kfree(ping_data);
1749 return ret; 1749 return ret;
@@ -2251,7 +2251,7 @@ iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2251 if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) { 2251 if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
2252 pr_err("Received logout request on connection that" 2252 pr_err("Received logout request on connection that"
2253 " is not in logged in state, ignoring request.\n"); 2253 " is not in logged in state, ignoring request.\n");
2254 iscsit_release_cmd(cmd); 2254 iscsit_free_cmd(cmd, false);
2255 return 0; 2255 return 0;
2256 } 2256 }
2257 2257
@@ -3665,7 +3665,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state
3665 list_del(&cmd->i_conn_node); 3665 list_del(&cmd->i_conn_node);
3666 spin_unlock_bh(&conn->cmd_lock); 3666 spin_unlock_bh(&conn->cmd_lock);
3667 3667
3668 iscsit_free_cmd(cmd); 3668 iscsit_free_cmd(cmd, false);
3669 break; 3669 break;
3670 case ISTATE_SEND_NOPIN_WANT_RESPONSE: 3670 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
3671 iscsit_mod_nopin_response_timer(conn); 3671 iscsit_mod_nopin_response_timer(conn);
@@ -4122,7 +4122,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
4122 4122
4123 iscsit_increment_maxcmdsn(cmd, sess); 4123 iscsit_increment_maxcmdsn(cmd, sess);
4124 4124
4125 iscsit_free_cmd(cmd); 4125 iscsit_free_cmd(cmd, true);
4126 4126
4127 spin_lock_bh(&conn->cmd_lock); 4127 spin_lock_bh(&conn->cmd_lock);
4128 } 4128 }
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 13e9e715ad2e..8d8b3ff68490 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -155,7 +155,7 @@ static ssize_t lio_target_np_store_iser(
155 struct iscsi_tpg_np *tpg_np_iser = NULL; 155 struct iscsi_tpg_np *tpg_np_iser = NULL;
156 char *endptr; 156 char *endptr;
157 u32 op; 157 u32 op;
158 int rc; 158 int rc = 0;
159 159
160 op = simple_strtoul(page, &endptr, 0); 160 op = simple_strtoul(page, &endptr, 0);
161 if ((op != 1) && (op != 0)) { 161 if ((op != 1) && (op != 0)) {
@@ -174,31 +174,32 @@ static ssize_t lio_target_np_store_iser(
174 return -EINVAL; 174 return -EINVAL;
175 175
176 if (op) { 176 if (op) {
177 int rc = request_module("ib_isert"); 177 rc = request_module("ib_isert");
178 if (rc != 0) 178 if (rc != 0) {
179 pr_warn("Unable to request_module for ib_isert\n"); 179 pr_warn("Unable to request_module for ib_isert\n");
180 rc = 0;
181 }
180 182
181 tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr, 183 tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
182 np->np_ip, tpg_np, ISCSI_INFINIBAND); 184 np->np_ip, tpg_np, ISCSI_INFINIBAND);
183 if (!tpg_np_iser || IS_ERR(tpg_np_iser)) 185 if (IS_ERR(tpg_np_iser)) {
186 rc = PTR_ERR(tpg_np_iser);
184 goto out; 187 goto out;
188 }
185 } else { 189 } else {
186 tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND); 190 tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
187 if (!tpg_np_iser) 191 if (tpg_np_iser) {
188 goto out; 192 rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser);
189 193 if (rc < 0)
190 rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser); 194 goto out;
191 if (rc < 0) 195 }
192 goto out;
193 } 196 }
194 197
195 printk("lio_target_np_store_iser() done, op: %d\n", op);
196
197 iscsit_put_tpg(tpg); 198 iscsit_put_tpg(tpg);
198 return count; 199 return count;
199out: 200out:
200 iscsit_put_tpg(tpg); 201 iscsit_put_tpg(tpg);
201 return -EINVAL; 202 return rc;
202} 203}
203 204
204TF_NP_BASE_ATTR(lio_target, iser, S_IRUGO | S_IWUSR); 205TF_NP_BASE_ATTR(lio_target, iser, S_IRUGO | S_IWUSR);
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 8e6298cc8839..dcb199da06b9 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -842,11 +842,11 @@ int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
842 return 0; 842 return 0;
843 843
844 sess->time2retain_timer_flags |= ISCSI_TF_STOP; 844 sess->time2retain_timer_flags |= ISCSI_TF_STOP;
845 spin_unlock_bh(&se_tpg->session_lock); 845 spin_unlock(&se_tpg->session_lock);
846 846
847 del_timer_sync(&sess->time2retain_timer); 847 del_timer_sync(&sess->time2retain_timer);
848 848
849 spin_lock_bh(&se_tpg->session_lock); 849 spin_lock(&se_tpg->session_lock);
850 sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING; 850 sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
851 pr_debug("Stopped Time2Retain Timer for SID: %u\n", 851 pr_debug("Stopped Time2Retain Timer for SID: %u\n",
852 sess->sid); 852 sess->sid);
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index ba6091bf93fc..45a5afd5ea13 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -143,7 +143,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
143 list_del(&cmd->i_conn_node); 143 list_del(&cmd->i_conn_node);
144 cmd->conn = NULL; 144 cmd->conn = NULL;
145 spin_unlock(&cr->conn_recovery_cmd_lock); 145 spin_unlock(&cr->conn_recovery_cmd_lock);
146 iscsit_free_cmd(cmd); 146 iscsit_free_cmd(cmd, true);
147 spin_lock(&cr->conn_recovery_cmd_lock); 147 spin_lock(&cr->conn_recovery_cmd_lock);
148 } 148 }
149 spin_unlock(&cr->conn_recovery_cmd_lock); 149 spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -165,7 +165,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
165 list_del(&cmd->i_conn_node); 165 list_del(&cmd->i_conn_node);
166 cmd->conn = NULL; 166 cmd->conn = NULL;
167 spin_unlock(&cr->conn_recovery_cmd_lock); 167 spin_unlock(&cr->conn_recovery_cmd_lock);
168 iscsit_free_cmd(cmd); 168 iscsit_free_cmd(cmd, true);
169 spin_lock(&cr->conn_recovery_cmd_lock); 169 spin_lock(&cr->conn_recovery_cmd_lock);
170 } 170 }
171 spin_unlock(&cr->conn_recovery_cmd_lock); 171 spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -248,7 +248,7 @@ void iscsit_discard_cr_cmds_by_expstatsn(
248 iscsit_remove_cmd_from_connection_recovery(cmd, sess); 248 iscsit_remove_cmd_from_connection_recovery(cmd, sess);
249 249
250 spin_unlock(&cr->conn_recovery_cmd_lock); 250 spin_unlock(&cr->conn_recovery_cmd_lock);
251 iscsit_free_cmd(cmd); 251 iscsit_free_cmd(cmd, true);
252 spin_lock(&cr->conn_recovery_cmd_lock); 252 spin_lock(&cr->conn_recovery_cmd_lock);
253 } 253 }
254 spin_unlock(&cr->conn_recovery_cmd_lock); 254 spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -302,7 +302,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
302 list_del(&cmd->i_conn_node); 302 list_del(&cmd->i_conn_node);
303 303
304 spin_unlock_bh(&conn->cmd_lock); 304 spin_unlock_bh(&conn->cmd_lock);
305 iscsit_free_cmd(cmd); 305 iscsit_free_cmd(cmd, true);
306 spin_lock_bh(&conn->cmd_lock); 306 spin_lock_bh(&conn->cmd_lock);
307 } 307 }
308 spin_unlock_bh(&conn->cmd_lock); 308 spin_unlock_bh(&conn->cmd_lock);
@@ -355,7 +355,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
355 355
356 list_del(&cmd->i_conn_node); 356 list_del(&cmd->i_conn_node);
357 spin_unlock_bh(&conn->cmd_lock); 357 spin_unlock_bh(&conn->cmd_lock);
358 iscsit_free_cmd(cmd); 358 iscsit_free_cmd(cmd, true);
359 spin_lock_bh(&conn->cmd_lock); 359 spin_lock_bh(&conn->cmd_lock);
360 continue; 360 continue;
361 } 361 }
@@ -375,7 +375,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
375 iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) { 375 iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
376 list_del(&cmd->i_conn_node); 376 list_del(&cmd->i_conn_node);
377 spin_unlock_bh(&conn->cmd_lock); 377 spin_unlock_bh(&conn->cmd_lock);
378 iscsit_free_cmd(cmd); 378 iscsit_free_cmd(cmd, true);
379 spin_lock_bh(&conn->cmd_lock); 379 spin_lock_bh(&conn->cmd_lock);
380 continue; 380 continue;
381 } 381 }
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index bb5d5c5bce65..3402241be87c 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -984,8 +984,6 @@ int iscsi_target_setup_login_socket(
984 } 984 }
985 985
986 np->np_transport = t; 986 np->np_transport = t;
987 printk("Set np->np_transport to %p -> %s\n", np->np_transport,
988 np->np_transport->name);
989 return 0; 987 return 0;
990} 988}
991 989
@@ -1002,7 +1000,6 @@ int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
1002 1000
1003 conn->sock = new_sock; 1001 conn->sock = new_sock;
1004 conn->login_family = np->np_sockaddr.ss_family; 1002 conn->login_family = np->np_sockaddr.ss_family;
1005 printk("iSCSI/TCP: Setup conn->sock from new_sock: %p\n", new_sock);
1006 1003
1007 if (np->np_sockaddr.ss_family == AF_INET6) { 1004 if (np->np_sockaddr.ss_family == AF_INET6) {
1008 memset(&sock_in6, 0, sizeof(struct sockaddr_in6)); 1005 memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 7ad912060e21..cd5018ff9cd7 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -721,9 +721,6 @@ int iscsi_target_locate_portal(
721 721
722 start += strlen(key) + strlen(value) + 2; 722 start += strlen(key) + strlen(value) + 2;
723 } 723 }
724
725 printk("i_buf: %s, s_buf: %s, t_buf: %s\n", i_buf, s_buf, t_buf);
726
727 /* 724 /*
728 * See 5.3. Login Phase. 725 * See 5.3. Login Phase.
729 */ 726 */
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index c2185fc31136..e38222191a33 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -758,9 +758,9 @@ static int iscsi_add_notunderstood_response(
758 } 758 }
759 INIT_LIST_HEAD(&extra_response->er_list); 759 INIT_LIST_HEAD(&extra_response->er_list);
760 760
761 strncpy(extra_response->key, key, strlen(key) + 1); 761 strlcpy(extra_response->key, key, sizeof(extra_response->key));
762 strncpy(extra_response->value, NOTUNDERSTOOD, 762 strlcpy(extra_response->value, NOTUNDERSTOOD,
763 strlen(NOTUNDERSTOOD) + 1); 763 sizeof(extra_response->value));
764 764
765 list_add_tail(&extra_response->er_list, 765 list_add_tail(&extra_response->er_list,
766 &param_list->extra_response_list); 766 &param_list->extra_response_list);
@@ -1629,8 +1629,6 @@ int iscsi_decode_text_input(
1629 1629
1630 if (phase & PHASE_SECURITY) { 1630 if (phase & PHASE_SECURITY) {
1631 if (iscsi_check_for_auth_key(key) > 0) { 1631 if (iscsi_check_for_auth_key(key) > 0) {
1632 char *tmpptr = key + strlen(key);
1633 *tmpptr = '=';
1634 kfree(tmpbuf); 1632 kfree(tmpbuf);
1635 return 1; 1633 return 1;
1636 } 1634 }
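
Note: the strncpy() calls above become strlcpy() because bounding the copy by strlen(src)+1 does not protect the destination at all, while strlcpy() bounds by the destination size and always NUL-terminates. glibc has no strlcpy(), so the runnable sketch below defines a minimal local version:

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = (len >= size) ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';		/* always terminated */
	}
	return len;	/* length the caller tried to create */
}

int main(void)
{
	char key[8];
	const char *input = "MaxRecvDataSegmentLength";	/* longer than key[] */

	my_strlcpy(key, input, sizeof(key));
	printf("stored \"%s\" (%zu bytes available)\n", key, sizeof(key));
	return 0;
}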
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index 915b06798505..a47046a752aa 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -1,8 +1,10 @@
1#ifndef ISCSI_PARAMETERS_H 1#ifndef ISCSI_PARAMETERS_H
2#define ISCSI_PARAMETERS_H 2#define ISCSI_PARAMETERS_H
3 3
4#include <scsi/iscsi_proto.h>
5
4struct iscsi_extra_response { 6struct iscsi_extra_response {
5 char key[64]; 7 char key[KEY_MAXLEN];
6 char value[32]; 8 char value[32];
7 struct list_head er_list; 9 struct list_head er_list;
8} ____cacheline_aligned; 10} ____cacheline_aligned;
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 2cc6c9a3ffb8..08a3bacef0c5 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -676,40 +676,56 @@ void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
676 676
677void iscsit_release_cmd(struct iscsi_cmd *cmd) 677void iscsit_release_cmd(struct iscsi_cmd *cmd)
678{ 678{
679 struct iscsi_conn *conn = cmd->conn;
680
681 iscsit_free_r2ts_from_list(cmd);
682 iscsit_free_all_datain_reqs(cmd);
683
684 kfree(cmd->buf_ptr); 679 kfree(cmd->buf_ptr);
685 kfree(cmd->pdu_list); 680 kfree(cmd->pdu_list);
686 kfree(cmd->seq_list); 681 kfree(cmd->seq_list);
687 kfree(cmd->tmr_req); 682 kfree(cmd->tmr_req);
688 kfree(cmd->iov_data); 683 kfree(cmd->iov_data);
689 684
690 if (conn) { 685 kmem_cache_free(lio_cmd_cache, cmd);
686}
687
688static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
689 bool check_queues)
690{
691 struct iscsi_conn *conn = cmd->conn;
692
693 if (scsi_cmd) {
694 if (cmd->data_direction == DMA_TO_DEVICE) {
695 iscsit_stop_dataout_timer(cmd);
696 iscsit_free_r2ts_from_list(cmd);
697 }
698 if (cmd->data_direction == DMA_FROM_DEVICE)
699 iscsit_free_all_datain_reqs(cmd);
700 }
701
702 if (conn && check_queues) {
691 iscsit_remove_cmd_from_immediate_queue(cmd, conn); 703 iscsit_remove_cmd_from_immediate_queue(cmd, conn);
692 iscsit_remove_cmd_from_response_queue(cmd, conn); 704 iscsit_remove_cmd_from_response_queue(cmd, conn);
693 } 705 }
694
695 kmem_cache_free(lio_cmd_cache, cmd);
696} 706}
697 707
698void iscsit_free_cmd(struct iscsi_cmd *cmd) 708void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
699{ 709{
710 struct se_cmd *se_cmd = NULL;
711 int rc;
700 /* 712 /*
701 * Determine if a struct se_cmd is associated with 713 * Determine if a struct se_cmd is associated with
702 * this struct iscsi_cmd. 714 * this struct iscsi_cmd.
703 */ 715 */
704 switch (cmd->iscsi_opcode) { 716 switch (cmd->iscsi_opcode) {
705 case ISCSI_OP_SCSI_CMD: 717 case ISCSI_OP_SCSI_CMD:
706 if (cmd->data_direction == DMA_TO_DEVICE) 718 se_cmd = &cmd->se_cmd;
707 iscsit_stop_dataout_timer(cmd); 719 __iscsit_free_cmd(cmd, true, shutdown);
708 /* 720 /*
709 * Fallthrough 721 * Fallthrough
710 */ 722 */
711 case ISCSI_OP_SCSI_TMFUNC: 723 case ISCSI_OP_SCSI_TMFUNC:
712 transport_generic_free_cmd(&cmd->se_cmd, 1); 724 rc = transport_generic_free_cmd(&cmd->se_cmd, 1);
725 if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
726 __iscsit_free_cmd(cmd, true, shutdown);
727 target_put_sess_cmd(se_cmd->se_sess, se_cmd);
728 }
713 break; 729 break;
714 case ISCSI_OP_REJECT: 730 case ISCSI_OP_REJECT:
715 /* 731 /*
@@ -718,11 +734,19 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd)
718 * associated cmd->se_cmd needs to be released. 734 * associated cmd->se_cmd needs to be released.
719 */ 735 */
720 if (cmd->se_cmd.se_tfo != NULL) { 736 if (cmd->se_cmd.se_tfo != NULL) {
721 transport_generic_free_cmd(&cmd->se_cmd, 1); 737 se_cmd = &cmd->se_cmd;
738 __iscsit_free_cmd(cmd, true, shutdown);
739
740 rc = transport_generic_free_cmd(&cmd->se_cmd, 1);
741 if (!rc && shutdown && se_cmd->se_sess) {
742 __iscsit_free_cmd(cmd, true, shutdown);
743 target_put_sess_cmd(se_cmd->se_sess, se_cmd);
744 }
722 break; 745 break;
723 } 746 }
724 /* Fall-through */ 747 /* Fall-through */
725 default: 748 default:
749 __iscsit_free_cmd(cmd, false, shutdown);
726 cmd->release_cmd(cmd); 750 cmd->release_cmd(cmd);
727 break; 751 break;
728 } 752 }
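The iscsit_free_cmd() rework above hinges on knowing whether the normal release path actually dropped the last reference; only when it did not may the shutdown path take the extra target_put_sess_cmd(). A minimal userspace sketch of that pattern, assuming a toy reference count — struct cmd, cmd_put() and cmd_free() are invented names, not the kernel code:

/* Free routine that reports whether it dropped the final reference, so a
 * shutdown path can decide to drop the extra "session" reference itself. */
#include <stdbool.h>
#include <stdlib.h>

struct cmd {
	int refs;                    /* toy reference count */
};

/* Drop one reference; return 1 if the object was freed, 0 otherwise. */
static int cmd_put(struct cmd *c)
{
	if (--c->refs == 0) {
		free(c);
		return 1;
	}
	return 0;
}

/* Mirrors the idea of transport_generic_free_cmd() returning a value:
 * propagate whether the regular free path released the object. */
static int cmd_free(struct cmd *c)
{
	return cmd_put(c);
}

int main(void)
{
	struct cmd *c = malloc(sizeof(*c));
	bool shutdown = true;

	c->refs = 2;                 /* one for the caller, one for the "session" */

	/* Regular free did not drop the last reference ... */
	if (!cmd_free(c) && shutdown) {
		/* ... so the shutdown path drops the session reference too,
		 * matching the extra put in the hunk above. */
		cmd_put(c);
	}
	return 0;
}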
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 4f8e01a47081..a4422659d049 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -29,7 +29,7 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co
29extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); 29extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
30extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *); 30extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
31extern void iscsit_release_cmd(struct iscsi_cmd *); 31extern void iscsit_release_cmd(struct iscsi_cmd *);
32extern void iscsit_free_cmd(struct iscsi_cmd *); 32extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
33extern int iscsit_check_session_usage_count(struct iscsi_session *); 33extern int iscsit_check_session_usage_count(struct iscsi_session *);
34extern void iscsit_dec_session_usage_count(struct iscsi_session *); 34extern void iscsit_dec_session_usage_count(struct iscsi_session *);
35extern void iscsit_inc_session_usage_count(struct iscsi_session *); 35extern void iscsit_inc_session_usage_count(struct iscsi_session *);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 1b1d544e927a..b11890d85120 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -153,6 +153,7 @@ static int fd_configure_device(struct se_device *dev)
153 struct request_queue *q = bdev_get_queue(inode->i_bdev); 153 struct request_queue *q = bdev_get_queue(inode->i_bdev);
154 unsigned long long dev_size; 154 unsigned long long dev_size;
155 155
156 fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
156 /* 157 /*
157 * Determine the number of bytes from i_size_read() minus 158 * Determine the number of bytes from i_size_read() minus
158 * one (1) logical sector from underlying struct block_device 159 * one (1) logical sector from underlying struct block_device
@@ -199,6 +200,7 @@ static int fd_configure_device(struct se_device *dev)
199 goto fail; 200 goto fail;
200 } 201 }
201 202
203 fd_dev->fd_block_size = FD_BLOCKSIZE;
202 /* 204 /*
203 * Limit UNMAP emulation to 8k Number of LBAs (NoLB) 205 * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
204 */ 206 */
@@ -217,9 +219,7 @@ static int fd_configure_device(struct se_device *dev)
217 dev->dev_attrib.max_write_same_len = 0x1000; 219 dev->dev_attrib.max_write_same_len = 0x1000;
218 } 220 }
219 221
220 fd_dev->fd_block_size = dev->dev_attrib.hw_block_size; 222 dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
221
222 dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
223 dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; 223 dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
224 dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; 224 dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
225 225
@@ -694,11 +694,12 @@ static sector_t fd_get_blocks(struct se_device *dev)
694 * to handle underlying block_device resize operations. 694 * to handle underlying block_device resize operations.
695 */ 695 */
696 if (S_ISBLK(i->i_mode)) 696 if (S_ISBLK(i->i_mode))
697 dev_size = (i_size_read(i) - fd_dev->fd_block_size); 697 dev_size = i_size_read(i);
698 else 698 else
699 dev_size = fd_dev->fd_dev_size; 699 dev_size = fd_dev->fd_dev_size;
700 700
701 return div_u64(dev_size, dev->dev_attrib.block_size); 701 return div_u64(dev_size - dev->dev_attrib.block_size,
702 dev->dev_attrib.block_size);
702} 703}
703 704
704static struct sbc_ops fd_sbc_ops = { 705static struct sbc_ops fd_sbc_ops = {
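The fd_get_blocks() change reports the last addressable LBA by subtracting one logical block before dividing, for both block- and file-backed devices. A quick userspace check of the arithmetic, assuming a 1 MiB backing store with 512-byte sectors (last_lba() is an invented helper, not driver code):

#include <stdint.h>
#include <stdio.h>

static uint64_t last_lba(uint64_t dev_size, uint32_t block_size)
{
	/* same shape as div_u64(dev_size - block_size, block_size) */
	return (dev_size - block_size) / block_size;
}

int main(void)
{
	/* 1 MiB / 512-byte sectors: 2048 sectors, last LBA is 2047 */
	printf("%llu\n", (unsigned long long)last_lba(1024 * 1024, 512));
	return 0;
}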
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 4a793362309d..21e315874a54 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -65,7 +65,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd);
65static void transport_handle_queue_full(struct se_cmd *cmd, 65static void transport_handle_queue_full(struct se_cmd *cmd,
66 struct se_device *dev); 66 struct se_device *dev);
67static int transport_generic_get_mem(struct se_cmd *cmd); 67static int transport_generic_get_mem(struct se_cmd *cmd);
68static void transport_put_cmd(struct se_cmd *cmd); 68static int transport_put_cmd(struct se_cmd *cmd);
69static void target_complete_ok_work(struct work_struct *work); 69static void target_complete_ok_work(struct work_struct *work);
70 70
71int init_se_kmem_caches(void) 71int init_se_kmem_caches(void)
@@ -221,6 +221,7 @@ struct se_session *transport_init_session(void)
221 INIT_LIST_HEAD(&se_sess->sess_list); 221 INIT_LIST_HEAD(&se_sess->sess_list);
222 INIT_LIST_HEAD(&se_sess->sess_acl_list); 222 INIT_LIST_HEAD(&se_sess->sess_acl_list);
223 INIT_LIST_HEAD(&se_sess->sess_cmd_list); 223 INIT_LIST_HEAD(&se_sess->sess_cmd_list);
224 INIT_LIST_HEAD(&se_sess->sess_wait_list);
224 spin_lock_init(&se_sess->sess_cmd_lock); 225 spin_lock_init(&se_sess->sess_cmd_lock);
225 kref_init(&se_sess->sess_kref); 226 kref_init(&se_sess->sess_kref);
226 227
@@ -1943,7 +1944,7 @@ static inline void transport_free_pages(struct se_cmd *cmd)
1943 * This routine unconditionally frees a command, and reference counting 1944 * This routine unconditionally frees a command, and reference counting
1944 * or list removal must be done in the caller. 1945 * or list removal must be done in the caller.
1945 */ 1946 */
1946static void transport_release_cmd(struct se_cmd *cmd) 1947static int transport_release_cmd(struct se_cmd *cmd)
1947{ 1948{
1948 BUG_ON(!cmd->se_tfo); 1949 BUG_ON(!cmd->se_tfo);
1949 1950
@@ -1955,11 +1956,11 @@ static void transport_release_cmd(struct se_cmd *cmd)
1955 * If this cmd has been setup with target_get_sess_cmd(), drop 1956 * If this cmd has been setup with target_get_sess_cmd(), drop
1956 * the kref and call ->release_cmd() in kref callback. 1957 * the kref and call ->release_cmd() in kref callback.
1957 */ 1958 */
1958 if (cmd->check_release != 0) { 1959 if (cmd->check_release != 0)
1959 target_put_sess_cmd(cmd->se_sess, cmd); 1960 return target_put_sess_cmd(cmd->se_sess, cmd);
1960 return; 1961
1961 }
1962 cmd->se_tfo->release_cmd(cmd); 1962 cmd->se_tfo->release_cmd(cmd);
1963 return 1;
1963} 1964}
1964 1965
1965/** 1966/**
@@ -1968,7 +1969,7 @@ static void transport_release_cmd(struct se_cmd *cmd)
1968 * 1969 *
1969 * This routine releases our reference to the command and frees it if possible. 1970 * This routine releases our reference to the command and frees it if possible.
1970 */ 1971 */
1971static void transport_put_cmd(struct se_cmd *cmd) 1972static int transport_put_cmd(struct se_cmd *cmd)
1972{ 1973{
1973 unsigned long flags; 1974 unsigned long flags;
1974 1975
@@ -1976,7 +1977,7 @@ static void transport_put_cmd(struct se_cmd *cmd)
1976 if (atomic_read(&cmd->t_fe_count) && 1977 if (atomic_read(&cmd->t_fe_count) &&
1977 !atomic_dec_and_test(&cmd->t_fe_count)) { 1978 !atomic_dec_and_test(&cmd->t_fe_count)) {
1978 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 1979 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1979 return; 1980 return 0;
1980 } 1981 }
1981 1982
1982 if (cmd->transport_state & CMD_T_DEV_ACTIVE) { 1983 if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
@@ -1986,8 +1987,7 @@ static void transport_put_cmd(struct se_cmd *cmd)
1986 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 1987 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1987 1988
1988 transport_free_pages(cmd); 1989 transport_free_pages(cmd);
1989 transport_release_cmd(cmd); 1990 return transport_release_cmd(cmd);
1990 return;
1991} 1991}
1992 1992
1993void *transport_kmap_data_sg(struct se_cmd *cmd) 1993void *transport_kmap_data_sg(struct se_cmd *cmd)
@@ -2152,13 +2152,15 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
2152 } 2152 }
2153} 2153}
2154 2154
2155void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2155int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2156{ 2156{
2157 int ret = 0;
2158
2157 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { 2159 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
2158 if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2160 if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2159 transport_wait_for_tasks(cmd); 2161 transport_wait_for_tasks(cmd);
2160 2162
2161 transport_release_cmd(cmd); 2163 ret = transport_release_cmd(cmd);
2162 } else { 2164 } else {
2163 if (wait_for_tasks) 2165 if (wait_for_tasks)
2164 transport_wait_for_tasks(cmd); 2166 transport_wait_for_tasks(cmd);
@@ -2166,8 +2168,9 @@ void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2166 if (cmd->se_lun) 2168 if (cmd->se_lun)
2167 transport_lun_remove_cmd(cmd); 2169 transport_lun_remove_cmd(cmd);
2168 2170
2169 transport_put_cmd(cmd); 2171 ret = transport_put_cmd(cmd);
2170 } 2172 }
2173 return ret;
2171} 2174}
2172EXPORT_SYMBOL(transport_generic_free_cmd); 2175EXPORT_SYMBOL(transport_generic_free_cmd);
2173 2176
@@ -2250,11 +2253,14 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2250 unsigned long flags; 2253 unsigned long flags;
2251 2254
2252 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2255 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2253 2256 if (se_sess->sess_tearing_down) {
2254 WARN_ON(se_sess->sess_tearing_down); 2257 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2258 return;
2259 }
2255 se_sess->sess_tearing_down = 1; 2260 se_sess->sess_tearing_down = 1;
2261 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
2256 2262
2257 list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) 2263 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
2258 se_cmd->cmd_wait_set = 1; 2264 se_cmd->cmd_wait_set = 1;
2259 2265
2260 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2266 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
@@ -2263,44 +2269,32 @@ EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
2263 2269
2264/* target_wait_for_sess_cmds - Wait for outstanding descriptors 2270/* target_wait_for_sess_cmds - Wait for outstanding descriptors
2265 * @se_sess: session to wait for active I/O 2271 * @se_sess: session to wait for active I/O
2266 * @wait_for_tasks: Make extra transport_wait_for_tasks call
2267 */ 2272 */
2268void target_wait_for_sess_cmds( 2273void target_wait_for_sess_cmds(struct se_session *se_sess)
2269 struct se_session *se_sess,
2270 int wait_for_tasks)
2271{ 2274{
2272 struct se_cmd *se_cmd, *tmp_cmd; 2275 struct se_cmd *se_cmd, *tmp_cmd;
2273 bool rc = false; 2276 unsigned long flags;
2274 2277
2275 list_for_each_entry_safe(se_cmd, tmp_cmd, 2278 list_for_each_entry_safe(se_cmd, tmp_cmd,
2276 &se_sess->sess_cmd_list, se_cmd_list) { 2279 &se_sess->sess_wait_list, se_cmd_list) {
2277 list_del(&se_cmd->se_cmd_list); 2280 list_del(&se_cmd->se_cmd_list);
2278 2281
2279 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" 2282 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
2280 " %d\n", se_cmd, se_cmd->t_state, 2283 " %d\n", se_cmd, se_cmd->t_state,
2281 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2284 se_cmd->se_tfo->get_cmd_state(se_cmd));
2282 2285
2283 if (wait_for_tasks) { 2286 wait_for_completion(&se_cmd->cmd_wait_comp);
2284 pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d," 2287 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
2285 " fabric state: %d\n", se_cmd, se_cmd->t_state, 2288 " fabric state: %d\n", se_cmd, se_cmd->t_state,
2286 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2289 se_cmd->se_tfo->get_cmd_state(se_cmd));
2287
2288 rc = transport_wait_for_tasks(se_cmd);
2289
2290 pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
2291 " fabric state: %d\n", se_cmd, se_cmd->t_state,
2292 se_cmd->se_tfo->get_cmd_state(se_cmd));
2293 }
2294
2295 if (!rc) {
2296 wait_for_completion(&se_cmd->cmd_wait_comp);
2297 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
2298 " fabric state: %d\n", se_cmd, se_cmd->t_state,
2299 se_cmd->se_tfo->get_cmd_state(se_cmd));
2300 }
2301 2290
2302 se_cmd->se_tfo->release_cmd(se_cmd); 2291 se_cmd->se_tfo->release_cmd(se_cmd);
2303 } 2292 }
2293
2294 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2295 WARN_ON(!list_empty(&se_sess->sess_cmd_list));
2296 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2297
2304} 2298}
2305EXPORT_SYMBOL(target_wait_for_sess_cmds); 2299EXPORT_SYMBOL(target_wait_for_sess_cmds);
2306 2300
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 59bfaecc4e14..abfd99089781 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -244,14 +244,9 @@ static void pty_flush_buffer(struct tty_struct *tty)
244 244
245static int pty_open(struct tty_struct *tty, struct file *filp) 245static int pty_open(struct tty_struct *tty, struct file *filp)
246{ 246{
247 int retval = -ENODEV;
248
249 if (!tty || !tty->link) 247 if (!tty || !tty->link)
250 goto out; 248 return -ENODEV;
251
252 set_bit(TTY_IO_ERROR, &tty->flags);
253 249
254 retval = -EIO;
255 if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) 250 if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
256 goto out; 251 goto out;
257 if (test_bit(TTY_PTY_LOCK, &tty->link->flags)) 252 if (test_bit(TTY_PTY_LOCK, &tty->link->flags))
@@ -262,9 +257,11 @@ static int pty_open(struct tty_struct *tty, struct file *filp)
262 clear_bit(TTY_IO_ERROR, &tty->flags); 257 clear_bit(TTY_IO_ERROR, &tty->flags);
263 clear_bit(TTY_OTHER_CLOSED, &tty->link->flags); 258 clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
264 set_bit(TTY_THROTTLED, &tty->flags); 259 set_bit(TTY_THROTTLED, &tty->flags);
265 retval = 0; 260 return 0;
261
266out: 262out:
267 return retval; 263 set_bit(TTY_IO_ERROR, &tty->flags);
264 return -EIO;
268} 265}
269 266
270static void pty_set_termios(struct tty_struct *tty, 267static void pty_set_termios(struct tty_struct *tty,
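The pty_open() rewrite moves the TTY_IO_ERROR handling so the flag is raised only on the failure exit instead of being set up front and cleared on success. A small userspace sketch of that control flow, with a plain int standing in for the flag bit and fake_open() an invented name:

#include <stdio.h>

static int io_error_flag;

static int fake_open(int other_side_closed)
{
	if (other_side_closed)
		goto out;

	io_error_flag = 0;           /* success path never shows a transient error */
	return 0;

out:
	io_error_flag = 1;           /* only the failure path marks the tty bad */
	return -5;                   /* -EIO */
}

int main(void)
{
	int ret;

	ret = fake_open(0);
	printf("%d %d\n", ret, io_error_flag);   /* 0 0 */

	ret = fake_open(1);
	printf("%d %d\n", ret, io_error_flag);   /* -5 1 */
	return 0;
}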
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 46528d57be72..86c00b1c5583 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -2755,7 +2755,7 @@ static void __init serial8250_isa_init_ports(void)
2755 if (nr_uarts > UART_NR) 2755 if (nr_uarts > UART_NR)
2756 nr_uarts = UART_NR; 2756 nr_uarts = UART_NR;
2757 2757
2758 for (i = 0; i < UART_NR; i++) { 2758 for (i = 0; i < nr_uarts; i++) {
2759 struct uart_8250_port *up = &serial8250_ports[i]; 2759 struct uart_8250_port *up = &serial8250_ports[i];
2760 struct uart_port *port = &up->port; 2760 struct uart_port *port = &up->port;
2761 2761
@@ -2916,7 +2916,7 @@ static int __init serial8250_console_setup(struct console *co, char *options)
2916 * if so, search for the first available port that does have 2916 * if so, search for the first available port that does have
2917 * console support. 2917 * console support.
2918 */ 2918 */
2919 if (co->index >= UART_NR) 2919 if (co->index >= nr_uarts)
2920 co->index = 0; 2920 co->index = 0;
2921 port = &serial8250_ports[co->index].port; 2921 port = &serial8250_ports[co->index].port;
2922 if (!port->iobase && !port->membase) 2922 if (!port->iobase && !port->membase)
@@ -2957,7 +2957,7 @@ int serial8250_find_port(struct uart_port *p)
2957 int line; 2957 int line;
2958 struct uart_port *port; 2958 struct uart_port *port;
2959 2959
2960 for (line = 0; line < UART_NR; line++) { 2960 for (line = 0; line < nr_uarts; line++) {
2961 port = &serial8250_ports[line].port; 2961 port = &serial8250_ports[line].port;
2962 if (uart_match_port(p, port)) 2962 if (uart_match_port(p, port))
2963 return line; 2963 return line;
@@ -3110,7 +3110,7 @@ static int serial8250_remove(struct platform_device *dev)
3110{ 3110{
3111 int i; 3111 int i;
3112 3112
3113 for (i = 0; i < UART_NR; i++) { 3113 for (i = 0; i < nr_uarts; i++) {
3114 struct uart_8250_port *up = &serial8250_ports[i]; 3114 struct uart_8250_port *up = &serial8250_ports[i];
3115 3115
3116 if (up->port.dev == &dev->dev) 3116 if (up->port.dev == &dev->dev)
@@ -3178,7 +3178,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *
3178 /* 3178 /*
3179 * First, find a port entry which matches. 3179 * First, find a port entry which matches.
3180 */ 3180 */
3181 for (i = 0; i < UART_NR; i++) 3181 for (i = 0; i < nr_uarts; i++)
3182 if (uart_match_port(&serial8250_ports[i].port, port)) 3182 if (uart_match_port(&serial8250_ports[i].port, port))
3183 return &serial8250_ports[i]; 3183 return &serial8250_ports[i];
3184 3184
@@ -3187,7 +3187,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *
3187 * free entry. We look for one which hasn't been previously 3187 * free entry. We look for one which hasn't been previously
3188 * used (indicated by zero iobase). 3188 * used (indicated by zero iobase).
3189 */ 3189 */
3190 for (i = 0; i < UART_NR; i++) 3190 for (i = 0; i < nr_uarts; i++)
3191 if (serial8250_ports[i].port.type == PORT_UNKNOWN && 3191 if (serial8250_ports[i].port.type == PORT_UNKNOWN &&
3192 serial8250_ports[i].port.iobase == 0) 3192 serial8250_ports[i].port.iobase == 0)
3193 return &serial8250_ports[i]; 3193 return &serial8250_ports[i];
@@ -3196,7 +3196,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *
3196 * That also failed. Last resort is to find any entry which 3196 * That also failed. Last resort is to find any entry which
3197 * doesn't have a real port associated with it. 3197 * doesn't have a real port associated with it.
3198 */ 3198 */
3199 for (i = 0; i < UART_NR; i++) 3199 for (i = 0; i < nr_uarts; i++)
3200 if (serial8250_ports[i].port.type == PORT_UNKNOWN) 3200 if (serial8250_ports[i].port.type == PORT_UNKNOWN)
3201 return &serial8250_ports[i]; 3201 return &serial8250_ports[i];
3202 3202
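All of the 8250 loops above now stop at nr_uarts, the number of ports actually registered, instead of the static array bound UART_NR, so never-configured slots are left untouched. A trivial userspace illustration of the changed loop bound (the values are made up):

#include <stdio.h>

#define UART_NR 48

int main(void)
{
	int ports[UART_NR] = { 0 };
	int nr_uarts = 4;            /* e.g. set by the nr_uarts= module parameter */
	int i, visited = 0;

	for (i = 0; i < nr_uarts; i++) {     /* was: i < UART_NR */
		ports[i] = 1;
		visited++;
	}
	printf("visited %d of %d slots\n", visited, UART_NR);   /* 4 of 48 */
	return 0;
}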
diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c
index 097dff9c08ad..bb91b4713ebd 100644
--- a/drivers/tty/serial/8250/8250_gsc.c
+++ b/drivers/tty/serial/8250/8250_gsc.c
@@ -30,6 +30,12 @@ static int __init serial_init_chip(struct parisc_device *dev)
30 unsigned long address; 30 unsigned long address;
31 int err; 31 int err;
32 32
33#ifdef CONFIG_64BIT
34 extern int iosapic_serial_irq(int cellnum);
35 if (!dev->irq && (dev->id.sversion == 0xad))
36 dev->irq = iosapic_serial_irq(dev->mod_index-1);
37#endif
38
33 if (!dev->irq) { 39 if (!dev->irq) {
34 /* We find some unattached serial ports by walking native 40 /* We find some unattached serial ports by walking native
35 * busses. These should be silently ignored. Otherwise, 41 * busses. These should be silently ignored. Otherwise,
@@ -51,7 +57,8 @@ static int __init serial_init_chip(struct parisc_device *dev)
51 memset(&uart, 0, sizeof(uart)); 57 memset(&uart, 0, sizeof(uart));
52 uart.port.iotype = UPIO_MEM; 58 uart.port.iotype = UPIO_MEM;
53 /* 7.272727MHz on Lasi. Assumed the same for Dino, Wax and Timi. */ 59 /* 7.272727MHz on Lasi. Assumed the same for Dino, Wax and Timi. */
54 uart.port.uartclk = 7272727; 60 uart.port.uartclk = (dev->id.sversion != 0xad) ?
61 7272727 : 1843200;
55 uart.port.mapbase = address; 62 uart.port.mapbase = address;
56 uart.port.membase = ioremap_nocache(address, 16); 63 uart.port.membase = ioremap_nocache(address, 16);
57 uart.port.irq = dev->irq; 64 uart.port.irq = dev->irq;
@@ -73,6 +80,7 @@ static struct parisc_device_id serial_tbl[] = {
73 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00075 }, 80 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00075 },
74 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008c }, 81 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008c },
75 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008d }, 82 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008d },
83 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x000ad },
76 { 0 } 84 { 0 }
77}; 85};
78 86
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 147c9e193595..8cdfbd365892 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -761,6 +761,8 @@ static int imx_startup(struct uart_port *port)
761 761
762 temp = readl(sport->port.membase + UCR2); 762 temp = readl(sport->port.membase + UCR2);
763 temp |= (UCR2_RXEN | UCR2_TXEN); 763 temp |= (UCR2_RXEN | UCR2_TXEN);
764 if (!sport->have_rtscts)
765 temp |= UCR2_IRTS;
764 writel(temp, sport->port.membase + UCR2); 766 writel(temp, sport->port.membase + UCR2);
765 767
766 if (USE_IRDA(sport)) { 768 if (USE_IRDA(sport)) {
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 89429410a245..0c8a9fa2be6c 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1166,6 +1166,18 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
1166 ourport->tx_irq = ret; 1166 ourport->tx_irq = ret;
1167 1167
1168 ourport->clk = clk_get(&platdev->dev, "uart"); 1168 ourport->clk = clk_get(&platdev->dev, "uart");
1169 if (IS_ERR(ourport->clk)) {
1170 pr_err("%s: Controller clock not found\n",
1171 dev_name(&platdev->dev));
1172 return PTR_ERR(ourport->clk);
1173 }
1174
1175 ret = clk_prepare_enable(ourport->clk);
1176 if (ret) {
1177 pr_err("uart: clock failed to prepare+enable: %d\n", ret);
1178 clk_put(ourport->clk);
1179 return ret;
1180 }
1169 1181
1170 /* Keep all interrupts masked and cleared */ 1182 /* Keep all interrupts masked and cleared */
1171 if (s3c24xx_serial_has_interrupt_mask(port)) { 1183 if (s3c24xx_serial_has_interrupt_mask(port)) {
@@ -1180,6 +1192,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
1180 1192
1181 /* reset the fifos (and setup the uart) */ 1193 /* reset the fifos (and setup the uart) */
1182 s3c24xx_serial_resetport(port, cfg); 1194 s3c24xx_serial_resetport(port, cfg);
1195 clk_disable_unprepare(ourport->clk);
1183 return 0; 1196 return 0;
1184} 1197}
1185 1198
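The samsung.c hunk adds the usual clock handling around the port reset: check the error pointer from clk_get(), enable the clock before touching the hardware, and balance with clk_disable_unprepare() afterwards. A minimal sketch of that pattern using only in-tree clk APIs; my_hw_init() and its device argument are hypothetical, and unlike the driver it also drops the clock reference when done:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int my_hw_init(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = clk_get(dev, "uart");
	if (IS_ERR(clk))
		return PTR_ERR(clk);         /* propagate -ENOENT etc. */

	ret = clk_prepare_enable(clk);
	if (ret) {
		clk_put(clk);
		return ret;
	}

	/* ... touch registers that need the clock running ... */

	clk_disable_unprepare(clk);          /* balance the enable */
	clk_put(clk);
	return 0;
}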
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index fc2c06c66e89..2bd78e2ac8ec 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -289,13 +289,10 @@ static int vt_disallocate(unsigned int vc_num)
289 struct vc_data *vc = NULL; 289 struct vc_data *vc = NULL;
290 int ret = 0; 290 int ret = 0;
291 291
292 if (!vc_num)
293 return 0;
294
295 console_lock(); 292 console_lock();
296 if (VT_BUSY(vc_num)) 293 if (VT_BUSY(vc_num))
297 ret = -EBUSY; 294 ret = -EBUSY;
298 else 295 else if (vc_num)
299 vc = vc_deallocate(vc_num); 296 vc = vc_deallocate(vc_num);
300 console_unlock(); 297 console_unlock();
301 298
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 49b098bedf9b..475c9c114689 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -276,8 +276,9 @@ static void ci_role_work(struct work_struct *work)
276 276
277 ci_role_stop(ci); 277 ci_role_stop(ci);
278 ci_role_start(ci, role); 278 ci_role_start(ci, role);
279 enable_irq(ci->irq);
280 } 279 }
280
281 enable_irq(ci->irq);
281} 282}
282 283
283static irqreturn_t ci_irq(int irq, void *data) 284static irqreturn_t ci_irq(int irq, void *data)
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 519ead2443c5..b501346484ae 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1678,8 +1678,11 @@ static int udc_start(struct ci13xxx *ci)
1678 1678
1679 ci->gadget.ep0 = &ci->ep0in->ep; 1679 ci->gadget.ep0 = &ci->ep0in->ep;
1680 1680
1681 if (ci->global_phy) 1681 if (ci->global_phy) {
1682 ci->transceiver = usb_get_phy(USB_PHY_TYPE_USB2); 1682 ci->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
1683 if (IS_ERR(ci->transceiver))
1684 ci->transceiver = NULL;
1685 }
1683 1686
1684 if (ci->platdata->flags & CI13XXX_REQUIRE_TRANSCEIVER) { 1687 if (ci->platdata->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
1685 if (ci->transceiver == NULL) { 1688 if (ci->transceiver == NULL) {
@@ -1694,7 +1697,7 @@ static int udc_start(struct ci13xxx *ci)
1694 goto put_transceiver; 1697 goto put_transceiver;
1695 } 1698 }
1696 1699
1697 if (!IS_ERR_OR_NULL(ci->transceiver)) { 1700 if (ci->transceiver) {
1698 retval = otg_set_peripheral(ci->transceiver->otg, 1701 retval = otg_set_peripheral(ci->transceiver->otg,
1699 &ci->gadget); 1702 &ci->gadget);
1700 if (retval) 1703 if (retval)
@@ -1711,7 +1714,7 @@ static int udc_start(struct ci13xxx *ci)
1711 return retval; 1714 return retval;
1712 1715
1713remove_trans: 1716remove_trans:
1714 if (!IS_ERR_OR_NULL(ci->transceiver)) { 1717 if (ci->transceiver) {
1715 otg_set_peripheral(ci->transceiver->otg, NULL); 1718 otg_set_peripheral(ci->transceiver->otg, NULL);
1716 if (ci->global_phy) 1719 if (ci->global_phy)
1717 usb_put_phy(ci->transceiver); 1720 usb_put_phy(ci->transceiver);
@@ -1719,7 +1722,7 @@ remove_trans:
1719 1722
1720 dev_err(dev, "error = %i\n", retval); 1723 dev_err(dev, "error = %i\n", retval);
1721put_transceiver: 1724put_transceiver:
1722 if (!IS_ERR_OR_NULL(ci->transceiver) && ci->global_phy) 1725 if (ci->transceiver && ci->global_phy)
1723 usb_put_phy(ci->transceiver); 1726 usb_put_phy(ci->transceiver);
1724destroy_eps: 1727destroy_eps:
1725 destroy_eps(ci); 1728 destroy_eps(ci);
@@ -1747,7 +1750,7 @@ static void udc_stop(struct ci13xxx *ci)
1747 dma_pool_destroy(ci->td_pool); 1750 dma_pool_destroy(ci->td_pool);
1748 dma_pool_destroy(ci->qh_pool); 1751 dma_pool_destroy(ci->qh_pool);
1749 1752
1750 if (!IS_ERR_OR_NULL(ci->transceiver)) { 1753 if (ci->transceiver) {
1751 otg_set_peripheral(ci->transceiver->otg, NULL); 1754 otg_set_peripheral(ci->transceiver->otg, NULL);
1752 if (ci->global_phy) 1755 if (ci->global_phy)
1753 usb_put_phy(ci->transceiver); 1756 usb_put_phy(ci->transceiver);
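The chipidea change normalises the usb_get_phy() result once at lookup time: an ERR_PTR() return is turned into NULL, so every later test is a plain NULL check rather than IS_ERR_OR_NULL(). A minimal sketch of that idiom; get_phy_or_null() is an invented wrapper, not driver code:

#include <linux/err.h>
#include <linux/usb/phy.h>

static struct usb_phy *get_phy_or_null(void)
{
	struct usb_phy *phy = usb_get_phy(USB_PHY_TYPE_USB2);

	if (IS_ERR(phy))
		return NULL;         /* callers only ever see a valid pointer or NULL */
	return phy;
}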
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index caefc800f298..c88c4fb9459d 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1287,9 +1287,13 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1287 goto error; 1287 goto error;
1288 } 1288 }
1289 for (totlen = u = 0; u < uurb->number_of_packets; u++) { 1289 for (totlen = u = 0; u < uurb->number_of_packets; u++) {
1290 /* arbitrary limit, 1290 /*
1291 * sufficient for USB 2.0 high-bandwidth iso */ 1291 * arbitrary limit need for USB 3.0
1292 if (isopkt[u].length > 8192) { 1292 * bMaxBurst (0~15 allowed, 1~16 packets)
1293 * bmAttributes (bit 1:0, mult 0~2, 1~3 packets)
1294 * sizemax: 1024 * 16 * 3 = 49152
1295 */
1296 if (isopkt[u].length > 49152) {
1293 ret = -EINVAL; 1297 ret = -EINVAL;
1294 goto error; 1298 goto error;
1295 } 1299 }
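The new 49152-byte cap on an isochronous packet descriptor is simply the SuperSpeed per-service-interval maximum spelled out in the comment: 1024-byte packets, up to 16 packets per burst (bMaxBurst 0–15) and up to 3 bursts per interval (Mult 0–2). The arithmetic, as a trivial C check:

#include <stdio.h>

int main(void)
{
	unsigned int max_packet = 1024;
	unsigned int max_burst  = 16;    /* bMaxBurst + 1 */
	unsigned int mult       = 3;     /* (bmAttributes & 0x3) + 1 */

	printf("%u\n", max_packet * max_burst * mult);   /* 49152 */
	return 0;
}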
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 929e7dd6e58b..8ce9d7fd6cfc 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -164,9 +164,9 @@ static int dwc3_exynos_remove(struct platform_device *pdev)
164{ 164{
165 struct dwc3_exynos *exynos = platform_get_drvdata(pdev); 165 struct dwc3_exynos *exynos = platform_get_drvdata(pdev);
166 166
167 device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child);
167 platform_device_unregister(exynos->usb2_phy); 168 platform_device_unregister(exynos->usb2_phy);
168 platform_device_unregister(exynos->usb3_phy); 169 platform_device_unregister(exynos->usb3_phy);
169 device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child);
170 170
171 clk_disable_unprepare(exynos->clk); 171 clk_disable_unprepare(exynos->clk);
172 172
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 227d4a7acad7..eba9e2baf32b 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -196,9 +196,9 @@ static void dwc3_pci_remove(struct pci_dev *pci)
196{ 196{
197 struct dwc3_pci *glue = pci_get_drvdata(pci); 197 struct dwc3_pci *glue = pci_get_drvdata(pci);
198 198
199 platform_device_unregister(glue->dwc3);
199 platform_device_unregister(glue->usb2_phy); 200 platform_device_unregister(glue->usb2_phy);
200 platform_device_unregister(glue->usb3_phy); 201 platform_device_unregister(glue->usb3_phy);
201 platform_device_unregister(glue->dwc3);
202 pci_set_drvdata(pci, NULL); 202 pci_set_drvdata(pci, NULL);
203 pci_disable_device(pci); 203 pci_disable_device(pci);
204} 204}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 2b6e7e001207..b5e5b35df49c 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1706,11 +1706,19 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1706 dep = dwc->eps[epnum]; 1706 dep = dwc->eps[epnum];
1707 if (!dep) 1707 if (!dep)
1708 continue; 1708 continue;
1709 1709 /*
1710 dwc3_free_trb_pool(dep); 1710 * Physical endpoints 0 and 1 are special; they form the
1711 1711 * bi-directional USB endpoint 0.
1712 if (epnum != 0 && epnum != 1) 1712 *
1713 * For those two physical endpoints, we don't allocate a TRB
1714 * pool nor do we add them the endpoints list. Due to that, we
1715 * shouldn't do these two operations otherwise we would end up
1716 * with all sorts of bugs when removing dwc3.ko.
1717 */
1718 if (epnum != 0 && epnum != 1) {
1719 dwc3_free_trb_pool(dep);
1713 list_del(&dep->endpoint.ep_list); 1720 list_del(&dep->endpoint.ep_list);
1721 }
1714 1722
1715 kfree(dep); 1723 kfree(dep);
1716 } 1724 }
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index acff5b8f6e89..f80d0330d548 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -213,7 +213,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
213} 213}
214 214
215static const unsigned char 215static const unsigned char
216max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 }; 216max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
217 217
218/* carryover low/fullspeed bandwidth that crosses uframe boundries */ 218/* carryover low/fullspeed bandwidth that crosses uframe boundries */
219static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8]) 219static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
@@ -646,6 +646,10 @@ static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
646 /* reschedule QH iff another request is queued */ 646 /* reschedule QH iff another request is queued */
647 if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) { 647 if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
648 rc = qh_schedule(ehci, qh); 648 rc = qh_schedule(ehci, qh);
649 if (rc == 0) {
650 qh_refresh(ehci, qh);
651 qh_link_periodic(ehci, qh);
652 }
649 653
650 /* An error here likely indicates handshake failure 654 /* An error here likely indicates handshake failure
651 * or no space left in the schedule. Neither fault 655 * or no space left in the schedule. Neither fault
@@ -653,9 +657,10 @@ static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
653 * 657 *
654 * FIXME kill the now-dysfunctional queued urbs 658 * FIXME kill the now-dysfunctional queued urbs
655 */ 659 */
656 if (rc != 0) 660 else {
657 ehci_err(ehci, "can't reschedule qh %p, err %d\n", 661 ehci_err(ehci, "can't reschedule qh %p, err %d\n",
658 qh, rc); 662 qh, rc);
663 }
659 } 664 }
660 665
661 /* maybe turn off periodic schedule */ 666 /* maybe turn off periodic schedule */
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 2cfc465925bd..fbf75e57628b 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1827,6 +1827,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
1827 } 1827 }
1828 spin_unlock_irqrestore(&xhci->lock, flags); 1828 spin_unlock_irqrestore(&xhci->lock, flags);
1829 1829
1830 if (!xhci->rh_bw)
1831 goto no_bw;
1832
1830 num_ports = HCS_MAX_PORTS(xhci->hcs_params1); 1833 num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1831 for (i = 0; i < num_ports; i++) { 1834 for (i = 0; i < num_ports; i++) {
1832 struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table; 1835 struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
@@ -1845,6 +1848,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
1845 } 1848 }
1846 } 1849 }
1847 1850
1851no_bw:
1848 xhci->num_usb2_ports = 0; 1852 xhci->num_usb2_ports = 0;
1849 xhci->num_usb3_ports = 0; 1853 xhci->num_usb3_ports = 0;
1850 xhci->num_active_eps = 0; 1854 xhci->num_active_eps = 0;
@@ -2256,6 +2260,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2256 u32 page_size, temp; 2260 u32 page_size, temp;
2257 int i; 2261 int i;
2258 2262
2263 INIT_LIST_HEAD(&xhci->lpm_failed_devs);
2264 INIT_LIST_HEAD(&xhci->cancel_cmd_list);
2265
2259 page_size = xhci_readl(xhci, &xhci->op_regs->page_size); 2266 page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
2260 xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size); 2267 xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
2261 for (i = 0; i < 16; i++) { 2268 for (i = 0; i < 16; i++) {
@@ -2334,7 +2341,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2334 xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags); 2341 xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
2335 if (!xhci->cmd_ring) 2342 if (!xhci->cmd_ring)
2336 goto fail; 2343 goto fail;
2337 INIT_LIST_HEAD(&xhci->cancel_cmd_list);
2338 xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring); 2344 xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
2339 xhci_dbg(xhci, "First segment DMA is 0x%llx\n", 2345 xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
2340 (unsigned long long)xhci->cmd_ring->first_seg->dma); 2346 (unsigned long long)xhci->cmd_ring->first_seg->dma);
@@ -2445,8 +2451,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2445 if (xhci_setup_port_arrays(xhci, flags)) 2451 if (xhci_setup_port_arrays(xhci, flags))
2446 goto fail; 2452 goto fail;
2447 2453
2448 INIT_LIST_HEAD(&xhci->lpm_failed_devs);
2449
2450 /* Enable USB 3.0 device notifications for function remote wake, which 2454 /* Enable USB 3.0 device notifications for function remote wake, which
2451 * is necessary for allowing USB 3.0 devices to do remote wakeup from 2455 * is necessary for allowing USB 3.0 devices to do remote wakeup from
2452 * U3 (device suspend). 2456 * U3 (device suspend).
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 1a30c380043c..cc24e39b97d5 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -221,6 +221,14 @@ static void xhci_pci_remove(struct pci_dev *dev)
221static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) 221static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
222{ 222{
223 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 223 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
224 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
225
226 /*
227 * Systems with the TI redriver that loses port status change events
228 * need to have the registers polled during D3, so avoid D3cold.
229 */
230 if (xhci_compliance_mode_recovery_timer_quirk_check())
231 pdev->no_d3cold = true;
224 232
225 return xhci_suspend(xhci); 233 return xhci_suspend(xhci);
226} 234}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index b4aa79d154b2..d8f640b12dd9 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -466,7 +466,7 @@ static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
466 * Systems: 466 * Systems:
467 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820 467 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
468 */ 468 */
469static bool compliance_mode_recovery_timer_quirk_check(void) 469bool xhci_compliance_mode_recovery_timer_quirk_check(void)
470{ 470{
471 const char *dmi_product_name, *dmi_sys_vendor; 471 const char *dmi_product_name, *dmi_sys_vendor;
472 472
@@ -517,7 +517,7 @@ int xhci_init(struct usb_hcd *hcd)
517 xhci_dbg(xhci, "Finished xhci_init\n"); 517 xhci_dbg(xhci, "Finished xhci_init\n");
518 518
519 /* Initializing Compliance Mode Recovery Data If Needed */ 519 /* Initializing Compliance Mode Recovery Data If Needed */
520 if (compliance_mode_recovery_timer_quirk_check()) { 520 if (xhci_compliance_mode_recovery_timer_quirk_check()) {
521 xhci->quirks |= XHCI_COMP_MODE_QUIRK; 521 xhci->quirks |= XHCI_COMP_MODE_QUIRK;
522 compliance_mode_recovery_timer_init(xhci); 522 compliance_mode_recovery_timer_init(xhci);
523 } 523 }
@@ -956,6 +956,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
956 struct usb_hcd *hcd = xhci_to_hcd(xhci); 956 struct usb_hcd *hcd = xhci_to_hcd(xhci);
957 struct usb_hcd *secondary_hcd; 957 struct usb_hcd *secondary_hcd;
958 int retval = 0; 958 int retval = 0;
959 bool comp_timer_running = false;
959 960
960 /* Wait a bit if either of the roothubs need to settle from the 961 /* Wait a bit if either of the roothubs need to settle from the
961 * transition into bus suspend. 962 * transition into bus suspend.
@@ -993,6 +994,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
993 994
994 /* If restore operation fails, re-initialize the HC during resume */ 995 /* If restore operation fails, re-initialize the HC during resume */
995 if ((temp & STS_SRE) || hibernated) { 996 if ((temp & STS_SRE) || hibernated) {
997
998 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
999 !(xhci_all_ports_seen_u0(xhci))) {
1000 del_timer_sync(&xhci->comp_mode_recovery_timer);
1001 xhci_dbg(xhci, "Compliance Mode Recovery Timer deleted!\n");
1002 }
1003
996 /* Let the USB core know _both_ roothubs lost power. */ 1004 /* Let the USB core know _both_ roothubs lost power. */
997 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub); 1005 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
998 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub); 1006 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
@@ -1035,6 +1043,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
1035 retval = xhci_init(hcd->primary_hcd); 1043 retval = xhci_init(hcd->primary_hcd);
1036 if (retval) 1044 if (retval)
1037 return retval; 1045 return retval;
1046 comp_timer_running = true;
1047
1038 xhci_dbg(xhci, "Start the primary HCD\n"); 1048 xhci_dbg(xhci, "Start the primary HCD\n");
1039 retval = xhci_run(hcd->primary_hcd); 1049 retval = xhci_run(hcd->primary_hcd);
1040 if (!retval) { 1050 if (!retval) {
@@ -1076,7 +1086,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
1076 * to suffer the Compliance Mode issue again. It doesn't matter if 1086 * to suffer the Compliance Mode issue again. It doesn't matter if
1077 * ports have entered previously to U0 before system's suspension. 1087 * ports have entered previously to U0 before system's suspension.
1078 */ 1088 */
1079 if (xhci->quirks & XHCI_COMP_MODE_QUIRK) 1089 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
1080 compliance_mode_recovery_timer_init(xhci); 1090 compliance_mode_recovery_timer_init(xhci);
1081 1091
1082 /* Re-enable port polling. */ 1092 /* Re-enable port polling. */
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 29c978e37135..77600cefcaf1 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1853,4 +1853,7 @@ struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
1853struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); 1853struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
1854struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index); 1854struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);
1855 1855
1856/* xHCI quirks */
1857bool xhci_compliance_mode_recovery_timer_quirk_check(void);
1858
1856#endif /* __LINUX_XHCI_HCD_H */ 1859#endif /* __LINUX_XHCI_HCD_H */
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 8914dec49f01..9d3044bdebe5 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1232,7 +1232,6 @@ void musb_host_tx(struct musb *musb, u8 epnum)
1232 void __iomem *mbase = musb->mregs; 1232 void __iomem *mbase = musb->mregs;
1233 struct dma_channel *dma; 1233 struct dma_channel *dma;
1234 bool transfer_pending = false; 1234 bool transfer_pending = false;
1235 static bool use_sg;
1236 1235
1237 musb_ep_select(mbase, epnum); 1236 musb_ep_select(mbase, epnum);
1238 tx_csr = musb_readw(epio, MUSB_TXCSR); 1237 tx_csr = musb_readw(epio, MUSB_TXCSR);
@@ -1463,9 +1462,9 @@ done:
1463 * NULL. 1462 * NULL.
1464 */ 1463 */
1465 if (!urb->transfer_buffer) 1464 if (!urb->transfer_buffer)
1466 use_sg = true; 1465 qh->use_sg = true;
1467 1466
1468 if (use_sg) { 1467 if (qh->use_sg) {
1469 /* sg_miter_start is already done in musb_ep_program */ 1468 /* sg_miter_start is already done in musb_ep_program */
1470 if (!sg_miter_next(&qh->sg_miter)) { 1469 if (!sg_miter_next(&qh->sg_miter)) {
1471 dev_err(musb->controller, "error: sg list empty\n"); 1470 dev_err(musb->controller, "error: sg list empty\n");
@@ -1484,9 +1483,9 @@ done:
1484 1483
1485 qh->segsize = length; 1484 qh->segsize = length;
1486 1485
1487 if (use_sg) { 1486 if (qh->use_sg) {
1488 if (offset + length >= urb->transfer_buffer_length) 1487 if (offset + length >= urb->transfer_buffer_length)
1489 use_sg = false; 1488 qh->use_sg = false;
1490 } 1489 }
1491 1490
1492 musb_ep_select(mbase, epnum); 1491 musb_ep_select(mbase, epnum);
@@ -1552,7 +1551,6 @@ void musb_host_rx(struct musb *musb, u8 epnum)
1552 bool done = false; 1551 bool done = false;
1553 u32 status; 1552 u32 status;
1554 struct dma_channel *dma; 1553 struct dma_channel *dma;
1555 static bool use_sg;
1556 unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG; 1554 unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
1557 1555
1558 musb_ep_select(mbase, epnum); 1556 musb_ep_select(mbase, epnum);
@@ -1878,12 +1876,12 @@ void musb_host_rx(struct musb *musb, u8 epnum)
1878 * NULL. 1876 * NULL.
1879 */ 1877 */
1880 if (!urb->transfer_buffer) { 1878 if (!urb->transfer_buffer) {
1881 use_sg = true; 1879 qh->use_sg = true;
1882 sg_miter_start(&qh->sg_miter, urb->sg, 1, 1880 sg_miter_start(&qh->sg_miter, urb->sg, 1,
1883 sg_flags); 1881 sg_flags);
1884 } 1882 }
1885 1883
1886 if (use_sg) { 1884 if (qh->use_sg) {
1887 if (!sg_miter_next(&qh->sg_miter)) { 1885 if (!sg_miter_next(&qh->sg_miter)) {
1888 dev_err(musb->controller, "error: sg list empty\n"); 1886 dev_err(musb->controller, "error: sg list empty\n");
1889 sg_miter_stop(&qh->sg_miter); 1887 sg_miter_stop(&qh->sg_miter);
@@ -1913,8 +1911,8 @@ finish:
1913 urb->actual_length += xfer_len; 1911 urb->actual_length += xfer_len;
1914 qh->offset += xfer_len; 1912 qh->offset += xfer_len;
1915 if (done) { 1913 if (done) {
1916 if (use_sg) 1914 if (qh->use_sg)
1917 use_sg = false; 1915 qh->use_sg = false;
1918 1916
1919 if (urb->status == -EINPROGRESS) 1917 if (urb->status == -EINPROGRESS)
1920 urb->status = status; 1918 urb->status = status;
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 5a9c8feec10c..738f7eb60df9 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -74,6 +74,7 @@ struct musb_qh {
74 u16 frame; /* for periodic schedule */ 74 u16 frame; /* for periodic schedule */
75 unsigned iso_idx; /* in urb->iso_frame_desc[] */ 75 unsigned iso_idx; /* in urb->iso_frame_desc[] */
76 struct sg_mapping_iter sg_miter; /* for highmem in PIO mode */ 76 struct sg_mapping_iter sg_miter; /* for highmem in PIO mode */
77 bool use_sg; /* to track urb using sglist */
77}; 78};
78 79
79/* map from control or bulk queue head to the first qh on that ring */ 80/* map from control or bulk queue head to the first qh on that ring */
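Moving use_sg from a function-static variable into struct musb_qh matters because a static local is shared by every queue head and every controller instance, so one URB's scatter-gather state could leak into another's. A small userspace sketch of the per-object state; struct qh and start_transfer() are invented stand-ins:

#include <stdbool.h>
#include <stdio.h>

struct qh {
	bool use_sg;                 /* per-queue-head, as in the fix */
};

static void start_transfer(struct qh *qh, bool buffer_is_sg)
{
	qh->use_sg = buffer_is_sg;   /* state stays with its own qh */
}

int main(void)
{
	struct qh a = { 0 }, b = { 0 };

	start_transfer(&a, true);    /* qh "a" uses a scatter-gather list */
	start_transfer(&b, false);   /* qh "b" does not */

	printf("%d %d\n", a.use_sg, b.use_sg);   /* 1 0 -- independent */
	return 0;
}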
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 7ef3eb8617a6..2311b1e4e43c 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -4,11 +4,17 @@
4menuconfig USB_PHY 4menuconfig USB_PHY
5 bool "USB Physical Layer drivers" 5 bool "USB Physical Layer drivers"
6 help 6 help
7 USB controllers (those which are host, device or DRD) need a 7 Most USB controllers have the physical layer signalling part
8 device to handle the physical layer signalling, commonly called 8 (commonly called a PHY) built in. However, dual-role devices
9 a PHY. 9 (a.k.a. USB on-the-go) which support being USB master or slave
10 with the same connector often use an external PHY.
10 11
11 The following drivers add support for such PHY devices. 12 The drivers in this submenu add support for such PHY devices.
13 They are not needed for standard master-only (or the vast
14 majority of slave-only) USB interfaces.
15
16 If you're not sure if this applies to you, it probably doesn't;
17 say N here.
12 18
13if USB_PHY 19if USB_PHY
14 20
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 3b16118cbf62..40e7fd94646f 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -43,7 +43,7 @@
43#define DRIVER_NAME "ark3116" 43#define DRIVER_NAME "ark3116"
44 44
45/* usb timeout of 1 second */ 45/* usb timeout of 1 second */
46#define ARK_TIMEOUT (1*HZ) 46#define ARK_TIMEOUT 1000
47 47
48static const struct usb_device_id id_table[] = { 48static const struct usb_device_id id_table[] = {
49 { USB_DEVICE(0x6547, 0x0232) }, 49 { USB_DEVICE(0x6547, 0x0232) },
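The timeout fix reflects that usb_control_msg() and usb_bulk_msg() take their timeout in milliseconds, not jiffies; 1*HZ only happens to equal 1000 ms when HZ is 1000, and on an HZ=250 kernel the old value meant a quarter of the intended second. A throwaway C loop showing how the passed value varied with HZ:

#include <stdio.h>

int main(void)
{
	int hz_values[] = { 100, 250, 300, 1000 };
	int i;

	for (i = 0; i < 4; i++)
		printf("HZ=%4d: 1*HZ passed as ms -> %4d ms (wanted 1000)\n",
		       hz_values[i], hz_values[i]);
	return 0;
}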
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index d341555d37d8..082120198f87 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -65,6 +65,7 @@ static const struct usb_device_id id_table_earthmate[] = {
65static const struct usb_device_id id_table_cyphidcomrs232[] = { 65static const struct usb_device_id id_table_cyphidcomrs232[] = {
66 { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, 66 { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
67 { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, 67 { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
68 { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
68 { } /* Terminating entry */ 69 { } /* Terminating entry */
69}; 70};
70 71
@@ -78,6 +79,7 @@ static const struct usb_device_id id_table_combined[] = {
78 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, 79 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
79 { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, 80 { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
80 { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, 81 { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
82 { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
81 { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) }, 83 { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) },
82 { } /* Terminating entry */ 84 { } /* Terminating entry */
83}; 85};
@@ -229,6 +231,12 @@ static struct usb_serial_driver * const serial_drivers[] = {
229 * Cypress serial helper functions 231 * Cypress serial helper functions
230 *****************************************************************************/ 232 *****************************************************************************/
231 233
234/* FRWD Dongle hidcom needs to skip reset and speed checks */
235static inline bool is_frwd(struct usb_device *dev)
236{
237 return ((le16_to_cpu(dev->descriptor.idVendor) == VENDOR_ID_FRWD) &&
238 (le16_to_cpu(dev->descriptor.idProduct) == PRODUCT_ID_CYPHIDCOM_FRWD));
239}
232 240
233static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate) 241static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate)
234{ 242{
@@ -238,6 +246,10 @@ static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate)
238 if (unstable_bauds) 246 if (unstable_bauds)
239 return new_rate; 247 return new_rate;
240 248
249 /* FRWD Dongle uses 115200 bps */
250 if (is_frwd(port->serial->dev))
251 return new_rate;
252
241 /* 253 /*
242 * The general purpose firmware for the Cypress M8 allows for 254 * The general purpose firmware for the Cypress M8 allows for
243 * a maximum speed of 57600bps (I have no idea whether DeLorme 255 * a maximum speed of 57600bps (I have no idea whether DeLorme
@@ -448,7 +460,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
448 return -ENOMEM; 460 return -ENOMEM;
449 } 461 }
450 462
 451 usb_reset_configuration(serial->dev); 463 /* Skip reset for FRWD device. It is a workaround:
464 device hangs if it receives SET_CONFIGURE in Configured
465 state. */
466 if (!is_frwd(serial->dev))
467 usb_reset_configuration(serial->dev);
452 468
453 priv->cmd_ctrl = 0; 469 priv->cmd_ctrl = 0;
454 priv->line_control = 0; 470 priv->line_control = 0;
diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h
index 67cf60826884..b461311a2ae7 100644
--- a/drivers/usb/serial/cypress_m8.h
+++ b/drivers/usb/serial/cypress_m8.h
@@ -24,6 +24,10 @@
24#define VENDOR_ID_CYPRESS 0x04b4 24#define VENDOR_ID_CYPRESS 0x04b4
25#define PRODUCT_ID_CYPHIDCOM 0x5500 25#define PRODUCT_ID_CYPHIDCOM 0x5500
26 26
27/* FRWD Dongle - a GPS sports watch */
28#define VENDOR_ID_FRWD 0x6737
29#define PRODUCT_ID_CYPHIDCOM_FRWD 0x0001
30
27/* Powercom UPS, chip CY7C63723 */ 31/* Powercom UPS, chip CY7C63723 */
28#define VENDOR_ID_POWERCOM 0x0d9f 32#define VENDOR_ID_POWERCOM 0x0d9f
29#define PRODUCT_ID_UPS 0x0002 33#define PRODUCT_ID_UPS 0x0002
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 090b411d893f..7d8dd5aad236 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -165,11 +165,12 @@ static void f81232_set_termios(struct tty_struct *tty,
165 /* FIXME - Stubbed out for now */ 165 /* FIXME - Stubbed out for now */
166 166
167 /* Don't change anything if nothing has changed */ 167 /* Don't change anything if nothing has changed */
168 if (!tty_termios_hw_change(&tty->termios, old_termios)) 168 if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
169 return; 169 return;
170 170
171 /* Do the real work here... */ 171 /* Do the real work here... */
172 tty_termios_copy_hw(&tty->termios, old_termios); 172 if (old_termios)
173 tty_termios_copy_hw(&tty->termios, old_termios);
173} 174}
174 175
175static int f81232_tiocmget(struct tty_struct *tty) 176static int f81232_tiocmget(struct tty_struct *tty)
@@ -187,12 +188,11 @@ static int f81232_tiocmset(struct tty_struct *tty,
187 188
188static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port) 189static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port)
189{ 190{
190 struct ktermios tmp_termios;
191 int result; 191 int result;
192 192
193 /* Setup termios */ 193 /* Setup termios */
194 if (tty) 194 if (tty)
195 f81232_set_termios(tty, port, &tmp_termios); 195 f81232_set_termios(tty, port, NULL);
196 196
197 result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); 197 result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
198 if (result) { 198 if (result) {
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 9d74c278b7b5..790673e5faa7 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -287,7 +287,7 @@ static int bulk_immediate(struct usb_serial_port *port, u8 *buf, u8 count)
287 usb_bulk_msg(serial->dev, 287 usb_bulk_msg(serial->dev,
288 usb_sndbulkpipe(serial->dev, 288 usb_sndbulkpipe(serial->dev,
289 port->bulk_out_endpointAddress), buf, 289 port->bulk_out_endpointAddress), buf,
290 count, &actual, HZ * 1); 290 count, &actual, 1000);
291 291
292 if (status != IUU_OPERATION_OK) 292 if (status != IUU_OPERATION_OK)
293 dev_dbg(&port->dev, "%s - error = %2x\n", __func__, status); 293 dev_dbg(&port->dev, "%s - error = %2x\n", __func__, status);
@@ -307,7 +307,7 @@ static int read_immediate(struct usb_serial_port *port, u8 *buf, u8 count)
307 usb_bulk_msg(serial->dev, 307 usb_bulk_msg(serial->dev,
308 usb_rcvbulkpipe(serial->dev, 308 usb_rcvbulkpipe(serial->dev,
309 port->bulk_in_endpointAddress), buf, 309 port->bulk_in_endpointAddress), buf,
310 count, &actual, HZ * 1); 310 count, &actual, 1000);
311 311
312 if (status != IUU_OPERATION_OK) 312 if (status != IUU_OPERATION_OK)
313 dev_dbg(&port->dev, "%s - error = %2x\n", __func__, status); 313 dev_dbg(&port->dev, "%s - error = %2x\n", __func__, status);
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index eb30d7b01f36..3549d073df22 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -1548,7 +1548,6 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial,
1548 struct keyspan_serial_private *s_priv; 1548 struct keyspan_serial_private *s_priv;
1549 struct keyspan_port_private *p_priv; 1549 struct keyspan_port_private *p_priv;
1550 const struct keyspan_device_details *d_details; 1550 const struct keyspan_device_details *d_details;
1551 int outcont_urb;
1552 struct urb *this_urb; 1551 struct urb *this_urb;
1553 int device_port, err; 1552 int device_port, err;
1554 1553
@@ -1559,7 +1558,6 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial,
1559 d_details = s_priv->device_details; 1558 d_details = s_priv->device_details;
1560 device_port = port->number - port->serial->minor; 1559 device_port = port->number - port->serial->minor;
1561 1560
1562 outcont_urb = d_details->outcont_endpoints[port->number];
1563 this_urb = p_priv->outcont_urb; 1561 this_urb = p_priv->outcont_urb;
1564 1562
1565 dev_dbg(&port->dev, "%s - endpoint %d\n", __func__, usb_pipeendpoint(this_urb->pipe)); 1563 dev_dbg(&port->dev, "%s - endpoint %d\n", __func__, usb_pipeendpoint(this_urb->pipe));
@@ -1685,14 +1683,6 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial,
1685 err = usb_submit_urb(this_urb, GFP_ATOMIC); 1683 err = usb_submit_urb(this_urb, GFP_ATOMIC);
1686 if (err != 0) 1684 if (err != 0)
1687 dev_dbg(&port->dev, "%s - usb_submit_urb(setup) failed (%d)\n", __func__, err); 1685 dev_dbg(&port->dev, "%s - usb_submit_urb(setup) failed (%d)\n", __func__, err);
1688#if 0
1689 else {
1690 dev_dbg(&port->dev, "%s - usb_submit_urb(%d) OK %d bytes (end %d)\n", __func__
1691 outcont_urb, this_urb->transfer_buffer_length,
1692 usb_pipeendpoint(this_urb->pipe));
1693 }
1694#endif
1695
1696 return 0; 1686 return 0;
1697} 1687}
1698 1688
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index cc0e54345df9..f27c621a9297 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -40,7 +40,7 @@
40#define DRIVER_DESC "Moschip USB Serial Driver" 40#define DRIVER_DESC "Moschip USB Serial Driver"
41 41
42/* default urb timeout */ 42/* default urb timeout */
43#define MOS_WDR_TIMEOUT (HZ * 5) 43#define MOS_WDR_TIMEOUT 5000
44 44
45#define MOS_MAX_PORT 0x02 45#define MOS_MAX_PORT 0x02
46#define MOS_WRITE 0x0E 46#define MOS_WRITE 0x0E
@@ -227,11 +227,22 @@ static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
227 __u8 requesttype = (__u8)0xc0; 227 __u8 requesttype = (__u8)0xc0;
228 __u16 index = get_reg_index(reg); 228 __u16 index = get_reg_index(reg);
229 __u16 value = get_reg_value(reg, serial_portnum); 229 __u16 value = get_reg_value(reg, serial_portnum);
230 int status = usb_control_msg(usbdev, pipe, request, requesttype, value, 230 u8 *buf;
231 index, data, 1, MOS_WDR_TIMEOUT); 231 int status;
232 if (status < 0) 232
233 buf = kmalloc(1, GFP_KERNEL);
234 if (!buf)
235 return -ENOMEM;
236
237 status = usb_control_msg(usbdev, pipe, request, requesttype, value,
238 index, buf, 1, MOS_WDR_TIMEOUT);
239 if (status == 1)
240 *data = *buf;
241 else if (status < 0)
233 dev_err(&usbdev->dev, 242 dev_err(&usbdev->dev,
234 "mos7720: usb_control_msg() failed: %d", status); 243 "mos7720: usb_control_msg() failed: %d", status);
244 kfree(buf);
245
235 return status; 246 return status;
236} 247}
237 248
@@ -1618,7 +1629,7 @@ static void change_port_settings(struct tty_struct *tty,
1618 mos7720_port->shadowMCR |= (UART_MCR_XONANY); 1629 mos7720_port->shadowMCR |= (UART_MCR_XONANY);
1619 /* To set hardware flow control to the specified * 1630 /* To set hardware flow control to the specified *
1620 * serial port, in SP1/2_CONTROL_REG */ 1631 * serial port, in SP1/2_CONTROL_REG */
1621 if (port->number) 1632 if (port_number)
1622 write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x01); 1633 write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x01);
1623 else 1634 else
1624 write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x02); 1635 write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x02);
@@ -1927,7 +1938,7 @@ static int mos7720_startup(struct usb_serial *serial)
1927 1938
1928 /* setting configuration feature to one */ 1939 /* setting configuration feature to one */
1929 usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 1940 usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
1930 (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5*HZ); 1941 (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
1931 1942
1932 /* start the interrupt urb */ 1943 /* start the interrupt urb */
1933 ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL); 1944 ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
@@ -1970,7 +1981,7 @@ static void mos7720_release(struct usb_serial *serial)
1970 /* wait for synchronous usb calls to return */ 1981 /* wait for synchronous usb calls to return */
1971 if (mos_parport->msg_pending) 1982 if (mos_parport->msg_pending)
1972 wait_for_completion_timeout(&mos_parport->syncmsg_compl, 1983 wait_for_completion_timeout(&mos_parport->syncmsg_compl,
1973 MOS_WDR_TIMEOUT); 1984 msecs_to_jiffies(MOS_WDR_TIMEOUT));
1974 1985
1975 parport_remove_port(mos_parport->pp); 1986 parport_remove_port(mos_parport->pp);
1976 usb_set_serial_data(serial, NULL); 1987 usb_set_serial_data(serial, NULL);
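The mos7720 hunks above combine two recurring fixes in this series: usb_control_msg() takes its timeout in milliseconds (so the old HZ-based MOS_WDR_TIMEOUT was only correct when HZ happened to be 1000), and the transfer buffer must be DMA-able, meaning heap memory rather than a variable on the caller's stack. A minimal sketch of the resulting register-read pattern; the helper name, request type and 5000 ms constant are illustrative stand-ins, not the driver's exact code:

#include <linux/slab.h>
#include <linux/usb.h>

#define EXAMPLE_TIMEOUT_MS	5000	/* milliseconds, not jiffies */

/* Illustrative helper: read one register byte into *val. */
static int example_read_reg(struct usb_device *udev, u8 request,
			    u16 value, u16 index, u8 *val)
{
	u8 *buf;
	int status;

	/*
	 * usb_control_msg() may DMA into the buffer, so it must come from
	 * the heap (kmalloc), never from the caller's stack.
	 */
	buf = kmalloc(1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
				 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
				 value, index, buf, 1, EXAMPLE_TIMEOUT_MS);
	if (status == 1)
		*val = *buf;	/* only trust the data on a full-length read */
	else if (status >= 0)
		status = -EIO;	/* short read */

	kfree(buf);
	return status < 0 ? status : 0;
}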
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index a0d5ea545982..7e998081e1cd 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -2142,13 +2142,21 @@ static int mos7840_ioctl(struct tty_struct *tty,
2142static int mos7810_check(struct usb_serial *serial) 2142static int mos7810_check(struct usb_serial *serial)
2143{ 2143{
2144 int i, pass_count = 0; 2144 int i, pass_count = 0;
2145 u8 *buf;
2145 __u16 data = 0, mcr_data = 0; 2146 __u16 data = 0, mcr_data = 0;
2146 __u16 test_pattern = 0x55AA; 2147 __u16 test_pattern = 0x55AA;
2148 int res;
2149
2150 buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
2151 if (!buf)
2152 return 0; /* failed to identify 7810 */
2147 2153
2148 /* Store MCR setting */ 2154 /* Store MCR setting */
2149 usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 2155 res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
2150 MCS_RDREQ, MCS_RD_RTYPE, 0x0300, MODEM_CONTROL_REGISTER, 2156 MCS_RDREQ, MCS_RD_RTYPE, 0x0300, MODEM_CONTROL_REGISTER,
2151 &mcr_data, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); 2157 buf, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
2158 if (res == VENDOR_READ_LENGTH)
2159 mcr_data = *buf;
2152 2160
2153 for (i = 0; i < 16; i++) { 2161 for (i = 0; i < 16; i++) {
2154 /* Send the 1-bit test pattern out to MCS7810 test pin */ 2162 /* Send the 1-bit test pattern out to MCS7810 test pin */
@@ -2158,9 +2166,12 @@ static int mos7810_check(struct usb_serial *serial)
2158 MODEM_CONTROL_REGISTER, NULL, 0, MOS_WDR_TIMEOUT); 2166 MODEM_CONTROL_REGISTER, NULL, 0, MOS_WDR_TIMEOUT);
2159 2167
2160 /* Read the test pattern back */ 2168 /* Read the test pattern back */
2161 usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 2169 res = usb_control_msg(serial->dev,
2162 MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, &data, 2170 usb_rcvctrlpipe(serial->dev, 0), MCS_RDREQ,
2163 VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); 2171 MCS_RD_RTYPE, 0, GPIO_REGISTER, buf,
2172 VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
2173 if (res == VENDOR_READ_LENGTH)
2174 data = *buf;
2164 2175
2165 /* If this is a MCS7810 device, both test patterns must match */ 2176 /* If this is a MCS7810 device, both test patterns must match */
2166 if (((test_pattern >> i) ^ (~data >> 1)) & 0x0001) 2177 if (((test_pattern >> i) ^ (~data >> 1)) & 0x0001)
@@ -2174,6 +2185,8 @@ static int mos7810_check(struct usb_serial *serial)
2174 MCS_WR_RTYPE, 0x0300 | mcr_data, MODEM_CONTROL_REGISTER, NULL, 2185 MCS_WR_RTYPE, 0x0300 | mcr_data, MODEM_CONTROL_REGISTER, NULL,
2175 0, MOS_WDR_TIMEOUT); 2186 0, MOS_WDR_TIMEOUT);
2176 2187
2188 kfree(buf);
2189
2177 if (pass_count == 16) 2190 if (pass_count == 16)
2178 return 1; 2191 return 1;
2179 2192
@@ -2183,11 +2196,17 @@ static int mos7810_check(struct usb_serial *serial)
2183static int mos7840_calc_num_ports(struct usb_serial *serial) 2196static int mos7840_calc_num_ports(struct usb_serial *serial)
2184{ 2197{
2185 __u16 data = 0x00; 2198 __u16 data = 0x00;
2199 u8 *buf;
2186 int mos7840_num_ports; 2200 int mos7840_num_ports;
2187 2201
2188 usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 2202 buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
2189 MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, &data, 2203 if (buf) {
2190 VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); 2204 usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
2205 MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, buf,
2206 VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
2207 data = *buf;
2208 kfree(buf);
2209 }
2191 2210
2192 if (serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7810 || 2211 if (serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7810 ||
2193 serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7820) { 2212 serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7820) {
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 93d02bc4eb52..bd4323ddae1a 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -250,13 +250,7 @@ static void option_instat_callback(struct urb *urb);
250#define ZTE_PRODUCT_MF622 0x0001 250#define ZTE_PRODUCT_MF622 0x0001
251#define ZTE_PRODUCT_MF628 0x0015 251#define ZTE_PRODUCT_MF628 0x0015
252#define ZTE_PRODUCT_MF626 0x0031 252#define ZTE_PRODUCT_MF626 0x0031
253#define ZTE_PRODUCT_CDMA_TECH 0xfffe
254#define ZTE_PRODUCT_AC8710 0xfff1
255#define ZTE_PRODUCT_AC2726 0xfff5
256#define ZTE_PRODUCT_AC8710T 0xffff
257#define ZTE_PRODUCT_MC2718 0xffe8 253#define ZTE_PRODUCT_MC2718 0xffe8
258#define ZTE_PRODUCT_AD3812 0xffeb
259#define ZTE_PRODUCT_MC2716 0xffed
260 254
261#define BENQ_VENDOR_ID 0x04a5 255#define BENQ_VENDOR_ID 0x04a5
262#define BENQ_PRODUCT_H10 0x4068 256#define BENQ_PRODUCT_H10 0x4068
@@ -495,18 +489,10 @@ static const struct option_blacklist_info zte_k3765_z_blacklist = {
495 .reserved = BIT(4), 489 .reserved = BIT(4),
496}; 490};
497 491
498static const struct option_blacklist_info zte_ad3812_z_blacklist = {
499 .sendsetup = BIT(0) | BIT(1) | BIT(2),
500};
501
502static const struct option_blacklist_info zte_mc2718_z_blacklist = { 492static const struct option_blacklist_info zte_mc2718_z_blacklist = {
503 .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4), 493 .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
504}; 494};
505 495
506static const struct option_blacklist_info zte_mc2716_z_blacklist = {
507 .sendsetup = BIT(1) | BIT(2) | BIT(3),
508};
509
510static const struct option_blacklist_info huawei_cdc12_blacklist = { 496static const struct option_blacklist_info huawei_cdc12_blacklist = {
511 .reserved = BIT(1) | BIT(2), 497 .reserved = BIT(1) | BIT(2),
512}; 498};
@@ -593,6 +579,8 @@ static const struct usb_device_id option_ids[] = {
593 .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, 579 .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
594 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), 580 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
595 .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, 581 .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
582 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff), /* Huawei E1820 */
583 .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
596 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff), 584 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
597 .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, 585 .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
598 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) }, 586 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
@@ -797,7 +785,6 @@ static const struct usb_device_id option_ids[] = {
797 { USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012, 0xff) }, 785 { USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012, 0xff) },
798 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, 786 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
799 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, 787 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
800 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
801 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 788 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
802 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 789 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
803 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */ 790 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
@@ -1199,16 +1186,9 @@ static const struct usb_device_id option_ids[] = {
1199 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff), 1186 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
1200 .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, 1187 .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
1201 1188
1202 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, 1189 /* NOTE: most ZTE CDMA devices should be driven by zte_ev, not option */
1203 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
1204 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
1205 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
1206 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff), 1190 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
1207 .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist }, 1191 .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
1208 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
1209 .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
1210 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
1211 .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
1212 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, 1192 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
1213 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, 1193 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
1214 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, 1194 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 7151659367a0..048cd44d51b1 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -284,7 +284,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
284 serial settings even to the same values as before. Thus 284 serial settings even to the same values as before. Thus
285 we actually need to filter in this specific case */ 285 we actually need to filter in this specific case */
286 286
287 if (!tty_termios_hw_change(&tty->termios, old_termios)) 287 if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
288 return; 288 return;
289 289
290 cflag = tty->termios.c_cflag; 290 cflag = tty->termios.c_cflag;
@@ -293,7 +293,8 @@ static void pl2303_set_termios(struct tty_struct *tty,
293 if (!buf) { 293 if (!buf) {
294 dev_err(&port->dev, "%s - out of memory.\n", __func__); 294 dev_err(&port->dev, "%s - out of memory.\n", __func__);
295 /* Report back no change occurred */ 295 /* Report back no change occurred */
296 tty->termios = *old_termios; 296 if (old_termios)
297 tty->termios = *old_termios;
297 return; 298 return;
298 } 299 }
299 300
@@ -433,7 +434,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
433 control = priv->line_control; 434 control = priv->line_control;
434 if ((cflag & CBAUD) == B0) 435 if ((cflag & CBAUD) == B0)
435 priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS); 436 priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
436 else if ((old_termios->c_cflag & CBAUD) == B0) 437 else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
437 priv->line_control |= (CONTROL_DTR | CONTROL_RTS); 438 priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
438 if (control != priv->line_control) { 439 if (control != priv->line_control) {
439 control = priv->line_control; 440 control = priv->line_control;
@@ -492,7 +493,6 @@ static void pl2303_close(struct usb_serial_port *port)
492 493
493static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port) 494static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
494{ 495{
495 struct ktermios tmp_termios;
496 struct usb_serial *serial = port->serial; 496 struct usb_serial *serial = port->serial;
497 struct pl2303_serial_private *spriv = usb_get_serial_data(serial); 497 struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
498 int result; 498 int result;
@@ -508,7 +508,7 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
508 508
509 /* Setup termios */ 509 /* Setup termios */
510 if (tty) 510 if (tty)
511 pl2303_set_termios(tty, port, &tmp_termios); 511 pl2303_set_termios(tty, port, NULL);
512 512
513 result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); 513 result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
514 if (result) { 514 if (result) {
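The pl2303 change replaces the uninitialized on-stack ktermios that open() used to pass with a NULL pointer, so set_termios() has to treat old_termios as optional ("no previous state, program the hardware unconditionally"). A sketch of that guard, using a generic example_set_termios() in place of the driver's real function:

#include <linux/tty.h>
#include <linux/usb/serial.h>

/*
 * Sketch only: old_termios may be NULL when called from open(), meaning
 * "no previous state, program the hardware unconditionally".
 */
static void example_set_termios(struct tty_struct *tty,
				struct usb_serial_port *port,
				struct ktermios *old_termios)
{
	if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
		return;		/* nothing hardware-relevant changed */

	/* ... program baud rate, word size, parity, flow control ... */

	if ((tty->termios.c_cflag & CBAUD) == B0) {
		/* baud set to B0: drop DTR/RTS */
	} else if (old_termios && (old_termios->c_cflag & CBAUD) == B0) {
		/* leaving B0: raise DTR/RTS again */
	}
}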
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 59b32b782126..bd794b43898c 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -118,6 +118,7 @@ static const struct usb_device_id id_table[] = {
118 {USB_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */ 118 {USB_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */
119 {USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */ 119 {USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */
120 {USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */ 120 {USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */
121 {USB_DEVICE(0x0AF0, 0x8120)}, /* Option GTM681W */
121 122
122 /* non Gobi Qualcomm serial devices */ 123 /* non Gobi Qualcomm serial devices */
123 {USB_DEVICE_INTERFACE_NUMBER(0x0f3d, 0x68a2, 0)}, /* Sierra Wireless MC7700 Device Management */ 124 {USB_DEVICE_INTERFACE_NUMBER(0x0f3d, 0x68a2, 0)}, /* Sierra Wireless MC7700 Device Management */
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index cf3df793c2b7..ddf6c47137dc 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -291,7 +291,6 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
291 struct spcp8x5_private *priv = usb_get_serial_port_data(port); 291 struct spcp8x5_private *priv = usb_get_serial_port_data(port);
292 unsigned long flags; 292 unsigned long flags;
293 unsigned int cflag = tty->termios.c_cflag; 293 unsigned int cflag = tty->termios.c_cflag;
294 unsigned int old_cflag = old_termios->c_cflag;
295 unsigned short uartdata; 294 unsigned short uartdata;
296 unsigned char buf[2] = {0, 0}; 295 unsigned char buf[2] = {0, 0};
297 int baud; 296 int baud;
@@ -299,15 +298,15 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
299 u8 control; 298 u8 control;
300 299
301 /* check that they really want us to change something */ 300 /* check that they really want us to change something */
302 if (!tty_termios_hw_change(&tty->termios, old_termios)) 301 if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
303 return; 302 return;
304 303
305 /* set DTR/RTS active */ 304 /* set DTR/RTS active */
306 spin_lock_irqsave(&priv->lock, flags); 305 spin_lock_irqsave(&priv->lock, flags);
307 control = priv->line_control; 306 control = priv->line_control;
308 if ((old_cflag & CBAUD) == B0) { 307 if (old_termios && (old_termios->c_cflag & CBAUD) == B0) {
309 priv->line_control |= MCR_DTR; 308 priv->line_control |= MCR_DTR;
310 if (!(old_cflag & CRTSCTS)) 309 if (!(old_termios->c_cflag & CRTSCTS))
311 priv->line_control |= MCR_RTS; 310 priv->line_control |= MCR_RTS;
312 } 311 }
313 if (control != priv->line_control) { 312 if (control != priv->line_control) {
@@ -394,7 +393,6 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
394 393
395static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port) 394static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
396{ 395{
397 struct ktermios tmp_termios;
398 struct usb_serial *serial = port->serial; 396 struct usb_serial *serial = port->serial;
399 struct spcp8x5_private *priv = usb_get_serial_port_data(port); 397 struct spcp8x5_private *priv = usb_get_serial_port_data(port);
400 int ret; 398 int ret;
@@ -411,7 +409,7 @@ static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
411 spcp8x5_set_ctrl_line(port, priv->line_control); 409 spcp8x5_set_ctrl_line(port, priv->line_control);
412 410
413 if (tty) 411 if (tty)
414 spcp8x5_set_termios(tty, port, &tmp_termios); 412 spcp8x5_set_termios(tty, port, NULL);
415 413
416 port->port.drain_delay = 256; 414 port->port.drain_delay = 256;
417 415
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index c92c5ed4e580..e581c2549a57 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -172,7 +172,8 @@ static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = {
172 { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, 172 { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
173 { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, 173 { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
174 { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, 174 { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
175 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, 175 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) },
176 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
176 { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, 177 { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
177}; 178};
178 179
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
index b353e7e3d480..4a2423e84d55 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.h
+++ b/drivers/usb/serial/ti_usb_3410_5052.h
@@ -52,7 +52,9 @@
52 52
53/* Abbott Diabetics vendor and product ids */ 53/* Abbott Diabetics vendor and product ids */
54#define ABBOTT_VENDOR_ID 0x1a61 54#define ABBOTT_VENDOR_ID 0x1a61
55#define ABBOTT_PRODUCT_ID 0x3410 55#define ABBOTT_STEREO_PLUG_ID 0x3410
56#define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID
57#define ABBOTT_STRIP_PORT_ID 0x3420
56 58
57/* Commands */ 59/* Commands */
58#define TI_GET_VERSION 0x01 60#define TI_GET_VERSION 0x01
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 4753c005cfb6..5f6b1ff9d29e 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -408,7 +408,7 @@ static int serial_ioctl(struct tty_struct *tty,
408 unsigned int cmd, unsigned long arg) 408 unsigned int cmd, unsigned long arg)
409{ 409{
410 struct usb_serial_port *port = tty->driver_data; 410 struct usb_serial_port *port = tty->driver_data;
411 int retval = -ENODEV; 411 int retval = -ENOIOCTLCMD;
412 412
413 dev_dbg(tty->dev, "%s - cmd 0x%.4x\n", __func__, cmd); 413 dev_dbg(tty->dev, "%s - cmd 0x%.4x\n", __func__, cmd);
414 414
@@ -420,8 +420,6 @@ static int serial_ioctl(struct tty_struct *tty,
420 default: 420 default:
421 if (port->serial->type->ioctl) 421 if (port->serial->type->ioctl)
422 retval = port->serial->type->ioctl(tty, cmd, arg); 422 retval = port->serial->type->ioctl(tty, cmd, arg);
423 else
424 retval = -ENOIOCTLCMD;
425 } 423 }
426 424
427 return retval; 425 return retval;
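Starting retval at -ENOIOCTLCMD (instead of -ENODEV, with the old else branch removed) matters because the tty core interprets -ENOIOCTLCMD as "not handled here, fall back to the generic implementation". A minimal sketch of that dispatch shape; the TIOCMIWAIT case stands in for whatever commands the core handles itself:

#include <linux/tty.h>
#include <linux/usb/serial.h>

static int example_ioctl(struct tty_struct *tty,
			 unsigned int cmd, unsigned long arg)
{
	struct usb_serial_port *port = tty->driver_data;
	int retval = -ENOIOCTLCMD;	/* default: let the tty core handle it */

	switch (cmd) {
	case TIOCMIWAIT:
		/* ... handled by the usb-serial core itself ... */
		retval = 0;
		break;
	default:
		if (port->serial->type->ioctl)
			retval = port->serial->type->ioctl(tty, cmd, arg);
	}

	return retval;
}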
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 7573ec8a084f..9910aa2edf4b 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -560,10 +560,19 @@ static int treo_attach(struct usb_serial *serial)
560 */ 560 */
561#define COPY_PORT(dest, src) \ 561#define COPY_PORT(dest, src) \
562 do { \ 562 do { \
563 int i; \
564 \
565 for (i = 0; i < ARRAY_SIZE(src->read_urbs); ++i) { \
566 dest->read_urbs[i] = src->read_urbs[i]; \
567 dest->read_urbs[i]->context = dest; \
568 dest->bulk_in_buffers[i] = src->bulk_in_buffers[i]; \
569 } \
563 dest->read_urb = src->read_urb; \ 570 dest->read_urb = src->read_urb; \
564 dest->bulk_in_endpointAddress = src->bulk_in_endpointAddress;\ 571 dest->bulk_in_endpointAddress = src->bulk_in_endpointAddress;\
565 dest->bulk_in_buffer = src->bulk_in_buffer; \ 572 dest->bulk_in_buffer = src->bulk_in_buffer; \
573 dest->bulk_in_size = src->bulk_in_size; \
566 dest->interrupt_in_urb = src->interrupt_in_urb; \ 574 dest->interrupt_in_urb = src->interrupt_in_urb; \
575 dest->interrupt_in_urb->context = dest; \
567 dest->interrupt_in_endpointAddress = \ 576 dest->interrupt_in_endpointAddress = \
568 src->interrupt_in_endpointAddress;\ 577 src->interrupt_in_endpointAddress;\
569 dest->interrupt_in_buffer = src->interrupt_in_buffer; \ 578 dest->interrupt_in_buffer = src->interrupt_in_buffer; \
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index b9fca3586d74..347caad47a12 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -649,7 +649,7 @@ static void firm_setup_port(struct tty_struct *tty)
649 struct whiteheat_port_settings port_settings; 649 struct whiteheat_port_settings port_settings;
650 unsigned int cflag = tty->termios.c_cflag; 650 unsigned int cflag = tty->termios.c_cflag;
651 651
652 port_settings.port = port->number + 1; 652 port_settings.port = port->number - port->serial->minor + 1;
653 653
654 /* get the byte size */ 654 /* get the byte size */
655 switch (cflag & CSIZE) { 655 switch (cflag & CSIZE) {
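In this kernel generation port->number is a global tty minor, so the whiteheat and keyspan hunks convert it to a device-relative index by subtracting the serial device's first minor (whiteheat then adds 1, presumably for its firmware's 1-based port numbering). A tiny illustration of the conversion, with an example helper name:

#include <linux/usb/serial.h>

/*
 * Sketch: convert the global tty minor stored in port->number into a
 * 0-based, per-device port index (usb-serial API as of this kernel).
 */
static unsigned int example_port_index(struct usb_serial_port *port)
{
	return port->number - port->serial->minor;
}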
diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c
index 39ee7373b4ee..fca4c752a4ed 100644
--- a/drivers/usb/serial/zte_ev.c
+++ b/drivers/usb/serial/zte_ev.c
@@ -41,9 +41,6 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
41 int len; 41 int len;
42 unsigned char *buf; 42 unsigned char *buf;
43 43
44 if (port->number != 0)
45 return -ENODEV;
46
47 buf = kmalloc(MAX_SETUP_DATA_SIZE, GFP_KERNEL); 44 buf = kmalloc(MAX_SETUP_DATA_SIZE, GFP_KERNEL);
48 if (!buf) 45 if (!buf)
49 return -ENOMEM; 46 return -ENOMEM;
@@ -53,7 +50,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
53 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 50 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
54 0x22, 0x21, 51 0x22, 0x21,
55 0x0001, 0x0000, NULL, len, 52 0x0001, 0x0000, NULL, len,
56 HZ * USB_CTRL_GET_TIMEOUT); 53 USB_CTRL_GET_TIMEOUT);
57 dev_dbg(dev, "result = %d\n", result); 54 dev_dbg(dev, "result = %d\n", result);
58 55
59 /* send 2st cmd and recieve data */ 56 /* send 2st cmd and recieve data */
@@ -65,7 +62,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
65 result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 62 result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
66 0x21, 0xa1, 63 0x21, 0xa1,
67 0x0000, 0x0000, buf, len, 64 0x0000, 0x0000, buf, len,
68 HZ * USB_CTRL_GET_TIMEOUT); 65 USB_CTRL_GET_TIMEOUT);
69 debug_data(dev, __func__, len, buf, result); 66 debug_data(dev, __func__, len, buf, result);
70 67
71 /* send 3 cmd */ 68 /* send 3 cmd */
@@ -84,7 +81,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
84 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 81 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
85 0x20, 0x21, 82 0x20, 0x21,
86 0x0000, 0x0000, buf, len, 83 0x0000, 0x0000, buf, len,
87 HZ * USB_CTRL_GET_TIMEOUT); 84 USB_CTRL_GET_TIMEOUT);
88 debug_data(dev, __func__, len, buf, result); 85 debug_data(dev, __func__, len, buf, result);
89 86
90 /* send 4 cmd */ 87 /* send 4 cmd */
@@ -95,7 +92,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
95 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 92 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
96 0x22, 0x21, 93 0x22, 0x21,
97 0x0003, 0x0000, NULL, len, 94 0x0003, 0x0000, NULL, len,
98 HZ * USB_CTRL_GET_TIMEOUT); 95 USB_CTRL_GET_TIMEOUT);
99 dev_dbg(dev, "result = %d\n", result); 96 dev_dbg(dev, "result = %d\n", result);
100 97
101 /* send 5 cmd */ 98 /* send 5 cmd */
@@ -107,7 +104,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
107 result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 104 result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
108 0x21, 0xa1, 105 0x21, 0xa1,
109 0x0000, 0x0000, buf, len, 106 0x0000, 0x0000, buf, len,
110 HZ * USB_CTRL_GET_TIMEOUT); 107 USB_CTRL_GET_TIMEOUT);
111 debug_data(dev, __func__, len, buf, result); 108 debug_data(dev, __func__, len, buf, result);
112 109
113 /* send 6 cmd */ 110 /* send 6 cmd */
@@ -126,7 +123,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
126 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 123 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
127 0x20, 0x21, 124 0x20, 0x21,
128 0x0000, 0x0000, buf, len, 125 0x0000, 0x0000, buf, len,
129 HZ * USB_CTRL_GET_TIMEOUT); 126 USB_CTRL_GET_TIMEOUT);
130 debug_data(dev, __func__, len, buf, result); 127 debug_data(dev, __func__, len, buf, result);
131 kfree(buf); 128 kfree(buf);
132 129
@@ -166,9 +163,6 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
166 int len; 163 int len;
167 unsigned char *buf; 164 unsigned char *buf;
168 165
169 if (port->number != 0)
170 return;
171
172 buf = kmalloc(MAX_SETUP_DATA_SIZE, GFP_KERNEL); 166 buf = kmalloc(MAX_SETUP_DATA_SIZE, GFP_KERNEL);
173 if (!buf) 167 if (!buf)
174 return; 168 return;
@@ -178,7 +172,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
178 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 172 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
179 0x22, 0x21, 173 0x22, 0x21,
180 0x0002, 0x0000, NULL, len, 174 0x0002, 0x0000, NULL, len,
181 HZ * USB_CTRL_GET_TIMEOUT); 175 USB_CTRL_GET_TIMEOUT);
182 dev_dbg(dev, "result = %d\n", result); 176 dev_dbg(dev, "result = %d\n", result);
183 177
184 /* send 2st ctl cmd(CTL 21 22 03 00 00 00 00 00 ) */ 178 /* send 2st ctl cmd(CTL 21 22 03 00 00 00 00 00 ) */
@@ -186,7 +180,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
186 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 180 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
187 0x22, 0x21, 181 0x22, 0x21,
188 0x0003, 0x0000, NULL, len, 182 0x0003, 0x0000, NULL, len,
189 HZ * USB_CTRL_GET_TIMEOUT); 183 USB_CTRL_GET_TIMEOUT);
190 dev_dbg(dev, "result = %d\n", result); 184 dev_dbg(dev, "result = %d\n", result);
191 185
192 /* send 3st cmd and recieve data */ 186 /* send 3st cmd and recieve data */
@@ -198,7 +192,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
198 result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 192 result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
199 0x21, 0xa1, 193 0x21, 0xa1,
200 0x0000, 0x0000, buf, len, 194 0x0000, 0x0000, buf, len,
201 HZ * USB_CTRL_GET_TIMEOUT); 195 USB_CTRL_GET_TIMEOUT);
202 debug_data(dev, __func__, len, buf, result); 196 debug_data(dev, __func__, len, buf, result);
203 197
204 /* send 4 cmd */ 198 /* send 4 cmd */
@@ -217,7 +211,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
217 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 211 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
218 0x20, 0x21, 212 0x20, 0x21,
219 0x0000, 0x0000, buf, len, 213 0x0000, 0x0000, buf, len,
220 HZ * USB_CTRL_GET_TIMEOUT); 214 USB_CTRL_GET_TIMEOUT);
221 debug_data(dev, __func__, len, buf, result); 215 debug_data(dev, __func__, len, buf, result);
222 216
223 /* send 5 cmd */ 217 /* send 5 cmd */
@@ -228,7 +222,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
228 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 222 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
229 0x22, 0x21, 223 0x22, 0x21,
230 0x0003, 0x0000, NULL, len, 224 0x0003, 0x0000, NULL, len,
231 HZ * USB_CTRL_GET_TIMEOUT); 225 USB_CTRL_GET_TIMEOUT);
232 dev_dbg(dev, "result = %d\n", result); 226 dev_dbg(dev, "result = %d\n", result);
233 227
234 /* send 6 cmd */ 228 /* send 6 cmd */
@@ -240,7 +234,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
240 result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 234 result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
241 0x21, 0xa1, 235 0x21, 0xa1,
242 0x0000, 0x0000, buf, len, 236 0x0000, 0x0000, buf, len,
243 HZ * USB_CTRL_GET_TIMEOUT); 237 USB_CTRL_GET_TIMEOUT);
244 debug_data(dev, __func__, len, buf, result); 238 debug_data(dev, __func__, len, buf, result);
245 239
246 /* send 7 cmd */ 240 /* send 7 cmd */
@@ -259,7 +253,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
259 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 253 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
260 0x20, 0x21, 254 0x20, 0x21,
261 0x0000, 0x0000, buf, len, 255 0x0000, 0x0000, buf, len,
262 HZ * USB_CTRL_GET_TIMEOUT); 256 USB_CTRL_GET_TIMEOUT);
263 debug_data(dev, __func__, len, buf, result); 257 debug_data(dev, __func__, len, buf, result);
264 258
265 /* send 8 cmd */ 259 /* send 8 cmd */
@@ -270,7 +264,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
270 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 264 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
271 0x22, 0x21, 265 0x22, 0x21,
272 0x0003, 0x0000, NULL, len, 266 0x0003, 0x0000, NULL, len,
273 HZ * USB_CTRL_GET_TIMEOUT); 267 USB_CTRL_GET_TIMEOUT);
274 dev_dbg(dev, "result = %d\n", result); 268 dev_dbg(dev, "result = %d\n", result);
275 269
276 kfree(buf); 270 kfree(buf);
@@ -279,11 +273,29 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
279} 273}
280 274
281static const struct usb_device_id id_table[] = { 275static const struct usb_device_id id_table[] = {
282 { USB_DEVICE(0x19d2, 0xffff) }, /* AC8700 */ 276 /* AC8710, AC8710T */
283 { USB_DEVICE(0x19d2, 0xfffe) }, 277 { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffff, 0xff, 0xff, 0xff) },
284 { USB_DEVICE(0x19d2, 0xfffd) }, /* MG880 */ 278 /* AC8700 */
279 { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfffe, 0xff, 0xff, 0xff) },
280 /* MG880 */
281 { USB_DEVICE(0x19d2, 0xfffd) },
282 { USB_DEVICE(0x19d2, 0xfffc) },
283 { USB_DEVICE(0x19d2, 0xfffb) },
284 /* AC2726, AC8710_V3 */
285 { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) },
286 { USB_DEVICE(0x19d2, 0xfff6) },
287 { USB_DEVICE(0x19d2, 0xfff7) },
288 { USB_DEVICE(0x19d2, 0xfff8) },
289 { USB_DEVICE(0x19d2, 0xfff9) },
290 { USB_DEVICE(0x19d2, 0xffee) },
291 /* AC2716, MC2716 */
292 { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffed, 0xff, 0xff, 0xff) },
293 /* AD3812 */
294 { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffeb, 0xff, 0xff, 0xff) },
295 { USB_DEVICE(0x19d2, 0xffec) },
285 { USB_DEVICE(0x05C6, 0x3197) }, 296 { USB_DEVICE(0x05C6, 0x3197) },
286 { USB_DEVICE(0x05C6, 0x6000) }, 297 { USB_DEVICE(0x05C6, 0x6000) },
298 { USB_DEVICE(0x05C6, 0x9008) },
287 { }, 299 { },
288}; 300};
289MODULE_DEVICE_TABLE(usb, id_table); 301MODULE_DEVICE_TABLE(usb, id_table);
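The zte_ev timeout changes are purely about units: USB_CTRL_GET_TIMEOUT is already a millisecond value (5000 ms), and usb_control_msg() expects milliseconds, so multiplying by HZ inflated every timeout by the tick rate. By contrast, wait_for_completion_timeout() wants jiffies, which is why the mos7720 hunk above wraps the same constant in msecs_to_jiffies(). A short sketch contrasting the two, with example request values borrowed from the code above:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/usb.h>

#define EXAMPLE_TIMEOUT_MS	5000	/* milliseconds */

static int example_send_cmd(struct usb_device *udev, struct completion *done)
{
	int ret;

	/* usb_control_msg() takes its timeout directly in milliseconds... */
	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      0x22, 0x21, 0x0001, 0x0000, NULL, 0,
			      EXAMPLE_TIMEOUT_MS);
	if (ret < 0)
		return ret;

	/*
	 * ...while wait_for_completion_timeout() takes jiffies, so the same
	 * constant has to go through msecs_to_jiffies() first.
	 */
	if (!wait_for_completion_timeout(done,
					 msecs_to_jiffies(EXAMPLE_TIMEOUT_MS)))
		return -ETIMEDOUT;

	return 0;
}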
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index acb7121a9316..6d78736563de 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -1360,7 +1360,7 @@ static const struct file_operations vfio_device_fops = {
1360 */ 1360 */
1361static char *vfio_devnode(struct device *dev, umode_t *mode) 1361static char *vfio_devnode(struct device *dev, umode_t *mode)
1362{ 1362{
1363 if (MINOR(dev->devt) == 0) 1363 if (mode && (MINOR(dev->devt) == 0))
1364 *mode = S_IRUGO | S_IWUGO; 1364 *mode = S_IRUGO | S_IWUGO;
1365 1365
1366 return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev)); 1366 return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 2b51e2336aa2..f80d3dd41d8c 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -155,14 +155,11 @@ static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
155 155
156static void vhost_net_clear_ubuf_info(struct vhost_net *n) 156static void vhost_net_clear_ubuf_info(struct vhost_net *n)
157{ 157{
158
159 bool zcopy;
160 int i; 158 int i;
161 159
162 for (i = 0; i < n->dev.nvqs; ++i) { 160 for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
163 zcopy = vhost_net_zcopy_mask & (0x1 << i); 161 kfree(n->vqs[i].ubuf_info);
164 if (zcopy) 162 n->vqs[i].ubuf_info = NULL;
165 kfree(n->vqs[i].ubuf_info);
166 } 163 }
167} 164}
168 165
@@ -171,7 +168,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
171 bool zcopy; 168 bool zcopy;
172 int i; 169 int i;
173 170
174 for (i = 0; i < n->dev.nvqs; ++i) { 171 for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
175 zcopy = vhost_net_zcopy_mask & (0x1 << i); 172 zcopy = vhost_net_zcopy_mask & (0x1 << i);
176 if (!zcopy) 173 if (!zcopy)
177 continue; 174 continue;
@@ -183,12 +180,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
183 return 0; 180 return 0;
184 181
185err: 182err:
186 while (i--) { 183 vhost_net_clear_ubuf_info(n);
187 zcopy = vhost_net_zcopy_mask & (0x1 << i);
188 if (!zcopy)
189 continue;
190 kfree(n->vqs[i].ubuf_info);
191 }
192 return -ENOMEM; 184 return -ENOMEM;
193} 185}
194 186
@@ -196,12 +188,12 @@ void vhost_net_vq_reset(struct vhost_net *n)
196{ 188{
197 int i; 189 int i;
198 190
191 vhost_net_clear_ubuf_info(n);
192
199 for (i = 0; i < VHOST_NET_VQ_MAX; i++) { 193 for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
200 n->vqs[i].done_idx = 0; 194 n->vqs[i].done_idx = 0;
201 n->vqs[i].upend_idx = 0; 195 n->vqs[i].upend_idx = 0;
202 n->vqs[i].ubufs = NULL; 196 n->vqs[i].ubufs = NULL;
203 kfree(n->vqs[i].ubuf_info);
204 n->vqs[i].ubuf_info = NULL;
205 n->vqs[i].vhost_hlen = 0; 197 n->vqs[i].vhost_hlen = 0;
206 n->vqs[i].sock_hlen = 0; 198 n->vqs[i].sock_hlen = 0;
207 } 199 }
@@ -436,7 +428,8 @@ static void handle_tx(struct vhost_net *net)
436 kref_get(&ubufs->kref); 428 kref_get(&ubufs->kref);
437 } 429 }
438 nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV; 430 nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
439 } 431 } else
432 msg.msg_control = NULL;
440 /* TODO: Check specific error and bomb out unless ENOBUFS? */ 433 /* TODO: Check specific error and bomb out unless ENOBUFS? */
441 err = sock->ops->sendmsg(NULL, sock, &msg, len); 434 err = sock->ops->sendmsg(NULL, sock, &msg, len);
442 if (unlikely(err < 0)) { 435 if (unlikely(err < 0)) {
@@ -1053,6 +1046,10 @@ static long vhost_net_set_owner(struct vhost_net *n)
1053 int r; 1046 int r;
1054 1047
1055 mutex_lock(&n->dev.mutex); 1048 mutex_lock(&n->dev.mutex);
1049 if (vhost_dev_has_owner(&n->dev)) {
1050 r = -EBUSY;
1051 goto out;
1052 }
1056 r = vhost_net_set_ubuf_info(n); 1053 r = vhost_net_set_ubuf_info(n);
1057 if (r) 1054 if (r)
1058 goto out; 1055 goto out;
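vhost_net_clear_ubuf_info() now frees unconditionally and clears the pointer, which makes it safe to call from both the vhost_net_set_ubuf_info() error path and vhost_net_vq_reset() without double-freeing. The idiom in isolation, with illustrative names and a fixed queue count:

#include <linux/slab.h>

#define EXAMPLE_NR_QUEUES	2

struct example_queue {
	void *ubuf_info;
};

/*
 * Safe to call more than once: kfree(NULL) is a no-op and the pointer is
 * cleared after every free, so error paths and reset paths can share it.
 */
static void example_clear_ubuf_info(struct example_queue *vqs)
{
	int i;

	for (i = 0; i < EXAMPLE_NR_QUEUES; i++) {
		kfree(vqs[i].ubuf_info);
		vqs[i].ubuf_info = NULL;
	}
}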
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index beee7f5787e6..60aa5ad09a2f 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -344,13 +344,19 @@ static int vhost_attach_cgroups(struct vhost_dev *dev)
344} 344}
345 345
346/* Caller should have device mutex */ 346/* Caller should have device mutex */
347bool vhost_dev_has_owner(struct vhost_dev *dev)
348{
349 return dev->mm;
350}
351
352/* Caller should have device mutex */
347long vhost_dev_set_owner(struct vhost_dev *dev) 353long vhost_dev_set_owner(struct vhost_dev *dev)
348{ 354{
349 struct task_struct *worker; 355 struct task_struct *worker;
350 int err; 356 int err;
351 357
352 /* Is there an owner already? */ 358 /* Is there an owner already? */
353 if (dev->mm) { 359 if (vhost_dev_has_owner(dev)) {
354 err = -EBUSY; 360 err = -EBUSY;
355 goto err_mm; 361 goto err_mm;
356 } 362 }
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index a7ad63592987..64adcf99ff33 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -133,6 +133,7 @@ struct vhost_dev {
133 133
134long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs); 134long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
135long vhost_dev_set_owner(struct vhost_dev *dev); 135long vhost_dev_set_owner(struct vhost_dev *dev);
136bool vhost_dev_has_owner(struct vhost_dev *dev);
136long vhost_dev_check_owner(struct vhost_dev *); 137long vhost_dev_check_owner(struct vhost_dev *);
137struct vhost_memory *vhost_dev_reset_owner_prepare(void); 138struct vhost_memory *vhost_dev_reset_owner_prepare(void);
138void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *); 139void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *);
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 540909de6247..effdb373b8db 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -223,8 +223,14 @@ static void init_backlight(struct atmel_lcdfb_info *sinfo)
223 223
224static void exit_backlight(struct atmel_lcdfb_info *sinfo) 224static void exit_backlight(struct atmel_lcdfb_info *sinfo)
225{ 225{
226 if (sinfo->backlight) 226 if (!sinfo->backlight)
227 backlight_device_unregister(sinfo->backlight); 227 return;
228
229 if (sinfo->backlight->ops) {
230 sinfo->backlight->props.power = FB_BLANK_POWERDOWN;
231 sinfo->backlight->ops->update_status(sinfo->backlight);
232 }
233 backlight_device_unregister(sinfo->backlight);
228} 234}
229 235
230#else 236#else
@@ -461,8 +467,11 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
461 if (info->fix.smem_len) { 467 if (info->fix.smem_len) {
462 unsigned int smem_len = (var->xres_virtual * var->yres_virtual 468 unsigned int smem_len = (var->xres_virtual * var->yres_virtual
463 * ((var->bits_per_pixel + 7) / 8)); 469 * ((var->bits_per_pixel + 7) / 8));
464 if (smem_len > info->fix.smem_len) 470 if (smem_len > info->fix.smem_len) {
471 dev_err(dev, "Frame buffer is too small (%u) for screen size (need at least %u)\n",
472 info->fix.smem_len, smem_len);
465 return -EINVAL; 473 return -EINVAL;
474 }
466 } 475 }
467 476
468 /* Saturate vertical and horizontal timings at maximum values */ 477 /* Saturate vertical and horizontal timings at maximum values */
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 60cc6fee6548..c9c2252e3719 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -53,6 +53,8 @@ static char *def_disp_name;
53module_param_named(def_disp, def_disp_name, charp, 0); 53module_param_named(def_disp, def_disp_name, charp, 0);
54MODULE_PARM_DESC(def_disp, "default display name"); 54MODULE_PARM_DESC(def_disp, "default display name");
55 55
56static bool dss_initialized;
57
56const char *omapdss_get_default_display_name(void) 58const char *omapdss_get_default_display_name(void)
57{ 59{
58 return core.default_display_name; 60 return core.default_display_name;
@@ -66,6 +68,12 @@ enum omapdss_version omapdss_get_version(void)
66} 68}
67EXPORT_SYMBOL(omapdss_get_version); 69EXPORT_SYMBOL(omapdss_get_version);
68 70
71bool omapdss_is_initialized(void)
72{
73 return dss_initialized;
74}
75EXPORT_SYMBOL(omapdss_is_initialized);
76
69struct platform_device *dss_get_core_pdev(void) 77struct platform_device *dss_get_core_pdev(void)
70{ 78{
71 return core.pdev; 79 return core.pdev;
@@ -603,6 +611,8 @@ static int __init omap_dss_init(void)
603 return r; 611 return r;
604 } 612 }
605 613
614 dss_initialized = true;
615
606 return 0; 616 return 0;
607} 617}
608 618
@@ -633,7 +643,15 @@ static int __init omap_dss_init(void)
633 643
634static int __init omap_dss_init2(void) 644static int __init omap_dss_init2(void)
635{ 645{
636 return omap_dss_register_drivers(); 646 int r;
647
648 r = omap_dss_register_drivers();
649 if (r)
650 return r;
651
652 dss_initialized = true;
653
654 return 0;
637} 655}
638 656
639core_initcall(omap_dss_init); 657core_initcall(omap_dss_init);
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index c84bb8a4d0c4..856917b33616 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -2416,6 +2416,9 @@ static int omapfb_probe(struct platform_device *pdev)
2416 2416
2417 DBG("omapfb_probe\n"); 2417 DBG("omapfb_probe\n");
2418 2418
2419 if (omapdss_is_initialized() == false)
2420 return -EPROBE_DEFER;
2421
2419 if (pdev->num_resources != 0) { 2422 if (pdev->num_resources != 0) {
2420 dev_err(&pdev->dev, "probed for an unknown device\n"); 2423 dev_err(&pdev->dev, "probed for an unknown device\n");
2421 r = -ENODEV; 2424 r = -ENODEV;
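omapfb now refuses to probe until omapdss has finished registering its drivers, returning -EPROBE_DEFER so the driver core re-queues the probe instead of failing it outright. The general shape of that pattern, with example names standing in for the real omapdss/omapfb symbols:

#include <linux/errno.h>
#include <linux/platform_device.h>

/* Stand-in for omapdss_is_initialized(). */
static bool example_dependency_ready(void)
{
	return false;
}

static int example_probe(struct platform_device *pdev)
{
	/* Dependency not up yet: ask the driver core to retry this probe later. */
	if (!example_dependency_ready())
		return -EPROBE_DEFER;

	/* ... normal probe work ... */
	return 0;
}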
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index d9f08c653d62..dbfe2c18a434 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -710,7 +710,7 @@ static int ps3fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
710 r = vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len); 710 r = vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len);
711 711
712 dev_dbg(info->device, "ps3fb: mmap framebuffer P(%lx)->V(%lx)\n", 712 dev_dbg(info->device, "ps3fb: mmap framebuffer P(%lx)->V(%lx)\n",
713 info->fix.smem_start + vma->vm_pgoff << PAGE_SHIFT, 713 info->fix.smem_start + (vma->vm_pgoff << PAGE_SHIFT),
714 vma->vm_start); 714 vma->vm_start);
715 715
716 return r; 716 return r;
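The ps3fb change is a pure operator-precedence fix: in C, + binds tighter than <<, so smem_start + vm_pgoff << PAGE_SHIFT shifted the whole sum rather than just the page offset. A one-function reminder of the corrected expression:

#include <asm/page.h>

/* "a + b << c" parses as "(a + b) << c"; shift the page offset first. */
static unsigned long example_mmap_phys(unsigned long smem_start,
				       unsigned long vm_pgoff)
{
	return smem_start + (vm_pgoff << PAGE_SHIFT);
}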
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index 18e8bd8fa947..0f0493c63371 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -41,6 +41,8 @@ module_param(selfballooning, bool, S_IRUGO);
41#ifdef CONFIG_FRONTSWAP 41#ifdef CONFIG_FRONTSWAP
42static bool frontswap __read_mostly = true; 42static bool frontswap __read_mostly = true;
43module_param(frontswap, bool, S_IRUGO); 43module_param(frontswap, bool, S_IRUGO);
44#else /* CONFIG_FRONTSWAP */
45#define frontswap (0)
44#endif /* CONFIG_FRONTSWAP */ 46#endif /* CONFIG_FRONTSWAP */
45 47
46#ifdef CONFIG_XEN_SELFBALLOONING 48#ifdef CONFIG_XEN_SELFBALLOONING
@@ -377,10 +379,10 @@ static int xen_tmem_init(void)
377#ifdef CONFIG_FRONTSWAP 379#ifdef CONFIG_FRONTSWAP
378 if (tmem_enabled && frontswap) { 380 if (tmem_enabled && frontswap) {
379 char *s = ""; 381 char *s = "";
380 struct frontswap_ops *old_ops = 382 struct frontswap_ops *old_ops;
381 frontswap_register_ops(&tmem_frontswap_ops);
382 383
383 tmem_frontswap_poolid = -1; 384 tmem_frontswap_poolid = -1;
385 old_ops = frontswap_register_ops(&tmem_frontswap_ops);
384 if (IS_ERR(old_ops) || old_ops) { 386 if (IS_ERR(old_ops) || old_ops) {
385 if (IS_ERR(old_ops)) 387 if (IS_ERR(old_ops))
386 return PTR_ERR(old_ops); 388 return PTR_ERR(old_ops);
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index a2278ba7fb27..4e8ba38aa0c9 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -106,7 +106,7 @@ static void pcistub_device_release(struct kref *kref)
106 else 106 else
107 pci_restore_state(dev); 107 pci_restore_state(dev);
108 108
109 if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) { 109 if (dev->msix_cap) {
110 struct physdev_pci_device ppdev = { 110 struct physdev_pci_device ppdev = {
111 .seg = pci_domain_nr(dev->bus), 111 .seg = pci_domain_nr(dev->bus),
112 .bus = dev->bus->number, 112 .bus = dev->bus->number,
@@ -371,7 +371,7 @@ static int pcistub_init_device(struct pci_dev *dev)
371 if (err) 371 if (err)
372 goto config_release; 372 goto config_release;
373 373
374 if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) { 374 if (dev->msix_cap) {
375 struct physdev_pci_device ppdev = { 375 struct physdev_pci_device ppdev = {
376 .seg = pci_domain_nr(dev->bus), 376 .seg = pci_domain_nr(dev->bus),
377 .bus = dev->bus->number, 377 .bus = dev->bus->number,
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 61786be9138b..ec097d6f964d 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -534,7 +534,7 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
534 534
535 err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr); 535 err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
536 if (err) 536 if (err)
537 goto out_err; 537 goto out_err_free_ballooned_pages;
538 538
539 spin_lock(&xenbus_valloc_lock); 539 spin_lock(&xenbus_valloc_lock);
540 list_add(&node->next, &xenbus_valloc_pages); 540 list_add(&node->next, &xenbus_valloc_pages);
@@ -543,8 +543,9 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
543 *vaddr = addr; 543 *vaddr = addr;
544 return 0; 544 return 0;
545 545
546 out_err: 546 out_err_free_ballooned_pages:
547 free_xenballooned_pages(1, &node->page); 547 free_xenballooned_pages(1, &node->page);
548 out_err:
548 kfree(node); 549 kfree(node);
549 return err; 550 return err;
550} 551}
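Splitting out_err into two labels in xenbus_map_ring_valloc_hvm() restores the usual kernel unwind style: each failure point jumps to the label that undoes exactly the work completed so far, and the labels run in reverse order of setup. A generic sketch of that structure (all names and allocations are placeholders):

#include <linux/gfp.h>
#include <linux/slab.h>

/* Stand-in for the step that can fail after both allocations succeeded. */
static int example_map(void *node, void *page)
{
	return 0;
}

static int example_setup(void)
{
	void *node, *page;
	int err;

	node = kzalloc(64, GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	page = (void *)__get_free_page(GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto out_err;			/* only node to undo */
	}

	err = example_map(node, page);
	if (err)
		goto out_err_free_page;		/* undo page, then node */

	return 0;

out_err_free_page:
	free_page((unsigned long)page);
out_err:
	kfree(node);
	return err;
}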
diff --git a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h
index c8abd3b8a6c4..e74f9c1fbd80 100644
--- a/drivers/xen/xenbus/xenbus_comms.h
+++ b/drivers/xen/xenbus/xenbus_comms.h
@@ -45,6 +45,7 @@ int xb_wait_for_data_to_read(void);
45int xs_input_avail(void); 45int xs_input_avail(void);
46extern struct xenstore_domain_interface *xen_store_interface; 46extern struct xenstore_domain_interface *xen_store_interface;
47extern int xen_store_evtchn; 47extern int xen_store_evtchn;
48extern enum xenstore_init xen_store_domain_type;
48 49
49extern const struct file_operations xen_xenbus_fops; 50extern const struct file_operations xen_xenbus_fops;
50 51
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 3325884c693f..56cfaaa9d006 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -69,6 +69,9 @@ EXPORT_SYMBOL_GPL(xen_store_evtchn);
69struct xenstore_domain_interface *xen_store_interface; 69struct xenstore_domain_interface *xen_store_interface;
70EXPORT_SYMBOL_GPL(xen_store_interface); 70EXPORT_SYMBOL_GPL(xen_store_interface);
71 71
72enum xenstore_init xen_store_domain_type;
73EXPORT_SYMBOL_GPL(xen_store_domain_type);
74
72static unsigned long xen_store_mfn; 75static unsigned long xen_store_mfn;
73 76
74static BLOCKING_NOTIFIER_HEAD(xenstore_chain); 77static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
@@ -719,17 +722,11 @@ static int __init xenstored_local_init(void)
719 return err; 722 return err;
720} 723}
721 724
722enum xenstore_init {
723 UNKNOWN,
724 PV,
725 HVM,
726 LOCAL,
727};
728static int __init xenbus_init(void) 725static int __init xenbus_init(void)
729{ 726{
730 int err = 0; 727 int err = 0;
731 enum xenstore_init usage = UNKNOWN;
732 uint64_t v = 0; 728 uint64_t v = 0;
729 xen_store_domain_type = XS_UNKNOWN;
733 730
734 if (!xen_domain()) 731 if (!xen_domain())
735 return -ENODEV; 732 return -ENODEV;
@@ -737,29 +734,29 @@ static int __init xenbus_init(void)
737 xenbus_ring_ops_init(); 734 xenbus_ring_ops_init();
738 735
739 if (xen_pv_domain()) 736 if (xen_pv_domain())
740 usage = PV; 737 xen_store_domain_type = XS_PV;
741 if (xen_hvm_domain()) 738 if (xen_hvm_domain())
742 usage = HVM; 739 xen_store_domain_type = XS_HVM;
743 if (xen_hvm_domain() && xen_initial_domain()) 740 if (xen_hvm_domain() && xen_initial_domain())
744 usage = LOCAL; 741 xen_store_domain_type = XS_LOCAL;
745 if (xen_pv_domain() && !xen_start_info->store_evtchn) 742 if (xen_pv_domain() && !xen_start_info->store_evtchn)
746 usage = LOCAL; 743 xen_store_domain_type = XS_LOCAL;
747 if (xen_pv_domain() && xen_start_info->store_evtchn) 744 if (xen_pv_domain() && xen_start_info->store_evtchn)
748 xenstored_ready = 1; 745 xenstored_ready = 1;
749 746
750 switch (usage) { 747 switch (xen_store_domain_type) {
751 case LOCAL: 748 case XS_LOCAL:
752 err = xenstored_local_init(); 749 err = xenstored_local_init();
753 if (err) 750 if (err)
754 goto out_error; 751 goto out_error;
755 xen_store_interface = mfn_to_virt(xen_store_mfn); 752 xen_store_interface = mfn_to_virt(xen_store_mfn);
756 break; 753 break;
757 case PV: 754 case XS_PV:
758 xen_store_evtchn = xen_start_info->store_evtchn; 755 xen_store_evtchn = xen_start_info->store_evtchn;
759 xen_store_mfn = xen_start_info->store_mfn; 756 xen_store_mfn = xen_start_info->store_mfn;
760 xen_store_interface = mfn_to_virt(xen_store_mfn); 757 xen_store_interface = mfn_to_virt(xen_store_mfn);
761 break; 758 break;
762 case HVM: 759 case XS_HVM:
763 err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); 760 err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
764 if (err) 761 if (err)
765 goto out_error; 762 goto out_error;
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index bb4f92ed8730..146f857a36f8 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -47,6 +47,13 @@ struct xen_bus_type {
47 struct bus_type bus; 47 struct bus_type bus;
48}; 48};
49 49
50enum xenstore_init {
51 XS_UNKNOWN,
52 XS_PV,
53 XS_HVM,
54 XS_LOCAL,
55};
56
50extern struct device_attribute xenbus_dev_attrs[]; 57extern struct device_attribute xenbus_dev_attrs[];
51 58
52extern int xenbus_match(struct device *_dev, struct device_driver *_drv); 59extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 3159a37d966d..a7e25073de19 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -29,6 +29,8 @@
29#include "xenbus_probe.h" 29#include "xenbus_probe.h"
30 30
31 31
32static struct workqueue_struct *xenbus_frontend_wq;
33
32/* device/<type>/<id> => <type>-<id> */ 34/* device/<type>/<id> => <type>-<id> */
33static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) 35static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
34{ 36{
@@ -89,9 +91,40 @@ static void backend_changed(struct xenbus_watch *watch,
89 xenbus_otherend_changed(watch, vec, len, 1); 91 xenbus_otherend_changed(watch, vec, len, 1);
90} 92}
91 93
94static void xenbus_frontend_delayed_resume(struct work_struct *w)
95{
96 struct xenbus_device *xdev = container_of(w, struct xenbus_device, work);
97
98 xenbus_dev_resume(&xdev->dev);
99}
100
101static int xenbus_frontend_dev_resume(struct device *dev)
102{
103 /*
104 * If xenstored is running in this domain, we cannot access the backend
105 * state at the moment, so we need to defer xenbus_dev_resume
106 */
107 if (xen_store_domain_type == XS_LOCAL) {
108 struct xenbus_device *xdev = to_xenbus_device(dev);
109
110 if (!xenbus_frontend_wq) {
111 pr_err("%s: no workqueue to process delayed resume\n",
112 xdev->nodename);
113 return -EFAULT;
114 }
115
116 INIT_WORK(&xdev->work, xenbus_frontend_delayed_resume);
117 queue_work(xenbus_frontend_wq, &xdev->work);
118
119 return 0;
120 }
121
122 return xenbus_dev_resume(dev);
123}
124
92static const struct dev_pm_ops xenbus_pm_ops = { 125static const struct dev_pm_ops xenbus_pm_ops = {
93 .suspend = xenbus_dev_suspend, 126 .suspend = xenbus_dev_suspend,
94 .resume = xenbus_dev_resume, 127 .resume = xenbus_frontend_dev_resume,
95 .freeze = xenbus_dev_suspend, 128 .freeze = xenbus_dev_suspend,
96 .thaw = xenbus_dev_cancel, 129 .thaw = xenbus_dev_cancel,
97 .restore = xenbus_dev_resume, 130 .restore = xenbus_dev_resume,
@@ -440,6 +473,8 @@ static int __init xenbus_probe_frontend_init(void)
440 473
441 register_xenstore_notifier(&xenstore_notifier); 474 register_xenstore_notifier(&xenstore_notifier);
442 475
476 xenbus_frontend_wq = create_workqueue("xenbus_frontend");
477
443 return 0; 478 return 0;
444} 479}
445subsys_initcall(xenbus_probe_frontend_init); 480subsys_initcall(xenbus_probe_frontend_init);
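When xenstored runs in the same domain, the frontend cannot reach the backend state at resume time, so the resume is handed to a workqueue and executed later via xenbus_dev_resume(). The INIT_WORK/queue_work shape in isolation, with example names replacing the xenbus ones:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_device {
	struct work_struct work;
	/* ... */
};

static struct workqueue_struct *example_wq;

static void example_delayed_resume(struct work_struct *w)
{
	struct example_device *dev = container_of(w, struct example_device, work);

	/* Runs later, in process context, once the blocker has cleared. */
	(void)dev;
}

static int example_defer_resume(struct example_device *dev)
{
	if (!example_wq)
		return -EFAULT;		/* nowhere to defer the resume to */

	INIT_WORK(&dev->work, example_delayed_resume);
	queue_work(example_wq, &dev->work);
	return 0;
}

static int __init example_init(void)
{
	example_wq = create_workqueue("example_wq");
	return example_wq ? 0 : -ENOMEM;
}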
diff --git a/fs/aio.c b/fs/aio.c
index 7fe5bdee1630..2bbcacf74d0c 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -141,9 +141,6 @@ static void aio_free_ring(struct kioctx *ctx)
141 for (i = 0; i < ctx->nr_pages; i++) 141 for (i = 0; i < ctx->nr_pages; i++)
142 put_page(ctx->ring_pages[i]); 142 put_page(ctx->ring_pages[i]);
143 143
144 if (ctx->mmap_size)
145 vm_munmap(ctx->mmap_base, ctx->mmap_size);
146
147 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) 144 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
148 kfree(ctx->ring_pages); 145 kfree(ctx->ring_pages);
149} 146}
@@ -322,11 +319,6 @@ static void free_ioctx(struct kioctx *ctx)
322 319
323 aio_free_ring(ctx); 320 aio_free_ring(ctx);
324 321
325 spin_lock(&aio_nr_lock);
326 BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
327 aio_nr -= ctx->max_reqs;
328 spin_unlock(&aio_nr_lock);
329
330 pr_debug("freeing %p\n", ctx); 322 pr_debug("freeing %p\n", ctx);
331 323
332 /* 324 /*
@@ -435,17 +427,24 @@ static void kill_ioctx(struct kioctx *ctx)
435{ 427{
436 if (!atomic_xchg(&ctx->dead, 1)) { 428 if (!atomic_xchg(&ctx->dead, 1)) {
437 hlist_del_rcu(&ctx->list); 429 hlist_del_rcu(&ctx->list);
438 /* Between hlist_del_rcu() and dropping the initial ref */
439 synchronize_rcu();
440 430
441 /* 431 /*
442 * We can't punt to workqueue here because put_ioctx() -> 432 * It'd be more correct to do this in free_ioctx(), after all
443 * free_ioctx() will unmap the ringbuffer, and that has to be 433 * the outstanding kiocbs have finished - but by then io_destroy
444 * done in the original process's context. kill_ioctx_rcu/work() 434 * has already returned, so io_setup() could potentially return
445 * exist for exit_aio(), as in that path free_ioctx() won't do 435 * -EAGAIN with no ioctxs actually in use (as far as userspace
446 * the unmap. 436 * could tell).
447 */ 437 */
448 kill_ioctx_work(&ctx->rcu_work); 438 spin_lock(&aio_nr_lock);
439 BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
440 aio_nr -= ctx->max_reqs;
441 spin_unlock(&aio_nr_lock);
442
443 if (ctx->mmap_size)
444 vm_munmap(ctx->mmap_base, ctx->mmap_size);
445
446 /* Between hlist_del_rcu() and dropping the initial ref */
447 call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
449 } 448 }
450} 449}
451 450
@@ -495,10 +494,7 @@ void exit_aio(struct mm_struct *mm)
495 */ 494 */
496 ctx->mmap_size = 0; 495 ctx->mmap_size = 0;
497 496
498 if (!atomic_xchg(&ctx->dead, 1)) { 497 kill_ioctx(ctx);
499 hlist_del_rcu(&ctx->list);
500 call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
501 }
502 } 498 }
503} 499}
504 500
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 8615ee89ab55..f95dddced968 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -265,8 +265,8 @@ befs_readdir(struct file *filp, void *dirent, filldir_t filldir)
265 result = filldir(dirent, keybuf, keysize, filp->f_pos, 265 result = filldir(dirent, keybuf, keysize, filp->f_pos,
266 (ino_t) value, d_type); 266 (ino_t) value, d_type);
267 } 267 }
268 268 if (!result)
269 filp->f_pos++; 269 filp->f_pos++;
270 270
271 befs_debug(sb, "<--- befs_readdir() filp->f_pos %Ld", filp->f_pos); 271 befs_debug(sb, "<--- befs_readdir() filp->f_pos %Ld", filp->f_pos);
272 272
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index e7b3cb5286a5..b8b60b660c8f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2859,8 +2859,8 @@ fail_qgroup:
2859 btrfs_free_qgroup_config(fs_info); 2859 btrfs_free_qgroup_config(fs_info);
2860fail_trans_kthread: 2860fail_trans_kthread:
2861 kthread_stop(fs_info->transaction_kthread); 2861 kthread_stop(fs_info->transaction_kthread);
2862 del_fs_roots(fs_info);
2863 btrfs_cleanup_transaction(fs_info->tree_root); 2862 btrfs_cleanup_transaction(fs_info->tree_root);
2863 del_fs_roots(fs_info);
2864fail_cleaner: 2864fail_cleaner:
2865 kthread_stop(fs_info->cleaner_kthread); 2865 kthread_stop(fs_info->cleaner_kthread);
2866 2866
@@ -3512,15 +3512,15 @@ int close_ctree(struct btrfs_root *root)
3512 percpu_counter_sum(&fs_info->delalloc_bytes)); 3512 percpu_counter_sum(&fs_info->delalloc_bytes));
3513 } 3513 }
3514 3514
3515 free_root_pointers(fs_info, 1);
3516
3517 btrfs_free_block_groups(fs_info); 3515 btrfs_free_block_groups(fs_info);
3518 3516
3517 btrfs_stop_all_workers(fs_info);
3518
3519 del_fs_roots(fs_info); 3519 del_fs_roots(fs_info);
3520 3520
3521 iput(fs_info->btree_inode); 3521 free_root_pointers(fs_info, 1);
3522 3522
3523 btrfs_stop_all_workers(fs_info); 3523 iput(fs_info->btree_inode);
3524 3524
3525#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 3525#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3526 if (btrfs_test_opt(root, CHECK_INTEGRITY)) 3526 if (btrfs_test_opt(root, CHECK_INTEGRITY))
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index af978f7682b3..17f3064b4a3e 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8012,6 +8012,9 @@ int btrfs_drop_inode(struct inode *inode)
8012{ 8012{
8013 struct btrfs_root *root = BTRFS_I(inode)->root; 8013 struct btrfs_root *root = BTRFS_I(inode)->root;
8014 8014
8015 if (root == NULL)
8016 return 1;
8017
8015 /* the snap/subvol tree is on deleting */ 8018 /* the snap/subvol tree is on deleting */
8016 if (btrfs_root_refs(&root->root_item) == 0 && 8019 if (btrfs_root_refs(&root->root_item) == 0 &&
8017 root != root->fs_info->tree_root) 8020 root != root->fs_info->tree_root)
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 395b82031a42..4febca4fc2de 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -4082,7 +4082,7 @@ out:
4082 return inode; 4082 return inode;
4083} 4083}
4084 4084
4085static struct reloc_control *alloc_reloc_control(void) 4085static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
4086{ 4086{
4087 struct reloc_control *rc; 4087 struct reloc_control *rc;
4088 4088
@@ -4093,7 +4093,8 @@ static struct reloc_control *alloc_reloc_control(void)
4093 INIT_LIST_HEAD(&rc->reloc_roots); 4093 INIT_LIST_HEAD(&rc->reloc_roots);
4094 backref_cache_init(&rc->backref_cache); 4094 backref_cache_init(&rc->backref_cache);
4095 mapping_tree_init(&rc->reloc_root_tree); 4095 mapping_tree_init(&rc->reloc_root_tree);
4096 extent_io_tree_init(&rc->processed_blocks, NULL); 4096 extent_io_tree_init(&rc->processed_blocks,
4097 fs_info->btree_inode->i_mapping);
4097 return rc; 4098 return rc;
4098} 4099}
4099 4100
@@ -4110,7 +4111,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
4110 int rw = 0; 4111 int rw = 0;
4111 int err = 0; 4112 int err = 0;
4112 4113
4113 rc = alloc_reloc_control(); 4114 rc = alloc_reloc_control(fs_info);
4114 if (!rc) 4115 if (!rc)
4115 return -ENOMEM; 4116 return -ENOMEM;
4116 4117
@@ -4311,7 +4312,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
4311 if (list_empty(&reloc_roots)) 4312 if (list_empty(&reloc_roots))
4312 goto out; 4313 goto out;
4313 4314
4314 rc = alloc_reloc_control(); 4315 rc = alloc_reloc_control(root->fs_info);
4315 if (!rc) { 4316 if (!rc) {
4316 err = -ENOMEM; 4317 err = -ENOMEM;
4317 goto out; 4318 goto out;
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 202dd3d68be0..ebbf680378e2 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -191,27 +191,23 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
191} 191}
192 192
193/** 193/**
194 * Encode the flock and fcntl locks for the given inode into the pagelist. 194 * Encode the flock and fcntl locks for the given inode into the ceph_filelock
195 * Format is: #fcntl locks, sequential fcntl locks, #flock locks, 195 * array. Must be called with lock_flocks() already held.
196 * sequential flock locks. 196 * If we encounter more of a specific lock type than expected, return -ENOSPC.
197 * Must be called with lock_flocks() already held.
198 * If we encounter more of a specific lock type than expected,
199 * we return the value 1.
200 */ 197 */
201int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist, 198int ceph_encode_locks_to_buffer(struct inode *inode,
202 int num_fcntl_locks, int num_flock_locks) 199 struct ceph_filelock *flocks,
200 int num_fcntl_locks, int num_flock_locks)
203{ 201{
204 struct file_lock *lock; 202 struct file_lock *lock;
205 struct ceph_filelock cephlock;
206 int err = 0; 203 int err = 0;
207 int seen_fcntl = 0; 204 int seen_fcntl = 0;
208 int seen_flock = 0; 205 int seen_flock = 0;
206 int l = 0;
209 207
210 dout("encoding %d flock and %d fcntl locks", num_flock_locks, 208 dout("encoding %d flock and %d fcntl locks", num_flock_locks,
211 num_fcntl_locks); 209 num_fcntl_locks);
212 err = ceph_pagelist_append(pagelist, &num_fcntl_locks, sizeof(u32)); 210
213 if (err)
214 goto fail;
215 for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { 211 for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
216 if (lock->fl_flags & FL_POSIX) { 212 if (lock->fl_flags & FL_POSIX) {
217 ++seen_fcntl; 213 ++seen_fcntl;
@@ -219,19 +215,12 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
219 err = -ENOSPC; 215 err = -ENOSPC;
220 goto fail; 216 goto fail;
221 } 217 }
222 err = lock_to_ceph_filelock(lock, &cephlock); 218 err = lock_to_ceph_filelock(lock, &flocks[l]);
223 if (err) 219 if (err)
224 goto fail; 220 goto fail;
225 err = ceph_pagelist_append(pagelist, &cephlock, 221 ++l;
226 sizeof(struct ceph_filelock));
227 } 222 }
228 if (err)
229 goto fail;
230 } 223 }
231
232 err = ceph_pagelist_append(pagelist, &num_flock_locks, sizeof(u32));
233 if (err)
234 goto fail;
235 for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { 224 for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
236 if (lock->fl_flags & FL_FLOCK) { 225 if (lock->fl_flags & FL_FLOCK) {
237 ++seen_flock; 226 ++seen_flock;
@@ -239,19 +228,51 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
239 err = -ENOSPC; 228 err = -ENOSPC;
240 goto fail; 229 goto fail;
241 } 230 }
242 err = lock_to_ceph_filelock(lock, &cephlock); 231 err = lock_to_ceph_filelock(lock, &flocks[l]);
243 if (err) 232 if (err)
244 goto fail; 233 goto fail;
245 err = ceph_pagelist_append(pagelist, &cephlock, 234 ++l;
246 sizeof(struct ceph_filelock));
247 } 235 }
248 if (err)
249 goto fail;
250 } 236 }
251fail: 237fail:
252 return err; 238 return err;
253} 239}
254 240
241/**
242 * Copy the encoded flock and fcntl locks into the pagelist.
243 * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
244 * sequential flock locks.
245 * Returns zero on success.
246 */
247int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
248 struct ceph_pagelist *pagelist,
249 int num_fcntl_locks, int num_flock_locks)
250{
251 int err = 0;
252 __le32 nlocks;
253
254 nlocks = cpu_to_le32(num_fcntl_locks);
255 err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
256 if (err)
257 goto out_fail;
258
259 err = ceph_pagelist_append(pagelist, flocks,
260 num_fcntl_locks * sizeof(*flocks));
261 if (err)
262 goto out_fail;
263
264 nlocks = cpu_to_le32(num_flock_locks);
265 err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
266 if (err)
267 goto out_fail;
268
269 err = ceph_pagelist_append(pagelist,
270 &flocks[num_fcntl_locks],
271 num_flock_locks * sizeof(*flocks));
272out_fail:
273 return err;
274}
275
255/* 276/*
256 * Given a pointer to a lock, convert it to a ceph filelock 277 * Given a pointer to a lock, convert it to a ceph filelock
257 */ 278 */
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 4f22671a5bd4..4d2920304be8 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2478,39 +2478,44 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
2478 2478
2479 if (recon_state->flock) { 2479 if (recon_state->flock) {
2480 int num_fcntl_locks, num_flock_locks; 2480 int num_fcntl_locks, num_flock_locks;
2481 struct ceph_pagelist_cursor trunc_point; 2481 struct ceph_filelock *flocks;
2482 2482
2483 ceph_pagelist_set_cursor(pagelist, &trunc_point); 2483encode_again:
2484 do { 2484 lock_flocks();
2485 lock_flocks(); 2485 ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
2486 ceph_count_locks(inode, &num_fcntl_locks, 2486 unlock_flocks();
2487 &num_flock_locks); 2487 flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
2488 rec.v2.flock_len = (2*sizeof(u32) + 2488 sizeof(struct ceph_filelock), GFP_NOFS);
2489 (num_fcntl_locks+num_flock_locks) * 2489 if (!flocks) {
2490 sizeof(struct ceph_filelock)); 2490 err = -ENOMEM;
2491 unlock_flocks(); 2491 goto out_free;
2492 2492 }
2493 /* pre-alloc pagelist */ 2493 lock_flocks();
2494 ceph_pagelist_truncate(pagelist, &trunc_point); 2494 err = ceph_encode_locks_to_buffer(inode, flocks,
2495 err = ceph_pagelist_append(pagelist, &rec, reclen); 2495 num_fcntl_locks,
2496 if (!err) 2496 num_flock_locks);
2497 err = ceph_pagelist_reserve(pagelist, 2497 unlock_flocks();
2498 rec.v2.flock_len); 2498 if (err) {
2499 2499 kfree(flocks);
2500 /* encode locks */ 2500 if (err == -ENOSPC)
2501 if (!err) { 2501 goto encode_again;
2502 lock_flocks(); 2502 goto out_free;
2503 err = ceph_encode_locks(inode, 2503 }
2504 pagelist, 2504 /*
2505 num_fcntl_locks, 2505 * number of encoded locks is stable, so copy to pagelist
2506 num_flock_locks); 2506 */
2507 unlock_flocks(); 2507 rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) +
2508 } 2508 (num_fcntl_locks+num_flock_locks) *
2509 } while (err == -ENOSPC); 2509 sizeof(struct ceph_filelock));
2510 err = ceph_pagelist_append(pagelist, &rec, reclen);
2511 if (!err)
2512 err = ceph_locks_to_pagelist(flocks, pagelist,
2513 num_fcntl_locks,
2514 num_flock_locks);
2515 kfree(flocks);
2510 } else { 2516 } else {
2511 err = ceph_pagelist_append(pagelist, &rec, reclen); 2517 err = ceph_pagelist_append(pagelist, &rec, reclen);
2512 } 2518 }
2513
2514out_free: 2519out_free:
2515 kfree(path); 2520 kfree(path);
2516out_dput: 2521out_dput:
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 8696be2ff679..7ccfdb4aea2e 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -822,8 +822,13 @@ extern const struct export_operations ceph_export_ops;
822extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl); 822extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl);
823extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl); 823extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl);
824extern void ceph_count_locks(struct inode *inode, int *p_num, int *f_num); 824extern void ceph_count_locks(struct inode *inode, int *p_num, int *f_num);
825extern int ceph_encode_locks(struct inode *i, struct ceph_pagelist *p, 825extern int ceph_encode_locks_to_buffer(struct inode *inode,
826 int p_locks, int f_locks); 826 struct ceph_filelock *flocks,
827 int num_fcntl_locks,
828 int num_flock_locks);
829extern int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
830 struct ceph_pagelist *pagelist,
831 int num_fcntl_locks, int num_flock_locks);
827extern int lock_to_ceph_filelock(struct file_lock *fl, struct ceph_filelock *c); 832extern int lock_to_ceph_filelock(struct file_lock *fl, struct ceph_filelock *c);
828 833
829/* debugfs.c */ 834/* debugfs.c */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 5b97e56ddbca..e3bc39bb9d12 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -3279,8 +3279,8 @@ build_unc_path_to_root(const struct smb_vol *vol,
3279 pos = full_path + unc_len; 3279 pos = full_path + unc_len;
3280 3280
3281 if (pplen) { 3281 if (pplen) {
3282 *pos++ = CIFS_DIR_SEP(cifs_sb); 3282 *pos = CIFS_DIR_SEP(cifs_sb);
3283 strncpy(pos, vol->prepath, pplen); 3283 strncpy(pos + 1, vol->prepath, pplen);
3284 pos += pplen; 3284 pos += pplen;
3285 } 3285 }
3286 3286
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 201f0a0d6b0a..a7abbea2c096 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -295,6 +295,12 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
295static int 295static int
296ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) 296ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
297{ 297{
298 int rc;
299
300 rc = filemap_write_and_wait(file->f_mapping);
301 if (rc)
302 return rc;
303
298 return vfs_fsync(ecryptfs_file_to_lower(file), datasync); 304 return vfs_fsync(ecryptfs_file_to_lower(file), datasync);
299} 305}
300 306
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index bfb531564319..8dd524f32284 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -44,8 +44,11 @@ static ssize_t efivarfs_file_write(struct file *file,
44 44
45 bytes = efivar_entry_set_get_size(var, attributes, &datasize, 45 bytes = efivar_entry_set_get_size(var, attributes, &datasize,
46 data, &set); 46 data, &set);
47 if (!set && bytes) 47 if (!set && bytes) {
48 if (bytes == -ENOENT)
49 bytes = -EIO;
48 goto out; 50 goto out;
51 }
49 52
50 if (bytes == -ENOENT) { 53 if (bytes == -ENOENT) {
51 drop_nlink(inode); 54 drop_nlink(inode);
@@ -76,7 +79,14 @@ static ssize_t efivarfs_file_read(struct file *file, char __user *userbuf,
76 int err; 79 int err;
77 80
78 err = efivar_entry_size(var, &datasize); 81 err = efivar_entry_size(var, &datasize);
79 if (err) 82
83 /*
84 * efivarfs represents uncommitted variables with
85 * zero-length files. Reading them should return EOF.
86 */
87 if (err == -ENOENT)
88 return 0;
89 else if (err)
80 return err; 90 return err;
81 91
82 data = kmalloc(datasize + sizeof(attributes), GFP_KERNEL); 92 data = kmalloc(datasize + sizeof(attributes), GFP_KERNEL);
diff --git a/fs/exec.c b/fs/exec.c
index 643019585574..ffd7a813ad3d 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1135,13 +1135,6 @@ void setup_new_exec(struct linux_binprm * bprm)
1135 set_dumpable(current->mm, suid_dumpable); 1135 set_dumpable(current->mm, suid_dumpable);
1136 } 1136 }
1137 1137
1138 /*
1139 * Flush performance counters when crossing a
1140 * security domain:
1141 */
1142 if (!get_dumpable(current->mm))
1143 perf_event_exit_task(current);
1144
1145 /* An exec changes our domain. We are no longer part of the thread 1138 /* An exec changes our domain. We are no longer part of the thread
1146 group */ 1139 group */
1147 1140
@@ -1205,6 +1198,15 @@ void install_exec_creds(struct linux_binprm *bprm)
1205 1198
1206 commit_creds(bprm->cred); 1199 commit_creds(bprm->cred);
1207 bprm->cred = NULL; 1200 bprm->cred = NULL;
1201
1202 /*
1203 * Disable monitoring for regular users
1204 * when executing setuid binaries. Must
1205 * wait until new credentials are committed
1206 * by commit_creds() above
1207 */
1208 if (get_dumpable(current->mm) != SUID_DUMP_USER)
1209 perf_event_exit_task(current);
1208 /* 1210 /*
1209 * cred_guard_mutex must be held at least to this point to prevent 1211 * cred_guard_mutex must be held at least to this point to prevent
1210 * ptrace_attach() from altering our determination of the task's 1212 * ptrace_attach() from altering our determination of the task's
diff --git a/fs/file_table.c b/fs/file_table.c
index cd4d87a82951..485dc0eddd67 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -306,17 +306,18 @@ void fput(struct file *file)
306{ 306{
307 if (atomic_long_dec_and_test(&file->f_count)) { 307 if (atomic_long_dec_and_test(&file->f_count)) {
308 struct task_struct *task = current; 308 struct task_struct *task = current;
309 unsigned long flags;
310
309 file_sb_list_del(file); 311 file_sb_list_del(file);
310 if (unlikely(in_interrupt() || task->flags & PF_KTHREAD)) { 312 if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
311 unsigned long flags; 313 init_task_work(&file->f_u.fu_rcuhead, ____fput);
312 spin_lock_irqsave(&delayed_fput_lock, flags); 314 if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
313 list_add(&file->f_u.fu_list, &delayed_fput_list); 315 return;
314 schedule_work(&delayed_fput_work);
315 spin_unlock_irqrestore(&delayed_fput_lock, flags);
316 return;
317 } 316 }
318 init_task_work(&file->f_u.fu_rcuhead, ____fput); 317 spin_lock_irqsave(&delayed_fput_lock, flags);
319 task_work_add(task, &file->f_u.fu_rcuhead, true); 318 list_add(&file->f_u.fu_list, &delayed_fput_list);
319 schedule_work(&delayed_fput_work);
320 spin_unlock_irqrestore(&delayed_fput_lock, flags);
320 } 321 }
321} 322}
322 323
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 254df56b847b..f3f783dc4f75 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -180,6 +180,8 @@ u64 fuse_get_attr_version(struct fuse_conn *fc)
180static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) 180static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
181{ 181{
182 struct inode *inode; 182 struct inode *inode;
183 struct dentry *parent;
184 struct fuse_conn *fc;
183 185
184 inode = ACCESS_ONCE(entry->d_inode); 186 inode = ACCESS_ONCE(entry->d_inode);
185 if (inode && is_bad_inode(inode)) 187 if (inode && is_bad_inode(inode))
@@ -187,10 +189,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
187 else if (fuse_dentry_time(entry) < get_jiffies_64()) { 189 else if (fuse_dentry_time(entry) < get_jiffies_64()) {
188 int err; 190 int err;
189 struct fuse_entry_out outarg; 191 struct fuse_entry_out outarg;
190 struct fuse_conn *fc;
191 struct fuse_req *req; 192 struct fuse_req *req;
192 struct fuse_forget_link *forget; 193 struct fuse_forget_link *forget;
193 struct dentry *parent;
194 u64 attr_version; 194 u64 attr_version;
195 195
196 /* For negative dentries, always do a fresh lookup */ 196 /* For negative dentries, always do a fresh lookup */
@@ -241,8 +241,14 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
241 entry_attr_timeout(&outarg), 241 entry_attr_timeout(&outarg),
242 attr_version); 242 attr_version);
243 fuse_change_entry_timeout(entry, &outarg); 243 fuse_change_entry_timeout(entry, &outarg);
244 } else if (inode) {
245 fc = get_fuse_conn(inode);
246 if (fc->readdirplus_auto) {
247 parent = dget_parent(entry);
248 fuse_advise_use_readdirplus(parent->d_inode);
249 dput(parent);
250 }
244 } 251 }
245 fuse_advise_use_readdirplus(inode);
246 return 1; 252 return 1;
247} 253}
248 254
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index d1c9b85b3f58..35f281033142 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -16,6 +16,7 @@
16#include <linux/compat.h> 16#include <linux/compat.h>
17#include <linux/swap.h> 17#include <linux/swap.h>
18#include <linux/aio.h> 18#include <linux/aio.h>
19#include <linux/falloc.h>
19 20
20static const struct file_operations fuse_direct_io_file_operations; 21static const struct file_operations fuse_direct_io_file_operations;
21 22
@@ -1278,7 +1279,10 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
1278 1279
1279 iov_iter_init(&ii, iov, nr_segs, count, 0); 1280 iov_iter_init(&ii, iov, nr_segs, count, 0);
1280 1281
1281 req = fuse_get_req(fc, fuse_iter_npages(&ii)); 1282 if (io->async)
1283 req = fuse_get_req_for_background(fc, fuse_iter_npages(&ii));
1284 else
1285 req = fuse_get_req(fc, fuse_iter_npages(&ii));
1282 if (IS_ERR(req)) 1286 if (IS_ERR(req))
1283 return PTR_ERR(req); 1287 return PTR_ERR(req);
1284 1288
@@ -1314,7 +1318,11 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
1314 break; 1318 break;
1315 if (count) { 1319 if (count) {
1316 fuse_put_request(fc, req); 1320 fuse_put_request(fc, req);
1317 req = fuse_get_req(fc, fuse_iter_npages(&ii)); 1321 if (io->async)
1322 req = fuse_get_req_for_background(fc,
1323 fuse_iter_npages(&ii));
1324 else
1325 req = fuse_get_req(fc, fuse_iter_npages(&ii));
1318 if (IS_ERR(req)) 1326 if (IS_ERR(req))
1319 break; 1327 break;
1320 } 1328 }
@@ -2365,6 +2373,11 @@ static void fuse_do_truncate(struct file *file)
2365 fuse_do_setattr(inode, &attr, file); 2373 fuse_do_setattr(inode, &attr, file);
2366} 2374}
2367 2375
2376static inline loff_t fuse_round_up(loff_t off)
2377{
2378 return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
2379}
2380
2368static ssize_t 2381static ssize_t
2369fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, 2382fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
2370 loff_t offset, unsigned long nr_segs) 2383 loff_t offset, unsigned long nr_segs)
@@ -2372,6 +2385,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
2372 ssize_t ret = 0; 2385 ssize_t ret = 0;
2373 struct file *file = iocb->ki_filp; 2386 struct file *file = iocb->ki_filp;
2374 struct fuse_file *ff = file->private_data; 2387 struct fuse_file *ff = file->private_data;
2388 bool async_dio = ff->fc->async_dio;
2375 loff_t pos = 0; 2389 loff_t pos = 0;
2376 struct inode *inode; 2390 struct inode *inode;
2377 loff_t i_size; 2391 loff_t i_size;
@@ -2383,10 +2397,10 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
2383 i_size = i_size_read(inode); 2397 i_size = i_size_read(inode);
2384 2398
2385 /* optimization for short read */ 2399 /* optimization for short read */
2386 if (rw != WRITE && offset + count > i_size) { 2400 if (async_dio && rw != WRITE && offset + count > i_size) {
2387 if (offset >= i_size) 2401 if (offset >= i_size)
2388 return 0; 2402 return 0;
2389 count = i_size - offset; 2403 count = min_t(loff_t, count, fuse_round_up(i_size - offset));
2390 } 2404 }
2391 2405
2392 io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL); 2406 io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
@@ -2404,7 +2418,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
2404 * By default, we want to optimize all I/Os with async request 2418 * By default, we want to optimize all I/Os with async request
2405 * submission to the client filesystem if supported. 2419 * submission to the client filesystem if supported.
2406 */ 2420 */
2407 io->async = ff->fc->async_dio; 2421 io->async = async_dio;
2408 io->iocb = iocb; 2422 io->iocb = iocb;
2409 2423
2410 /* 2424 /*
@@ -2412,7 +2426,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
2412 * to wait on real async I/O requests, so we must submit this request 2426 * to wait on real async I/O requests, so we must submit this request
2413 * synchronously. 2427 * synchronously.
2414 */ 2428 */
2415 if (!is_sync_kiocb(iocb) && (offset + count > i_size)) 2429 if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE)
2416 io->async = false; 2430 io->async = false;
2417 2431
2418 if (rw == WRITE) 2432 if (rw == WRITE)
@@ -2424,7 +2438,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
2424 fuse_aio_complete(io, ret < 0 ? ret : 0, -1); 2438 fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
2425 2439
2426 /* we have a non-extending, async request, so return */ 2440 /* we have a non-extending, async request, so return */
2427 if (ret > 0 && !is_sync_kiocb(iocb)) 2441 if (!is_sync_kiocb(iocb))
2428 return -EIOCBQUEUED; 2442 return -EIOCBQUEUED;
2429 2443
2430 ret = wait_on_sync_kiocb(iocb); 2444 ret = wait_on_sync_kiocb(iocb);
@@ -2446,6 +2460,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2446 loff_t length) 2460 loff_t length)
2447{ 2461{
2448 struct fuse_file *ff = file->private_data; 2462 struct fuse_file *ff = file->private_data;
2463 struct inode *inode = file->f_inode;
2449 struct fuse_conn *fc = ff->fc; 2464 struct fuse_conn *fc = ff->fc;
2450 struct fuse_req *req; 2465 struct fuse_req *req;
2451 struct fuse_fallocate_in inarg = { 2466 struct fuse_fallocate_in inarg = {
@@ -2455,13 +2470,23 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2455 .mode = mode 2470 .mode = mode
2456 }; 2471 };
2457 int err; 2472 int err;
2473 bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
2474 (mode & FALLOC_FL_PUNCH_HOLE);
2458 2475
2459 if (fc->no_fallocate) 2476 if (fc->no_fallocate)
2460 return -EOPNOTSUPP; 2477 return -EOPNOTSUPP;
2461 2478
2479 if (lock_inode) {
2480 mutex_lock(&inode->i_mutex);
2481 if (mode & FALLOC_FL_PUNCH_HOLE)
2482 fuse_set_nowrite(inode);
2483 }
2484
2462 req = fuse_get_req_nopages(fc); 2485 req = fuse_get_req_nopages(fc);
2463 if (IS_ERR(req)) 2486 if (IS_ERR(req)) {
2464 return PTR_ERR(req); 2487 err = PTR_ERR(req);
2488 goto out;
2489 }
2465 2490
2466 req->in.h.opcode = FUSE_FALLOCATE; 2491 req->in.h.opcode = FUSE_FALLOCATE;
2467 req->in.h.nodeid = ff->nodeid; 2492 req->in.h.nodeid = ff->nodeid;
@@ -2476,6 +2501,25 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2476 } 2501 }
2477 fuse_put_request(fc, req); 2502 fuse_put_request(fc, req);
2478 2503
2504 if (err)
2505 goto out;
2506
2507 /* we could have extended the file */
2508 if (!(mode & FALLOC_FL_KEEP_SIZE))
2509 fuse_write_update_size(inode, offset + length);
2510
2511 if (mode & FALLOC_FL_PUNCH_HOLE)
2512 truncate_pagecache_range(inode, offset, offset + length - 1);
2513
2514 fuse_invalidate_attr(inode);
2515
2516out:
2517 if (lock_inode) {
2518 if (mode & FALLOC_FL_PUNCH_HOLE)
2519 fuse_release_nowrite(inode);
2520 mutex_unlock(&inode->i_mutex);
2521 }
2522
2479 return err; 2523 return err;
2480} 2524}
2481 2525
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 6201f81e4d3a..9a0cdde14a08 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -867,10 +867,11 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
867 fc->dont_mask = 1; 867 fc->dont_mask = 1;
868 if (arg->flags & FUSE_AUTO_INVAL_DATA) 868 if (arg->flags & FUSE_AUTO_INVAL_DATA)
869 fc->auto_inval_data = 1; 869 fc->auto_inval_data = 1;
870 if (arg->flags & FUSE_DO_READDIRPLUS) 870 if (arg->flags & FUSE_DO_READDIRPLUS) {
871 fc->do_readdirplus = 1; 871 fc->do_readdirplus = 1;
872 if (arg->flags & FUSE_READDIRPLUS_AUTO) 872 if (arg->flags & FUSE_READDIRPLUS_AUTO)
873 fc->readdirplus_auto = 1; 873 fc->readdirplus_auto = 1;
874 }
874 if (arg->flags & FUSE_ASYNC_DIO) 875 if (arg->flags & FUSE_ASYNC_DIO)
875 fc->async_dio = 1; 876 fc->async_dio = 1;
876 } else { 877 } else {
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 1dc9a13ce6bb..93b5809c20bb 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1286,17 +1286,26 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
1286 if (ret) 1286 if (ret)
1287 return ret; 1287 return ret;
1288 1288
1289 ret = get_write_access(inode);
1290 if (ret)
1291 return ret;
1292
1289 inode_dio_wait(inode); 1293 inode_dio_wait(inode);
1290 1294
1291 ret = gfs2_rs_alloc(GFS2_I(inode)); 1295 ret = gfs2_rs_alloc(GFS2_I(inode));
1292 if (ret) 1296 if (ret)
1293 return ret; 1297 goto out;
1294 1298
1295 oldsize = inode->i_size; 1299 oldsize = inode->i_size;
1296 if (newsize >= oldsize) 1300 if (newsize >= oldsize) {
1297 return do_grow(inode, newsize); 1301 ret = do_grow(inode, newsize);
1302 goto out;
1303 }
1298 1304
1299 return do_shrink(inode, oldsize, newsize); 1305 ret = do_shrink(inode, oldsize, newsize);
1306out:
1307 put_write_access(inode);
1308 return ret;
1300} 1309}
1301 1310
1302int gfs2_truncatei_resume(struct gfs2_inode *ip) 1311int gfs2_truncatei_resume(struct gfs2_inode *ip)
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index c3e82bd23179..b631c9043460 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -354,22 +354,31 @@ static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip)
354 return ERR_PTR(-EIO); 354 return ERR_PTR(-EIO);
355 } 355 }
356 356
357 hc = kmalloc(hsize, GFP_NOFS); 357 hc = kmalloc(hsize, GFP_NOFS | __GFP_NOWARN);
358 ret = -ENOMEM; 358 if (hc == NULL)
359 hc = __vmalloc(hsize, GFP_NOFS, PAGE_KERNEL);
360
359 if (hc == NULL) 361 if (hc == NULL)
360 return ERR_PTR(-ENOMEM); 362 return ERR_PTR(-ENOMEM);
361 363
362 ret = gfs2_dir_read_data(ip, hc, hsize); 364 ret = gfs2_dir_read_data(ip, hc, hsize);
363 if (ret < 0) { 365 if (ret < 0) {
364 kfree(hc); 366 if (is_vmalloc_addr(hc))
367 vfree(hc);
368 else
369 kfree(hc);
365 return ERR_PTR(ret); 370 return ERR_PTR(ret);
366 } 371 }
367 372
368 spin_lock(&inode->i_lock); 373 spin_lock(&inode->i_lock);
369 if (ip->i_hash_cache) 374 if (ip->i_hash_cache) {
370 kfree(hc); 375 if (is_vmalloc_addr(hc))
371 else 376 vfree(hc);
377 else
378 kfree(hc);
379 } else {
372 ip->i_hash_cache = hc; 380 ip->i_hash_cache = hc;
381 }
373 spin_unlock(&inode->i_lock); 382 spin_unlock(&inode->i_lock);
374 383
375 return ip->i_hash_cache; 384 return ip->i_hash_cache;
@@ -385,7 +394,10 @@ void gfs2_dir_hash_inval(struct gfs2_inode *ip)
385{ 394{
386 __be64 *hc = ip->i_hash_cache; 395 __be64 *hc = ip->i_hash_cache;
387 ip->i_hash_cache = NULL; 396 ip->i_hash_cache = NULL;
388 kfree(hc); 397 if (is_vmalloc_addr(hc))
398 vfree(hc);
399 else
400 kfree(hc);
389} 401}
390 402
391static inline int gfs2_dirent_sentinel(const struct gfs2_dirent *dent) 403static inline int gfs2_dirent_sentinel(const struct gfs2_dirent *dent)
@@ -1113,7 +1125,10 @@ static int dir_double_exhash(struct gfs2_inode *dip)
1113 if (IS_ERR(hc)) 1125 if (IS_ERR(hc))
1114 return PTR_ERR(hc); 1126 return PTR_ERR(hc);
1115 1127
1116 h = hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS); 1128 h = hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS | __GFP_NOWARN);
1129 if (hc2 == NULL)
1130 hc2 = __vmalloc(hsize_bytes * 2, GFP_NOFS, PAGE_KERNEL);
1131
1117 if (!hc2) 1132 if (!hc2)
1118 return -ENOMEM; 1133 return -ENOMEM;
1119 1134
@@ -1145,7 +1160,10 @@ fail:
1145 gfs2_dinode_out(dip, dibh->b_data); 1160 gfs2_dinode_out(dip, dibh->b_data);
1146 brelse(dibh); 1161 brelse(dibh);
1147out_kfree: 1162out_kfree:
1148 kfree(hc2); 1163 if (is_vmalloc_addr(hc2))
1164 vfree(hc2);
1165 else
1166 kfree(hc2);
1149 return error; 1167 return error;
1150} 1168}
1151 1169
@@ -1846,6 +1864,8 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
1846 memset(&rlist, 0, sizeof(struct gfs2_rgrp_list)); 1864 memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
1847 1865
1848 ht = kzalloc(size, GFP_NOFS); 1866 ht = kzalloc(size, GFP_NOFS);
1867 if (ht == NULL)
1868 ht = vzalloc(size);
1849 if (!ht) 1869 if (!ht)
1850 return -ENOMEM; 1870 return -ENOMEM;
1851 1871
@@ -1933,7 +1953,10 @@ out_rlist:
1933 gfs2_rlist_free(&rlist); 1953 gfs2_rlist_free(&rlist);
1934 gfs2_quota_unhold(dip); 1954 gfs2_quota_unhold(dip);
1935out: 1955out:
1936 kfree(ht); 1956 if (is_vmalloc_addr(ht))
1957 vfree(ht);
1958 else
1959 kfree(ht);
1937 return error; 1960 return error;
1938} 1961}
1939 1962
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index acd16764b133..ad0dc38d87ab 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -402,16 +402,20 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
402 /* Update file times before taking page lock */ 402 /* Update file times before taking page lock */
403 file_update_time(vma->vm_file); 403 file_update_time(vma->vm_file);
404 404
405 ret = get_write_access(inode);
406 if (ret)
407 goto out;
408
405 ret = gfs2_rs_alloc(ip); 409 ret = gfs2_rs_alloc(ip);
406 if (ret) 410 if (ret)
407 return ret; 411 goto out_write_access;
408 412
409 gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE); 413 gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE);
410 414
411 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); 415 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
412 ret = gfs2_glock_nq(&gh); 416 ret = gfs2_glock_nq(&gh);
413 if (ret) 417 if (ret)
414 goto out; 418 goto out_uninit;
415 419
416 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); 420 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
417 set_bit(GIF_SW_PAGED, &ip->i_flags); 421 set_bit(GIF_SW_PAGED, &ip->i_flags);
@@ -480,12 +484,15 @@ out_quota_unlock:
480 gfs2_quota_unlock(ip); 484 gfs2_quota_unlock(ip);
481out_unlock: 485out_unlock:
482 gfs2_glock_dq(&gh); 486 gfs2_glock_dq(&gh);
483out: 487out_uninit:
484 gfs2_holder_uninit(&gh); 488 gfs2_holder_uninit(&gh);
485 if (ret == 0) { 489 if (ret == 0) {
486 set_page_dirty(page); 490 set_page_dirty(page);
487 wait_for_stable_page(page); 491 wait_for_stable_page(page);
488 } 492 }
493out_write_access:
494 put_write_access(inode);
495out:
489 sb_end_pagefault(inode->i_sb); 496 sb_end_pagefault(inode->i_sb);
490 return block_page_mkwrite_return(ret); 497 return block_page_mkwrite_return(ret);
491} 498}
@@ -594,10 +601,10 @@ static int gfs2_release(struct inode *inode, struct file *file)
594 kfree(file->private_data); 601 kfree(file->private_data);
595 file->private_data = NULL; 602 file->private_data = NULL;
596 603
597 if ((file->f_mode & FMODE_WRITE) && 604 if (!(file->f_mode & FMODE_WRITE))
598 (atomic_read(&inode->i_writecount) == 1)) 605 return 0;
599 gfs2_rs_delete(ip);
600 606
607 gfs2_rs_delete(ip);
601 return 0; 608 return 0;
602} 609}
603 610
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 8833a4f264e3..62b484e4a9e4 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -189,6 +189,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
189 return inode; 189 return inode;
190 190
191fail_refresh: 191fail_refresh:
192 ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
192 ip->i_iopen_gh.gh_gl->gl_object = NULL; 193 ip->i_iopen_gh.gh_gl->gl_object = NULL;
193 gfs2_glock_dq_uninit(&ip->i_iopen_gh); 194 gfs2_glock_dq_uninit(&ip->i_iopen_gh);
194fail_iopen: 195fail_iopen:
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 68b4c8f1fce8..6c33d7b6e0c4 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -419,7 +419,9 @@ static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
419 if (total > limit) 419 if (total > limit)
420 num = limit; 420 num = limit;
421 gfs2_log_unlock(sdp); 421 gfs2_log_unlock(sdp);
422 page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA, num + 1, num); 422 page = gfs2_get_log_desc(sdp,
423 is_databuf ? GFS2_LOG_DESC_JDATA :
424 GFS2_LOG_DESC_METADATA, num + 1, num);
423 ld = page_address(page); 425 ld = page_address(page);
424 gfs2_log_lock(sdp); 426 gfs2_log_lock(sdp);
425 ptr = (__be64 *)(ld + 1); 427 ptr = (__be64 *)(ld + 1);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 5232525934ae..9809156e3d04 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -638,8 +638,10 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
638 */ 638 */
639void gfs2_rs_delete(struct gfs2_inode *ip) 639void gfs2_rs_delete(struct gfs2_inode *ip)
640{ 640{
641 struct inode *inode = &ip->i_inode;
642
641 down_write(&ip->i_rw_mutex); 643 down_write(&ip->i_rw_mutex);
642 if (ip->i_res) { 644 if (ip->i_res && atomic_read(&inode->i_writecount) <= 1) {
643 gfs2_rs_deltree(ip->i_res); 645 gfs2_rs_deltree(ip->i_res);
644 BUG_ON(ip->i_res->rs_free); 646 BUG_ON(ip->i_res->rs_free);
645 kmem_cache_free(gfs2_rsrv_cachep, ip->i_res); 647 kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 917c8e1eb4ae..e5639dec66c4 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1444,6 +1444,7 @@ static void gfs2_evict_inode(struct inode *inode)
1444 /* Must not read inode block until block type has been verified */ 1444 /* Must not read inode block until block type has been verified */
1445 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh); 1445 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
1446 if (unlikely(error)) { 1446 if (unlikely(error)) {
1447 ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
1447 gfs2_glock_dq_uninit(&ip->i_iopen_gh); 1448 gfs2_glock_dq_uninit(&ip->i_iopen_gh);
1448 goto out; 1449 goto out;
1449 } 1450 }
@@ -1514,8 +1515,10 @@ out_unlock:
1514 if (gfs2_rs_active(ip->i_res)) 1515 if (gfs2_rs_active(ip->i_res))
1515 gfs2_rs_deltree(ip->i_res); 1516 gfs2_rs_deltree(ip->i_res);
1516 1517
1517 if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) 1518 if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
1519 ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
1518 gfs2_glock_dq(&ip->i_iopen_gh); 1520 gfs2_glock_dq(&ip->i_iopen_gh);
1521 }
1519 gfs2_holder_uninit(&ip->i_iopen_gh); 1522 gfs2_holder_uninit(&ip->i_iopen_gh);
1520 gfs2_glock_dq_uninit(&gh); 1523 gfs2_glock_dq_uninit(&gh);
1521 if (error && error != GLR_TRYFAILED && error != -EROFS) 1524 if (error && error != GLR_TRYFAILED && error != -EROFS)
@@ -1534,6 +1537,7 @@ out:
1534 ip->i_gl = NULL; 1537 ip->i_gl = NULL;
1535 if (ip->i_iopen_gh.gh_gl) { 1538 if (ip->i_iopen_gh.gh_gl) {
1536 ip->i_iopen_gh.gh_gl->gl_object = NULL; 1539 ip->i_iopen_gh.gh_gl->gl_object = NULL;
1540 ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
1537 gfs2_glock_dq_uninit(&ip->i_iopen_gh); 1541 gfs2_glock_dq_uninit(&ip->i_iopen_gh);
1538 } 1542 }
1539} 1543}
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 546f6d39713a..834ac13c04b7 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -33,25 +33,27 @@ static loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence)
33 if (whence == SEEK_DATA || whence == SEEK_HOLE) 33 if (whence == SEEK_DATA || whence == SEEK_HOLE)
34 return -EINVAL; 34 return -EINVAL;
35 35
36 mutex_lock(&i->i_mutex);
36 hpfs_lock(s); 37 hpfs_lock(s);
37 38
38 /*printk("dir lseek\n");*/ 39 /*printk("dir lseek\n");*/
39 if (new_off == 0 || new_off == 1 || new_off == 11 || new_off == 12 || new_off == 13) goto ok; 40 if (new_off == 0 || new_off == 1 || new_off == 11 || new_off == 12 || new_off == 13) goto ok;
40 mutex_lock(&i->i_mutex);
41 pos = ((loff_t) hpfs_de_as_down_as_possible(s, hpfs_inode->i_dno) << 4) + 1; 41 pos = ((loff_t) hpfs_de_as_down_as_possible(s, hpfs_inode->i_dno) << 4) + 1;
42 while (pos != new_off) { 42 while (pos != new_off) {
43 if (map_pos_dirent(i, &pos, &qbh)) hpfs_brelse4(&qbh); 43 if (map_pos_dirent(i, &pos, &qbh)) hpfs_brelse4(&qbh);
44 else goto fail; 44 else goto fail;
45 if (pos == 12) goto fail; 45 if (pos == 12) goto fail;
46 } 46 }
47 mutex_unlock(&i->i_mutex); 47 hpfs_add_pos(i, &filp->f_pos);
48ok: 48ok:
49 filp->f_pos = new_off;
49 hpfs_unlock(s); 50 hpfs_unlock(s);
50 return filp->f_pos = new_off;
51fail:
52 mutex_unlock(&i->i_mutex); 51 mutex_unlock(&i->i_mutex);
52 return new_off;
53fail:
53 /*printk("illegal lseek: %016llx\n", new_off);*/ 54 /*printk("illegal lseek: %016llx\n", new_off);*/
54 hpfs_unlock(s); 55 hpfs_unlock(s);
56 mutex_unlock(&i->i_mutex);
55 return -ESPIPE; 57 return -ESPIPE;
56} 58}
57 59
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 3027f4dbbab5..e4ba5fe4c3b5 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -109,10 +109,14 @@ static void hpfs_write_failed(struct address_space *mapping, loff_t to)
109{ 109{
110 struct inode *inode = mapping->host; 110 struct inode *inode = mapping->host;
111 111
112 hpfs_lock(inode->i_sb);
113
112 if (to > inode->i_size) { 114 if (to > inode->i_size) {
113 truncate_pagecache(inode, to, inode->i_size); 115 truncate_pagecache(inode, to, inode->i_size);
114 hpfs_truncate(inode); 116 hpfs_truncate(inode);
115 } 117 }
118
119 hpfs_unlock(inode->i_sb);
116} 120}
117 121
118static int hpfs_write_begin(struct file *file, struct address_space *mapping, 122static int hpfs_write_begin(struct file *file, struct address_space *mapping,
diff --git a/fs/internal.h b/fs/internal.h
index eaa75f75b625..68121584ae37 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -132,6 +132,12 @@ extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
132extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *); 132extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
133 133
134/* 134/*
135 * splice.c
136 */
137extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
138 loff_t *opos, size_t len, unsigned int flags);
139
140/*
135 * pipe.c 141 * pipe.c
136 */ 142 */
137extern const struct file_operations pipefifo_fops; 143extern const struct file_operations pipefifo_fops;
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index c57499dca89c..360d27c48887 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -2009,7 +2009,13 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
2009 2009
2010 bio->bi_end_io = lbmIODone; 2010 bio->bi_end_io = lbmIODone;
2011 bio->bi_private = bp; 2011 bio->bi_private = bp;
2012 submit_bio(READ_SYNC, bio); 2012 /*check if journaling to disk has been disabled*/
2013 if (log->no_integrity) {
2014 bio->bi_size = 0;
2015 lbmIODone(bio, 0);
2016 } else {
2017 submit_bio(READ_SYNC, bio);
2018 }
2013 2019
2014 wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD)); 2020 wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD));
2015 2021
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 2003e830ed1c..788e0a9c1fb0 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -611,11 +611,28 @@ static int jfs_freeze(struct super_block *sb)
611{ 611{
612 struct jfs_sb_info *sbi = JFS_SBI(sb); 612 struct jfs_sb_info *sbi = JFS_SBI(sb);
613 struct jfs_log *log = sbi->log; 613 struct jfs_log *log = sbi->log;
614 int rc = 0;
614 615
615 if (!(sb->s_flags & MS_RDONLY)) { 616 if (!(sb->s_flags & MS_RDONLY)) {
616 txQuiesce(sb); 617 txQuiesce(sb);
617 lmLogShutdown(log); 618 rc = lmLogShutdown(log);
618 updateSuper(sb, FM_CLEAN); 619 if (rc) {
620 jfs_error(sb, "jfs_freeze: lmLogShutdown failed");
621
622 /* let operations fail rather than hang */
623 txResume(sb);
624
625 return rc;
626 }
627 rc = updateSuper(sb, FM_CLEAN);
628 if (rc) {
629 jfs_err("jfs_freeze: updateSuper failed\n");
630 /*
631 * Don't fail here. Everything succeeded except
632 * marking the superblock clean, so there's really
633 * no harm in leaving it frozen for now.
634 */
635 }
619 } 636 }
620 return 0; 637 return 0;
621} 638}
@@ -627,13 +644,18 @@ static int jfs_unfreeze(struct super_block *sb)
627 int rc = 0; 644 int rc = 0;
628 645
629 if (!(sb->s_flags & MS_RDONLY)) { 646 if (!(sb->s_flags & MS_RDONLY)) {
630 updateSuper(sb, FM_MOUNT); 647 rc = updateSuper(sb, FM_MOUNT);
631 if ((rc = lmLogInit(log))) 648 if (rc) {
632 jfs_err("jfs_unlock failed with return code %d", rc); 649 jfs_error(sb, "jfs_unfreeze: updateSuper failed");
633 else 650 goto out;
634 txResume(sb); 651 }
652 rc = lmLogInit(log);
653 if (rc)
654 jfs_error(sb, "jfs_unfreeze: lmLogInit failed");
655out:
656 txResume(sb);
635 } 657 }
636 return 0; 658 return rc;
637} 659}
638 660
639static struct dentry *jfs_do_mount(struct file_system_type *fs_type, 661static struct dentry *jfs_do_mount(struct file_system_type *fs_type,
diff --git a/fs/namei.c b/fs/namei.c
index 85e40d1c0a8f..9ed9361223c0 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1976,7 +1976,7 @@ static int path_lookupat(int dfd, const char *name,
1976 err = complete_walk(nd); 1976 err = complete_walk(nd);
1977 1977
1978 if (!err && nd->flags & LOOKUP_DIRECTORY) { 1978 if (!err && nd->flags & LOOKUP_DIRECTORY) {
1979 if (!nd->inode->i_op->lookup) { 1979 if (!can_lookup(nd->inode)) {
1980 path_put(&nd->path); 1980 path_put(&nd->path);
1981 err = -ENOTDIR; 1981 err = -ENOTDIR;
1982 } 1982 }
@@ -2850,7 +2850,7 @@ finish_lookup:
2850 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode)) 2850 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
2851 goto out; 2851 goto out;
2852 error = -ENOTDIR; 2852 error = -ENOTDIR;
2853 if ((nd->flags & LOOKUP_DIRECTORY) && !nd->inode->i_op->lookup) 2853 if ((nd->flags & LOOKUP_DIRECTORY) && !can_lookup(nd->inode))
2854 goto out; 2854 goto out;
2855 audit_inode(name, nd->path.dentry, 0); 2855 audit_inode(name, nd->path.dentry, 0);
2856finish_open: 2856finish_open:
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 816326093656..6792ce11f2bf 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -1029,15 +1029,6 @@ static int ncp_rmdir(struct inode *dir, struct dentry *dentry)
1029 DPRINTK("ncp_rmdir: removing %s/%s\n", 1029 DPRINTK("ncp_rmdir: removing %s/%s\n",
1030 dentry->d_parent->d_name.name, dentry->d_name.name); 1030 dentry->d_parent->d_name.name, dentry->d_name.name);
1031 1031
1032 /*
1033 * fail with EBUSY if there are still references to this
1034 * directory.
1035 */
1036 dentry_unhash(dentry);
1037 error = -EBUSY;
1038 if (!d_unhashed(dentry))
1039 goto out;
1040
1041 len = sizeof(__name); 1032 len = sizeof(__name);
1042 error = ncp_io2vol(server, __name, &len, dentry->d_name.name, 1033 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
1043 dentry->d_name.len, !ncp_preserve_case(dir)); 1034 dentry->d_name.len, !ncp_preserve_case(dir));
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 4e2fe714d5c2..d7ba5616989c 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1078,7 +1078,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1078 struct nfs4_state *state = opendata->state; 1078 struct nfs4_state *state = opendata->state;
1079 struct nfs_inode *nfsi = NFS_I(state->inode); 1079 struct nfs_inode *nfsi = NFS_I(state->inode);
1080 struct nfs_delegation *delegation; 1080 struct nfs_delegation *delegation;
1081 int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC); 1081 int open_mode = opendata->o_arg.open_flags;
1082 fmode_t fmode = opendata->o_arg.fmode; 1082 fmode_t fmode = opendata->o_arg.fmode;
1083 nfs4_stateid stateid; 1083 nfs4_stateid stateid;
1084 int ret = -EAGAIN; 1084 int ret = -EAGAIN;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index a366107a7331..2d7525fbcf25 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1942,6 +1942,7 @@ static int nfs23_validate_mount_data(void *options,
1942 args->namlen = data->namlen; 1942 args->namlen = data->namlen;
1943 args->bsize = data->bsize; 1943 args->bsize = data->bsize;
1944 1944
1945 args->auth_flavors[0] = RPC_AUTH_UNIX;
1945 if (data->flags & NFS_MOUNT_SECFLAVOUR) 1946 if (data->flags & NFS_MOUNT_SECFLAVOUR)
1946 args->auth_flavors[0] = data->pseudoflavor; 1947 args->auth_flavors[0] = data->pseudoflavor;
1947 if (!args->nfs_server.hostname) 1948 if (!args->nfs_server.hostname)
@@ -2637,6 +2638,7 @@ static int nfs4_validate_mount_data(void *options,
2637 goto out_no_address; 2638 goto out_no_address;
2638 args->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port); 2639 args->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2639 2640
2641 args->auth_flavors[0] = RPC_AUTH_UNIX;
2640 if (data->auth_flavourlen) { 2642 if (data->auth_flavourlen) {
2641 if (data->auth_flavourlen > 1) 2643 if (data->auth_flavourlen > 1)
2642 goto out_inval_auth; 2644 goto out_inval_auth;
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index b3fdd1a323d6..e68588e6b1e8 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1408,6 +1408,7 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
1408 mres->lockname_len, mres->lockname); 1408 mres->lockname_len, mres->lockname);
1409 ret = -EFAULT; 1409 ret = -EFAULT;
1410 spin_unlock(&res->spinlock); 1410 spin_unlock(&res->spinlock);
1411 dlm_lockres_put(res);
1411 goto leave; 1412 goto leave;
1412 } 1413 }
1413 res->state |= DLM_LOCK_RES_MIGRATING; 1414 res->state |= DLM_LOCK_RES_MIGRATING;
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 04ee1b57c243..b4a5cdf9dbc5 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -947,7 +947,7 @@ leave:
947 ocfs2_free_dir_lookup_result(&orphan_insert); 947 ocfs2_free_dir_lookup_result(&orphan_insert);
948 ocfs2_free_dir_lookup_result(&lookup); 948 ocfs2_free_dir_lookup_result(&lookup);
949 949
950 if (status) 950 if (status && (status != -ENOTEMPTY))
951 mlog_errno(status); 951 mlog_errno(status);
952 952
953 return status; 953 return status;
@@ -2216,7 +2216,7 @@ out:
2216 2216
2217 brelse(orphan_dir_bh); 2217 brelse(orphan_dir_bh);
2218 2218
2219 return 0; 2219 return ret;
2220} 2220}
2221 2221
2222int ocfs2_create_inode_in_orphan(struct inode *dir, 2222int ocfs2_create_inode_in_orphan(struct inode *dir,
diff --git a/fs/pnode.c b/fs/pnode.c
index 3d2a7141b87a..9af0df15256e 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -83,7 +83,8 @@ static int do_make_slave(struct mount *mnt)
83 if (peer_mnt == mnt) 83 if (peer_mnt == mnt)
84 peer_mnt = NULL; 84 peer_mnt = NULL;
85 } 85 }
86 if (IS_MNT_SHARED(mnt) && list_empty(&mnt->mnt_share)) 86 if (mnt->mnt_group_id && IS_MNT_SHARED(mnt) &&
87 list_empty(&mnt->mnt_share))
87 mnt_release_group_id(mnt); 88 mnt_release_group_id(mnt);
88 89
89 list_del_init(&mnt->mnt_share); 90 list_del_init(&mnt->mnt_share);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index dd51e50001fe..c3834dad09b3 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2118,6 +2118,7 @@ static int show_timer(struct seq_file *m, void *v)
2118 nstr[notify & ~SIGEV_THREAD_ID], 2118 nstr[notify & ~SIGEV_THREAD_ID],
2119 (notify & SIGEV_THREAD_ID) ? "tid" : "pid", 2119 (notify & SIGEV_THREAD_ID) ? "tid" : "pid",
2120 pid_nr_ns(timer->it_pid, tp->ns)); 2120 pid_nr_ns(timer->it_pid, tp->ns));
2121 seq_printf(m, "ClockID: %d\n", timer->it_clock);
2121 2122
2122 return 0; 2123 return 0;
2123} 2124}
diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
index bd4b5a740ff1..bdfabdaefdce 100644
--- a/fs/proc/kmsg.c
+++ b/fs/proc/kmsg.c
@@ -21,12 +21,12 @@ extern wait_queue_head_t log_wait;
21 21
22static int kmsg_open(struct inode * inode, struct file * file) 22static int kmsg_open(struct inode * inode, struct file * file)
23{ 23{
24 return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE); 24 return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_PROC);
25} 25}
26 26
27static int kmsg_release(struct inode * inode, struct file * file) 27static int kmsg_release(struct inode * inode, struct file * file)
28{ 28{
29 (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE); 29 (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_PROC);
30 return 0; 30 return 0;
31} 31}
32 32
@@ -34,15 +34,15 @@ static ssize_t kmsg_read(struct file *file, char __user *buf,
34 size_t count, loff_t *ppos) 34 size_t count, loff_t *ppos)
35{ 35{
36 if ((file->f_flags & O_NONBLOCK) && 36 if ((file->f_flags & O_NONBLOCK) &&
37 !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE)) 37 !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC))
38 return -EAGAIN; 38 return -EAGAIN;
39 return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE); 39 return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_PROC);
40} 40}
41 41
42static unsigned int kmsg_poll(struct file *file, poll_table *wait) 42static unsigned int kmsg_poll(struct file *file, poll_table *wait)
43{ 43{
44 poll_wait(file, &log_wait, wait); 44 poll_wait(file, &log_wait, wait);
45 if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE)) 45 if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC))
46 return POLLIN | POLLRDNORM; 46 return POLLIN | POLLRDNORM;
47 return 0; 47 return 0;
48} 48}
diff --git a/fs/qnx6/dir.c b/fs/qnx6/dir.c
index 8798d065e400..afa6be6fc397 100644
--- a/fs/qnx6/dir.c
+++ b/fs/qnx6/dir.c
@@ -120,7 +120,7 @@ static int qnx6_readdir(struct file *filp, void *dirent, filldir_t filldir)
120 struct inode *inode = file_inode(filp); 120 struct inode *inode = file_inode(filp);
121 struct super_block *s = inode->i_sb; 121 struct super_block *s = inode->i_sb;
122 struct qnx6_sb_info *sbi = QNX6_SB(s); 122 struct qnx6_sb_info *sbi = QNX6_SB(s);
123 loff_t pos = filp->f_pos & (QNX6_DIR_ENTRY_SIZE - 1); 123 loff_t pos = filp->f_pos & ~(QNX6_DIR_ENTRY_SIZE - 1);
124 unsigned long npages = dir_pages(inode); 124 unsigned long npages = dir_pages(inode);
125 unsigned long n = pos >> PAGE_CACHE_SHIFT; 125 unsigned long n = pos >> PAGE_CACHE_SHIFT;
126 unsigned start = (pos & ~PAGE_CACHE_MASK) / QNX6_DIR_ENTRY_SIZE; 126 unsigned start = (pos & ~PAGE_CACHE_MASK) / QNX6_DIR_ENTRY_SIZE;
diff --git a/fs/read_write.c b/fs/read_write.c
index 03430008704e..2cefa417be34 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1064,6 +1064,7 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
1064 struct fd in, out; 1064 struct fd in, out;
1065 struct inode *in_inode, *out_inode; 1065 struct inode *in_inode, *out_inode;
1066 loff_t pos; 1066 loff_t pos;
1067 loff_t out_pos;
1067 ssize_t retval; 1068 ssize_t retval;
1068 int fl; 1069 int fl;
1069 1070
@@ -1077,12 +1078,14 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
1077 if (!(in.file->f_mode & FMODE_READ)) 1078 if (!(in.file->f_mode & FMODE_READ))
1078 goto fput_in; 1079 goto fput_in;
1079 retval = -ESPIPE; 1080 retval = -ESPIPE;
1080 if (!ppos) 1081 if (!ppos) {
1081 ppos = &in.file->f_pos; 1082 pos = in.file->f_pos;
1082 else 1083 } else {
1084 pos = *ppos;
1083 if (!(in.file->f_mode & FMODE_PREAD)) 1085 if (!(in.file->f_mode & FMODE_PREAD))
1084 goto fput_in; 1086 goto fput_in;
1085 retval = rw_verify_area(READ, in.file, ppos, count); 1087 }
1088 retval = rw_verify_area(READ, in.file, &pos, count);
1086 if (retval < 0) 1089 if (retval < 0)
1087 goto fput_in; 1090 goto fput_in;
1088 count = retval; 1091 count = retval;
@@ -1099,7 +1102,8 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
1099 retval = -EINVAL; 1102 retval = -EINVAL;
1100 in_inode = file_inode(in.file); 1103 in_inode = file_inode(in.file);
1101 out_inode = file_inode(out.file); 1104 out_inode = file_inode(out.file);
1102 retval = rw_verify_area(WRITE, out.file, &out.file->f_pos, count); 1105 out_pos = out.file->f_pos;
1106 retval = rw_verify_area(WRITE, out.file, &out_pos, count);
1103 if (retval < 0) 1107 if (retval < 0)
1104 goto fput_out; 1108 goto fput_out;
1105 count = retval; 1109 count = retval;
@@ -1107,7 +1111,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
1107 if (!max) 1111 if (!max)
1108 max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes); 1112 max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);
1109 1113
1110 pos = *ppos;
1111 if (unlikely(pos + count > max)) { 1114 if (unlikely(pos + count > max)) {
1112 retval = -EOVERFLOW; 1115 retval = -EOVERFLOW;
1113 if (pos >= max) 1116 if (pos >= max)
@@ -1126,18 +1129,23 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
1126 if (in.file->f_flags & O_NONBLOCK) 1129 if (in.file->f_flags & O_NONBLOCK)
1127 fl = SPLICE_F_NONBLOCK; 1130 fl = SPLICE_F_NONBLOCK;
1128#endif 1131#endif
1129 retval = do_splice_direct(in.file, ppos, out.file, count, fl); 1132 retval = do_splice_direct(in.file, &pos, out.file, &out_pos, count, fl);
1130 1133
1131 if (retval > 0) { 1134 if (retval > 0) {
1132 add_rchar(current, retval); 1135 add_rchar(current, retval);
1133 add_wchar(current, retval); 1136 add_wchar(current, retval);
1134 fsnotify_access(in.file); 1137 fsnotify_access(in.file);
1135 fsnotify_modify(out.file); 1138 fsnotify_modify(out.file);
1139 out.file->f_pos = out_pos;
1140 if (ppos)
1141 *ppos = pos;
1142 else
1143 in.file->f_pos = pos;
1136 } 1144 }
1137 1145
1138 inc_syscr(current); 1146 inc_syscr(current);
1139 inc_syscw(current); 1147 inc_syscw(current);
1140 if (*ppos > max) 1148 if (pos > max)
1141 retval = -EOVERFLOW; 1149 retval = -EOVERFLOW;
1142 1150
1143fput_out: 1151fput_out:
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index 66c53b642a88..6c2d136561cb 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -204,6 +204,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
204 next_pos = deh_offset(deh) + 1; 204 next_pos = deh_offset(deh) + 1;
205 205
206 if (item_moved(&tmp_ih, &path_to_entry)) { 206 if (item_moved(&tmp_ih, &path_to_entry)) {
207 set_cpu_key_k_offset(&pos_key,
208 next_pos);
207 goto research; 209 goto research;
208 } 210 }
209 } /* for */ 211 } /* for */
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 77d6d47abc83..f844533792ee 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1811,11 +1811,16 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
1811 TYPE_STAT_DATA, SD_SIZE, MAX_US_INT); 1811 TYPE_STAT_DATA, SD_SIZE, MAX_US_INT);
1812 memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE); 1812 memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE);
1813 args.dirid = le32_to_cpu(ih.ih_key.k_dir_id); 1813 args.dirid = le32_to_cpu(ih.ih_key.k_dir_id);
1814 if (insert_inode_locked4(inode, args.objectid, 1814
1815 reiserfs_find_actor, &args) < 0) { 1815 reiserfs_write_unlock(inode->i_sb);
1816 err = insert_inode_locked4(inode, args.objectid,
1817 reiserfs_find_actor, &args);
1818 reiserfs_write_lock(inode->i_sb);
1819 if (err) {
1816 err = -EINVAL; 1820 err = -EINVAL;
1817 goto out_bad_inode; 1821 goto out_bad_inode;
1818 } 1822 }
1823
1819 if (old_format_only(sb)) 1824 if (old_format_only(sb))
1820 /* not a perfect generation count, as object ids can be reused, but 1825 /* not a perfect generation count, as object ids can be reused, but
1821 ** this is as good as reiserfs can do right now. 1826 ** this is as good as reiserfs can do right now.
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 4cce1d9552fb..821bcf70e467 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -318,7 +318,19 @@ static int delete_one_xattr(struct dentry *dentry, void *data)
318static int chown_one_xattr(struct dentry *dentry, void *data) 318static int chown_one_xattr(struct dentry *dentry, void *data)
319{ 319{
320 struct iattr *attrs = data; 320 struct iattr *attrs = data;
321 return reiserfs_setattr(dentry, attrs); 321 int ia_valid = attrs->ia_valid;
322 int err;
323
324 /*
325 * We only want the ownership bits. Otherwise, we'll do
326 * things like change a directory to a regular file if
327 * ATTR_MODE is set.
328 */
329 attrs->ia_valid &= (ATTR_UID|ATTR_GID);
330 err = reiserfs_setattr(dentry, attrs);
331 attrs->ia_valid = ia_valid;
332
333 return err;
322} 334}
323 335
324/* No i_mutex, but the inode is unconnected. */ 336/* No i_mutex, but the inode is unconnected. */
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index d7c01ef64eda..6c8767fdfc6a 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -443,6 +443,9 @@ int reiserfs_acl_chmod(struct inode *inode)
443 int depth; 443 int depth;
444 int error; 444 int error;
445 445
446 if (IS_PRIVATE(inode))
447 return 0;
448
446 if (S_ISLNK(inode->i_mode)) 449 if (S_ISLNK(inode->i_mode))
447 return -EOPNOTSUPP; 450 return -EOPNOTSUPP;
448 451
diff --git a/fs/splice.c b/fs/splice.c
index e6b25598c8c4..d37431dd60a1 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1274,7 +1274,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
1274{ 1274{
1275 struct file *file = sd->u.file; 1275 struct file *file = sd->u.file;
1276 1276
1277 return do_splice_from(pipe, file, &file->f_pos, sd->total_len, 1277 return do_splice_from(pipe, file, sd->opos, sd->total_len,
1278 sd->flags); 1278 sd->flags);
1279} 1279}
1280 1280
@@ -1283,6 +1283,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
1283 * @in: file to splice from 1283 * @in: file to splice from
1284 * @ppos: input file offset 1284 * @ppos: input file offset
1285 * @out: file to splice to 1285 * @out: file to splice to
1286 * @opos: output file offset
1286 * @len: number of bytes to splice 1287 * @len: number of bytes to splice
1287 * @flags: splice modifier flags 1288 * @flags: splice modifier flags
1288 * 1289 *
@@ -1294,7 +1295,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
1294 * 1295 *
1295 */ 1296 */
1296long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, 1297long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
1297 size_t len, unsigned int flags) 1298 loff_t *opos, size_t len, unsigned int flags)
1298{ 1299{
1299 struct splice_desc sd = { 1300 struct splice_desc sd = {
1300 .len = len, 1301 .len = len,
@@ -1302,6 +1303,7 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
1302 .flags = flags, 1303 .flags = flags,
1303 .pos = *ppos, 1304 .pos = *ppos,
1304 .u.file = out, 1305 .u.file = out,
1306 .opos = opos,
1305 }; 1307 };
1306 long ret; 1308 long ret;
1307 1309
@@ -1325,7 +1327,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
1325{ 1327{
1326 struct pipe_inode_info *ipipe; 1328 struct pipe_inode_info *ipipe;
1327 struct pipe_inode_info *opipe; 1329 struct pipe_inode_info *opipe;
1328 loff_t offset, *off; 1330 loff_t offset;
1329 long ret; 1331 long ret;
1330 1332
1331 ipipe = get_pipe_info(in); 1333 ipipe = get_pipe_info(in);
@@ -1356,13 +1358,15 @@ static long do_splice(struct file *in, loff_t __user *off_in,
1356 return -EINVAL; 1358 return -EINVAL;
1357 if (copy_from_user(&offset, off_out, sizeof(loff_t))) 1359 if (copy_from_user(&offset, off_out, sizeof(loff_t)))
1358 return -EFAULT; 1360 return -EFAULT;
1359 off = &offset; 1361 } else {
1360 } else 1362 offset = out->f_pos;
1361 off = &out->f_pos; 1363 }
1362 1364
1363 ret = do_splice_from(ipipe, out, off, len, flags); 1365 ret = do_splice_from(ipipe, out, &offset, len, flags);
1364 1366
1365 if (off_out && copy_to_user(off_out, off, sizeof(loff_t))) 1367 if (!off_out)
1368 out->f_pos = offset;
1369 else if (copy_to_user(off_out, &offset, sizeof(loff_t)))
1366 ret = -EFAULT; 1370 ret = -EFAULT;
1367 1371
1368 return ret; 1372 return ret;
@@ -1376,13 +1380,15 @@ static long do_splice(struct file *in, loff_t __user *off_in,
1376 return -EINVAL; 1380 return -EINVAL;
1377 if (copy_from_user(&offset, off_in, sizeof(loff_t))) 1381 if (copy_from_user(&offset, off_in, sizeof(loff_t)))
1378 return -EFAULT; 1382 return -EFAULT;
1379 off = &offset; 1383 } else {
1380 } else 1384 offset = in->f_pos;
1381 off = &in->f_pos; 1385 }
1382 1386
1383 ret = do_splice_to(in, off, opipe, len, flags); 1387 ret = do_splice_to(in, &offset, opipe, len, flags);
1384 1388
1385 if (off_in && copy_to_user(off_in, off, sizeof(loff_t))) 1389 if (!off_in)
1390 in->f_pos = offset;
1391 else if (copy_to_user(off_in, &offset, sizeof(loff_t)))
1386 ret = -EFAULT; 1392 ret = -EFAULT;
1387 1393
1388 return ret; 1394 return ret;
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index de08c92f2e23..605af512aec2 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -349,31 +349,50 @@ static unsigned int vfs_dent_type(uint8_t type)
349static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) 349static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
350{ 350{
351 int err, over = 0; 351 int err, over = 0;
352 loff_t pos = file->f_pos;
352 struct qstr nm; 353 struct qstr nm;
353 union ubifs_key key; 354 union ubifs_key key;
354 struct ubifs_dent_node *dent; 355 struct ubifs_dent_node *dent;
355 struct inode *dir = file_inode(file); 356 struct inode *dir = file_inode(file);
356 struct ubifs_info *c = dir->i_sb->s_fs_info; 357 struct ubifs_info *c = dir->i_sb->s_fs_info;
357 358
358 dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, file->f_pos); 359 dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, pos);
359 360
360 if (file->f_pos > UBIFS_S_KEY_HASH_MASK || file->f_pos == 2) 361 if (pos > UBIFS_S_KEY_HASH_MASK || pos == 2)
361 /* 362 /*
362 * The directory was seek'ed to a senseless position or there 363 * The directory was seek'ed to a senseless position or there
363 * are no more entries. 364 * are no more entries.
364 */ 365 */
365 return 0; 366 return 0;
366 367
368 if (file->f_version == 0) {
369 /*
370 * The file was seek'ed, which means that @file->private_data
371 * is now invalid. This may also be just the first
372 * 'ubifs_readdir()' invocation, in which case
373 * @file->private_data is NULL, and the below code is
374 * basically a no-op.
375 */
376 kfree(file->private_data);
377 file->private_data = NULL;
378 }
379
380 /*
381 * 'generic_file_llseek()' unconditionally sets @file->f_version to
382 * zero, and we use this for detecting whether the file was seek'ed.
383 */
384 file->f_version = 1;
385
367 /* File positions 0 and 1 correspond to "." and ".." */ 386 /* File positions 0 and 1 correspond to "." and ".." */
368 if (file->f_pos == 0) { 387 if (pos == 0) {
369 ubifs_assert(!file->private_data); 388 ubifs_assert(!file->private_data);
370 over = filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR); 389 over = filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR);
371 if (over) 390 if (over)
372 return 0; 391 return 0;
373 file->f_pos = 1; 392 file->f_pos = pos = 1;
374 } 393 }
375 394
376 if (file->f_pos == 1) { 395 if (pos == 1) {
377 ubifs_assert(!file->private_data); 396 ubifs_assert(!file->private_data);
378 over = filldir(dirent, "..", 2, 1, 397 over = filldir(dirent, "..", 2, 1,
379 parent_ino(file->f_path.dentry), DT_DIR); 398 parent_ino(file->f_path.dentry), DT_DIR);
@@ -389,7 +408,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
389 goto out; 408 goto out;
390 } 409 }
391 410
392 file->f_pos = key_hash_flash(c, &dent->key); 411 file->f_pos = pos = key_hash_flash(c, &dent->key);
393 file->private_data = dent; 412 file->private_data = dent;
394 } 413 }
395 414
@@ -397,17 +416,16 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
397 if (!dent) { 416 if (!dent) {
398 /* 417 /*
399 * The directory was seek'ed to and is now readdir'ed. 418 * The directory was seek'ed to and is now readdir'ed.
400 * Find the entry corresponding to @file->f_pos or the 419 * Find the entry corresponding to @pos or the closest one.
401 * closest one.
402 */ 420 */
403 dent_key_init_hash(c, &key, dir->i_ino, file->f_pos); 421 dent_key_init_hash(c, &key, dir->i_ino, pos);
404 nm.name = NULL; 422 nm.name = NULL;
405 dent = ubifs_tnc_next_ent(c, &key, &nm); 423 dent = ubifs_tnc_next_ent(c, &key, &nm);
406 if (IS_ERR(dent)) { 424 if (IS_ERR(dent)) {
407 err = PTR_ERR(dent); 425 err = PTR_ERR(dent);
408 goto out; 426 goto out;
409 } 427 }
410 file->f_pos = key_hash_flash(c, &dent->key); 428 file->f_pos = pos = key_hash_flash(c, &dent->key);
411 file->private_data = dent; 429 file->private_data = dent;
412 } 430 }
413 431
@@ -419,7 +437,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
419 ubifs_inode(dir)->creat_sqnum); 437 ubifs_inode(dir)->creat_sqnum);
420 438
421 nm.len = le16_to_cpu(dent->nlen); 439 nm.len = le16_to_cpu(dent->nlen);
422 over = filldir(dirent, dent->name, nm.len, file->f_pos, 440 over = filldir(dirent, dent->name, nm.len, pos,
423 le64_to_cpu(dent->inum), 441 le64_to_cpu(dent->inum),
424 vfs_dent_type(dent->type)); 442 vfs_dent_type(dent->type));
425 if (over) 443 if (over)
@@ -435,9 +453,17 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
435 } 453 }
436 454
437 kfree(file->private_data); 455 kfree(file->private_data);
438 file->f_pos = key_hash_flash(c, &dent->key); 456 file->f_pos = pos = key_hash_flash(c, &dent->key);
439 file->private_data = dent; 457 file->private_data = dent;
440 cond_resched(); 458 cond_resched();
459
460 if (file->f_version == 0)
461 /*
462 * The file was seek'ed meanwhile, lets return and start
463 * reading direntries from the new position on the next
464 * invocation.
465 */
466 return 0;
441 } 467 }
442 468
443out: 469out:
@@ -448,15 +474,13 @@ out:
448 474
449 kfree(file->private_data); 475 kfree(file->private_data);
450 file->private_data = NULL; 476 file->private_data = NULL;
477 /* 2 is a special value indicating that there are no more direntries */
451 file->f_pos = 2; 478 file->f_pos = 2;
452 return 0; 479 return 0;
453} 480}
454 481
455/* If a directory is seeked, we have to free saved readdir() state */
456static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int whence) 482static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int whence)
457{ 483{
458 kfree(file->private_data);
459 file->private_data = NULL;
460 return generic_file_llseek(file, offset, whence); 484 return generic_file_llseek(file, offset, whence);
461} 485}
462 486
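The ubifs_readdir() rework above uses f_version as a seek detector: generic_file_llseek() zeroes it, readdir sets it to 1 before iterating, and a zero observed mid-loop means the directory was seeked concurrently, so the cached cursor must be discarded. A toy single-threaded simulation of that idea follows; the types and helpers are invented for illustration, and in the kernel the mid-loop check matters because a concurrent lseek can flip the counter.

	#include <stdio.h>
	#include <stdlib.h>

	struct dirfile {
		unsigned long f_version;
		void *private_data;	/* cached "next entry" cursor */
	};

	static void dir_llseek(struct dirfile *f)
	{
		f->f_version = 0;	/* mark: position changed behind readdir's back */
	}

	static void dir_readdir(struct dirfile *f, int entries)
	{
		if (f->f_version == 0) {
			free(f->private_data);	/* cursor no longer matches f_pos */
			f->private_data = NULL;
		}
		f->f_version = 1;

		for (int i = 0; i < entries; i++) {
			printf("emit entry %d\n", i);
			if (f->f_version == 0) {
				printf("seeked meanwhile, stop early\n");
				return;
			}
		}
	}

	int main(void)
	{
		struct dirfile f = { .f_version = 0, .private_data = NULL };

		dir_readdir(&f, 3);
		dir_llseek(&f);		/* invalidates the cached state */
		dir_readdir(&f, 3);
		return 0;
	}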
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 1d32f1d52763..306d883d89bc 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -21,6 +21,8 @@
21#include "xfs_bmap_btree.h" 21#include "xfs_bmap_btree.h"
22#include "xfs_inode.h" 22#include "xfs_inode.h"
23#include "xfs_vnodeops.h" 23#include "xfs_vnodeops.h"
24#include "xfs_sb.h"
25#include "xfs_mount.h"
24#include "xfs_trace.h" 26#include "xfs_trace.h"
25#include <linux/slab.h> 27#include <linux/slab.h>
26#include <linux/xattr.h> 28#include <linux/xattr.h>
@@ -34,7 +36,9 @@
34 */ 36 */
35 37
36STATIC struct posix_acl * 38STATIC struct posix_acl *
37xfs_acl_from_disk(struct xfs_acl *aclp) 39xfs_acl_from_disk(
40 struct xfs_acl *aclp,
41 int max_entries)
38{ 42{
39 struct posix_acl_entry *acl_e; 43 struct posix_acl_entry *acl_e;
40 struct posix_acl *acl; 44 struct posix_acl *acl;
@@ -42,7 +46,7 @@ xfs_acl_from_disk(struct xfs_acl *aclp)
42 unsigned int count, i; 46 unsigned int count, i;
43 47
44 count = be32_to_cpu(aclp->acl_cnt); 48 count = be32_to_cpu(aclp->acl_cnt);
45 if (count > XFS_ACL_MAX_ENTRIES) 49 if (count > max_entries)
46 return ERR_PTR(-EFSCORRUPTED); 50 return ERR_PTR(-EFSCORRUPTED);
47 51
48 acl = posix_acl_alloc(count, GFP_KERNEL); 52 acl = posix_acl_alloc(count, GFP_KERNEL);
@@ -108,9 +112,9 @@ xfs_get_acl(struct inode *inode, int type)
108 struct xfs_inode *ip = XFS_I(inode); 112 struct xfs_inode *ip = XFS_I(inode);
109 struct posix_acl *acl; 113 struct posix_acl *acl;
110 struct xfs_acl *xfs_acl; 114 struct xfs_acl *xfs_acl;
111 int len = sizeof(struct xfs_acl);
112 unsigned char *ea_name; 115 unsigned char *ea_name;
113 int error; 116 int error;
117 int len;
114 118
115 acl = get_cached_acl(inode, type); 119 acl = get_cached_acl(inode, type);
116 if (acl != ACL_NOT_CACHED) 120 if (acl != ACL_NOT_CACHED)
@@ -133,8 +137,8 @@ xfs_get_acl(struct inode *inode, int type)
133 * If we have a cached ACLs value just return it, not need to 137 * If we have a cached ACLs value just return it, not need to
134 * go out to the disk. 138 * go out to the disk.
135 */ 139 */
136 140 len = XFS_ACL_MAX_SIZE(ip->i_mount);
137 xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL); 141 xfs_acl = kzalloc(len, GFP_KERNEL);
138 if (!xfs_acl) 142 if (!xfs_acl)
139 return ERR_PTR(-ENOMEM); 143 return ERR_PTR(-ENOMEM);
140 144
@@ -153,7 +157,7 @@ xfs_get_acl(struct inode *inode, int type)
153 goto out; 157 goto out;
154 } 158 }
155 159
156 acl = xfs_acl_from_disk(xfs_acl); 160 acl = xfs_acl_from_disk(xfs_acl, XFS_ACL_MAX_ENTRIES(ip->i_mount));
157 if (IS_ERR(acl)) 161 if (IS_ERR(acl))
158 goto out; 162 goto out;
159 163
@@ -189,16 +193,17 @@ xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
189 193
190 if (acl) { 194 if (acl) {
191 struct xfs_acl *xfs_acl; 195 struct xfs_acl *xfs_acl;
192 int len; 196 int len = XFS_ACL_MAX_SIZE(ip->i_mount);
193 197
194 xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL); 198 xfs_acl = kzalloc(len, GFP_KERNEL);
195 if (!xfs_acl) 199 if (!xfs_acl)
196 return -ENOMEM; 200 return -ENOMEM;
197 201
198 xfs_acl_to_disk(xfs_acl, acl); 202 xfs_acl_to_disk(xfs_acl, acl);
199 len = sizeof(struct xfs_acl) - 203
200 (sizeof(struct xfs_acl_entry) * 204 /* subtract away the unused acl entries */
201 (XFS_ACL_MAX_ENTRIES - acl->a_count)); 205 len -= sizeof(struct xfs_acl_entry) *
206 (XFS_ACL_MAX_ENTRIES(ip->i_mount) - acl->a_count);
202 207
203 error = -xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl, 208 error = -xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl,
204 len, ATTR_ROOT); 209 len, ATTR_ROOT);
@@ -243,7 +248,7 @@ xfs_set_mode(struct inode *inode, umode_t mode)
243static int 248static int
244xfs_acl_exists(struct inode *inode, unsigned char *name) 249xfs_acl_exists(struct inode *inode, unsigned char *name)
245{ 250{
246 int len = sizeof(struct xfs_acl); 251 int len = XFS_ACL_MAX_SIZE(XFS_M(inode->i_sb));
247 252
248 return (xfs_attr_get(XFS_I(inode), name, NULL, &len, 253 return (xfs_attr_get(XFS_I(inode), name, NULL, &len,
249 ATTR_ROOT|ATTR_KERNOVAL) == 0); 254 ATTR_ROOT|ATTR_KERNOVAL) == 0);
@@ -379,7 +384,7 @@ xfs_xattr_acl_set(struct dentry *dentry, const char *name,
379 goto out_release; 384 goto out_release;
380 385
381 error = -EINVAL; 386 error = -EINVAL;
382 if (acl->a_count > XFS_ACL_MAX_ENTRIES) 387 if (acl->a_count > XFS_ACL_MAX_ENTRIES(XFS_M(inode->i_sb)))
383 goto out_release; 388 goto out_release;
384 389
385 if (type == ACL_TYPE_ACCESS) { 390 if (type == ACL_TYPE_ACCESS) {
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index 39632d941354..4016a567b83c 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -22,19 +22,36 @@ struct inode;
22struct posix_acl; 22struct posix_acl;
23struct xfs_inode; 23struct xfs_inode;
24 24
25#define XFS_ACL_MAX_ENTRIES 25
26#define XFS_ACL_NOT_PRESENT (-1) 25#define XFS_ACL_NOT_PRESENT (-1)
27 26
28/* On-disk XFS access control list structure */ 27/* On-disk XFS access control list structure */
28struct xfs_acl_entry {
29 __be32 ae_tag;
30 __be32 ae_id;
31 __be16 ae_perm;
32 __be16 ae_pad; /* fill the implicit hole in the structure */
33};
34
29struct xfs_acl { 35struct xfs_acl {
30 __be32 acl_cnt; 36 __be32 acl_cnt;
31 struct xfs_acl_entry { 37 struct xfs_acl_entry acl_entry[0];
32 __be32 ae_tag;
33 __be32 ae_id;
34 __be16 ae_perm;
35 } acl_entry[XFS_ACL_MAX_ENTRIES];
36}; 38};
37 39
40/*
41 * The number of ACL entries allowed is defined by the on-disk format.
42 * For v4 superblocks, that is limited to 25 entries. For v5 superblocks, it is
43 * limited only by the maximum size of the xattr that stores the information.
44 */
45#define XFS_ACL_MAX_ENTRIES(mp) \
46 (xfs_sb_version_hascrc(&mp->m_sb) \
47 ? (XATTR_SIZE_MAX - sizeof(struct xfs_acl)) / \
48 sizeof(struct xfs_acl_entry) \
49 : 25)
50
51#define XFS_ACL_MAX_SIZE(mp) \
52 (sizeof(struct xfs_acl) + \
53 sizeof(struct xfs_acl_entry) * XFS_ACL_MAX_ENTRIES((mp)))
54
38/* On-disk XFS extended attribute names */ 55/* On-disk XFS extended attribute names */
39#define SGI_ACL_FILE (unsigned char *)"SGI_ACL_FILE" 56#define SGI_ACL_FILE (unsigned char *)"SGI_ACL_FILE"
40#define SGI_ACL_DEFAULT (unsigned char *)"SGI_ACL_DEFAULT" 57#define SGI_ACL_DEFAULT (unsigned char *)"SGI_ACL_DEFAULT"
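With the new xfs_acl.h definitions, the ACL entry limit is no longer a fixed 25 on CRC-enabled (v5) superblocks but is derived from the largest xattr that can hold the ACL. The standalone sketch below reproduces that arithmetic, using host-endian stand-ins for the __be fields and the conventional XATTR_SIZE_MAX of 65536; the printed numbers are illustrative, not authoritative.

	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	#define XATTR_SIZE_MAX 65536

	struct xfs_acl_entry {
		uint32_t ae_tag;
		uint32_t ae_id;
		uint16_t ae_perm;
		uint16_t ae_pad;	/* fills the implicit hole */
	};

	struct xfs_acl {
		uint32_t acl_cnt;
		struct xfs_acl_entry acl_entry[];
	};

	static int acl_max_entries(int has_crc)
	{
		if (has_crc)
			return (XATTR_SIZE_MAX - sizeof(struct xfs_acl)) /
				sizeof(struct xfs_acl_entry);
		return 25;	/* historic v4 limit */
	}

	int main(void)
	{
		for (int crc = 0; crc <= 1; crc++) {
			int n = acl_max_entries(crc);
			size_t sz = sizeof(struct xfs_acl) +
				    sizeof(struct xfs_acl_entry) * n;
			printf("v%d sb: max %d entries, max %zu bytes\n",
			       crc ? 5 : 4, n, sz);
		}
		return 0;
	}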
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 0bce1b348580..31d3cd129269 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -1412,7 +1412,7 @@ xfs_attr3_leaf_add_work(
1412 name_rmt->valuelen = 0; 1412 name_rmt->valuelen = 0;
1413 name_rmt->valueblk = 0; 1413 name_rmt->valueblk = 0;
1414 args->rmtblkno = 1; 1414 args->rmtblkno = 1;
1415 args->rmtblkcnt = XFS_B_TO_FSB(mp, args->valuelen); 1415 args->rmtblkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
1416 } 1416 }
1417 xfs_trans_log_buf(args->trans, bp, 1417 xfs_trans_log_buf(args->trans, bp,
1418 XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index), 1418 XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index),
@@ -1445,11 +1445,12 @@ xfs_attr3_leaf_add_work(
1445STATIC void 1445STATIC void
1446xfs_attr3_leaf_compact( 1446xfs_attr3_leaf_compact(
1447 struct xfs_da_args *args, 1447 struct xfs_da_args *args,
1448 struct xfs_attr3_icleaf_hdr *ichdr_d, 1448 struct xfs_attr3_icleaf_hdr *ichdr_dst,
1449 struct xfs_buf *bp) 1449 struct xfs_buf *bp)
1450{ 1450{
1451 xfs_attr_leafblock_t *leaf_s, *leaf_d; 1451 struct xfs_attr_leafblock *leaf_src;
1452 struct xfs_attr3_icleaf_hdr ichdr_s; 1452 struct xfs_attr_leafblock *leaf_dst;
1453 struct xfs_attr3_icleaf_hdr ichdr_src;
1453 struct xfs_trans *trans = args->trans; 1454 struct xfs_trans *trans = args->trans;
1454 struct xfs_mount *mp = trans->t_mountp; 1455 struct xfs_mount *mp = trans->t_mountp;
1455 char *tmpbuffer; 1456 char *tmpbuffer;
@@ -1457,29 +1458,38 @@ xfs_attr3_leaf_compact(
1457 trace_xfs_attr_leaf_compact(args); 1458 trace_xfs_attr_leaf_compact(args);
1458 1459
1459 tmpbuffer = kmem_alloc(XFS_LBSIZE(mp), KM_SLEEP); 1460 tmpbuffer = kmem_alloc(XFS_LBSIZE(mp), KM_SLEEP);
1460 ASSERT(tmpbuffer != NULL);
1461 memcpy(tmpbuffer, bp->b_addr, XFS_LBSIZE(mp)); 1461 memcpy(tmpbuffer, bp->b_addr, XFS_LBSIZE(mp));
1462 memset(bp->b_addr, 0, XFS_LBSIZE(mp)); 1462 memset(bp->b_addr, 0, XFS_LBSIZE(mp));
1463 leaf_src = (xfs_attr_leafblock_t *)tmpbuffer;
1464 leaf_dst = bp->b_addr;
1463 1465
1464 /* 1466 /*
1465 * Copy basic information 1467 * Copy the on-disk header back into the destination buffer to ensure
1468 * all the information in the header that is not part of the incore
1469 * header structure is preserved.
1466 */ 1470 */
1467 leaf_s = (xfs_attr_leafblock_t *)tmpbuffer; 1471 memcpy(bp->b_addr, tmpbuffer, xfs_attr3_leaf_hdr_size(leaf_src));
1468 leaf_d = bp->b_addr; 1472
1469 ichdr_s = *ichdr_d; /* struct copy */ 1473 /* Initialise the incore headers */
1470 ichdr_d->firstused = XFS_LBSIZE(mp); 1474 ichdr_src = *ichdr_dst; /* struct copy */
1471 ichdr_d->usedbytes = 0; 1475 ichdr_dst->firstused = XFS_LBSIZE(mp);
1472 ichdr_d->count = 0; 1476 ichdr_dst->usedbytes = 0;
1473 ichdr_d->holes = 0; 1477 ichdr_dst->count = 0;
1474 ichdr_d->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_s); 1478 ichdr_dst->holes = 0;
1475 ichdr_d->freemap[0].size = ichdr_d->firstused - ichdr_d->freemap[0].base; 1479 ichdr_dst->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_src);
1480 ichdr_dst->freemap[0].size = ichdr_dst->firstused -
1481 ichdr_dst->freemap[0].base;
1482
1483
1484 /* write the header back to initialise the underlying buffer */
1485 xfs_attr3_leaf_hdr_to_disk(leaf_dst, ichdr_dst);
1476 1486
1477 /* 1487 /*
1478 * Copy all entry's in the same (sorted) order, 1488 * Copy all entry's in the same (sorted) order,
1479 * but allocate name/value pairs packed and in sequence. 1489 * but allocate name/value pairs packed and in sequence.
1480 */ 1490 */
1481 xfs_attr3_leaf_moveents(leaf_s, &ichdr_s, 0, leaf_d, ichdr_d, 0, 1491 xfs_attr3_leaf_moveents(leaf_src, &ichdr_src, 0, leaf_dst, ichdr_dst, 0,
1482 ichdr_s.count, mp); 1492 ichdr_src.count, mp);
1483 /* 1493 /*
1484 * this logs the entire buffer, but the caller must write the header 1494 * this logs the entire buffer, but the caller must write the header
1485 * back to the buffer when it is finished modifying it. 1495 * back to the buffer when it is finished modifying it.
@@ -2181,14 +2191,24 @@ xfs_attr3_leaf_unbalance(
2181 struct xfs_attr_leafblock *tmp_leaf; 2191 struct xfs_attr_leafblock *tmp_leaf;
2182 struct xfs_attr3_icleaf_hdr tmphdr; 2192 struct xfs_attr3_icleaf_hdr tmphdr;
2183 2193
2184 tmp_leaf = kmem_alloc(state->blocksize, KM_SLEEP); 2194 tmp_leaf = kmem_zalloc(state->blocksize, KM_SLEEP);
2185 memset(tmp_leaf, 0, state->blocksize);
2186 memset(&tmphdr, 0, sizeof(tmphdr));
2187 2195
2196 /*
2197 * Copy the header into the temp leaf so that all the stuff
2198 * not in the incore header is present and gets copied back in
2199 * once we've moved all the entries.
2200 */
2201 memcpy(tmp_leaf, save_leaf, xfs_attr3_leaf_hdr_size(save_leaf));
2202
2203 memset(&tmphdr, 0, sizeof(tmphdr));
2188 tmphdr.magic = savehdr.magic; 2204 tmphdr.magic = savehdr.magic;
2189 tmphdr.forw = savehdr.forw; 2205 tmphdr.forw = savehdr.forw;
2190 tmphdr.back = savehdr.back; 2206 tmphdr.back = savehdr.back;
2191 tmphdr.firstused = state->blocksize; 2207 tmphdr.firstused = state->blocksize;
2208
2209 /* write the header to the temp buffer to initialise it */
2210 xfs_attr3_leaf_hdr_to_disk(tmp_leaf, &tmphdr);
2211
2192 if (xfs_attr3_leaf_order(save_blk->bp, &savehdr, 2212 if (xfs_attr3_leaf_order(save_blk->bp, &savehdr,
2193 drop_blk->bp, &drophdr)) { 2213 drop_blk->bp, &drophdr)) {
2194 xfs_attr3_leaf_moveents(drop_leaf, &drophdr, 0, 2214 xfs_attr3_leaf_moveents(drop_leaf, &drophdr, 0,
@@ -2334,8 +2354,9 @@ xfs_attr3_leaf_lookup_int(
2334 args->index = probe; 2354 args->index = probe;
2335 args->valuelen = be32_to_cpu(name_rmt->valuelen); 2355 args->valuelen = be32_to_cpu(name_rmt->valuelen);
2336 args->rmtblkno = be32_to_cpu(name_rmt->valueblk); 2356 args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
2337 args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, 2357 args->rmtblkcnt = xfs_attr3_rmt_blocks(
2338 args->valuelen); 2358 args->dp->i_mount,
2359 args->valuelen);
2339 return XFS_ERROR(EEXIST); 2360 return XFS_ERROR(EEXIST);
2340 } 2361 }
2341 } 2362 }
@@ -2386,7 +2407,8 @@ xfs_attr3_leaf_getvalue(
2386 ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0); 2407 ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
2387 valuelen = be32_to_cpu(name_rmt->valuelen); 2408 valuelen = be32_to_cpu(name_rmt->valuelen);
2388 args->rmtblkno = be32_to_cpu(name_rmt->valueblk); 2409 args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
2389 args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, valuelen); 2410 args->rmtblkcnt = xfs_attr3_rmt_blocks(args->dp->i_mount,
2411 valuelen);
2390 if (args->flags & ATTR_KERNOVAL) { 2412 if (args->flags & ATTR_KERNOVAL) {
2391 args->valuelen = valuelen; 2413 args->valuelen = valuelen;
2392 return 0; 2414 return 0;
@@ -2712,7 +2734,8 @@ xfs_attr3_leaf_list_int(
2712 args.valuelen = valuelen; 2734 args.valuelen = valuelen;
2713 args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS); 2735 args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
2714 args.rmtblkno = be32_to_cpu(name_rmt->valueblk); 2736 args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
2715 args.rmtblkcnt = XFS_B_TO_FSB(args.dp->i_mount, valuelen); 2737 args.rmtblkcnt = xfs_attr3_rmt_blocks(
2738 args.dp->i_mount, valuelen);
2716 retval = xfs_attr_rmtval_get(&args); 2739 retval = xfs_attr_rmtval_get(&args);
2717 if (retval) 2740 if (retval)
2718 return retval; 2741 return retval;
@@ -3235,7 +3258,7 @@ xfs_attr3_leaf_inactive(
3235 name_rmt = xfs_attr3_leaf_name_remote(leaf, i); 3258 name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
3236 if (name_rmt->valueblk) { 3259 if (name_rmt->valueblk) {
3237 lp->valueblk = be32_to_cpu(name_rmt->valueblk); 3260 lp->valueblk = be32_to_cpu(name_rmt->valueblk);
3238 lp->valuelen = XFS_B_TO_FSB(dp->i_mount, 3261 lp->valuelen = xfs_attr3_rmt_blocks(dp->i_mount,
3239 be32_to_cpu(name_rmt->valuelen)); 3262 be32_to_cpu(name_rmt->valuelen));
3240 lp++; 3263 lp++;
3241 } 3264 }
diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h
index f9d7846097e2..444a7704596c 100644
--- a/fs/xfs/xfs_attr_leaf.h
+++ b/fs/xfs/xfs_attr_leaf.h
@@ -128,6 +128,7 @@ struct xfs_attr3_leaf_hdr {
128 __u8 holes; 128 __u8 holes;
129 __u8 pad1; 129 __u8 pad1;
130 struct xfs_attr_leaf_map freemap[XFS_ATTR_LEAF_MAPSIZE]; 130 struct xfs_attr_leaf_map freemap[XFS_ATTR_LEAF_MAPSIZE];
131 __be32 pad2; /* 64 bit alignment */
131}; 132};
132 133
133#define XFS_ATTR3_LEAF_CRC_OFF (offsetof(struct xfs_attr3_leaf_hdr, info.crc)) 134#define XFS_ATTR3_LEAF_CRC_OFF (offsetof(struct xfs_attr3_leaf_hdr, info.crc))
diff --git a/fs/xfs/xfs_attr_remote.c b/fs/xfs/xfs_attr_remote.c
index dee84466dcc9..ef6b0c124528 100644
--- a/fs/xfs/xfs_attr_remote.c
+++ b/fs/xfs/xfs_attr_remote.c
@@ -47,22 +47,55 @@
47 * Each contiguous block has a header, so it is not just a simple attribute 47 * Each contiguous block has a header, so it is not just a simple attribute
48 * length to FSB conversion. 48 * length to FSB conversion.
49 */ 49 */
50static int 50int
51xfs_attr3_rmt_blocks( 51xfs_attr3_rmt_blocks(
52 struct xfs_mount *mp, 52 struct xfs_mount *mp,
53 int attrlen) 53 int attrlen)
54{ 54{
55 int buflen = XFS_ATTR3_RMT_BUF_SPACE(mp, 55 if (xfs_sb_version_hascrc(&mp->m_sb)) {
56 mp->m_sb.sb_blocksize); 56 int buflen = XFS_ATTR3_RMT_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
57 return (attrlen + buflen - 1) / buflen; 57 return (attrlen + buflen - 1) / buflen;
58 }
59 return XFS_B_TO_FSB(mp, attrlen);
60}
61
62/*
63 * Checking of the remote attribute header is split into two parts. The verifier
64 * does CRC, location and bounds checking, the unpacking function checks the
65 * attribute parameters and owner.
66 */
67static bool
68xfs_attr3_rmt_hdr_ok(
69 struct xfs_mount *mp,
70 void *ptr,
71 xfs_ino_t ino,
72 uint32_t offset,
73 uint32_t size,
74 xfs_daddr_t bno)
75{
76 struct xfs_attr3_rmt_hdr *rmt = ptr;
77
78 if (bno != be64_to_cpu(rmt->rm_blkno))
79 return false;
80 if (offset != be32_to_cpu(rmt->rm_offset))
81 return false;
82 if (size != be32_to_cpu(rmt->rm_bytes))
83 return false;
84 if (ino != be64_to_cpu(rmt->rm_owner))
85 return false;
86
87 /* ok */
88 return true;
58} 89}
59 90
60static bool 91static bool
61xfs_attr3_rmt_verify( 92xfs_attr3_rmt_verify(
62 struct xfs_buf *bp) 93 struct xfs_mount *mp,
94 void *ptr,
95 int fsbsize,
96 xfs_daddr_t bno)
63{ 97{
64 struct xfs_mount *mp = bp->b_target->bt_mount; 98 struct xfs_attr3_rmt_hdr *rmt = ptr;
65 struct xfs_attr3_rmt_hdr *rmt = bp->b_addr;
66 99
67 if (!xfs_sb_version_hascrc(&mp->m_sb)) 100 if (!xfs_sb_version_hascrc(&mp->m_sb))
68 return false; 101 return false;
@@ -70,7 +103,9 @@ xfs_attr3_rmt_verify(
70 return false; 103 return false;
71 if (!uuid_equal(&rmt->rm_uuid, &mp->m_sb.sb_uuid)) 104 if (!uuid_equal(&rmt->rm_uuid, &mp->m_sb.sb_uuid))
72 return false; 105 return false;
73 if (bp->b_bn != be64_to_cpu(rmt->rm_blkno)) 106 if (be64_to_cpu(rmt->rm_blkno) != bno)
107 return false;
108 if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt))
74 return false; 109 return false;
75 if (be32_to_cpu(rmt->rm_offset) + 110 if (be32_to_cpu(rmt->rm_offset) +
76 be32_to_cpu(rmt->rm_bytes) >= XATTR_SIZE_MAX) 111 be32_to_cpu(rmt->rm_bytes) >= XATTR_SIZE_MAX)
@@ -86,17 +121,40 @@ xfs_attr3_rmt_read_verify(
86 struct xfs_buf *bp) 121 struct xfs_buf *bp)
87{ 122{
88 struct xfs_mount *mp = bp->b_target->bt_mount; 123 struct xfs_mount *mp = bp->b_target->bt_mount;
124 char *ptr;
125 int len;
126 bool corrupt = false;
127 xfs_daddr_t bno;
89 128
90 /* no verification of non-crc buffers */ 129 /* no verification of non-crc buffers */
91 if (!xfs_sb_version_hascrc(&mp->m_sb)) 130 if (!xfs_sb_version_hascrc(&mp->m_sb))
92 return; 131 return;
93 132
94 if (!xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length), 133 ptr = bp->b_addr;
95 XFS_ATTR3_RMT_CRC_OFF) || 134 bno = bp->b_bn;
96 !xfs_attr3_rmt_verify(bp)) { 135 len = BBTOB(bp->b_length);
136 ASSERT(len >= XFS_LBSIZE(mp));
137
138 while (len > 0) {
139 if (!xfs_verify_cksum(ptr, XFS_LBSIZE(mp),
140 XFS_ATTR3_RMT_CRC_OFF)) {
141 corrupt = true;
142 break;
143 }
144 if (!xfs_attr3_rmt_verify(mp, ptr, XFS_LBSIZE(mp), bno)) {
145 corrupt = true;
146 break;
147 }
148 len -= XFS_LBSIZE(mp);
149 ptr += XFS_LBSIZE(mp);
150 bno += mp->m_bsize;
151 }
152
153 if (corrupt) {
97 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr); 154 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
98 xfs_buf_ioerror(bp, EFSCORRUPTED); 155 xfs_buf_ioerror(bp, EFSCORRUPTED);
99 } 156 } else
157 ASSERT(len == 0);
100} 158}
101 159
102static void 160static void
@@ -105,23 +163,39 @@ xfs_attr3_rmt_write_verify(
105{ 163{
106 struct xfs_mount *mp = bp->b_target->bt_mount; 164 struct xfs_mount *mp = bp->b_target->bt_mount;
107 struct xfs_buf_log_item *bip = bp->b_fspriv; 165 struct xfs_buf_log_item *bip = bp->b_fspriv;
166 char *ptr;
167 int len;
168 xfs_daddr_t bno;
108 169
109 /* no verification of non-crc buffers */ 170 /* no verification of non-crc buffers */
110 if (!xfs_sb_version_hascrc(&mp->m_sb)) 171 if (!xfs_sb_version_hascrc(&mp->m_sb))
111 return; 172 return;
112 173
113 if (!xfs_attr3_rmt_verify(bp)) { 174 ptr = bp->b_addr;
114 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr); 175 bno = bp->b_bn;
115 xfs_buf_ioerror(bp, EFSCORRUPTED); 176 len = BBTOB(bp->b_length);
116 return; 177 ASSERT(len >= XFS_LBSIZE(mp));
117 } 178
179 while (len > 0) {
180 if (!xfs_attr3_rmt_verify(mp, ptr, XFS_LBSIZE(mp), bno)) {
181 XFS_CORRUPTION_ERROR(__func__,
182 XFS_ERRLEVEL_LOW, mp, bp->b_addr);
183 xfs_buf_ioerror(bp, EFSCORRUPTED);
184 return;
185 }
186 if (bip) {
187 struct xfs_attr3_rmt_hdr *rmt;
188
189 rmt = (struct xfs_attr3_rmt_hdr *)ptr;
190 rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn);
191 }
192 xfs_update_cksum(ptr, XFS_LBSIZE(mp), XFS_ATTR3_RMT_CRC_OFF);
118 193
119 if (bip) { 194 len -= XFS_LBSIZE(mp);
120 struct xfs_attr3_rmt_hdr *rmt = bp->b_addr; 195 ptr += XFS_LBSIZE(mp);
121 rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn); 196 bno += mp->m_bsize;
122 } 197 }
123 xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length), 198 ASSERT(len == 0);
124 XFS_ATTR3_RMT_CRC_OFF);
125} 199}
126 200
127const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = { 201const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
@@ -129,15 +203,16 @@ const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
129 .verify_write = xfs_attr3_rmt_write_verify, 203 .verify_write = xfs_attr3_rmt_write_verify,
130}; 204};
131 205
132static int 206STATIC int
133xfs_attr3_rmt_hdr_set( 207xfs_attr3_rmt_hdr_set(
134 struct xfs_mount *mp, 208 struct xfs_mount *mp,
209 void *ptr,
135 xfs_ino_t ino, 210 xfs_ino_t ino,
136 uint32_t offset, 211 uint32_t offset,
137 uint32_t size, 212 uint32_t size,
138 struct xfs_buf *bp) 213 xfs_daddr_t bno)
139{ 214{
140 struct xfs_attr3_rmt_hdr *rmt = bp->b_addr; 215 struct xfs_attr3_rmt_hdr *rmt = ptr;
141 216
142 if (!xfs_sb_version_hascrc(&mp->m_sb)) 217 if (!xfs_sb_version_hascrc(&mp->m_sb))
143 return 0; 218 return 0;
@@ -147,36 +222,107 @@ xfs_attr3_rmt_hdr_set(
147 rmt->rm_bytes = cpu_to_be32(size); 222 rmt->rm_bytes = cpu_to_be32(size);
148 uuid_copy(&rmt->rm_uuid, &mp->m_sb.sb_uuid); 223 uuid_copy(&rmt->rm_uuid, &mp->m_sb.sb_uuid);
149 rmt->rm_owner = cpu_to_be64(ino); 224 rmt->rm_owner = cpu_to_be64(ino);
150 rmt->rm_blkno = cpu_to_be64(bp->b_bn); 225 rmt->rm_blkno = cpu_to_be64(bno);
151 bp->b_ops = &xfs_attr3_rmt_buf_ops;
152 226
153 return sizeof(struct xfs_attr3_rmt_hdr); 227 return sizeof(struct xfs_attr3_rmt_hdr);
154} 228}
155 229
156/* 230/*
157 * Checking of the remote attribute header is split into two parts. the verifier 231 * Helper functions to copy attribute data in and out of the one disk extents
158 * does CRC, location and bounds checking, the unpacking function checks the
159 * attribute parameters and owner.
160 */ 232 */
161static bool 233STATIC int
162xfs_attr3_rmt_hdr_ok( 234xfs_attr_rmtval_copyout(
163 struct xfs_mount *mp, 235 struct xfs_mount *mp,
164 xfs_ino_t ino, 236 struct xfs_buf *bp,
165 uint32_t offset, 237 xfs_ino_t ino,
166 uint32_t size, 238 int *offset,
167 struct xfs_buf *bp) 239 int *valuelen,
240 char **dst)
168{ 241{
169 struct xfs_attr3_rmt_hdr *rmt = bp->b_addr; 242 char *src = bp->b_addr;
243 xfs_daddr_t bno = bp->b_bn;
244 int len = BBTOB(bp->b_length);
170 245
171 if (offset != be32_to_cpu(rmt->rm_offset)) 246 ASSERT(len >= XFS_LBSIZE(mp));
172 return false;
173 if (size != be32_to_cpu(rmt->rm_bytes))
174 return false;
175 if (ino != be64_to_cpu(rmt->rm_owner))
176 return false;
177 247
178 /* ok */ 248 while (len > 0 && *valuelen > 0) {
179 return true; 249 int hdr_size = 0;
250 int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, XFS_LBSIZE(mp));
251
252 byte_cnt = min_t(int, *valuelen, byte_cnt);
253
254 if (xfs_sb_version_hascrc(&mp->m_sb)) {
255 if (!xfs_attr3_rmt_hdr_ok(mp, src, ino, *offset,
256 byte_cnt, bno)) {
257 xfs_alert(mp,
258"remote attribute header mismatch bno/off/len/owner (0x%llx/0x%x/Ox%x/0x%llx)",
259 bno, *offset, byte_cnt, ino);
260 return EFSCORRUPTED;
261 }
262 hdr_size = sizeof(struct xfs_attr3_rmt_hdr);
263 }
264
265 memcpy(*dst, src + hdr_size, byte_cnt);
266
267 /* roll buffer forwards */
268 len -= XFS_LBSIZE(mp);
269 src += XFS_LBSIZE(mp);
270 bno += mp->m_bsize;
271
272 /* roll attribute data forwards */
273 *valuelen -= byte_cnt;
274 *dst += byte_cnt;
275 *offset += byte_cnt;
276 }
277 return 0;
278}
279
280STATIC void
281xfs_attr_rmtval_copyin(
282 struct xfs_mount *mp,
283 struct xfs_buf *bp,
284 xfs_ino_t ino,
285 int *offset,
286 int *valuelen,
287 char **src)
288{
289 char *dst = bp->b_addr;
290 xfs_daddr_t bno = bp->b_bn;
291 int len = BBTOB(bp->b_length);
292
293 ASSERT(len >= XFS_LBSIZE(mp));
294
295 while (len > 0 && *valuelen > 0) {
296 int hdr_size;
297 int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, XFS_LBSIZE(mp));
298
299 byte_cnt = min(*valuelen, byte_cnt);
300 hdr_size = xfs_attr3_rmt_hdr_set(mp, dst, ino, *offset,
301 byte_cnt, bno);
302
303 memcpy(dst + hdr_size, *src, byte_cnt);
304
305 /*
306 * If this is the last block, zero the remainder of it.
307 * Check that we are actually the last block, too.
308 */
309 if (byte_cnt + hdr_size < XFS_LBSIZE(mp)) {
310 ASSERT(*valuelen - byte_cnt == 0);
311 ASSERT(len == XFS_LBSIZE(mp));
312 memset(dst + hdr_size + byte_cnt, 0,
313 XFS_LBSIZE(mp) - hdr_size - byte_cnt);
314 }
315
316 /* roll buffer forwards */
317 len -= XFS_LBSIZE(mp);
318 dst += XFS_LBSIZE(mp);
319 bno += mp->m_bsize;
320
321 /* roll attribute data forwards */
322 *valuelen -= byte_cnt;
323 *src += byte_cnt;
324 *offset += byte_cnt;
325 }
180} 326}
181 327
182/* 328/*
@@ -190,13 +336,12 @@ xfs_attr_rmtval_get(
190 struct xfs_bmbt_irec map[ATTR_RMTVALUE_MAPSIZE]; 336 struct xfs_bmbt_irec map[ATTR_RMTVALUE_MAPSIZE];
191 struct xfs_mount *mp = args->dp->i_mount; 337 struct xfs_mount *mp = args->dp->i_mount;
192 struct xfs_buf *bp; 338 struct xfs_buf *bp;
193 xfs_daddr_t dblkno;
194 xfs_dablk_t lblkno = args->rmtblkno; 339 xfs_dablk_t lblkno = args->rmtblkno;
195 void *dst = args->value; 340 char *dst = args->value;
196 int valuelen = args->valuelen; 341 int valuelen = args->valuelen;
197 int nmap; 342 int nmap;
198 int error; 343 int error;
199 int blkcnt; 344 int blkcnt = args->rmtblkcnt;
200 int i; 345 int i;
201 int offset = 0; 346 int offset = 0;
202 347
@@ -207,52 +352,36 @@ xfs_attr_rmtval_get(
207 while (valuelen > 0) { 352 while (valuelen > 0) {
208 nmap = ATTR_RMTVALUE_MAPSIZE; 353 nmap = ATTR_RMTVALUE_MAPSIZE;
209 error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno, 354 error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
210 args->rmtblkcnt, map, &nmap, 355 blkcnt, map, &nmap,
211 XFS_BMAPI_ATTRFORK); 356 XFS_BMAPI_ATTRFORK);
212 if (error) 357 if (error)
213 return error; 358 return error;
214 ASSERT(nmap >= 1); 359 ASSERT(nmap >= 1);
215 360
216 for (i = 0; (i < nmap) && (valuelen > 0); i++) { 361 for (i = 0; (i < nmap) && (valuelen > 0); i++) {
217 int byte_cnt; 362 xfs_daddr_t dblkno;
218 char *src; 363 int dblkcnt;
219 364
220 ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) && 365 ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) &&
221 (map[i].br_startblock != HOLESTARTBLOCK)); 366 (map[i].br_startblock != HOLESTARTBLOCK));
222 dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock); 367 dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
223 blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount); 368 dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
224 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, 369 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
225 dblkno, blkcnt, 0, &bp, 370 dblkno, dblkcnt, 0, &bp,
226 &xfs_attr3_rmt_buf_ops); 371 &xfs_attr3_rmt_buf_ops);
227 if (error) 372 if (error)
228 return error; 373 return error;
229 374
230 byte_cnt = min_t(int, valuelen, BBTOB(bp->b_length)); 375 error = xfs_attr_rmtval_copyout(mp, bp, args->dp->i_ino,
231 byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, byte_cnt); 376 &offset, &valuelen,
232 377 &dst);
233 src = bp->b_addr;
234 if (xfs_sb_version_hascrc(&mp->m_sb)) {
235 if (!xfs_attr3_rmt_hdr_ok(mp, args->dp->i_ino,
236 offset, byte_cnt, bp)) {
237 xfs_alert(mp,
238"remote attribute header does not match required off/len/owner (0x%x/Ox%x,0x%llx)",
239 offset, byte_cnt, args->dp->i_ino);
240 xfs_buf_relse(bp);
241 return EFSCORRUPTED;
242
243 }
244
245 src += sizeof(struct xfs_attr3_rmt_hdr);
246 }
247
248 memcpy(dst, src, byte_cnt);
249 xfs_buf_relse(bp); 378 xfs_buf_relse(bp);
379 if (error)
380 return error;
250 381
251 offset += byte_cnt; 382 /* roll attribute extent map forwards */
252 dst += byte_cnt;
253 valuelen -= byte_cnt;
254
255 lblkno += map[i].br_blockcount; 383 lblkno += map[i].br_blockcount;
384 blkcnt -= map[i].br_blockcount;
256 } 385 }
257 } 386 }
258 ASSERT(valuelen == 0); 387 ASSERT(valuelen == 0);
@@ -270,17 +399,13 @@ xfs_attr_rmtval_set(
270 struct xfs_inode *dp = args->dp; 399 struct xfs_inode *dp = args->dp;
271 struct xfs_mount *mp = dp->i_mount; 400 struct xfs_mount *mp = dp->i_mount;
272 struct xfs_bmbt_irec map; 401 struct xfs_bmbt_irec map;
273 struct xfs_buf *bp;
274 xfs_daddr_t dblkno;
275 xfs_dablk_t lblkno; 402 xfs_dablk_t lblkno;
276 xfs_fileoff_t lfileoff = 0; 403 xfs_fileoff_t lfileoff = 0;
277 void *src = args->value; 404 char *src = args->value;
278 int blkcnt; 405 int blkcnt;
279 int valuelen; 406 int valuelen;
280 int nmap; 407 int nmap;
281 int error; 408 int error;
282 int hdrcnt = 0;
283 bool crcs = xfs_sb_version_hascrc(&mp->m_sb);
284 int offset = 0; 409 int offset = 0;
285 410
286 trace_xfs_attr_rmtval_set(args); 411 trace_xfs_attr_rmtval_set(args);
@@ -289,24 +414,14 @@ xfs_attr_rmtval_set(
289 * Find a "hole" in the attribute address space large enough for 414 * Find a "hole" in the attribute address space large enough for
290 * us to drop the new attribute's value into. Because CRC enable 415 * us to drop the new attribute's value into. Because CRC enable
291 * attributes have headers, we can't just do a straight byte to FSB 416 * attributes have headers, we can't just do a straight byte to FSB
292 * conversion. We calculate the worst case block count in this case 417 * conversion and have to take the header space into account.
293 * and we may not need that many, so we have to handle this when
294 * allocating the blocks below.
295 */ 418 */
296 if (!crcs) 419 blkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
297 blkcnt = XFS_B_TO_FSB(mp, args->valuelen);
298 else
299 blkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
300
301 error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff, 420 error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff,
302 XFS_ATTR_FORK); 421 XFS_ATTR_FORK);
303 if (error) 422 if (error)
304 return error; 423 return error;
305 424
306 /* Start with the attribute data. We'll allocate the rest afterwards. */
307 if (crcs)
308 blkcnt = XFS_B_TO_FSB(mp, args->valuelen);
309
310 args->rmtblkno = lblkno = (xfs_dablk_t)lfileoff; 425 args->rmtblkno = lblkno = (xfs_dablk_t)lfileoff;
311 args->rmtblkcnt = blkcnt; 426 args->rmtblkcnt = blkcnt;
312 427
@@ -349,26 +464,6 @@ xfs_attr_rmtval_set(
349 (map.br_startblock != HOLESTARTBLOCK)); 464 (map.br_startblock != HOLESTARTBLOCK));
350 lblkno += map.br_blockcount; 465 lblkno += map.br_blockcount;
351 blkcnt -= map.br_blockcount; 466 blkcnt -= map.br_blockcount;
352 hdrcnt++;
353
354 /*
355 * If we have enough blocks for the attribute data, calculate
356 * how many extra blocks we need for headers. We might run
357 * through this multiple times in the case that the additional
358 * headers in the blocks needed for the data fragments spills
359 * into requiring more blocks. e.g. for 512 byte blocks, we'll
360 * spill for another block every 9 headers we require in this
361 * loop.
362 */
363 if (crcs && blkcnt == 0) {
364 int total_len;
365
366 total_len = args->valuelen +
367 hdrcnt * sizeof(struct xfs_attr3_rmt_hdr);
368 blkcnt = XFS_B_TO_FSB(mp, total_len);
369 blkcnt -= args->rmtblkcnt;
370 args->rmtblkcnt += blkcnt;
371 }
372 467
373 /* 468 /*
374 * Start the next trans in the chain. 469 * Start the next trans in the chain.
@@ -385,18 +480,19 @@ xfs_attr_rmtval_set(
385 * the INCOMPLETE flag. 480 * the INCOMPLETE flag.
386 */ 481 */
387 lblkno = args->rmtblkno; 482 lblkno = args->rmtblkno;
483 blkcnt = args->rmtblkcnt;
388 valuelen = args->valuelen; 484 valuelen = args->valuelen;
389 while (valuelen > 0) { 485 while (valuelen > 0) {
390 int byte_cnt; 486 struct xfs_buf *bp;
391 char *buf; 487 xfs_daddr_t dblkno;
488 int dblkcnt;
489
490 ASSERT(blkcnt > 0);
392 491
393 /*
394 * Try to remember where we decided to put the value.
395 */
396 xfs_bmap_init(args->flist, args->firstblock); 492 xfs_bmap_init(args->flist, args->firstblock);
397 nmap = 1; 493 nmap = 1;
398 error = xfs_bmapi_read(dp, (xfs_fileoff_t)lblkno, 494 error = xfs_bmapi_read(dp, (xfs_fileoff_t)lblkno,
399 args->rmtblkcnt, &map, &nmap, 495 blkcnt, &map, &nmap,
400 XFS_BMAPI_ATTRFORK); 496 XFS_BMAPI_ATTRFORK);
401 if (error) 497 if (error)
402 return(error); 498 return(error);
@@ -405,41 +501,27 @@ xfs_attr_rmtval_set(
405 (map.br_startblock != HOLESTARTBLOCK)); 501 (map.br_startblock != HOLESTARTBLOCK));
406 502
407 dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock), 503 dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
408 blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount); 504 dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
409 505
410 bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt, 0); 506 bp = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, 0);
411 if (!bp) 507 if (!bp)
412 return ENOMEM; 508 return ENOMEM;
413 bp->b_ops = &xfs_attr3_rmt_buf_ops; 509 bp->b_ops = &xfs_attr3_rmt_buf_ops;
414 510
415 byte_cnt = BBTOB(bp->b_length); 511 xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset,
416 byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, byte_cnt); 512 &valuelen, &src);
417 if (valuelen < byte_cnt)
418 byte_cnt = valuelen;
419
420 buf = bp->b_addr;
421 buf += xfs_attr3_rmt_hdr_set(mp, dp->i_ino, offset,
422 byte_cnt, bp);
423 memcpy(buf, src, byte_cnt);
424
425 if (byte_cnt < BBTOB(bp->b_length))
426 xfs_buf_zero(bp, byte_cnt,
427 BBTOB(bp->b_length) - byte_cnt);
428 513
429 error = xfs_bwrite(bp); /* GROT: NOTE: synchronous write */ 514 error = xfs_bwrite(bp); /* GROT: NOTE: synchronous write */
430 xfs_buf_relse(bp); 515 xfs_buf_relse(bp);
431 if (error) 516 if (error)
432 return error; 517 return error;
433 518
434 src += byte_cnt;
435 valuelen -= byte_cnt;
436 offset += byte_cnt;
437 hdrcnt--;
438 519
520 /* roll attribute extent map forwards */
439 lblkno += map.br_blockcount; 521 lblkno += map.br_blockcount;
522 blkcnt -= map.br_blockcount;
440 } 523 }
441 ASSERT(valuelen == 0); 524 ASSERT(valuelen == 0);
442 ASSERT(hdrcnt == 0);
443 return 0; 525 return 0;
444} 526}
445 527
@@ -448,33 +530,40 @@ xfs_attr_rmtval_set(
448 * out-of-line buffer that it is stored on. 530 * out-of-line buffer that it is stored on.
449 */ 531 */
450int 532int
451xfs_attr_rmtval_remove(xfs_da_args_t *args) 533xfs_attr_rmtval_remove(
534 struct xfs_da_args *args)
452{ 535{
453 xfs_mount_t *mp; 536 struct xfs_mount *mp = args->dp->i_mount;
454 xfs_bmbt_irec_t map; 537 xfs_dablk_t lblkno;
455 xfs_buf_t *bp; 538 int blkcnt;
456 xfs_daddr_t dblkno; 539 int error;
457 xfs_dablk_t lblkno; 540 int done;
458 int valuelen, blkcnt, nmap, error, done, committed;
459 541
460 trace_xfs_attr_rmtval_remove(args); 542 trace_xfs_attr_rmtval_remove(args);
461 543
462 mp = args->dp->i_mount;
463
464 /* 544 /*
465 * Roll through the "value", invalidating the attribute value's 545 * Roll through the "value", invalidating the attribute value's blocks.
466 * blocks. 546 * Note that args->rmtblkcnt is the minimum number of data blocks we'll
547 * see for a CRC enabled remote attribute. Each extent will have a
548 * header, and so we may have more blocks than we realise here. If we
549 * fail to map the blocks correctly, we'll have problems with the buffer
550 * lookups.
467 */ 551 */
468 lblkno = args->rmtblkno; 552 lblkno = args->rmtblkno;
469 valuelen = args->rmtblkcnt; 553 blkcnt = args->rmtblkcnt;
470 while (valuelen > 0) { 554 while (blkcnt > 0) {
555 struct xfs_bmbt_irec map;
556 struct xfs_buf *bp;
557 xfs_daddr_t dblkno;
558 int dblkcnt;
559 int nmap;
560
471 /* 561 /*
472 * Try to remember where we decided to put the value. 562 * Try to remember where we decided to put the value.
473 */ 563 */
474 nmap = 1; 564 nmap = 1;
475 error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno, 565 error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
476 args->rmtblkcnt, &map, &nmap, 566 blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK);
477 XFS_BMAPI_ATTRFORK);
478 if (error) 567 if (error)
479 return(error); 568 return(error);
480 ASSERT(nmap == 1); 569 ASSERT(nmap == 1);
@@ -482,21 +571,20 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
482 (map.br_startblock != HOLESTARTBLOCK)); 571 (map.br_startblock != HOLESTARTBLOCK));
483 572
484 dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock), 573 dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
485 blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount); 574 dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
486 575
487 /* 576 /*
488 * If the "remote" value is in the cache, remove it. 577 * If the "remote" value is in the cache, remove it.
489 */ 578 */
490 bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt, XBF_TRYLOCK); 579 bp = xfs_incore(mp->m_ddev_targp, dblkno, dblkcnt, XBF_TRYLOCK);
491 if (bp) { 580 if (bp) {
492 xfs_buf_stale(bp); 581 xfs_buf_stale(bp);
493 xfs_buf_relse(bp); 582 xfs_buf_relse(bp);
494 bp = NULL; 583 bp = NULL;
495 } 584 }
496 585
497 valuelen -= map.br_blockcount;
498
499 lblkno += map.br_blockcount; 586 lblkno += map.br_blockcount;
587 blkcnt -= map.br_blockcount;
500 } 588 }
501 589
502 /* 590 /*
@@ -506,6 +594,8 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
506 blkcnt = args->rmtblkcnt; 594 blkcnt = args->rmtblkcnt;
507 done = 0; 595 done = 0;
508 while (!done) { 596 while (!done) {
597 int committed;
598
509 xfs_bmap_init(args->flist, args->firstblock); 599 xfs_bmap_init(args->flist, args->firstblock);
510 error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt, 600 error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
511 XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, 601 XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
diff --git a/fs/xfs/xfs_attr_remote.h b/fs/xfs/xfs_attr_remote.h
index c7cca60a062a..92a8fd7977cc 100644
--- a/fs/xfs/xfs_attr_remote.h
+++ b/fs/xfs/xfs_attr_remote.h
@@ -20,6 +20,14 @@
20 20
21#define XFS_ATTR3_RMT_MAGIC 0x5841524d /* XARM */ 21#define XFS_ATTR3_RMT_MAGIC 0x5841524d /* XARM */
22 22
23/*
24 * There is one of these headers per filesystem block in a remote attribute.
25 * This is done to ensure there is a 1:1 mapping between the attribute value
26 * length and the number of blocks needed to store the attribute. This makes the
27 * verification of a buffer a little more complex, but greatly simplifies the
28 * allocation, reading and writing of these attributes as we don't have to guess
29 * the number of blocks needed to store the attribute data.
30 */
23struct xfs_attr3_rmt_hdr { 31struct xfs_attr3_rmt_hdr {
24 __be32 rm_magic; 32 __be32 rm_magic;
25 __be32 rm_offset; 33 __be32 rm_offset;
@@ -39,6 +47,8 @@ struct xfs_attr3_rmt_hdr {
39 47
40extern const struct xfs_buf_ops xfs_attr3_rmt_buf_ops; 48extern const struct xfs_buf_ops xfs_attr3_rmt_buf_ops;
41 49
50int xfs_attr3_rmt_blocks(struct xfs_mount *mp, int attrlen);
51
42int xfs_attr_rmtval_get(struct xfs_da_args *args); 52int xfs_attr_rmtval_get(struct xfs_da_args *args);
43int xfs_attr_rmtval_set(struct xfs_da_args *args); 53int xfs_attr_rmtval_set(struct xfs_da_args *args);
44int xfs_attr_rmtval_remove(struct xfs_da_args *args); 54int xfs_attr_rmtval_remove(struct xfs_da_args *args);
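The remote attribute rework above gives every filesystem block of a CRC-enabled remote attribute its own header, so xfs_attr3_rmt_blocks() becomes a ceiling division over the per-block payload rather than a plain bytes-to-blocks conversion. A small arithmetic sketch follows, using example sizes (4096-byte blocks, a 56-byte header) rather than the real on-disk constants.

	#include <stdio.h>

	static int rmt_blocks(int attrlen, int blocksize, int hdrsize, int has_crc)
	{
		if (has_crc) {
			int payload = blocksize - hdrsize;	/* usable bytes per block */
			return (attrlen + payload - 1) / payload;
		}
		/* no CRCs: plain bytes-to-blocks conversion */
		return (attrlen + blocksize - 1) / blocksize;
	}

	int main(void)
	{
		int len = 12200;
		printf("v4: %d blocks, v5: %d blocks\n",
		       rmt_blocks(len, 4096, 0, 0),	/* 3 blocks of raw data */
		       rmt_blocks(len, 4096, 56, 1));	/* 4 blocks once headers eat space */
		return 0;
	}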
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 8804b8a3c310..0903960410a2 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -2544,7 +2544,17 @@ xfs_btree_new_iroot(
2544 if (error) 2544 if (error)
2545 goto error0; 2545 goto error0;
2546 2546
2547 /*
2548 * we can't just memcpy() the root in for CRC enabled btree blocks.
2549 * In that case have to also ensure the blkno remains correct
2550 */
2547 memcpy(cblock, block, xfs_btree_block_len(cur)); 2551 memcpy(cblock, block, xfs_btree_block_len(cur));
2552 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
2553 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
2554 cblock->bb_u.l.bb_blkno = cpu_to_be64(cbp->b_bn);
2555 else
2556 cblock->bb_u.s.bb_blkno = cpu_to_be64(cbp->b_bn);
2557 }
2548 2558
2549 be16_add_cpu(&block->bb_level, 1); 2559 be16_add_cpu(&block->bb_level, 1);
2550 xfs_btree_set_numrecs(block, 1); 2560 xfs_btree_set_numrecs(block, 1);
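The xfs_btree_new_iroot() hunk copies the old root into the new child block and then rewrites bb_blkno for CRC-enabled trees, because self-describing blocks record the disk address they live at and a verifier would otherwise reject the copy. A toy illustration of that copy-then-fix-up step, with invented types:

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	struct demo_block {
		uint64_t bb_blkno;	/* where this block claims to live */
		char payload[32];
	};

	int main(void)
	{
		struct demo_block root = { .bb_blkno = 100, .payload = "root keys" };
		struct demo_block child;
		uint64_t new_daddr = 2048;

		memcpy(&child, &root, sizeof(child));	/* copies the stale blkno too */
		child.bb_blkno = new_daddr;		/* must be corrected for CRC blocks */

		printf("root at %llu, copy now claims %llu\n",
		       (unsigned long long)root.bb_blkno,
		       (unsigned long long)child.bb_blkno);
		return 0;
	}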
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 0d2554299688..1b2472a46e46 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -513,6 +513,7 @@ _xfs_buf_find(
513 xfs_alert(btp->bt_mount, 513 xfs_alert(btp->bt_mount,
514 "%s: Block out of range: block 0x%llx, EOFS 0x%llx ", 514 "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
515 __func__, blkno, eofs); 515 __func__, blkno, eofs);
516 WARN_ON(1);
516 return NULL; 517 return NULL;
517 } 518 }
518 519
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index cf263476d6b4..4ec431777048 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -262,12 +262,7 @@ xfs_buf_item_format_segment(
262 vecp->i_addr = xfs_buf_offset(bp, buffer_offset); 262 vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
263 vecp->i_len = nbits * XFS_BLF_CHUNK; 263 vecp->i_len = nbits * XFS_BLF_CHUNK;
264 vecp->i_type = XLOG_REG_TYPE_BCHUNK; 264 vecp->i_type = XLOG_REG_TYPE_BCHUNK;
265/* 265 nvecs++;
266 * You would think we need to bump the nvecs here too, but we do not
267 * this number is used by recovery, and it gets confused by the boundary
268 * split here
269 * nvecs++;
270 */
271 vecp++; 266 vecp++;
272 first_bit = next_bit; 267 first_bit = next_bit;
273 last_bit = next_bit; 268 last_bit = next_bit;
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index f852b082a084..c407e1ccff43 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -219,6 +219,14 @@ xfs_swap_extents(
219 int taforkblks = 0; 219 int taforkblks = 0;
220 __uint64_t tmp; 220 __uint64_t tmp;
221 221
222 /*
223 * We have no way of updating owner information in the BMBT blocks for
224 * each inode on CRC enabled filesystems, so to avoid corrupting the
225 * this metadata we simply don't allow extent swaps to occur.
226 */
227 if (xfs_sb_version_hascrc(&mp->m_sb))
228 return XFS_ERROR(EINVAL);
229
222 tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL); 230 tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
223 if (!tempifp) { 231 if (!tempifp) {
224 error = XFS_ERROR(ENOMEM); 232 error = XFS_ERROR(ENOMEM);
diff --git a/fs/xfs/xfs_dir2_format.h b/fs/xfs/xfs_dir2_format.h
index a3b1bd841a80..7826782b8d78 100644
--- a/fs/xfs/xfs_dir2_format.h
+++ b/fs/xfs/xfs_dir2_format.h
@@ -266,6 +266,7 @@ struct xfs_dir3_blk_hdr {
266struct xfs_dir3_data_hdr { 266struct xfs_dir3_data_hdr {
267 struct xfs_dir3_blk_hdr hdr; 267 struct xfs_dir3_blk_hdr hdr;
268 xfs_dir2_data_free_t best_free[XFS_DIR2_DATA_FD_COUNT]; 268 xfs_dir2_data_free_t best_free[XFS_DIR2_DATA_FD_COUNT];
269 __be32 pad; /* 64 bit alignment */
269}; 270};
270 271
271#define XFS_DIR3_DATA_CRC_OFF offsetof(struct xfs_dir3_data_hdr, hdr.crc) 272#define XFS_DIR3_DATA_CRC_OFF offsetof(struct xfs_dir3_data_hdr, hdr.crc)
@@ -477,7 +478,7 @@ struct xfs_dir3_leaf_hdr {
477 struct xfs_da3_blkinfo info; /* header for da routines */ 478 struct xfs_da3_blkinfo info; /* header for da routines */
478 __be16 count; /* count of entries */ 479 __be16 count; /* count of entries */
479 __be16 stale; /* count of stale entries */ 480 __be16 stale; /* count of stale entries */
480 __be32 pad; 481 __be32 pad; /* 64 bit alignment */
481}; 482};
482 483
483struct xfs_dir3_icleaf_hdr { 484struct xfs_dir3_icleaf_hdr {
@@ -715,6 +716,7 @@ struct xfs_dir3_free_hdr {
715 __be32 firstdb; /* db of first entry */ 716 __be32 firstdb; /* db of first entry */
716 __be32 nvalid; /* count of valid entries */ 717 __be32 nvalid; /* count of valid entries */
717 __be32 nused; /* count of used entries */ 718 __be32 nused; /* count of used entries */
719 __be32 pad; /* 64 bit alignment */
718}; 720};
719 721
720struct xfs_dir3_free { 722struct xfs_dir3_free {
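The pad fields added to these directory headers make the 64-bit alignment explicit instead of leaving it to compiler tail padding, which differs between ABIs. The sketch below shows the effect with simplified, hypothetical layouts: on common 64-bit ABIs both structs are 24 bytes (the first hides 4 bytes of implicit padding), while on i386, where 64-bit fields are 4-byte aligned, the unpadded one would shrink to 20 bytes, which is exactly what an on-disk format must avoid.

	#include <stdio.h>
	#include <stdint.h>

	struct hdr_unpadded {
		uint64_t blkno;
		uint32_t magic;
		uint16_t count;
		uint16_t stale;
		uint32_t firstdb;
	};			/* 20 bytes of fields, any padding is implicit */

	struct hdr_padded {
		uint64_t blkno;
		uint32_t magic;
		uint16_t count;
		uint16_t stale;
		uint32_t firstdb;
		uint32_t pad;	/* explicit 64 bit alignment */
	};			/* 24 bytes of fields, nothing hidden */

	int main(void)
	{
		printf("unpadded: %zu bytes, padded: %zu bytes\n",
		       sizeof(struct hdr_unpadded), sizeof(struct hdr_padded));
		return 0;
	}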
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index 5246de4912d4..2226a00acd15 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -263,18 +263,19 @@ xfs_dir3_free_get_buf(
263 * Initialize the new block to be empty, and remember 263 * Initialize the new block to be empty, and remember
264 * its first slot as our empty slot. 264 * its first slot as our empty slot.
265 */ 265 */
266 hdr.magic = XFS_DIR2_FREE_MAGIC; 266 memset(bp->b_addr, 0, sizeof(struct xfs_dir3_free_hdr));
267 hdr.firstdb = 0; 267 memset(&hdr, 0, sizeof(hdr));
268 hdr.nused = 0; 268
269 hdr.nvalid = 0;
270 if (xfs_sb_version_hascrc(&mp->m_sb)) { 269 if (xfs_sb_version_hascrc(&mp->m_sb)) {
271 struct xfs_dir3_free_hdr *hdr3 = bp->b_addr; 270 struct xfs_dir3_free_hdr *hdr3 = bp->b_addr;
272 271
273 hdr.magic = XFS_DIR3_FREE_MAGIC; 272 hdr.magic = XFS_DIR3_FREE_MAGIC;
273
274 hdr3->hdr.blkno = cpu_to_be64(bp->b_bn); 274 hdr3->hdr.blkno = cpu_to_be64(bp->b_bn);
275 hdr3->hdr.owner = cpu_to_be64(dp->i_ino); 275 hdr3->hdr.owner = cpu_to_be64(dp->i_ino);
276 uuid_copy(&hdr3->hdr.uuid, &mp->m_sb.sb_uuid); 276 uuid_copy(&hdr3->hdr.uuid, &mp->m_sb.sb_uuid);
277 } 277 } else
278 hdr.magic = XFS_DIR2_FREE_MAGIC;
278 xfs_dir3_free_hdr_to_disk(bp->b_addr, &hdr); 279 xfs_dir3_free_hdr_to_disk(bp->b_addr, &hdr);
279 *bpp = bp; 280 *bpp = bp;
280 return 0; 281 return 0;
@@ -1921,8 +1922,6 @@ xfs_dir2_node_addname_int(
1921 */ 1922 */
1922 freehdr.firstdb = (fbno - XFS_DIR2_FREE_FIRSTDB(mp)) * 1923 freehdr.firstdb = (fbno - XFS_DIR2_FREE_FIRSTDB(mp)) *
1923 xfs_dir3_free_max_bests(mp); 1924 xfs_dir3_free_max_bests(mp);
1924 free->hdr.nvalid = 0;
1925 free->hdr.nused = 0;
1926 } else { 1925 } else {
1927 free = fbp->b_addr; 1926 free = fbp->b_addr;
1928 bests = xfs_dir3_free_bests_p(mp, free); 1927 bests = xfs_dir3_free_bests_p(mp, free);
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index a41f8bf1da37..044e97a33c8d 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -249,8 +249,11 @@ xfs_qm_init_dquot_blk(
249 d->dd_diskdq.d_version = XFS_DQUOT_VERSION; 249 d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
250 d->dd_diskdq.d_id = cpu_to_be32(curid); 250 d->dd_diskdq.d_id = cpu_to_be32(curid);
251 d->dd_diskdq.d_flags = type; 251 d->dd_diskdq.d_flags = type;
252 if (xfs_sb_version_hascrc(&mp->m_sb)) 252 if (xfs_sb_version_hascrc(&mp->m_sb)) {
253 uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid); 253 uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
254 xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
255 XFS_DQUOT_CRC_OFF);
256 }
254 } 257 }
255 258
256 xfs_trans_dquot_buf(tp, bp, 259 xfs_trans_dquot_buf(tp, bp,
@@ -286,23 +289,6 @@ xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
286 dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5; 289 dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
287} 290}
288 291
289STATIC void
290xfs_dquot_buf_calc_crc(
291 struct xfs_mount *mp,
292 struct xfs_buf *bp)
293{
294 struct xfs_dqblk *d = (struct xfs_dqblk *)bp->b_addr;
295 int i;
296
297 if (!xfs_sb_version_hascrc(&mp->m_sb))
298 return;
299
300 for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++, d++) {
301 xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
302 offsetof(struct xfs_dqblk, dd_crc));
303 }
304}
305
306STATIC bool 292STATIC bool
307xfs_dquot_buf_verify_crc( 293xfs_dquot_buf_verify_crc(
308 struct xfs_mount *mp, 294 struct xfs_mount *mp,
@@ -328,12 +314,11 @@ xfs_dquot_buf_verify_crc(
328 314
329 for (i = 0; i < ndquots; i++, d++) { 315 for (i = 0; i < ndquots; i++, d++) {
330 if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk), 316 if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
331 offsetof(struct xfs_dqblk, dd_crc))) 317 XFS_DQUOT_CRC_OFF))
332 return false; 318 return false;
333 if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid)) 319 if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
334 return false; 320 return false;
335 } 321 }
336
337 return true; 322 return true;
338} 323}
339 324
@@ -393,6 +378,11 @@ xfs_dquot_buf_read_verify(
393 } 378 }
394} 379}
395 380
381/*
382 * we don't calculate the CRC here as that is done when the dquot is flushed to
383 * the buffer after the update is done. This ensures that the dquot in the
384 * buffer always has an up-to-date CRC value.
385 */
396void 386void
397xfs_dquot_buf_write_verify( 387xfs_dquot_buf_write_verify(
398 struct xfs_buf *bp) 388 struct xfs_buf *bp)
@@ -404,7 +394,6 @@ xfs_dquot_buf_write_verify(
404 xfs_buf_ioerror(bp, EFSCORRUPTED); 394 xfs_buf_ioerror(bp, EFSCORRUPTED);
405 return; 395 return;
406 } 396 }
407 xfs_dquot_buf_calc_crc(mp, bp);
408} 397}
409 398
410const struct xfs_buf_ops xfs_dquot_buf_ops = { 399const struct xfs_buf_ops xfs_dquot_buf_ops = {
@@ -1151,11 +1140,17 @@ xfs_qm_dqflush(
1151 * copy the lsn into the on-disk dquot now while we have the in memory 1140 * copy the lsn into the on-disk dquot now while we have the in memory
1152 * dquot here. This can't be done later in the write verifier as we 1141 * dquot here. This can't be done later in the write verifier as we
1153 * can't get access to the log item at that point in time. 1142 * can't get access to the log item at that point in time.
1143 *
1144 * We also calculate the CRC here so that the on-disk dquot in the
1145 * buffer always has a valid CRC. This ensures there is no possibility
1146 * of a dquot without an up-to-date CRC getting to disk.
1154 */ 1147 */
1155 if (xfs_sb_version_hascrc(&mp->m_sb)) { 1148 if (xfs_sb_version_hascrc(&mp->m_sb)) {
1156 struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp; 1149 struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;
1157 1150
1158 dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn); 1151 dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
1152 xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
1153 XFS_DQUOT_CRC_OFF);
1159 } 1154 }
1160 1155
1161 /* 1156 /*
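
The hunks above move the dquot CRC calculation out of the write verifier and into xfs_qm_dqflush(), so the on-disk copy is checksummed only after the LSN has been stamped into it. A condensed sketch of that flush-time ordering, using only helpers already visible in this patch (locking and log-item plumbing omitted; "lsn" stands in for the log item's li_lsn):

	/* flush time: stamp the LSN first, then checksum the whole dquot block */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;

		dqb->dd_lsn = cpu_to_be64(lsn);
		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

Doing both steps in one place means the buffer can never reach disk with an LSN that is not covered by the CRC.
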
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index 6dda3f949b04..d04695545397 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -236,6 +236,7 @@ typedef struct xfs_fsop_resblks {
236#define XFS_FSOP_GEOM_FLAGS_PROJID32 0x0800 /* 32-bit project IDs */ 236#define XFS_FSOP_GEOM_FLAGS_PROJID32 0x0800 /* 32-bit project IDs */
237#define XFS_FSOP_GEOM_FLAGS_DIRV2CI 0x1000 /* ASCII only CI names */ 237#define XFS_FSOP_GEOM_FLAGS_DIRV2CI 0x1000 /* ASCII only CI names */
238#define XFS_FSOP_GEOM_FLAGS_LAZYSB 0x4000 /* lazy superblock counters */ 238#define XFS_FSOP_GEOM_FLAGS_LAZYSB 0x4000 /* lazy superblock counters */
239#define XFS_FSOP_GEOM_FLAGS_V5SB 0x8000 /* version 5 superblock */
239 240
240 241
241/* 242/*
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 87595b211da1..3c3644ea825b 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -99,7 +99,9 @@ xfs_fs_geometry(
99 (xfs_sb_version_hasattr2(&mp->m_sb) ? 99 (xfs_sb_version_hasattr2(&mp->m_sb) ?
100 XFS_FSOP_GEOM_FLAGS_ATTR2 : 0) | 100 XFS_FSOP_GEOM_FLAGS_ATTR2 : 0) |
101 (xfs_sb_version_hasprojid32bit(&mp->m_sb) ? 101 (xfs_sb_version_hasprojid32bit(&mp->m_sb) ?
102 XFS_FSOP_GEOM_FLAGS_PROJID32 : 0); 102 XFS_FSOP_GEOM_FLAGS_PROJID32 : 0) |
103 (xfs_sb_version_hascrc(&mp->m_sb) ?
104 XFS_FSOP_GEOM_FLAGS_V5SB : 0);
103 geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ? 105 geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
104 mp->m_sb.sb_logsectsize : BBSIZE; 106 mp->m_sb.sb_logsectsize : BBSIZE;
105 geo->rtsectsize = mp->m_sb.sb_blocksize; 107 geo->rtsectsize = mp->m_sb.sb_blocksize;
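
With the new XFS_FSOP_GEOM_FLAGS_V5SB bit reported by xfs_fs_geometry(), userspace can tell whether a mounted filesystem uses a version 5 (CRC-enabled) superblock. A hedged userspace probe; the header path and ioctl name follow xfsprogs conventions and may differ between versions:

	#include <stdio.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <xfs/xfs.h>	/* xfsprogs: XFS_IOC_FSGEOMETRY, struct xfs_fsop_geom */

	int main(int argc, char **argv)
	{
		struct xfs_fsop_geom geo;
		int fd = open(argc > 1 ? argv[1] : ".", O_RDONLY);

		if (fd < 0 || ioctl(fd, XFS_IOC_FSGEOMETRY, &geo) < 0) {
			perror("XFS_IOC_FSGEOMETRY");
			return 1;
		}
		printf("V5 (CRC) superblock: %s\n",
		       (geo.flags & XFS_FSOP_GEOM_FLAGS_V5SB) ? "yes" : "no");
		return 0;
	}
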
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index efbe1accb6ca..7f7be5f98f52 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1638,6 +1638,10 @@ xfs_iunlink(
1638 dip->di_next_unlinked = agi->agi_unlinked[bucket_index]; 1638 dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
1639 offset = ip->i_imap.im_boffset + 1639 offset = ip->i_imap.im_boffset +
1640 offsetof(xfs_dinode_t, di_next_unlinked); 1640 offsetof(xfs_dinode_t, di_next_unlinked);
1641
1642 /* need to recalc the inode CRC if appropriate */
1643 xfs_dinode_calc_crc(mp, dip);
1644
1641 xfs_trans_inode_buf(tp, ibp); 1645 xfs_trans_inode_buf(tp, ibp);
1642 xfs_trans_log_buf(tp, ibp, offset, 1646 xfs_trans_log_buf(tp, ibp, offset,
1643 (offset + sizeof(xfs_agino_t) - 1)); 1647 (offset + sizeof(xfs_agino_t) - 1));
@@ -1723,6 +1727,10 @@ xfs_iunlink_remove(
1723 dip->di_next_unlinked = cpu_to_be32(NULLAGINO); 1727 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
1724 offset = ip->i_imap.im_boffset + 1728 offset = ip->i_imap.im_boffset +
1725 offsetof(xfs_dinode_t, di_next_unlinked); 1729 offsetof(xfs_dinode_t, di_next_unlinked);
1730
1731 /* need to recalc the inode CRC if appropriate */
1732 xfs_dinode_calc_crc(mp, dip);
1733
1726 xfs_trans_inode_buf(tp, ibp); 1734 xfs_trans_inode_buf(tp, ibp);
1727 xfs_trans_log_buf(tp, ibp, offset, 1735 xfs_trans_log_buf(tp, ibp, offset,
1728 (offset + sizeof(xfs_agino_t) - 1)); 1736 (offset + sizeof(xfs_agino_t) - 1));
@@ -1796,6 +1804,10 @@ xfs_iunlink_remove(
1796 dip->di_next_unlinked = cpu_to_be32(NULLAGINO); 1804 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
1797 offset = ip->i_imap.im_boffset + 1805 offset = ip->i_imap.im_boffset +
1798 offsetof(xfs_dinode_t, di_next_unlinked); 1806 offsetof(xfs_dinode_t, di_next_unlinked);
1807
1808 /* need to recalc the inode CRC if appropriate */
1809 xfs_dinode_calc_crc(mp, dip);
1810
1799 xfs_trans_inode_buf(tp, ibp); 1811 xfs_trans_inode_buf(tp, ibp);
1800 xfs_trans_log_buf(tp, ibp, offset, 1812 xfs_trans_log_buf(tp, ibp, offset,
1801 (offset + sizeof(xfs_agino_t) - 1)); 1813 (offset + sizeof(xfs_agino_t) - 1));
@@ -1809,6 +1821,10 @@ xfs_iunlink_remove(
1809 last_dip->di_next_unlinked = cpu_to_be32(next_agino); 1821 last_dip->di_next_unlinked = cpu_to_be32(next_agino);
1810 ASSERT(next_agino != 0); 1822 ASSERT(next_agino != 0);
1811 offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked); 1823 offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
1824
1825 /* need to recalc the inode CRC if appropriate */
1826 xfs_dinode_calc_crc(mp, last_dip);
1827
1812 xfs_trans_inode_buf(tp, last_ibp); 1828 xfs_trans_inode_buf(tp, last_ibp);
1813 xfs_trans_log_buf(tp, last_ibp, offset, 1829 xfs_trans_log_buf(tp, last_ibp, offset,
1814 (offset + sizeof(xfs_agino_t) - 1)); 1830 (offset + sizeof(xfs_agino_t) - 1));
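
The same three-step pattern is repeated at every unlinked-list update above: modify di_next_unlinked in the on-disk inode, recompute the inode CRC, then log the buffer range. A sketch of that pattern folded into one helper (the helper name and placement are hypothetical, not part of this patch):

	/* Hypothetical helper: update an unlinked-list pointer and keep the
	 * on-disk inode CRC valid for whoever reads the buffer next. */
	static void
	xfs_iunlink_log_next(
		struct xfs_trans	*tp,
		struct xfs_buf		*ibp,
		struct xfs_mount	*mp,
		struct xfs_dinode	*dip,
		int			offset,
		xfs_agino_t		next_agino)
	{
		dip->di_next_unlinked = cpu_to_be32(next_agino);

		/* the CRC covers di_next_unlinked, so recalc before logging */
		xfs_dinode_calc_crc(mp, dip);

		xfs_trans_inode_buf(tp, ibp);
		xfs_trans_log_buf(tp, ibp, offset,
				  offset + sizeof(xfs_agino_t) - 1);
	}
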
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index d82efaa2ac73..ca9ecaa81112 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -455,6 +455,28 @@ xfs_vn_getattr(
455 return 0; 455 return 0;
456} 456}
457 457
458static void
459xfs_setattr_mode(
460 struct xfs_trans *tp,
461 struct xfs_inode *ip,
462 struct iattr *iattr)
463{
464 struct inode *inode = VFS_I(ip);
465 umode_t mode = iattr->ia_mode;
466
467 ASSERT(tp);
468 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
469
470 if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
471 mode &= ~S_ISGID;
472
473 ip->i_d.di_mode &= S_IFMT;
474 ip->i_d.di_mode |= mode & ~S_IFMT;
475
476 inode->i_mode &= S_IFMT;
477 inode->i_mode |= mode & ~S_IFMT;
478}
479
458int 480int
459xfs_setattr_nonsize( 481xfs_setattr_nonsize(
460 struct xfs_inode *ip, 482 struct xfs_inode *ip,
@@ -606,18 +628,8 @@ xfs_setattr_nonsize(
606 /* 628 /*
607 * Change file access modes. 629 * Change file access modes.
608 */ 630 */
609 if (mask & ATTR_MODE) { 631 if (mask & ATTR_MODE)
610 umode_t mode = iattr->ia_mode; 632 xfs_setattr_mode(tp, ip, iattr);
611
612 if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
613 mode &= ~S_ISGID;
614
615 ip->i_d.di_mode &= S_IFMT;
616 ip->i_d.di_mode |= mode & ~S_IFMT;
617
618 inode->i_mode &= S_IFMT;
619 inode->i_mode |= mode & ~S_IFMT;
620 }
621 633
622 /* 634 /*
623 * Change file access or modified times. 635 * Change file access or modified times.
@@ -714,9 +726,8 @@ xfs_setattr_size(
714 return XFS_ERROR(error); 726 return XFS_ERROR(error);
715 727
716 ASSERT(S_ISREG(ip->i_d.di_mode)); 728 ASSERT(S_ISREG(ip->i_d.di_mode));
717 ASSERT((mask & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET| 729 ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
718 ATTR_MTIME_SET|ATTR_KILL_SUID|ATTR_KILL_SGID| 730 ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
719 ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
720 731
721 if (!(flags & XFS_ATTR_NOLOCK)) { 732 if (!(flags & XFS_ATTR_NOLOCK)) {
722 lock_flags |= XFS_IOLOCK_EXCL; 733 lock_flags |= XFS_IOLOCK_EXCL;
@@ -860,6 +871,12 @@ xfs_setattr_size(
860 xfs_inode_clear_eofblocks_tag(ip); 871 xfs_inode_clear_eofblocks_tag(ip);
861 } 872 }
862 873
874 /*
875 * Change file access modes.
876 */
877 if (mask & ATTR_MODE)
878 xfs_setattr_mode(tp, ip, iattr);
879
863 if (mask & ATTR_CTIME) { 880 if (mask & ATTR_CTIME) {
864 inode->i_ctime = iattr->ia_ctime; 881 inode->i_ctime = iattr->ia_ctime;
865 ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; 882 ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 93f03ec17eec..7cf5e4eafe28 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1599,10 +1599,43 @@ xlog_recover_add_to_trans(
1599} 1599}
1600 1600
1601/* 1601/*
1602 * Sort the log items in the transaction. Cancelled buffers need 1602 * Sort the log items in the transaction.
1603 * to be put first so they are processed before any items that might 1603 *
1604 * modify the buffers. If they are cancelled, then the modifications 1604 * The ordering constraints are defined by the inode allocation and unlink
1605 * don't need to be replayed. 1605 * behaviour. The rules are:
1606 *
1607 * 1. Every item is only logged once in a given transaction. Hence it
1608 * represents the last logged state of the item. Hence ordering is
1609 * dependent on the order in which operations need to be performed so
1610 * required initial conditions are always met.
1611 *
1612 * 2. Cancelled buffers are recorded in pass 1 in a separate table and
1613 * there's nothing to replay from them so we can simply cull them
1614 * from the transaction. However, we can't do that until after we've
1615 * replayed all the other items because they may be dependent on the
1616 * cancelled buffer and replaying the cancelled buffer can remove it
 1617 * from the cancelled buffer table. Hence they have to be done last.
1618 *
1619 * 3. Inode allocation buffers must be replayed before inode items that
1620 * read the buffer and replay changes into it.
1621 *
1622 * 4. Inode unlink buffers must be replayed after inode items are replayed.
1623 * This ensures that inodes are completely flushed to the inode buffer
1624 * in a "free" state before we remove the unlinked inode list pointer.
1625 *
1626 * Hence the ordering needs to be inode allocation buffers first, inode items
1627 * second, inode unlink buffers third and cancelled buffers last.
1628 *
1629 * But there's a problem with that - we can't tell an inode allocation buffer
1630 * apart from a regular buffer, so we can't separate them. We can, however,
1631 * tell an inode unlink buffer from the others, and so we can separate them out
1632 * from all the other buffers and move them to last.
1633 *
1634 * Hence, 4 lists, in order from head to tail:
1635 * - buffer_list for all buffers except cancelled/inode unlink buffers
 1636 * - inode_list for all non-buffer items
1637 * - inode_buffer_list for inode unlink buffers
1638 * - cancel_list for the cancelled buffers
1606 */ 1639 */
1607STATIC int 1640STATIC int
1608xlog_recover_reorder_trans( 1641xlog_recover_reorder_trans(
@@ -1612,6 +1645,10 @@ xlog_recover_reorder_trans(
1612{ 1645{
1613 xlog_recover_item_t *item, *n; 1646 xlog_recover_item_t *item, *n;
1614 LIST_HEAD(sort_list); 1647 LIST_HEAD(sort_list);
1648 LIST_HEAD(cancel_list);
1649 LIST_HEAD(buffer_list);
1650 LIST_HEAD(inode_buffer_list);
1651 LIST_HEAD(inode_list);
1615 1652
1616 list_splice_init(&trans->r_itemq, &sort_list); 1653 list_splice_init(&trans->r_itemq, &sort_list);
1617 list_for_each_entry_safe(item, n, &sort_list, ri_list) { 1654 list_for_each_entry_safe(item, n, &sort_list, ri_list) {
@@ -1619,12 +1656,18 @@ xlog_recover_reorder_trans(
1619 1656
1620 switch (ITEM_TYPE(item)) { 1657 switch (ITEM_TYPE(item)) {
1621 case XFS_LI_BUF: 1658 case XFS_LI_BUF:
1622 if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) { 1659 if (buf_f->blf_flags & XFS_BLF_CANCEL) {
1623 trace_xfs_log_recover_item_reorder_head(log, 1660 trace_xfs_log_recover_item_reorder_head(log,
1624 trans, item, pass); 1661 trans, item, pass);
1625 list_move(&item->ri_list, &trans->r_itemq); 1662 list_move(&item->ri_list, &cancel_list);
1663 break;
1664 }
1665 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
1666 list_move(&item->ri_list, &inode_buffer_list);
1626 break; 1667 break;
1627 } 1668 }
1669 list_move_tail(&item->ri_list, &buffer_list);
1670 break;
1628 case XFS_LI_INODE: 1671 case XFS_LI_INODE:
1629 case XFS_LI_DQUOT: 1672 case XFS_LI_DQUOT:
1630 case XFS_LI_QUOTAOFF: 1673 case XFS_LI_QUOTAOFF:
@@ -1632,7 +1675,7 @@ xlog_recover_reorder_trans(
1632 case XFS_LI_EFI: 1675 case XFS_LI_EFI:
1633 trace_xfs_log_recover_item_reorder_tail(log, 1676 trace_xfs_log_recover_item_reorder_tail(log,
1634 trans, item, pass); 1677 trans, item, pass);
1635 list_move_tail(&item->ri_list, &trans->r_itemq); 1678 list_move_tail(&item->ri_list, &inode_list);
1636 break; 1679 break;
1637 default: 1680 default:
1638 xfs_warn(log->l_mp, 1681 xfs_warn(log->l_mp,
@@ -1643,6 +1686,14 @@ xlog_recover_reorder_trans(
1643 } 1686 }
1644 } 1687 }
1645 ASSERT(list_empty(&sort_list)); 1688 ASSERT(list_empty(&sort_list));
1689 if (!list_empty(&buffer_list))
1690 list_splice(&buffer_list, &trans->r_itemq);
1691 if (!list_empty(&inode_list))
1692 list_splice_tail(&inode_list, &trans->r_itemq);
1693 if (!list_empty(&inode_buffer_list))
1694 list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1695 if (!list_empty(&cancel_list))
1696 list_splice_tail(&cancel_list, &trans->r_itemq);
1646 return 0; 1697 return 0;
1647} 1698}
1648 1699
@@ -1794,7 +1845,13 @@ xlog_recover_do_inode_buffer(
1794 xfs_agino_t *buffer_nextp; 1845 xfs_agino_t *buffer_nextp;
1795 1846
1796 trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f); 1847 trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
1797 bp->b_ops = &xfs_inode_buf_ops; 1848
1849 /*
1850 * Post recovery validation only works properly on CRC enabled
1851 * filesystems.
1852 */
1853 if (xfs_sb_version_hascrc(&mp->m_sb))
1854 bp->b_ops = &xfs_inode_buf_ops;
1798 1855
1799 inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog; 1856 inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
1800 for (i = 0; i < inodes_per_buf; i++) { 1857 for (i = 0; i < inodes_per_buf; i++) {
@@ -1861,6 +1918,15 @@ xlog_recover_do_inode_buffer(
1861 buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp, 1918 buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1862 next_unlinked_offset); 1919 next_unlinked_offset);
1863 *buffer_nextp = *logged_nextp; 1920 *buffer_nextp = *logged_nextp;
1921
1922 /*
1923 * If necessary, recalculate the CRC in the on-disk inode. We
1924 * have to leave the inode in a consistent state for whoever
1925 * reads it next....
1926 */
1927 xfs_dinode_calc_crc(mp, (struct xfs_dinode *)
1928 xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
1929
1864 } 1930 }
1865 1931
1866 return 0; 1932 return 0;
@@ -2097,6 +2163,17 @@ xlog_recover_do_reg_buffer(
2097 ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT)); 2163 ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
2098 2164
2099 /* 2165 /*
2166 * The dirty regions logged in the buffer, even though
2167 * contiguous, may span multiple chunks. This is because the
2168 * dirty region may span a physical page boundary in a buffer
2169 * and hence be split into two separate vectors for writing into
2170 * the log. Hence we need to trim nbits back to the length of
2171 * the current region being copied out of the log.
2172 */
2173 if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
2174 nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
2175
2176 /*
2100 * Do a sanity check if this is a dquot buffer. Just checking 2177 * Do a sanity check if this is a dquot buffer. Just checking
2101 * the first dquot in the buffer should do. XXXThis is 2178 * the first dquot in the buffer should do. XXXThis is
2102 * probably a good thing to do for other buf types also. 2179 * probably a good thing to do for other buf types also.
@@ -2134,7 +2211,16 @@ xlog_recover_do_reg_buffer(
2134 /* Shouldn't be any more regions */ 2211 /* Shouldn't be any more regions */
2135 ASSERT(i == item->ri_total); 2212 ASSERT(i == item->ri_total);
2136 2213
2137 xlog_recovery_validate_buf_type(mp, bp, buf_f); 2214 /*
2215 * We can only do post recovery validation on items on CRC enabled
 2216 * filesystems as we need to know when the buffer was written to be able
2217 * to determine if we should have replayed the item. If we replay old
2218 * metadata over a newer buffer, then it will enter a temporarily
2219 * inconsistent state resulting in verification failures. Hence for now
2220 * just avoid the verification stage for non-crc filesystems
2221 */
2222 if (xfs_sb_version_hascrc(&mp->m_sb))
2223 xlog_recovery_validate_buf_type(mp, bp, buf_f);
2138} 2224}
2139 2225
2140/* 2226/*
@@ -2255,6 +2341,12 @@ xfs_qm_dqcheck(
2255 d->dd_diskdq.d_flags = type; 2341 d->dd_diskdq.d_flags = type;
2256 d->dd_diskdq.d_id = cpu_to_be32(id); 2342 d->dd_diskdq.d_id = cpu_to_be32(id);
2257 2343
2344 if (xfs_sb_version_hascrc(&mp->m_sb)) {
2345 uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
2346 xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
2347 XFS_DQUOT_CRC_OFF);
2348 }
2349
2258 return errs; 2350 return errs;
2259} 2351}
2260 2352
@@ -2782,6 +2874,10 @@ xlog_recover_dquot_pass2(
2782 } 2874 }
2783 2875
2784 memcpy(ddq, recddq, item->ri_buf[1].i_len); 2876 memcpy(ddq, recddq, item->ri_buf[1].i_len);
2877 if (xfs_sb_version_hascrc(&mp->m_sb)) {
2878 xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
2879 XFS_DQUOT_CRC_OFF);
2880 }
2785 2881
2786 ASSERT(dq_f->qlf_size == 2); 2882 ASSERT(dq_f->qlf_size == 2);
2787 ASSERT(bp->b_target->bt_mount == mp); 2883 ASSERT(bp->b_target->bt_mount == mp);
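
The reordering added to xlog_recover_reorder_trans() above is a bucket sort: each log item is moved onto one of four local lists, which are then spliced back onto the transaction's item queue in the replay order described in the comment block. A minimal standalone sketch of that splice pattern with the generic list API (structure and function names here are illustrative, not from the patch):

	#include <linux/list.h>

	struct demo_item {
		struct list_head	list;
		int			type;	/* 0=buffer, 1=item, 2=inode buf, 3=cancelled */
	};

	static void demo_reorder(struct list_head *itemq)
	{
		struct demo_item *item, *n;
		LIST_HEAD(sort_list);
		LIST_HEAD(buffer_list);
		LIST_HEAD(inode_list);
		LIST_HEAD(inode_buffer_list);
		LIST_HEAD(cancel_list);

		list_splice_init(itemq, &sort_list);
		list_for_each_entry_safe(item, n, &sort_list, list) {
			switch (item->type) {
			case 0: list_move_tail(&item->list, &buffer_list); break;
			case 1: list_move_tail(&item->list, &inode_list); break;
			case 2: list_move_tail(&item->list, &inode_buffer_list); break;
			default: list_move_tail(&item->list, &cancel_list); break;
			}
		}

		/* buffers first, then items, then inode unlink buffers, cancelled last */
		list_splice(&buffer_list, itemq);
		list_splice_tail(&inode_list, itemq);
		list_splice_tail(&inode_buffer_list, itemq);
		list_splice_tail(&cancel_list, itemq);
	}
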
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index f6bfbd734669..e8e310c05097 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -314,7 +314,8 @@ STATIC int
314xfs_mount_validate_sb( 314xfs_mount_validate_sb(
315 xfs_mount_t *mp, 315 xfs_mount_t *mp,
316 xfs_sb_t *sbp, 316 xfs_sb_t *sbp,
317 bool check_inprogress) 317 bool check_inprogress,
318 bool check_version)
318{ 319{
319 320
320 /* 321 /*
@@ -337,9 +338,10 @@ xfs_mount_validate_sb(
337 338
338 /* 339 /*
339 * Version 5 superblock feature mask validation. Reject combinations the 340 * Version 5 superblock feature mask validation. Reject combinations the
340 * kernel cannot support up front before checking anything else. 341 * kernel cannot support up front before checking anything else. For
342 * write validation, we don't need to check feature masks.
341 */ 343 */
342 if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) { 344 if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
343 xfs_alert(mp, 345 xfs_alert(mp,
344"Version 5 superblock detected. This kernel has EXPERIMENTAL support enabled!\n" 346"Version 5 superblock detected. This kernel has EXPERIMENTAL support enabled!\n"
345"Use of these features in this kernel is at your own risk!"); 347"Use of these features in this kernel is at your own risk!");
@@ -675,7 +677,8 @@ xfs_sb_to_disk(
675 677
676static int 678static int
677xfs_sb_verify( 679xfs_sb_verify(
678 struct xfs_buf *bp) 680 struct xfs_buf *bp,
681 bool check_version)
679{ 682{
680 struct xfs_mount *mp = bp->b_target->bt_mount; 683 struct xfs_mount *mp = bp->b_target->bt_mount;
681 struct xfs_sb sb; 684 struct xfs_sb sb;
@@ -686,7 +689,8 @@ xfs_sb_verify(
686 * Only check the in progress field for the primary superblock as 689 * Only check the in progress field for the primary superblock as
687 * mkfs.xfs doesn't clear it from secondary superblocks. 690 * mkfs.xfs doesn't clear it from secondary superblocks.
688 */ 691 */
689 return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR); 692 return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR,
693 check_version);
690} 694}
691 695
692/* 696/*
@@ -719,7 +723,7 @@ xfs_sb_read_verify(
719 goto out_error; 723 goto out_error;
720 } 724 }
721 } 725 }
722 error = xfs_sb_verify(bp); 726 error = xfs_sb_verify(bp, true);
723 727
724out_error: 728out_error:
725 if (error) { 729 if (error) {
@@ -758,7 +762,7 @@ xfs_sb_write_verify(
758 struct xfs_buf_log_item *bip = bp->b_fspriv; 762 struct xfs_buf_log_item *bip = bp->b_fspriv;
759 int error; 763 int error;
760 764
761 error = xfs_sb_verify(bp); 765 error = xfs_sb_verify(bp, false);
762 if (error) { 766 if (error) {
763 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr); 767 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
764 xfs_buf_ioerror(bp, error); 768 xfs_buf_ioerror(bp, error);
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index f41702b43003..b75c9bb6e71e 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -41,6 +41,7 @@
41#include "xfs_qm.h" 41#include "xfs_qm.h"
42#include "xfs_trace.h" 42#include "xfs_trace.h"
43#include "xfs_icache.h" 43#include "xfs_icache.h"
44#include "xfs_cksum.h"
44 45
45/* 46/*
46 * The global quota manager. There is only one of these for the entire 47 * The global quota manager. There is only one of these for the entire
@@ -839,7 +840,7 @@ xfs_qm_reset_dqcounts(
839 xfs_dqid_t id, 840 xfs_dqid_t id,
840 uint type) 841 uint type)
841{ 842{
842 xfs_disk_dquot_t *ddq; 843 struct xfs_dqblk *dqb;
843 int j; 844 int j;
844 845
845 trace_xfs_reset_dqcounts(bp, _RET_IP_); 846 trace_xfs_reset_dqcounts(bp, _RET_IP_);
@@ -853,8 +854,12 @@ xfs_qm_reset_dqcounts(
853 do_div(j, sizeof(xfs_dqblk_t)); 854 do_div(j, sizeof(xfs_dqblk_t));
854 ASSERT(mp->m_quotainfo->qi_dqperchunk == j); 855 ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
855#endif 856#endif
856 ddq = bp->b_addr; 857 dqb = bp->b_addr;
857 for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) { 858 for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
859 struct xfs_disk_dquot *ddq;
860
861 ddq = (struct xfs_disk_dquot *)&dqb[j];
862
858 /* 863 /*
859 * Do a sanity check, and if needed, repair the dqblk. Don't 864 * Do a sanity check, and if needed, repair the dqblk. Don't
860 * output any warnings because it's perfectly possible to 865 * output any warnings because it's perfectly possible to
@@ -871,7 +876,12 @@ xfs_qm_reset_dqcounts(
871 ddq->d_bwarns = 0; 876 ddq->d_bwarns = 0;
872 ddq->d_iwarns = 0; 877 ddq->d_iwarns = 0;
873 ddq->d_rtbwarns = 0; 878 ddq->d_rtbwarns = 0;
874 ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1); 879
880 if (xfs_sb_version_hascrc(&mp->m_sb)) {
881 xfs_update_cksum((char *)&dqb[j],
882 sizeof(struct xfs_dqblk),
883 XFS_DQUOT_CRC_OFF);
884 }
875 } 885 }
876} 886}
877 887
@@ -907,19 +917,29 @@ xfs_qm_dqiter_bufs(
907 XFS_FSB_TO_DADDR(mp, bno), 917 XFS_FSB_TO_DADDR(mp, bno),
908 mp->m_quotainfo->qi_dqchunklen, 0, &bp, 918 mp->m_quotainfo->qi_dqchunklen, 0, &bp,
909 &xfs_dquot_buf_ops); 919 &xfs_dquot_buf_ops);
910 if (error)
911 break;
912 920
913 /* 921 /*
 914 * XXX(hch): need to figure out if it makes sense to validate 922 * CRC and validation errors will return an EFSCORRUPTED here. If
915 * the CRC here. 923 * this occurs, re-read without CRC validation so that we can
924 * repair the damage via xfs_qm_reset_dqcounts(). This process
925 * will leave a trace in the log indicating corruption has
926 * been detected.
916 */ 927 */
928 if (error == EFSCORRUPTED) {
929 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
930 XFS_FSB_TO_DADDR(mp, bno),
931 mp->m_quotainfo->qi_dqchunklen, 0, &bp,
932 NULL);
933 }
934
935 if (error)
936 break;
937
917 xfs_qm_reset_dqcounts(mp, bp, firstid, type); 938 xfs_qm_reset_dqcounts(mp, bp, firstid, type);
918 xfs_buf_delwri_queue(bp, buffer_list); 939 xfs_buf_delwri_queue(bp, buffer_list);
919 xfs_buf_relse(bp); 940 xfs_buf_relse(bp);
920 /* 941
921 * goto the next block. 942 /* goto the next block. */
922 */
923 bno++; 943 bno++;
924 firstid += mp->m_quotainfo->qi_dqperchunk; 944 firstid += mp->m_quotainfo->qi_dqperchunk;
925 } 945 }
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index c41190cad6e9..6cdf6ffc36a1 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -489,31 +489,36 @@ xfs_qm_scall_setqlim(
489 if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0) 489 if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
490 return 0; 490 return 0;
491 491
492 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
493 error = xfs_trans_reserve(tp, 0, XFS_QM_SETQLIM_LOG_RES(mp),
494 0, 0, XFS_DEFAULT_LOG_COUNT);
495 if (error) {
496 xfs_trans_cancel(tp, 0);
497 return (error);
498 }
499
500 /* 492 /*
501 * We don't want to race with a quotaoff so take the quotaoff lock. 493 * We don't want to race with a quotaoff so take the quotaoff lock.
502 * (We don't hold an inode lock, so there's nothing else to stop 494 * We don't hold an inode lock, so there's nothing else to stop
503 * a quotaoff from happening). (XXXThis doesn't currently happen 495 * a quotaoff from happening.
504 * because we take the vfslock before calling xfs_qm_sysent).
505 */ 496 */
506 mutex_lock(&q->qi_quotaofflock); 497 mutex_lock(&q->qi_quotaofflock);
507 498
508 /* 499 /*
509 * Get the dquot (locked), and join it to the transaction. 500 * Get the dquot (locked) before we start, as we need to do a
510 * Allocate the dquot if this doesn't exist. 501 * transaction to allocate it if it doesn't exist. Once we have the
502 * dquot, unlock it so we can start the next transaction safely. We hold
503 * a reference to the dquot, so it's safe to do this unlock/lock without
504 * it being reclaimed in the mean time.
511 */ 505 */
512 if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) { 506 error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp);
513 xfs_trans_cancel(tp, XFS_TRANS_ABORT); 507 if (error) {
514 ASSERT(error != ENOENT); 508 ASSERT(error != ENOENT);
515 goto out_unlock; 509 goto out_unlock;
516 } 510 }
511 xfs_dqunlock(dqp);
512
513 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
514 error = xfs_trans_reserve(tp, 0, XFS_QM_SETQLIM_LOG_RES(mp),
515 0, 0, XFS_DEFAULT_LOG_COUNT);
516 if (error) {
517 xfs_trans_cancel(tp, 0);
518 goto out_rele;
519 }
520
521 xfs_dqlock(dqp);
517 xfs_trans_dqjoin(tp, dqp); 522 xfs_trans_dqjoin(tp, dqp);
518 ddq = &dqp->q_core; 523 ddq = &dqp->q_core;
519 524
@@ -621,9 +626,10 @@ xfs_qm_scall_setqlim(
621 xfs_trans_log_dquot(tp, dqp); 626 xfs_trans_log_dquot(tp, dqp);
622 627
623 error = xfs_trans_commit(tp, 0); 628 error = xfs_trans_commit(tp, 0);
624 xfs_qm_dqrele(dqp);
625 629
626 out_unlock: 630out_rele:
631 xfs_qm_dqrele(dqp);
632out_unlock:
627 mutex_unlock(&q->qi_quotaofflock); 633 mutex_unlock(&q->qi_quotaofflock);
628 return error; 634 return error;
629} 635}
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index c61e31c7d997..c38068f26c55 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -87,6 +87,8 @@ typedef struct xfs_dqblk {
87 uuid_t dd_uuid; /* location information */ 87 uuid_t dd_uuid; /* location information */
88} xfs_dqblk_t; 88} xfs_dqblk_t;
89 89
90#define XFS_DQUOT_CRC_OFF offsetof(struct xfs_dqblk, dd_crc)
91
90/* 92/*
91 * flags for q_flags field in the dquot. 93 * flags for q_flags field in the dquot.
92 */ 94 */
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index ea341cea68cb..3033ba5e9762 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1373,6 +1373,17 @@ xfs_finish_flags(
1373 } 1373 }
1374 1374
1375 /* 1375 /*
1376 * V5 filesystems always use attr2 format for attributes.
1377 */
1378 if (xfs_sb_version_hascrc(&mp->m_sb) &&
1379 (mp->m_flags & XFS_MOUNT_NOATTR2)) {
1380 xfs_warn(mp,
1381"Cannot mount a V5 filesystem as %s. %s is always enabled for V5 filesystems.",
1382 MNTOPT_NOATTR2, MNTOPT_ATTR2);
1383 return XFS_ERROR(EINVAL);
1384 }
1385
1386 /*
1376 * mkfs'ed attr2 will turn on attr2 mount unless explicitly 1387 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
1377 * told by noattr2 to turn it off 1388 * told by noattr2 to turn it off
1378 */ 1389 */
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 5f234389327c..195a403e1522 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -56,16 +56,9 @@ xfs_symlink_blocks(
56 struct xfs_mount *mp, 56 struct xfs_mount *mp,
57 int pathlen) 57 int pathlen)
58{ 58{
59 int fsblocks = 0; 59 int buflen = XFS_SYMLINK_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
60 int len = pathlen;
61 60
62 do { 61 return (pathlen + buflen - 1) / buflen;
63 fsblocks++;
64 len -= XFS_SYMLINK_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
65 } while (len > 0);
66
67 ASSERT(fsblocks <= XFS_SYMLINK_MAPS);
68 return fsblocks;
69} 62}
70 63
71static int 64static int
@@ -405,7 +398,7 @@ xfs_symlink(
405 if (pathlen <= XFS_LITINO(mp, dp->i_d.di_version)) 398 if (pathlen <= XFS_LITINO(mp, dp->i_d.di_version))
406 fs_blocks = 0; 399 fs_blocks = 0;
407 else 400 else
408 fs_blocks = XFS_B_TO_FSB(mp, pathlen); 401 fs_blocks = xfs_symlink_blocks(mp, pathlen);
409 resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks); 402 resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
410 error = xfs_trans_reserve(tp, resblks, XFS_SYMLINK_LOG_RES(mp), 0, 403 error = xfs_trans_reserve(tp, resblks, XFS_SYMLINK_LOG_RES(mp), 0,
411 XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT); 404 XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT);
@@ -512,7 +505,7 @@ xfs_symlink(
512 cur_chunk = target_path; 505 cur_chunk = target_path;
513 offset = 0; 506 offset = 0;
514 for (n = 0; n < nmaps; n++) { 507 for (n = 0; n < nmaps; n++) {
515 char *buf; 508 char *buf;
516 509
517 d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock); 510 d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
518 byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount); 511 byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
@@ -525,9 +518,7 @@ xfs_symlink(
525 bp->b_ops = &xfs_symlink_buf_ops; 518 bp->b_ops = &xfs_symlink_buf_ops;
526 519
527 byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt); 520 byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt);
528 if (pathlen < byte_cnt) { 521 byte_cnt = min(byte_cnt, pathlen);
529 byte_cnt = pathlen;
530 }
531 522
532 buf = bp->b_addr; 523 buf = bp->b_addr;
533 buf += xfs_symlink_hdr_set(mp, ip->i_ino, offset, 524 buf += xfs_symlink_hdr_set(mp, ip->i_ino, offset,
@@ -542,6 +533,7 @@ xfs_symlink(
542 xfs_trans_log_buf(tp, bp, 0, (buf + byte_cnt - 1) - 533 xfs_trans_log_buf(tp, bp, 0, (buf + byte_cnt - 1) -
543 (char *)bp->b_addr); 534 (char *)bp->b_addr);
544 } 535 }
536 ASSERT(pathlen == 0);
545 } 537 }
546 538
547 /* 539 /*
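
The loop removed from xfs_symlink_blocks() is replaced by a straight ceiling division: the block count is the path length divided by the per-block payload, rounded up. A quick standalone check of the formula (the payload size is made up for illustration):

	#include <assert.h>

	static int blocks_needed(int pathlen, int buflen)
	{
		return (pathlen + buflen - 1) / buflen;	/* ceiling division */
	}

	int main(void)
	{
		/* e.g. 1000 bytes of symlink payload space per block (illustrative) */
		assert(blocks_needed(1, 1000) == 1);
		assert(blocks_needed(1000, 1000) == 1);
		assert(blocks_needed(1001, 1000) == 2);
		return 0;
	}
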
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 636c59f2003a..c13c919ab99e 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -382,6 +382,7 @@ const char *acpi_power_state_string(int state);
382int acpi_device_get_power(struct acpi_device *device, int *state); 382int acpi_device_get_power(struct acpi_device *device, int *state);
383int acpi_device_set_power(struct acpi_device *device, int state); 383int acpi_device_set_power(struct acpi_device *device, int state);
384int acpi_bus_init_power(struct acpi_device *device); 384int acpi_bus_init_power(struct acpi_device *device);
385int acpi_device_fix_up_power(struct acpi_device *device);
385int acpi_bus_update_power(acpi_handle handle, int *state_p); 386int acpi_bus_update_power(acpi_handle handle, int *state_p);
386bool acpi_bus_power_manageable(acpi_handle handle); 387bool acpi_bus_power_manageable(acpi_handle handle);
387 388
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index e6168a24b9f0..b420939f5eb5 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -123,7 +123,9 @@ extern int register_dock_notifier(struct notifier_block *nb);
123extern void unregister_dock_notifier(struct notifier_block *nb); 123extern void unregister_dock_notifier(struct notifier_block *nb);
124extern int register_hotplug_dock_device(acpi_handle handle, 124extern int register_hotplug_dock_device(acpi_handle handle,
125 const struct acpi_dock_ops *ops, 125 const struct acpi_dock_ops *ops,
126 void *context); 126 void *context,
127 void (*init)(void *),
128 void (*release)(void *));
127extern void unregister_hotplug_dock_device(acpi_handle handle); 129extern void unregister_hotplug_dock_device(acpi_handle handle);
128#else 130#else
129static inline int is_dock_device(acpi_handle handle) 131static inline int is_dock_device(acpi_handle handle)
@@ -139,7 +141,9 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
139} 141}
140static inline int register_hotplug_dock_device(acpi_handle handle, 142static inline int register_hotplug_dock_device(acpi_handle handle,
141 const struct acpi_dock_ops *ops, 143 const struct acpi_dock_ops *ops,
142 void *context) 144 void *context,
145 void (*init)(void *),
146 void (*release)(void *))
143{ 147{
144 return -ENODEV; 148 return -ENODEV;
145} 149}
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index ac9da00e9f2c..d5afe96adba6 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -343,8 +343,12 @@ extern void ioport_unmap(void __iomem *p);
343#endif /* CONFIG_GENERIC_IOMAP */ 343#endif /* CONFIG_GENERIC_IOMAP */
344#endif /* CONFIG_HAS_IOPORT */ 344#endif /* CONFIG_HAS_IOPORT */
345 345
346#ifndef xlate_dev_kmem_ptr
346#define xlate_dev_kmem_ptr(p) p 347#define xlate_dev_kmem_ptr(p) p
348#endif
349#ifndef xlate_dev_mem_ptr
347#define xlate_dev_mem_ptr(p) __va(p) 350#define xlate_dev_mem_ptr(p) __va(p)
351#endif
348 352
349#ifdef CONFIG_VIRT_TO_BUS 353#ifdef CONFIG_VIRT_TO_BUS
350#ifndef virt_to_bus 354#ifndef virt_to_bus
diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h
index 9d96605f160a..fa25becbdcaf 100644
--- a/include/asm-generic/kvm_para.h
+++ b/include/asm-generic/kvm_para.h
@@ -18,4 +18,9 @@ static inline unsigned int kvm_arch_para_features(void)
18 return 0; 18 return 0;
19} 19}
20 20
21static inline bool kvm_para_available(void)
22{
23 return false;
24}
25
21#endif 26#endif
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index b1b1fa6ffffe..13821c339a41 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -97,11 +97,9 @@ struct mmu_gather {
97 unsigned long start; 97 unsigned long start;
98 unsigned long end; 98 unsigned long end;
99 unsigned int need_flush : 1, /* Did free PTEs */ 99 unsigned int need_flush : 1, /* Did free PTEs */
100 fast_mode : 1; /* No batching */
101
102 /* we are in the middle of an operation to clear 100 /* we are in the middle of an operation to clear
103 * a full mm and can make some optimizations */ 101 * a full mm and can make some optimizations */
104 unsigned int fullmm : 1, 102 fullmm : 1,
105 /* we have performed an operation which 103 /* we have performed an operation which
106 * requires a complete flush of the tlb */ 104 * requires a complete flush of the tlb */
107 need_flush_all : 1; 105 need_flush_all : 1;
@@ -114,19 +112,6 @@ struct mmu_gather {
114 112
115#define HAVE_GENERIC_MMU_GATHER 113#define HAVE_GENERIC_MMU_GATHER
116 114
117static inline int tlb_fast_mode(struct mmu_gather *tlb)
118{
119#ifdef CONFIG_SMP
120 return tlb->fast_mode;
121#else
122 /*
123 * For UP we don't need to worry about TLB flush
124 * and page free order so much..
125 */
126 return 1;
127#endif
128}
129
130void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm); 115void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
131void tlb_flush_mmu(struct mmu_gather *tlb); 116void tlb_flush_mmu(struct mmu_gather *tlb);
132void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, 117void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
diff --git a/include/linux/aer.h b/include/linux/aer.h
index ec10e1b24c1c..737f90ab4b62 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -49,10 +49,11 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
49} 49}
50#endif 50#endif
51 51
52extern void cper_print_aer(const char *prefix, struct pci_dev *dev, 52extern void cper_print_aer(struct pci_dev *dev,
53 int cper_severity, struct aer_capability_regs *aer); 53 int cper_severity, struct aer_capability_regs *aer);
54extern int cper_severity_to_aer(int cper_severity); 54extern int cper_severity_to_aer(int cper_severity);
55extern void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, 55extern void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
56 int severity); 56 int severity,
57 struct aer_capability_regs *aer_regs);
57#endif //_AER_H_ 58#endif //_AER_H_
58 59
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 5047355b9a0f..8bda1294c035 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -707,7 +707,7 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
707 * 707 *
708 * If a subsystem synchronizes against the parent in its ->css_online() and 708 * If a subsystem synchronizes against the parent in its ->css_online() and
709 * before starting iterating, and synchronizes against @pos on each 709 * before starting iterating, and synchronizes against @pos on each
710 * iteration, any descendant cgroup which finished ->css_offline() is 710 * iteration, any descendant cgroup which finished ->css_online() is
711 * guaranteed to be visible in the future iterations. 711 * guaranteed to be visible in the future iterations.
712 * 712 *
713 * In other words, the following guarantees that a descendant can't escape 713 * In other words, the following guarantees that a descendant can't escape
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 365f4a61bf04..fc09d7b0dacf 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/sched.h> 4#include <linux/sched.h>
5#include <linux/percpu.h> 5#include <linux/percpu.h>
6#include <linux/vtime.h>
6#include <asm/ptrace.h> 7#include <asm/ptrace.h>
7 8
8struct context_tracking { 9struct context_tracking {
@@ -19,6 +20,26 @@ struct context_tracking {
19 } state; 20 } state;
20}; 21};
21 22
23static inline void __guest_enter(void)
24{
25 /*
26 * This is running in ioctl context so we can avoid
27 * the call to vtime_account() with its unnecessary idle check.
28 */
29 vtime_account_system(current);
30 current->flags |= PF_VCPU;
31}
32
33static inline void __guest_exit(void)
34{
35 /*
36 * This is running in ioctl context so we can avoid
37 * the call to vtime_account() with its unnecessary idle check.
38 */
39 vtime_account_system(current);
40 current->flags &= ~PF_VCPU;
41}
42
22#ifdef CONFIG_CONTEXT_TRACKING 43#ifdef CONFIG_CONTEXT_TRACKING
23DECLARE_PER_CPU(struct context_tracking, context_tracking); 44DECLARE_PER_CPU(struct context_tracking, context_tracking);
24 45
@@ -35,6 +56,9 @@ static inline bool context_tracking_active(void)
35extern void user_enter(void); 56extern void user_enter(void);
36extern void user_exit(void); 57extern void user_exit(void);
37 58
59extern void guest_enter(void);
60extern void guest_exit(void);
61
38static inline enum ctx_state exception_enter(void) 62static inline enum ctx_state exception_enter(void)
39{ 63{
40 enum ctx_state prev_ctx; 64 enum ctx_state prev_ctx;
@@ -57,6 +81,17 @@ extern void context_tracking_task_switch(struct task_struct *prev,
57static inline bool context_tracking_in_user(void) { return false; } 81static inline bool context_tracking_in_user(void) { return false; }
58static inline void user_enter(void) { } 82static inline void user_enter(void) { }
59static inline void user_exit(void) { } 83static inline void user_exit(void) { }
84
85static inline void guest_enter(void)
86{
87 __guest_enter();
88}
89
90static inline void guest_exit(void)
91{
92 __guest_exit();
93}
94
60static inline enum ctx_state exception_enter(void) { return 0; } 95static inline enum ctx_state exception_enter(void) { return 0; }
61static inline void exception_exit(enum ctx_state prev_ctx) { } 96static inline void exception_exit(enum ctx_state prev_ctx) { }
62static inline void context_tracking_task_switch(struct task_struct *prev, 97static inline void context_tracking_task_switch(struct task_struct *prev,
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index c6f6e0839b61..9f3c7e81270a 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -175,6 +175,8 @@ extern struct bus_type cpu_subsys;
175 175
176extern void get_online_cpus(void); 176extern void get_online_cpus(void);
177extern void put_online_cpus(void); 177extern void put_online_cpus(void);
178extern void cpu_hotplug_disable(void);
179extern void cpu_hotplug_enable(void);
178#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) 180#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
179#define register_hotcpu_notifier(nb) register_cpu_notifier(nb) 181#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
180#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) 182#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
@@ -198,6 +200,8 @@ static inline void cpu_hotplug_driver_unlock(void)
198 200
199#define get_online_cpus() do { } while (0) 201#define get_online_cpus() do { } while (0)
200#define put_online_cpus() do { } while (0) 202#define put_online_cpus() do { } while (0)
203#define cpu_hotplug_disable() do { } while (0)
204#define cpu_hotplug_enable() do { } while (0)
201#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) 205#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
202/* These aren't inline functions due to a GCC bug. */ 206/* These aren't inline functions due to a GCC bug. */
203#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) 207#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
diff --git a/include/linux/filter.h b/include/linux/filter.h
index c050dcc322a4..f65f5a69db8f 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -46,6 +46,7 @@ extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
46extern int sk_detach_filter(struct sock *sk); 46extern int sk_detach_filter(struct sock *sk);
47extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen); 47extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
48extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len); 48extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len);
49extern void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to);
49 50
50#ifdef CONFIG_BPF_JIT 51#ifdef CONFIG_BPF_JIT
51#include <stdarg.h> 52#include <stdarg.h>
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 43db02e9c9fa..65c2be22b601 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2414,8 +2414,6 @@ extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
2414 struct file *, loff_t *, size_t, unsigned int); 2414 struct file *, loff_t *, size_t, unsigned int);
2415extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, 2415extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
2416 struct file *out, loff_t *, size_t len, unsigned int flags); 2416 struct file *out, loff_t *, size_t len, unsigned int flags);
2417extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
2418 size_t len, unsigned int flags);
2419 2417
2420extern void 2418extern void
2421file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); 2419file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 4474557904f6..16fae6436d0e 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -249,12 +249,12 @@ team_get_first_port_txable_rcu(struct team *team, struct team_port *port)
249 return port; 249 return port;
250 cur = port; 250 cur = port;
251 list_for_each_entry_continue_rcu(cur, &team->port_list, list) 251 list_for_each_entry_continue_rcu(cur, &team->port_list, list)
252 if (team_port_txable(port)) 252 if (team_port_txable(cur))
253 return cur; 253 return cur;
254 list_for_each_entry_rcu(cur, &team->port_list, list) { 254 list_for_each_entry_rcu(cur, &team->port_list, list) {
255 if (cur == port) 255 if (cur == port)
256 break; 256 break;
257 if (team_port_txable(port)) 257 if (team_port_txable(cur))
258 return cur; 258 return cur;
259 } 259 }
260 return NULL; 260 return NULL;
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 52bd03b38962..637fa71de0c7 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -44,7 +44,7 @@ struct vlan_hdr {
44 * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr) 44 * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
45 * @h_dest: destination ethernet address 45 * @h_dest: destination ethernet address
46 * @h_source: source ethernet address 46 * @h_source: source ethernet address
47 * @h_vlan_proto: ethernet protocol (always 0x8100) 47 * @h_vlan_proto: ethernet protocol
48 * @h_vlan_TCI: priority and VLAN ID 48 * @h_vlan_TCI: priority and VLAN ID
49 * @h_vlan_encapsulated_proto: packet type ID or len 49 * @h_vlan_encapsulated_proto: packet type ID or len
50 */ 50 */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f0eea07d2c2b..8db53cfaccdb 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -23,6 +23,7 @@
23#include <linux/ratelimit.h> 23#include <linux/ratelimit.h>
24#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/irqflags.h> 25#include <linux/irqflags.h>
26#include <linux/context_tracking.h>
26#include <asm/signal.h> 27#include <asm/signal.h>
27 28
28#include <linux/kvm.h> 29#include <linux/kvm.h>
@@ -760,42 +761,6 @@ static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
760} 761}
761#endif 762#endif
762 763
763static inline void __guest_enter(void)
764{
765 /*
766 * This is running in ioctl context so we can avoid
767 * the call to vtime_account() with its unnecessary idle check.
768 */
769 vtime_account_system(current);
770 current->flags |= PF_VCPU;
771}
772
773static inline void __guest_exit(void)
774{
775 /*
776 * This is running in ioctl context so we can avoid
777 * the call to vtime_account() with its unnecessary idle check.
778 */
779 vtime_account_system(current);
780 current->flags &= ~PF_VCPU;
781}
782
783#ifdef CONFIG_CONTEXT_TRACKING
784extern void guest_enter(void);
785extern void guest_exit(void);
786
787#else /* !CONFIG_CONTEXT_TRACKING */
788static inline void guest_enter(void)
789{
790 __guest_enter();
791}
792
793static inline void guest_exit(void)
794{
795 __guest_exit();
796}
797#endif /* !CONFIG_CONTEXT_TRACKING */
798
799static inline void kvm_guest_enter(void) 764static inline void kvm_guest_enter(void)
800{ 765{
801 unsigned long flags; 766 unsigned long flags;
diff --git a/include/linux/list.h b/include/linux/list.h
index 6a1f8df9144b..b83e5657365a 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -362,6 +362,17 @@ static inline void list_splice_tail_init(struct list_head *list,
362 list_entry((ptr)->next, type, member) 362 list_entry((ptr)->next, type, member)
363 363
364/** 364/**
365 * list_first_entry_or_null - get the first element from a list
366 * @ptr: the list head to take the element from.
367 * @type: the type of the struct this is embedded in.
368 * @member: the name of the list_struct within the struct.
369 *
370 * Note that if the list is empty, it returns NULL.
371 */
372#define list_first_entry_or_null(ptr, type, member) \
373 (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
374
375/**
365 * list_for_each - iterate over a list 376 * list_for_each - iterate over a list
366 * @pos: the &struct list_head to use as a loop cursor. 377 * @pos: the &struct list_head to use as a loop cursor.
367 * @head: the head for your list. 378 * @head: the head for your list.
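
The new list_first_entry_or_null() helper replaces the common open-coded list_empty() check before list_first_entry(). A small usage sketch (the struct and list names are illustrative):

	#include <linux/list.h>

	struct request {
		struct list_head	queue;
		/* ... */
	};

	static struct request *pop_request(struct list_head *pending)
	{
		struct request *req;

		req = list_first_entry_or_null(pending, struct request, queue);
		if (req)
			list_del(&req->queue);
		return req;	/* NULL when the list was empty */
	}
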
diff --git a/include/linux/math64.h b/include/linux/math64.h
index b8ba85544721..2913b86eb12a 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -6,7 +6,8 @@
6 6
7#if BITS_PER_LONG == 64 7#if BITS_PER_LONG == 64
8 8
9#define div64_long(x,y) div64_s64((x),(y)) 9#define div64_long(x, y) div64_s64((x), (y))
10#define div64_ul(x, y) div64_u64((x), (y))
10 11
11/** 12/**
12 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder 13 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
@@ -47,7 +48,8 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
47 48
48#elif BITS_PER_LONG == 32 49#elif BITS_PER_LONG == 32
49 50
50#define div64_long(x,y) div_s64((x),(y)) 51#define div64_long(x, y) div_s64((x), (y))
52#define div64_ul(x, y) div_u64((x), (y))
51 53
52#ifndef div_u64_rem 54#ifndef div_u64_rem
53static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) 55static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
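
div64_ul() rounds out the existing div64_long() so callers can divide a u64 by an unsigned long without caring whether the platform is 32-bit or 64-bit. An illustrative use (the function and values are made up):

	#include <linux/math64.h>

	static u64 avg_bytes_per_op(u64 total_bytes, unsigned long nr_ops)
	{
		if (!nr_ops)
			return 0;
		/* maps to div64_u64() on 64-bit and div_u64() on 32-bit */
		return div64_ul(total_bytes, nr_ops);
	}
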
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 60584b185a0c..96e4c21e15e0 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1695,6 +1695,7 @@ extern int init_dummy_netdev(struct net_device *dev);
1695extern struct net_device *dev_get_by_index(struct net *net, int ifindex); 1695extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
1696extern struct net_device *__dev_get_by_index(struct net *net, int ifindex); 1696extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
1697extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); 1697extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
1698extern int netdev_get_name(struct net *net, char *name, int ifindex);
1698extern int dev_restart(struct net_device *dev); 1699extern int dev_restart(struct net_device *dev);
1699#ifdef CONFIG_NETPOLL_TRAP 1700#ifdef CONFIG_NETPOLL_TRAP
1700extern int netpoll_trap(void); 1701extern int netpoll_trap(void);
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 98ffb54988b6..2d4df6ce043e 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -17,6 +17,22 @@ extern __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
17 17
18extern int ipv6_netfilter_init(void); 18extern int ipv6_netfilter_init(void);
19extern void ipv6_netfilter_fini(void); 19extern void ipv6_netfilter_fini(void);
20
21/*
22 * Hook functions for ipv6 to allow xt_* modules to be built-in even
23 * if IPv6 is a module.
24 */
25struct nf_ipv6_ops {
26 int (*chk_addr)(struct net *net, const struct in6_addr *addr,
27 const struct net_device *dev, int strict);
28};
29
30extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
31static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
32{
33 return rcu_dereference(nf_ipv6_ops);
34}
35
20#else /* CONFIG_NETFILTER */ 36#else /* CONFIG_NETFILTER */
21static inline int ipv6_netfilter_init(void) { return 0; } 37static inline int ipv6_netfilter_init(void) { return 0; }
22static inline void ipv6_netfilter_fini(void) { return; } 38static inline void ipv6_netfilter_fini(void) { return; }
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 5ec99e5a50d2..8650c732f77a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -389,8 +389,7 @@ struct perf_event {
389 /* mmap bits */ 389 /* mmap bits */
390 struct mutex mmap_mutex; 390 struct mutex mmap_mutex;
391 atomic_t mmap_count; 391 atomic_t mmap_count;
392 int mmap_locked; 392
393 struct user_struct *mmap_user;
394 struct ring_buffer *rb; 393 struct ring_buffer *rb;
395 struct list_head rb_entry; 394 struct list_head rb_entry;
396 395
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 8089e35d47ac..f4b1001a4676 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -461,6 +461,26 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
461 &(pos)->member)), typeof(*(pos)), member)) 461 &(pos)->member)), typeof(*(pos)), member))
462 462
463/** 463/**
464 * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
465 * @pos: the type * to use as a loop cursor.
466 * @head: the head for your list.
467 * @member: the name of the hlist_node within the struct.
468 *
469 * This list-traversal primitive may safely run concurrently with
470 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
471 * as long as the traversal is guarded by rcu_read_lock().
472 *
473 * This is the same as hlist_for_each_entry_rcu() except that it does
474 * not do any RCU debugging or tracing.
475 */
476#define hlist_for_each_entry_rcu_notrace(pos, head, member) \
477 for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
478 typeof(*(pos)), member); \
479 pos; \
480 pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
481 &(pos)->member)), typeof(*(pos)), member))
482
483/**
464 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type 484 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
465 * @pos: the type * to use as a loop cursor. 485 * @pos: the type * to use as a loop cursor.
466 * @head: the head for your list. 486 * @head: the head for your list.
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 2ae13714828b..1c33dd7da4a7 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -105,9 +105,14 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
105 * @head: the head for your list. 105 * @head: the head for your list.
106 * @member: the name of the hlist_nulls_node within the struct. 106 * @member: the name of the hlist_nulls_node within the struct.
107 * 107 *
 108 * The barrier() is needed to make sure the compiler doesn't cache the first element [1],
109 * as this loop can be restarted [2]
110 * [1] Documentation/atomic_ops.txt around line 114
111 * [2] Documentation/RCU/rculist_nulls.txt around line 146
108 */ 112 */
109#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ 113#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
110 for (pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ 114 for (({barrier();}), \
115 pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
111 (!is_a_nulls(pos)) && \ 116 (!is_a_nulls(pos)) && \
112 ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ 117 ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
113 pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos))) 118 pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 4ccd68e49b00..ddcc7826d907 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -640,6 +640,15 @@ static inline void rcu_preempt_sleep_check(void)
640 640
641#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/ 641#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
642 642
643/*
644 * The tracing infrastructure traces RCU (we want that), but unfortunately
 645 * some of the RCU checks cause tracing to lock up the system.
646 *
647 * The tracing version of rcu_dereference_raw() must not call
648 * rcu_read_lock_held().
649 */
650#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
651
643/** 652/**
644 * rcu_access_index() - fetch RCU index with no dereferencing 653 * rcu_access_index() - fetch RCU index with no dereferencing
645 * @p: The index to read 654 * @p: The index to read
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 5951e3f38878..26806775b11b 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -111,6 +111,9 @@ static inline struct page *sg_page(struct scatterlist *sg)
111static inline void sg_set_buf(struct scatterlist *sg, const void *buf, 111static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
112 unsigned int buflen) 112 unsigned int buflen)
113{ 113{
114#ifdef CONFIG_DEBUG_SG
115 BUG_ON(!virt_addr_valid(buf));
116#endif
114 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); 117 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
115} 118}
116 119
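
With CONFIG_DEBUG_SG enabled, the new BUG_ON catches callers that hand sg_set_buf() an address outside the kernel's linear mapping, such as vmalloc() memory, which virt_to_page() cannot translate. A hedged sketch of the safe pattern, with a kmalloc'd (linearly mapped) buffer; the caller is assumed to own and later free the buffer:

	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	static int fill_one_sg(struct scatterlist *sg, size_t len)
	{
		void *buf = kmalloc(len, GFP_KERNEL);	/* linearly mapped memory */

		if (!buf)
			return -ENOMEM;
		sg_init_table(sg, 1);
		sg_set_buf(sg, buf, len);	/* would trip DEBUG_SG for vmalloc addresses */
		return 0;
	}
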
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2e0ced1af3b1..dec1748cd002 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -627,6 +627,7 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb)
627} 627}
628 628
629extern void kfree_skb(struct sk_buff *skb); 629extern void kfree_skb(struct sk_buff *skb);
630extern void kfree_skb_list(struct sk_buff *segs);
630extern void skb_tx_error(struct sk_buff *skb); 631extern void skb_tx_error(struct sk_buff *skb);
631extern void consume_skb(struct sk_buff *skb); 632extern void consume_skb(struct sk_buff *skb);
632extern void __kfree_skb(struct sk_buff *skb); 633extern void __kfree_skb(struct sk_buff *skb);
@@ -2852,6 +2853,21 @@ static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
2852 SKB_GSO_CB(inner_skb)->mac_offset; 2853 SKB_GSO_CB(inner_skb)->mac_offset;
2853} 2854}
2854 2855
2856static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
2857{
2858 int new_headroom, headroom;
2859 int ret;
2860
2861 headroom = skb_headroom(skb);
2862 ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
2863 if (ret)
2864 return ret;
2865
2866 new_headroom = skb_headroom(skb);
2867 SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
2868 return 0;
2869}
2870
2855static inline bool skb_is_gso(const struct sk_buff *skb) 2871static inline bool skb_is_gso(const struct sk_buff *skb)
2856{ 2872{
2857 return skb_shinfo(skb)->gso_size; 2873 return skb_shinfo(skb)->gso_size;
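
The new gso_pskb_expand_head() helper exists because pskb_expand_head() alone is not enough for GSO packets: growing the headroom moves skb->data, and SKB_GSO_CB(skb)->mac_offset must be shifted by the same delta or later segmentation code will look for the MAC header in the wrong place. A hedged sketch of a caller in an encapsulation transmit path (the header-building part is illustrative):

static int push_encap_header(struct sk_buff *skb, int hdr_len)
{
	int err;

	if (skb_headroom(skb) < hdr_len) {
		/* reallocates headroom and fixes up SKB_GSO_CB()->mac_offset */
		err = gso_pskb_expand_head(skb, hdr_len);
		if (err)
			return err;
	}

	__skb_push(skb, hdr_len);
	/* ... build the outer header at skb->data ... */
	return 0;
}
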
diff --git a/include/linux/smp.h b/include/linux/smp.h
index e6564c1dc552..c8488763277f 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -11,6 +11,7 @@
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/cpumask.h> 12#include <linux/cpumask.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/irqflags.h>
14 15
15extern void cpu_idle(void); 16extern void cpu_idle(void);
16 17
@@ -139,13 +140,17 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info)
139} 140}
140#define smp_call_function(func, info, wait) \ 141#define smp_call_function(func, info, wait) \
141 (up_smp_call_function(func, info)) 142 (up_smp_call_function(func, info))
142#define on_each_cpu(func,info,wait) \ 143
143 ({ \ 144static inline int on_each_cpu(smp_call_func_t func, void *info, int wait)
144 local_irq_disable(); \ 145{
145 func(info); \ 146 unsigned long flags;
146 local_irq_enable(); \ 147
147 0; \ 148 local_irq_save(flags);
148 }) 149 func(info);
150 local_irq_restore(flags);
151 return 0;
152}
153
149/* 154/*
150 * Note we still need to test the mask even for UP 155 * Note we still need to test the mask even for UP
151 * because we actually can get an empty mask from 156 * because we actually can get an empty mask from
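
The UP rewrite of on_each_cpu() is about semantic parity with SMP: the old macro unconditionally re-enabled interrupts on return, silently changing the caller's context, while the SMP implementation runs the local call under local_irq_save()/local_irq_restore(). The new inline does the same, so func() runs with interrupts off and the caller gets its previous flags back. A hedged illustration of the difference on a uniprocessor build (names are made up; on SMP one would not normally call this with interrupts already off):

static void flush_local_state(void *info)
{
	/* invoked on each CPU with interrupts disabled */
}

static void teardown(void)
{
	unsigned long flags;

	local_irq_save(flags);
	/* ... state that must not be touched by interrupts yet ... */

	on_each_cpu(flush_local_state, NULL, 1);

	/*
	 * With the old UP macro, interrupts would be force-enabled at this
	 * point; now the saved flags decide, on UP exactly as on SMP.
	 */
	local_irq_restore(flags);
}
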
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 33bf2dfab19d..b10ce4b341ea 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -320,6 +320,9 @@ extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
320 320
321struct timespec; 321struct timespec;
322 322
323/* The __sys_...msg variants allow MSG_CMSG_COMPAT */
324extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags);
325extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
323extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, 326extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
324 unsigned int flags, struct timespec *timeout); 327 unsigned int flags, struct timespec *timeout);
325extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, 328extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 09a545a7dfa3..74575cbf2d6f 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -35,6 +35,7 @@ struct splice_desc {
35 void *data; /* cookie */ 35 void *data; /* cookie */
36 } u; 36 } u;
37 loff_t pos; /* file position */ 37 loff_t pos; /* file position */
38 loff_t *opos; /* sendfile: output position */
38 size_t num_spliced; /* number of bytes already spliced */ 39 size_t num_spliced; /* number of bytes already spliced */
39 bool need_wakeup; /* need to wake up writer */ 40 bool need_wakeup; /* need to wake up writer */
40}; 41};
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 47ead515c811..c5fd30d2a415 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -137,6 +137,7 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
137 137
138extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 138extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
139 unsigned long address); 139 unsigned long address);
140extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte);
140#else 141#else
141 142
142#define make_migration_entry(page, write) swp_entry(0, 0) 143#define make_migration_entry(page, write) swp_entry(0, 0)
@@ -148,6 +149,8 @@ static inline int is_migration_entry(swp_entry_t swp)
148static inline void make_migration_entry_read(swp_entry_t *entryp) { } 149static inline void make_migration_entry_read(swp_entry_t *entryp) { }
149static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 150static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
150 unsigned long address) { } 151 unsigned long address) { }
152static inline void migration_entry_wait_huge(struct mm_struct *mm,
153 pte_t *pte) { }
151static inline int is_write_migration_entry(swp_entry_t entry) 154static inline int is_write_migration_entry(swp_entry_t entry)
152{ 155{
153 return 0; 156 return 0;
diff --git a/include/linux/syslog.h b/include/linux/syslog.h
index 38911391a139..98a3153c0f96 100644
--- a/include/linux/syslog.h
+++ b/include/linux/syslog.h
@@ -44,8 +44,8 @@
44/* Return size of the log buffer */ 44/* Return size of the log buffer */
45#define SYSLOG_ACTION_SIZE_BUFFER 10 45#define SYSLOG_ACTION_SIZE_BUFFER 10
46 46
47#define SYSLOG_FROM_CALL 0 47#define SYSLOG_FROM_READER 0
48#define SYSLOG_FROM_FILE 1 48#define SYSLOG_FROM_PROC 1
49 49
50int do_syslog(int type, char __user *buf, int count, bool from_file); 50int do_syslog(int type, char __user *buf, int count, bool from_file);
51 51
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 2f322c38bd4d..f8e084d0fc77 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -145,8 +145,8 @@ static inline void tracepoint_synchronize_unregister(void)
145 TP_PROTO(data_proto), \ 145 TP_PROTO(data_proto), \
146 TP_ARGS(data_args), \ 146 TP_ARGS(data_args), \
147 TP_CONDITION(cond), \ 147 TP_CONDITION(cond), \
148 rcu_idle_exit(), \ 148 rcu_irq_enter(), \
149 rcu_idle_enter()); \ 149 rcu_irq_exit()); \
150 } 150 }
151#else 151#else
152#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) 152#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args)
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index 71a5782d8c59..b1dd2db80076 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -34,7 +34,7 @@ static inline void vtime_user_exit(struct task_struct *tsk)
34} 34}
35extern void vtime_guest_enter(struct task_struct *tsk); 35extern void vtime_guest_enter(struct task_struct *tsk);
36extern void vtime_guest_exit(struct task_struct *tsk); 36extern void vtime_guest_exit(struct task_struct *tsk);
37extern void vtime_init_idle(struct task_struct *tsk); 37extern void vtime_init_idle(struct task_struct *tsk, int cpu);
38#else 38#else
39static inline void vtime_account_irq_exit(struct task_struct *tsk) 39static inline void vtime_account_irq_exit(struct task_struct *tsk)
40{ 40{
@@ -45,7 +45,7 @@ static inline void vtime_user_enter(struct task_struct *tsk) { }
45static inline void vtime_user_exit(struct task_struct *tsk) { } 45static inline void vtime_user_exit(struct task_struct *tsk) { }
46static inline void vtime_guest_enter(struct task_struct *tsk) { } 46static inline void vtime_guest_enter(struct task_struct *tsk) { }
47static inline void vtime_guest_exit(struct task_struct *tsk) { } 47static inline void vtime_guest_exit(struct task_struct *tsk) { }
48static inline void vtime_init_idle(struct task_struct *tsk) { } 48static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
49#endif 49#endif
50 50
51#ifdef CONFIG_IRQ_TIME_ACCOUNTING 51#ifdef CONFIG_IRQ_TIME_ACCOUNTING
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index d3eef01da648..0f4555b2a31b 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -110,6 +110,8 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
110 struct v4l2_buffer *buf); 110 struct v4l2_buffer *buf);
111int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 111int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
112 struct v4l2_buffer *buf); 112 struct v4l2_buffer *buf);
113int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
114 struct v4l2_create_buffers *create);
113 115
114int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 116int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
115 struct v4l2_exportbuffer *eb); 117 struct v4l2_exportbuffer *eb);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 84a6440f1f19..21f702704f24 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -65,7 +65,7 @@ extern int addrconf_set_dstaddr(struct net *net,
65 65
66extern int ipv6_chk_addr(struct net *net, 66extern int ipv6_chk_addr(struct net *net,
67 const struct in6_addr *addr, 67 const struct in6_addr *addr,
68 struct net_device *dev, 68 const struct net_device *dev,
69 int strict); 69 int strict);
70 70
71#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 71#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 35a57cd1704c..7cb6d360d147 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -1117,6 +1117,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
1117int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len); 1117int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
1118int mgmt_index_added(struct hci_dev *hdev); 1118int mgmt_index_added(struct hci_dev *hdev);
1119int mgmt_index_removed(struct hci_dev *hdev); 1119int mgmt_index_removed(struct hci_dev *hdev);
1120int mgmt_set_powered_failed(struct hci_dev *hdev, int err);
1120int mgmt_powered(struct hci_dev *hdev, u8 powered); 1121int mgmt_powered(struct hci_dev *hdev, u8 powered);
1121int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable); 1122int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
1122int mgmt_connectable(struct hci_dev *hdev, u8 connectable); 1123int mgmt_connectable(struct hci_dev *hdev, u8 connectable);
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 22980a7c3873..9944c3e68c5d 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -42,6 +42,7 @@
42#define MGMT_STATUS_NOT_POWERED 0x0f 42#define MGMT_STATUS_NOT_POWERED 0x0f
43#define MGMT_STATUS_CANCELLED 0x10 43#define MGMT_STATUS_CANCELLED 0x10
44#define MGMT_STATUS_INVALID_INDEX 0x11 44#define MGMT_STATUS_INVALID_INDEX 0x11
45#define MGMT_STATUS_RFKILLED 0x12
45 46
46struct mgmt_hdr { 47struct mgmt_hdr {
47 __le16 opcode; 48 __le16 opcode;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 4b6f0b28f41f..09b1360e10bf 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -95,10 +95,10 @@ struct ip_tunnel_net {
95int ip_tunnel_init(struct net_device *dev); 95int ip_tunnel_init(struct net_device *dev);
96void ip_tunnel_uninit(struct net_device *dev); 96void ip_tunnel_uninit(struct net_device *dev);
97void ip_tunnel_dellink(struct net_device *dev, struct list_head *head); 97void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
98int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, 98int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
99 struct rtnl_link_ops *ops, char *devname); 99 struct rtnl_link_ops *ops, char *devname);
100 100
101void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn); 101void ip_tunnel_delete_net(struct ip_tunnel_net *itn);
102 102
103void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, 103void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
104 const struct iphdr *tnl_params); 104 const struct iphdr *tnl_params);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index f10818fc8804..e7f4e21cc3e1 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -679,22 +679,26 @@ static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
679#endif 679#endif
680 680
681struct psched_ratecfg { 681struct psched_ratecfg {
682 u64 rate_bps; 682 u64 rate_bps;
683 u32 mult; 683 u32 mult;
684 u32 shift; 684 u16 overhead;
685 u8 shift;
685}; 686};
686 687
687static inline u64 psched_l2t_ns(const struct psched_ratecfg *r, 688static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
688 unsigned int len) 689 unsigned int len)
689{ 690{
690 return ((u64)len * r->mult) >> r->shift; 691 return ((u64)(len + r->overhead) * r->mult) >> r->shift;
691} 692}
692 693
693extern void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate); 694extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf);
694 695
695static inline u32 psched_ratecfg_getrate(const struct psched_ratecfg *r) 696static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
697 const struct psched_ratecfg *r)
696{ 698{
697 return r->rate_bps >> 3; 699 memset(res, 0, sizeof(*res));
700 res->rate = r->rate_bps >> 3;
701 res->overhead = r->overhead;
698} 702}
699 703
700#endif 704#endif
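
psched_l2t_ns() converts a packet length into transmission time in nanoseconds using a precomputed fixed-point factor, time_ns = ((len + overhead) * mult) >> shift, where mult/shift approximate NSEC_PER_SEC / rate_bytes_per_sec; the change folds per-packet framing overhead into that length and carries it through tc_ratespec. A hypothetical userspace analogue of the precomputation, only to show the arithmetic (the kernel's psched_ratecfg_precompute() picks mult and shift by its own method, to the same effect):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Pick the largest shift such that mult = (NSEC_PER_SEC << shift) / rate
 * still fits in 32 bits; rate is bytes per second and must be non-zero. */
static void precompute(uint64_t rate, uint32_t *mult, uint8_t *shift)
{
	uint64_t factor = NSEC_PER_SEC;
	uint8_t s = 0;

	while (s < 32 && (factor << 1) / rate <= 0xFFFFFFFFULL) {
		factor <<= 1;
		s++;
	}
	*mult = (uint32_t)(factor / rate);
	*shift = s;
}

static uint64_t l2t_ns(uint32_t mult, uint8_t shift, uint16_t overhead,
		       unsigned int len)
{
	return ((uint64_t)(len + overhead) * mult) >> shift;
}

int main(void)
{
	uint32_t mult;
	uint8_t shift;
	uint64_t rate = 125000000;	/* 1 Gbit/s expressed in bytes/s */

	precompute(rate, &mult, &shift);
	/* 1500 data bytes + 24 bytes of overhead at 1 Gbit/s = 12192 ns */
	printf("%llu ns\n", (unsigned long long)l2t_ns(mult, shift, 24, 1500));
	return 0;
}
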
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index ae16531d0d35..94ce082b29dc 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1160,6 +1160,8 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
1160 } 1160 }
1161} 1161}
1162 1162
1163extern void xfrm_garbage_collect(struct net *net);
1164
1163#else 1165#else
1164 1166
1165static inline void xfrm_sk_free_policy(struct sock *sk) {} 1167static inline void xfrm_sk_free_policy(struct sock *sk) {}
@@ -1194,6 +1196,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1194{ 1196{
1195 return 1; 1197 return 1;
1196} 1198}
1199static inline void xfrm_garbage_collect(struct net *net)
1200{
1201}
1197#endif 1202#endif
1198 1203
1199static __inline__ 1204static __inline__
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index d4609029f014..385c6329a967 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -450,7 +450,8 @@ enum snd_soc_dapm_type {
450 snd_soc_dapm_aif_in, /* audio interface input */ 450 snd_soc_dapm_aif_in, /* audio interface input */
451 snd_soc_dapm_aif_out, /* audio interface output */ 451 snd_soc_dapm_aif_out, /* audio interface output */
452 snd_soc_dapm_siggen, /* signal generator */ 452 snd_soc_dapm_siggen, /* signal generator */
453 snd_soc_dapm_dai, /* link to DAI structure */ 453 snd_soc_dapm_dai_in, /* link to DAI structure */
454 snd_soc_dapm_dai_out,
454 snd_soc_dapm_dai_link, /* link between two DAI structures */ 455 snd_soc_dapm_dai_link, /* link between two DAI structures */
455}; 456};
456 457
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index e773dfa5f98f..4ea4f985f394 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -543,6 +543,7 @@ struct se_session {
543 struct list_head sess_list; 543 struct list_head sess_list;
544 struct list_head sess_acl_list; 544 struct list_head sess_acl_list;
545 struct list_head sess_cmd_list; 545 struct list_head sess_cmd_list;
546 struct list_head sess_wait_list;
546 spinlock_t sess_cmd_lock; 547 spinlock_t sess_cmd_lock;
547 struct kref sess_kref; 548 struct kref sess_kref;
548}; 549};
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index ba3471b73c07..1dcce9cc99b9 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -114,7 +114,7 @@ sense_reason_t transport_generic_new_cmd(struct se_cmd *);
114 114
115void target_execute_cmd(struct se_cmd *cmd); 115void target_execute_cmd(struct se_cmd *cmd);
116 116
117void transport_generic_free_cmd(struct se_cmd *, int); 117int transport_generic_free_cmd(struct se_cmd *, int);
118 118
119bool transport_wait_for_tasks(struct se_cmd *); 119bool transport_wait_for_tasks(struct se_cmd *);
120int transport_check_aborted_status(struct se_cmd *, int); 120int transport_check_aborted_status(struct se_cmd *, int);
@@ -123,7 +123,7 @@ int transport_send_check_condition_and_sense(struct se_cmd *,
123int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool); 123int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
124int target_put_sess_cmd(struct se_session *, struct se_cmd *); 124int target_put_sess_cmd(struct se_session *, struct se_cmd *);
125void target_sess_cmd_list_set_waiting(struct se_session *); 125void target_sess_cmd_list_set_waiting(struct se_session *);
126void target_wait_for_sess_cmds(struct se_session *, int); 126void target_wait_for_sess_cmds(struct se_session *);
127 127
128int core_alua_check_nonop_delay(struct se_cmd *); 128int core_alua_check_nonop_delay(struct se_cmd *);
129 129
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index ab5d4992e568..bdc6e87ff3eb 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -261,6 +261,7 @@ header-y += net_dropmon.h
261header-y += net_tstamp.h 261header-y += net_tstamp.h
262header-y += netconf.h 262header-y += netconf.h
263header-y += netdevice.h 263header-y += netdevice.h
264header-y += netlink_diag.h
264header-y += netfilter.h 265header-y += netfilter.h
265header-y += netfilter_arp.h 266header-y += netfilter_arp.h
266header-y += netfilter_bridge.h 267header-y += netfilter_bridge.h
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index a5c86fc34a37..d88c8ee00c8b 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -783,6 +783,7 @@ struct kvm_dirty_tlb {
783#define KVM_REG_IA64 0x3000000000000000ULL 783#define KVM_REG_IA64 0x3000000000000000ULL
784#define KVM_REG_ARM 0x4000000000000000ULL 784#define KVM_REG_ARM 0x4000000000000000ULL
785#define KVM_REG_S390 0x5000000000000000ULL 785#define KVM_REG_S390 0x5000000000000000ULL
786#define KVM_REG_MIPS 0x7000000000000000ULL
786 787
787#define KVM_REG_SIZE_SHIFT 52 788#define KVM_REG_SIZE_SHIFT 52
788#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL 789#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index 62ca9a77c1d6..aeb4e9a0c5d1 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -748,6 +748,7 @@ struct omap_dss_driver {
748}; 748};
749 749
750enum omapdss_version omapdss_get_version(void); 750enum omapdss_version omapdss_get_version(void);
751bool omapdss_is_initialized(void);
751 752
752int omap_dss_register_driver(struct omap_dss_driver *); 753int omap_dss_register_driver(struct omap_dss_driver *);
753void omap_dss_unregister_driver(struct omap_dss_driver *); 754void omap_dss_unregister_driver(struct omap_dss_driver *);
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 0a7515c1e3a4..569c07f2e344 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -70,6 +70,7 @@ struct xenbus_device {
70 struct device dev; 70 struct device dev;
71 enum xenbus_state state; 71 enum xenbus_state state;
72 struct completion down; 72 struct completion down;
73 struct work_struct work;
73}; 74};
74 75
75static inline struct xenbus_device *to_xenbus_device(struct device *dev) 76static inline struct xenbus_device *to_xenbus_device(struct device *dev)
diff --git a/init/Kconfig b/init/Kconfig
index 9d3a7887a6d3..2d9b83104dcf 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -431,6 +431,7 @@ choice
431config TREE_RCU 431config TREE_RCU
432 bool "Tree-based hierarchical RCU" 432 bool "Tree-based hierarchical RCU"
433 depends on !PREEMPT && SMP 433 depends on !PREEMPT && SMP
434 select IRQ_WORK
434 help 435 help
435 This option selects the RCU implementation that is 436 This option selects the RCU implementation that is
436 designed for very large SMP system with hundreds or 437 designed for very large SMP system with hundreds or
diff --git a/kernel/audit.c b/kernel/audit.c
index 21c7fa615bd3..91e53d04b6a9 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1056,7 +1056,7 @@ static inline void audit_get_stamp(struct audit_context *ctx,
1056static void wait_for_auditd(unsigned long sleep_time) 1056static void wait_for_auditd(unsigned long sleep_time)
1057{ 1057{
1058 DECLARE_WAITQUEUE(wait, current); 1058 DECLARE_WAITQUEUE(wait, current);
1059 set_current_state(TASK_INTERRUPTIBLE); 1059 set_current_state(TASK_UNINTERRUPTIBLE);
1060 add_wait_queue(&audit_backlog_wait, &wait); 1060 add_wait_queue(&audit_backlog_wait, &wait);
1061 1061
1062 if (audit_backlog_limit && 1062 if (audit_backlog_limit &&
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index a291aa23fb3f..43c307dc9453 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -658,6 +658,7 @@ int audit_add_tree_rule(struct audit_krule *rule)
658 struct vfsmount *mnt; 658 struct vfsmount *mnt;
659 int err; 659 int err;
660 660
661 rule->tree = NULL;
661 list_for_each_entry(tree, &tree_list, list) { 662 list_for_each_entry(tree, &tree_list, list) {
662 if (!strcmp(seed->pathname, tree->pathname)) { 663 if (!strcmp(seed->pathname, tree->pathname)) {
663 put_tree(seed); 664 put_tree(seed);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2a9926275f80..a7c9e6ddb979 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1686,11 +1686,14 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
1686 */ 1686 */
1687 cgroup_drop_root(opts.new_root); 1687 cgroup_drop_root(opts.new_root);
1688 1688
1689 if (((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) && 1689 if (root->flags != opts.flags) {
1690 root->flags != opts.flags) { 1690 if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) {
1691 pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n"); 1691 pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n");
1692 ret = -EINVAL; 1692 ret = -EINVAL;
1693 goto drop_new_super; 1693 goto drop_new_super;
1694 } else {
1695 pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n");
1696 }
1694 } 1697 }
1695 1698
1696 /* no subsys rebinding, so refcounts don't change */ 1699 /* no subsys rebinding, so refcounts don't change */
@@ -2699,13 +2702,14 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
2699 goto out; 2702 goto out;
2700 } 2703 }
2701 2704
2705 cfe->type = (void *)cft;
2706 cfe->dentry = dentry;
2707 dentry->d_fsdata = cfe;
2708 simple_xattrs_init(&cfe->xattrs);
2709
2702 mode = cgroup_file_mode(cft); 2710 mode = cgroup_file_mode(cft);
2703 error = cgroup_create_file(dentry, mode | S_IFREG, cgrp->root->sb); 2711 error = cgroup_create_file(dentry, mode | S_IFREG, cgrp->root->sb);
2704 if (!error) { 2712 if (!error) {
2705 cfe->type = (void *)cft;
2706 cfe->dentry = dentry;
2707 dentry->d_fsdata = cfe;
2708 simple_xattrs_init(&cfe->xattrs);
2709 list_add_tail(&cfe->node, &parent->files); 2713 list_add_tail(&cfe->node, &parent->files);
2710 cfe = NULL; 2714 cfe = NULL;
2711 } 2715 }
@@ -2953,11 +2957,8 @@ struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
2953 WARN_ON_ONCE(!rcu_read_lock_held()); 2957 WARN_ON_ONCE(!rcu_read_lock_held());
2954 2958
2955 /* if first iteration, pretend we just visited @cgroup */ 2959 /* if first iteration, pretend we just visited @cgroup */
2956 if (!pos) { 2960 if (!pos)
2957 if (list_empty(&cgroup->children))
2958 return NULL;
2959 pos = cgroup; 2961 pos = cgroup;
2960 }
2961 2962
2962 /* visit the first child if exists */ 2963 /* visit the first child if exists */
2963 next = list_first_or_null_rcu(&pos->children, struct cgroup, sibling); 2964 next = list_first_or_null_rcu(&pos->children, struct cgroup, sibling);
@@ -2965,14 +2966,14 @@ struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
2965 return next; 2966 return next;
2966 2967
2967 /* no child, visit my or the closest ancestor's next sibling */ 2968 /* no child, visit my or the closest ancestor's next sibling */
2968 do { 2969 while (pos != cgroup) {
2969 next = list_entry_rcu(pos->sibling.next, struct cgroup, 2970 next = list_entry_rcu(pos->sibling.next, struct cgroup,
2970 sibling); 2971 sibling);
2971 if (&next->sibling != &pos->parent->children) 2972 if (&next->sibling != &pos->parent->children)
2972 return next; 2973 return next;
2973 2974
2974 pos = pos->parent; 2975 pos = pos->parent;
2975 } while (pos != cgroup); 2976 }
2976 2977
2977 return NULL; 2978 return NULL;
2978} 2979}
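
The simplified cgroup_next_descendant_pre() treats the starting cgroup as "just visited", descends to a first child when one exists, and otherwise climbs toward the start point looking for a next sibling; the removed list_empty() special case is no longer needed because an empty children list simply means there is no first child, and the while loop never executes when pos is already the starting cgroup. A hypothetical userspace model of the same pre-order iteration, with plain pointers instead of RCU-protected lists, only to make the control flow visible:

#include <stdio.h>
#include <stddef.h>

struct node {
	const char *name;
	struct node *parent;
	struct node *first_child;
	struct node *next_sibling;
};

/* Return the next descendant of root in pre-order; a NULL pos means "start". */
static struct node *next_pre(struct node *pos, struct node *root)
{
	if (!pos)			/* first iteration: pretend we just visited root */
		pos = root;

	if (pos->first_child)		/* visit the first child if it exists */
		return pos->first_child;

	while (pos != root) {		/* else my or the closest ancestor's next sibling */
		if (pos->next_sibling)
			return pos->next_sibling;
		pos = pos->parent;
	}
	return NULL;			/* climbed back to the start: iteration done */
}

int main(void)
{
	struct node a = { "A" }, b = { "B" }, c = { "C" }, d = { "D" };

	/* A has children B and D; B has child C.  Pre-order: B C D. */
	a.first_child = &b;
	b.parent = &a; b.first_child = &c; b.next_sibling = &d;
	c.parent = &b;
	d.parent = &a;

	for (struct node *p = next_pre(NULL, &a); p; p = next_pre(p, &a))
		printf("%s\n", p->name);
	return 0;
}
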
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 66677003e223..383f8231e436 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -15,7 +15,6 @@
15 */ 15 */
16 16
17#include <linux/context_tracking.h> 17#include <linux/context_tracking.h>
18#include <linux/kvm_host.h>
19#include <linux/rcupdate.h> 18#include <linux/rcupdate.h>
20#include <linux/sched.h> 19#include <linux/sched.h>
21#include <linux/hardirq.h> 20#include <linux/hardirq.h>
diff --git a/kernel/cpu.c b/kernel/cpu.c
index b5e4ab2d427e..198a38883e64 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -133,6 +133,27 @@ static void cpu_hotplug_done(void)
133 mutex_unlock(&cpu_hotplug.lock); 133 mutex_unlock(&cpu_hotplug.lock);
134} 134}
135 135
136/*
137 * Wait for currently running CPU hotplug operations to complete (if any) and
138 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
139 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
140 * hotplug path before performing hotplug operations. So acquiring that lock
141 * guarantees mutual exclusion from any currently running hotplug operations.
142 */
143void cpu_hotplug_disable(void)
144{
145 cpu_maps_update_begin();
146 cpu_hotplug_disabled = 1;
147 cpu_maps_update_done();
148}
149
150void cpu_hotplug_enable(void)
151{
152 cpu_maps_update_begin();
153 cpu_hotplug_disabled = 0;
154 cpu_maps_update_done();
155}
156
136#else /* #if CONFIG_HOTPLUG_CPU */ 157#else /* #if CONFIG_HOTPLUG_CPU */
137static void cpu_hotplug_begin(void) {} 158static void cpu_hotplug_begin(void) {}
138static void cpu_hotplug_done(void) {} 159static void cpu_hotplug_done(void) {}
@@ -541,36 +562,6 @@ static int __init alloc_frozen_cpus(void)
541core_initcall(alloc_frozen_cpus); 562core_initcall(alloc_frozen_cpus);
542 563
543/* 564/*
544 * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
545 * hotplug when tasks are about to be frozen. Also, don't allow the freezer
546 * to continue until any currently running CPU hotplug operation gets
547 * completed.
548 * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
549 * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
550 * CPU hotplug path and released only after it is complete. Thus, we
551 * (and hence the freezer) will block here until any currently running CPU
552 * hotplug operation gets completed.
553 */
554void cpu_hotplug_disable_before_freeze(void)
555{
556 cpu_maps_update_begin();
557 cpu_hotplug_disabled = 1;
558 cpu_maps_update_done();
559}
560
561
562/*
563 * When tasks have been thawed, re-enable regular CPU hotplug (which had been
564 * disabled while beginning to freeze tasks).
565 */
566void cpu_hotplug_enable_after_thaw(void)
567{
568 cpu_maps_update_begin();
569 cpu_hotplug_disabled = 0;
570 cpu_maps_update_done();
571}
572
573/*
574 * When callbacks for CPU hotplug notifications are being executed, we must 565 * When callbacks for CPU hotplug notifications are being executed, we must
575 * ensure that the state of the system with respect to the tasks being frozen 566 * ensure that the state of the system with respect to the tasks being frozen
576 * or not, as reported by the notification, remains unchanged *throughout the 567 * or not, as reported by the notification, remains unchanged *throughout the
@@ -589,12 +580,12 @@ cpu_hotplug_pm_callback(struct notifier_block *nb,
589 580
590 case PM_SUSPEND_PREPARE: 581 case PM_SUSPEND_PREPARE:
591 case PM_HIBERNATION_PREPARE: 582 case PM_HIBERNATION_PREPARE:
592 cpu_hotplug_disable_before_freeze(); 583 cpu_hotplug_disable();
593 break; 584 break;
594 585
595 case PM_POST_SUSPEND: 586 case PM_POST_SUSPEND:
596 case PM_POST_HIBERNATION: 587 case PM_POST_HIBERNATION:
597 cpu_hotplug_enable_after_thaw(); 588 cpu_hotplug_enable();
598 break; 589 break;
599 590
600 default: 591 default:
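
The freezer-only helpers become general-purpose cpu_hotplug_disable() and cpu_hotplug_enable(): both take cpu_add_remove_lock via cpu_maps_update_begin()/done() and flip cpu_hotplug_disabled, so any caller that needs the set of CPUs to stay put across a multi-step operation can now use them, not just the suspend/hibernation notifier. A hedged sketch of such a caller (do_percpu_setup() is an illustrative placeholder):

static int setup_all_cpus(void)
{
	int cpu, ret = 0;

	cpu_hotplug_disable();			/* no CPUs may be added or removed below */
	for_each_online_cpu(cpu) {
		ret = do_percpu_setup(cpu);	/* illustrative per-CPU work */
		if (ret)
			break;
	}
	cpu_hotplug_enable();			/* re-allow hotplug requests */
	return ret;
}
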
diff --git a/kernel/cpu/idle.c b/kernel/cpu/idle.c
index d5585f5e038e..e695c0a0bcb5 100644
--- a/kernel/cpu/idle.c
+++ b/kernel/cpu/idle.c
@@ -5,6 +5,7 @@
5#include <linux/cpu.h> 5#include <linux/cpu.h>
6#include <linux/tick.h> 6#include <linux/tick.h>
7#include <linux/mm.h> 7#include <linux/mm.h>
8#include <linux/stackprotector.h>
8 9
9#include <asm/tlb.h> 10#include <asm/tlb.h>
10 11
@@ -58,6 +59,7 @@ void __weak arch_cpu_idle_dead(void) { }
58void __weak arch_cpu_idle(void) 59void __weak arch_cpu_idle(void)
59{ 60{
60 cpu_idle_force_poll = 1; 61 cpu_idle_force_poll = 1;
62 local_irq_enable();
61} 63}
62 64
63/* 65/*
@@ -112,6 +114,21 @@ static void cpu_idle_loop(void)
112 114
113void cpu_startup_entry(enum cpuhp_state state) 115void cpu_startup_entry(enum cpuhp_state state)
114{ 116{
117 /*
118 * This #ifdef needs to die, but it's too late in the cycle to
119 * make this generic (arm and sh have never invoked the canary
120 * init for the non boot cpus!). Will be fixed in 3.11
121 */
122#ifdef CONFIG_X86
123 /*
124 * If we're the non-boot CPU, nothing set the stack canary up
125 * for us. The boot CPU already has it initialized but no harm
126 * in doing it again. This is a good place for updating it, as
127 * we wont ever return from this function (so the invalid
128 * canaries already on the stack wont ever trigger).
129 */
130 boot_init_stack_canary();
131#endif
115 current_set_polling(); 132 current_set_polling();
116 arch_cpu_idle_prepare(); 133 arch_cpu_idle_prepare();
117 cpu_idle_loop(); 134 cpu_idle_loop();
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9dc297faf7c0..b391907d5352 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -196,9 +196,6 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
196static void update_context_time(struct perf_event_context *ctx); 196static void update_context_time(struct perf_event_context *ctx);
197static u64 perf_event_time(struct perf_event *event); 197static u64 perf_event_time(struct perf_event *event);
198 198
199static void ring_buffer_attach(struct perf_event *event,
200 struct ring_buffer *rb);
201
202void __weak perf_event_print_debug(void) { } 199void __weak perf_event_print_debug(void) { }
203 200
204extern __weak const char *perf_pmu_name(void) 201extern __weak const char *perf_pmu_name(void)
@@ -2918,6 +2915,7 @@ static void free_event_rcu(struct rcu_head *head)
2918} 2915}
2919 2916
2920static void ring_buffer_put(struct ring_buffer *rb); 2917static void ring_buffer_put(struct ring_buffer *rb);
2918static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
2921 2919
2922static void free_event(struct perf_event *event) 2920static void free_event(struct perf_event *event)
2923{ 2921{
@@ -2942,15 +2940,30 @@ static void free_event(struct perf_event *event)
2942 if (has_branch_stack(event)) { 2940 if (has_branch_stack(event)) {
2943 static_key_slow_dec_deferred(&perf_sched_events); 2941 static_key_slow_dec_deferred(&perf_sched_events);
2944 /* is system-wide event */ 2942 /* is system-wide event */
2945 if (!(event->attach_state & PERF_ATTACH_TASK)) 2943 if (!(event->attach_state & PERF_ATTACH_TASK)) {
2946 atomic_dec(&per_cpu(perf_branch_stack_events, 2944 atomic_dec(&per_cpu(perf_branch_stack_events,
2947 event->cpu)); 2945 event->cpu));
2946 }
2948 } 2947 }
2949 } 2948 }
2950 2949
2951 if (event->rb) { 2950 if (event->rb) {
2952 ring_buffer_put(event->rb); 2951 struct ring_buffer *rb;
2953 event->rb = NULL; 2952
2953 /*
2954 * Can happen when we close an event with re-directed output.
2955 *
2956 * Since we have a 0 refcount, perf_mmap_close() will skip
2957 * over us; possibly making our ring_buffer_put() the last.
2958 */
2959 mutex_lock(&event->mmap_mutex);
2960 rb = event->rb;
2961 if (rb) {
2962 rcu_assign_pointer(event->rb, NULL);
2963 ring_buffer_detach(event, rb);
2964 ring_buffer_put(rb); /* could be last */
2965 }
2966 mutex_unlock(&event->mmap_mutex);
2954 } 2967 }
2955 2968
2956 if (is_cgroup_event(event)) 2969 if (is_cgroup_event(event))
@@ -3188,30 +3201,13 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
3188 unsigned int events = POLL_HUP; 3201 unsigned int events = POLL_HUP;
3189 3202
3190 /* 3203 /*
3191 * Race between perf_event_set_output() and perf_poll(): perf_poll() 3204 * Pin the event->rb by taking event->mmap_mutex; otherwise
3192 * grabs the rb reference but perf_event_set_output() overrides it. 3205 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
3193 * Here is the timeline for two threads T1, T2:
3194 * t0: T1, rb = rcu_dereference(event->rb)
3195 * t1: T2, old_rb = event->rb
3196 * t2: T2, event->rb = new rb
3197 * t3: T2, ring_buffer_detach(old_rb)
3198 * t4: T1, ring_buffer_attach(rb1)
3199 * t5: T1, poll_wait(event->waitq)
3200 *
3201 * To avoid this problem, we grab mmap_mutex in perf_poll()
3202 * thereby ensuring that the assignment of the new ring buffer
3203 * and the detachment of the old buffer appear atomic to perf_poll()
3204 */ 3206 */
3205 mutex_lock(&event->mmap_mutex); 3207 mutex_lock(&event->mmap_mutex);
3206 3208 rb = event->rb;
3207 rcu_read_lock(); 3209 if (rb)
3208 rb = rcu_dereference(event->rb);
3209 if (rb) {
3210 ring_buffer_attach(event, rb);
3211 events = atomic_xchg(&rb->poll, 0); 3210 events = atomic_xchg(&rb->poll, 0);
3212 }
3213 rcu_read_unlock();
3214
3215 mutex_unlock(&event->mmap_mutex); 3211 mutex_unlock(&event->mmap_mutex);
3216 3212
3217 poll_wait(file, &event->waitq, wait); 3213 poll_wait(file, &event->waitq, wait);
@@ -3521,16 +3517,12 @@ static void ring_buffer_attach(struct perf_event *event,
3521 return; 3517 return;
3522 3518
3523 spin_lock_irqsave(&rb->event_lock, flags); 3519 spin_lock_irqsave(&rb->event_lock, flags);
3524 if (!list_empty(&event->rb_entry)) 3520 if (list_empty(&event->rb_entry))
3525 goto unlock; 3521 list_add(&event->rb_entry, &rb->event_list);
3526
3527 list_add(&event->rb_entry, &rb->event_list);
3528unlock:
3529 spin_unlock_irqrestore(&rb->event_lock, flags); 3522 spin_unlock_irqrestore(&rb->event_lock, flags);
3530} 3523}
3531 3524
3532static void ring_buffer_detach(struct perf_event *event, 3525static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
3533 struct ring_buffer *rb)
3534{ 3526{
3535 unsigned long flags; 3527 unsigned long flags;
3536 3528
@@ -3549,13 +3541,10 @@ static void ring_buffer_wakeup(struct perf_event *event)
3549 3541
3550 rcu_read_lock(); 3542 rcu_read_lock();
3551 rb = rcu_dereference(event->rb); 3543 rb = rcu_dereference(event->rb);
3552 if (!rb) 3544 if (rb) {
3553 goto unlock; 3545 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
3554 3546 wake_up_all(&event->waitq);
3555 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) 3547 }
3556 wake_up_all(&event->waitq);
3557
3558unlock:
3559 rcu_read_unlock(); 3548 rcu_read_unlock();
3560} 3549}
3561 3550
@@ -3584,18 +3573,10 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
3584 3573
3585static void ring_buffer_put(struct ring_buffer *rb) 3574static void ring_buffer_put(struct ring_buffer *rb)
3586{ 3575{
3587 struct perf_event *event, *n;
3588 unsigned long flags;
3589
3590 if (!atomic_dec_and_test(&rb->refcount)) 3576 if (!atomic_dec_and_test(&rb->refcount))
3591 return; 3577 return;
3592 3578
3593 spin_lock_irqsave(&rb->event_lock, flags); 3579 WARN_ON_ONCE(!list_empty(&rb->event_list));
3594 list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
3595 list_del_init(&event->rb_entry);
3596 wake_up_all(&event->waitq);
3597 }
3598 spin_unlock_irqrestore(&rb->event_lock, flags);
3599 3580
3600 call_rcu(&rb->rcu_head, rb_free_rcu); 3581 call_rcu(&rb->rcu_head, rb_free_rcu);
3601} 3582}
@@ -3605,26 +3586,100 @@ static void perf_mmap_open(struct vm_area_struct *vma)
3605 struct perf_event *event = vma->vm_file->private_data; 3586 struct perf_event *event = vma->vm_file->private_data;
3606 3587
3607 atomic_inc(&event->mmap_count); 3588 atomic_inc(&event->mmap_count);
3589 atomic_inc(&event->rb->mmap_count);
3608} 3590}
3609 3591
3592/*
3593 * A buffer can be mmap()ed multiple times; either directly through the same
3594 * event, or through other events by use of perf_event_set_output().
3595 *
3596 * In order to undo the VM accounting done by perf_mmap() we need to destroy
3597 * the buffer here, where we still have a VM context. This means we need
3598 * to detach all events redirecting to us.
3599 */
3610static void perf_mmap_close(struct vm_area_struct *vma) 3600static void perf_mmap_close(struct vm_area_struct *vma)
3611{ 3601{
3612 struct perf_event *event = vma->vm_file->private_data; 3602 struct perf_event *event = vma->vm_file->private_data;
3613 3603
3614 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { 3604 struct ring_buffer *rb = event->rb;
3615 unsigned long size = perf_data_size(event->rb); 3605 struct user_struct *mmap_user = rb->mmap_user;
3616 struct user_struct *user = event->mmap_user; 3606 int mmap_locked = rb->mmap_locked;
3617 struct ring_buffer *rb = event->rb; 3607 unsigned long size = perf_data_size(rb);
3608
3609 atomic_dec(&rb->mmap_count);
3610
3611 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
3612 return;
3618 3613
3619 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); 3614 /* Detach current event from the buffer. */
3620 vma->vm_mm->pinned_vm -= event->mmap_locked; 3615 rcu_assign_pointer(event->rb, NULL);
3621 rcu_assign_pointer(event->rb, NULL); 3616 ring_buffer_detach(event, rb);
3622 ring_buffer_detach(event, rb); 3617 mutex_unlock(&event->mmap_mutex);
3618
3619 /* If there's still other mmap()s of this buffer, we're done. */
3620 if (atomic_read(&rb->mmap_count)) {
3621 ring_buffer_put(rb); /* can't be last */
3622 return;
3623 }
3624
3625 /*
3626 * No other mmap()s, detach from all other events that might redirect
3627 * into the now unreachable buffer. Somewhat complicated by the
3628 * fact that rb::event_lock otherwise nests inside mmap_mutex.
3629 */
3630again:
3631 rcu_read_lock();
3632 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
3633 if (!atomic_long_inc_not_zero(&event->refcount)) {
3634 /*
3635 * This event is en-route to free_event() which will
3636 * detach it and remove it from the list.
3637 */
3638 continue;
3639 }
3640 rcu_read_unlock();
3641
3642 mutex_lock(&event->mmap_mutex);
3643 /*
3644 * Check we didn't race with perf_event_set_output() which can
3645 * swizzle the rb from under us while we were waiting to
3646 * acquire mmap_mutex.
3647 *
3648 * If we find a different rb; ignore this event, a next
3649 * iteration will no longer find it on the list. We have to
3650 * still restart the iteration to make sure we're not now
3651 * iterating the wrong list.
3652 */
3653 if (event->rb == rb) {
3654 rcu_assign_pointer(event->rb, NULL);
3655 ring_buffer_detach(event, rb);
3656 ring_buffer_put(rb); /* can't be last, we still have one */
3657 }
3623 mutex_unlock(&event->mmap_mutex); 3658 mutex_unlock(&event->mmap_mutex);
3659 put_event(event);
3624 3660
3625 ring_buffer_put(rb); 3661 /*
3626 free_uid(user); 3662 * Restart the iteration; either we're on the wrong list or
3663 * destroyed its integrity by doing a deletion.
3664 */
3665 goto again;
3627 } 3666 }
3667 rcu_read_unlock();
3668
3669 /*
3670 * It could be there's still a few 0-ref events on the list; they'll
3671 * get cleaned up by free_event() -- they'll also still have their
3672 * ref on the rb and will free it whenever they are done with it.
3673 *
3674 * Aside from that, this buffer is 'fully' detached and unmapped,
3675 * undo the VM accounting.
3676 */
3677
3678 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
3679 vma->vm_mm->pinned_vm -= mmap_locked;
3680 free_uid(mmap_user);
3681
3682 ring_buffer_put(rb); /* could be last */
3628} 3683}
3629 3684
3630static const struct vm_operations_struct perf_mmap_vmops = { 3685static const struct vm_operations_struct perf_mmap_vmops = {
@@ -3674,12 +3729,24 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
3674 return -EINVAL; 3729 return -EINVAL;
3675 3730
3676 WARN_ON_ONCE(event->ctx->parent_ctx); 3731 WARN_ON_ONCE(event->ctx->parent_ctx);
3732again:
3677 mutex_lock(&event->mmap_mutex); 3733 mutex_lock(&event->mmap_mutex);
3678 if (event->rb) { 3734 if (event->rb) {
3679 if (event->rb->nr_pages == nr_pages) 3735 if (event->rb->nr_pages != nr_pages) {
3680 atomic_inc(&event->rb->refcount);
3681 else
3682 ret = -EINVAL; 3736 ret = -EINVAL;
3737 goto unlock;
3738 }
3739
3740 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
3741 /*
3742 * Raced against perf_mmap_close() through
3743 * perf_event_set_output(). Try again, hope for better
3744 * luck.
3745 */
3746 mutex_unlock(&event->mmap_mutex);
3747 goto again;
3748 }
3749
3683 goto unlock; 3750 goto unlock;
3684 } 3751 }
3685 3752
@@ -3720,12 +3787,16 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
3720 ret = -ENOMEM; 3787 ret = -ENOMEM;
3721 goto unlock; 3788 goto unlock;
3722 } 3789 }
3723 rcu_assign_pointer(event->rb, rb); 3790
3791 atomic_set(&rb->mmap_count, 1);
3792 rb->mmap_locked = extra;
3793 rb->mmap_user = get_current_user();
3724 3794
3725 atomic_long_add(user_extra, &user->locked_vm); 3795 atomic_long_add(user_extra, &user->locked_vm);
3726 event->mmap_locked = extra; 3796 vma->vm_mm->pinned_vm += extra;
3727 event->mmap_user = get_current_user(); 3797
3728 vma->vm_mm->pinned_vm += event->mmap_locked; 3798 ring_buffer_attach(event, rb);
3799 rcu_assign_pointer(event->rb, rb);
3729 3800
3730 perf_event_update_userpage(event); 3801 perf_event_update_userpage(event);
3731 3802
@@ -3734,7 +3805,11 @@ unlock:
3734 atomic_inc(&event->mmap_count); 3805 atomic_inc(&event->mmap_count);
3735 mutex_unlock(&event->mmap_mutex); 3806 mutex_unlock(&event->mmap_mutex);
3736 3807
3737 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 3808 /*
3809 * Since pinned accounting is per vm we cannot allow fork() to copy our
3810 * vma.
3811 */
3812 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
3738 vma->vm_ops = &perf_mmap_vmops; 3813 vma->vm_ops = &perf_mmap_vmops;
3739 3814
3740 return ret; 3815 return ret;
@@ -6412,6 +6487,8 @@ set:
6412 if (atomic_read(&event->mmap_count)) 6487 if (atomic_read(&event->mmap_count))
6413 goto unlock; 6488 goto unlock;
6414 6489
6490 old_rb = event->rb;
6491
6415 if (output_event) { 6492 if (output_event) {
6416 /* get the rb we want to redirect to */ 6493 /* get the rb we want to redirect to */
6417 rb = ring_buffer_get(output_event); 6494 rb = ring_buffer_get(output_event);
@@ -6419,16 +6496,28 @@ set:
6419 goto unlock; 6496 goto unlock;
6420 } 6497 }
6421 6498
6422 old_rb = event->rb;
6423 rcu_assign_pointer(event->rb, rb);
6424 if (old_rb) 6499 if (old_rb)
6425 ring_buffer_detach(event, old_rb); 6500 ring_buffer_detach(event, old_rb);
6501
6502 if (rb)
6503 ring_buffer_attach(event, rb);
6504
6505 rcu_assign_pointer(event->rb, rb);
6506
6507 if (old_rb) {
6508 ring_buffer_put(old_rb);
6509 /*
6510 * Since we detached before setting the new rb, so that we
6511 * could attach the new rb, we could have missed a wakeup.
6512 * Provide it now.
6513 */
6514 wake_up_all(&event->waitq);
6515 }
6516
6426 ret = 0; 6517 ret = 0;
6427unlock: 6518unlock:
6428 mutex_unlock(&event->mmap_mutex); 6519 mutex_unlock(&event->mmap_mutex);
6429 6520
6430 if (old_rb)
6431 ring_buffer_put(old_rb);
6432out: 6521out:
6433 return ret; 6522 return ret;
6434} 6523}
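
Much of this perf rework leans on one idiom: take a reference only while the count is still non-zero, otherwise treat the object as already dying and skip it or retry. perf_mmap() retries when rb->mmap_count has dropped to zero under it, and perf_mmap_close() uses atomic_long_inc_not_zero() on event->refcount to safely detach redirected events while walking rb->event_list under RCU. A stripped-down, hedged sketch of that pattern outside perf (struct thing and its fields are illustrative):

/* Succeeds only while someone else still holds a reference. */
static bool thing_tryget(struct thing *t)
{
	return atomic_inc_not_zero(&t->refcount);
}

static void thing_put(struct thing *t)
{
	if (atomic_dec_and_test(&t->refcount))
		kfree_rcu(t, rcu_head);		/* last reference: free after a grace period */
}

static struct thing *thing_lookup(int id)
{
	struct thing *t;

	rcu_read_lock();
	list_for_each_entry_rcu(t, &thing_list, node) {
		if (t->id == id && thing_tryget(t)) {
			rcu_read_unlock();
			return t;		/* caller now owns a reference */
		}
	}
	rcu_read_unlock();
	return NULL;				/* 0-ref entries are simply skipped */
}
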
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index a64f8aeb5c1f..20185ea64aa6 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -120,7 +120,7 @@ static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
120 list_for_each_entry(iter, &bp_task_head, hw.bp_list) { 120 list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
121 if (iter->hw.bp_target == tsk && 121 if (iter->hw.bp_target == tsk &&
122 find_slot_idx(iter) == type && 122 find_slot_idx(iter) == type &&
123 cpu == iter->cpu) 123 (iter->cpu < 0 || cpu == iter->cpu))
124 count += hw_breakpoint_weight(iter); 124 count += hw_breakpoint_weight(iter);
125 } 125 }
126 126
@@ -149,7 +149,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
149 return; 149 return;
150 } 150 }
151 151
152 for_each_online_cpu(cpu) { 152 for_each_possible_cpu(cpu) {
153 unsigned int nr; 153 unsigned int nr;
154 154
155 nr = per_cpu(nr_cpu_bp_pinned[type], cpu); 155 nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
@@ -235,7 +235,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
235 if (cpu >= 0) { 235 if (cpu >= 0) {
236 toggle_bp_task_slot(bp, cpu, enable, type, weight); 236 toggle_bp_task_slot(bp, cpu, enable, type, weight);
237 } else { 237 } else {
238 for_each_online_cpu(cpu) 238 for_each_possible_cpu(cpu)
239 toggle_bp_task_slot(bp, cpu, enable, type, weight); 239 toggle_bp_task_slot(bp, cpu, enable, type, weight);
240 } 240 }
241 241
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index eb675c4d59df..ca6599723be5 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -31,6 +31,10 @@ struct ring_buffer {
31 spinlock_t event_lock; 31 spinlock_t event_lock;
32 struct list_head event_list; 32 struct list_head event_list;
33 33
34 atomic_t mmap_count;
35 unsigned long mmap_locked;
36 struct user_struct *mmap_user;
37
34 struct perf_event_mmap_page *user_page; 38 struct perf_event_mmap_page *user_page;
35 void *data_pages[0]; 39 void *data_pages[0];
36}; 40};
diff --git a/kernel/exit.c b/kernel/exit.c
index af2eb3cbd499..7bb73f9d09db 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -649,7 +649,6 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
649 * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) 649 * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
650 */ 650 */
651 forget_original_parent(tsk); 651 forget_original_parent(tsk);
652 exit_task_namespaces(tsk);
653 652
654 write_lock_irq(&tasklist_lock); 653 write_lock_irq(&tasklist_lock);
655 if (group_dead) 654 if (group_dead)
@@ -795,6 +794,7 @@ void do_exit(long code)
795 exit_shm(tsk); 794 exit_shm(tsk);
796 exit_files(tsk); 795 exit_files(tsk);
797 exit_fs(tsk); 796 exit_fs(tsk);
797 exit_task_namespaces(tsk);
798 exit_task_work(tsk); 798 exit_task_work(tsk);
799 check_stack_usage(); 799 check_stack_usage();
800 exit_thread(); 800 exit_thread();
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 5a83dde8ca0c..54a4d5223238 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -143,7 +143,10 @@ static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
143 * irq_domain_add_simple() - Allocate and register a simple irq_domain. 143 * irq_domain_add_simple() - Allocate and register a simple irq_domain.
144 * @of_node: pointer to interrupt controller's device tree node. 144 * @of_node: pointer to interrupt controller's device tree node.
145 * @size: total number of irqs in mapping 145 * @size: total number of irqs in mapping
146 * @first_irq: first number of irq block assigned to the domain 146 * @first_irq: first number of irq block assigned to the domain,
147 * pass zero to assign irqs on-the-fly. This will result in a
148 * linear IRQ domain so it is important to use irq_create_mapping()
149 * for each used IRQ, especially when SPARSE_IRQ is enabled.
147 * @ops: map/unmap domain callbacks 150 * @ops: map/unmap domain callbacks
148 * @host_data: Controller private data pointer 151 * @host_data: Controller private data pointer
149 * 152 *
@@ -191,6 +194,7 @@ struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
191 /* A linear domain is the default */ 194 /* A linear domain is the default */
192 return irq_domain_add_linear(of_node, size, ops, host_data); 195 return irq_domain_add_linear(of_node, size, ops, host_data);
193} 196}
197EXPORT_SYMBOL_GPL(irq_domain_add_simple);
194 198
195/** 199/**
196 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain. 200 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
@@ -397,11 +401,12 @@ static void irq_domain_disassociate_many(struct irq_domain *domain,
397 while (count--) { 401 while (count--) {
398 int irq = irq_base + count; 402 int irq = irq_base + count;
399 struct irq_data *irq_data = irq_get_irq_data(irq); 403 struct irq_data *irq_data = irq_get_irq_data(irq);
400 irq_hw_number_t hwirq = irq_data->hwirq; 404 irq_hw_number_t hwirq;
401 405
402 if (WARN_ON(!irq_data || irq_data->domain != domain)) 406 if (WARN_ON(!irq_data || irq_data->domain != domain))
403 continue; 407 continue;
404 408
409 hwirq = irq_data->hwirq;
405 irq_set_status_flags(irq, IRQ_NOREQUEST); 410 irq_set_status_flags(irq, IRQ_NOREQUEST);
406 411
407 /* remove chip and handler */ 412 /* remove chip and handler */
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 3fed7f0cbcdf..bddf3b201a48 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -467,6 +467,7 @@ static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
467/* Optimization staging list, protected by kprobe_mutex */ 467/* Optimization staging list, protected by kprobe_mutex */
468static LIST_HEAD(optimizing_list); 468static LIST_HEAD(optimizing_list);
469static LIST_HEAD(unoptimizing_list); 469static LIST_HEAD(unoptimizing_list);
470static LIST_HEAD(freeing_list);
470 471
471static void kprobe_optimizer(struct work_struct *work); 472static void kprobe_optimizer(struct work_struct *work);
472static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); 473static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
@@ -504,7 +505,7 @@ static __kprobes void do_optimize_kprobes(void)
504 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint 505 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
505 * if need) kprobes listed on unoptimizing_list. 506 * if need) kprobes listed on unoptimizing_list.
506 */ 507 */
507static __kprobes void do_unoptimize_kprobes(struct list_head *free_list) 508static __kprobes void do_unoptimize_kprobes(void)
508{ 509{
509 struct optimized_kprobe *op, *tmp; 510 struct optimized_kprobe *op, *tmp;
510 511
@@ -515,9 +516,9 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
515 /* Ditto to do_optimize_kprobes */ 516 /* Ditto to do_optimize_kprobes */
516 get_online_cpus(); 517 get_online_cpus();
517 mutex_lock(&text_mutex); 518 mutex_lock(&text_mutex);
518 arch_unoptimize_kprobes(&unoptimizing_list, free_list); 519 arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
519 /* Loop free_list for disarming */ 520 /* Loop free_list for disarming */
520 list_for_each_entry_safe(op, tmp, free_list, list) { 521 list_for_each_entry_safe(op, tmp, &freeing_list, list) {
521 /* Disarm probes if marked disabled */ 522 /* Disarm probes if marked disabled */
522 if (kprobe_disabled(&op->kp)) 523 if (kprobe_disabled(&op->kp))
523 arch_disarm_kprobe(&op->kp); 524 arch_disarm_kprobe(&op->kp);
@@ -536,11 +537,11 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
536} 537}
537 538
538/* Reclaim all kprobes on the free_list */ 539/* Reclaim all kprobes on the free_list */
539static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list) 540static __kprobes void do_free_cleaned_kprobes(void)
540{ 541{
541 struct optimized_kprobe *op, *tmp; 542 struct optimized_kprobe *op, *tmp;
542 543
543 list_for_each_entry_safe(op, tmp, free_list, list) { 544 list_for_each_entry_safe(op, tmp, &freeing_list, list) {
544 BUG_ON(!kprobe_unused(&op->kp)); 545 BUG_ON(!kprobe_unused(&op->kp));
545 list_del_init(&op->list); 546 list_del_init(&op->list);
546 free_aggr_kprobe(&op->kp); 547 free_aggr_kprobe(&op->kp);
@@ -556,8 +557,6 @@ static __kprobes void kick_kprobe_optimizer(void)
556/* Kprobe jump optimizer */ 557/* Kprobe jump optimizer */
557static __kprobes void kprobe_optimizer(struct work_struct *work) 558static __kprobes void kprobe_optimizer(struct work_struct *work)
558{ 559{
559 LIST_HEAD(free_list);
560
561 mutex_lock(&kprobe_mutex); 560 mutex_lock(&kprobe_mutex);
562 /* Lock modules while optimizing kprobes */ 561 /* Lock modules while optimizing kprobes */
563 mutex_lock(&module_mutex); 562 mutex_lock(&module_mutex);
@@ -566,7 +565,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
566 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed) 565 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
567 * kprobes before waiting for quiesence period. 566 * kprobes before waiting for quiesence period.
568 */ 567 */
569 do_unoptimize_kprobes(&free_list); 568 do_unoptimize_kprobes();
570 569
571 /* 570 /*
572 * Step 2: Wait for quiesence period to ensure all running interrupts 571 * Step 2: Wait for quiesence period to ensure all running interrupts
@@ -581,7 +580,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
581 do_optimize_kprobes(); 580 do_optimize_kprobes();
582 581
583 /* Step 4: Free cleaned kprobes after quiesence period */ 582 /* Step 4: Free cleaned kprobes after quiesence period */
584 do_free_cleaned_kprobes(&free_list); 583 do_free_cleaned_kprobes();
585 584
586 mutex_unlock(&module_mutex); 585 mutex_unlock(&module_mutex);
587 mutex_unlock(&kprobe_mutex); 586 mutex_unlock(&kprobe_mutex);
@@ -723,8 +722,19 @@ static void __kprobes kill_optimized_kprobe(struct kprobe *p)
723 if (!list_empty(&op->list)) 722 if (!list_empty(&op->list))
724 /* Dequeue from the (un)optimization queue */ 723 /* Dequeue from the (un)optimization queue */
725 list_del_init(&op->list); 724 list_del_init(&op->list);
726
727 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 725 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
726
727 if (kprobe_unused(p)) {
728 /* Enqueue if it is unused */
729 list_add(&op->list, &freeing_list);
730 /*
731 * Remove unused probes from the hash list. After waiting
732 * for synchronization, this probe is reclaimed.
733 * (reclaiming is done by do_free_cleaned_kprobes().)
734 */
735 hlist_del_rcu(&op->kp.hlist);
736 }
737
728 /* Don't touch the code, because it is already freed. */ 738 /* Don't touch the code, because it is already freed. */
729 arch_remove_optimized_kprobe(op); 739 arch_remove_optimized_kprobe(op);
730} 740}
diff --git a/kernel/printk.c b/kernel/printk.c
index fa36e1494420..8212c1aef125 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -363,6 +363,53 @@ static void log_store(int facility, int level,
363 log_next_seq++; 363 log_next_seq++;
364} 364}
365 365
366#ifdef CONFIG_SECURITY_DMESG_RESTRICT
367int dmesg_restrict = 1;
368#else
369int dmesg_restrict;
370#endif
371
372static int syslog_action_restricted(int type)
373{
374 if (dmesg_restrict)
375 return 1;
376 /*
377 * Unless restricted, we allow "read all" and "get buffer size"
378 * for everybody.
379 */
380 return type != SYSLOG_ACTION_READ_ALL &&
381 type != SYSLOG_ACTION_SIZE_BUFFER;
382}
383
384static int check_syslog_permissions(int type, bool from_file)
385{
386 /*
387 * If this is from /proc/kmsg and we've already opened it, then we've
388 * already done the capabilities checks at open time.
389 */
390 if (from_file && type != SYSLOG_ACTION_OPEN)
391 return 0;
392
393 if (syslog_action_restricted(type)) {
394 if (capable(CAP_SYSLOG))
395 return 0;
396 /*
397 * For historical reasons, accept CAP_SYS_ADMIN too, with
398 * a warning.
399 */
400 if (capable(CAP_SYS_ADMIN)) {
401 pr_warn_once("%s (%d): Attempt to access syslog with "
402 "CAP_SYS_ADMIN but no CAP_SYSLOG "
403 "(deprecated).\n",
404 current->comm, task_pid_nr(current));
405 return 0;
406 }
407 return -EPERM;
408 }
409 return security_syslog(type);
410}
411
412
366/* /dev/kmsg - userspace message inject/listen interface */ 413/* /dev/kmsg - userspace message inject/listen interface */
367struct devkmsg_user { 414struct devkmsg_user {
368 u64 seq; 415 u64 seq;
@@ -620,7 +667,8 @@ static int devkmsg_open(struct inode *inode, struct file *file)
620 if ((file->f_flags & O_ACCMODE) == O_WRONLY) 667 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
621 return 0; 668 return 0;
622 669
623 err = security_syslog(SYSLOG_ACTION_READ_ALL); 670 err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
671 SYSLOG_FROM_READER);
624 if (err) 672 if (err)
625 return err; 673 return err;
626 674
@@ -813,45 +861,6 @@ static inline void boot_delay_msec(int level)
813} 861}
814#endif 862#endif
815 863
816#ifdef CONFIG_SECURITY_DMESG_RESTRICT
817int dmesg_restrict = 1;
818#else
819int dmesg_restrict;
820#endif
821
822static int syslog_action_restricted(int type)
823{
824 if (dmesg_restrict)
825 return 1;
826 /* Unless restricted, we allow "read all" and "get buffer size" for everybody */
827 return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
828}
829
830static int check_syslog_permissions(int type, bool from_file)
831{
832 /*
833 * If this is from /proc/kmsg and we've already opened it, then we've
834 * already done the capabilities checks at open time.
835 */
836 if (from_file && type != SYSLOG_ACTION_OPEN)
837 return 0;
838
839 if (syslog_action_restricted(type)) {
840 if (capable(CAP_SYSLOG))
841 return 0;
842 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
843 if (capable(CAP_SYS_ADMIN)) {
844 printk_once(KERN_WARNING "%s (%d): "
845 "Attempt to access syslog with CAP_SYS_ADMIN "
846 "but no CAP_SYSLOG (deprecated).\n",
847 current->comm, task_pid_nr(current));
848 return 0;
849 }
850 return -EPERM;
851 }
852 return 0;
853}
854
855#if defined(CONFIG_PRINTK_TIME) 864#if defined(CONFIG_PRINTK_TIME)
856static bool printk_time = 1; 865static bool printk_time = 1;
857#else 866#else
@@ -1249,7 +1258,7 @@ out:
1249 1258
1250SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) 1259SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
1251{ 1260{
1252 return do_syslog(type, buf, len, SYSLOG_FROM_CALL); 1261 return do_syslog(type, buf, len, SYSLOG_FROM_READER);
1253} 1262}
1254 1263
1255/* 1264/*
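
One practical consequence of the printk hunks above, shown here only as an illustration and not as part of the patch: opening /dev/kmsg for reading now goes through check_syslog_permissions(), so with kernel.dmesg_restrict=1 an unprivileged reader is refused at open time. A small userspace check could look like:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    /* With dmesg_restrict=1 and neither CAP_SYSLOG nor CAP_SYS_ADMIN,
     * this open is expected to fail with EPERM after the change. */
    int fd = open("/dev/kmsg", O_RDONLY | O_NONBLOCK);

    if (fd < 0) {
        fprintf(stderr, "open /dev/kmsg: %s\n", strerror(errno));
        return 1;
    }
    puts("read access to /dev/kmsg granted");
    close(fd);
    return 0;
}
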
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index aed981a3f69c..335a7ae697f5 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -665,20 +665,22 @@ static int ptrace_peek_siginfo(struct task_struct *child,
665 if (unlikely(is_compat_task())) { 665 if (unlikely(is_compat_task())) {
666 compat_siginfo_t __user *uinfo = compat_ptr(data); 666 compat_siginfo_t __user *uinfo = compat_ptr(data);
667 667
668 ret = copy_siginfo_to_user32(uinfo, &info); 668 if (copy_siginfo_to_user32(uinfo, &info) ||
669 ret |= __put_user(info.si_code, &uinfo->si_code); 669 __put_user(info.si_code, &uinfo->si_code)) {
670 ret = -EFAULT;
671 break;
672 }
673
670 } else 674 } else
671#endif 675#endif
672 { 676 {
673 siginfo_t __user *uinfo = (siginfo_t __user *) data; 677 siginfo_t __user *uinfo = (siginfo_t __user *) data;
674 678
675 ret = copy_siginfo_to_user(uinfo, &info); 679 if (copy_siginfo_to_user(uinfo, &info) ||
676 ret |= __put_user(info.si_code, &uinfo->si_code); 680 __put_user(info.si_code, &uinfo->si_code)) {
677 } 681 ret = -EFAULT;
678 682 break;
679 if (ret) { 683 }
680 ret = -EFAULT;
681 break;
682 } 684 }
683 685
684 data += sizeof(siginfo_t); 686 data += sizeof(siginfo_t);
diff --git a/kernel/range.c b/kernel/range.c
index 071b0ab455cb..322ea8e93e4b 100644
--- a/kernel/range.c
+++ b/kernel/range.c
@@ -4,7 +4,7 @@
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <linux/init.h> 5#include <linux/init.h>
6#include <linux/sort.h> 6#include <linux/sort.h>
7 7#include <linux/string.h>
8#include <linux/range.h> 8#include <linux/range.h>
9 9
10int add_range(struct range *range, int az, int nr_range, u64 start, u64 end) 10int add_range(struct range *range, int az, int nr_range, u64 start, u64 end)
@@ -32,9 +32,8 @@ int add_range_with_merge(struct range *range, int az, int nr_range,
32 if (start >= end) 32 if (start >= end)
33 return nr_range; 33 return nr_range;
34 34
35 /* Try to merge it with old one: */ 35 /* get new start/end: */
36 for (i = 0; i < nr_range; i++) { 36 for (i = 0; i < nr_range; i++) {
37 u64 final_start, final_end;
38 u64 common_start, common_end; 37 u64 common_start, common_end;
39 38
40 if (!range[i].end) 39 if (!range[i].end)
@@ -45,12 +44,16 @@ int add_range_with_merge(struct range *range, int az, int nr_range,
45 if (common_start > common_end) 44 if (common_start > common_end)
46 continue; 45 continue;
47 46
48 final_start = min(range[i].start, start); 47 /* new start/end, will add it back at last */
49 final_end = max(range[i].end, end); 48 start = min(range[i].start, start);
49 end = max(range[i].end, end);
50 50
51 range[i].start = final_start; 51 memmove(&range[i], &range[i + 1],
52 range[i].end = final_end; 52 (nr_range - (i + 1)) * sizeof(range[i]));
53 return nr_range; 53 range[nr_range - 1].start = 0;
54 range[nr_range - 1].end = 0;
55 nr_range--;
56 i--;
54 } 57 }
55 58
56 /* Need to add it: */ 59 /* Need to add it: */
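
For illustration only (not part of the patch): the add_range_with_merge() rewrite above keeps merging after the first hit, absorbing every overlapping entry into the incoming range, compacting the array with memmove(), and appending the grown range at the end. A standalone userspace model of the same loop:

#include <stdio.h>
#include <string.h>

struct range {
    unsigned long long start, end;
};

/* Userspace model of the revised merge loop shown above. */
static int add_range_with_merge(struct range *r, int az, int nr,
                                unsigned long long start,
                                unsigned long long end)
{
    int i;

    if (start >= end)
        return nr;

    for (i = 0; i < nr; i++) {
        unsigned long long cs, ce;

        if (!r[i].end)
            continue;
        cs = r[i].start > start ? r[i].start : start;
        ce = r[i].end < end ? r[i].end : end;
        if (cs > ce)
            continue;               /* no overlap, keep looking */

        /* absorb the entry and compact the array */
        start = r[i].start < start ? r[i].start : start;
        end = r[i].end > end ? r[i].end : end;
        memmove(&r[i], &r[i + 1], (nr - (i + 1)) * sizeof(r[i]));
        r[nr - 1].start = 0;
        r[nr - 1].end = 0;
        nr--;
        i--;
    }

    if (nr < az) {                  /* finally add the merged range back */
        r[nr].start = start;
        r[nr].end = end;
        nr++;
    }
    return nr;
}

int main(void)
{
    struct range r[4] = { { 0, 10 }, { 20, 30 } };
    int nr = add_range_with_merge(r, 4, 2, 5, 25);

    /* [5,25) bridges both entries, so everything collapses to [0,30) */
    printf("nr=%d range=[%llu,%llu)\n", nr, r[0].start, r[0].end);
    return 0;
}
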
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 16ea67925015..35380019f0fc 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1451,9 +1451,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
1451 rnp->grphi, rnp->qsmask); 1451 rnp->grphi, rnp->qsmask);
1452 raw_spin_unlock_irq(&rnp->lock); 1452 raw_spin_unlock_irq(&rnp->lock);
1453#ifdef CONFIG_PROVE_RCU_DELAY 1453#ifdef CONFIG_PROVE_RCU_DELAY
1454 if ((prandom_u32() % (rcu_num_nodes * 8)) == 0 && 1454 if ((prandom_u32() % (rcu_num_nodes + 1)) == 0 &&
1455 system_state == SYSTEM_RUNNING) 1455 system_state == SYSTEM_RUNNING)
1456 schedule_timeout_uninterruptible(2); 1456 udelay(200);
1457#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */ 1457#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
1458 cond_resched(); 1458 cond_resched();
1459 } 1459 }
@@ -1613,6 +1613,14 @@ static int __noreturn rcu_gp_kthread(void *arg)
1613 } 1613 }
1614} 1614}
1615 1615
1616static void rsp_wakeup(struct irq_work *work)
1617{
1618 struct rcu_state *rsp = container_of(work, struct rcu_state, wakeup_work);
1619
1620 /* Wake up rcu_gp_kthread() to start the grace period. */
1621 wake_up(&rsp->gp_wq);
1622}
1623
1616/* 1624/*
1617 * Start a new RCU grace period if warranted, re-initializing the hierarchy 1625 * Start a new RCU grace period if warranted, re-initializing the hierarchy
1618 * in preparation for detecting the next grace period. The caller must hold 1626 * in preparation for detecting the next grace period. The caller must hold
@@ -1637,8 +1645,12 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
1637 } 1645 }
1638 rsp->gp_flags = RCU_GP_FLAG_INIT; 1646 rsp->gp_flags = RCU_GP_FLAG_INIT;
1639 1647
1640 /* Wake up rcu_gp_kthread() to start the grace period. */ 1648 /*
1641 wake_up(&rsp->gp_wq); 1649 * We can't do wakeups while holding the rnp->lock, as that
1650 * could cause possible deadlocks with the rq->lock. Defer
1651 * the wakeup to interrupt context.
1652 */
1653 irq_work_queue(&rsp->wakeup_work);
1642} 1654}
1643 1655
1644/* 1656/*
@@ -3235,6 +3247,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
3235 3247
3236 rsp->rda = rda; 3248 rsp->rda = rda;
3237 init_waitqueue_head(&rsp->gp_wq); 3249 init_waitqueue_head(&rsp->gp_wq);
3250 init_irq_work(&rsp->wakeup_work, rsp_wakeup);
3238 rnp = rsp->level[rcu_num_lvls - 1]; 3251 rnp = rsp->level[rcu_num_lvls - 1];
3239 for_each_possible_cpu(i) { 3252 for_each_possible_cpu(i) {
3240 while (i > rnp->grphi) 3253 while (i > rnp->grphi)
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index da77a8f57ff9..4df503470e42 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -27,6 +27,7 @@
27#include <linux/threads.h> 27#include <linux/threads.h>
28#include <linux/cpumask.h> 28#include <linux/cpumask.h>
29#include <linux/seqlock.h> 29#include <linux/seqlock.h>
30#include <linux/irq_work.h>
30 31
31/* 32/*
32 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and 33 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
@@ -442,6 +443,7 @@ struct rcu_state {
442 char *name; /* Name of structure. */ 443 char *name; /* Name of structure. */
443 char abbr; /* Abbreviated name. */ 444 char abbr; /* Abbreviated name. */
444 struct list_head flavors; /* List of RCU flavors. */ 445 struct list_head flavors; /* List of RCU flavors. */
446 struct irq_work wakeup_work; /* Postponed wakeups */
445}; 447};
446 448
447/* Values for rcu_state structure's gp_flags field. */ 449/* Values for rcu_state structure's gp_flags field. */
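
For illustration only (not part of the patch): the rcutree hunks defer the grace-period wakeup to irq_work so that wake_up() is never called while rnp->lock is held. The same "note the request under the lock, perform the wakeup after dropping it" shape, modelled in userspace with pthreads (build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t wq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static int gp_requested;

static void *gp_thread(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&wq_lock);
    while (!gp_requested)
        pthread_cond_wait(&wq, &wq_lock);
    pthread_mutex_unlock(&wq_lock);
    puts("grace-period thread woken");
    return NULL;
}

int main(void)
{
    pthread_t t;
    int need_wakeup;

    pthread_create(&t, NULL, gp_thread, NULL);

    pthread_mutex_lock(&state_lock);
    need_wakeup = 1;              /* like irq_work_queue(&rsp->wakeup_work) */
    pthread_mutex_unlock(&state_lock);

    if (need_wakeup) {            /* the wakeup runs outside the critical section */
        pthread_mutex_lock(&wq_lock);
        gp_requested = 1;
        pthread_cond_signal(&wq);
        pthread_mutex_unlock(&wq_lock);
    }

    pthread_join(t, NULL);
    return 0;
}
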
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 08746cc12370..9b1f2e533b95 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4184,7 +4184,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
4184 */ 4184 */
4185 idle->sched_class = &idle_sched_class; 4185 idle->sched_class = &idle_sched_class;
4186 ftrace_graph_init_idle_task(idle, cpu); 4186 ftrace_graph_init_idle_task(idle, cpu);
4187 vtime_init_idle(idle); 4187 vtime_init_idle(idle, cpu);
4188#if defined(CONFIG_SMP) 4188#if defined(CONFIG_SMP)
4189 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 4189 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
4190#endif 4190#endif
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 94691bcd7364..a7959e05a9d5 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -746,17 +746,17 @@ void arch_vtime_task_switch(struct task_struct *prev)
746 746
747 write_seqlock(&current->vtime_seqlock); 747 write_seqlock(&current->vtime_seqlock);
748 current->vtime_snap_whence = VTIME_SYS; 748 current->vtime_snap_whence = VTIME_SYS;
749 current->vtime_snap = sched_clock(); 749 current->vtime_snap = sched_clock_cpu(smp_processor_id());
750 write_sequnlock(&current->vtime_seqlock); 750 write_sequnlock(&current->vtime_seqlock);
751} 751}
752 752
753void vtime_init_idle(struct task_struct *t) 753void vtime_init_idle(struct task_struct *t, int cpu)
754{ 754{
755 unsigned long flags; 755 unsigned long flags;
756 756
757 write_seqlock_irqsave(&t->vtime_seqlock, flags); 757 write_seqlock_irqsave(&t->vtime_seqlock, flags);
758 t->vtime_snap_whence = VTIME_SYS; 758 t->vtime_snap_whence = VTIME_SYS;
759 t->vtime_snap = sched_clock(); 759 t->vtime_snap = sched_clock_cpu(cpu);
760 write_sequnlock_irqrestore(&t->vtime_seqlock, flags); 760 write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
761} 761}
762 762
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b5197dcb0dad..3d6833f125d3 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -195,8 +195,12 @@ void local_bh_enable_ip(unsigned long ip)
195EXPORT_SYMBOL(local_bh_enable_ip); 195EXPORT_SYMBOL(local_bh_enable_ip);
196 196
197/* 197/*
198 * We restart softirq processing for at most 2 ms, 198 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
199 * and if need_resched() is not set. 199 * but break the loop if need_resched() is set or after 2 ms.
200 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
201 * certain cases, such as stop_machine(), jiffies may cease to
202 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
203 * well to make sure we eventually return from this method.
200 * 204 *
201 * These limits have been established via experimentation. 205 * These limits have been established via experimentation.
202 * The two things to balance is latency against fairness - 206 * The two things to balance is latency against fairness -
@@ -204,6 +208,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
204 * should not be able to lock up the box. 208 * should not be able to lock up the box.
205 */ 209 */
206#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2) 210#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
211#define MAX_SOFTIRQ_RESTART 10
207 212
208asmlinkage void __do_softirq(void) 213asmlinkage void __do_softirq(void)
209{ 214{
@@ -212,6 +217,7 @@ asmlinkage void __do_softirq(void)
212 unsigned long end = jiffies + MAX_SOFTIRQ_TIME; 217 unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
213 int cpu; 218 int cpu;
214 unsigned long old_flags = current->flags; 219 unsigned long old_flags = current->flags;
220 int max_restart = MAX_SOFTIRQ_RESTART;
215 221
216 /* 222 /*
217 * Mask out PF_MEMALLOC as current task context is borrowed for the 223 * Mask out PF_MEMALLOC as current task context is borrowed for the
@@ -265,7 +271,8 @@ restart:
265 271
266 pending = local_softirq_pending(); 272 pending = local_softirq_pending();
267 if (pending) { 273 if (pending) {
268 if (time_before(jiffies, end) && !need_resched()) 274 if (time_before(jiffies, end) && !need_resched() &&
275 --max_restart)
269 goto restart; 276 goto restart;
270 277
271 wakeup_softirqd(); 278 wakeup_softirqd();
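
For illustration only (not part of the patch): __do_softirq() now bails out on whichever limit is hit first, the 2 ms deadline or the MAX_SOFTIRQ_RESTART iteration budget, so the loop still terminates even if jiffies stops advancing. A trivial userspace model of the bail-out condition:

#include <stdbool.h>
#include <stdio.h>

#define MAX_SOFTIRQ_RESTART 10

/* Worst-case model: work is always pending, the clock never advances
 * and nothing asks for a reschedule, so only the restart budget stops
 * the loop. */
static bool more_pending(void)    { return true; }
static bool deadline_passed(void) { return false; }
static bool need_resched(void)    { return false; }

int main(void)
{
    int max_restart = MAX_SOFTIRQ_RESTART;
    int rounds = 0;

    do {
        rounds++;                 /* one pass of softirq processing */
    } while (more_pending() && !deadline_passed() &&
             !need_resched() && --max_restart);

    printf("bailed out after %d rounds\n", rounds);   /* prints 10 */
    return 0;
}
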
diff --git a/kernel/sys.c b/kernel/sys.c
index b95d3c72ba21..2bbd9a73b54c 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -362,6 +362,29 @@ int unregister_reboot_notifier(struct notifier_block *nb)
362} 362}
363EXPORT_SYMBOL(unregister_reboot_notifier); 363EXPORT_SYMBOL(unregister_reboot_notifier);
364 364
365/* Add backwards compatibility for stable trees. */
366#ifndef PF_NO_SETAFFINITY
367#define PF_NO_SETAFFINITY PF_THREAD_BOUND
368#endif
369
370static void migrate_to_reboot_cpu(void)
371{
372 /* The boot cpu is always logical cpu 0 */
373 int cpu = 0;
374
375 cpu_hotplug_disable();
376
377 /* Make certain the cpu I'm about to reboot on is online */
378 if (!cpu_online(cpu))
379 cpu = cpumask_first(cpu_online_mask);
380
381 /* Prevent races with other tasks migrating this task */
382 current->flags |= PF_NO_SETAFFINITY;
383
384 /* Make certain I only run on the appropriate processor */
385 set_cpus_allowed_ptr(current, cpumask_of(cpu));
386}
387
365/** 388/**
366 * kernel_restart - reboot the system 389 * kernel_restart - reboot the system
367 * @cmd: pointer to buffer containing command to execute for restart 390 * @cmd: pointer to buffer containing command to execute for restart
@@ -373,7 +396,7 @@ EXPORT_SYMBOL(unregister_reboot_notifier);
373void kernel_restart(char *cmd) 396void kernel_restart(char *cmd)
374{ 397{
375 kernel_restart_prepare(cmd); 398 kernel_restart_prepare(cmd);
376 disable_nonboot_cpus(); 399 migrate_to_reboot_cpu();
377 syscore_shutdown(); 400 syscore_shutdown();
378 if (!cmd) 401 if (!cmd)
379 printk(KERN_EMERG "Restarting system.\n"); 402 printk(KERN_EMERG "Restarting system.\n");
@@ -400,7 +423,7 @@ static void kernel_shutdown_prepare(enum system_states state)
400void kernel_halt(void) 423void kernel_halt(void)
401{ 424{
402 kernel_shutdown_prepare(SYSTEM_HALT); 425 kernel_shutdown_prepare(SYSTEM_HALT);
403 disable_nonboot_cpus(); 426 migrate_to_reboot_cpu();
404 syscore_shutdown(); 427 syscore_shutdown();
405 printk(KERN_EMERG "System halted.\n"); 428 printk(KERN_EMERG "System halted.\n");
406 kmsg_dump(KMSG_DUMP_HALT); 429 kmsg_dump(KMSG_DUMP_HALT);
@@ -419,7 +442,7 @@ void kernel_power_off(void)
419 kernel_shutdown_prepare(SYSTEM_POWER_OFF); 442 kernel_shutdown_prepare(SYSTEM_POWER_OFF);
420 if (pm_power_off_prepare) 443 if (pm_power_off_prepare)
421 pm_power_off_prepare(); 444 pm_power_off_prepare();
422 disable_nonboot_cpus(); 445 migrate_to_reboot_cpu();
423 syscore_shutdown(); 446 syscore_shutdown();
424 printk(KERN_EMERG "Power down.\n"); 447 printk(KERN_EMERG "Power down.\n");
425 kmsg_dump(KMSG_DUMP_POWEROFF); 448 kmsg_dump(KMSG_DUMP_POWEROFF);
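
For illustration only (not part of the patch): instead of disable_nonboot_cpus(), the reboot paths above now just migrate the calling task to one online CPU before syscore_shutdown(). The userspace analogue of that pinning step is a plain affinity call:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
    /* Pin the calling task to CPU 0, roughly what migrate_to_reboot_cpu()
     * does for the task issuing the reboot (assuming CPU 0 is online). */
    cpu_set_t set;

    CPU_ZERO(&set);
    CPU_SET(0, &set);
    if (sched_setaffinity(0, sizeof(set), &set)) {
        perror("sched_setaffinity");
        return 1;
    }
    printf("now running on CPU %d\n", sched_getcpu());
    return 0;
}
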
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 12ff13a838c6..8f5b3b98577b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -874,7 +874,6 @@ static void hardpps_update_phase(long error)
874void __hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts) 874void __hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
875{ 875{
876 struct pps_normtime pts_norm, freq_norm; 876 struct pps_normtime pts_norm, freq_norm;
877 unsigned long flags;
878 877
879 pts_norm = pps_normalize_ts(*phase_ts); 878 pts_norm = pps_normalize_ts(*phase_ts);
880 879
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 24938d577669..20d6fba70652 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -511,6 +511,12 @@ again:
511 } 511 }
512 } 512 }
513 513
514 /*
515 * Remove the current cpu from the pending mask. The event is
516 * delivered immediately in tick_do_broadcast() !
517 */
518 cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);
519
514 /* Take care of enforced broadcast requests */ 520 /* Take care of enforced broadcast requests */
515 cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask); 521 cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
516 cpumask_clear(tick_broadcast_force_mask); 522 cpumask_clear(tick_broadcast_force_mask);
@@ -575,8 +581,8 @@ void tick_broadcast_oneshot_control(unsigned long reason)
575 581
576 raw_spin_lock_irqsave(&tick_broadcast_lock, flags); 582 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
577 if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) { 583 if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
578 WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
579 if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) { 584 if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
585 WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
580 clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); 586 clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
581 /* 587 /*
582 * We only reprogram the broadcast timer if we 588 * We only reprogram the broadcast timer if we
@@ -593,8 +599,6 @@ void tick_broadcast_oneshot_control(unsigned long reason)
593 } else { 599 } else {
594 if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) { 600 if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
595 clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); 601 clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
596 if (dev->next_event.tv64 == KTIME_MAX)
597 goto out;
598 /* 602 /*
599 * The cpu which was handling the broadcast 603 * The cpu which was handling the broadcast
600 * timer marked this cpu in the broadcast 604 * timer marked this cpu in the broadcast
@@ -609,6 +613,11 @@ void tick_broadcast_oneshot_control(unsigned long reason)
609 goto out; 613 goto out;
610 614
611 /* 615 /*
616 * Bail out if there is no next event.
617 */
618 if (dev->next_event.tv64 == KTIME_MAX)
619 goto out;
620 /*
612 * If the pending bit is not set, then we are 621 * If the pending bit is not set, then we are
613 * either the CPU handling the broadcast 622 * either the CPU handling the broadcast
614 * interrupt or we got woken by something else. 623 * interrupt or we got woken by something else.
@@ -692,10 +701,6 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
692 701
693 bc->event_handler = tick_handle_oneshot_broadcast; 702 bc->event_handler = tick_handle_oneshot_broadcast;
694 703
695 /* Take the do_timer update */
696 if (!tick_nohz_full_cpu(cpu))
697 tick_do_timer_cpu = cpu;
698
699 /* 704 /*
700 * We must be careful here. There might be other CPUs 705 * We must be careful here. There might be other CPUs
701 * waiting for periodic broadcast. We need to set the 706 * waiting for periodic broadcast. We need to set the
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index f4208138fbf4..0cf1c1453181 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -306,7 +306,7 @@ static int __cpuinit tick_nohz_cpu_down_callback(struct notifier_block *nfb,
306 * we can't safely shutdown that CPU. 306 * we can't safely shutdown that CPU.
307 */ 307 */
308 if (have_nohz_full_mask && tick_do_timer_cpu == cpu) 308 if (have_nohz_full_mask && tick_do_timer_cpu == cpu)
309 return -EINVAL; 309 return NOTIFY_BAD;
310 break; 310 break;
311 } 311 }
312 return NOTIFY_OK; 312 return NOTIFY_OK;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 98cd470bbe49..baeeb5c87cf1 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -975,6 +975,14 @@ static int timekeeping_suspend(void)
975 975
976 read_persistent_clock(&timekeeping_suspend_time); 976 read_persistent_clock(&timekeeping_suspend_time);
977 977
978 /*
979 * On some systems the persistent_clock can not be detected at
980 * timekeeping_init by its return value, so if we see a valid
981 * value returned, update the persistent_clock_exists flag.
982 */
983 if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
984 persistent_clock_exist = true;
985
978 raw_spin_lock_irqsave(&timekeeper_lock, flags); 986 raw_spin_lock_irqsave(&timekeeper_lock, flags);
979 write_seqcount_begin(&timekeeper_seq); 987 write_seqcount_begin(&timekeeper_seq);
980 timekeeping_forward_now(tk); 988 timekeeping_forward_now(tk);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b549b0f5b977..6c508ff33c62 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -120,22 +120,22 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
120 120
121/* 121/*
122 * Traverse the ftrace_global_list, invoking all entries. The reason that we 122 * Traverse the ftrace_global_list, invoking all entries. The reason that we
123 * can use rcu_dereference_raw() is that elements removed from this list 123 * can use rcu_dereference_raw_notrace() is that elements removed from this list
124 * are simply leaked, so there is no need to interact with a grace-period 124 * are simply leaked, so there is no need to interact with a grace-period
125 * mechanism. The rcu_dereference_raw() calls are needed to handle 125 * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
126 * concurrent insertions into the ftrace_global_list. 126 * concurrent insertions into the ftrace_global_list.
127 * 127 *
128 * Silly Alpha and silly pointer-speculation compiler optimizations! 128 * Silly Alpha and silly pointer-speculation compiler optimizations!
129 */ 129 */
130#define do_for_each_ftrace_op(op, list) \ 130#define do_for_each_ftrace_op(op, list) \
131 op = rcu_dereference_raw(list); \ 131 op = rcu_dereference_raw_notrace(list); \
132 do 132 do
133 133
134/* 134/*
135 * Optimized for just a single item in the list (as that is the normal case). 135 * Optimized for just a single item in the list (as that is the normal case).
136 */ 136 */
137#define while_for_each_ftrace_op(op) \ 137#define while_for_each_ftrace_op(op) \
138 while (likely(op = rcu_dereference_raw((op)->next)) && \ 138 while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
139 unlikely((op) != &ftrace_list_end)) 139 unlikely((op) != &ftrace_list_end))
140 140
141static inline void ftrace_ops_init(struct ftrace_ops *ops) 141static inline void ftrace_ops_init(struct ftrace_ops *ops)
@@ -779,7 +779,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
779 if (hlist_empty(hhd)) 779 if (hlist_empty(hhd))
780 return NULL; 780 return NULL;
781 781
782 hlist_for_each_entry_rcu(rec, hhd, node) { 782 hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
783 if (rec->ip == ip) 783 if (rec->ip == ip)
784 return rec; 784 return rec;
785 } 785 }
@@ -1165,7 +1165,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1165 1165
1166 hhd = &hash->buckets[key]; 1166 hhd = &hash->buckets[key];
1167 1167
1168 hlist_for_each_entry_rcu(entry, hhd, hlist) { 1168 hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1169 if (entry->ip == ip) 1169 if (entry->ip == ip)
1170 return entry; 1170 return entry;
1171 } 1171 }
@@ -1422,8 +1422,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1422 struct ftrace_hash *notrace_hash; 1422 struct ftrace_hash *notrace_hash;
1423 int ret; 1423 int ret;
1424 1424
1425 filter_hash = rcu_dereference_raw(ops->filter_hash); 1425 filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
1426 notrace_hash = rcu_dereference_raw(ops->notrace_hash); 1426 notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
1427 1427
1428 if ((ftrace_hash_empty(filter_hash) || 1428 if ((ftrace_hash_empty(filter_hash) ||
1429 ftrace_lookup_ip(filter_hash, ip)) && 1429 ftrace_lookup_ip(filter_hash, ip)) &&
@@ -2920,7 +2920,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
2920 * on the hash. rcu_read_lock is too dangerous here. 2920 * on the hash. rcu_read_lock is too dangerous here.
2921 */ 2921 */
2922 preempt_disable_notrace(); 2922 preempt_disable_notrace();
2923 hlist_for_each_entry_rcu(entry, hhd, node) { 2923 hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
2924 if (entry->ip == ip) 2924 if (entry->ip == ip)
2925 entry->ops->func(ip, parent_ip, &entry->data); 2925 entry->ops->func(ip, parent_ip, &entry->data);
2926 } 2926 }
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4d79485b3237..e71a8be4a6ee 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -652,8 +652,6 @@ static struct {
652 ARCH_TRACE_CLOCKS 652 ARCH_TRACE_CLOCKS
653}; 653};
654 654
655int trace_clock_id;
656
657/* 655/*
658 * trace_parser_get_init - gets the buffer for trace parser 656 * trace_parser_get_init - gets the buffer for trace parser
659 */ 657 */
@@ -843,7 +841,15 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
843 841
844 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN); 842 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
845 max_data->pid = tsk->pid; 843 max_data->pid = tsk->pid;
846 max_data->uid = task_uid(tsk); 844 /*
845 * If tsk == current, then use current_uid(), as that does not use
846 * RCU. The irq tracer can be called out of RCU scope.
847 */
848 if (tsk == current)
849 max_data->uid = current_uid();
850 else
851 max_data->uid = task_uid(tsk);
852
847 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; 853 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
848 max_data->policy = tsk->policy; 854 max_data->policy = tsk->policy;
849 max_data->rt_priority = tsk->rt_priority; 855 max_data->rt_priority = tsk->rt_priority;
@@ -2818,7 +2824,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
2818 iter->iter_flags |= TRACE_FILE_ANNOTATE; 2824 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2819 2825
2820 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 2826 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
2821 if (trace_clocks[trace_clock_id].in_ns) 2827 if (trace_clocks[tr->clock_id].in_ns)
2822 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 2828 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2823 2829
2824 /* stop the trace while dumping if we are not opening "snapshot" */ 2830 /* stop the trace while dumping if we are not opening "snapshot" */
@@ -3817,7 +3823,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
3817 iter->iter_flags |= TRACE_FILE_LAT_FMT; 3823 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3818 3824
3819 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 3825 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3820 if (trace_clocks[trace_clock_id].in_ns) 3826 if (trace_clocks[tr->clock_id].in_ns)
3821 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 3827 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3822 3828
3823 iter->cpu_file = tc->cpu; 3829 iter->cpu_file = tc->cpu;
@@ -5087,7 +5093,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
5087 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu); 5093 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5088 trace_seq_printf(s, "bytes: %ld\n", cnt); 5094 trace_seq_printf(s, "bytes: %ld\n", cnt);
5089 5095
5090 if (trace_clocks[trace_clock_id].in_ns) { 5096 if (trace_clocks[tr->clock_id].in_ns) {
5091 /* local or global for trace_clock */ 5097 /* local or global for trace_clock */
5092 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); 5098 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5093 usec_rem = do_div(t, USEC_PER_SEC); 5099 usec_rem = do_div(t, USEC_PER_SEC);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 711ca7d3e7f1..20572ed88c5c 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -700,8 +700,6 @@ enum print_line_t print_trace_line(struct trace_iterator *iter);
700 700
701extern unsigned long trace_flags; 701extern unsigned long trace_flags;
702 702
703extern int trace_clock_id;
704
705/* Standard output formatting function used for function return traces */ 703/* Standard output formatting function used for function return traces */
706#ifdef CONFIG_FUNCTION_GRAPH_TRACER 704#ifdef CONFIG_FUNCTION_GRAPH_TRACER
707 705
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 55e2cf66967b..2901e3b88590 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1159,7 +1159,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
1159 /* stop the tracing. */ 1159 /* stop the tracing. */
1160 tracing_stop(); 1160 tracing_stop();
1161 /* check the trace buffer */ 1161 /* check the trace buffer */
1162 ret = trace_test_buffer(tr, &count); 1162 ret = trace_test_buffer(&tr->trace_buffer, &count);
1163 trace->reset(tr); 1163 trace->reset(tr);
1164 tracing_start(); 1164 tracing_start();
1165 1165
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 5f9c44cdf1f5..4cc6442733f4 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -37,7 +37,7 @@ MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes)
37 mpi_limb_t a; 37 mpi_limb_t a;
38 MPI val = NULL; 38 MPI val = NULL;
39 39
40 while (nbytes >= 0 && buffer[0] == 0) { 40 while (nbytes > 0 && buffer[0] == 0) {
41 buffer++; 41 buffer++;
42 nbytes--; 42 nbytes--;
43 } 43 }
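
For illustration only (not part of the patch): nbytes in mpi_read_raw_data() is a size_t, so the old test "nbytes >= 0" was always true and an all-zero buffer would be walked past its end while nbytes wrapped around. The corrected test stops at zero:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
    const unsigned char buffer[4] = { 0, 0, 0, 0 };
    const unsigned char *p = buffer;
    size_t nbytes = sizeof(buffer);

    /* Strip leading zero bytes; "nbytes > 0" (not ">= 0") is what keeps
     * an unsigned counter from running past the end of the buffer. */
    while (nbytes > 0 && p[0] == 0) {
        p++;
        nbytes--;
    }
    printf("%zu leading zero bytes stripped, %zu bytes left\n",
           (size_t)(p - buffer), nbytes);
    return 0;
}
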
diff --git a/mm/frontswap.c b/mm/frontswap.c
index 538367ef1372..1b24bdcb3197 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -319,7 +319,7 @@ void __frontswap_invalidate_area(unsigned type)
319 return; 319 return;
320 frontswap_ops->invalidate_area(type); 320 frontswap_ops->invalidate_area(type);
321 atomic_set(&sis->frontswap_pages, 0); 321 atomic_set(&sis->frontswap_pages, 0);
322 memset(sis->frontswap_map, 0, sis->max / sizeof(long)); 322 bitmap_zero(sis->frontswap_map, sis->max);
323 } 323 }
324 clear_bit(type, need_init); 324 clear_bit(type, need_init);
325} 325}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f8feeeca6686..e2bfbf73a551 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2839,7 +2839,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2839 if (ptep) { 2839 if (ptep) {
2840 entry = huge_ptep_get(ptep); 2840 entry = huge_ptep_get(ptep);
2841 if (unlikely(is_hugetlb_entry_migration(entry))) { 2841 if (unlikely(is_hugetlb_entry_migration(entry))) {
2842 migration_entry_wait(mm, (pmd_t *)ptep, address); 2842 migration_entry_wait_huge(mm, ptep);
2843 return 0; 2843 return 0;
2844 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) 2844 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2845 return VM_FAULT_HWPOISON_LARGE | 2845 return VM_FAULT_HWPOISON_LARGE |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 010d6c14129a..194721839cf5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1199,7 +1199,6 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1199 1199
1200 mz = mem_cgroup_zoneinfo(root, nid, zid); 1200 mz = mem_cgroup_zoneinfo(root, nid, zid);
1201 iter = &mz->reclaim_iter[reclaim->priority]; 1201 iter = &mz->reclaim_iter[reclaim->priority];
1202 last_visited = iter->last_visited;
1203 if (prev && reclaim->generation != iter->generation) { 1202 if (prev && reclaim->generation != iter->generation) {
1204 iter->last_visited = NULL; 1203 iter->last_visited = NULL;
1205 goto out_unlock; 1204 goto out_unlock;
@@ -1218,13 +1217,12 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1218 * is alive. 1217 * is alive.
1219 */ 1218 */
1220 dead_count = atomic_read(&root->dead_count); 1219 dead_count = atomic_read(&root->dead_count);
1221 smp_rmb(); 1220 if (dead_count == iter->last_dead_count) {
1222 last_visited = iter->last_visited; 1221 smp_rmb();
1223 if (last_visited) { 1222 last_visited = iter->last_visited;
1224 if ((dead_count != iter->last_dead_count) || 1223 if (last_visited &&
1225 !css_tryget(&last_visited->css)) { 1224 !css_tryget(&last_visited->css))
1226 last_visited = NULL; 1225 last_visited = NULL;
1227 }
1228 } 1226 }
1229 } 1227 }
1230 1228
@@ -3141,8 +3139,6 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
3141 return -ENOMEM; 3139 return -ENOMEM;
3142 } 3140 }
3143 3141
3144 INIT_WORK(&s->memcg_params->destroy,
3145 kmem_cache_destroy_work_func);
3146 s->memcg_params->is_root_cache = true; 3142 s->memcg_params->is_root_cache = true;
3147 3143
3148 /* 3144 /*
diff --git a/mm/memory.c b/mm/memory.c
index 6dc1882fbd72..61a262b08e53 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -220,7 +220,6 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
220 tlb->start = -1UL; 220 tlb->start = -1UL;
221 tlb->end = 0; 221 tlb->end = 0;
222 tlb->need_flush = 0; 222 tlb->need_flush = 0;
223 tlb->fast_mode = (num_possible_cpus() == 1);
224 tlb->local.next = NULL; 223 tlb->local.next = NULL;
225 tlb->local.nr = 0; 224 tlb->local.nr = 0;
226 tlb->local.max = ARRAY_SIZE(tlb->__pages); 225 tlb->local.max = ARRAY_SIZE(tlb->__pages);
@@ -244,9 +243,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
244 tlb_table_flush(tlb); 243 tlb_table_flush(tlb);
245#endif 244#endif
246 245
247 if (tlb_fast_mode(tlb))
248 return;
249
250 for (batch = &tlb->local; batch; batch = batch->next) { 246 for (batch = &tlb->local; batch; batch = batch->next) {
251 free_pages_and_swap_cache(batch->pages, batch->nr); 247 free_pages_and_swap_cache(batch->pages, batch->nr);
252 batch->nr = 0; 248 batch->nr = 0;
@@ -288,11 +284,6 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
288 284
289 VM_BUG_ON(!tlb->need_flush); 285 VM_BUG_ON(!tlb->need_flush);
290 286
291 if (tlb_fast_mode(tlb)) {
292 free_page_and_swap_cache(page);
293 return 1; /* avoid calling tlb_flush_mmu() */
294 }
295
296 batch = tlb->active; 287 batch = tlb->active;
297 batch->pages[batch->nr++] = page; 288 batch->pages[batch->nr++] = page;
298 if (batch->nr == batch->max) { 289 if (batch->nr == batch->max) {
diff --git a/mm/migrate.c b/mm/migrate.c
index b1f57501de9c..6f0c24438bba 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -200,15 +200,14 @@ static void remove_migration_ptes(struct page *old, struct page *new)
200 * get to the page and wait until migration is finished. 200 * get to the page and wait until migration is finished.
201 * When we return from this function the fault will be retried. 201 * When we return from this function the fault will be retried.
202 */ 202 */
203void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 203static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
204 unsigned long address) 204 spinlock_t *ptl)
205{ 205{
206 pte_t *ptep, pte; 206 pte_t pte;
207 spinlock_t *ptl;
208 swp_entry_t entry; 207 swp_entry_t entry;
209 struct page *page; 208 struct page *page;
210 209
211 ptep = pte_offset_map_lock(mm, pmd, address, &ptl); 210 spin_lock(ptl);
212 pte = *ptep; 211 pte = *ptep;
213 if (!is_swap_pte(pte)) 212 if (!is_swap_pte(pte))
214 goto out; 213 goto out;
@@ -236,6 +235,20 @@ out:
236 pte_unmap_unlock(ptep, ptl); 235 pte_unmap_unlock(ptep, ptl);
237} 236}
238 237
238void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
239 unsigned long address)
240{
241 spinlock_t *ptl = pte_lockptr(mm, pmd);
242 pte_t *ptep = pte_offset_map(pmd, address);
243 __migration_entry_wait(mm, ptep, ptl);
244}
245
246void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
247{
248 spinlock_t *ptl = &(mm)->page_table_lock;
249 __migration_entry_wait(mm, pte, ptl);
250}
251
239#ifdef CONFIG_BLOCK 252#ifdef CONFIG_BLOCK
240/* Returns true if all buffers are successfully locked */ 253/* Returns true if all buffers are successfully locked */
241static bool buffer_migrate_lock_buffers(struct buffer_head *head, 254static bool buffer_migrate_lock_buffers(struct buffer_head *head,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 378a15bcd649..c3edb624fccf 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1628,6 +1628,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1628 long min = mark; 1628 long min = mark;
1629 long lowmem_reserve = z->lowmem_reserve[classzone_idx]; 1629 long lowmem_reserve = z->lowmem_reserve[classzone_idx];
1630 int o; 1630 int o;
1631 long free_cma = 0;
1631 1632
1632 free_pages -= (1 << order) - 1; 1633 free_pages -= (1 << order) - 1;
1633 if (alloc_flags & ALLOC_HIGH) 1634 if (alloc_flags & ALLOC_HIGH)
@@ -1637,9 +1638,10 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1637#ifdef CONFIG_CMA 1638#ifdef CONFIG_CMA
1638 /* If allocation can't use CMA areas don't use free CMA pages */ 1639 /* If allocation can't use CMA areas don't use free CMA pages */
1639 if (!(alloc_flags & ALLOC_CMA)) 1640 if (!(alloc_flags & ALLOC_CMA))
1640 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES); 1641 free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
1641#endif 1642#endif
1642 if (free_pages <= min + lowmem_reserve) 1643
1644 if (free_pages - free_cma <= min + lowmem_reserve)
1643 return false; 1645 return false;
1644 for (o = 0; o < order; o++) { 1646 for (o = 0; o < order; o++) {
1645 /* At the next order, this order's pages become unavailable */ 1647 /* At the next order, this order's pages become unavailable */
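
For illustration only (not part of the patch): __zone_watermark_ok() now keeps the CMA adjustment in a local free_cma and applies it only in the comparison, so free_pages itself is left untouched for the per-order loop that follows. The arithmetic in a tiny standalone form:

#include <stdio.h>

int main(void)
{
    long free_pages = 1000, free_cma = 300;
    long min = 800, lowmem_reserve = 0;

    /* New form: CMA pages are ignored for the check without modifying
     * free_pages, which the subsequent per-order loop keeps using. */
    if (free_pages - free_cma <= min + lowmem_reserve)
        puts("watermark not met: too little non-CMA free memory");
    else
        puts("watermark met");

    printf("free_pages seen by the order loop: %ld\n", free_pages);
    return 0;
}
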
diff --git a/mm/slab_common.c b/mm/slab_common.c
index ff3218a0f5e1..2d414508e9ec 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -373,8 +373,10 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
373{ 373{
374 int index; 374 int index;
375 375
376 if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE)) 376 if (size > KMALLOC_MAX_SIZE) {
377 WARN_ON_ONCE(!(flags & __GFP_NOWARN));
377 return NULL; 378 return NULL;
379 }
378 380
379 if (size <= 192) { 381 if (size <= 192) {
380 if (!size) 382 if (!size)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index b3d40dcf3624..f24ab0dff554 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -336,8 +336,24 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
336 * Swap entry may have been freed since our caller observed it. 336 * Swap entry may have been freed since our caller observed it.
337 */ 337 */
338 err = swapcache_prepare(entry); 338 err = swapcache_prepare(entry);
339 if (err == -EEXIST) { /* seems racy */ 339 if (err == -EEXIST) {
340 radix_tree_preload_end(); 340 radix_tree_preload_end();
341 /*
342 * We might race against get_swap_page() and stumble
343 * across a SWAP_HAS_CACHE swap_map entry whose page
344 * has not been brought into the swapcache yet, while
345 * the other end is scheduled away waiting on discard
346 * I/O completion at scan_swap_map().
347 *
348 * In order to avoid turning this transitory state
349 * into a permanent loop around this -EEXIST case
350 * if !CONFIG_PREEMPT and the I/O completion happens
351 * to be waiting on the CPU waitqueue where we are now
352 * busy looping, we just conditionally invoke the
353 * scheduler here, if there are some more important
354 * tasks to run.
355 */
356 cond_resched();
341 continue; 357 continue;
342 } 358 }
343 if (err) { /* swp entry is obsolete ? */ 359 if (err) { /* swp entry is obsolete ? */
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6c340d908b27..746af55b8455 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2116,7 +2116,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2116 } 2116 }
2117 /* frontswap enabled? set up bit-per-page map for frontswap */ 2117 /* frontswap enabled? set up bit-per-page map for frontswap */
2118 if (frontswap_enabled) 2118 if (frontswap_enabled)
2119 frontswap_map = vzalloc(maxpages / sizeof(long)); 2119 frontswap_map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long));
2120 2120
2121 if (p->bdev) { 2121 if (p->bdev) {
2122 if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { 2122 if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
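
For illustration only (not part of the patch): the frontswap bitmap holds one bit per page, so its size should be BITS_TO_LONGS(maxpages) longs; the old expression maxpages / sizeof(long) mixes up bits, bytes and longs and is not rounded up, which is what the swapfile.c and frontswap.c hunks correct. A quick numeric check:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
    unsigned long maxpages = 65;   /* one bit needed per page */

    /* On a 64-bit system: old size = 65 / sizeof(long) = 8 bytes = 64 bits,
     * one bit short; new size = BITS_TO_LONGS(65) = 2 longs = 128 bits. */
    printf("old size: %lu bytes\n",
           (unsigned long)(maxpages / sizeof(long)));
    printf("new size: %lu bytes\n",
           (unsigned long)(BITS_TO_LONGS(maxpages) * sizeof(unsigned long)));
    return 0;
}
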
diff --git a/net/9p/client.c b/net/9p/client.c
index 8eb75425e6e6..addc116cecf0 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -562,36 +562,19 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
562 562
563 if (!p9_is_proto_dotl(c)) { 563 if (!p9_is_proto_dotl(c)) {
564 /* Error is reported in string format */ 564 /* Error is reported in string format */
565 uint16_t len; 565 int len;
566 /* 7 = header size for RERROR, 2 is the size of string len; */ 566 /* 7 = header size for RERROR; */
567 int inline_len = in_hdrlen - (7 + 2); 567 int inline_len = in_hdrlen - 7;
568 568
569 /* Read the size of error string */ 569 len = req->rc->size - req->rc->offset;
570 err = p9pdu_readf(req->rc, c->proto_version, "w", &len); 570 if (len > (P9_ZC_HDR_SZ - 7)) {
571 if (err) 571 err = -EFAULT;
572 goto out_err;
573
574 ename = kmalloc(len + 1, GFP_NOFS);
575 if (!ename) {
576 err = -ENOMEM;
577 goto out_err; 572 goto out_err;
578 } 573 }
579 if (len <= inline_len) {
580 /* We have error in protocol buffer itself */
581 if (pdu_read(req->rc, ename, len)) {
582 err = -EFAULT;
583 goto out_free;
584 574
585 } 575 ename = &req->rc->sdata[req->rc->offset];
586 } else { 576 if (len > inline_len) {
587 /* 577 /* We have error in external buffer */
588 * Part of the data is in user space buffer.
589 */
590 if (pdu_read(req->rc, ename, inline_len)) {
591 err = -EFAULT;
592 goto out_free;
593
594 }
595 if (kern_buf) { 578 if (kern_buf) {
596 memcpy(ename + inline_len, uidata, 579 memcpy(ename + inline_len, uidata,
597 len - inline_len); 580 len - inline_len);
@@ -600,19 +583,19 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
600 uidata, len - inline_len); 583 uidata, len - inline_len);
601 if (err) { 584 if (err) {
602 err = -EFAULT; 585 err = -EFAULT;
603 goto out_free; 586 goto out_err;
604 } 587 }
605 } 588 }
606 } 589 }
607 ename[len] = 0; 590 ename = NULL;
608 if (p9_is_proto_dotu(c)) { 591 err = p9pdu_readf(req->rc, c->proto_version, "s?d",
609 /* For dotu we also have error code */ 592 &ename, &ecode);
610 err = p9pdu_readf(req->rc, 593 if (err)
611 c->proto_version, "d", &ecode); 594 goto out_err;
612 if (err) 595
613 goto out_free; 596 if (p9_is_proto_dotu(c))
614 err = -ecode; 597 err = -ecode;
615 } 598
616 if (!err || !IS_ERR_VALUE(err)) { 599 if (!err || !IS_ERR_VALUE(err)) {
617 err = p9_errstr2errno(ename, strlen(ename)); 600 err = p9_errstr2errno(ename, strlen(ename));
618 601
@@ -628,8 +611,6 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
628 } 611 }
629 return err; 612 return err;
630 613
631out_free:
632 kfree(ename);
633out_err: 614out_err:
634 p9_debug(P9_DEBUG_ERROR, "couldn't parse error%d\n", err); 615 p9_debug(P9_DEBUG_ERROR, "couldn't parse error%d\n", err);
635 return err; 616 return err;
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 071f288b77a8..f680ee101878 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -29,6 +29,21 @@
29#include "bat_algo.h" 29#include "bat_algo.h"
30#include "network-coding.h" 30#include "network-coding.h"
31 31
32/**
33 * batadv_dup_status - duplicate status
34 * @BATADV_NO_DUP: the packet is a duplicate
35 * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the
36 * neighbor)
37 * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor
38 * @BATADV_PROTECTED: originator is currently protected (after reboot)
39 */
40enum batadv_dup_status {
41 BATADV_NO_DUP = 0,
42 BATADV_ORIG_DUP,
43 BATADV_NEIGH_DUP,
44 BATADV_PROTECTED,
45};
46
32static struct batadv_neigh_node * 47static struct batadv_neigh_node *
33batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface, 48batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
34 const uint8_t *neigh_addr, 49 const uint8_t *neigh_addr,
@@ -650,7 +665,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
650 const struct batadv_ogm_packet *batadv_ogm_packet, 665 const struct batadv_ogm_packet *batadv_ogm_packet,
651 struct batadv_hard_iface *if_incoming, 666 struct batadv_hard_iface *if_incoming,
652 const unsigned char *tt_buff, 667 const unsigned char *tt_buff,
653 int is_duplicate) 668 enum batadv_dup_status dup_status)
654{ 669{
655 struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; 670 struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
656 struct batadv_neigh_node *router = NULL; 671 struct batadv_neigh_node *router = NULL;
@@ -676,7 +691,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
676 continue; 691 continue;
677 } 692 }
678 693
679 if (is_duplicate) 694 if (dup_status != BATADV_NO_DUP)
680 continue; 695 continue;
681 696
682 spin_lock_bh(&tmp_neigh_node->lq_update_lock); 697 spin_lock_bh(&tmp_neigh_node->lq_update_lock);
@@ -718,7 +733,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
718 neigh_node->tq_avg = batadv_ring_buffer_avg(neigh_node->tq_recv); 733 neigh_node->tq_avg = batadv_ring_buffer_avg(neigh_node->tq_recv);
719 spin_unlock_bh(&neigh_node->lq_update_lock); 734 spin_unlock_bh(&neigh_node->lq_update_lock);
720 735
721 if (!is_duplicate) { 736 if (dup_status == BATADV_NO_DUP) {
722 orig_node->last_ttl = batadv_ogm_packet->header.ttl; 737 orig_node->last_ttl = batadv_ogm_packet->header.ttl;
723 neigh_node->last_ttl = batadv_ogm_packet->header.ttl; 738 neigh_node->last_ttl = batadv_ogm_packet->header.ttl;
724 } 739 }
@@ -902,15 +917,16 @@ out:
902 return ret; 917 return ret;
903} 918}
904 919
905/* processes a batman packet for all interfaces, adjusts the sequence number and 920/**
906 * finds out whether it is a duplicate. 921 * batadv_iv_ogm_update_seqnos - process a batman packet for all interfaces,
907 * returns: 922 * adjust the sequence number and find out whether it is a duplicate
908 * 1 the packet is a duplicate 923 * @ethhdr: ethernet header of the packet
909 * 0 the packet has not yet been received 924 * @batadv_ogm_packet: OGM packet to be considered
910 * -1 the packet is old and has been received while the seqno window 925 * @if_incoming: interface on which the OGM packet was received
911 * was protected. Caller should drop it. 926 *
927 * Returns duplicate status as enum batadv_dup_status
912 */ 928 */
913static int 929static enum batadv_dup_status
914batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, 930batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
915 const struct batadv_ogm_packet *batadv_ogm_packet, 931 const struct batadv_ogm_packet *batadv_ogm_packet,
916 const struct batadv_hard_iface *if_incoming) 932 const struct batadv_hard_iface *if_incoming)
@@ -918,17 +934,18 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
918 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 934 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
919 struct batadv_orig_node *orig_node; 935 struct batadv_orig_node *orig_node;
920 struct batadv_neigh_node *tmp_neigh_node; 936 struct batadv_neigh_node *tmp_neigh_node;
921 int is_duplicate = 0; 937 int is_dup;
922 int32_t seq_diff; 938 int32_t seq_diff;
923 int need_update = 0; 939 int need_update = 0;
924 int set_mark, ret = -1; 940 int set_mark;
941 enum batadv_dup_status ret = BATADV_NO_DUP;
925 uint32_t seqno = ntohl(batadv_ogm_packet->seqno); 942 uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
926 uint8_t *neigh_addr; 943 uint8_t *neigh_addr;
927 uint8_t packet_count; 944 uint8_t packet_count;
928 945
929 orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig); 946 orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
930 if (!orig_node) 947 if (!orig_node)
931 return 0; 948 return BATADV_NO_DUP;
932 949
933 spin_lock_bh(&orig_node->ogm_cnt_lock); 950 spin_lock_bh(&orig_node->ogm_cnt_lock);
934 seq_diff = seqno - orig_node->last_real_seqno; 951 seq_diff = seqno - orig_node->last_real_seqno;
@@ -936,22 +953,29 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
936 /* signalize caller that the packet is to be dropped. */ 953 /* signalize caller that the packet is to be dropped. */
937 if (!hlist_empty(&orig_node->neigh_list) && 954 if (!hlist_empty(&orig_node->neigh_list) &&
938 batadv_window_protected(bat_priv, seq_diff, 955 batadv_window_protected(bat_priv, seq_diff,
939 &orig_node->batman_seqno_reset)) 956 &orig_node->batman_seqno_reset)) {
957 ret = BATADV_PROTECTED;
940 goto out; 958 goto out;
959 }
941 960
942 rcu_read_lock(); 961 rcu_read_lock();
943 hlist_for_each_entry_rcu(tmp_neigh_node, 962 hlist_for_each_entry_rcu(tmp_neigh_node,
944 &orig_node->neigh_list, list) { 963 &orig_node->neigh_list, list) {
945 is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits,
946 orig_node->last_real_seqno,
947 seqno);
948
949 neigh_addr = tmp_neigh_node->addr; 964 neigh_addr = tmp_neigh_node->addr;
965 is_dup = batadv_test_bit(tmp_neigh_node->real_bits,
966 orig_node->last_real_seqno,
967 seqno);
968
950 if (batadv_compare_eth(neigh_addr, ethhdr->h_source) && 969 if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
951 tmp_neigh_node->if_incoming == if_incoming) 970 tmp_neigh_node->if_incoming == if_incoming) {
952 set_mark = 1; 971 set_mark = 1;
953 else 972 if (is_dup)
973 ret = BATADV_NEIGH_DUP;
974 } else {
954 set_mark = 0; 975 set_mark = 0;
976 if (is_dup && (ret != BATADV_NEIGH_DUP))
977 ret = BATADV_ORIG_DUP;
978 }
955 979
956 /* if the window moved, set the update flag. */ 980 /* if the window moved, set the update flag. */
957 need_update |= batadv_bit_get_packet(bat_priv, 981 need_update |= batadv_bit_get_packet(bat_priv,
@@ -971,8 +995,6 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
971 orig_node->last_real_seqno = seqno; 995 orig_node->last_real_seqno = seqno;
972 } 996 }
973 997
974 ret = is_duplicate;
975
976out: 998out:
977 spin_unlock_bh(&orig_node->ogm_cnt_lock); 999 spin_unlock_bh(&orig_node->ogm_cnt_lock);
978 batadv_orig_node_free_ref(orig_node); 1000 batadv_orig_node_free_ref(orig_node);
@@ -994,7 +1016,8 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
994 int is_broadcast = 0, is_bidirect; 1016 int is_broadcast = 0, is_bidirect;
995 bool is_single_hop_neigh = false; 1017 bool is_single_hop_neigh = false;
996 bool is_from_best_next_hop = false; 1018 bool is_from_best_next_hop = false;
997 int is_duplicate, sameseq, simlar_ttl; 1019 int sameseq, similar_ttl;
1020 enum batadv_dup_status dup_status;
998 uint32_t if_incoming_seqno; 1021 uint32_t if_incoming_seqno;
999 uint8_t *prev_sender; 1022 uint8_t *prev_sender;
1000 1023
@@ -1138,10 +1161,10 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
1138 if (!orig_node) 1161 if (!orig_node)
1139 return; 1162 return;
1140 1163
1141 is_duplicate = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet, 1164 dup_status = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet,
1142 if_incoming); 1165 if_incoming);
1143 1166
1144 if (is_duplicate == -1) { 1167 if (dup_status == BATADV_PROTECTED) {
1145 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 1168 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1146 "Drop packet: packet within seqno protection time (sender: %pM)\n", 1169 "Drop packet: packet within seqno protection time (sender: %pM)\n",
1147 ethhdr->h_source); 1170 ethhdr->h_source);
@@ -1211,11 +1234,12 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
1211 * seqno and similar ttl as the non-duplicate 1234 * seqno and similar ttl as the non-duplicate
1212 */ 1235 */
1213 sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno); 1236 sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno);
1214 simlar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl; 1237 similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl;
1215 if (is_bidirect && (!is_duplicate || (sameseq && simlar_ttl))) 1238 if (is_bidirect && ((dup_status == BATADV_NO_DUP) ||
1239 (sameseq && similar_ttl)))
1216 batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr, 1240 batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
1217 batadv_ogm_packet, if_incoming, 1241 batadv_ogm_packet, if_incoming,
1218 tt_buff, is_duplicate); 1242 tt_buff, dup_status);
1219 1243
1220 /* is single hop (direct) neighbor */ 1244 /* is single hop (direct) neighbor */
1221 if (is_single_hop_neigh) { 1245 if (is_single_hop_neigh) {
@@ -1236,7 +1260,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
1236 goto out_neigh; 1260 goto out_neigh;
1237 } 1261 }
1238 1262
1239 if (is_duplicate) { 1263 if (dup_status == BATADV_NEIGH_DUP) {
1240 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 1264 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1241 "Drop packet: duplicate packet received\n"); 1265 "Drop packet: duplicate packet received\n");
1242 goto out_neigh; 1266 goto out_neigh;
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 379061c72549..de27b3175cfd 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1067,6 +1067,10 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
1067 group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN)); 1067 group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
1068 bat_priv->bla.claim_dest.group = group; 1068 bat_priv->bla.claim_dest.group = group;
1069 1069
1070 /* purge everything when bridge loop avoidance is turned off */
1071 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1072 oldif = NULL;
1073
1070 if (!oldif) { 1074 if (!oldif) {
1071 batadv_bla_purge_claims(bat_priv, NULL, 1); 1075 batadv_bla_purge_claims(bat_priv, NULL, 1);
1072 batadv_bla_purge_backbone_gw(bat_priv, 1); 1076 batadv_bla_purge_backbone_gw(bat_priv, 1);
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index 15a22efa9a67..929e304dacb2 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -582,10 +582,7 @@ static ssize_t batadv_store_mesh_iface(struct kobject *kobj,
582 (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0)) 582 (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
583 goto out; 583 goto out;
584 584
585 if (!rtnl_trylock()) { 585 rtnl_lock();
586 ret = -ERESTARTSYS;
587 goto out;
588 }
589 586
590 if (status_tmp == BATADV_IF_NOT_IN_USE) { 587 if (status_tmp == BATADV_IF_NOT_IN_USE) {
591 batadv_hardif_disable_interface(hard_iface, 588 batadv_hardif_disable_interface(hard_iface,
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 33843c5c4939..ace5e55fe5a3 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -341,7 +341,6 @@ static void hci_init1_req(struct hci_request *req, unsigned long opt)
341 341
342static void bredr_setup(struct hci_request *req) 342static void bredr_setup(struct hci_request *req)
343{ 343{
344 struct hci_cp_delete_stored_link_key cp;
345 __le16 param; 344 __le16 param;
346 __u8 flt_type; 345 __u8 flt_type;
347 346
@@ -365,10 +364,6 @@ static void bredr_setup(struct hci_request *req)
365 param = __constant_cpu_to_le16(0x7d00); 364 param = __constant_cpu_to_le16(0x7d00);
366 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param); 365 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
367 366
368 bacpy(&cp.bdaddr, BDADDR_ANY);
369 cp.delete_all = 0x01;
370 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
371
372 /* Read page scan parameters */ 367 /* Read page scan parameters */
373 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) { 368 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
374 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL); 369 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
@@ -602,6 +597,16 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
602 struct hci_dev *hdev = req->hdev; 597 struct hci_dev *hdev = req->hdev;
603 u8 p; 598 u8 p;
604 599
600 /* Only send HCI_Delete_Stored_Link_Key if it is supported */
601 if (hdev->commands[6] & 0x80) {
602 struct hci_cp_delete_stored_link_key cp;
603
604 bacpy(&cp.bdaddr, BDADDR_ANY);
605 cp.delete_all = 0x01;
606 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
607 sizeof(cp), &cp);
608 }
609
605 if (hdev->commands[5] & 0x10) 610 if (hdev->commands[5] & 0x10)
606 hci_setup_link_policy(req); 611 hci_setup_link_policy(req);
607 612
@@ -1555,11 +1560,15 @@ static const struct rfkill_ops hci_rfkill_ops = {
1555static void hci_power_on(struct work_struct *work) 1560static void hci_power_on(struct work_struct *work)
1556{ 1561{
1557 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on); 1562 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1563 int err;
1558 1564
1559 BT_DBG("%s", hdev->name); 1565 BT_DBG("%s", hdev->name);
1560 1566
1561 if (hci_dev_open(hdev->id) < 0) 1567 err = hci_dev_open(hdev->id);
1568 if (err < 0) {
1569 mgmt_set_powered_failed(hdev, err);
1562 return; 1570 return;
1571 }
1563 1572
1564 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 1573 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1565 queue_delayed_work(hdev->req_workqueue, &hdev->power_off, 1574 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
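
The hci_core.c hunks above move HCI_Delete_Stored_Link_Key out of the unconditional BR/EDR setup and only issue it when bit 7 of byte 6 in the controller's supported-commands bitmap is set. A rough stand-alone sketch of that feature-bit test; the helper name and the demo scaffolding are invented for illustration, only the byte/bit position is taken from the hunk.

#include <stdio.h>
#include <string.h>

/* Byte 6, bit 7 of the supported-commands bitmap gates the command,
 * mirroring the hdev->commands[6] & 0x80 test in the hunk above. */
static int supports_delete_stored_link_key(const unsigned char *commands)
{
        return !!(commands[6] & 0x80);
}

int main(void)
{
        unsigned char commands[64];

        memset(commands, 0, sizeof(commands));
        printf("unsupported: %d\n", supports_delete_stored_link_key(commands));

        commands[6] |= 0x80;
        printf("supported:   %d\n", supports_delete_stored_link_key(commands));
        return 0;
}
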
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index a76d1ac0321b..68843a28a7af 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -2852,6 +2852,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2852 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u", 2852 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2853 conn, code, ident, dlen); 2853 conn, code, ident, dlen);
2854 2854
2855 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2856 return NULL;
2857
2855 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen; 2858 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2856 count = min_t(unsigned int, conn->mtu, len); 2859 count = min_t(unsigned int, conn->mtu, len);
2857 2860
@@ -3677,10 +3680,14 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3677} 3680}
3678 3681
3679static inline int l2cap_command_rej(struct l2cap_conn *conn, 3682static inline int l2cap_command_rej(struct l2cap_conn *conn,
3680 struct l2cap_cmd_hdr *cmd, u8 *data) 3683 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3684 u8 *data)
3681{ 3685{
3682 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data; 3686 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3683 3687
3688 if (cmd_len < sizeof(*rej))
3689 return -EPROTO;
3690
3684 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD) 3691 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3685 return 0; 3692 return 0;
3686 3693
@@ -3829,11 +3836,14 @@ sendresp:
3829} 3836}
3830 3837
3831static int l2cap_connect_req(struct l2cap_conn *conn, 3838static int l2cap_connect_req(struct l2cap_conn *conn,
3832 struct l2cap_cmd_hdr *cmd, u8 *data) 3839 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3833{ 3840{
3834 struct hci_dev *hdev = conn->hcon->hdev; 3841 struct hci_dev *hdev = conn->hcon->hdev;
3835 struct hci_conn *hcon = conn->hcon; 3842 struct hci_conn *hcon = conn->hcon;
3836 3843
3844 if (cmd_len < sizeof(struct l2cap_conn_req))
3845 return -EPROTO;
3846
3837 hci_dev_lock(hdev); 3847 hci_dev_lock(hdev);
3838 if (test_bit(HCI_MGMT, &hdev->dev_flags) && 3848 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3839 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags)) 3849 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
@@ -3847,7 +3857,8 @@ static int l2cap_connect_req(struct l2cap_conn *conn,
3847} 3857}
3848 3858
3849static int l2cap_connect_create_rsp(struct l2cap_conn *conn, 3859static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3850 struct l2cap_cmd_hdr *cmd, u8 *data) 3860 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3861 u8 *data)
3851{ 3862{
3852 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data; 3863 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3853 u16 scid, dcid, result, status; 3864 u16 scid, dcid, result, status;
@@ -3855,6 +3866,9 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3855 u8 req[128]; 3866 u8 req[128];
3856 int err; 3867 int err;
3857 3868
3869 if (cmd_len < sizeof(*rsp))
3870 return -EPROTO;
3871
3858 scid = __le16_to_cpu(rsp->scid); 3872 scid = __le16_to_cpu(rsp->scid);
3859 dcid = __le16_to_cpu(rsp->dcid); 3873 dcid = __le16_to_cpu(rsp->dcid);
3860 result = __le16_to_cpu(rsp->result); 3874 result = __le16_to_cpu(rsp->result);
@@ -3952,6 +3966,9 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
3952 struct l2cap_chan *chan; 3966 struct l2cap_chan *chan;
3953 int len, err = 0; 3967 int len, err = 0;
3954 3968
3969 if (cmd_len < sizeof(*req))
3970 return -EPROTO;
3971
3955 dcid = __le16_to_cpu(req->dcid); 3972 dcid = __le16_to_cpu(req->dcid);
3956 flags = __le16_to_cpu(req->flags); 3973 flags = __le16_to_cpu(req->flags);
3957 3974
@@ -3975,7 +3992,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
3975 3992
3976 /* Reject if config buffer is too small. */ 3993 /* Reject if config buffer is too small. */
3977 len = cmd_len - sizeof(*req); 3994 len = cmd_len - sizeof(*req);
3978 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) { 3995 if (chan->conf_len + len > sizeof(chan->conf_req)) {
3979 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 3996 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3980 l2cap_build_conf_rsp(chan, rsp, 3997 l2cap_build_conf_rsp(chan, rsp,
3981 L2CAP_CONF_REJECT, flags), rsp); 3998 L2CAP_CONF_REJECT, flags), rsp);
@@ -4053,14 +4070,18 @@ unlock:
4053} 4070}
4054 4071
4055static inline int l2cap_config_rsp(struct l2cap_conn *conn, 4072static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4056 struct l2cap_cmd_hdr *cmd, u8 *data) 4073 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4074 u8 *data)
4057{ 4075{
4058 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data; 4076 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4059 u16 scid, flags, result; 4077 u16 scid, flags, result;
4060 struct l2cap_chan *chan; 4078 struct l2cap_chan *chan;
4061 int len = le16_to_cpu(cmd->len) - sizeof(*rsp); 4079 int len = cmd_len - sizeof(*rsp);
4062 int err = 0; 4080 int err = 0;
4063 4081
4082 if (cmd_len < sizeof(*rsp))
4083 return -EPROTO;
4084
4064 scid = __le16_to_cpu(rsp->scid); 4085 scid = __le16_to_cpu(rsp->scid);
4065 flags = __le16_to_cpu(rsp->flags); 4086 flags = __le16_to_cpu(rsp->flags);
4066 result = __le16_to_cpu(rsp->result); 4087 result = __le16_to_cpu(rsp->result);
@@ -4161,7 +4182,8 @@ done:
4161} 4182}
4162 4183
4163static inline int l2cap_disconnect_req(struct l2cap_conn *conn, 4184static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4164 struct l2cap_cmd_hdr *cmd, u8 *data) 4185 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4186 u8 *data)
4165{ 4187{
4166 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data; 4188 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4167 struct l2cap_disconn_rsp rsp; 4189 struct l2cap_disconn_rsp rsp;
@@ -4169,6 +4191,9 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4169 struct l2cap_chan *chan; 4191 struct l2cap_chan *chan;
4170 struct sock *sk; 4192 struct sock *sk;
4171 4193
4194 if (cmd_len != sizeof(*req))
4195 return -EPROTO;
4196
4172 scid = __le16_to_cpu(req->scid); 4197 scid = __le16_to_cpu(req->scid);
4173 dcid = __le16_to_cpu(req->dcid); 4198 dcid = __le16_to_cpu(req->dcid);
4174 4199
@@ -4208,12 +4233,16 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4208} 4233}
4209 4234
4210static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, 4235static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4211 struct l2cap_cmd_hdr *cmd, u8 *data) 4236 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4237 u8 *data)
4212{ 4238{
4213 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data; 4239 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4214 u16 dcid, scid; 4240 u16 dcid, scid;
4215 struct l2cap_chan *chan; 4241 struct l2cap_chan *chan;
4216 4242
4243 if (cmd_len != sizeof(*rsp))
4244 return -EPROTO;
4245
4217 scid = __le16_to_cpu(rsp->scid); 4246 scid = __le16_to_cpu(rsp->scid);
4218 dcid = __le16_to_cpu(rsp->dcid); 4247 dcid = __le16_to_cpu(rsp->dcid);
4219 4248
@@ -4243,11 +4272,15 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4243} 4272}
4244 4273
4245static inline int l2cap_information_req(struct l2cap_conn *conn, 4274static inline int l2cap_information_req(struct l2cap_conn *conn,
4246 struct l2cap_cmd_hdr *cmd, u8 *data) 4275 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4276 u8 *data)
4247{ 4277{
4248 struct l2cap_info_req *req = (struct l2cap_info_req *) data; 4278 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4249 u16 type; 4279 u16 type;
4250 4280
4281 if (cmd_len != sizeof(*req))
4282 return -EPROTO;
4283
4251 type = __le16_to_cpu(req->type); 4284 type = __le16_to_cpu(req->type);
4252 4285
4253 BT_DBG("type 0x%4.4x", type); 4286 BT_DBG("type 0x%4.4x", type);
@@ -4294,11 +4327,15 @@ static inline int l2cap_information_req(struct l2cap_conn *conn,
4294} 4327}
4295 4328
4296static inline int l2cap_information_rsp(struct l2cap_conn *conn, 4329static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4297 struct l2cap_cmd_hdr *cmd, u8 *data) 4330 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4331 u8 *data)
4298{ 4332{
4299 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data; 4333 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4300 u16 type, result; 4334 u16 type, result;
4301 4335
4336 if (cmd_len < sizeof(*rsp))
4337 return -EPROTO;
4338
4302 type = __le16_to_cpu(rsp->type); 4339 type = __le16_to_cpu(rsp->type);
4303 result = __le16_to_cpu(rsp->result); 4340 result = __le16_to_cpu(rsp->result);
4304 4341
@@ -5164,16 +5201,16 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5164 5201
5165 switch (cmd->code) { 5202 switch (cmd->code) {
5166 case L2CAP_COMMAND_REJ: 5203 case L2CAP_COMMAND_REJ:
5167 l2cap_command_rej(conn, cmd, data); 5204 l2cap_command_rej(conn, cmd, cmd_len, data);
5168 break; 5205 break;
5169 5206
5170 case L2CAP_CONN_REQ: 5207 case L2CAP_CONN_REQ:
5171 err = l2cap_connect_req(conn, cmd, data); 5208 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5172 break; 5209 break;
5173 5210
5174 case L2CAP_CONN_RSP: 5211 case L2CAP_CONN_RSP:
5175 case L2CAP_CREATE_CHAN_RSP: 5212 case L2CAP_CREATE_CHAN_RSP:
5176 err = l2cap_connect_create_rsp(conn, cmd, data); 5213 err = l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5177 break; 5214 break;
5178 5215
5179 case L2CAP_CONF_REQ: 5216 case L2CAP_CONF_REQ:
@@ -5181,15 +5218,15 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5181 break; 5218 break;
5182 5219
5183 case L2CAP_CONF_RSP: 5220 case L2CAP_CONF_RSP:
5184 err = l2cap_config_rsp(conn, cmd, data); 5221 err = l2cap_config_rsp(conn, cmd, cmd_len, data);
5185 break; 5222 break;
5186 5223
5187 case L2CAP_DISCONN_REQ: 5224 case L2CAP_DISCONN_REQ:
5188 err = l2cap_disconnect_req(conn, cmd, data); 5225 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5189 break; 5226 break;
5190 5227
5191 case L2CAP_DISCONN_RSP: 5228 case L2CAP_DISCONN_RSP:
5192 err = l2cap_disconnect_rsp(conn, cmd, data); 5229 err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5193 break; 5230 break;
5194 5231
5195 case L2CAP_ECHO_REQ: 5232 case L2CAP_ECHO_REQ:
@@ -5200,11 +5237,11 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5200 break; 5237 break;
5201 5238
5202 case L2CAP_INFO_REQ: 5239 case L2CAP_INFO_REQ:
5203 err = l2cap_information_req(conn, cmd, data); 5240 err = l2cap_information_req(conn, cmd, cmd_len, data);
5204 break; 5241 break;
5205 5242
5206 case L2CAP_INFO_RSP: 5243 case L2CAP_INFO_RSP:
5207 err = l2cap_information_rsp(conn, cmd, data); 5244 err = l2cap_information_rsp(conn, cmd, cmd_len, data);
5208 break; 5245 break;
5209 5246
5210 case L2CAP_CREATE_CHAN_REQ: 5247 case L2CAP_CREATE_CHAN_REQ:
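
The l2cap_core.c hunks thread the signalling PDU length (cmd_len) into every handler and bail out with -EPROTO before any field of the payload is read. A compact user-space sketch of that validate-before-parse idiom, assuming a toy wire format; the struct and function names here are illustrative, not the kernel's.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the wire structure a handler parses. */
struct conn_req {
        uint16_t psm;
        uint16_t scid;
};

/* Refuse the PDU before reading any field if the payload is too short. */
static int parse_conn_req(const uint8_t *data, size_t cmd_len)
{
        struct conn_req req;

        if (cmd_len < sizeof(req))
                return -EPROTO;            /* same policy as the new checks */

        memcpy(&req, data, sizeof(req));   /* also avoids unaligned reads */
        printf("psm=%u scid=%u\n", (unsigned)req.psm, (unsigned)req.scid);
        return 0;
}

int main(void)
{
        const uint8_t short_pdu[2] = { 0x01, 0x00 };
        const uint8_t good_pdu[4]  = { 0x01, 0x00, 0x40, 0x00 };

        printf("short: %d\n", parse_conn_req(short_pdu, sizeof(short_pdu)));
        printf("good:  %d\n", parse_conn_req(good_pdu, sizeof(good_pdu)));
        return 0;
}

Checking the length against the exact structure size (or with != for fixed-size requests, as the disconnect and information handlers do) keeps a malformed remote peer from steering reads past the end of the received buffer.
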
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 35fef22703e9..f8ecbc70293d 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2700,7 +2700,7 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2700 break; 2700 break;
2701 2701
2702 case DISCOV_TYPE_LE: 2702 case DISCOV_TYPE_LE:
2703 if (!lmp_host_le_capable(hdev)) { 2703 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2704 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY, 2704 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2705 MGMT_STATUS_NOT_SUPPORTED); 2705 MGMT_STATUS_NOT_SUPPORTED);
2706 mgmt_pending_remove(cmd); 2706 mgmt_pending_remove(cmd);
@@ -3418,6 +3418,27 @@ new_settings:
3418 return err; 3418 return err;
3419} 3419}
3420 3420
3421int mgmt_set_powered_failed(struct hci_dev *hdev, int err)
3422{
3423 struct pending_cmd *cmd;
3424 u8 status;
3425
3426 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
3427 if (!cmd)
3428 return -ENOENT;
3429
3430 if (err == -ERFKILL)
3431 status = MGMT_STATUS_RFKILLED;
3432 else
3433 status = MGMT_STATUS_FAILED;
3434
3435 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
3436
3437 mgmt_pending_remove(cmd);
3438
3439 return err;
3440}
3441
3421int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable) 3442int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3422{ 3443{
3423 struct cmd_lookup match = { NULL, hdev }; 3444 struct cmd_lookup match = { NULL, hdev };
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index b2296d3857a0..b5562abdd6e0 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -770,7 +770,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
770 770
771 BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level); 771 BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
772 772
773 if (!lmp_host_le_capable(hcon->hdev)) 773 if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags))
774 return 1; 774 return 1;
775 775
776 if (sec_level == BT_SECURITY_LOW) 776 if (sec_level == BT_SECURITY_LOW)
@@ -851,7 +851,7 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
851 __u8 reason; 851 __u8 reason;
852 int err = 0; 852 int err = 0;
853 853
854 if (!lmp_host_le_capable(conn->hcon->hdev)) { 854 if (!test_bit(HCI_LE_ENABLED, &conn->hcon->hdev->dev_flags)) {
855 err = -ENOTSUPP; 855 err = -ENOTSUPP;
856 reason = SMP_PAIRING_NOTSUPP; 856 reason = SMP_PAIRING_NOTSUPP;
857 goto done; 857 goto done;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 81f2389f78eb..d6448e35e027 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -465,8 +465,9 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
465 skb_set_transport_header(skb, skb->len); 465 skb_set_transport_header(skb, skb->len);
466 mldq = (struct mld_msg *) icmp6_hdr(skb); 466 mldq = (struct mld_msg *) icmp6_hdr(skb);
467 467
468 interval = ipv6_addr_any(group) ? br->multicast_last_member_interval : 468 interval = ipv6_addr_any(group) ?
469 br->multicast_query_response_interval; 469 br->multicast_query_response_interval :
470 br->multicast_last_member_interval;
470 471
471 mldq->mld_type = ICMPV6_MGM_QUERY; 472 mldq->mld_type = ICMPV6_MGM_QUERY;
472 mldq->mld_code = 0; 473 mldq->mld_code = 0;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index d5953b87918c..3a246a6cab47 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1675,13 +1675,13 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
1675 __register_request(osdc, req); 1675 __register_request(osdc, req);
1676 __unregister_linger_request(osdc, req); 1676 __unregister_linger_request(osdc, req);
1677 } 1677 }
1678 reset_changed_osds(osdc);
1678 mutex_unlock(&osdc->request_mutex); 1679 mutex_unlock(&osdc->request_mutex);
1679 1680
1680 if (needmap) { 1681 if (needmap) {
1681 dout("%d requests for down osds, need new map\n", needmap); 1682 dout("%d requests for down osds, need new map\n", needmap);
1682 ceph_monc_request_next_osdmap(&osdc->client->monc); 1683 ceph_monc_request_next_osdmap(&osdc->client->monc);
1683 } 1684 }
1684 reset_changed_osds(osdc);
1685} 1685}
1686 1686
1687 1687
diff --git a/net/compat.c b/net/compat.c
index 79ae88485001..f0a1ba6c8086 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -734,19 +734,25 @@ static unsigned char nas[21] = {
734 734
735asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags) 735asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
736{ 736{
737 return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); 737 if (flags & MSG_CMSG_COMPAT)
738 return -EINVAL;
739 return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
738} 740}
739 741
740asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, 742asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
741 unsigned int vlen, unsigned int flags) 743 unsigned int vlen, unsigned int flags)
742{ 744{
745 if (flags & MSG_CMSG_COMPAT)
746 return -EINVAL;
743 return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, 747 return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
744 flags | MSG_CMSG_COMPAT); 748 flags | MSG_CMSG_COMPAT);
745} 749}
746 750
747asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags) 751asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
748{ 752{
749 return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); 753 if (flags & MSG_CMSG_COMPAT)
754 return -EINVAL;
755 return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
750} 756}
751 757
752asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned int flags) 758asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned int flags)
@@ -768,6 +774,9 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
768 int datagrams; 774 int datagrams;
769 struct timespec ktspec; 775 struct timespec ktspec;
770 776
777 if (flags & MSG_CMSG_COMPAT)
778 return -EINVAL;
779
771 if (COMPAT_USE_64BIT_TIME) 780 if (COMPAT_USE_64BIT_TIME)
772 return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, 781 return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
773 flags | MSG_CMSG_COMPAT, 782 flags | MSG_CMSG_COMPAT,
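
The net/compat.c hunks reject MSG_CMSG_COMPAT when it arrives from user space, because that bit is an in-kernel marker the compat entry points add themselves before calling the common code. A toy sketch of the same reject-reserved-flags check; the flag values and the function name below are invented for the example.

#include <errno.h>
#include <stdio.h>

#define MSG_DONTWAIT      0x40          /* an ordinary, user-settable flag */
#define MSG_INTERNAL_MARK 0x80000000u   /* reserved for in-kernel use */

static int do_sendmsg(unsigned int flags)
{
        /* Refuse flags user space must never set directly. */
        if (flags & MSG_INTERNAL_MARK)
                return -EINVAL;

        /* The real entry point would OR the mark in itself here. */
        return 0;
}

int main(void)
{
        printf("%d\n", do_sendmsg(MSG_DONTWAIT));                     /* 0 */
        printf("%d\n", do_sendmsg(MSG_DONTWAIT | MSG_INTERNAL_MARK)); /* -22 */
        return 0;
}
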
diff --git a/net/core/dev.c b/net/core/dev.c
index fc1e289397f5..faebb398fb46 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -792,6 +792,40 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex)
792EXPORT_SYMBOL(dev_get_by_index); 792EXPORT_SYMBOL(dev_get_by_index);
793 793
794/** 794/**
795 * netdev_get_name - get a netdevice name, knowing its ifindex.
796 * @net: network namespace
797 * @name: a pointer to the buffer where the name will be stored.
798 * @ifindex: the ifindex of the interface to get the name from.
799 *
800 * The use of raw_seqcount_begin() and cond_resched() before
801 * retrying is required as we want to give the writers a chance
802 * to complete when CONFIG_PREEMPT is not set.
803 */
804int netdev_get_name(struct net *net, char *name, int ifindex)
805{
806 struct net_device *dev;
807 unsigned int seq;
808
809retry:
810 seq = raw_seqcount_begin(&devnet_rename_seq);
811 rcu_read_lock();
812 dev = dev_get_by_index_rcu(net, ifindex);
813 if (!dev) {
814 rcu_read_unlock();
815 return -ENODEV;
816 }
817
818 strcpy(name, dev->name);
819 rcu_read_unlock();
820 if (read_seqcount_retry(&devnet_rename_seq, seq)) {
821 cond_resched();
822 goto retry;
823 }
824
825 return 0;
826}
827
828/**
795 * dev_getbyhwaddr_rcu - find a device by its hardware address 829 * dev_getbyhwaddr_rcu - find a device by its hardware address
796 * @net: the applicable net namespace 830 * @net: the applicable net namespace
797 * @type: media type of device 831 * @type: media type of device
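
netdev_get_name(), added above, copies the device name inside a seqcount read section and redoes the copy if a concurrent rename bumped the counter in the meantime. A simplified single-process model of that retry loop follows; the C11 atomics stand in for the kernel's seqcount API and deliberately skip its lockdep annotations and finer barrier details.

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

/* Toy seqcount: a writer makes the counter odd while updating, even after. */
static _Atomic unsigned int name_seq;
static char dev_name[16] = "eth0";

static void rename_dev(const char *new_name)
{
        atomic_fetch_add_explicit(&name_seq, 1, memory_order_release); /* odd */
        strncpy(dev_name, new_name, sizeof(dev_name) - 1);
        atomic_fetch_add_explicit(&name_seq, 1, memory_order_release); /* even */
}

/* Reader: redo the whole copy if the sequence changed while copying. */
static void get_name(char *out, size_t len)
{
        unsigned int seq;

        do {
                do {
                        seq = atomic_load_explicit(&name_seq,
                                                   memory_order_acquire);
                } while (seq & 1);              /* writer in progress */

                strncpy(out, dev_name, len - 1);
                out[len - 1] = '\0';
        } while (atomic_load_explicit(&name_seq,
                                      memory_order_acquire) != seq);
}

int main(void)
{
        char buf[16];

        get_name(buf, sizeof(buf));
        printf("%s\n", buf);
        rename_dev("wlan0");
        get_name(buf, sizeof(buf));
        printf("%s\n", buf);
        return 0;
}

Factoring the loop into one helper is what lets the dev_ioctl.c and sock.c hunks further down drop their open-coded copies of the same retry sequence.
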
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index c013f38482a1..6cda4e2c2132 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -39,6 +39,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
39 ha->refcount = 1; 39 ha->refcount = 1;
40 ha->global_use = global; 40 ha->global_use = global;
41 ha->synced = sync; 41 ha->synced = sync;
42 ha->sync_cnt = 0;
42 list_add_tail_rcu(&ha->list, &list->list); 43 list_add_tail_rcu(&ha->list, &list->list);
43 list->count++; 44 list->count++;
44 45
@@ -66,7 +67,7 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
66 } 67 }
67 if (sync) { 68 if (sync) {
68 if (ha->synced) 69 if (ha->synced)
69 return 0; 70 return -EEXIST;
70 else 71 else
71 ha->synced = true; 72 ha->synced = true;
72 } 73 }
@@ -139,10 +140,13 @@ static int __hw_addr_sync_one(struct netdev_hw_addr_list *to_list,
139 140
140 err = __hw_addr_add_ex(to_list, ha->addr, addr_len, ha->type, 141 err = __hw_addr_add_ex(to_list, ha->addr, addr_len, ha->type,
141 false, true); 142 false, true);
142 if (err) 143 if (err && err != -EEXIST)
143 return err; 144 return err;
144 ha->sync_cnt++; 145
145 ha->refcount++; 146 if (!err) {
147 ha->sync_cnt++;
148 ha->refcount++;
149 }
146 150
147 return 0; 151 return 0;
148} 152}
@@ -159,7 +163,8 @@ static void __hw_addr_unsync_one(struct netdev_hw_addr_list *to_list,
159 if (err) 163 if (err)
160 return; 164 return;
161 ha->sync_cnt--; 165 ha->sync_cnt--;
162 __hw_addr_del_entry(from_list, ha, false, true); 166 /* address on from list is not marked synced */
167 __hw_addr_del_entry(from_list, ha, false, false);
163} 168}
164 169
165static int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list, 170static int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
@@ -796,7 +801,7 @@ int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
796 return -EINVAL; 801 return -EINVAL;
797 802
798 netif_addr_lock_nested(to); 803 netif_addr_lock_nested(to);
799 err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len); 804 err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
800 if (!err) 805 if (!err)
801 __dev_set_rx_mode(to); 806 __dev_set_rx_mode(to);
802 netif_addr_unlock(to); 807 netif_addr_unlock(to);
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 6cc0481faade..5b7d0e1d0664 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -19,9 +19,8 @@
19 19
20static int dev_ifname(struct net *net, struct ifreq __user *arg) 20static int dev_ifname(struct net *net, struct ifreq __user *arg)
21{ 21{
22 struct net_device *dev;
23 struct ifreq ifr; 22 struct ifreq ifr;
24 unsigned seq; 23 int error;
25 24
26 /* 25 /*
27 * Fetch the caller's info block. 26 * Fetch the caller's info block.
@@ -30,19 +29,9 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
30 if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) 29 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
31 return -EFAULT; 30 return -EFAULT;
32 31
33retry: 32 error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
34 seq = read_seqcount_begin(&devnet_rename_seq); 33 if (error)
35 rcu_read_lock(); 34 return error;
36 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
37 if (!dev) {
38 rcu_read_unlock();
39 return -ENODEV;
40 }
41
42 strcpy(ifr.ifr_name, dev->name);
43 rcu_read_unlock();
44 if (read_seqcount_retry(&devnet_rename_seq, seq))
45 goto retry;
46 35
47 if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) 36 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
48 return -EFAULT; 37 return -EFAULT;
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 22efdaa76ebf..ce91766eeca9 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -60,10 +60,10 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
60 [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6", 60 [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6",
61 [NETIF_F_HIGHDMA_BIT] = "highdma", 61 [NETIF_F_HIGHDMA_BIT] = "highdma",
62 [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist", 62 [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist",
63 [NETIF_F_HW_VLAN_CTAG_TX_BIT] = "tx-vlan-ctag-hw-insert", 63 [NETIF_F_HW_VLAN_CTAG_TX_BIT] = "tx-vlan-hw-insert",
64 64
65 [NETIF_F_HW_VLAN_CTAG_RX_BIT] = "rx-vlan-ctag-hw-parse", 65 [NETIF_F_HW_VLAN_CTAG_RX_BIT] = "rx-vlan-hw-parse",
66 [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-ctag-filter", 66 [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-filter",
67 [NETIF_F_HW_VLAN_STAG_TX_BIT] = "tx-vlan-stag-hw-insert", 67 [NETIF_F_HW_VLAN_STAG_TX_BIT] = "tx-vlan-stag-hw-insert",
68 [NETIF_F_HW_VLAN_STAG_RX_BIT] = "rx-vlan-stag-hw-parse", 68 [NETIF_F_HW_VLAN_STAG_RX_BIT] = "rx-vlan-stag-hw-parse",
69 [NETIF_F_HW_VLAN_STAG_FILTER_BIT] = "rx-vlan-stag-filter", 69 [NETIF_F_HW_VLAN_STAG_FILTER_BIT] = "rx-vlan-stag-filter",
diff --git a/net/core/filter.c b/net/core/filter.c
index dad2a178f9f8..6438f29ff266 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -778,7 +778,7 @@ int sk_detach_filter(struct sock *sk)
778} 778}
779EXPORT_SYMBOL_GPL(sk_detach_filter); 779EXPORT_SYMBOL_GPL(sk_detach_filter);
780 780
781static void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to) 781void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
782{ 782{
783 static const u16 decodes[] = { 783 static const u16 decodes[] = {
784 [BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K, 784 [BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K,
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index af9185d0be6a..1c1738cc4538 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -195,7 +195,7 @@ struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
195 * the tail pointer in struct sk_buff! 195 * the tail pointer in struct sk_buff!
196 */ 196 */
197 memset(skb, 0, offsetof(struct sk_buff, tail)); 197 memset(skb, 0, offsetof(struct sk_buff, tail));
198 skb->data = NULL; 198 skb->head = NULL;
199 skb->truesize = sizeof(struct sk_buff); 199 skb->truesize = sizeof(struct sk_buff);
200 atomic_set(&skb->users, 1); 200 atomic_set(&skb->users, 1);
201 201
@@ -483,15 +483,8 @@ EXPORT_SYMBOL(skb_add_rx_frag);
483 483
484static void skb_drop_list(struct sk_buff **listp) 484static void skb_drop_list(struct sk_buff **listp)
485{ 485{
486 struct sk_buff *list = *listp; 486 kfree_skb_list(*listp);
487
488 *listp = NULL; 487 *listp = NULL;
489
490 do {
491 struct sk_buff *this = list;
492 list = list->next;
493 kfree_skb(this);
494 } while (list);
495} 488}
496 489
497static inline void skb_drop_fraglist(struct sk_buff *skb) 490static inline void skb_drop_fraglist(struct sk_buff *skb)
@@ -611,7 +604,7 @@ static void skb_release_head_state(struct sk_buff *skb)
611static void skb_release_all(struct sk_buff *skb) 604static void skb_release_all(struct sk_buff *skb)
612{ 605{
613 skb_release_head_state(skb); 606 skb_release_head_state(skb);
614 if (likely(skb->data)) 607 if (likely(skb->head))
615 skb_release_data(skb); 608 skb_release_data(skb);
616} 609}
617 610
@@ -651,6 +644,17 @@ void kfree_skb(struct sk_buff *skb)
651} 644}
652EXPORT_SYMBOL(kfree_skb); 645EXPORT_SYMBOL(kfree_skb);
653 646
647void kfree_skb_list(struct sk_buff *segs)
648{
649 while (segs) {
650 struct sk_buff *next = segs->next;
651
652 kfree_skb(segs);
653 segs = next;
654 }
655}
656EXPORT_SYMBOL(kfree_skb_list);
657
654/** 658/**
655 * skb_tx_error - report an sk_buff xmit error 659 * skb_tx_error - report an sk_buff xmit error
656 * @skb: buffer that triggered an error 660 * @skb: buffer that triggered an error
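
kfree_skb_list(), introduced above, is a walk over a NULL-terminated ->next chain that saves the successor before freeing each element; skb_drop_list() and the GRE segmentation error path further down now reuse it instead of open-coding the loop. The same shape on a plain malloc'd list, with a node type invented for the sketch:

#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *next;
        int val;
};

/* Free every element of a NULL-terminated singly linked list. */
static void free_node_list(struct node *segs)
{
        while (segs) {
                struct node *next = segs->next;   /* save before freeing */

                free(segs);
                segs = next;
        }
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        break;
                n->val = i;
                n->next = head;
                head = n;
        }

        free_node_list(head);
        puts("list freed");
        return 0;
}
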
diff --git a/net/core/sock.c b/net/core/sock.c
index 6ba327da79e1..d6d024cfaaaf 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -210,7 +210,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
210 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , 210 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
211 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , 211 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
212 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" , 212 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
213 "sk_lock-AF_NFC" , "sk_lock-AF_MAX" 213 "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_MAX"
214}; 214};
215static const char *const af_family_slock_key_strings[AF_MAX+1] = { 215static const char *const af_family_slock_key_strings[AF_MAX+1] = {
216 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , 216 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
@@ -226,7 +226,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
226 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , 226 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
227 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , 227 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
228 "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" , 228 "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
229 "slock-AF_NFC" , "slock-AF_MAX" 229 "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_MAX"
230}; 230};
231static const char *const af_family_clock_key_strings[AF_MAX+1] = { 231static const char *const af_family_clock_key_strings[AF_MAX+1] = {
232 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , 232 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
@@ -242,7 +242,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
242 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , 242 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
243 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , 243 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
244 "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" , 244 "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
245 "clock-AF_NFC" , "clock-AF_MAX" 245 "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_MAX"
246}; 246};
247 247
248/* 248/*
@@ -571,9 +571,7 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval,
571 int ret = -ENOPROTOOPT; 571 int ret = -ENOPROTOOPT;
572#ifdef CONFIG_NETDEVICES 572#ifdef CONFIG_NETDEVICES
573 struct net *net = sock_net(sk); 573 struct net *net = sock_net(sk);
574 struct net_device *dev;
575 char devname[IFNAMSIZ]; 574 char devname[IFNAMSIZ];
576 unsigned seq;
577 575
578 if (sk->sk_bound_dev_if == 0) { 576 if (sk->sk_bound_dev_if == 0) {
579 len = 0; 577 len = 0;
@@ -584,20 +582,9 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval,
584 if (len < IFNAMSIZ) 582 if (len < IFNAMSIZ)
585 goto out; 583 goto out;
586 584
587retry: 585 ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
588 seq = read_seqcount_begin(&devnet_rename_seq); 586 if (ret)
589 rcu_read_lock();
590 dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
591 ret = -ENODEV;
592 if (!dev) {
593 rcu_read_unlock();
594 goto out; 587 goto out;
595 }
596
597 strcpy(devname, dev->name);
598 rcu_read_unlock();
599 if (read_seqcount_retry(&devnet_rename_seq, seq))
600 goto retry;
601 588
602 len = strlen(devname) + 1; 589 len = strlen(devname) + 1;
603 590
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index d5bef0b0f639..a0e9cf6379de 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -73,8 +73,13 @@ int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
73 goto out; 73 goto out;
74 } 74 }
75 75
76 if (filter) 76 if (filter) {
77 memcpy(nla_data(attr), filter->insns, len); 77 struct sock_filter *fb = (struct sock_filter *)nla_data(attr);
78 int i;
79
80 for (i = 0; i < filter->len; i++, fb++)
81 sk_decode_filter(&filter->insns[i], fb);
82 }
78 83
79out: 84out:
80 rcu_read_unlock(); 85 rcu_read_unlock();
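
The sock_diag.c hunk stops copying the kernel's internal BPF opcodes straight into the netlink attribute and instead runs each instruction through sk_decode_filter() so the exported program uses the public opcode encoding. A small sketch of that translate-on-export pattern; the opcode names and values are invented, only the idea of a per-instruction decode table comes from the change.

#include <stdio.h>

/* Internal vs. exported opcode spaces; the values are invented. */
enum { OP_ADD_K, OP_SUB_K, OP_MAX };

static const unsigned short export_code[OP_MAX] = {
        [OP_ADD_K] = 0x04,       /* what user space expects to see */
        [OP_SUB_K] = 0x14,
};

struct insn {
        unsigned short code;
        unsigned int k;
};

/* Translate one internal instruction into its user-visible form. */
static void decode_insn(const struct insn *in, struct insn *out)
{
        out->code = export_code[in->code];
        out->k = in->k;
}

int main(void)
{
        struct insn internal = { .code = OP_SUB_K, .k = 7 };
        struct insn exported;

        decode_insn(&internal, &exported);
        printf("code=0x%02x k=%u\n", (unsigned)exported.code, exported.k);
        return 0;
}
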
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index b2e805af9b87..7856d1651d05 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -178,7 +178,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
178 178
179 err = __skb_linearize(skb); 179 err = __skb_linearize(skb);
180 if (err) { 180 if (err) {
181 kfree_skb(segs); 181 kfree_skb_list(segs);
182 segs = ERR_PTR(err); 182 segs = ERR_PTR(err);
183 goto out; 183 goto out;
184 } 184 }
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index e4147ec1665a..7fa8f08fa7ae 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -503,6 +503,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
503 503
504 inner_iph = (const struct iphdr *)skb_inner_network_header(skb); 504 inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
505 505
506 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
506 dst = tnl_params->daddr; 507 dst = tnl_params->daddr;
507 if (dst == 0) { 508 if (dst == 0) {
508 /* NBMA tunnel */ 509 /* NBMA tunnel */
@@ -658,7 +659,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
658 659
659 skb_dst_drop(skb); 660 skb_dst_drop(skb);
660 skb_dst_set(skb, &rt->dst); 661 skb_dst_set(skb, &rt->dst);
661 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
662 662
663 /* Push down and install the IP header. */ 663 /* Push down and install the IP header. */
664 skb_push(skb, sizeof(struct iphdr)); 664 skb_push(skb, sizeof(struct iphdr));
@@ -853,7 +853,7 @@ void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
853} 853}
854EXPORT_SYMBOL_GPL(ip_tunnel_dellink); 854EXPORT_SYMBOL_GPL(ip_tunnel_dellink);
855 855
856int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, 856int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
857 struct rtnl_link_ops *ops, char *devname) 857 struct rtnl_link_ops *ops, char *devname)
858{ 858{
859 struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id); 859 struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
@@ -899,7 +899,7 @@ static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head)
899 unregister_netdevice_queue(itn->fb_tunnel_dev, head); 899 unregister_netdevice_queue(itn->fb_tunnel_dev, head);
900} 900}
901 901
902void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn) 902void ip_tunnel_delete_net(struct ip_tunnel_net *itn)
903{ 903{
904 LIST_HEAD(list); 904 LIST_HEAD(list);
905 905
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 9d2bdb2c1d3f..c118f6b576bb 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -361,8 +361,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
361 tunnel->err_count = 0; 361 tunnel->err_count = 0;
362 } 362 }
363 363
364 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | 364 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
365 IPSKB_REROUTED);
366 skb_dst_drop(skb); 365 skb_dst_drop(skb);
367 skb_dst_set(skb, &rt->dst); 366 skb_dst_set(skb, &rt->dst);
368 nf_reset(skb); 367 nf_reset(skb);
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index cf08218ddbcf..32b0e978c8e0 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -125,15 +125,16 @@ static void ulog_send(struct ulog_net *ulog, unsigned int nlgroupnum)
125/* timer function to flush queue in flushtimeout time */ 125/* timer function to flush queue in flushtimeout time */
126static void ulog_timer(unsigned long data) 126static void ulog_timer(unsigned long data)
127{ 127{
128 unsigned int groupnum = *((unsigned int *)data);
128 struct ulog_net *ulog = container_of((void *)data, 129 struct ulog_net *ulog = container_of((void *)data,
129 struct ulog_net, 130 struct ulog_net,
130 nlgroup[*(unsigned int *)data]); 131 nlgroup[groupnum]);
131 pr_debug("timer function called, calling ulog_send\n"); 132 pr_debug("timer function called, calling ulog_send\n");
132 133
133 /* lock to protect against somebody modifying our structure 134 /* lock to protect against somebody modifying our structure
134 * from ipt_ulog_target at the same time */ 135 * from ipt_ulog_target at the same time */
135 spin_lock_bh(&ulog->lock); 136 spin_lock_bh(&ulog->lock);
136 ulog_send(ulog, data); 137 ulog_send(ulog, groupnum);
137 spin_unlock_bh(&ulog->lock); 138 spin_unlock_bh(&ulog->lock);
138} 139}
139 140
@@ -231,8 +232,10 @@ static void ipt_ulog_packet(struct net *net,
231 put_unaligned(tv.tv_usec, &pm->timestamp_usec); 232 put_unaligned(tv.tv_usec, &pm->timestamp_usec);
232 put_unaligned(skb->mark, &pm->mark); 233 put_unaligned(skb->mark, &pm->mark);
233 pm->hook = hooknum; 234 pm->hook = hooknum;
234 if (prefix != NULL) 235 if (prefix != NULL) {
235 strncpy(pm->prefix, prefix, sizeof(pm->prefix)); 236 strncpy(pm->prefix, prefix, sizeof(pm->prefix) - 1);
237 pm->prefix[sizeof(pm->prefix) - 1] = '\0';
238 }
236 else if (loginfo->prefix[0] != '\0') 239 else if (loginfo->prefix[0] != '\0')
237 strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix)); 240 strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
238 else 241 else
@@ -405,8 +408,11 @@ static int __net_init ulog_tg_net_init(struct net *net)
405 408
406 spin_lock_init(&ulog->lock); 409 spin_lock_init(&ulog->lock);
407 /* initialize ulog_buffers */ 410 /* initialize ulog_buffers */
408 for (i = 0; i < ULOG_MAXNLGROUPS; i++) 411 for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
409 setup_timer(&ulog->ulog_buffers[i].timer, ulog_timer, i); 412 ulog->nlgroup[i] = i;
413 setup_timer(&ulog->ulog_buffers[i].timer, ulog_timer,
414 (unsigned long)&ulog->nlgroup[i]);
415 }
410 416
411 ulog->nflognl = netlink_kernel_create(net, NETLINK_NFLOG, &cfg); 417 ulog->nflognl = netlink_kernel_create(net, NETLINK_NFLOG, &cfg);
412 if (!ulog->nflognl) 418 if (!ulog->nflognl)
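
The ipt_ULOG.c change stores each group index in ulog->nlgroup[i] and arms the timer with that slot's address, so the callback can read the index back and still locate the owning per-net structure. A stand-alone sketch of recovering both from a pointer to one array slot; the struct and names are made up for the example, the offset arithmetic is the point.

#include <stddef.h>
#include <stdio.h>

#define MAXGROUPS 4

struct ulog_state {
        unsigned int nlgroup[MAXGROUPS];   /* each slot holds its own index */
        const char *name;
};

/* Timer-style callback: receives a pointer to one nlgroup slot. */
static void flush_timer(void *data)
{
        unsigned int *slot = data;
        unsigned int group = *slot;        /* the stored index */
        struct ulog_state *st = (struct ulog_state *)
                ((char *)(slot - group) - offsetof(struct ulog_state, nlgroup));

        printf("flushing %s, group %u\n", st->name, group);
}

int main(void)
{
        struct ulog_state st = { .name = "demo" };

        for (unsigned int i = 0; i < MAXGROUPS; i++)
                st.nlgroup[i] = i;         /* mirrors the patched init loop */

        flush_timer(&st.nlgroup[2]);       /* what setup_timer would arm */
        return 0;
}
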
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 550781a17b34..d35bbf0cf404 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -737,10 +737,15 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf
737{ 737{
738 struct rtable *rt; 738 struct rtable *rt;
739 struct flowi4 fl4; 739 struct flowi4 fl4;
740 const struct iphdr *iph = (const struct iphdr *) skb->data;
741 int oif = skb->dev->ifindex;
742 u8 tos = RT_TOS(iph->tos);
743 u8 prot = iph->protocol;
744 u32 mark = skb->mark;
740 745
741 rt = (struct rtable *) dst; 746 rt = (struct rtable *) dst;
742 747
743 ip_rt_build_flow_key(&fl4, sk, skb); 748 __build_flow_key(&fl4, sk, iph, oif, tos, prot, mark, 0);
744 __ip_do_redirect(rt, skb, &fl4, true); 749 __ip_do_redirect(rt, skb, &fl4, true);
745} 750}
746 751
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 719652305a29..7999fc55c83b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1003,7 +1003,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1003 struct tcp_sock *tp = tcp_sk(sk); 1003 struct tcp_sock *tp = tcp_sk(sk);
1004 struct tcp_md5sig_info *md5sig; 1004 struct tcp_md5sig_info *md5sig;
1005 1005
1006 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET); 1006 key = tcp_md5_do_lookup(sk, addr, family);
1007 if (key) { 1007 if (key) {
1008 /* Pre-existing entry - just update that one. */ 1008 /* Pre-existing entry - just update that one. */
1009 memcpy(key->key, newkey, newkeylen); 1009 memcpy(key->key, newkey, newkeylen);
@@ -1048,7 +1048,7 @@ int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1048 struct tcp_md5sig_key *key; 1048 struct tcp_md5sig_key *key;
1049 struct tcp_md5sig_info *md5sig; 1049 struct tcp_md5sig_info *md5sig;
1050 1050
1051 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET); 1051 key = tcp_md5_do_lookup(sk, addr, family);
1052 if (!key) 1052 if (!key)
1053 return -ENOENT; 1053 return -ENOENT;
1054 hlist_del_rcu(&key->node); 1054 hlist_del_rcu(&key->node);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d1ab6ab29a55..4ab4c38958c6 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1487,7 +1487,7 @@ static int ipv6_count_addresses(struct inet6_dev *idev)
1487} 1487}
1488 1488
1489int ipv6_chk_addr(struct net *net, const struct in6_addr *addr, 1489int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1490 struct net_device *dev, int strict) 1490 const struct net_device *dev, int strict)
1491{ 1491{
1492 struct inet6_ifaddr *ifp; 1492 struct inet6_ifaddr *ifp;
1493 unsigned int hash = inet6_addr_hash(addr); 1493 unsigned int hash = inet6_addr_hash(addr);
@@ -2655,11 +2655,16 @@ static void init_loopback(struct net_device *dev)
2655 if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE)) 2655 if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
2656 continue; 2656 continue;
2657 2657
2658 if (sp_ifa->rt)
2659 continue;
2660
2658 sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0); 2661 sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
2659 2662
2660 /* Failure cases are ignored */ 2663 /* Failure cases are ignored */
2661 if (!IS_ERR(sp_rt)) 2664 if (!IS_ERR(sp_rt)) {
2665 sp_ifa->rt = sp_rt;
2662 ip6_ins_rt(sp_rt); 2666 ip6_ins_rt(sp_rt);
2667 }
2663 } 2668 }
2664 read_unlock_bh(&idev->lock); 2669 read_unlock_bh(&idev->lock);
2665 } 2670 }
@@ -4301,6 +4306,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
4301 struct inet6_ifaddr *ifp; 4306 struct inet6_ifaddr *ifp;
4302 struct net_device *dev = idev->dev; 4307 struct net_device *dev = idev->dev;
4303 bool update_rs = false; 4308 bool update_rs = false;
4309 struct in6_addr ll_addr;
4304 4310
4305 if (token == NULL) 4311 if (token == NULL)
4306 return -EINVAL; 4312 return -EINVAL;
@@ -4320,11 +4326,9 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
4320 4326
4321 write_unlock_bh(&idev->lock); 4327 write_unlock_bh(&idev->lock);
4322 4328
4323 if (!idev->dead && (idev->if_flags & IF_READY)) { 4329 if (!idev->dead && (idev->if_flags & IF_READY) &&
4324 struct in6_addr ll_addr; 4330 !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
4325 4331 IFA_F_OPTIMISTIC)) {
4326 ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
4327 IFA_F_OPTIMISTIC);
4328 4332
4329 /* If we're not ready, then normal ifup will take care 4333 /* If we're not ready, then normal ifup will take care
4330 * of this. Otherwise, we need to request our rs here. 4334 * of this. Otherwise, we need to request our rs here.
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index dae1949019d7..d5d20cde8d92 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -381,9 +381,8 @@ int ip6_forward(struct sk_buff *skb)
381 * cannot be fragmented, because there is no warranty 381 * cannot be fragmented, because there is no warranty
382 * that different fragments will go along one path. --ANK 382 * that different fragments will go along one path. --ANK
383 */ 383 */
384 if (opt->ra) { 384 if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
385 u8 *ptr = skb_network_header(skb) + opt->ra; 385 if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
386 if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
387 return 0; 386 return 0;
388 } 387 }
389 388
@@ -822,11 +821,17 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
822 const struct flowi6 *fl6) 821 const struct flowi6 *fl6)
823{ 822{
824 struct ipv6_pinfo *np = inet6_sk(sk); 823 struct ipv6_pinfo *np = inet6_sk(sk);
825 struct rt6_info *rt = (struct rt6_info *)dst; 824 struct rt6_info *rt;
826 825
827 if (!dst) 826 if (!dst)
828 goto out; 827 goto out;
829 828
829 if (dst->ops->family != AF_INET6) {
830 dst_release(dst);
831 return NULL;
832 }
833
834 rt = (struct rt6_info *)dst;
830 /* Yes, checking route validity in not connected 835 /* Yes, checking route validity in not connected
831 * case is not very simple. Take into account, 836 * case is not very simple. Take into account,
832 * that we do not support routing by source, TOS, 837 * that we do not support routing by source, TOS,
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 2712ab22a174..ca4ffcc287f1 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1493,7 +1493,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
1493 */ 1493 */
1494 1494
1495 if (ha) 1495 if (ha)
1496 ndisc_fill_addr_option(skb, ND_OPT_TARGET_LL_ADDR, ha); 1496 ndisc_fill_addr_option(buff, ND_OPT_TARGET_LL_ADDR, ha);
1497 1497
1498 /* 1498 /*
1499 * build redirect option and copy skb over to the new packet. 1499 * build redirect option and copy skb over to the new packet.
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 72836f40b730..95f3f1da0d7f 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -10,6 +10,7 @@
10#include <linux/netfilter.h> 10#include <linux/netfilter.h>
11#include <linux/netfilter_ipv6.h> 11#include <linux/netfilter_ipv6.h>
12#include <linux/export.h> 12#include <linux/export.h>
13#include <net/addrconf.h>
13#include <net/dst.h> 14#include <net/dst.h>
14#include <net/ipv6.h> 15#include <net/ipv6.h>
15#include <net/ip6_route.h> 16#include <net/ip6_route.h>
@@ -186,6 +187,10 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
186 return csum; 187 return csum;
187}; 188};
188 189
190static const struct nf_ipv6_ops ipv6ops = {
191 .chk_addr = ipv6_chk_addr,
192};
193
189static const struct nf_afinfo nf_ip6_afinfo = { 194static const struct nf_afinfo nf_ip6_afinfo = {
190 .family = AF_INET6, 195 .family = AF_INET6,
191 .checksum = nf_ip6_checksum, 196 .checksum = nf_ip6_checksum,
@@ -198,6 +203,7 @@ static const struct nf_afinfo nf_ip6_afinfo = {
198 203
199int __init ipv6_netfilter_init(void) 204int __init ipv6_netfilter_init(void)
200{ 205{
206 RCU_INIT_POINTER(nf_ipv6_ops, &ipv6ops);
201 return nf_register_afinfo(&nf_ip6_afinfo); 207 return nf_register_afinfo(&nf_ip6_afinfo);
202} 208}
203 209
@@ -206,5 +212,6 @@ int __init ipv6_netfilter_init(void)
206 */ 212 */
207void ipv6_netfilter_fini(void) 213void ipv6_netfilter_fini(void)
208{ 214{
215 RCU_INIT_POINTER(nf_ipv6_ops, NULL);
209 nf_unregister_afinfo(&nf_ip6_afinfo); 216 nf_unregister_afinfo(&nf_ip6_afinfo);
210} 217}
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 97bcf2bae857..c9b6a6e6a1e8 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -204,7 +204,7 @@ static unsigned int __ipv6_conntrack_in(struct net *net,
204 if (ct != NULL && !nf_ct_is_untracked(ct)) { 204 if (ct != NULL && !nf_ct_is_untracked(ct)) {
205 help = nfct_help(ct); 205 help = nfct_help(ct);
206 if ((help && help->helper) || !nf_ct_is_confirmed(ct)) { 206 if ((help && help->helper) || !nf_ct_is_confirmed(ct)) {
207 nf_conntrack_get_reasm(skb); 207 nf_conntrack_get_reasm(reasm);
208 NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm, 208 NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm,
209 (struct net_device *)in, 209 (struct net_device *)in,
210 (struct net_device *)out, 210 (struct net_device *)out,
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index f3c1ff4357ff..51c3285b5d9b 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -90,7 +90,7 @@ static const struct snmp_mib snmp6_ipstats_list[] = {
90 SNMP_MIB_ITEM("Ip6OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS), 90 SNMP_MIB_ITEM("Ip6OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS),
91 SNMP_MIB_ITEM("Ip6InBcastOctets", IPSTATS_MIB_INBCASTOCTETS), 91 SNMP_MIB_ITEM("Ip6InBcastOctets", IPSTATS_MIB_INBCASTOCTETS),
92 SNMP_MIB_ITEM("Ip6OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS), 92 SNMP_MIB_ITEM("Ip6OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS),
93 SNMP_MIB_ITEM("InCsumErrors", IPSTATS_MIB_CSUMERRORS), 93 /* IPSTATS_MIB_CSUMERRORS is not relevant in IPv6 (no checksum) */
94 SNMP_MIB_SENTINEL 94 SNMP_MIB_SENTINEL
95}; 95};
96 96
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 3bb3a891a424..d3cfaf9c7a08 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -46,11 +46,12 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
46 unsigned int mss; 46 unsigned int mss;
47 unsigned int unfrag_ip6hlen, unfrag_len; 47 unsigned int unfrag_ip6hlen, unfrag_len;
48 struct frag_hdr *fptr; 48 struct frag_hdr *fptr;
49 u8 *mac_start, *prevhdr; 49 u8 *packet_start, *prevhdr;
50 u8 nexthdr; 50 u8 nexthdr;
51 u8 frag_hdr_sz = sizeof(struct frag_hdr); 51 u8 frag_hdr_sz = sizeof(struct frag_hdr);
52 int offset; 52 int offset;
53 __wsum csum; 53 __wsum csum;
54 int tnl_hlen;
54 55
55 mss = skb_shinfo(skb)->gso_size; 56 mss = skb_shinfo(skb)->gso_size;
56 if (unlikely(skb->len <= mss)) 57 if (unlikely(skb->len <= mss))
@@ -83,9 +84,11 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
83 skb->ip_summed = CHECKSUM_NONE; 84 skb->ip_summed = CHECKSUM_NONE;
84 85
85 /* Check if there is enough headroom to insert fragment header. */ 86 /* Check if there is enough headroom to insert fragment header. */
86 if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) && 87 tnl_hlen = skb_tnl_header_len(skb);
87 pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC)) 88 if (skb_headroom(skb) < (tnl_hlen + frag_hdr_sz)) {
88 goto out; 89 if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
90 goto out;
91 }
89 92
90 /* Find the unfragmentable header and shift it left by frag_hdr_sz 93 /* Find the unfragmentable header and shift it left by frag_hdr_sz
91 * bytes to insert fragment header. 94 * bytes to insert fragment header.
@@ -93,11 +96,12 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
93 unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); 96 unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
94 nexthdr = *prevhdr; 97 nexthdr = *prevhdr;
95 *prevhdr = NEXTHDR_FRAGMENT; 98 *prevhdr = NEXTHDR_FRAGMENT;
96 unfrag_len = skb_network_header(skb) - skb_mac_header(skb) + 99 unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
97 unfrag_ip6hlen; 100 unfrag_ip6hlen + tnl_hlen;
98 mac_start = skb_mac_header(skb); 101 packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
99 memmove(mac_start-frag_hdr_sz, mac_start, unfrag_len); 102 memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len);
100 103
104 SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
101 skb->mac_header -= frag_hdr_sz; 105 skb->mac_header -= frag_hdr_sz;
102 skb->network_header -= frag_hdr_sz; 106 skb->network_header -= frag_hdr_sz;
103 107
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 5b1e5af25713..9da862070dd8 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1710,6 +1710,7 @@ static int key_notify_sa_flush(const struct km_event *c)
1710 hdr->sadb_msg_version = PF_KEY_V2; 1710 hdr->sadb_msg_version = PF_KEY_V2;
1711 hdr->sadb_msg_errno = (uint8_t) 0; 1711 hdr->sadb_msg_errno = (uint8_t) 0;
1712 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); 1712 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
1713 hdr->sadb_msg_reserved = 0;
1713 1714
1714 pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); 1715 pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
1715 1716
@@ -2366,6 +2367,8 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
2366 2367
2367out: 2368out:
2368 xfrm_pol_put(xp); 2369 xfrm_pol_put(xp);
2370 if (err == 0)
2371 xfrm_garbage_collect(net);
2369 return err; 2372 return err;
2370} 2373}
2371 2374
@@ -2615,6 +2618,8 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_
2615 2618
2616out: 2619out:
2617 xfrm_pol_put(xp); 2620 xfrm_pol_put(xp);
2621 if (delete && err == 0)
2622 xfrm_garbage_collect(net);
2618 return err; 2623 return err;
2619} 2624}
2620 2625
@@ -2695,6 +2700,7 @@ static int key_notify_policy_flush(const struct km_event *c)
2695 hdr->sadb_msg_errno = (uint8_t) 0; 2700 hdr->sadb_msg_errno = (uint8_t) 0;
2696 hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; 2701 hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
2697 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); 2702 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
2703 hdr->sadb_msg_reserved = 0;
2698 pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); 2704 pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
2699 return 0; 2705 return 0;
2700 2706
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 637a341c1e2d..8dec6876dc50 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -346,19 +346,19 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
346 skb_put(skb, 2); 346 skb_put(skb, 2);
347 347
348 /* Copy user data into skb */ 348 /* Copy user data into skb */
349 error = memcpy_fromiovec(skb->data, m->msg_iov, total_len); 349 error = memcpy_fromiovec(skb_put(skb, total_len), m->msg_iov,
350 total_len);
350 if (error < 0) { 351 if (error < 0) {
351 kfree_skb(skb); 352 kfree_skb(skb);
352 goto error_put_sess_tun; 353 goto error_put_sess_tun;
353 } 354 }
354 skb_put(skb, total_len);
355 355
356 l2tp_xmit_skb(session, skb, session->hdr_len); 356 l2tp_xmit_skb(session, skb, session->hdr_len);
357 357
358 sock_put(ps->tunnel_sock); 358 sock_put(ps->tunnel_sock);
359 sock_put(sk); 359 sock_put(sk);
360 360
361 return error; 361 return total_len;
362 362
363error_put_sess_tun: 363error_put_sess_tun:
364 sock_put(ps->tunnel_sock); 364 sock_put(ps->tunnel_sock);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 1a89c80e6407..4fdb306e42e0 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1057,6 +1057,12 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
1057 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); 1057 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
1058 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); 1058 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
1059 1059
1060 if (sdata->wdev.cac_started) {
1061 cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
1062 cfg80211_cac_event(sdata->dev, NL80211_RADAR_CAC_ABORTED,
1063 GFP_KERNEL);
1064 }
1065
1060 drv_stop_ap(sdata->local, sdata); 1066 drv_stop_ap(sdata->local, sdata);
1061 1067
1062 /* free all potentially still buffered bcast frames */ 1068 /* free all potentially still buffered bcast frames */
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 44be28cfc6c4..9ca8e3278cc0 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1497,10 +1497,11 @@ static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
1497 ieee80211_tx_skb_tid(sdata, skb, 7); 1497 ieee80211_tx_skb_tid(sdata, skb, 7);
1498} 1498}
1499 1499
1500u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, bool action, 1500u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
1501 struct ieee802_11_elems *elems, 1501 struct ieee802_11_elems *elems,
1502 u64 filter, u32 crc); 1502 u64 filter, u32 crc);
1503static inline void ieee802_11_parse_elems(u8 *start, size_t len, bool action, 1503static inline void ieee802_11_parse_elems(const u8 *start, size_t len,
1504 bool action,
1504 struct ieee802_11_elems *elems) 1505 struct ieee802_11_elems *elems)
1505{ 1506{
1506 ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0); 1507 ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 60f1ce5e5e52..98d20c0f6fed 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -159,9 +159,10 @@ static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
159 return 0; 159 return 0;
160} 160}
161 161
162static int ieee80211_verify_mac(struct ieee80211_local *local, u8 *addr) 162static int ieee80211_verify_mac(struct ieee80211_sub_if_data *sdata, u8 *addr)
163{ 163{
164 struct ieee80211_sub_if_data *sdata; 164 struct ieee80211_local *local = sdata->local;
165 struct ieee80211_sub_if_data *iter;
165 u64 new, mask, tmp; 166 u64 new, mask, tmp;
166 u8 *m; 167 u8 *m;
167 int ret = 0; 168 int ret = 0;
@@ -181,11 +182,14 @@ static int ieee80211_verify_mac(struct ieee80211_local *local, u8 *addr)
181 182
182 183
183 mutex_lock(&local->iflist_mtx); 184 mutex_lock(&local->iflist_mtx);
184 list_for_each_entry(sdata, &local->interfaces, list) { 185 list_for_each_entry(iter, &local->interfaces, list) {
185 if (sdata->vif.type == NL80211_IFTYPE_MONITOR) 186 if (iter == sdata)
187 continue;
188
189 if (iter->vif.type == NL80211_IFTYPE_MONITOR)
186 continue; 190 continue;
187 191
188 m = sdata->vif.addr; 192 m = iter->vif.addr;
189 tmp = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | 193 tmp = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) |
190 ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | 194 ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) |
191 ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); 195 ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8);
@@ -209,7 +213,7 @@ static int ieee80211_change_mac(struct net_device *dev, void *addr)
209 if (ieee80211_sdata_running(sdata)) 213 if (ieee80211_sdata_running(sdata))
210 return -EBUSY; 214 return -EBUSY;
211 215
212 ret = ieee80211_verify_mac(sdata->local, sa->sa_data); 216 ret = ieee80211_verify_mac(sdata, sa->sa_data);
213 if (ret) 217 if (ret)
214 return ret; 218 return ret;
215 219
@@ -474,6 +478,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
474 master->control_port_protocol; 478 master->control_port_protocol;
475 sdata->control_port_no_encrypt = 479 sdata->control_port_no_encrypt =
476 master->control_port_no_encrypt; 480 master->control_port_no_encrypt;
481 sdata->vif.cab_queue = master->vif.cab_queue;
482 memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
483 sizeof(sdata->vif.hw_queue));
477 break; 484 break;
478 } 485 }
479 case NL80211_IFTYPE_AP: 486 case NL80211_IFTYPE_AP:
@@ -653,7 +660,11 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
653 660
654 ieee80211_recalc_ps(local, -1); 661 ieee80211_recalc_ps(local, -1);
655 662
656 if (dev) { 663 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
664 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
665 /* XXX: for AP_VLAN, actually track AP queues */
666 netif_tx_start_all_queues(dev);
667 } else if (dev) {
657 unsigned long flags; 668 unsigned long flags;
658 int n_acs = IEEE80211_NUM_ACS; 669 int n_acs = IEEE80211_NUM_ACS;
659 int ac; 670 int ac;
@@ -1479,7 +1490,17 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
1479 break; 1490 break;
1480 } 1491 }
1481 1492
1493 /*
1494 * Pick address of existing interface in case user changed
1495 * MAC address manually, default to perm_addr.
1496 */
1482 m = local->hw.wiphy->perm_addr; 1497 m = local->hw.wiphy->perm_addr;
1498 list_for_each_entry(sdata, &local->interfaces, list) {
1499 if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
1500 continue;
1501 m = sdata->vif.addr;
1502 break;
1503 }
1483 start = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | 1504 start = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) |
1484 ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | 1505 ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) |
1485 ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); 1506 ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8);
@@ -1696,6 +1717,15 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
1696 1717
1697 ASSERT_RTNL(); 1718 ASSERT_RTNL();
1698 1719
1720 /*
1721 * Close all AP_VLAN interfaces first, as otherwise they
1722 * might be closed while the AP interface they belong to
1723 * is closed, causing unregister_netdevice_many() to crash.
1724 */
1725 list_for_each_entry(sdata, &local->interfaces, list)
1726 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1727 dev_close(sdata->dev);
1728
1699 mutex_lock(&local->iflist_mtx); 1729 mutex_lock(&local->iflist_mtx);
1700 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { 1730 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
1701 list_del(&sdata->list); 1731 list_del(&sdata->list);
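The iface.c hunks above compare and derive MAC addresses by packing the six address bytes into a single u64 before masking, so the verify loop does one integer compare per interface. A minimal userspace sketch of that packing and mask check (names and the example mask are illustrative, not taken from the patch):

/* Userspace sketch of the 48-bit MAC-in-u64 packing used above; names are illustrative. */
#include <stdint.h>
#include <stdio.h>

static uint64_t mac_to_u64(const uint8_t m[6])
{
	return ((uint64_t)m[0] << 5*8) | ((uint64_t)m[1] << 4*8) |
	       ((uint64_t)m[2] << 3*8) | ((uint64_t)m[3] << 2*8) |
	       ((uint64_t)m[4] << 1*8) | ((uint64_t)m[5] << 0*8);
}

int main(void)
{
	const uint8_t perm[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x10 };
	const uint8_t cand[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x1f };
	/* Mask covering the low 8 bits: addresses differing only there are "related". */
	uint64_t mask = 0xff;

	if ((mac_to_u64(perm) & ~mask) == (mac_to_u64(cand) & ~mask))
		printf("candidate is within the masked range of the base address\n");
	return 0;
}
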
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a46e490f20dd..741448b30825 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2522,8 +2522,11 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2522 u16 capab_info, aid; 2522 u16 capab_info, aid;
2523 struct ieee802_11_elems elems; 2523 struct ieee802_11_elems elems;
2524 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; 2524 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
2525 const struct cfg80211_bss_ies *bss_ies = NULL;
2526 struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
2525 u32 changed = 0; 2527 u32 changed = 0;
2526 int err; 2528 int err;
2529 bool ret;
2527 2530
2528 /* AssocResp and ReassocResp have identical structure */ 2531 /* AssocResp and ReassocResp have identical structure */
2529 2532
@@ -2555,21 +2558,86 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2555 ifmgd->aid = aid; 2558 ifmgd->aid = aid;
2556 2559
2557 /* 2560 /*
2561 * Some APs are erroneously not including some information in their
2562 * (re)association response frames. Try to recover by using the data
2563 * from the beacon or probe response. This seems to afflict mobile
2564 * 2G/3G/4G wifi routers, reported models include the "Onda PN51T",
2565 * "Vodafone PocketWiFi 2", "ZTE MF60" and a similar T-Mobile device.
2566 */
2567 if ((assoc_data->wmm && !elems.wmm_param) ||
2568 (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
2569 (!elems.ht_cap_elem || !elems.ht_operation)) ||
2570 (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
2571 (!elems.vht_cap_elem || !elems.vht_operation))) {
2572 const struct cfg80211_bss_ies *ies;
2573 struct ieee802_11_elems bss_elems;
2574
2575 rcu_read_lock();
2576 ies = rcu_dereference(cbss->ies);
2577 if (ies)
2578 bss_ies = kmemdup(ies, sizeof(*ies) + ies->len,
2579 GFP_ATOMIC);
2580 rcu_read_unlock();
2581 if (!bss_ies)
2582 return false;
2583
2584 ieee802_11_parse_elems(bss_ies->data, bss_ies->len,
2585 false, &bss_elems);
2586 if (assoc_data->wmm &&
2587 !elems.wmm_param && bss_elems.wmm_param) {
2588 elems.wmm_param = bss_elems.wmm_param;
2589 sdata_info(sdata,
2590 "AP bug: WMM param missing from AssocResp\n");
2591 }
2592
2593 /*
2594 * Also check if we requested HT/VHT, otherwise the AP doesn't
2595 * have to include the IEs in the (re)association response.
2596 */
2597 if (!elems.ht_cap_elem && bss_elems.ht_cap_elem &&
2598 !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
2599 elems.ht_cap_elem = bss_elems.ht_cap_elem;
2600 sdata_info(sdata,
2601 "AP bug: HT capability missing from AssocResp\n");
2602 }
2603 if (!elems.ht_operation && bss_elems.ht_operation &&
2604 !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
2605 elems.ht_operation = bss_elems.ht_operation;
2606 sdata_info(sdata,
2607 "AP bug: HT operation missing from AssocResp\n");
2608 }
2609 if (!elems.vht_cap_elem && bss_elems.vht_cap_elem &&
2610 !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) {
2611 elems.vht_cap_elem = bss_elems.vht_cap_elem;
2612 sdata_info(sdata,
2613 "AP bug: VHT capa missing from AssocResp\n");
2614 }
2615 if (!elems.vht_operation && bss_elems.vht_operation &&
2616 !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) {
2617 elems.vht_operation = bss_elems.vht_operation;
2618 sdata_info(sdata,
2619 "AP bug: VHT operation missing from AssocResp\n");
2620 }
2621 }
2622
2623 /*
2558 * We previously checked these in the beacon/probe response, so 2624 * We previously checked these in the beacon/probe response, so
2559 * they should be present here. This is just a safety net. 2625 * they should be present here. This is just a safety net.
2560 */ 2626 */
2561 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) && 2627 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
2562 (!elems.wmm_param || !elems.ht_cap_elem || !elems.ht_operation)) { 2628 (!elems.wmm_param || !elems.ht_cap_elem || !elems.ht_operation)) {
2563 sdata_info(sdata, 2629 sdata_info(sdata,
2564 "HT AP is missing WMM params or HT capability/operation in AssocResp\n"); 2630 "HT AP is missing WMM params or HT capability/operation\n");
2565 return false; 2631 ret = false;
2632 goto out;
2566 } 2633 }
2567 2634
2568 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) && 2635 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
2569 (!elems.vht_cap_elem || !elems.vht_operation)) { 2636 (!elems.vht_cap_elem || !elems.vht_operation)) {
2570 sdata_info(sdata, 2637 sdata_info(sdata,
2571 "VHT AP is missing VHT capability/operation in AssocResp\n"); 2638 "VHT AP is missing VHT capability/operation\n");
2572 return false; 2639 ret = false;
2640 goto out;
2573 } 2641 }
2574 2642
2575 mutex_lock(&sdata->local->sta_mtx); 2643 mutex_lock(&sdata->local->sta_mtx);
@@ -2580,7 +2648,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2580 sta = sta_info_get(sdata, cbss->bssid); 2648 sta = sta_info_get(sdata, cbss->bssid);
2581 if (WARN_ON(!sta)) { 2649 if (WARN_ON(!sta)) {
2582 mutex_unlock(&sdata->local->sta_mtx); 2650 mutex_unlock(&sdata->local->sta_mtx);
2583 return false; 2651 ret = false;
2652 goto out;
2584 } 2653 }
2585 2654
2586 sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)]; 2655 sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)];
@@ -2633,7 +2702,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2633 sta->sta.addr); 2702 sta->sta.addr);
2634 WARN_ON(__sta_info_destroy(sta)); 2703 WARN_ON(__sta_info_destroy(sta));
2635 mutex_unlock(&sdata->local->sta_mtx); 2704 mutex_unlock(&sdata->local->sta_mtx);
2636 return false; 2705 ret = false;
2706 goto out;
2637 } 2707 }
2638 2708
2639 mutex_unlock(&sdata->local->sta_mtx); 2709 mutex_unlock(&sdata->local->sta_mtx);
@@ -2673,7 +2743,10 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2673 ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt); 2743 ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt);
2674 ieee80211_sta_reset_beacon_monitor(sdata); 2744 ieee80211_sta_reset_beacon_monitor(sdata);
2675 2745
2676 return true; 2746 ret = true;
2747 out:
2748 kfree(bss_ies);
2749 return ret;
2677} 2750}
2678 2751
2679static enum rx_mgmt_action __must_check 2752static enum rx_mgmt_action __must_check
@@ -3321,10 +3394,6 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
3321 if (WARN_ON_ONCE(!auth_data)) 3394 if (WARN_ON_ONCE(!auth_data))
3322 return -EINVAL; 3395 return -EINVAL;
3323 3396
3324 if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
3325 tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
3326 IEEE80211_TX_INTFL_MLME_CONN_TX;
3327
3328 auth_data->tries++; 3397 auth_data->tries++;
3329 3398
3330 if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) { 3399 if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) {
@@ -3358,6 +3427,10 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
3358 auth_data->expected_transaction = trans; 3427 auth_data->expected_transaction = trans;
3359 } 3428 }
3360 3429
3430 if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
3431 tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
3432 IEEE80211_TX_INTFL_MLME_CONN_TX;
3433
3361 ieee80211_send_auth(sdata, trans, auth_data->algorithm, status, 3434 ieee80211_send_auth(sdata, trans, auth_data->algorithm, status,
3362 auth_data->data, auth_data->data_len, 3435 auth_data->data, auth_data->data_len,
3363 auth_data->bss->bssid, 3436 auth_data->bss->bssid,
@@ -3381,12 +3454,12 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
3381 * will not answer to direct packet in unassociated state. 3454 * will not answer to direct packet in unassociated state.
3382 */ 3455 */
3383 ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1], 3456 ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1],
3384 NULL, 0, (u32) -1, true, tx_flags, 3457 NULL, 0, (u32) -1, true, 0,
3385 auth_data->bss->channel, false); 3458 auth_data->bss->channel, false);
3386 rcu_read_unlock(); 3459 rcu_read_unlock();
3387 } 3460 }
3388 3461
3389 if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) { 3462 if (tx_flags == 0) {
3390 auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; 3463 auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
3391 ifmgd->auth_data->timeout_started = true; 3464 ifmgd->auth_data->timeout_started = true;
3392 run_again(ifmgd, auth_data->timeout); 3465 run_again(ifmgd, auth_data->timeout);
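The mlme.c AssocResp workaround above snapshots the RCU-protected cbss->ies with kmemdup() under rcu_read_lock(), parses the copy, and backfills only the element pointers the AP omitted, freeing the copy at the single out: label. A small userspace model of the backfill step (element names and messages are illustrative, not the parser's real fields):

/* Userspace model of backfilling missing parsed elements from a second source. */
#include <stddef.h>
#include <stdio.h>

struct parsed_elems {
	const char *wmm_param;	/* NULL means the element was absent */
	const char *ht_cap;
	const char *ht_oper;
};

static void backfill(struct parsed_elems *primary, const struct parsed_elems *fallback)
{
	if (!primary->wmm_param && fallback->wmm_param) {
		primary->wmm_param = fallback->wmm_param;
		printf("AP bug workaround: took WMM param from beacon\n");
	}
	if (!primary->ht_cap && fallback->ht_cap) {
		primary->ht_cap = fallback->ht_cap;
		printf("AP bug workaround: took HT capability from beacon\n");
	}
	if (!primary->ht_oper && fallback->ht_oper) {
		primary->ht_oper = fallback->ht_oper;
		printf("AP bug workaround: took HT operation from beacon\n");
	}
}

int main(void)
{
	struct parsed_elems assoc  = { .wmm_param = NULL,  .ht_cap = "ht-cap", .ht_oper = NULL };
	struct parsed_elems beacon = { .wmm_param = "wmm", .ht_cap = "ht-cap", .ht_oper = "ht-op" };

	backfill(&assoc, &beacon);
	return 0;
}
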
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index d3f414fe67e0..a02bef35b134 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -615,7 +615,7 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
615 if (rates[i].idx < 0) 615 if (rates[i].idx < 0)
616 break; 616 break;
617 617
618 rate_idx_match_mask(&rates[i], sband, mask, chan_width, 618 rate_idx_match_mask(&rates[i], sband, chan_width, mask,
619 mcs_mask); 619 mcs_mask);
620 } 620 }
621} 621}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 27e07150eb46..72e6292955bb 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -661,12 +661,12 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
661} 661}
662EXPORT_SYMBOL(ieee80211_queue_delayed_work); 662EXPORT_SYMBOL(ieee80211_queue_delayed_work);
663 663
664u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, bool action, 664u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
665 struct ieee802_11_elems *elems, 665 struct ieee802_11_elems *elems,
666 u64 filter, u32 crc) 666 u64 filter, u32 crc)
667{ 667{
668 size_t left = len; 668 size_t left = len;
669 u8 *pos = start; 669 const u8 *pos = start;
670 bool calc_crc = filter != 0; 670 bool calc_crc = filter != 0;
671 DECLARE_BITMAP(seen_elems, 256); 671 DECLARE_BITMAP(seen_elems, 256);
672 const u8 *ie; 672 const u8 *ie;
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 07c865a31a3d..857ca9f35177 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -30,6 +30,8 @@ static DEFINE_MUTEX(afinfo_mutex);
30 30
31const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly; 31const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
32EXPORT_SYMBOL(nf_afinfo); 32EXPORT_SYMBOL(nf_afinfo);
33const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
34EXPORT_SYMBOL_GPL(nf_ipv6_ops);
33 35
34int nf_register_afinfo(const struct nf_afinfo *afinfo) 36int nf_register_afinfo(const struct nf_afinfo *afinfo)
35{ 37{
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 085b5880ab0d..23b8eb53a569 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1001,6 +1001,32 @@ static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
1001 return th->rst; 1001 return th->rst;
1002} 1002}
1003 1003
1004static inline bool is_new_conn(const struct sk_buff *skb,
1005 struct ip_vs_iphdr *iph)
1006{
1007 switch (iph->protocol) {
1008 case IPPROTO_TCP: {
1009 struct tcphdr _tcph, *th;
1010
1011 th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
1012 if (th == NULL)
1013 return false;
1014 return th->syn;
1015 }
1016 case IPPROTO_SCTP: {
1017 sctp_chunkhdr_t *sch, schunk;
1018
1019 sch = skb_header_pointer(skb, iph->len + sizeof(sctp_sctphdr_t),
1020 sizeof(schunk), &schunk);
1021 if (sch == NULL)
1022 return false;
1023 return sch->type == SCTP_CID_INIT;
1024 }
1025 default:
1026 return false;
1027 }
1028}
1029
1004/* Handle response packets: rewrite addresses and send away... 1030/* Handle response packets: rewrite addresses and send away...
1005 */ 1031 */
1006static unsigned int 1032static unsigned int
@@ -1416,7 +1442,8 @@ ignore_ipip:
1416 1442
1417 /* do the statistics and put it back */ 1443 /* do the statistics and put it back */
1418 ip_vs_in_stats(cp, skb); 1444 ip_vs_in_stats(cp, skb);
1419 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol) 1445 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol ||
1446 IPPROTO_SCTP == cih->protocol)
1420 offset += 2 * sizeof(__u16); 1447 offset += 2 * sizeof(__u16);
1421 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph); 1448 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph);
1422 1449
@@ -1612,6 +1639,15 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1612 * Check if the packet belongs to an existing connection entry 1639 * Check if the packet belongs to an existing connection entry
1613 */ 1640 */
1614 cp = pp->conn_in_get(af, skb, &iph, 0); 1641 cp = pp->conn_in_get(af, skb, &iph, 0);
1642
1643 if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp && cp->dest &&
1644 unlikely(!atomic_read(&cp->dest->weight)) && !iph.fragoffs &&
1645 is_new_conn(skb, &iph)) {
1646 ip_vs_conn_expire_now(cp);
1647 __ip_vs_conn_put(cp);
1648 cp = NULL;
1649 }
1650
1615 if (unlikely(!cp) && !iph.fragoffs) { 1651 if (unlikely(!cp) && !iph.fragoffs) {
1616 /* No (second) fragments need to enter here, as nf_defrag_ipv6 1652 /* No (second) fragments need to enter here, as nf_defrag_ipv6
1617 * replayed fragment zero will already have created the cp 1653 * replayed fragment zero will already have created the cp
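is_new_conn() above recognizes the start of a connection (TCP SYN or SCTP INIT) via skb_header_pointer(), which fails cleanly when the transport header is not fully present, so only genuinely new flows get expired away from a zero-weight destination. A userspace sketch of the same bounds-checked probe, modelling only the TCP case with hypothetical types:

/* Userspace sketch of a bounds-checked "is this a new connection?" probe. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define PROTO_TCP 6

struct tcp_hdr_min {
	uint16_t source, dest;
	uint32_t seq, ack_seq;
	uint8_t  doff_res;
	uint8_t  flags;		/* bit 0x02 is SYN */
};

/* Copy hdrlen bytes at offset into buf, or return NULL if the packet is too short. */
static const void *header_pointer(const uint8_t *pkt, size_t pktlen,
				  size_t offset, size_t hdrlen, void *buf)
{
	if (offset + hdrlen > pktlen)
		return NULL;
	memcpy(buf, pkt + offset, hdrlen);
	return buf;
}

static bool is_new_conn(const uint8_t *pkt, size_t pktlen, int proto, size_t l4off)
{
	if (proto == PROTO_TCP) {
		struct tcp_hdr_min th;
		const struct tcp_hdr_min *h =
			header_pointer(pkt, pktlen, l4off, sizeof(th), &th);
		return h && (h->flags & 0x02);	/* SYN set */
	}
	return false;	/* SCTP INIT check not modelled here */
}

int main(void)
{
	uint8_t pkt[64] = { 0 };

	pkt[20 + offsetof(struct tcp_hdr_min, flags)] = 0x02;	/* SYN, transport header at 20 */
	printf("new conn:  %d\n", is_new_conn(pkt, sizeof(pkt), PROTO_TCP, 20));
	printf("truncated: %d\n", is_new_conn(pkt, 25, PROTO_TCP, 20));
	return 0;
}
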
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 5b142fb16480..9e6c2a075a4c 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2542,6 +2542,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
2542 struct ip_vs_dest *dest; 2542 struct ip_vs_dest *dest;
2543 struct ip_vs_dest_entry entry; 2543 struct ip_vs_dest_entry entry;
2544 2544
2545 memset(&entry, 0, sizeof(entry));
2545 list_for_each_entry(dest, &svc->destinations, n_list) { 2546 list_for_each_entry(dest, &svc->destinations, n_list) {
2546 if (count >= get->num_dests) 2547 if (count >= get->num_dests)
2547 break; 2548 break;
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index 0df269d7c99f..a65edfe4b16c 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -67,8 +67,8 @@ struct ip_vs_sh_bucket {
67#define IP_VS_SH_TAB_MASK (IP_VS_SH_TAB_SIZE - 1) 67#define IP_VS_SH_TAB_MASK (IP_VS_SH_TAB_SIZE - 1)
68 68
69struct ip_vs_sh_state { 69struct ip_vs_sh_state {
70 struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE];
71 struct rcu_head rcu_head; 70 struct rcu_head rcu_head;
71 struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE];
72}; 72};
73 73
74/* 74/*
diff --git a/net/netfilter/nf_conntrack_labels.c b/net/netfilter/nf_conntrack_labels.c
index 8fe2e99428b7..355d2ef08094 100644
--- a/net/netfilter/nf_conntrack_labels.c
+++ b/net/netfilter/nf_conntrack_labels.c
@@ -45,7 +45,7 @@ int nf_connlabel_set(struct nf_conn *ct, u16 bit)
45 if (test_bit(bit, labels->bits)) 45 if (test_bit(bit, labels->bits))
46 return 0; 46 return 0;
47 47
48 if (test_and_set_bit(bit, labels->bits)) 48 if (!test_and_set_bit(bit, labels->bits))
49 nf_conntrack_event_cache(IPCT_LABEL, ct); 49 nf_conntrack_event_cache(IPCT_LABEL, ct);
50 50
51 return 0; 51 return 0;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 6d0f8a17c5b7..ecf065f94032 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1825,6 +1825,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1825 nf_conntrack_eventmask_report((1 << IPCT_REPLY) | 1825 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1826 (1 << IPCT_ASSURED) | 1826 (1 << IPCT_ASSURED) |
1827 (1 << IPCT_HELPER) | 1827 (1 << IPCT_HELPER) |
1828 (1 << IPCT_LABEL) |
1828 (1 << IPCT_PROTOINFO) | 1829 (1 << IPCT_PROTOINFO) |
1829 (1 << IPCT_NATSEQADJ) | 1830 (1 << IPCT_NATSEQADJ) |
1830 (1 << IPCT_MARK), 1831 (1 << IPCT_MARK),
diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c
index 96ccdf78a29f..dac11f73868e 100644
--- a/net/netfilter/nf_nat_sip.c
+++ b/net/netfilter/nf_nat_sip.c
@@ -230,9 +230,10 @@ static unsigned int nf_nat_sip(struct sk_buff *skb, unsigned int protoff,
230 &ct->tuplehash[!dir].tuple.src.u3, 230 &ct->tuplehash[!dir].tuple.src.u3,
231 false); 231 false);
232 if (!mangle_packet(skb, protoff, dataoff, dptr, datalen, 232 if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
233 poff, plen, buffer, buflen)) 233 poff, plen, buffer, buflen)) {
234 nf_ct_helper_log(skb, ct, "cannot mangle received"); 234 nf_ct_helper_log(skb, ct, "cannot mangle received");
235 return NF_DROP; 235 return NF_DROP;
236 }
236 } 237 }
237 238
238 /* The rport= parameter (RFC 3581) contains the port number 239 /* The rport= parameter (RFC 3581) contains the port number
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index dc3fd5d44464..c7b6d466a662 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -149,9 +149,12 @@ nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
149 149
150 rcu_read_lock(); 150 rcu_read_lock();
151 list_for_each_entry_rcu(cur, &nfnl_acct_list, head) { 151 list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
152 if (last && cur != last) 152 if (last) {
153 continue; 153 if (cur != last)
154 continue;
154 155
156 last = NULL;
157 }
155 if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).portid, 158 if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).portid,
156 cb->nlh->nlmsg_seq, 159 cb->nlh->nlmsg_seq,
157 NFNL_MSG_TYPE(cb->nlh->nlmsg_type), 160 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 701c88a20fea..65074dfb9383 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -220,9 +220,12 @@ ctnl_timeout_dump(struct sk_buff *skb, struct netlink_callback *cb)
220 220
221 rcu_read_lock(); 221 rcu_read_lock();
222 list_for_each_entry_rcu(cur, &cttimeout_list, head) { 222 list_for_each_entry_rcu(cur, &cttimeout_list, head) {
223 if (last && cur != last) 223 if (last) {
224 continue; 224 if (cur != last)
225 continue;
225 226
227 last = NULL;
228 }
226 if (ctnl_timeout_fill_info(skb, NETLINK_CB(cb->skb).portid, 229 if (ctnl_timeout_fill_info(skb, NETLINK_CB(cb->skb).portid,
227 cb->nlh->nlmsg_seq, 230 cb->nlh->nlmsg_seq,
228 NFNL_MSG_TYPE(cb->nlh->nlmsg_type), 231 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
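The nfnetlink_acct and nfnetlink_cttimeout dump loops above share the same fix: once the saved "last" entry is reached, last must be cleared, otherwise every later entry is also skipped and the resumed dump returns nothing. A userspace model of the corrected resume logic:

/* Userspace model of resuming an interrupted dump at the saved "last" entry. */
#include <stdio.h>

static const char *items[] = { "a", "b", "c", "d", "e" };

static void dump_from(const char *last)
{
	for (unsigned i = 0; i < sizeof(items) / sizeof(items[0]); i++) {
		const char *cur = items[i];

		if (last) {
			if (cur != last)
				continue;	/* still before the resume point */
			last = NULL;		/* found it: stop skipping from now on */
		}
		printf("dump %s\n", cur);
	}
}

int main(void)
{
	dump_from(NULL);	/* full dump: a b c d e */
	dump_from(items[2]);	/* resumed dump: c d e */
	return 0;
}
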
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 4e27fa035814..5352b2d2d5bf 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -637,9 +637,6 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
637 if (queue->copy_mode == NFQNL_COPY_NONE) 637 if (queue->copy_mode == NFQNL_COPY_NONE)
638 return -EINVAL; 638 return -EINVAL;
639 639
640 if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(entry->skb))
641 return __nfqnl_enqueue_packet(net, queue, entry);
642
643 skb = entry->skb; 640 skb = entry->skb;
644 641
645 switch (entry->pf) { 642 switch (entry->pf) {
@@ -651,6 +648,9 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
651 break; 648 break;
652 } 649 }
653 650
651 if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb))
652 return __nfqnl_enqueue_packet(net, queue, entry);
653
654 nf_bridge_adjust_skb_data(skb); 654 nf_bridge_adjust_skb_data(skb);
655 segs = skb_gso_segment(skb, 0); 655 segs = skb_gso_segment(skb, 0);
656 /* Does not use PTR_ERR to limit the number of error codes that can be 656 /* Does not use PTR_ERR to limit the number of error codes that can be
diff --git a/net/netfilter/xt_LOG.c b/net/netfilter/xt_LOG.c
index 491c7d821a0b..5ab24843370a 100644
--- a/net/netfilter/xt_LOG.c
+++ b/net/netfilter/xt_LOG.c
@@ -737,7 +737,7 @@ static void dump_ipv6_packet(struct sbuff *m,
737 dump_sk_uid_gid(m, skb->sk); 737 dump_sk_uid_gid(m, skb->sk);
738 738
739 /* Max length: 16 "MARK=0xFFFFFFFF " */ 739 /* Max length: 16 "MARK=0xFFFFFFFF " */
740 if (!recurse && skb->mark) 740 if (recurse && skb->mark)
741 sb_add(m, "MARK=0x%x ", skb->mark); 741 sb_add(m, "MARK=0x%x ", skb->mark);
742} 742}
743 743
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index a75240f0d42b..7011c71646f0 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -45,17 +45,22 @@ optlen(const u_int8_t *opt, unsigned int offset)
45 45
46static int 46static int
47tcpmss_mangle_packet(struct sk_buff *skb, 47tcpmss_mangle_packet(struct sk_buff *skb,
48 const struct xt_tcpmss_info *info, 48 const struct xt_action_param *par,
49 unsigned int in_mtu, 49 unsigned int in_mtu,
50 unsigned int tcphoff, 50 unsigned int tcphoff,
51 unsigned int minlen) 51 unsigned int minlen)
52{ 52{
53 const struct xt_tcpmss_info *info = par->targinfo;
53 struct tcphdr *tcph; 54 struct tcphdr *tcph;
54 unsigned int tcplen, i; 55 unsigned int tcplen, i;
55 __be16 oldval; 56 __be16 oldval;
56 u16 newmss; 57 u16 newmss;
57 u8 *opt; 58 u8 *opt;
58 59
60 /* This is a fragment, no TCP header is available */
61 if (par->fragoff != 0)
62 return XT_CONTINUE;
63
59 if (!skb_make_writable(skb, skb->len)) 64 if (!skb_make_writable(skb, skb->len))
60 return -1; 65 return -1;
61 66
@@ -125,6 +130,18 @@ tcpmss_mangle_packet(struct sk_buff *skb,
125 130
126 skb_put(skb, TCPOLEN_MSS); 131 skb_put(skb, TCPOLEN_MSS);
127 132
133 /*
134 * IPv4: RFC 1122 states "If an MSS option is not received at
135 * connection setup, TCP MUST assume a default send MSS of 536".
136 * IPv6: RFC 2460 states IPv6 has a minimum MTU of 1280 and a minimum
137 * length IPv6 header of 60, ergo the default MSS value is 1220
138 * Since no MSS was provided, we must use the default values
139 */
140 if (par->family == NFPROTO_IPV4)
141 newmss = min(newmss, (u16)536);
142 else
143 newmss = min(newmss, (u16)1220);
144
128 opt = (u_int8_t *)tcph + sizeof(struct tcphdr); 145 opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
129 memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr)); 146 memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
130 147
@@ -182,7 +199,7 @@ tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
182 __be16 newlen; 199 __be16 newlen;
183 int ret; 200 int ret;
184 201
185 ret = tcpmss_mangle_packet(skb, par->targinfo, 202 ret = tcpmss_mangle_packet(skb, par,
186 tcpmss_reverse_mtu(skb, PF_INET), 203 tcpmss_reverse_mtu(skb, PF_INET),
187 iph->ihl * 4, 204 iph->ihl * 4,
188 sizeof(*iph) + sizeof(struct tcphdr)); 205 sizeof(*iph) + sizeof(struct tcphdr));
@@ -211,7 +228,7 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
211 tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off); 228 tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
212 if (tcphoff < 0) 229 if (tcphoff < 0)
213 return NF_DROP; 230 return NF_DROP;
214 ret = tcpmss_mangle_packet(skb, par->targinfo, 231 ret = tcpmss_mangle_packet(skb, par,
215 tcpmss_reverse_mtu(skb, PF_INET6), 232 tcpmss_reverse_mtu(skb, PF_INET6),
216 tcphoff, 233 tcphoff,
217 sizeof(*ipv6h) + sizeof(struct tcphdr)); 234 sizeof(*ipv6h) + sizeof(struct tcphdr));
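The TCPMSS clamp above falls back to the protocol default MSS when the peer advertised none: 536 for IPv4 per RFC 1122, and 1220 for IPv6, which is the 1280-byte minimum MTU minus the 40-byte IPv6 header and 20-byte TCP header (the "60" in the in-tree comment is that combined header overhead). A one-file check of the arithmetic:

/* Sanity check of the default-MSS values used in the TCPMSS clamp above. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	int ipv6_min_mtu = 1280, ipv6_hdr = 40, tcp_hdr = 20;

	assert(ipv6_min_mtu - ipv6_hdr - tcp_hdr == 1220);	/* IPv6 default MSS */
	printf("IPv4 default MSS: 536, IPv6 default MSS: %d\n",
	       ipv6_min_mtu - ipv6_hdr - tcp_hdr);
	return 0;
}
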
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index 1eb1a44bfd3d..b68fa191710f 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -48,11 +48,13 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
48 return NF_DROP; 48 return NF_DROP;
49 49
50 len = skb->len - tcphoff; 50 len = skb->len - tcphoff;
51 if (len < (int)sizeof(struct tcphdr) || 51 if (len < (int)sizeof(struct tcphdr))
52 tcp_hdr(skb)->doff * 4 > len)
53 return NF_DROP; 52 return NF_DROP;
54 53
55 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); 54 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
55 if (tcph->doff * 4 > len)
56 return NF_DROP;
57
56 opt = (u_int8_t *)tcph; 58 opt = (u_int8_t *)tcph;
57 59
58 /* 60 /*
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index 49c5ff7f6dd6..68ff29f60867 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -22,6 +22,7 @@
22#include <net/ip6_fib.h> 22#include <net/ip6_fib.h>
23#endif 23#endif
24 24
25#include <linux/netfilter_ipv6.h>
25#include <linux/netfilter/xt_addrtype.h> 26#include <linux/netfilter/xt_addrtype.h>
26#include <linux/netfilter/x_tables.h> 27#include <linux/netfilter/x_tables.h>
27 28
@@ -33,12 +34,12 @@ MODULE_ALIAS("ip6t_addrtype");
33 34
34#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) 35#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
35static u32 match_lookup_rt6(struct net *net, const struct net_device *dev, 36static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
36 const struct in6_addr *addr) 37 const struct in6_addr *addr, u16 mask)
37{ 38{
38 const struct nf_afinfo *afinfo; 39 const struct nf_afinfo *afinfo;
39 struct flowi6 flow; 40 struct flowi6 flow;
40 struct rt6_info *rt; 41 struct rt6_info *rt;
41 u32 ret; 42 u32 ret = 0;
42 int route_err; 43 int route_err;
43 44
44 memset(&flow, 0, sizeof(flow)); 45 memset(&flow, 0, sizeof(flow));
@@ -49,12 +50,19 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
49 rcu_read_lock(); 50 rcu_read_lock();
50 51
51 afinfo = nf_get_afinfo(NFPROTO_IPV6); 52 afinfo = nf_get_afinfo(NFPROTO_IPV6);
52 if (afinfo != NULL) 53 if (afinfo != NULL) {
54 const struct nf_ipv6_ops *v6ops;
55
56 if (dev && (mask & XT_ADDRTYPE_LOCAL)) {
57 v6ops = nf_get_ipv6_ops();
58 if (v6ops && v6ops->chk_addr(net, addr, dev, true))
59 ret = XT_ADDRTYPE_LOCAL;
60 }
53 route_err = afinfo->route(net, (struct dst_entry **)&rt, 61 route_err = afinfo->route(net, (struct dst_entry **)&rt,
54 flowi6_to_flowi(&flow), !!dev); 62 flowi6_to_flowi(&flow), false);
55 else 63 } else {
56 route_err = 1; 64 route_err = 1;
57 65 }
58 rcu_read_unlock(); 66 rcu_read_unlock();
59 67
60 if (route_err) 68 if (route_err)
@@ -62,15 +70,12 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
62 70
63 if (rt->rt6i_flags & RTF_REJECT) 71 if (rt->rt6i_flags & RTF_REJECT)
64 ret = XT_ADDRTYPE_UNREACHABLE; 72 ret = XT_ADDRTYPE_UNREACHABLE;
65 else
66 ret = 0;
67 73
68 if (rt->rt6i_flags & RTF_LOCAL) 74 if (dev == NULL && rt->rt6i_flags & RTF_LOCAL)
69 ret |= XT_ADDRTYPE_LOCAL; 75 ret |= XT_ADDRTYPE_LOCAL;
70 if (rt->rt6i_flags & RTF_ANYCAST) 76 if (rt->rt6i_flags & RTF_ANYCAST)
71 ret |= XT_ADDRTYPE_ANYCAST; 77 ret |= XT_ADDRTYPE_ANYCAST;
72 78
73
74 dst_release(&rt->dst); 79 dst_release(&rt->dst);
75 return ret; 80 return ret;
76} 81}
@@ -90,7 +95,7 @@ static bool match_type6(struct net *net, const struct net_device *dev,
90 95
91 if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST | 96 if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST |
92 XT_ADDRTYPE_UNREACHABLE) & mask) 97 XT_ADDRTYPE_UNREACHABLE) & mask)
93 return !!(mask & match_lookup_rt6(net, dev, addr)); 98 return !!(mask & match_lookup_rt6(net, dev, addr, mask));
94 return true; 99 return true;
95} 100}
96 101
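The xt_addrtype change above reaches IPv6-specific code through the new nf_ipv6_ops table exported from netfilter/core.c, calling v6ops->chk_addr() only when a provider has registered itself. A userspace model of that optional-ops indirection (the toy chk_addr rule is purely illustrative):

/* Userspace model of calling through an optionally-registered ops table. */
#include <stdbool.h>
#include <stdio.h>

struct ipv6_ops {
	bool (*chk_addr)(const char *addr);	/* hypothetical hook */
};

/* Set by the "IPv6 module" when it is loaded; may stay NULL. */
static const struct ipv6_ops *v6ops;

static bool addr_is_local(const char *addr)
{
	/* Only take the IPv6-specific path if the provider registered itself. */
	return v6ops && v6ops->chk_addr && v6ops->chk_addr(addr);
}

static bool chk_addr_impl(const char *addr)
{
	return addr[0] == ':';	/* toy rule standing in for a real address check */
}

static const struct ipv6_ops real_ops = { .chk_addr = chk_addr_impl };

int main(void)
{
	printf("before registration: %d\n", addr_is_local("::1"));
	v6ops = &real_ops;	/* module loads and publishes its ops */
	printf("after registration:  %d\n", addr_is_local("::1"));
	return 0;
}
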
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 12ac6b47a35c..57ee84d21470 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -371,7 +371,7 @@ static int netlink_mmap(struct file *file, struct socket *sock,
371 err = 0; 371 err = 0;
372out: 372out:
373 mutex_unlock(&nlk->pg_vec_lock); 373 mutex_unlock(&nlk->pg_vec_lock);
374 return 0; 374 return err;
375} 375}
376 376
377static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr) 377static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
@@ -747,7 +747,7 @@ static void netlink_skb_destructor(struct sk_buff *skb)
747 atomic_dec(&ring->pending); 747 atomic_dec(&ring->pending);
748 sock_put(sk); 748 sock_put(sk);
749 749
750 skb->data = NULL; 750 skb->head = NULL;
751 } 751 }
752#endif 752#endif
753 if (skb->sk != NULL) 753 if (skb->sk != NULL)
diff --git a/net/nfc/Makefile b/net/nfc/Makefile
index fb799deaed4f..a76f4533cb6c 100644
--- a/net/nfc/Makefile
+++ b/net/nfc/Makefile
@@ -5,7 +5,6 @@
5obj-$(CONFIG_NFC) += nfc.o 5obj-$(CONFIG_NFC) += nfc.o
6obj-$(CONFIG_NFC_NCI) += nci/ 6obj-$(CONFIG_NFC_NCI) += nci/
7obj-$(CONFIG_NFC_HCI) += hci/ 7obj-$(CONFIG_NFC_HCI) += hci/
8#obj-$(CONFIG_NFC_LLCP) += llcp/
9 8
10nfc-objs := core.o netlink.o af_nfc.o rawsock.o llcp_core.o llcp_commands.o \ 9nfc-objs := core.o netlink.o af_nfc.o rawsock.o llcp_core.o llcp_commands.o \
11 llcp_sock.o 10 llcp_sock.o
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 8ec1bca7f859..20a1bd0e6549 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2851,12 +2851,11 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
2851 return -EOPNOTSUPP; 2851 return -EOPNOTSUPP;
2852 2852
2853 uaddr->sa_family = AF_PACKET; 2853 uaddr->sa_family = AF_PACKET;
2854 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
2854 rcu_read_lock(); 2855 rcu_read_lock();
2855 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); 2856 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
2856 if (dev) 2857 if (dev)
2857 strncpy(uaddr->sa_data, dev->name, 14); 2858 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
2858 else
2859 memset(uaddr->sa_data, 0, 14);
2860 rcu_read_unlock(); 2859 rcu_read_unlock();
2861 *uaddr_len = sizeof(*uaddr); 2860 *uaddr_len = sizeof(*uaddr);
2862 2861
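The af_packet getname fix above zeroes sa_data unconditionally and then copies the device name with strlcpy(), so the returned buffer is always NUL-terminated and never carries stale bytes. A userspace sketch, using a local bounded-copy helper so it does not depend on strlcpy() being available in libc:

/* Userspace sketch of zero-then-bounded-copy into a fixed sockaddr-style field. */
#include <stdio.h>
#include <string.h>

/* Minimal strlcpy-style helper: always NUL-terminates when size > 0. */
static size_t bounded_copy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;
		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char sa_data[14];

	memset(sa_data, 0, sizeof(sa_data));		/* no uninitialized bytes copied out */
	bounded_copy(sa_data, "very-long-interface-name", sizeof(sa_data));
	printf("name in sockaddr: %s\n", sa_data);	/* truncated but terminated */
	return 0;
}
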
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 823463adbd21..189e3c5b3d09 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -231,14 +231,14 @@ override:
231 } 231 }
232 if (R_tab) { 232 if (R_tab) {
233 police->rate_present = true; 233 police->rate_present = true;
234 psched_ratecfg_precompute(&police->rate, R_tab->rate.rate); 234 psched_ratecfg_precompute(&police->rate, &R_tab->rate);
235 qdisc_put_rtab(R_tab); 235 qdisc_put_rtab(R_tab);
236 } else { 236 } else {
237 police->rate_present = false; 237 police->rate_present = false;
238 } 238 }
239 if (P_tab) { 239 if (P_tab) {
240 police->peak_present = true; 240 police->peak_present = true;
241 psched_ratecfg_precompute(&police->peak, P_tab->rate.rate); 241 psched_ratecfg_precompute(&police->peak, &P_tab->rate);
242 qdisc_put_rtab(P_tab); 242 qdisc_put_rtab(P_tab);
243 } else { 243 } else {
244 police->peak_present = false; 244 police->peak_present = false;
@@ -376,9 +376,9 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
376 }; 376 };
377 377
378 if (police->rate_present) 378 if (police->rate_present)
379 opt.rate.rate = psched_ratecfg_getrate(&police->rate); 379 psched_ratecfg_getrate(&opt.rate, &police->rate);
380 if (police->peak_present) 380 if (police->peak_present)
381 opt.peakrate.rate = psched_ratecfg_getrate(&police->peak); 381 psched_ratecfg_getrate(&opt.peakrate, &police->peak);
382 if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt)) 382 if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
383 goto nla_put_failure; 383 goto nla_put_failure;
384 if (police->tcfp_result && 384 if (police->tcfp_result &&
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 2b935e7cfe7b..281c1bded1f6 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -291,17 +291,18 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta
291{ 291{
292 struct qdisc_rate_table *rtab; 292 struct qdisc_rate_table *rtab;
293 293
294 if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
295 nla_len(tab) != TC_RTAB_SIZE)
296 return NULL;
297
294 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) { 298 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
295 if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) { 299 if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
300 !memcmp(&rtab->data, nla_data(tab), 1024)) {
296 rtab->refcnt++; 301 rtab->refcnt++;
297 return rtab; 302 return rtab;
298 } 303 }
299 } 304 }
300 305
301 if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
302 nla_len(tab) != TC_RTAB_SIZE)
303 return NULL;
304
305 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL); 306 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
306 if (rtab) { 307 if (rtab) {
307 rtab->rate = *r; 308 rtab->rate = *r;
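qdisc_get_rtab() above now rejects malformed input before scanning the cache and matches cached tables on both the ratespec and the table data, so two configurations with identical ratespecs but different tables no longer share an rtab. A userspace model of that validate-first, full-key cache lookup (the entry layout is illustrative):

/* Userspace model: validate input first, and key a cache on all of its inputs. */
#include <stdio.h>
#include <string.h>

struct entry {
	int spec;
	char data[8];
	int refcnt;
	struct entry *next;
};

static struct entry cache_a = { .spec = 1, .data = "tableA", .refcnt = 1, .next = NULL };
static struct entry *cache = &cache_a;

static struct entry *get_entry(int spec, const char *data)
{
	if (!data || spec == 0)		/* reject bad input before touching the cache */
		return NULL;

	for (struct entry *e = cache; e; e = e->next) {
		/* Match on the spec AND the table payload, not the spec alone. */
		if (e->spec == spec && memcmp(e->data, data, sizeof(e->data)) == 0) {
			e->refcnt++;
			return e;
		}
	}
	return NULL;	/* caller would allocate a fresh entry here */
}

int main(void)
{
	char a[8] = "tableA", b[8] = "tableB";

	printf("same spec, same data:      %s\n", get_entry(1, a) ? "hit" : "miss");
	printf("same spec, different data: %s\n", get_entry(1, b) ? "hit" : "miss");
	return 0;
}
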
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index eac7e0ee23c1..20224086cc28 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -898,14 +898,16 @@ void dev_shutdown(struct net_device *dev)
898 WARN_ON(timer_pending(&dev->watchdog_timer)); 898 WARN_ON(timer_pending(&dev->watchdog_timer));
899} 899}
900 900
901void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate) 901void psched_ratecfg_precompute(struct psched_ratecfg *r,
902 const struct tc_ratespec *conf)
902{ 903{
903 u64 factor; 904 u64 factor;
904 u64 mult; 905 u64 mult;
905 int shift; 906 int shift;
906 907
907 r->rate_bps = (u64)rate << 3; 908 memset(r, 0, sizeof(*r));
908 r->shift = 0; 909 r->overhead = conf->overhead;
910 r->rate_bps = (u64)conf->rate << 3;
909 r->mult = 1; 911 r->mult = 1;
910 /* 912 /*
911 * Calibrate mult, shift so that token counting is accurate 913 * Calibrate mult, shift so that token counting is accurate
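psched_ratecfg_precompute() now takes the whole tc_ratespec so the overhead field travels with the rate, but the underlying trick is unchanged: choose mult and shift so that the transmit time of len bytes in nanoseconds is (len * mult) >> shift, avoiding a 64-bit division per packet. A userspace sketch of one plausible calibration, assuming a simple search for the largest shift whose multiplier still fits in 32 bits:

/* Userspace sketch of calibrating (len * mult) >> shift ~= len * 8 * 1e9 / rate_bps. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct ratecfg { uint32_t mult; int shift; };

static void precompute(struct ratecfg *r, uint64_t rate_bytes_per_sec)
{
	uint64_t rate_bps = rate_bytes_per_sec << 3;
	int shift;

	r->mult = 1;
	r->shift = 0;
	for (shift = 0; shift < 16; shift++) {
		uint64_t mult = (8ULL * NSEC_PER_SEC << shift) / rate_bps;

		if (mult > UINT32_MAX)
			break;
		r->mult = (uint32_t)mult;
		r->shift = shift;
	}
}

int main(void)
{
	struct ratecfg r;
	uint64_t rate = 125000000;	/* 1 Gbit/s expressed in bytes per second */
	uint64_t len = 1500;

	precompute(&r, rate);
	printf("approx ns: %llu, exact ns: %llu\n",
	       (unsigned long long)((len * r.mult) >> r.shift),
	       (unsigned long long)(len * 8 * NSEC_PER_SEC / (rate << 3)));
	return 0;
}
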
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 79b1876b6cd2..adaedd79389c 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -109,7 +109,7 @@ struct htb_class {
109 } un; 109 } un;
110 struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */ 110 struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
111 struct rb_node pq_node; /* node for event queue */ 111 struct rb_node pq_node; /* node for event queue */
112 psched_time_t pq_key; 112 s64 pq_key;
113 113
114 int prio_activity; /* for which prios are we active */ 114 int prio_activity; /* for which prios are we active */
115 enum htb_cmode cmode; /* current mode of the class */ 115 enum htb_cmode cmode; /* current mode of the class */
@@ -121,10 +121,10 @@ struct htb_class {
121 /* token bucket parameters */ 121 /* token bucket parameters */
122 struct psched_ratecfg rate; 122 struct psched_ratecfg rate;
123 struct psched_ratecfg ceil; 123 struct psched_ratecfg ceil;
124 s64 buffer, cbuffer; /* token bucket depth/rate */ 124 s64 buffer, cbuffer; /* token bucket depth/rate */
125 psched_tdiff_t mbuffer; /* max wait time */ 125 s64 mbuffer; /* max wait time */
126 s64 tokens, ctokens; /* current number of tokens */ 126 s64 tokens, ctokens; /* current number of tokens */
127 psched_time_t t_c; /* checkpoint time */ 127 s64 t_c; /* checkpoint time */
128}; 128};
129 129
130struct htb_sched { 130struct htb_sched {
@@ -141,15 +141,15 @@ struct htb_sched {
141 struct rb_root wait_pq[TC_HTB_MAXDEPTH]; 141 struct rb_root wait_pq[TC_HTB_MAXDEPTH];
142 142
143 /* time of nearest event per level (row) */ 143 /* time of nearest event per level (row) */
144 psched_time_t near_ev_cache[TC_HTB_MAXDEPTH]; 144 s64 near_ev_cache[TC_HTB_MAXDEPTH];
145 145
146 int defcls; /* class where unclassified flows go to */ 146 int defcls; /* class where unclassified flows go to */
147 147
148 /* filters for qdisc itself */ 148 /* filters for qdisc itself */
149 struct tcf_proto *filter_list; 149 struct tcf_proto *filter_list;
150 150
151 int rate2quantum; /* quant = rate / rate2quantum */ 151 int rate2quantum; /* quant = rate / rate2quantum */
152 psched_time_t now; /* cached dequeue time */ 152 s64 now; /* cached dequeue time */
153 struct qdisc_watchdog watchdog; 153 struct qdisc_watchdog watchdog;
154 154
155 /* non shaped skbs; let them go directly thru */ 155 /* non shaped skbs; let them go directly thru */
@@ -664,8 +664,8 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
664 * next pending event (0 for no event in pq, q->now for too many events). 664 * next pending event (0 for no event in pq, q->now for too many events).
665 * Note: Applied are events whose have cl->pq_key <= q->now. 665 * Note: Applied are events whose have cl->pq_key <= q->now.
666 */ 666 */
667static psched_time_t htb_do_events(struct htb_sched *q, int level, 667static s64 htb_do_events(struct htb_sched *q, int level,
668 unsigned long start) 668 unsigned long start)
669{ 669{
670 /* don't run for longer than 2 jiffies; 2 is used instead of 670 /* don't run for longer than 2 jiffies; 2 is used instead of
671 * 1 to simplify things when jiffy is going to be incremented 671 * 1 to simplify things when jiffy is going to be incremented
@@ -857,7 +857,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
857 struct sk_buff *skb; 857 struct sk_buff *skb;
858 struct htb_sched *q = qdisc_priv(sch); 858 struct htb_sched *q = qdisc_priv(sch);
859 int level; 859 int level;
860 psched_time_t next_event; 860 s64 next_event;
861 unsigned long start_at; 861 unsigned long start_at;
862 862
863 /* try to dequeue direct packets as high prio (!) to minimize cpu work */ 863 /* try to dequeue direct packets as high prio (!) to minimize cpu work */
@@ -880,7 +880,7 @@ ok:
880 for (level = 0; level < TC_HTB_MAXDEPTH; level++) { 880 for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
881 /* common case optimization - skip event handler quickly */ 881 /* common case optimization - skip event handler quickly */
882 int m; 882 int m;
883 psched_time_t event; 883 s64 event;
884 884
885 if (q->now >= q->near_ev_cache[level]) { 885 if (q->now >= q->near_ev_cache[level]) {
886 event = htb_do_events(q, level, start_at); 886 event = htb_do_events(q, level, start_at);
@@ -1090,9 +1090,9 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1090 1090
1091 memset(&opt, 0, sizeof(opt)); 1091 memset(&opt, 0, sizeof(opt));
1092 1092
1093 opt.rate.rate = psched_ratecfg_getrate(&cl->rate); 1093 psched_ratecfg_getrate(&opt.rate, &cl->rate);
1094 opt.buffer = PSCHED_NS2TICKS(cl->buffer); 1094 opt.buffer = PSCHED_NS2TICKS(cl->buffer);
1095 opt.ceil.rate = psched_ratecfg_getrate(&cl->ceil); 1095 psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
1096 opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer); 1096 opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
1097 opt.quantum = cl->quantum; 1097 opt.quantum = cl->quantum;
1098 opt.prio = cl->prio; 1098 opt.prio = cl->prio;
@@ -1117,8 +1117,8 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
1117 1117
1118 if (!cl->level && cl->un.leaf.q) 1118 if (!cl->level && cl->un.leaf.q)
1119 cl->qstats.qlen = cl->un.leaf.q->q.qlen; 1119 cl->qstats.qlen = cl->un.leaf.q->q.qlen;
1120 cl->xstats.tokens = cl->tokens; 1120 cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
1121 cl->xstats.ctokens = cl->ctokens; 1121 cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
1122 1122
1123 if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || 1123 if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
1124 gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 || 1124 gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
@@ -1200,7 +1200,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
1200 parent->un.leaf.q = new_q ? new_q : &noop_qdisc; 1200 parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
1201 parent->tokens = parent->buffer; 1201 parent->tokens = parent->buffer;
1202 parent->ctokens = parent->cbuffer; 1202 parent->ctokens = parent->cbuffer;
1203 parent->t_c = psched_get_time(); 1203 parent->t_c = ktime_to_ns(ktime_get());
1204 parent->cmode = HTB_CAN_SEND; 1204 parent->cmode = HTB_CAN_SEND;
1205} 1205}
1206 1206
@@ -1417,8 +1417,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1417 /* set class to be in HTB_CAN_SEND state */ 1417 /* set class to be in HTB_CAN_SEND state */
1418 cl->tokens = PSCHED_TICKS2NS(hopt->buffer); 1418 cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
1419 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer); 1419 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
1420 cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC; /* 1min */ 1420 cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */
1421 cl->t_c = psched_get_time(); 1421 cl->t_c = ktime_to_ns(ktime_get());
1422 cl->cmode = HTB_CAN_SEND; 1422 cl->cmode = HTB_CAN_SEND;
1423 1423
1424 /* attach to the hash list and parent's family */ 1424 /* attach to the hash list and parent's family */
@@ -1459,8 +1459,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1459 cl->prio = TC_HTB_NUMPRIO - 1; 1459 cl->prio = TC_HTB_NUMPRIO - 1;
1460 } 1460 }
1461 1461
1462 psched_ratecfg_precompute(&cl->rate, hopt->rate.rate); 1462 psched_ratecfg_precompute(&cl->rate, &hopt->rate);
1463 psched_ratecfg_precompute(&cl->ceil, hopt->ceil.rate); 1463 psched_ratecfg_precompute(&cl->ceil, &hopt->ceil);
1464 1464
1465 cl->buffer = PSCHED_TICKS2NS(hopt->buffer); 1465 cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
1466 cl->cbuffer = PSCHED_TICKS2NS(hopt->buffer); 1466 cl->cbuffer = PSCHED_TICKS2NS(hopt->buffer);
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index c8388f3c3426..e478d316602b 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -298,9 +298,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
298 q->tokens = q->buffer; 298 q->tokens = q->buffer;
299 q->ptokens = q->mtu; 299 q->ptokens = q->mtu;
300 300
301 psched_ratecfg_precompute(&q->rate, rtab->rate.rate); 301 psched_ratecfg_precompute(&q->rate, &rtab->rate);
302 if (ptab) { 302 if (ptab) {
303 psched_ratecfg_precompute(&q->peak, ptab->rate.rate); 303 psched_ratecfg_precompute(&q->peak, &ptab->rate);
304 q->peak_present = true; 304 q->peak_present = true;
305 } else { 305 } else {
306 q->peak_present = false; 306 q->peak_present = false;
@@ -350,9 +350,9 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
350 goto nla_put_failure; 350 goto nla_put_failure;
351 351
352 opt.limit = q->limit; 352 opt.limit = q->limit;
353 opt.rate.rate = psched_ratecfg_getrate(&q->rate); 353 psched_ratecfg_getrate(&opt.rate, &q->rate);
354 if (q->peak_present) 354 if (q->peak_present)
355 opt.peakrate.rate = psched_ratecfg_getrate(&q->peak); 355 psched_ratecfg_getrate(&opt.peakrate, &q->peak);
356 else 356 else
357 memset(&opt.peakrate, 0, sizeof(opt.peakrate)); 357 memset(&opt.peakrate, 0, sizeof(opt.peakrate));
358 opt.mtu = PSCHED_NS2TICKS(q->mtu); 358 opt.mtu = PSCHED_NS2TICKS(q->mtu);
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 32a4625fef77..be35e2dbcc9a 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -206,6 +206,8 @@ static inline int sctp_cacc_skip(struct sctp_transport *primary,
206 */ 206 */
207void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q) 207void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
208{ 208{
209 memset(q, 0, sizeof(struct sctp_outq));
210
209 q->asoc = asoc; 211 q->asoc = asoc;
210 INIT_LIST_HEAD(&q->out_chunk_list); 212 INIT_LIST_HEAD(&q->out_chunk_list);
211 INIT_LIST_HEAD(&q->control_chunk_list); 213 INIT_LIST_HEAD(&q->control_chunk_list);
@@ -213,11 +215,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
213 INIT_LIST_HEAD(&q->sacked); 215 INIT_LIST_HEAD(&q->sacked);
214 INIT_LIST_HEAD(&q->abandoned); 216 INIT_LIST_HEAD(&q->abandoned);
215 217
216 q->fast_rtx = 0;
217 q->outstanding_bytes = 0;
218 q->empty = 1; 218 q->empty = 1;
219 q->cork = 0;
220 q->out_qlen = 0;
221} 219}
222 220
223/* Free the outqueue structure and any related pending chunks. 221/* Free the outqueue structure and any related pending chunks.
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index f631c5ff4dbf..6abb1caf9836 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4003,6 +4003,12 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
4003 4003
4004 /* Release our hold on the endpoint. */ 4004 /* Release our hold on the endpoint. */
4005 sp = sctp_sk(sk); 4005 sp = sctp_sk(sk);
4006 /* This could happen during socket init, thus we bail out
4007 * early, since the rest of the below is not setup either.
4008 */
4009 if (sp->ep == NULL)
4010 return;
4011
4006 if (sp->do_auto_asconf) { 4012 if (sp->do_auto_asconf) {
4007 sp->do_auto_asconf = 0; 4013 sp->do_auto_asconf = 0;
4008 list_del(&sp->auto_asconf_list); 4014 list_del(&sp->auto_asconf_list);
diff --git a/net/socket.c b/net/socket.c
index 6b94633ca61d..4ca1526db756 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1956,7 +1956,7 @@ struct used_address {
1956 unsigned int name_len; 1956 unsigned int name_len;
1957}; 1957};
1958 1958
1959static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, 1959static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
1960 struct msghdr *msg_sys, unsigned int flags, 1960 struct msghdr *msg_sys, unsigned int flags,
1961 struct used_address *used_address) 1961 struct used_address *used_address)
1962{ 1962{
@@ -2071,22 +2071,30 @@ out:
2071 * BSD sendmsg interface 2071 * BSD sendmsg interface
2072 */ 2072 */
2073 2073
2074SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags) 2074long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags)
2075{ 2075{
2076 int fput_needed, err; 2076 int fput_needed, err;
2077 struct msghdr msg_sys; 2077 struct msghdr msg_sys;
2078 struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed); 2078 struct socket *sock;
2079 2079
2080 sock = sockfd_lookup_light(fd, &err, &fput_needed);
2080 if (!sock) 2081 if (!sock)
2081 goto out; 2082 goto out;
2082 2083
2083 err = __sys_sendmsg(sock, msg, &msg_sys, flags, NULL); 2084 err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
2084 2085
2085 fput_light(sock->file, fput_needed); 2086 fput_light(sock->file, fput_needed);
2086out: 2087out:
2087 return err; 2088 return err;
2088} 2089}
2089 2090
2091SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags)
2092{
2093 if (flags & MSG_CMSG_COMPAT)
2094 return -EINVAL;
2095 return __sys_sendmsg(fd, msg, flags);
2096}
2097
2090/* 2098/*
2091 * Linux sendmmsg interface 2099 * Linux sendmmsg interface
2092 */ 2100 */
@@ -2117,15 +2125,16 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
2117 2125
2118 while (datagrams < vlen) { 2126 while (datagrams < vlen) {
2119 if (MSG_CMSG_COMPAT & flags) { 2127 if (MSG_CMSG_COMPAT & flags) {
2120 err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry, 2128 err = ___sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
2121 &msg_sys, flags, &used_address); 2129 &msg_sys, flags, &used_address);
2122 if (err < 0) 2130 if (err < 0)
2123 break; 2131 break;
2124 err = __put_user(err, &compat_entry->msg_len); 2132 err = __put_user(err, &compat_entry->msg_len);
2125 ++compat_entry; 2133 ++compat_entry;
2126 } else { 2134 } else {
2127 err = __sys_sendmsg(sock, (struct msghdr __user *)entry, 2135 err = ___sys_sendmsg(sock,
2128 &msg_sys, flags, &used_address); 2136 (struct msghdr __user *)entry,
2137 &msg_sys, flags, &used_address);
2129 if (err < 0) 2138 if (err < 0)
2130 break; 2139 break;
2131 err = put_user(err, &entry->msg_len); 2140 err = put_user(err, &entry->msg_len);
@@ -2149,10 +2158,12 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
2149SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg, 2158SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
2150 unsigned int, vlen, unsigned int, flags) 2159 unsigned int, vlen, unsigned int, flags)
2151{ 2160{
2161 if (flags & MSG_CMSG_COMPAT)
2162 return -EINVAL;
2152 return __sys_sendmmsg(fd, mmsg, vlen, flags); 2163 return __sys_sendmmsg(fd, mmsg, vlen, flags);
2153} 2164}
2154 2165
2155static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, 2166static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
2156 struct msghdr *msg_sys, unsigned int flags, int nosec) 2167 struct msghdr *msg_sys, unsigned int flags, int nosec)
2157{ 2168{
2158 struct compat_msghdr __user *msg_compat = 2169 struct compat_msghdr __user *msg_compat =
@@ -2244,23 +2255,31 @@ out:
2244 * BSD recvmsg interface 2255 * BSD recvmsg interface
2245 */ 2256 */
2246 2257
2247SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, 2258long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags)
2248 unsigned int, flags)
2249{ 2259{
2250 int fput_needed, err; 2260 int fput_needed, err;
2251 struct msghdr msg_sys; 2261 struct msghdr msg_sys;
2252 struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed); 2262 struct socket *sock;
2253 2263
2264 sock = sockfd_lookup_light(fd, &err, &fput_needed);
2254 if (!sock) 2265 if (!sock)
2255 goto out; 2266 goto out;
2256 2267
2257 err = __sys_recvmsg(sock, msg, &msg_sys, flags, 0); 2268 err = ___sys_recvmsg(sock, msg, &msg_sys, flags, 0);
2258 2269
2259 fput_light(sock->file, fput_needed); 2270 fput_light(sock->file, fput_needed);
2260out: 2271out:
2261 return err; 2272 return err;
2262} 2273}
2263 2274
2275SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
2276 unsigned int, flags)
2277{
2278 if (flags & MSG_CMSG_COMPAT)
2279 return -EINVAL;
2280 return __sys_recvmsg(fd, msg, flags);
2281}
2282
2264/* 2283/*
2265 * Linux recvmmsg interface 2284 * Linux recvmmsg interface
2266 */ 2285 */
@@ -2298,17 +2317,18 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
2298 * No need to ask LSM for more than the first datagram. 2317 * No need to ask LSM for more than the first datagram.
2299 */ 2318 */
2300 if (MSG_CMSG_COMPAT & flags) { 2319 if (MSG_CMSG_COMPAT & flags) {
2301 err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry, 2320 err = ___sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
2302 &msg_sys, flags & ~MSG_WAITFORONE, 2321 &msg_sys, flags & ~MSG_WAITFORONE,
2303 datagrams); 2322 datagrams);
2304 if (err < 0) 2323 if (err < 0)
2305 break; 2324 break;
2306 err = __put_user(err, &compat_entry->msg_len); 2325 err = __put_user(err, &compat_entry->msg_len);
2307 ++compat_entry; 2326 ++compat_entry;
2308 } else { 2327 } else {
2309 err = __sys_recvmsg(sock, (struct msghdr __user *)entry, 2328 err = ___sys_recvmsg(sock,
2310 &msg_sys, flags & ~MSG_WAITFORONE, 2329 (struct msghdr __user *)entry,
2311 datagrams); 2330 &msg_sys, flags & ~MSG_WAITFORONE,
2331 datagrams);
2312 if (err < 0) 2332 if (err < 0)
2313 break; 2333 break;
2314 err = put_user(err, &entry->msg_len); 2334 err = put_user(err, &entry->msg_len);
@@ -2375,6 +2395,9 @@ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
2375 int datagrams; 2395 int datagrams;
2376 struct timespec timeout_sys; 2396 struct timespec timeout_sys;
2377 2397
2398 if (flags & MSG_CMSG_COMPAT)
2399 return -EINVAL;
2400
2378 if (!timeout) 2401 if (!timeout)
2379 return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL); 2402 return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL);
2380 2403
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 871c73c92165..29b4ba93ab3c 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1287,7 +1287,7 @@ static bool use_gss_proxy(struct net *net)
1287 1287
1288#ifdef CONFIG_PROC_FS 1288#ifdef CONFIG_PROC_FS
1289 1289
1290static bool set_gss_proxy(struct net *net, int type) 1290static int set_gss_proxy(struct net *net, int type)
1291{ 1291{
1292 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 1292 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1293 int ret = 0; 1293 int ret = 0;
@@ -1317,10 +1317,12 @@ static inline bool gssp_ready(struct sunrpc_net *sn)
1317 return false; 1317 return false;
1318} 1318}
1319 1319
1320static int wait_for_gss_proxy(struct net *net) 1320static int wait_for_gss_proxy(struct net *net, struct file *file)
1321{ 1321{
1322 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 1322 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1323 1323
1324 if (file->f_flags & O_NONBLOCK && !gssp_ready(sn))
1325 return -EAGAIN;
1324 return wait_event_interruptible(sn->gssp_wq, gssp_ready(sn)); 1326 return wait_event_interruptible(sn->gssp_wq, gssp_ready(sn));
1325} 1327}
1326 1328
@@ -1362,7 +1364,7 @@ static ssize_t read_gssp(struct file *file, char __user *buf,
1362 size_t len; 1364 size_t len;
1363 int ret; 1365 int ret;
1364 1366
1365 ret = wait_for_gss_proxy(net); 1367 ret = wait_for_gss_proxy(net, file);
1366 if (ret) 1368 if (ret)
1367 return ret; 1369 return ret;
1368 1370
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index c3f9e1ef7f53..06bdf5a1082c 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -810,11 +810,15 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
810 goto badcred; 810 goto badcred;
811 argv->iov_base = (void*)((__be32*)argv->iov_base + slen); /* skip machname */ 811 argv->iov_base = (void*)((__be32*)argv->iov_base + slen); /* skip machname */
812 argv->iov_len -= slen*4; 812 argv->iov_len -= slen*4;
813 813 /*
814 * Note: we skip uid_valid()/gid_valid() checks here for
815 * backwards compatibility with clients that use -1 id's.
816 * Instead, -1 uid or gid is later mapped to the
817 * (export-specific) anonymous id by nfsd_setuser.
818 * Supplementary gid's will be left alone.
819 */
814 cred->cr_uid = make_kuid(&init_user_ns, svc_getnl(argv)); /* uid */ 820 cred->cr_uid = make_kuid(&init_user_ns, svc_getnl(argv)); /* uid */
815 cred->cr_gid = make_kgid(&init_user_ns, svc_getnl(argv)); /* gid */ 821 cred->cr_gid = make_kgid(&init_user_ns, svc_getnl(argv)); /* gid */
816 if (!uid_valid(cred->cr_uid) || !gid_valid(cred->cr_gid))
817 goto badcred;
818 slen = svc_getnl(argv); /* gids length */ 822 slen = svc_getnl(argv); /* gids length */
819 if (slen > 16 || (len -= (slen + 2)*4) < 0) 823 if (slen > 16 || (len -= (slen + 2)*4) < 0)
820 goto badcred; 824 goto badcred;
@@ -823,8 +827,6 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
823 return SVC_CLOSE; 827 return SVC_CLOSE;
824 for (i = 0; i < slen; i++) { 828 for (i = 0; i < slen; i++) {
825 kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv)); 829 kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv));
826 if (!gid_valid(kgid))
827 goto badcred;
828 GROUP_AT(cred->cr_group_info, i) = kgid; 830 GROUP_AT(cred->cr_group_info, i) = kgid;
829 } 831 }
830 if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { 832 if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index dfdb5e643211..b14b7e3cb6e6 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1564,12 +1564,17 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
1564 struct cfg80211_registered_device *dev; 1564 struct cfg80211_registered_device *dev;
1565 s64 filter_wiphy = -1; 1565 s64 filter_wiphy = -1;
1566 bool split = false; 1566 bool split = false;
1567 struct nlattr **tb = nl80211_fam.attrbuf; 1567 struct nlattr **tb;
1568 int res; 1568 int res;
1569 1569
1570 /* will be zeroed in nlmsg_parse() */
1571 tb = kmalloc(sizeof(*tb) * (NL80211_ATTR_MAX + 1), GFP_KERNEL);
1572 if (!tb)
1573 return -ENOMEM;
1574
1570 mutex_lock(&cfg80211_mutex); 1575 mutex_lock(&cfg80211_mutex);
1571 res = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, 1576 res = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
1572 tb, nl80211_fam.maxattr, nl80211_policy); 1577 tb, NL80211_ATTR_MAX, nl80211_policy);
1573 if (res == 0) { 1578 if (res == 0) {
1574 split = tb[NL80211_ATTR_SPLIT_WIPHY_DUMP]; 1579 split = tb[NL80211_ATTR_SPLIT_WIPHY_DUMP];
1575 if (tb[NL80211_ATTR_WIPHY]) 1580 if (tb[NL80211_ATTR_WIPHY])
@@ -1583,6 +1588,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
1583 netdev = dev_get_by_index(sock_net(skb->sk), ifidx); 1588 netdev = dev_get_by_index(sock_net(skb->sk), ifidx);
1584 if (!netdev) { 1589 if (!netdev) {
1585 mutex_unlock(&cfg80211_mutex); 1590 mutex_unlock(&cfg80211_mutex);
1591 kfree(tb);
1586 return -ENODEV; 1592 return -ENODEV;
1587 } 1593 }
1588 if (netdev->ieee80211_ptr) { 1594 if (netdev->ieee80211_ptr) {
@@ -1593,6 +1599,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
1593 dev_put(netdev); 1599 dev_put(netdev);
1594 } 1600 }
1595 } 1601 }
1602 kfree(tb);
1596 1603
1597 list_for_each_entry(dev, &cfg80211_rdev_list, list) { 1604 list_for_each_entry(dev, &cfg80211_rdev_list, list) {
1598 if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk))) 1605 if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk)))
@@ -3411,7 +3418,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq,
3411 (u32)sinfo->rx_bytes)) 3418 (u32)sinfo->rx_bytes))
3412 goto nla_put_failure; 3419 goto nla_put_failure;
3413 if ((sinfo->filled & (STATION_INFO_TX_BYTES | 3420 if ((sinfo->filled & (STATION_INFO_TX_BYTES |
3414 NL80211_STA_INFO_TX_BYTES64)) && 3421 STATION_INFO_TX_BYTES64)) &&
3415 nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES, 3422 nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES,
3416 (u32)sinfo->tx_bytes)) 3423 (u32)sinfo->tx_bytes))
3417 goto nla_put_failure; 3424 goto nla_put_failure;
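
The nl80211_dump_wiphy() hunks above replace the family-wide static attrbuf with a kmalloc()'d array that is freed on every exit path, so concurrent dumps no longer share one parse buffer. The shape of the fix in a stand-alone sketch (parse_attrs() is a stand-in for nlmsg_parse(); ATTR_MAX is an assumed constant, not NL80211_ATTR_MAX):

/* Sketch of the pattern used above: give each dump its own scratch array
 * and release it on every return path. */
#include <stdlib.h>
#include <string.h>

#define ATTR_MAX 16

static int parse_attrs(void **tb, int maxattr)
{
	memset(tb, 0, sizeof(*tb) * (maxattr + 1));	/* "zeroed in parse" */
	return 0;
}

static int dump_wiphy(void)
{
	void **tb;
	int res;

	tb = malloc(sizeof(*tb) * (ATTR_MAX + 1));	/* per-call buffer */
	if (!tb)
		return -1;	/* -ENOMEM in the kernel */

	res = parse_attrs(tb, ATTR_MAX);
	if (res) {
		free(tb);	/* error path frees, matching the kfree(tb) above */
		return res;
	}

	/* ... use tb[...] to filter the dump ... */

	free(tb);	/* normal path frees too */
	return 0;
}

int main(void)
{
	return dump_wiphy() ? 1 : 0;
}
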
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 8b5eddfba1e5..3ed35c345cae 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -231,6 +231,9 @@ void cfg80211_conn_work(struct work_struct *work)
231 mutex_lock(&rdev->sched_scan_mtx); 231 mutex_lock(&rdev->sched_scan_mtx);
232 232
233 list_for_each_entry(wdev, &rdev->wdev_list, list) { 233 list_for_each_entry(wdev, &rdev->wdev_list, list) {
234 if (!wdev->netdev)
235 continue;
236
234 wdev_lock(wdev); 237 wdev_lock(wdev);
235 if (!netif_running(wdev->netdev)) { 238 if (!netif_running(wdev->netdev)) {
236 wdev_unlock(wdev); 239 wdev_unlock(wdev);
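
cfg80211_conn_work() can now meet wdevs that have no netdev attached, so the loop above skips them before the netif_running() dereference. The guard is the generic "check the pointer before walking through it in a list scan" pattern; a tiny illustration with invented stand-in types (not cfg80211 structures):

/* Minimal illustration of the NULL-member guard added above. */
#include <stdio.h>
#include <stddef.h>

struct fake_netdev { int running; };
struct fake_wdev   { struct fake_netdev *netdev; };

int main(void)
{
	struct fake_netdev nd = { .running = 1 };
	struct fake_wdev wdevs[] = { { .netdev = NULL }, { .netdev = &nd } };

	for (size_t i = 0; i < sizeof(wdevs) / sizeof(wdevs[0]); i++) {
		if (!wdevs[i].netdev)	/* the new "continue" case */
			continue;
		printf("wdev %zu running=%d\n", i, wdevs[i].netdev->running);
	}
	return 0;
}
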
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 23cea0f74336..ea970b8002a2 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2557,11 +2557,12 @@ static void __xfrm_garbage_collect(struct net *net)
2557 } 2557 }
2558} 2558}
2559 2559
2560static void xfrm_garbage_collect(struct net *net) 2560void xfrm_garbage_collect(struct net *net)
2561{ 2561{
2562 flow_cache_flush(); 2562 flow_cache_flush();
2563 __xfrm_garbage_collect(net); 2563 __xfrm_garbage_collect(net);
2564} 2564}
2565EXPORT_SYMBOL(xfrm_garbage_collect);
2565 2566
2566static void xfrm_garbage_collect_deferred(struct net *net) 2567static void xfrm_garbage_collect_deferred(struct net *net)
2567{ 2568{
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index aa778748c565..3f565e495ac6 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1681,6 +1681,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1681 1681
1682out: 1682out:
1683 xfrm_pol_put(xp); 1683 xfrm_pol_put(xp);
1684 if (delete && err == 0)
1685 xfrm_garbage_collect(net);
1684 return err; 1686 return err;
1685} 1687}
1686 1688
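
The xfrm hunks export xfrm_garbage_collect() and call it from xfrm_get_policy() only once a policy has actually been removed (delete && err == 0), so stale cached bundles are dropped right away instead of waiting for the next flush. As a generic pattern, "invalidate the lookup cache only after a confirmed delete" looks roughly like this (the cache and names are hypothetical, not the kernel flow cache):

/* Hedged sketch of "flush cached lookups only when the delete succeeded",
 * mirroring the `if (delete && err == 0) xfrm_garbage_collect(net);` hunk. */
#include <stdio.h>
#include <string.h>

#define CACHE_SLOTS 4

static int cache[CACHE_SLOTS] = { 1, 2, 3, 4 };	/* pretend cached bundles */

static void garbage_collect(void)
{
	memset(cache, 0, sizeof(cache));	/* drop everything stale */
	printf("cache flushed\n");
}

static int delete_policy(int id, int fail)
{
	if (fail)
		return -1;	/* e.g. -ENOENT: nothing deleted */
	printf("policy %d deleted\n", id);
	return 0;
}

int main(void)
{
	int err = delete_policy(7, 0);

	if (err == 0)		/* only collect after a confirmed delete */
		garbage_collect();
	return err ? 1 : 0;
}
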
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 51bb3de680b6..f97869f1f09b 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -149,7 +149,7 @@ cpp_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
149 149
150ld_flags = $(LDFLAGS) $(ldflags-y) 150ld_flags = $(LDFLAGS) $(ldflags-y)
151 151
152dtc_cpp_flags = -Wp,-MD,$(depfile).pre -nostdinc \ 152dtc_cpp_flags = -Wp,-MD,$(depfile).pre.tmp -nostdinc \
153 -I$(srctree)/arch/$(SRCARCH)/boot/dts \ 153 -I$(srctree)/arch/$(SRCARCH)/boot/dts \
154 -I$(srctree)/arch/$(SRCARCH)/boot/dts/include \ 154 -I$(srctree)/arch/$(SRCARCH)/boot/dts/include \
155 -undef -D__DTS__ 155 -undef -D__DTS__
@@ -264,14 +264,14 @@ $(obj)/%.dtb.S: $(obj)/%.dtb
264quiet_cmd_dtc = DTC $@ 264quiet_cmd_dtc = DTC $@
265cmd_dtc = $(CPP) $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \ 265cmd_dtc = $(CPP) $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
266 $(objtree)/scripts/dtc/dtc -O dtb -o $@ -b 0 \ 266 $(objtree)/scripts/dtc/dtc -O dtb -o $@ -b 0 \
267 -i $(srctree)/arch/$(SRCARCH)/boot/dts $(DTC_FLAGS) \ 267 -i $(dir $<) $(DTC_FLAGS) \
268 -d $(depfile).dtc $(dtc-tmp) ; \ 268 -d $(depfile).dtc.tmp $(dtc-tmp) ; \
269 cat $(depfile).pre $(depfile).dtc > $(depfile) 269 cat $(depfile).pre.tmp $(depfile).dtc.tmp > $(depfile)
270 270
271$(obj)/%.dtb: $(src)/%.dts FORCE 271$(obj)/%.dtb: $(src)/%.dts FORCE
272 $(call if_changed_dep,dtc) 272 $(call if_changed_dep,dtc)
273 273
274dtc-tmp = $(subst $(comma),_,$(dot-target).dts) 274dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp)
275 275
276# Bzip2 276# Bzip2
277# --------------------------------------------------------------------------- 277# ---------------------------------------------------------------------------
diff --git a/scripts/config b/scripts/config
index bb4d3deb6d1c..a65ecbbdd32a 100755
--- a/scripts/config
+++ b/scripts/config
@@ -105,7 +105,7 @@ while [ "$1" != "" ] ; do
105 ;; 105 ;;
106 --refresh) 106 --refresh)
107 ;; 107 ;;
108 --*-after) 108 --*-after|-E|-D|-M)
109 checkarg "$1" 109 checkarg "$1"
110 A=$ARG 110 A=$ARG
111 checkarg "$2" 111 checkarg "$2"
diff --git a/scripts/dtc/dtc-lexer.l b/scripts/dtc/dtc-lexer.l
index 254d5af88956..3b41bfca636c 100644
--- a/scripts/dtc/dtc-lexer.l
+++ b/scripts/dtc/dtc-lexer.l
@@ -71,7 +71,7 @@ static int pop_input_file(void);
71 push_input_file(name); 71 push_input_file(name);
72 } 72 }
73 73
74<*>^"#"(line)?{WS}+[0-9]+{WS}+{STRING}({WS}+[0-9]+)? { 74<*>^"#"(line)?[ \t]+[0-9]+[ \t]+{STRING}([ \t]+[0-9]+)? {
75 char *line, *tmp, *fn; 75 char *line, *tmp, *fn;
76 /* skip text before line # */ 76 /* skip text before line # */
77 line = yytext; 77 line = yytext;
diff --git a/scripts/dtc/dtc-lexer.lex.c_shipped b/scripts/dtc/dtc-lexer.lex.c_shipped
index a6c5fcdfc032..2d30f41778b7 100644
--- a/scripts/dtc/dtc-lexer.lex.c_shipped
+++ b/scripts/dtc/dtc-lexer.lex.c_shipped
@@ -405,19 +405,19 @@ static yyconst flex_int16_t yy_accept[161] =
405static yyconst flex_int32_t yy_ec[256] = 405static yyconst flex_int32_t yy_ec[256] =
406 { 0, 406 { 0,
407 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 407 1, 1, 1, 1, 1, 1, 1, 1, 2, 3,
408 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 408 4, 4, 4, 1, 1, 1, 1, 1, 1, 1,
409 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 409 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
410 1, 2, 4, 5, 6, 1, 1, 7, 8, 1, 410 1, 2, 5, 6, 7, 1, 1, 8, 9, 1,
411 1, 9, 10, 10, 11, 10, 12, 13, 14, 15, 411 1, 10, 11, 11, 12, 11, 13, 14, 15, 16,
412 15, 15, 15, 15, 15, 15, 15, 16, 1, 17, 412 16, 16, 16, 16, 16, 16, 16, 17, 1, 18,
413 18, 19, 10, 10, 20, 20, 20, 20, 20, 20, 413 19, 20, 11, 11, 21, 21, 21, 21, 21, 21,
414 21, 21, 21, 21, 21, 22, 21, 21, 21, 21, 414 22, 22, 22, 22, 22, 23, 22, 22, 22, 22,
415 21, 21, 21, 21, 23, 21, 21, 24, 21, 21, 415 22, 22, 22, 22, 24, 22, 22, 25, 22, 22,
416 1, 25, 26, 1, 21, 1, 20, 27, 28, 29, 416 1, 26, 27, 1, 22, 1, 21, 28, 29, 30,
417 417
418 30, 20, 21, 21, 31, 21, 21, 32, 33, 34, 418 31, 21, 22, 22, 32, 22, 22, 33, 34, 35,
419 35, 36, 21, 37, 38, 39, 40, 41, 21, 24, 419 36, 37, 22, 38, 39, 40, 41, 42, 22, 25,
420 42, 21, 43, 44, 45, 1, 1, 1, 1, 1, 420 43, 22, 44, 45, 46, 1, 1, 1, 1, 1,
421 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 421 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
422 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 422 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
423 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 423 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -434,36 +434,36 @@ static yyconst flex_int32_t yy_ec[256] =
434 1, 1, 1, 1, 1 434 1, 1, 1, 1, 1
435 } ; 435 } ;
436 436
437static yyconst flex_int32_t yy_meta[46] = 437static yyconst flex_int32_t yy_meta[47] =
438 { 0, 438 { 0,
439 1, 1, 1, 1, 1, 2, 3, 1, 2, 2, 439 1, 1, 1, 1, 1, 1, 2, 3, 1, 2,
440 2, 4, 5, 5, 5, 6, 1, 1, 1, 7, 440 2, 2, 4, 5, 5, 5, 6, 1, 1, 1,
441 8, 8, 8, 8, 1, 1, 7, 7, 7, 7, 441 7, 8, 8, 8, 8, 1, 1, 7, 7, 7,
442 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 442 7, 8, 8, 8, 8, 8, 8, 8, 8, 8,
443 8, 8, 3, 1, 1 443 8, 8, 8, 3, 1, 1
444 } ; 444 } ;
445 445
446static yyconst flex_int16_t yy_base[175] = 446static yyconst flex_int16_t yy_base[175] =
447 { 0, 447 { 0,
448 0, 388, 381, 40, 41, 386, 71, 385, 34, 44, 448 0, 385, 378, 40, 41, 383, 72, 382, 34, 44,
449 390, 395, 60, 62, 371, 112, 111, 111, 111, 104, 449 388, 393, 61, 117, 368, 116, 115, 115, 115, 48,
450 370, 106, 371, 342, 124, 119, 0, 144, 395, 0, 450 367, 107, 368, 339, 127, 120, 0, 147, 393, 0,
451 123, 0, 159, 153, 165, 167, 395, 130, 395, 382, 451 127, 0, 133, 156, 168, 153, 393, 125, 393, 380,
452 395, 0, 372, 122, 395, 157, 374, 379, 350, 21, 452 393, 0, 369, 127, 393, 160, 371, 377, 347, 21,
453 346, 349, 395, 395, 395, 395, 395, 362, 395, 395, 453 343, 346, 393, 393, 393, 393, 393, 359, 393, 393,
454 181, 346, 342, 395, 359, 0, 191, 343, 190, 351, 454 183, 343, 339, 393, 356, 0, 183, 340, 187, 348,
455 350, 0, 0, 0, 173, 362, 177, 367, 357, 329, 455 347, 0, 0, 0, 178, 359, 195, 365, 354, 326,
456 335, 328, 337, 331, 206, 329, 334, 327, 395, 338, 456 332, 325, 334, 328, 204, 326, 331, 324, 393, 335,
457 170, 314, 346, 345, 318, 325, 343, 158, 316, 212, 457 150, 311, 343, 342, 315, 322, 340, 179, 313, 207,
458 458
459 322, 319, 320, 395, 340, 336, 308, 305, 314, 304, 459 319, 316, 317, 393, 337, 333, 305, 302, 311, 301,
460 295, 138, 208, 220, 395, 292, 305, 265, 264, 254, 460 310, 190, 338, 337, 393, 307, 322, 301, 305, 277,
461 201, 222, 285, 275, 273, 270, 236, 235, 225, 115, 461 208, 311, 307, 278, 271, 270, 248, 246, 213, 130,
462 395, 395, 252, 216, 216, 217, 214, 230, 209, 220, 462 393, 393, 263, 235, 207, 221, 218, 229, 213, 213,
463 213, 239, 211, 217, 216, 209, 229, 395, 240, 225, 463 206, 234, 218, 210, 208, 193, 219, 393, 223, 204,
464 206, 169, 395, 395, 116, 106, 99, 54, 395, 395, 464 176, 157, 393, 393, 120, 106, 97, 119, 393, 393,
465 254, 260, 268, 272, 276, 282, 289, 293, 301, 309, 465 245, 251, 259, 263, 267, 273, 280, 284, 292, 300,
466 313, 319, 327, 335 466 304, 310, 318, 326
467 } ; 467 } ;
468 468
469static yyconst flex_int16_t yy_def[175] = 469static yyconst flex_int16_t yy_def[175] =
@@ -489,108 +489,108 @@ static yyconst flex_int16_t yy_def[175] =
489 160, 160, 160, 160 489 160, 160, 160, 160
490 } ; 490 } ;
491 491
492static yyconst flex_int16_t yy_nxt[441] = 492static yyconst flex_int16_t yy_nxt[440] =
493 { 0, 493 { 0,
494 12, 13, 14, 15, 16, 12, 17, 18, 12, 12, 494 12, 13, 14, 13, 15, 16, 12, 17, 18, 12,
495 12, 19, 12, 12, 12, 12, 20, 21, 22, 23, 495 12, 12, 19, 12, 12, 12, 12, 20, 21, 22,
496 23, 23, 23, 23, 12, 12, 23, 23, 23, 23, 496 23, 23, 23, 23, 23, 12, 12, 23, 23, 23,
497 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 497 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
498 23, 23, 12, 24, 12, 25, 34, 35, 35, 25, 498 23, 23, 23, 12, 24, 12, 25, 34, 35, 35,
499 81, 26, 26, 27, 27, 27, 34, 35, 35, 82, 499 25, 81, 26, 26, 27, 27, 27, 34, 35, 35,
500 28, 36, 36, 36, 36, 159, 29, 28, 28, 28, 500 82, 28, 36, 36, 36, 53, 54, 29, 28, 28,
501 28, 12, 13, 14, 15, 16, 30, 17, 18, 30, 501 28, 28, 12, 13, 14, 13, 15, 16, 30, 17,
502 30, 30, 26, 30, 30, 30, 12, 20, 21, 22, 502 18, 30, 30, 30, 26, 30, 30, 30, 12, 20,
503 31, 31, 31, 31, 31, 32, 12, 31, 31, 31, 503 21, 22, 31, 31, 31, 31, 31, 32, 12, 31,
504 504
505 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 505 31, 31, 31, 31, 31, 31, 31, 31, 31, 31,
506 31, 31, 31, 12, 24, 12, 39, 41, 45, 47, 506 31, 31, 31, 31, 31, 12, 24, 12, 36, 36,
507 53, 54, 48, 56, 57, 61, 61, 47, 66, 45, 507 36, 39, 41, 45, 47, 56, 57, 48, 61, 47,
508 48, 66, 66, 66, 39, 46, 40, 49, 59, 50, 508 39, 159, 48, 66, 61, 45, 66, 66, 66, 158,
509 158, 51, 122, 52, 157, 49, 46, 50, 136, 63, 509 46, 40, 49, 59, 50, 157, 51, 49, 52, 50,
510 137, 52, 156, 43, 40, 62, 65, 65, 65, 59, 510 40, 63, 46, 52, 36, 36, 36, 156, 43, 62,
511 61, 61, 123, 65, 75, 69, 69, 69, 36, 36, 511 65, 65, 65, 59, 136, 68, 137, 65, 75, 69,
512 65, 65, 65, 65, 70, 71, 72, 69, 69, 69, 512 69, 69, 70, 71, 65, 65, 65, 65, 70, 71,
513 45, 46, 61, 61, 109, 77, 70, 71, 93, 110, 513 72, 69, 69, 69, 61, 46, 45, 155, 154, 66,
514 68, 70, 71, 85, 85, 85, 66, 46, 155, 66, 514 70, 71, 66, 66, 66, 122, 85, 85, 85, 59,
515 515
516 66, 66, 69, 69, 69, 122, 59, 100, 100, 61, 516 69, 69, 69, 46, 77, 100, 109, 93, 100, 70,
517 61, 70, 71, 100, 100, 148, 112, 154, 85, 85, 517 71, 110, 112, 122, 129, 123, 153, 85, 85, 85,
518 85, 61, 61, 129, 129, 123, 129, 129, 135, 135, 518 135, 135, 135, 148, 148, 160, 135, 135, 135, 152,
519 135, 142, 142, 148, 143, 149, 153, 135, 135, 135, 519 142, 142, 142, 123, 143, 142, 142, 142, 151, 143,
520 142, 142, 160, 143, 152, 151, 150, 146, 145, 144, 520 150, 146, 145, 149, 149, 38, 38, 38, 38, 38,
521 141, 140, 139, 149, 38, 38, 38, 38, 38, 38, 521 38, 38, 38, 42, 144, 141, 140, 42, 42, 44,
522 38, 38, 42, 138, 134, 133, 42, 42, 44, 44, 522 44, 44, 44, 44, 44, 44, 44, 58, 58, 58,
523 44, 44, 44, 44, 44, 44, 58, 58, 58, 58, 523 58, 64, 139, 64, 66, 138, 134, 66, 133, 66,
524 64, 132, 64, 66, 131, 130, 66, 160, 66, 66, 524 66, 67, 132, 131, 67, 67, 67, 67, 73, 130,
525 67, 128, 127, 67, 67, 67, 67, 73, 126, 73, 525 73, 73, 76, 76, 76, 76, 76, 76, 76, 76,
526 526
527 73, 76, 76, 76, 76, 76, 76, 76, 76, 78, 527 78, 78, 78, 78, 78, 78, 78, 78, 91, 160,
528 78, 78, 78, 78, 78, 78, 78, 91, 125, 91, 528 91, 92, 129, 92, 92, 128, 92, 92, 121, 121,
529 92, 124, 92, 92, 120, 92, 92, 121, 121, 121, 529 121, 121, 121, 121, 121, 121, 147, 147, 147, 147,
530 121, 121, 121, 121, 121, 147, 147, 147, 147, 147, 530 147, 147, 147, 147, 127, 126, 125, 124, 61, 61,
531 147, 147, 147, 119, 118, 117, 116, 115, 47, 114, 531 120, 119, 118, 117, 116, 115, 47, 114, 110, 113,
532 110, 113, 111, 108, 107, 106, 48, 105, 104, 89, 532 111, 108, 107, 106, 48, 105, 104, 89, 103, 102,
533 103, 102, 101, 99, 98, 97, 96, 95, 94, 79, 533 101, 99, 98, 97, 96, 95, 94, 79, 77, 90,
534 77, 90, 89, 88, 59, 87, 86, 59, 84, 83, 534 89, 88, 59, 87, 86, 59, 84, 83, 80, 79,
535 80, 79, 77, 74, 160, 60, 59, 55, 37, 160, 535 77, 74, 160, 60, 59, 55, 37, 160, 33, 25,
536 33, 25, 26, 25, 11, 160, 160, 160, 160, 160, 536 26, 25, 11, 160, 160, 160, 160, 160, 160, 160,
537 537
538 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 538 160, 160, 160, 160, 160, 160, 160, 160, 160, 160,
539 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 539 160, 160, 160, 160, 160, 160, 160, 160, 160, 160,
540 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 540 160, 160, 160, 160, 160, 160, 160, 160, 160, 160,
541 160, 160, 160, 160, 160, 160, 160, 160, 160, 160 541 160, 160, 160, 160, 160, 160, 160, 160, 160
542 } ; 542 } ;
543 543
544static yyconst flex_int16_t yy_chk[441] = 544static yyconst flex_int16_t yy_chk[440] =
545 { 0, 545 { 0,
546 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 546 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
547 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 547 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
548 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 548 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
549 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 549 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
550 1, 1, 1, 1, 1, 4, 9, 9, 9, 10, 550 1, 1, 1, 1, 1, 1, 4, 9, 9, 9,
551 50, 4, 5, 5, 5, 5, 10, 10, 10, 50, 551 10, 50, 4, 5, 5, 5, 5, 10, 10, 10,
552 5, 13, 13, 14, 14, 158, 5, 5, 5, 5, 552 50, 5, 13, 13, 13, 20, 20, 5, 5, 5,
553 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 553 5, 5, 7, 7, 7, 7, 7, 7, 7, 7,
554 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 554 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
555 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 555 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
556 556
557 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 557 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
558 7, 7, 7, 7, 7, 7, 16, 17, 18, 19, 558 7, 7, 7, 7, 7, 7, 7, 7, 14, 14,
559 20, 20, 19, 22, 22, 25, 25, 26, 31, 44, 559 14, 16, 17, 18, 19, 22, 22, 19, 25, 26,
560 26, 31, 31, 31, 38, 18, 16, 19, 31, 19, 560 38, 158, 26, 31, 33, 44, 31, 31, 31, 157,
561 157, 19, 112, 19, 156, 26, 44, 26, 130, 26, 561 18, 16, 19, 31, 19, 156, 19, 26, 19, 26,
562 130, 26, 155, 17, 38, 25, 28, 28, 28, 28, 562 38, 26, 44, 26, 36, 36, 36, 155, 17, 25,
563 33, 33, 112, 28, 46, 34, 34, 34, 36, 36, 563 28, 28, 28, 28, 130, 33, 130, 28, 46, 34,
564 28, 28, 28, 28, 34, 34, 34, 35, 35, 35, 564 34, 34, 91, 91, 28, 28, 28, 28, 34, 34,
565 75, 46, 61, 61, 98, 77, 35, 35, 77, 98, 565 34, 35, 35, 35, 61, 46, 75, 152, 151, 67,
566 33, 91, 91, 61, 61, 61, 67, 75, 152, 67, 566 35, 35, 67, 67, 67, 112, 61, 61, 61, 67,
567 567
568 67, 67, 69, 69, 69, 121, 67, 85, 85, 113, 568 69, 69, 69, 75, 77, 85, 98, 77, 100, 69,
569 113, 69, 69, 100, 100, 143, 100, 151, 85, 85, 569 69, 98, 100, 121, 129, 112, 150, 85, 85, 85,
570 85, 114, 114, 122, 122, 121, 129, 129, 135, 135, 570 135, 135, 135, 143, 147, 149, 129, 129, 129, 146,
571 135, 138, 138, 147, 138, 143, 150, 129, 129, 129, 571 138, 138, 138, 121, 138, 142, 142, 142, 145, 142,
572 142, 142, 149, 142, 146, 145, 144, 141, 140, 139, 572 144, 141, 140, 143, 147, 161, 161, 161, 161, 161,
573 137, 136, 134, 147, 161, 161, 161, 161, 161, 161, 573 161, 161, 161, 162, 139, 137, 136, 162, 162, 163,
574 161, 161, 162, 133, 128, 127, 162, 162, 163, 163, 574 163, 163, 163, 163, 163, 163, 163, 164, 164, 164,
575 163, 163, 163, 163, 163, 163, 164, 164, 164, 164, 575 164, 165, 134, 165, 166, 133, 128, 166, 127, 166,
576 165, 126, 165, 166, 125, 124, 166, 123, 166, 166, 576 166, 167, 126, 125, 167, 167, 167, 167, 168, 124,
577 167, 120, 119, 167, 167, 167, 167, 168, 118, 168, 577 168, 168, 169, 169, 169, 169, 169, 169, 169, 169,
578 578
579 168, 169, 169, 169, 169, 169, 169, 169, 169, 170, 579 170, 170, 170, 170, 170, 170, 170, 170, 171, 123,
580 170, 170, 170, 170, 170, 170, 170, 171, 117, 171, 580 171, 172, 122, 172, 172, 120, 172, 172, 173, 173,
581 172, 116, 172, 172, 111, 172, 172, 173, 173, 173, 581 173, 173, 173, 173, 173, 173, 174, 174, 174, 174,
582 173, 173, 173, 173, 173, 174, 174, 174, 174, 174, 582 174, 174, 174, 174, 119, 118, 117, 116, 114, 113,
583 174, 174, 174, 110, 109, 108, 107, 106, 105, 103, 583 111, 110, 109, 108, 107, 106, 105, 103, 102, 101,
584 102, 101, 99, 97, 96, 95, 94, 93, 92, 90, 584 99, 97, 96, 95, 94, 93, 92, 90, 88, 87,
585 88, 87, 86, 84, 83, 82, 81, 80, 79, 78, 585 86, 84, 83, 82, 81, 80, 79, 78, 76, 71,
586 76, 71, 70, 68, 65, 63, 62, 58, 52, 51, 586 70, 68, 65, 63, 62, 58, 52, 51, 49, 48,
587 49, 48, 47, 43, 40, 24, 23, 21, 15, 11, 587 47, 43, 40, 24, 23, 21, 15, 11, 8, 6,
588 8, 6, 3, 2, 160, 160, 160, 160, 160, 160, 588 3, 2, 160, 160, 160, 160, 160, 160, 160, 160,
589 589
590 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 590 160, 160, 160, 160, 160, 160, 160, 160, 160, 160,
591 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 591 160, 160, 160, 160, 160, 160, 160, 160, 160, 160,
592 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 592 160, 160, 160, 160, 160, 160, 160, 160, 160, 160,
593 160, 160, 160, 160, 160, 160, 160, 160, 160, 160 593 160, 160, 160, 160, 160, 160, 160, 160, 160
594 } ; 594 } ;
595 595
596static yy_state_type yy_last_accepting_state; 596static yy_state_type yy_last_accepting_state;
diff --git a/scripts/dtc/dtc-parser.tab.c_shipped b/scripts/dtc/dtc-parser.tab.c_shipped
index 4af55900a15b..ee1d8c3042fb 100644
--- a/scripts/dtc/dtc-parser.tab.c_shipped
+++ b/scripts/dtc/dtc-parser.tab.c_shipped
@@ -1,10 +1,8 @@
1/* A Bison parser, made by GNU Bison 2.5. */
1 2
2/* A Bison parser, made by GNU Bison 2.4.1. */ 3/* Bison implementation for Yacc-like parsers in C
3
4/* Skeleton implementation for Bison's Yacc-like parsers in C
5 4
6 Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006 5 Copyright (C) 1984, 1989-1990, 2000-2011 Free Software Foundation, Inc.
7 Free Software Foundation, Inc.
8 6
9 This program is free software: you can redistribute it and/or modify 7 This program is free software: you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by 8 it under the terms of the GNU General Public License as published by
@@ -46,7 +44,7 @@
46#define YYBISON 1 44#define YYBISON 1
47 45
48/* Bison version. */ 46/* Bison version. */
49#define YYBISON_VERSION "2.4.1" 47#define YYBISON_VERSION "2.5"
50 48
51/* Skeleton name. */ 49/* Skeleton name. */
52#define YYSKELETON_NAME "yacc.c" 50#define YYSKELETON_NAME "yacc.c"
@@ -67,7 +65,7 @@
67 65
68/* Copy the first part of user declarations. */ 66/* Copy the first part of user declarations. */
69 67
70/* Line 189 of yacc.c */ 68/* Line 268 of yacc.c */
71#line 21 "dtc-parser.y" 69#line 21 "dtc-parser.y"
72 70
73#include <stdio.h> 71#include <stdio.h>
@@ -88,8 +86,8 @@ static unsigned long long eval_literal(const char *s, int base, int bits);
88static unsigned char eval_char_literal(const char *s); 86static unsigned char eval_char_literal(const char *s);
89 87
90 88
91/* Line 189 of yacc.c */ 89/* Line 268 of yacc.c */
92#line 93 "dtc-parser.tab.c" 90#line 91 "dtc-parser.tab.c"
93 91
94/* Enabling traces. */ 92/* Enabling traces. */
95#ifndef YYDEBUG 93#ifndef YYDEBUG
@@ -147,7 +145,7 @@ static unsigned char eval_char_literal(const char *s);
147typedef union YYSTYPE 145typedef union YYSTYPE
148{ 146{
149 147
150/* Line 214 of yacc.c */ 148/* Line 293 of yacc.c */
151#line 40 "dtc-parser.y" 149#line 40 "dtc-parser.y"
152 150
153 char *propnodename; 151 char *propnodename;
@@ -171,8 +169,8 @@ typedef union YYSTYPE
171 169
172 170
173 171
174/* Line 214 of yacc.c */ 172/* Line 293 of yacc.c */
175#line 176 "dtc-parser.tab.c" 173#line 174 "dtc-parser.tab.c"
176} YYSTYPE; 174} YYSTYPE;
177# define YYSTYPE_IS_TRIVIAL 1 175# define YYSTYPE_IS_TRIVIAL 1
178# define yystype YYSTYPE /* obsolescent; will be withdrawn */ 176# define yystype YYSTYPE /* obsolescent; will be withdrawn */
@@ -183,8 +181,8 @@ typedef union YYSTYPE
183/* Copy the second part of user declarations. */ 181/* Copy the second part of user declarations. */
184 182
185 183
186/* Line 264 of yacc.c */ 184/* Line 343 of yacc.c */
187#line 188 "dtc-parser.tab.c" 185#line 186 "dtc-parser.tab.c"
188 186
189#ifdef short 187#ifdef short
190# undef short 188# undef short
@@ -234,7 +232,7 @@ typedef short int yytype_int16;
234#define YYSIZE_MAXIMUM ((YYSIZE_T) -1) 232#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
235 233
236#ifndef YY_ 234#ifndef YY_
237# if YYENABLE_NLS 235# if defined YYENABLE_NLS && YYENABLE_NLS
238# if ENABLE_NLS 236# if ENABLE_NLS
239# include <libintl.h> /* INFRINGES ON USER NAME SPACE */ 237# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
240# define YY_(msgid) dgettext ("bison-runtime", msgid) 238# define YY_(msgid) dgettext ("bison-runtime", msgid)
@@ -287,11 +285,11 @@ YYID (yyi)
287# define alloca _alloca 285# define alloca _alloca
288# else 286# else
289# define YYSTACK_ALLOC alloca 287# define YYSTACK_ALLOC alloca
290# if ! defined _ALLOCA_H && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \ 288# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
291 || defined __cplusplus || defined _MSC_VER) 289 || defined __cplusplus || defined _MSC_VER)
292# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ 290# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
293# ifndef _STDLIB_H 291# ifndef EXIT_SUCCESS
294# define _STDLIB_H 1 292# define EXIT_SUCCESS 0
295# endif 293# endif
296# endif 294# endif
297# endif 295# endif
@@ -314,24 +312,24 @@ YYID (yyi)
314# ifndef YYSTACK_ALLOC_MAXIMUM 312# ifndef YYSTACK_ALLOC_MAXIMUM
315# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM 313# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
316# endif 314# endif
317# if (defined __cplusplus && ! defined _STDLIB_H \ 315# if (defined __cplusplus && ! defined EXIT_SUCCESS \
318 && ! ((defined YYMALLOC || defined malloc) \ 316 && ! ((defined YYMALLOC || defined malloc) \
319 && (defined YYFREE || defined free))) 317 && (defined YYFREE || defined free)))
320# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ 318# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
321# ifndef _STDLIB_H 319# ifndef EXIT_SUCCESS
322# define _STDLIB_H 1 320# define EXIT_SUCCESS 0
323# endif 321# endif
324# endif 322# endif
325# ifndef YYMALLOC 323# ifndef YYMALLOC
326# define YYMALLOC malloc 324# define YYMALLOC malloc
327# if ! defined malloc && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \ 325# if ! defined malloc && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
328 || defined __cplusplus || defined _MSC_VER) 326 || defined __cplusplus || defined _MSC_VER)
329void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */ 327void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
330# endif 328# endif
331# endif 329# endif
332# ifndef YYFREE 330# ifndef YYFREE
333# define YYFREE free 331# define YYFREE free
334# if ! defined free && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \ 332# if ! defined free && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
335 || defined __cplusplus || defined _MSC_VER) 333 || defined __cplusplus || defined _MSC_VER)
336void free (void *); /* INFRINGES ON USER NAME SPACE */ 334void free (void *); /* INFRINGES ON USER NAME SPACE */
337# endif 335# endif
@@ -360,23 +358,7 @@ union yyalloc
360 ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \ 358 ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
361 + YYSTACK_GAP_MAXIMUM) 359 + YYSTACK_GAP_MAXIMUM)
362 360
363/* Copy COUNT objects from FROM to TO. The source and destination do 361# define YYCOPY_NEEDED 1
364 not overlap. */
365# ifndef YYCOPY
366# if defined __GNUC__ && 1 < __GNUC__
367# define YYCOPY(To, From, Count) \
368 __builtin_memcpy (To, From, (Count) * sizeof (*(From)))
369# else
370# define YYCOPY(To, From, Count) \
371 do \
372 { \
373 YYSIZE_T yyi; \
374 for (yyi = 0; yyi < (Count); yyi++) \
375 (To)[yyi] = (From)[yyi]; \
376 } \
377 while (YYID (0))
378# endif
379# endif
380 362
381/* Relocate STACK from its old location to the new one. The 363/* Relocate STACK from its old location to the new one. The
382 local variables YYSIZE and YYSTACKSIZE give the old and new number of 364 local variables YYSIZE and YYSTACKSIZE give the old and new number of
@@ -396,6 +378,26 @@ union yyalloc
396 378
397#endif 379#endif
398 380
381#if defined YYCOPY_NEEDED && YYCOPY_NEEDED
382/* Copy COUNT objects from FROM to TO. The source and destination do
383 not overlap. */
384# ifndef YYCOPY
385# if defined __GNUC__ && 1 < __GNUC__
386# define YYCOPY(To, From, Count) \
387 __builtin_memcpy (To, From, (Count) * sizeof (*(From)))
388# else
389# define YYCOPY(To, From, Count) \
390 do \
391 { \
392 YYSIZE_T yyi; \
393 for (yyi = 0; yyi < (Count); yyi++) \
394 (To)[yyi] = (From)[yyi]; \
395 } \
396 while (YYID (0))
397# endif
398# endif
399#endif /* !YYCOPY_NEEDED */
400
399/* YYFINAL -- State number of the termination state. */ 401/* YYFINAL -- State number of the termination state. */
400#define YYFINAL 4 402#define YYFINAL 4
401/* YYLAST -- Last index in YYTABLE. */ 403/* YYLAST -- Last index in YYTABLE. */
@@ -571,8 +573,8 @@ static const yytype_uint8 yyr2[] =
571 2, 0, 2, 2, 0, 2, 2, 2, 3, 2 573 2, 0, 2, 2, 0, 2, 2, 2, 3, 2
572}; 574};
573 575
574/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state 576/* YYDEFACT[STATE-NAME] -- Default reduction number in state STATE-NUM.
575 STATE-NUM when YYTABLE doesn't specify something else to do. Zero 577 Performed when YYTABLE doesn't specify something else to do. Zero
576 means the default is an error. */ 578 means the default is an error. */
577static const yytype_uint8 yydefact[] = 579static const yytype_uint8 yydefact[] =
578{ 580{
@@ -633,8 +635,7 @@ static const yytype_int8 yypgoto[] =
633 635
634/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If 636/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If
635 positive, shift that token. If negative, reduce the rule which 637 positive, shift that token. If negative, reduce the rule which
636 number is the opposite. If zero, do what YYDEFACT says. 638 number is the opposite. If YYTABLE_NINF, syntax error. */
637 If YYTABLE_NINF, syntax error. */
638#define YYTABLE_NINF -1 639#define YYTABLE_NINF -1
639static const yytype_uint8 yytable[] = 640static const yytype_uint8 yytable[] =
640{ 641{
@@ -654,6 +655,12 @@ static const yytype_uint8 yytable[] =
654 137, 0, 73, 139 655 137, 0, 73, 139
655}; 656};
656 657
658#define yypact_value_is_default(yystate) \
659 ((yystate) == (-78))
660
661#define yytable_value_is_error(yytable_value) \
662 YYID (0)
663
657static const yytype_int16 yycheck[] = 664static const yytype_int16 yycheck[] =
658{ 665{
659 5, 38, 39, 17, 18, 19, 12, 12, 17, 18, 666 5, 38, 39, 17, 18, 19, 12, 12, 17, 18,
@@ -705,9 +712,18 @@ static const yytype_uint8 yystos[] =
705 712
706/* Like YYERROR except do call yyerror. This remains here temporarily 713/* Like YYERROR except do call yyerror. This remains here temporarily
707 to ease the transition to the new meaning of YYERROR, for GCC. 714 to ease the transition to the new meaning of YYERROR, for GCC.
708 Once GCC version 2 has supplanted version 1, this can go. */ 715 Once GCC version 2 has supplanted version 1, this can go. However,
716 YYFAIL appears to be in use. Nevertheless, it is formally deprecated
717 in Bison 2.4.2's NEWS entry, where a plan to phase it out is
718 discussed. */
709 719
710#define YYFAIL goto yyerrlab 720#define YYFAIL goto yyerrlab
721#if defined YYFAIL
722 /* This is here to suppress warnings from the GCC cpp's
723 -Wunused-macros. Normally we don't worry about that warning, but
724 some users do, and we want to make it easy for users to remove
725 YYFAIL uses, which will produce warnings from Bison 2.5. */
726#endif
711 727
712#define YYRECOVERING() (!!yyerrstatus) 728#define YYRECOVERING() (!!yyerrstatus)
713 729
@@ -717,7 +733,6 @@ do \
717 { \ 733 { \
718 yychar = (Token); \ 734 yychar = (Token); \
719 yylval = (Value); \ 735 yylval = (Value); \
720 yytoken = YYTRANSLATE (yychar); \
721 YYPOPSTACK (1); \ 736 YYPOPSTACK (1); \
722 goto yybackup; \ 737 goto yybackup; \
723 } \ 738 } \
@@ -759,19 +774,10 @@ while (YYID (0))
759#endif 774#endif
760 775
761 776
762/* YY_LOCATION_PRINT -- Print the location on the stream. 777/* This macro is provided for backward compatibility. */
763 This macro was not mandated originally: define only if we know
764 we won't break user code: when these are the locations we know. */
765 778
766#ifndef YY_LOCATION_PRINT 779#ifndef YY_LOCATION_PRINT
767# if YYLTYPE_IS_TRIVIAL 780# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
768# define YY_LOCATION_PRINT(File, Loc) \
769 fprintf (File, "%d.%d-%d.%d", \
770 (Loc).first_line, (Loc).first_column, \
771 (Loc).last_line, (Loc).last_column)
772# else
773# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
774# endif
775#endif 781#endif
776 782
777 783
@@ -963,7 +969,6 @@ int yydebug;
963# define YYMAXDEPTH 10000 969# define YYMAXDEPTH 10000
964#endif 970#endif
965 971
966
967 972
968#if YYERROR_VERBOSE 973#if YYERROR_VERBOSE
969 974
@@ -1066,115 +1071,142 @@ yytnamerr (char *yyres, const char *yystr)
1066} 1071}
1067# endif 1072# endif
1068 1073
1069/* Copy into YYRESULT an error message about the unexpected token 1074/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message
1070 YYCHAR while in state YYSTATE. Return the number of bytes copied, 1075 about the unexpected token YYTOKEN for the state stack whose top is
1071 including the terminating null byte. If YYRESULT is null, do not 1076 YYSSP.
1072 copy anything; just return the number of bytes that would be
1073 copied. As a special case, return 0 if an ordinary "syntax error"
1074 message will do. Return YYSIZE_MAXIMUM if overflow occurs during
1075 size calculation. */
1076static YYSIZE_T
1077yysyntax_error (char *yyresult, int yystate, int yychar)
1078{
1079 int yyn = yypact[yystate];
1080 1077
1081 if (! (YYPACT_NINF < yyn && yyn <= YYLAST)) 1078 Return 0 if *YYMSG was successfully written. Return 1 if *YYMSG is
1082 return 0; 1079 not large enough to hold the message. In that case, also set
1083 else 1080 *YYMSG_ALLOC to the required number of bytes. Return 2 if the
1081 required number of bytes is too large to store. */
1082static int
1083yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
1084 yytype_int16 *yyssp, int yytoken)
1085{
1086 YYSIZE_T yysize0 = yytnamerr (0, yytname[yytoken]);
1087 YYSIZE_T yysize = yysize0;
1088 YYSIZE_T yysize1;
1089 enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
1090 /* Internationalized format string. */
1091 const char *yyformat = 0;
1092 /* Arguments of yyformat. */
1093 char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
1094 /* Number of reported tokens (one for the "unexpected", one per
1095 "expected"). */
1096 int yycount = 0;
1097
1098 /* There are many possibilities here to consider:
1099 - Assume YYFAIL is not used. It's too flawed to consider. See
1100 <http://lists.gnu.org/archive/html/bison-patches/2009-12/msg00024.html>
1101 for details. YYERROR is fine as it does not invoke this
1102 function.
1103 - If this state is a consistent state with a default action, then
1104 the only way this function was invoked is if the default action
1105 is an error action. In that case, don't check for expected
1106 tokens because there are none.
1107 - The only way there can be no lookahead present (in yychar) is if
1108 this state is a consistent state with a default action. Thus,
1109 detecting the absence of a lookahead is sufficient to determine
1110 that there is no unexpected or expected token to report. In that
1111 case, just report a simple "syntax error".
1112 - Don't assume there isn't a lookahead just because this state is a
1113 consistent state with a default action. There might have been a
1114 previous inconsistent state, consistent state with a non-default
1115 action, or user semantic action that manipulated yychar.
1116 - Of course, the expected token list depends on states to have
1117 correct lookahead information, and it depends on the parser not
1118 to perform extra reductions after fetching a lookahead from the
1119 scanner and before detecting a syntax error. Thus, state merging
1120 (from LALR or IELR) and default reductions corrupt the expected
1121 token list. However, the list is correct for canonical LR with
1122 one exception: it will still contain any token that will not be
1123 accepted due to an error action in a later state.
1124 */
1125 if (yytoken != YYEMPTY)
1084 { 1126 {
1085 int yytype = YYTRANSLATE (yychar); 1127 int yyn = yypact[*yyssp];
1086 YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]); 1128 yyarg[yycount++] = yytname[yytoken];
1087 YYSIZE_T yysize = yysize0; 1129 if (!yypact_value_is_default (yyn))
1088 YYSIZE_T yysize1; 1130 {
1089 int yysize_overflow = 0; 1131 /* Start YYX at -YYN if negative to avoid negative indexes in
1090 enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; 1132 YYCHECK. In other words, skip the first -YYN actions for
1091 char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; 1133 this state because they are default actions. */
1092 int yyx; 1134 int yyxbegin = yyn < 0 ? -yyn : 0;
1093 1135 /* Stay within bounds of both yycheck and yytname. */
1094# if 0 1136 int yychecklim = YYLAST - yyn + 1;
1095 /* This is so xgettext sees the translatable formats that are 1137 int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
1096 constructed on the fly. */ 1138 int yyx;
1097 YY_("syntax error, unexpected %s"); 1139
1098 YY_("syntax error, unexpected %s, expecting %s"); 1140 for (yyx = yyxbegin; yyx < yyxend; ++yyx)
1099 YY_("syntax error, unexpected %s, expecting %s or %s"); 1141 if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR
1100 YY_("syntax error, unexpected %s, expecting %s or %s or %s"); 1142 && !yytable_value_is_error (yytable[yyx + yyn]))
1101 YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"); 1143 {
1102# endif 1144 if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
1103 char *yyfmt; 1145 {
1104 char const *yyf; 1146 yycount = 1;
1105 static char const yyunexpected[] = "syntax error, unexpected %s"; 1147 yysize = yysize0;
1106 static char const yyexpecting[] = ", expecting %s"; 1148 break;
1107 static char const yyor[] = " or %s"; 1149 }
1108 char yyformat[sizeof yyunexpected 1150 yyarg[yycount++] = yytname[yyx];
1109 + sizeof yyexpecting - 1 1151 yysize1 = yysize + yytnamerr (0, yytname[yyx]);
1110 + ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2) 1152 if (! (yysize <= yysize1
1111 * (sizeof yyor - 1))]; 1153 && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
1112 char const *yyprefix = yyexpecting; 1154 return 2;
1113 1155 yysize = yysize1;
1114 /* Start YYX at -YYN if negative to avoid negative indexes in 1156 }
1115 YYCHECK. */ 1157 }
1116 int yyxbegin = yyn < 0 ? -yyn : 0; 1158 }
1117
1118 /* Stay within bounds of both yycheck and yytname. */
1119 int yychecklim = YYLAST - yyn + 1;
1120 int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
1121 int yycount = 1;
1122
1123 yyarg[0] = yytname[yytype];
1124 yyfmt = yystpcpy (yyformat, yyunexpected);
1125
1126 for (yyx = yyxbegin; yyx < yyxend; ++yyx)
1127 if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
1128 {
1129 if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
1130 {
1131 yycount = 1;
1132 yysize = yysize0;
1133 yyformat[sizeof yyunexpected - 1] = '\0';
1134 break;
1135 }
1136 yyarg[yycount++] = yytname[yyx];
1137 yysize1 = yysize + yytnamerr (0, yytname[yyx]);
1138 yysize_overflow |= (yysize1 < yysize);
1139 yysize = yysize1;
1140 yyfmt = yystpcpy (yyfmt, yyprefix);
1141 yyprefix = yyor;
1142 }
1143 1159
1144 yyf = YY_(yyformat); 1160 switch (yycount)
1145 yysize1 = yysize + yystrlen (yyf); 1161 {
1146 yysize_overflow |= (yysize1 < yysize); 1162# define YYCASE_(N, S) \
1147 yysize = yysize1; 1163 case N: \
1164 yyformat = S; \
1165 break
1166 YYCASE_(0, YY_("syntax error"));
1167 YYCASE_(1, YY_("syntax error, unexpected %s"));
1168 YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s"));
1169 YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s"));
1170 YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s"));
1171 YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"));
1172# undef YYCASE_
1173 }
1148 1174
1149 if (yysize_overflow) 1175 yysize1 = yysize + yystrlen (yyformat);
1150 return YYSIZE_MAXIMUM; 1176 if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
1177 return 2;
1178 yysize = yysize1;
1151 1179
1152 if (yyresult) 1180 if (*yymsg_alloc < yysize)
1153 { 1181 {
1154 /* Avoid sprintf, as that infringes on the user's name space. 1182 *yymsg_alloc = 2 * yysize;
1155 Don't have undefined behavior even if the translation 1183 if (! (yysize <= *yymsg_alloc
1156 produced a string with the wrong number of "%s"s. */ 1184 && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM))
1157 char *yyp = yyresult; 1185 *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM;
1158 int yyi = 0; 1186 return 1;
1159 while ((*yyp = *yyf) != '\0')
1160 {
1161 if (*yyp == '%' && yyf[1] == 's' && yyi < yycount)
1162 {
1163 yyp += yytnamerr (yyp, yyarg[yyi++]);
1164 yyf += 2;
1165 }
1166 else
1167 {
1168 yyp++;
1169 yyf++;
1170 }
1171 }
1172 }
1173 return yysize;
1174 } 1187 }
1188
1189 /* Avoid sprintf, as that infringes on the user's name space.
1190 Don't have undefined behavior even if the translation
1191 produced a string with the wrong number of "%s"s. */
1192 {
1193 char *yyp = *yymsg;
1194 int yyi = 0;
1195 while ((*yyp = *yyformat) != '\0')
1196 if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount)
1197 {
1198 yyp += yytnamerr (yyp, yyarg[yyi++]);
1199 yyformat += 2;
1200 }
1201 else
1202 {
1203 yyp++;
1204 yyformat++;
1205 }
1206 }
1207 return 0;
1175} 1208}
1176#endif /* YYERROR_VERBOSE */ 1209#endif /* YYERROR_VERBOSE */
1177
1178 1210
1179/*-----------------------------------------------. 1211/*-----------------------------------------------.
1180| Release the memory associated to this symbol. | 1212| Release the memory associated to this symbol. |
@@ -1207,6 +1239,7 @@ yydestruct (yymsg, yytype, yyvaluep)
1207 } 1239 }
1208} 1240}
1209 1241
1242
1210/* Prevent warnings from -Wmissing-prototypes. */ 1243/* Prevent warnings from -Wmissing-prototypes. */
1211#ifdef YYPARSE_PARAM 1244#ifdef YYPARSE_PARAM
1212#if defined __STDC__ || defined __cplusplus 1245#if defined __STDC__ || defined __cplusplus
@@ -1233,10 +1266,9 @@ YYSTYPE yylval;
1233int yynerrs; 1266int yynerrs;
1234 1267
1235 1268
1236 1269/*----------.
1237/*-------------------------. 1270| yyparse. |
1238| yyparse or yypush_parse. | 1271`----------*/
1239`-------------------------*/
1240 1272
1241#ifdef YYPARSE_PARAM 1273#ifdef YYPARSE_PARAM
1242#if (defined __STDC__ || defined __C99__FUNC__ \ 1274#if (defined __STDC__ || defined __C99__FUNC__ \
@@ -1260,8 +1292,6 @@ yyparse ()
1260#endif 1292#endif
1261#endif 1293#endif
1262{ 1294{
1263
1264
1265 int yystate; 1295 int yystate;
1266 /* Number of tokens to shift before error messages enabled. */ 1296 /* Number of tokens to shift before error messages enabled. */
1267 int yyerrstatus; 1297 int yyerrstatus;
@@ -1416,7 +1446,7 @@ yybackup:
1416 1446
1417 /* First try to decide what to do without reference to lookahead token. */ 1447 /* First try to decide what to do without reference to lookahead token. */
1418 yyn = yypact[yystate]; 1448 yyn = yypact[yystate];
1419 if (yyn == YYPACT_NINF) 1449 if (yypact_value_is_default (yyn))
1420 goto yydefault; 1450 goto yydefault;
1421 1451
1422 /* Not known => get a lookahead token if don't already have one. */ 1452 /* Not known => get a lookahead token if don't already have one. */
@@ -1447,8 +1477,8 @@ yybackup:
1447 yyn = yytable[yyn]; 1477 yyn = yytable[yyn];
1448 if (yyn <= 0) 1478 if (yyn <= 0)
1449 { 1479 {
1450 if (yyn == 0 || yyn == YYTABLE_NINF) 1480 if (yytable_value_is_error (yyn))
1451 goto yyerrlab; 1481 goto yyerrlab;
1452 yyn = -yyn; 1482 yyn = -yyn;
1453 goto yyreduce; 1483 goto yyreduce;
1454 } 1484 }
@@ -1503,72 +1533,72 @@ yyreduce:
1503 { 1533 {
1504 case 2: 1534 case 2:
1505 1535
1506/* Line 1455 of yacc.c */ 1536/* Line 1806 of yacc.c */
1507#line 110 "dtc-parser.y" 1537#line 110 "dtc-parser.y"
1508 { 1538 {
1509 the_boot_info = build_boot_info((yyvsp[(3) - (4)].re), (yyvsp[(4) - (4)].node), 1539 the_boot_info = build_boot_info((yyvsp[(3) - (4)].re), (yyvsp[(4) - (4)].node),
1510 guess_boot_cpuid((yyvsp[(4) - (4)].node))); 1540 guess_boot_cpuid((yyvsp[(4) - (4)].node)));
1511 ;} 1541 }
1512 break; 1542 break;
1513 1543
1514 case 3: 1544 case 3:
1515 1545
1516/* Line 1455 of yacc.c */ 1546/* Line 1806 of yacc.c */
1517#line 118 "dtc-parser.y" 1547#line 118 "dtc-parser.y"
1518 { 1548 {
1519 (yyval.re) = NULL; 1549 (yyval.re) = NULL;
1520 ;} 1550 }
1521 break; 1551 break;
1522 1552
1523 case 4: 1553 case 4:
1524 1554
1525/* Line 1455 of yacc.c */ 1555/* Line 1806 of yacc.c */
1526#line 122 "dtc-parser.y" 1556#line 122 "dtc-parser.y"
1527 { 1557 {
1528 (yyval.re) = chain_reserve_entry((yyvsp[(1) - (2)].re), (yyvsp[(2) - (2)].re)); 1558 (yyval.re) = chain_reserve_entry((yyvsp[(1) - (2)].re), (yyvsp[(2) - (2)].re));
1529 ;} 1559 }
1530 break; 1560 break;
1531 1561
1532 case 5: 1562 case 5:
1533 1563
1534/* Line 1455 of yacc.c */ 1564/* Line 1806 of yacc.c */
1535#line 129 "dtc-parser.y" 1565#line 129 "dtc-parser.y"
1536 { 1566 {
1537 (yyval.re) = build_reserve_entry((yyvsp[(2) - (4)].integer), (yyvsp[(3) - (4)].integer)); 1567 (yyval.re) = build_reserve_entry((yyvsp[(2) - (4)].integer), (yyvsp[(3) - (4)].integer));
1538 ;} 1568 }
1539 break; 1569 break;
1540 1570
1541 case 6: 1571 case 6:
1542 1572
1543/* Line 1455 of yacc.c */ 1573/* Line 1806 of yacc.c */
1544#line 133 "dtc-parser.y" 1574#line 133 "dtc-parser.y"
1545 { 1575 {
1546 add_label(&(yyvsp[(2) - (2)].re)->labels, (yyvsp[(1) - (2)].labelref)); 1576 add_label(&(yyvsp[(2) - (2)].re)->labels, (yyvsp[(1) - (2)].labelref));
1547 (yyval.re) = (yyvsp[(2) - (2)].re); 1577 (yyval.re) = (yyvsp[(2) - (2)].re);
1548 ;} 1578 }
1549 break; 1579 break;
1550 1580
1551 case 7: 1581 case 7:
1552 1582
1553/* Line 1455 of yacc.c */ 1583/* Line 1806 of yacc.c */
1554#line 141 "dtc-parser.y" 1584#line 141 "dtc-parser.y"
1555 { 1585 {
1556 (yyval.node) = name_node((yyvsp[(2) - (2)].node), ""); 1586 (yyval.node) = name_node((yyvsp[(2) - (2)].node), "");
1557 ;} 1587 }
1558 break; 1588 break;
1559 1589
1560 case 8: 1590 case 8:
1561 1591
1562/* Line 1455 of yacc.c */ 1592/* Line 1806 of yacc.c */
1563#line 145 "dtc-parser.y" 1593#line 145 "dtc-parser.y"
1564 { 1594 {
1565 (yyval.node) = merge_nodes((yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node)); 1595 (yyval.node) = merge_nodes((yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
1566 ;} 1596 }
1567 break; 1597 break;
1568 1598
1569 case 9: 1599 case 9:
1570 1600
1571/* Line 1455 of yacc.c */ 1601/* Line 1806 of yacc.c */
1572#line 149 "dtc-parser.y" 1602#line 149 "dtc-parser.y"
1573 { 1603 {
1574 struct node *target = get_node_by_ref((yyvsp[(1) - (3)].node), (yyvsp[(2) - (3)].labelref)); 1604 struct node *target = get_node_by_ref((yyvsp[(1) - (3)].node), (yyvsp[(2) - (3)].labelref));
@@ -1578,12 +1608,12 @@ yyreduce:
1578 else 1608 else
1579 print_error("label or path, '%s', not found", (yyvsp[(2) - (3)].labelref)); 1609 print_error("label or path, '%s', not found", (yyvsp[(2) - (3)].labelref));
1580 (yyval.node) = (yyvsp[(1) - (3)].node); 1610 (yyval.node) = (yyvsp[(1) - (3)].node);
1581 ;} 1611 }
1582 break; 1612 break;
1583 1613
1584 case 10: 1614 case 10:
1585 1615
1586/* Line 1455 of yacc.c */ 1616/* Line 1806 of yacc.c */
1587#line 159 "dtc-parser.y" 1617#line 159 "dtc-parser.y"
1588 { 1618 {
1589 struct node *target = get_node_by_ref((yyvsp[(1) - (4)].node), (yyvsp[(3) - (4)].labelref)); 1619 struct node *target = get_node_by_ref((yyvsp[(1) - (4)].node), (yyvsp[(3) - (4)].labelref));
@@ -1594,112 +1624,112 @@ yyreduce:
1594 delete_node(target); 1624 delete_node(target);
1595 1625
1596 (yyval.node) = (yyvsp[(1) - (4)].node); 1626 (yyval.node) = (yyvsp[(1) - (4)].node);
1597 ;} 1627 }
1598 break; 1628 break;
1599 1629
1600 case 11: 1630 case 11:
1601 1631
1602/* Line 1455 of yacc.c */ 1632/* Line 1806 of yacc.c */
1603#line 173 "dtc-parser.y" 1633#line 173 "dtc-parser.y"
1604 { 1634 {
1605 (yyval.node) = build_node((yyvsp[(2) - (5)].proplist), (yyvsp[(3) - (5)].nodelist)); 1635 (yyval.node) = build_node((yyvsp[(2) - (5)].proplist), (yyvsp[(3) - (5)].nodelist));
1606 ;} 1636 }
1607 break; 1637 break;
1608 1638
1609 case 12: 1639 case 12:
1610 1640
1611/* Line 1455 of yacc.c */ 1641/* Line 1806 of yacc.c */
1612#line 180 "dtc-parser.y" 1642#line 180 "dtc-parser.y"
1613 { 1643 {
1614 (yyval.proplist) = NULL; 1644 (yyval.proplist) = NULL;
1615 ;} 1645 }
1616 break; 1646 break;
1617 1647
1618 case 13: 1648 case 13:
1619 1649
1620/* Line 1455 of yacc.c */ 1650/* Line 1806 of yacc.c */
1621#line 184 "dtc-parser.y" 1651#line 184 "dtc-parser.y"
1622 { 1652 {
1623 (yyval.proplist) = chain_property((yyvsp[(2) - (2)].prop), (yyvsp[(1) - (2)].proplist)); 1653 (yyval.proplist) = chain_property((yyvsp[(2) - (2)].prop), (yyvsp[(1) - (2)].proplist));
1624 ;} 1654 }
1625 break; 1655 break;
1626 1656
1627 case 14: 1657 case 14:
1628 1658
1629/* Line 1455 of yacc.c */ 1659/* Line 1806 of yacc.c */
1630#line 191 "dtc-parser.y" 1660#line 191 "dtc-parser.y"
1631 { 1661 {
1632 (yyval.prop) = build_property((yyvsp[(1) - (4)].propnodename), (yyvsp[(3) - (4)].data)); 1662 (yyval.prop) = build_property((yyvsp[(1) - (4)].propnodename), (yyvsp[(3) - (4)].data));
1633 ;} 1663 }
1634 break; 1664 break;
1635 1665
1636 case 15: 1666 case 15:
1637 1667
1638/* Line 1455 of yacc.c */ 1668/* Line 1806 of yacc.c */
1639#line 195 "dtc-parser.y" 1669#line 195 "dtc-parser.y"
1640 { 1670 {
1641 (yyval.prop) = build_property((yyvsp[(1) - (2)].propnodename), empty_data); 1671 (yyval.prop) = build_property((yyvsp[(1) - (2)].propnodename), empty_data);
1642 ;} 1672 }
1643 break; 1673 break;
1644 1674
1645 case 16: 1675 case 16:
1646 1676
1647/* Line 1455 of yacc.c */ 1677/* Line 1806 of yacc.c */
1648#line 199 "dtc-parser.y" 1678#line 199 "dtc-parser.y"
1649 { 1679 {
1650 (yyval.prop) = build_property_delete((yyvsp[(2) - (3)].propnodename)); 1680 (yyval.prop) = build_property_delete((yyvsp[(2) - (3)].propnodename));
1651 ;} 1681 }
1652 break; 1682 break;
1653 1683
1654 case 17: 1684 case 17:
1655 1685
1656/* Line 1455 of yacc.c */ 1686/* Line 1806 of yacc.c */
1657#line 203 "dtc-parser.y" 1687#line 203 "dtc-parser.y"
1658 { 1688 {
1659 add_label(&(yyvsp[(2) - (2)].prop)->labels, (yyvsp[(1) - (2)].labelref)); 1689 add_label(&(yyvsp[(2) - (2)].prop)->labels, (yyvsp[(1) - (2)].labelref));
1660 (yyval.prop) = (yyvsp[(2) - (2)].prop); 1690 (yyval.prop) = (yyvsp[(2) - (2)].prop);
1661 ;} 1691 }
1662 break; 1692 break;
1663 1693
1664 case 18: 1694 case 18:
1665 1695
1666/* Line 1455 of yacc.c */ 1696/* Line 1806 of yacc.c */
1667#line 211 "dtc-parser.y" 1697#line 211 "dtc-parser.y"
1668 { 1698 {
1669 (yyval.data) = data_merge((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].data)); 1699 (yyval.data) = data_merge((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].data));
1670 ;} 1700 }
1671 break; 1701 break;
1672 1702
1673 case 19: 1703 case 19:
1674 1704
1675/* Line 1455 of yacc.c */ 1705/* Line 1806 of yacc.c */
1676#line 215 "dtc-parser.y" 1706#line 215 "dtc-parser.y"
1677 { 1707 {
1678 (yyval.data) = data_merge((yyvsp[(1) - (3)].data), (yyvsp[(2) - (3)].array).data); 1708 (yyval.data) = data_merge((yyvsp[(1) - (3)].data), (yyvsp[(2) - (3)].array).data);
1679 ;} 1709 }
1680 break; 1710 break;
1681 1711
1682 case 20: 1712 case 20:
1683 1713
1684/* Line 1455 of yacc.c */ 1714/* Line 1806 of yacc.c */
1685#line 219 "dtc-parser.y" 1715#line 219 "dtc-parser.y"
1686 { 1716 {
1687 (yyval.data) = data_merge((yyvsp[(1) - (4)].data), (yyvsp[(3) - (4)].data)); 1717 (yyval.data) = data_merge((yyvsp[(1) - (4)].data), (yyvsp[(3) - (4)].data));
1688 ;} 1718 }
1689 break; 1719 break;
1690 1720
1691 case 21: 1721 case 21:
1692 1722
1693/* Line 1455 of yacc.c */ 1723/* Line 1806 of yacc.c */
1694#line 223 "dtc-parser.y" 1724#line 223 "dtc-parser.y"
1695 { 1725 {
1696 (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), REF_PATH, (yyvsp[(2) - (2)].labelref)); 1726 (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), REF_PATH, (yyvsp[(2) - (2)].labelref));
1697 ;} 1727 }
1698 break; 1728 break;
1699 1729
1700 case 22: 1730 case 22:
1701 1731
1702/* Line 1455 of yacc.c */ 1732/* Line 1806 of yacc.c */
1703#line 227 "dtc-parser.y" 1733#line 227 "dtc-parser.y"
1704 { 1734 {
1705 FILE *f = srcfile_relative_open((yyvsp[(4) - (9)].data).val, NULL); 1735 FILE *f = srcfile_relative_open((yyvsp[(4) - (9)].data).val, NULL);
@@ -1716,12 +1746,12 @@ yyreduce:
1716 1746
1717 (yyval.data) = data_merge((yyvsp[(1) - (9)].data), d); 1747 (yyval.data) = data_merge((yyvsp[(1) - (9)].data), d);
1718 fclose(f); 1748 fclose(f);
1719 ;} 1749 }
1720 break; 1750 break;
1721 1751
1722 case 23: 1752 case 23:
1723 1753
1724/* Line 1455 of yacc.c */ 1754/* Line 1806 of yacc.c */
1725#line 244 "dtc-parser.y" 1755#line 244 "dtc-parser.y"
1726 { 1756 {
1727 FILE *f = srcfile_relative_open((yyvsp[(4) - (5)].data).val, NULL); 1757 FILE *f = srcfile_relative_open((yyvsp[(4) - (5)].data).val, NULL);
@@ -1731,48 +1761,48 @@ yyreduce:
1731 1761
1732 (yyval.data) = data_merge((yyvsp[(1) - (5)].data), d); 1762 (yyval.data) = data_merge((yyvsp[(1) - (5)].data), d);
1733 fclose(f); 1763 fclose(f);
1734 ;} 1764 }
1735 break; 1765 break;
1736 1766
1737 case 24: 1767 case 24:
1738 1768
1739/* Line 1455 of yacc.c */ 1769/* Line 1806 of yacc.c */
1740#line 254 "dtc-parser.y" 1770#line 254 "dtc-parser.y"
1741 { 1771 {
1742 (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref)); 1772 (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
1743 ;} 1773 }
1744 break; 1774 break;
1745 1775
1746 case 25: 1776 case 25:
1747 1777
1748/* Line 1455 of yacc.c */ 1778/* Line 1806 of yacc.c */
1749#line 261 "dtc-parser.y" 1779#line 261 "dtc-parser.y"
1750 { 1780 {
1751 (yyval.data) = empty_data; 1781 (yyval.data) = empty_data;
1752 ;} 1782 }
1753 break; 1783 break;
1754 1784
1755 case 26: 1785 case 26:
1756 1786
1757/* Line 1455 of yacc.c */ 1787/* Line 1806 of yacc.c */
1758#line 265 "dtc-parser.y" 1788#line 265 "dtc-parser.y"
1759 { 1789 {
1760 (yyval.data) = (yyvsp[(1) - (2)].data); 1790 (yyval.data) = (yyvsp[(1) - (2)].data);
1761 ;} 1791 }
1762 break; 1792 break;
1763 1793
1764 case 27: 1794 case 27:
1765 1795
1766/* Line 1455 of yacc.c */ 1796/* Line 1806 of yacc.c */
1767#line 269 "dtc-parser.y" 1797#line 269 "dtc-parser.y"
1768 { 1798 {
1769 (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref)); 1799 (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
1770 ;} 1800 }
1771 break; 1801 break;
1772 1802
1773 case 28: 1803 case 28:
1774 1804
1775/* Line 1455 of yacc.c */ 1805/* Line 1806 of yacc.c */
1776#line 276 "dtc-parser.y" 1806#line 276 "dtc-parser.y"
1777 { 1807 {
1778 (yyval.array).data = empty_data; 1808 (yyval.array).data = empty_data;
@@ -1787,22 +1817,22 @@ yyreduce:
1787 " are currently supported"); 1817 " are currently supported");
1788 (yyval.array).bits = 32; 1818 (yyval.array).bits = 32;
1789 } 1819 }
1790 ;} 1820 }
1791 break; 1821 break;
1792 1822
1793 case 29: 1823 case 29:
1794 1824
1795/* Line 1455 of yacc.c */ 1825/* Line 1806 of yacc.c */
1796#line 291 "dtc-parser.y" 1826#line 291 "dtc-parser.y"
1797 { 1827 {
1798 (yyval.array).data = empty_data; 1828 (yyval.array).data = empty_data;
1799 (yyval.array).bits = 32; 1829 (yyval.array).bits = 32;
1800 ;} 1830 }
1801 break; 1831 break;
1802 1832
1803 case 30: 1833 case 30:
1804 1834
1805/* Line 1455 of yacc.c */ 1835/* Line 1806 of yacc.c */
1806#line 296 "dtc-parser.y" 1836#line 296 "dtc-parser.y"
1807 { 1837 {
1808 if ((yyvsp[(1) - (2)].array).bits < 64) { 1838 if ((yyvsp[(1) - (2)].array).bits < 64) {
@@ -1822,12 +1852,12 @@ yyreduce:
1822 } 1852 }
1823 1853
1824 (yyval.array).data = data_append_integer((yyvsp[(1) - (2)].array).data, (yyvsp[(2) - (2)].integer), (yyvsp[(1) - (2)].array).bits); 1854 (yyval.array).data = data_append_integer((yyvsp[(1) - (2)].array).data, (yyvsp[(2) - (2)].integer), (yyvsp[(1) - (2)].array).bits);
1825 ;} 1855 }
1826 break; 1856 break;
1827 1857
1828 case 31: 1858 case 31:
1829 1859
1830/* Line 1455 of yacc.c */ 1860/* Line 1806 of yacc.c */
1831#line 316 "dtc-parser.y" 1861#line 316 "dtc-parser.y"
1832 { 1862 {
1833 uint64_t val = ~0ULL >> (64 - (yyvsp[(1) - (2)].array).bits); 1863 uint64_t val = ~0ULL >> (64 - (yyvsp[(1) - (2)].array).bits);
@@ -1841,288 +1871,299 @@ yyreduce:
1841 "arrays with 32-bit elements."); 1871 "arrays with 32-bit elements.");
1842 1872
1843 (yyval.array).data = data_append_integer((yyvsp[(1) - (2)].array).data, val, (yyvsp[(1) - (2)].array).bits); 1873 (yyval.array).data = data_append_integer((yyvsp[(1) - (2)].array).data, val, (yyvsp[(1) - (2)].array).bits);
1844 ;} 1874 }
1845 break; 1875 break;
1846 1876
1847 case 32: 1877 case 32:
1848 1878
1849/* Line 1455 of yacc.c */ 1879/* Line 1806 of yacc.c */
1850#line 330 "dtc-parser.y" 1880#line 330 "dtc-parser.y"
1851 { 1881 {
1852 (yyval.array).data = data_add_marker((yyvsp[(1) - (2)].array).data, LABEL, (yyvsp[(2) - (2)].labelref)); 1882 (yyval.array).data = data_add_marker((yyvsp[(1) - (2)].array).data, LABEL, (yyvsp[(2) - (2)].labelref));
1853 ;} 1883 }
1854 break; 1884 break;
1855 1885
1856 case 33: 1886 case 33:
1857 1887
1858/* Line 1455 of yacc.c */ 1888/* Line 1806 of yacc.c */
1859#line 337 "dtc-parser.y" 1889#line 337 "dtc-parser.y"
1860 { 1890 {
1861 (yyval.integer) = eval_literal((yyvsp[(1) - (1)].literal), 0, 64); 1891 (yyval.integer) = eval_literal((yyvsp[(1) - (1)].literal), 0, 64);
1862 ;} 1892 }
1863 break; 1893 break;
1864 1894
1865 case 34: 1895 case 34:
1866 1896
1867/* Line 1455 of yacc.c */ 1897/* Line 1806 of yacc.c */
1868#line 341 "dtc-parser.y" 1898#line 341 "dtc-parser.y"
1869 { 1899 {
1870 (yyval.integer) = eval_char_literal((yyvsp[(1) - (1)].literal)); 1900 (yyval.integer) = eval_char_literal((yyvsp[(1) - (1)].literal));
1871 ;} 1901 }
1872 break; 1902 break;
1873 1903
1874 case 35: 1904 case 35:
1875 1905
1876/* Line 1455 of yacc.c */ 1906/* Line 1806 of yacc.c */
1877#line 345 "dtc-parser.y" 1907#line 345 "dtc-parser.y"
1878 { 1908 {
1879 (yyval.integer) = (yyvsp[(2) - (3)].integer); 1909 (yyval.integer) = (yyvsp[(2) - (3)].integer);
1880 ;} 1910 }
1881 break; 1911 break;
1882 1912
1883 case 38: 1913 case 38:
1884 1914
1885/* Line 1455 of yacc.c */ 1915/* Line 1806 of yacc.c */
1886#line 356 "dtc-parser.y" 1916#line 356 "dtc-parser.y"
1887 { (yyval.integer) = (yyvsp[(1) - (5)].integer) ? (yyvsp[(3) - (5)].integer) : (yyvsp[(5) - (5)].integer); ;} 1917 { (yyval.integer) = (yyvsp[(1) - (5)].integer) ? (yyvsp[(3) - (5)].integer) : (yyvsp[(5) - (5)].integer); }
1888 break; 1918 break;
1889 1919
1890 case 40: 1920 case 40:
1891 1921
1892/* Line 1455 of yacc.c */ 1922/* Line 1806 of yacc.c */
1893#line 361 "dtc-parser.y" 1923#line 361 "dtc-parser.y"
1894 { (yyval.integer) = (yyvsp[(1) - (3)].integer) || (yyvsp[(3) - (3)].integer); ;} 1924 { (yyval.integer) = (yyvsp[(1) - (3)].integer) || (yyvsp[(3) - (3)].integer); }
1895 break; 1925 break;
1896 1926
1897 case 42: 1927 case 42:
1898 1928
1899/* Line 1455 of yacc.c */ 1929/* Line 1806 of yacc.c */
1900#line 366 "dtc-parser.y" 1930#line 366 "dtc-parser.y"
1901 { (yyval.integer) = (yyvsp[(1) - (3)].integer) && (yyvsp[(3) - (3)].integer); ;} 1931 { (yyval.integer) = (yyvsp[(1) - (3)].integer) && (yyvsp[(3) - (3)].integer); }
1902 break; 1932 break;
1903 1933
1904 case 44: 1934 case 44:
1905 1935
1906/* Line 1455 of yacc.c */ 1936/* Line 1806 of yacc.c */
1907#line 371 "dtc-parser.y" 1937#line 371 "dtc-parser.y"
1908 { (yyval.integer) = (yyvsp[(1) - (3)].integer) | (yyvsp[(3) - (3)].integer); ;} 1938 { (yyval.integer) = (yyvsp[(1) - (3)].integer) | (yyvsp[(3) - (3)].integer); }
1909 break; 1939 break;
1910 1940
1911 case 46: 1941 case 46:
1912 1942
1913/* Line 1455 of yacc.c */ 1943/* Line 1806 of yacc.c */
1914#line 376 "dtc-parser.y" 1944#line 376 "dtc-parser.y"
1915 { (yyval.integer) = (yyvsp[(1) - (3)].integer) ^ (yyvsp[(3) - (3)].integer); ;} 1945 { (yyval.integer) = (yyvsp[(1) - (3)].integer) ^ (yyvsp[(3) - (3)].integer); }
1916 break; 1946 break;
1917 1947
1918 case 48: 1948 case 48:
1919 1949
1920/* Line 1455 of yacc.c */ 1950/* Line 1806 of yacc.c */
1921#line 381 "dtc-parser.y" 1951#line 381 "dtc-parser.y"
1922 { (yyval.integer) = (yyvsp[(1) - (3)].integer) & (yyvsp[(3) - (3)].integer); ;} 1952 { (yyval.integer) = (yyvsp[(1) - (3)].integer) & (yyvsp[(3) - (3)].integer); }
1923 break; 1953 break;
1924 1954
1925 case 50: 1955 case 50:
1926 1956
1927/* Line 1455 of yacc.c */ 1957/* Line 1806 of yacc.c */
1928#line 386 "dtc-parser.y" 1958#line 386 "dtc-parser.y"
1929 { (yyval.integer) = (yyvsp[(1) - (3)].integer) == (yyvsp[(3) - (3)].integer); ;} 1959 { (yyval.integer) = (yyvsp[(1) - (3)].integer) == (yyvsp[(3) - (3)].integer); }
1930 break; 1960 break;
1931 1961
1932 case 51: 1962 case 51:
1933 1963
1934/* Line 1455 of yacc.c */ 1964/* Line 1806 of yacc.c */
1935#line 387 "dtc-parser.y" 1965#line 387 "dtc-parser.y"
1936 { (yyval.integer) = (yyvsp[(1) - (3)].integer) != (yyvsp[(3) - (3)].integer); ;} 1966 { (yyval.integer) = (yyvsp[(1) - (3)].integer) != (yyvsp[(3) - (3)].integer); }
1937 break; 1967 break;
1938 1968
1939 case 53: 1969 case 53:
1940 1970
1941/* Line 1455 of yacc.c */ 1971/* Line 1806 of yacc.c */
1942#line 392 "dtc-parser.y" 1972#line 392 "dtc-parser.y"
1943 { (yyval.integer) = (yyvsp[(1) - (3)].integer) < (yyvsp[(3) - (3)].integer); ;} 1973 { (yyval.integer) = (yyvsp[(1) - (3)].integer) < (yyvsp[(3) - (3)].integer); }
1944 break; 1974 break;
1945 1975
1946 case 54: 1976 case 54:
1947 1977
1948/* Line 1455 of yacc.c */ 1978/* Line 1806 of yacc.c */
1949#line 393 "dtc-parser.y" 1979#line 393 "dtc-parser.y"
1950 { (yyval.integer) = (yyvsp[(1) - (3)].integer) > (yyvsp[(3) - (3)].integer); ;} 1980 { (yyval.integer) = (yyvsp[(1) - (3)].integer) > (yyvsp[(3) - (3)].integer); }
1951 break; 1981 break;
1952 1982
1953 case 55: 1983 case 55:
1954 1984
1955/* Line 1455 of yacc.c */ 1985/* Line 1806 of yacc.c */
1956#line 394 "dtc-parser.y" 1986#line 394 "dtc-parser.y"
1957 { (yyval.integer) = (yyvsp[(1) - (3)].integer) <= (yyvsp[(3) - (3)].integer); ;} 1987 { (yyval.integer) = (yyvsp[(1) - (3)].integer) <= (yyvsp[(3) - (3)].integer); }
1958 break; 1988 break;
1959 1989
1960 case 56: 1990 case 56:
1961 1991
1962/* Line 1455 of yacc.c */ 1992/* Line 1806 of yacc.c */
1963#line 395 "dtc-parser.y" 1993#line 395 "dtc-parser.y"
1964 { (yyval.integer) = (yyvsp[(1) - (3)].integer) >= (yyvsp[(3) - (3)].integer); ;} 1994 { (yyval.integer) = (yyvsp[(1) - (3)].integer) >= (yyvsp[(3) - (3)].integer); }
1965 break; 1995 break;
1966 1996
1967 case 57: 1997 case 57:
1968 1998
1969/* Line 1455 of yacc.c */ 1999/* Line 1806 of yacc.c */
1970#line 399 "dtc-parser.y" 2000#line 399 "dtc-parser.y"
1971 { (yyval.integer) = (yyvsp[(1) - (3)].integer) << (yyvsp[(3) - (3)].integer); ;} 2001 { (yyval.integer) = (yyvsp[(1) - (3)].integer) << (yyvsp[(3) - (3)].integer); }
1972 break; 2002 break;
1973 2003
1974 case 58: 2004 case 58:
1975 2005
1976/* Line 1455 of yacc.c */ 2006/* Line 1806 of yacc.c */
1977#line 400 "dtc-parser.y" 2007#line 400 "dtc-parser.y"
1978 { (yyval.integer) = (yyvsp[(1) - (3)].integer) >> (yyvsp[(3) - (3)].integer); ;} 2008 { (yyval.integer) = (yyvsp[(1) - (3)].integer) >> (yyvsp[(3) - (3)].integer); }
1979 break; 2009 break;
1980 2010
1981 case 60: 2011 case 60:
1982 2012
1983/* Line 1455 of yacc.c */ 2013/* Line 1806 of yacc.c */
1984#line 405 "dtc-parser.y" 2014#line 405 "dtc-parser.y"
1985 { (yyval.integer) = (yyvsp[(1) - (3)].integer) + (yyvsp[(3) - (3)].integer); ;} 2015 { (yyval.integer) = (yyvsp[(1) - (3)].integer) + (yyvsp[(3) - (3)].integer); }
1986 break; 2016 break;
1987 2017
1988 case 61: 2018 case 61:
1989 2019
1990/* Line 1455 of yacc.c */ 2020/* Line 1806 of yacc.c */
1991#line 406 "dtc-parser.y" 2021#line 406 "dtc-parser.y"
1992 { (yyval.integer) = (yyvsp[(1) - (3)].integer) - (yyvsp[(3) - (3)].integer); ;} 2022 { (yyval.integer) = (yyvsp[(1) - (3)].integer) - (yyvsp[(3) - (3)].integer); }
1993 break; 2023 break;
1994 2024
1995 case 63: 2025 case 63:
1996 2026
1997/* Line 1455 of yacc.c */ 2027/* Line 1806 of yacc.c */
1998#line 411 "dtc-parser.y" 2028#line 411 "dtc-parser.y"
1999 { (yyval.integer) = (yyvsp[(1) - (3)].integer) * (yyvsp[(3) - (3)].integer); ;} 2029 { (yyval.integer) = (yyvsp[(1) - (3)].integer) * (yyvsp[(3) - (3)].integer); }
2000 break; 2030 break;
2001 2031
2002 case 64: 2032 case 64:
2003 2033
2004/* Line 1455 of yacc.c */ 2034/* Line 1806 of yacc.c */
2005#line 412 "dtc-parser.y" 2035#line 412 "dtc-parser.y"
2006 { (yyval.integer) = (yyvsp[(1) - (3)].integer) / (yyvsp[(3) - (3)].integer); ;} 2036 { (yyval.integer) = (yyvsp[(1) - (3)].integer) / (yyvsp[(3) - (3)].integer); }
2007 break; 2037 break;
2008 2038
2009 case 65: 2039 case 65:
2010 2040
2011/* Line 1455 of yacc.c */ 2041/* Line 1806 of yacc.c */
2012#line 413 "dtc-parser.y" 2042#line 413 "dtc-parser.y"
2013 { (yyval.integer) = (yyvsp[(1) - (3)].integer) % (yyvsp[(3) - (3)].integer); ;} 2043 { (yyval.integer) = (yyvsp[(1) - (3)].integer) % (yyvsp[(3) - (3)].integer); }
2014 break; 2044 break;
2015 2045
2016 case 68: 2046 case 68:
2017 2047
2018/* Line 1455 of yacc.c */ 2048/* Line 1806 of yacc.c */
2019#line 419 "dtc-parser.y" 2049#line 419 "dtc-parser.y"
2020 { (yyval.integer) = -(yyvsp[(2) - (2)].integer); ;} 2050 { (yyval.integer) = -(yyvsp[(2) - (2)].integer); }
2021 break; 2051 break;
2022 2052
2023 case 69: 2053 case 69:
2024 2054
2025/* Line 1455 of yacc.c */ 2055/* Line 1806 of yacc.c */
2026#line 420 "dtc-parser.y" 2056#line 420 "dtc-parser.y"
2027 { (yyval.integer) = ~(yyvsp[(2) - (2)].integer); ;} 2057 { (yyval.integer) = ~(yyvsp[(2) - (2)].integer); }
2028 break; 2058 break;
2029 2059
2030 case 70: 2060 case 70:
2031 2061
2032/* Line 1455 of yacc.c */ 2062/* Line 1806 of yacc.c */
2033#line 421 "dtc-parser.y" 2063#line 421 "dtc-parser.y"
2034 { (yyval.integer) = !(yyvsp[(2) - (2)].integer); ;} 2064 { (yyval.integer) = !(yyvsp[(2) - (2)].integer); }
2035 break; 2065 break;
2036 2066
2037 case 71: 2067 case 71:
2038 2068
2039/* Line 1455 of yacc.c */ 2069/* Line 1806 of yacc.c */
2040#line 426 "dtc-parser.y" 2070#line 426 "dtc-parser.y"
2041 { 2071 {
2042 (yyval.data) = empty_data; 2072 (yyval.data) = empty_data;
2043 ;} 2073 }
2044 break; 2074 break;
2045 2075
2046 case 72: 2076 case 72:
2047 2077
2048/* Line 1455 of yacc.c */ 2078/* Line 1806 of yacc.c */
2049#line 430 "dtc-parser.y" 2079#line 430 "dtc-parser.y"
2050 { 2080 {
2051 (yyval.data) = data_append_byte((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].byte)); 2081 (yyval.data) = data_append_byte((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].byte));
2052 ;} 2082 }
2053 break; 2083 break;
2054 2084
2055 case 73: 2085 case 73:
2056 2086
2057/* Line 1455 of yacc.c */ 2087/* Line 1806 of yacc.c */
2058#line 434 "dtc-parser.y" 2088#line 434 "dtc-parser.y"
2059 { 2089 {
2060 (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref)); 2090 (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
2061 ;} 2091 }
2062 break; 2092 break;
2063 2093
2064 case 74: 2094 case 74:
2065 2095
2066/* Line 1455 of yacc.c */ 2096/* Line 1806 of yacc.c */
2067#line 441 "dtc-parser.y" 2097#line 441 "dtc-parser.y"
2068 { 2098 {
2069 (yyval.nodelist) = NULL; 2099 (yyval.nodelist) = NULL;
2070 ;} 2100 }
2071 break; 2101 break;
2072 2102
2073 case 75: 2103 case 75:
2074 2104
2075/* Line 1455 of yacc.c */ 2105/* Line 1806 of yacc.c */
2076#line 445 "dtc-parser.y" 2106#line 445 "dtc-parser.y"
2077 { 2107 {
2078 (yyval.nodelist) = chain_node((yyvsp[(1) - (2)].node), (yyvsp[(2) - (2)].nodelist)); 2108 (yyval.nodelist) = chain_node((yyvsp[(1) - (2)].node), (yyvsp[(2) - (2)].nodelist));
2079 ;} 2109 }
2080 break; 2110 break;
2081 2111
2082 case 76: 2112 case 76:
2083 2113
2084/* Line 1455 of yacc.c */ 2114/* Line 1806 of yacc.c */
2085#line 449 "dtc-parser.y" 2115#line 449 "dtc-parser.y"
2086 { 2116 {
2087 print_error("syntax error: properties must precede subnodes"); 2117 print_error("syntax error: properties must precede subnodes");
2088 YYERROR; 2118 YYERROR;
2089 ;} 2119 }
2090 break; 2120 break;
2091 2121
2092 case 77: 2122 case 77:
2093 2123
2094/* Line 1455 of yacc.c */ 2124/* Line 1806 of yacc.c */
2095#line 457 "dtc-parser.y" 2125#line 457 "dtc-parser.y"
2096 { 2126 {
2097 (yyval.node) = name_node((yyvsp[(2) - (2)].node), (yyvsp[(1) - (2)].propnodename)); 2127 (yyval.node) = name_node((yyvsp[(2) - (2)].node), (yyvsp[(1) - (2)].propnodename));
2098 ;} 2128 }
2099 break; 2129 break;
2100 2130
2101 case 78: 2131 case 78:
2102 2132
2103/* Line 1455 of yacc.c */ 2133/* Line 1806 of yacc.c */
2104#line 461 "dtc-parser.y" 2134#line 461 "dtc-parser.y"
2105 { 2135 {
2106 (yyval.node) = name_node(build_node_delete(), (yyvsp[(2) - (3)].propnodename)); 2136 (yyval.node) = name_node(build_node_delete(), (yyvsp[(2) - (3)].propnodename));
2107 ;} 2137 }
2108 break; 2138 break;
2109 2139
2110 case 79: 2140 case 79:
2111 2141
2112/* Line 1455 of yacc.c */ 2142/* Line 1806 of yacc.c */
2113#line 465 "dtc-parser.y" 2143#line 465 "dtc-parser.y"
2114 { 2144 {
2115 add_label(&(yyvsp[(2) - (2)].node)->labels, (yyvsp[(1) - (2)].labelref)); 2145 add_label(&(yyvsp[(2) - (2)].node)->labels, (yyvsp[(1) - (2)].labelref));
2116 (yyval.node) = (yyvsp[(2) - (2)].node); 2146 (yyval.node) = (yyvsp[(2) - (2)].node);
2117 ;} 2147 }
2118 break; 2148 break;
2119 2149
2120 2150
2121 2151
2122/* Line 1455 of yacc.c */ 2152/* Line 1806 of yacc.c */
2123#line 2124 "dtc-parser.tab.c" 2153#line 2154 "dtc-parser.tab.c"
2124 default: break; 2154 default: break;
2125 } 2155 }
2156 /* User semantic actions sometimes alter yychar, and that requires
2157 that yytoken be updated with the new translation. We take the
2158 approach of translating immediately before every use of yytoken.
2159 One alternative is translating here after every semantic action,
2160 but that translation would be missed if the semantic action invokes
2161 YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or
2162 if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an
2163 incorrect destructor might then be invoked immediately. In the
2164 case of YYERROR or YYBACKUP, subsequent parser actions might lead
2165 to an incorrect destructor call or verbose syntax error message
2166 before the lookahead is translated. */
2126 YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc); 2167 YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
2127 2168
2128 YYPOPSTACK (yylen); 2169 YYPOPSTACK (yylen);
@@ -2150,6 +2191,10 @@ yyreduce:
2150| yyerrlab -- here on detecting error | 2191| yyerrlab -- here on detecting error |
2151`------------------------------------*/ 2192`------------------------------------*/
2152yyerrlab: 2193yyerrlab:
2194 /* Make sure we have latest lookahead translation. See comments at
2195 user semantic actions for why this is necessary. */
2196 yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar);
2197
2153 /* If not already recovering from an error, report this error. */ 2198 /* If not already recovering from an error, report this error. */
2154 if (!yyerrstatus) 2199 if (!yyerrstatus)
2155 { 2200 {
@@ -2157,37 +2202,36 @@ yyerrlab:
2157#if ! YYERROR_VERBOSE 2202#if ! YYERROR_VERBOSE
2158 yyerror (YY_("syntax error")); 2203 yyerror (YY_("syntax error"));
2159#else 2204#else
2205# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \
2206 yyssp, yytoken)
2160 { 2207 {
2161 YYSIZE_T yysize = yysyntax_error (0, yystate, yychar); 2208 char const *yymsgp = YY_("syntax error");
2162 if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM) 2209 int yysyntax_error_status;
2163 { 2210 yysyntax_error_status = YYSYNTAX_ERROR;
2164 YYSIZE_T yyalloc = 2 * yysize; 2211 if (yysyntax_error_status == 0)
2165 if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM)) 2212 yymsgp = yymsg;
2166 yyalloc = YYSTACK_ALLOC_MAXIMUM; 2213 else if (yysyntax_error_status == 1)
2167 if (yymsg != yymsgbuf) 2214 {
2168 YYSTACK_FREE (yymsg); 2215 if (yymsg != yymsgbuf)
2169 yymsg = (char *) YYSTACK_ALLOC (yyalloc); 2216 YYSTACK_FREE (yymsg);
2170 if (yymsg) 2217 yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc);
2171 yymsg_alloc = yyalloc; 2218 if (!yymsg)
2172 else 2219 {
2173 { 2220 yymsg = yymsgbuf;
2174 yymsg = yymsgbuf; 2221 yymsg_alloc = sizeof yymsgbuf;
2175 yymsg_alloc = sizeof yymsgbuf; 2222 yysyntax_error_status = 2;
2176 } 2223 }
2177 } 2224 else
2178 2225 {
2179 if (0 < yysize && yysize <= yymsg_alloc) 2226 yysyntax_error_status = YYSYNTAX_ERROR;
2180 { 2227 yymsgp = yymsg;
2181 (void) yysyntax_error (yymsg, yystate, yychar); 2228 }
2182 yyerror (yymsg); 2229 }
2183 } 2230 yyerror (yymsgp);
2184 else 2231 if (yysyntax_error_status == 2)
2185 { 2232 goto yyexhaustedlab;
2186 yyerror (YY_("syntax error"));
2187 if (yysize != 0)
2188 goto yyexhaustedlab;
2189 }
2190 } 2233 }
2234# undef YYSYNTAX_ERROR
2191#endif 2235#endif
2192 } 2236 }
2193 2237
@@ -2246,7 +2290,7 @@ yyerrlab1:
2246 for (;;) 2290 for (;;)
2247 { 2291 {
2248 yyn = yypact[yystate]; 2292 yyn = yypact[yystate];
2249 if (yyn != YYPACT_NINF) 2293 if (!yypact_value_is_default (yyn))
2250 { 2294 {
2251 yyn += YYTERROR; 2295 yyn += YYTERROR;
2252 if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) 2296 if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
@@ -2305,8 +2349,13 @@ yyexhaustedlab:
2305 2349
2306yyreturn: 2350yyreturn:
2307 if (yychar != YYEMPTY) 2351 if (yychar != YYEMPTY)
2308 yydestruct ("Cleanup: discarding lookahead", 2352 {
2309 yytoken, &yylval); 2353 /* Make sure we have latest lookahead translation. See comments at
2354 user semantic actions for why this is necessary. */
2355 yytoken = YYTRANSLATE (yychar);
2356 yydestruct ("Cleanup: discarding lookahead",
2357 yytoken, &yylval);
2358 }
2310 /* Do not reclaim the symbols of the rule which action triggered 2359 /* Do not reclaim the symbols of the rule which action triggered
2311 this YYABORT or YYACCEPT. */ 2360 this YYABORT or YYACCEPT. */
2312 YYPOPSTACK (yylen); 2361 YYPOPSTACK (yylen);
@@ -2331,7 +2380,7 @@ yyreturn:
2331 2380
2332 2381
2333 2382
2334/* Line 1675 of yacc.c */ 2383/* Line 2067 of yacc.c */
2335#line 471 "dtc-parser.y" 2384#line 471 "dtc-parser.y"
2336 2385
2337 2386
diff --git a/scripts/dtc/dtc-parser.tab.h_shipped b/scripts/dtc/dtc-parser.tab.h_shipped
index 9d2dce41211f..25d3b88c6132 100644
--- a/scripts/dtc/dtc-parser.tab.h_shipped
+++ b/scripts/dtc/dtc-parser.tab.h_shipped
@@ -1,10 +1,8 @@
1/* A Bison parser, made by GNU Bison 2.5. */
1 2
2/* A Bison parser, made by GNU Bison 2.4.1. */ 3/* Bison interface for Yacc-like parsers in C
3
4/* Skeleton interface for Bison's Yacc-like parsers in C
5 4
6 Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006 5 Copyright (C) 1984, 1989-1990, 2000-2011 Free Software Foundation, Inc.
7 Free Software Foundation, Inc.
8 6
9 This program is free software: you can redistribute it and/or modify 7 This program is free software: you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by 8 it under the terms of the GNU General Public License as published by
@@ -70,7 +68,7 @@
70typedef union YYSTYPE 68typedef union YYSTYPE
71{ 69{
72 70
73/* Line 1676 of yacc.c */ 71/* Line 2068 of yacc.c */
74#line 40 "dtc-parser.y" 72#line 40 "dtc-parser.y"
75 73
76 char *propnodename; 74 char *propnodename;
@@ -94,8 +92,8 @@ typedef union YYSTYPE
94 92
95 93
96 94
97/* Line 1676 of yacc.c */ 95/* Line 2068 of yacc.c */
98#line 99 "dtc-parser.tab.h" 96#line 97 "dtc-parser.tab.h"
99} YYSTYPE; 97} YYSTYPE;
100# define YYSTYPE_IS_TRIVIAL 1 98# define YYSTYPE_IS_TRIVIAL 1
101# define yystype YYSTYPE /* obsolescent; will be withdrawn */ 99# define yystype YYSTYPE /* obsolescent; will be withdrawn */
diff --git a/scripts/kconfig/lxdialog/menubox.c b/scripts/kconfig/lxdialog/menubox.c
index 48d382e7e374..38cd69c5660e 100644
--- a/scripts/kconfig/lxdialog/menubox.c
+++ b/scripts/kconfig/lxdialog/menubox.c
@@ -303,10 +303,11 @@ do_resize:
303 } 303 }
304 } 304 }
305 305
306 if (i < max_choice || 306 if (item_count() != 0 &&
307 key == KEY_UP || key == KEY_DOWN || 307 (i < max_choice ||
308 key == '-' || key == '+' || 308 key == KEY_UP || key == KEY_DOWN ||
309 key == KEY_PPAGE || key == KEY_NPAGE) { 309 key == '-' || key == '+' ||
310 key == KEY_PPAGE || key == KEY_NPAGE)) {
310 /* Remove highligt of current item */ 311 /* Remove highligt of current item */
311 print_item(scroll + choice, choice, FALSE); 312 print_item(scroll + choice, choice, FALSE);
312 313
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 387dc8daf7b2..a69cbd78fb38 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -670,11 +670,12 @@ static void conf(struct menu *menu, struct menu *active_menu)
670 active_menu, &s_scroll); 670 active_menu, &s_scroll);
671 if (res == 1 || res == KEY_ESC || res == -ERRDISPLAYTOOSMALL) 671 if (res == 1 || res == KEY_ESC || res == -ERRDISPLAYTOOSMALL)
672 break; 672 break;
673 if (!item_activate_selected()) 673 if (item_count() != 0) {
674 continue; 674 if (!item_activate_selected())
675 if (!item_tag()) 675 continue;
676 continue; 676 if (!item_tag())
677 677 continue;
678 }
678 submenu = item_data(); 679 submenu = item_data();
679 active_menu = item_data(); 680 active_menu = item_data();
680 if (submenu) 681 if (submenu)
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index b5c7d90df9df..fd3f0180e08f 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -146,11 +146,24 @@ struct property *menu_add_prop(enum prop_type type, char *prompt, struct expr *e
146 struct menu *menu = current_entry; 146 struct menu *menu = current_entry;
147 147
148 while ((menu = menu->parent) != NULL) { 148 while ((menu = menu->parent) != NULL) {
149 struct expr *dup_expr;
150
149 if (!menu->visibility) 151 if (!menu->visibility)
150 continue; 152 continue;
153 /*
154 * Do not add a reference to the
155 * menu's visibility expression but
156 * use a copy of it. Otherwise the
157 * expression reduction functions
158 * will modify expressions that have
159 * multiple references which can
160 * cause unwanted side effects.
161 */
162 dup_expr = expr_copy(menu->visibility);
163
151 prop->visible.expr 164 prop->visible.expr
152 = expr_alloc_and(prop->visible.expr, 165 = expr_alloc_and(prop->visible.expr,
153 menu->visibility); 166 dup_expr);
154 } 167 }
155 } 168 }
156 169
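
Note on the kconfig hunk above: the comment it adds states that the expression-reduction functions modify expressions that have multiple references, so a shared visibility expression must be duplicated with expr_copy() before expr_alloc_and() combines it. A minimal userspace sketch of that copy-before-combine rule; the struct and helper names below are illustrative stand-ins, not kconfig's API:

#include <stdlib.h>

struct node { int op; struct node *l, *r; };

static struct node *node_new(int op, struct node *l, struct node *r)
{
	struct node *n = malloc(sizeof(*n));
	if (!n)
		abort();
	n->op = op; n->l = l; n->r = r;
	return n;
}

/* Deep copy, analogous in spirit to expr_copy(). */
static struct node *node_copy(const struct node *n)
{
	return n ? node_new(n->op, node_copy(n->l), node_copy(n->r)) : NULL;
}

/* combine() owns both arguments and may later rewrite or free them,
 * like expr_alloc_and() followed by expression reduction. */
static struct node *combine(struct node *a, struct node *b)
{
	return node_new(1, a, b);
}

static struct node *attach_visibility(struct node *prop_expr,
				      const struct node *shared_vis)
{
	/* Passing shared_vis directly would let the reducer mutate a tree
	 * that other owners still reference; hand over a private copy. */
	return combine(prop_expr, node_copy(shared_vis));
}
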
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 8ab295154517..d03081886214 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -316,6 +316,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
316 316
317 memcpy(new_ctx, old_ctx, sizeof(*new_ctx)); 317 memcpy(new_ctx, old_ctx, sizeof(*new_ctx));
318 memcpy(new_ctx->ctx_str, old_ctx->ctx_str, new_ctx->ctx_len); 318 memcpy(new_ctx->ctx_str, old_ctx->ctx_str, new_ctx->ctx_len);
319 atomic_inc(&selinux_xfrm_refcount);
319 *new_ctxp = new_ctx; 320 *new_ctxp = new_ctx;
320 } 321 }
321 return 0; 322 return 0;
@@ -326,6 +327,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
326 */ 327 */
327void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx) 328void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
328{ 329{
330 atomic_dec(&selinux_xfrm_refcount);
329 kfree(ctx); 331 kfree(ctx);
330} 332}
331 333
@@ -335,17 +337,13 @@ void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
335int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx) 337int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
336{ 338{
337 const struct task_security_struct *tsec = current_security(); 339 const struct task_security_struct *tsec = current_security();
338 int rc = 0;
339 340
340 if (ctx) { 341 if (!ctx)
341 rc = avc_has_perm(tsec->sid, ctx->ctx_sid, 342 return 0;
342 SECCLASS_ASSOCIATION,
343 ASSOCIATION__SETCONTEXT, NULL);
344 if (rc == 0)
345 atomic_dec(&selinux_xfrm_refcount);
346 }
347 343
348 return rc; 344 return avc_has_perm(tsec->sid, ctx->ctx_sid,
345 SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
346 NULL);
349} 347}
350 348
351/* 349/*
@@ -370,8 +368,8 @@ int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uct
370 */ 368 */
371void selinux_xfrm_state_free(struct xfrm_state *x) 369void selinux_xfrm_state_free(struct xfrm_state *x)
372{ 370{
373 struct xfrm_sec_ctx *ctx = x->security; 371 atomic_dec(&selinux_xfrm_refcount);
374 kfree(ctx); 372 kfree(x->security);
375} 373}
376 374
377 /* 375 /*
@@ -381,17 +379,13 @@ int selinux_xfrm_state_delete(struct xfrm_state *x)
381{ 379{
382 const struct task_security_struct *tsec = current_security(); 380 const struct task_security_struct *tsec = current_security();
383 struct xfrm_sec_ctx *ctx = x->security; 381 struct xfrm_sec_ctx *ctx = x->security;
384 int rc = 0;
385 382
386 if (ctx) { 383 if (!ctx)
387 rc = avc_has_perm(tsec->sid, ctx->ctx_sid, 384 return 0;
388 SECCLASS_ASSOCIATION,
389 ASSOCIATION__SETCONTEXT, NULL);
390 if (rc == 0)
391 atomic_dec(&selinux_xfrm_refcount);
392 }
393 385
394 return rc; 386 return avc_has_perm(tsec->sid, ctx->ctx_sid,
387 SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
388 NULL);
395} 389}
396 390
397/* 391/*
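
Note on the SELinux xfrm hunks above: the refcount handling becomes symmetric. atomic_inc(&selinux_xfrm_refcount) is now taken when a context is cloned, atomic_dec() moves into the unconditional free paths, and the *_delete() helpers shrink to the avc_has_perm() policy check alone. A compact userspace analogue of that pairing, using C11 atomics instead of the kernel's atomic_t; all names here are illustrative:

#include <stdatomic.h>
#include <stdlib.h>

static atomic_int live_ctx = 0;

struct ctx { int id; };

static int check_permission(const struct ctx *c)
{
	(void)c;
	return 0;			/* stand-in for avc_has_perm() */
}

/* Constructors (alloc, clone, ...) bump the count ... */
static struct ctx *ctx_clone(const struct ctx *old)
{
	struct ctx *c = malloc(sizeof(*c));
	if (!c)
		return NULL;
	*c = *old;
	atomic_fetch_add(&live_ctx, 1);
	return c;
}

/* ... and the single destructor drops it, unconditionally. */
static void ctx_free(struct ctx *c)
{
	atomic_fetch_sub(&live_ctx, 1);
	free(c);
}

/* "delete" only asks policy; it neither frees nor touches the count. */
static int ctx_delete(const struct ctx *c)
{
	return c ? check_permission(c) : 0;
}
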
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index ccfa383f1fda..f92818155958 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1649,6 +1649,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
1649 } 1649 }
1650 if (!snd_pcm_stream_linked(substream)) { 1650 if (!snd_pcm_stream_linked(substream)) {
1651 substream->group = group; 1651 substream->group = group;
1652 group = NULL;
1652 spin_lock_init(&substream->group->lock); 1653 spin_lock_init(&substream->group->lock);
1653 INIT_LIST_HEAD(&substream->group->substreams); 1654 INIT_LIST_HEAD(&substream->group->substreams);
1654 list_add_tail(&substream->link_list, &substream->group->substreams); 1655 list_add_tail(&substream->link_list, &substream->group->substreams);
@@ -1663,8 +1664,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
1663 _nolock: 1664 _nolock:
1664 snd_card_unref(substream1->pcm->card); 1665 snd_card_unref(substream1->pcm->card);
1665 fput_light(file, fput_needed); 1666 fput_light(file, fput_needed);
1666 if (res < 0) 1667 kfree(group);
1667 kfree(group);
1668 return res; 1668 return res;
1669} 1669}
1670 1670
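
Note on the snd_pcm_link() hunk above: it is the ownership-transfer idiom. Once the freshly allocated group is installed into the substream, the local pointer is set to NULL, so the single unconditional kfree(group) at the exit path frees it only when the function still owns it (kfree(NULL), like free(NULL), is a no-op). A minimal sketch of the pattern with plain malloc/free; names are illustrative:

#include <stdlib.h>

struct group { int refs; };
struct stream { struct group *group; };

static int link_stream(struct stream *s, int should_adopt)
{
	int err = 0;
	struct group *group = malloc(sizeof(*group));

	if (!group)
		return -1;

	if (should_adopt) {
		s->group = group;	/* ownership moves to the stream */
		group = NULL;		/* ... so the cleanup below is a no-op */
	} else {
		err = -2;		/* any other path keeps local ownership */
	}

	free(group);			/* frees only if still locally owned */
	return err;
}
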
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index ae85bbd2e6f8..4b1524a861f3 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -788,6 +788,8 @@ static void set_pin_eapd(struct hda_codec *codec, hda_nid_t pin, bool enable)
788 return; 788 return;
789 if (codec->inv_eapd) 789 if (codec->inv_eapd)
790 enable = !enable; 790 enable = !enable;
791 if (spec->keep_eapd_on && !enable)
792 return;
791 snd_hda_codec_update_cache(codec, pin, 0, 793 snd_hda_codec_update_cache(codec, pin, 0,
792 AC_VERB_SET_EAPD_BTLENABLE, 794 AC_VERB_SET_EAPD_BTLENABLE,
793 enable ? 0x02 : 0x00); 795 enable ? 0x02 : 0x00);
@@ -1938,17 +1940,7 @@ static int create_speaker_out_ctls(struct hda_codec *codec)
1938 * independent HP controls 1940 * independent HP controls
1939 */ 1941 */
1940 1942
1941/* update HP auto-mute state too */ 1943static void call_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *jack);
1942static void update_hp_automute_hook(struct hda_codec *codec)
1943{
1944 struct hda_gen_spec *spec = codec->spec;
1945
1946 if (spec->hp_automute_hook)
1947 spec->hp_automute_hook(codec, NULL);
1948 else
1949 snd_hda_gen_hp_automute(codec, NULL);
1950}
1951
1952static int indep_hp_info(struct snd_kcontrol *kcontrol, 1944static int indep_hp_info(struct snd_kcontrol *kcontrol,
1953 struct snd_ctl_elem_info *uinfo) 1945 struct snd_ctl_elem_info *uinfo)
1954{ 1946{
@@ -2009,7 +2001,7 @@ static int indep_hp_put(struct snd_kcontrol *kcontrol,
2009 else 2001 else
2010 *dacp = spec->alt_dac_nid; 2002 *dacp = spec->alt_dac_nid;
2011 2003
2012 update_hp_automute_hook(codec); 2004 call_hp_automute(codec, NULL);
2013 ret = 1; 2005 ret = 1;
2014 } 2006 }
2015 unlock: 2007 unlock:
@@ -2305,7 +2297,7 @@ static void update_hp_mic(struct hda_codec *codec, int adc_mux, bool force)
2305 else 2297 else
2306 val = PIN_HP; 2298 val = PIN_HP;
2307 set_pin_target(codec, pin, val, true); 2299 set_pin_target(codec, pin, val, true);
2308 update_hp_automute_hook(codec); 2300 call_hp_automute(codec, NULL);
2309 } 2301 }
2310} 2302}
2311 2303
@@ -2714,7 +2706,7 @@ static int hp_mic_jack_mode_put(struct snd_kcontrol *kcontrol,
2714 val = snd_hda_get_default_vref(codec, nid); 2706 val = snd_hda_get_default_vref(codec, nid);
2715 } 2707 }
2716 snd_hda_set_pin_ctl_cache(codec, nid, val); 2708 snd_hda_set_pin_ctl_cache(codec, nid, val);
2717 update_hp_automute_hook(codec); 2709 call_hp_automute(codec, NULL);
2718 2710
2719 return 1; 2711 return 1;
2720} 2712}
@@ -3859,20 +3851,42 @@ void snd_hda_gen_mic_autoswitch(struct hda_codec *codec, struct hda_jack_tbl *ja
3859} 3851}
3860EXPORT_SYMBOL_HDA(snd_hda_gen_mic_autoswitch); 3852EXPORT_SYMBOL_HDA(snd_hda_gen_mic_autoswitch);
3861 3853
3862/* update jack retasking */ 3854/* call appropriate hooks */
3863static void update_automute_all(struct hda_codec *codec) 3855static void call_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *jack)
3864{ 3856{
3865 struct hda_gen_spec *spec = codec->spec; 3857 struct hda_gen_spec *spec = codec->spec;
3858 if (spec->hp_automute_hook)
3859 spec->hp_automute_hook(codec, jack);
3860 else
3861 snd_hda_gen_hp_automute(codec, jack);
3862}
3866 3863
3867 update_hp_automute_hook(codec); 3864static void call_line_automute(struct hda_codec *codec,
3865 struct hda_jack_tbl *jack)
3866{
3867 struct hda_gen_spec *spec = codec->spec;
3868 if (spec->line_automute_hook) 3868 if (spec->line_automute_hook)
3869 spec->line_automute_hook(codec, NULL); 3869 spec->line_automute_hook(codec, jack);
3870 else 3870 else
3871 snd_hda_gen_line_automute(codec, NULL); 3871 snd_hda_gen_line_automute(codec, jack);
3872}
3873
3874static void call_mic_autoswitch(struct hda_codec *codec,
3875 struct hda_jack_tbl *jack)
3876{
3877 struct hda_gen_spec *spec = codec->spec;
3872 if (spec->mic_autoswitch_hook) 3878 if (spec->mic_autoswitch_hook)
3873 spec->mic_autoswitch_hook(codec, NULL); 3879 spec->mic_autoswitch_hook(codec, jack);
3874 else 3880 else
3875 snd_hda_gen_mic_autoswitch(codec, NULL); 3881 snd_hda_gen_mic_autoswitch(codec, jack);
3882}
3883
3884/* update jack retasking */
3885static void update_automute_all(struct hda_codec *codec)
3886{
3887 call_hp_automute(codec, NULL);
3888 call_line_automute(codec, NULL);
3889 call_mic_autoswitch(codec, NULL);
3876} 3890}
3877 3891
3878/* 3892/*
@@ -4009,9 +4023,7 @@ static int check_auto_mute_availability(struct hda_codec *codec)
4009 snd_printdd("hda-codec: Enable HP auto-muting on NID 0x%x\n", 4023 snd_printdd("hda-codec: Enable HP auto-muting on NID 0x%x\n",
4010 nid); 4024 nid);
4011 snd_hda_jack_detect_enable_callback(codec, nid, HDA_GEN_HP_EVENT, 4025 snd_hda_jack_detect_enable_callback(codec, nid, HDA_GEN_HP_EVENT,
4012 spec->hp_automute_hook ? 4026 call_hp_automute);
4013 spec->hp_automute_hook :
4014 snd_hda_gen_hp_automute);
4015 spec->detect_hp = 1; 4027 spec->detect_hp = 1;
4016 } 4028 }
4017 4029
@@ -4024,9 +4036,7 @@ static int check_auto_mute_availability(struct hda_codec *codec)
4024 snd_printdd("hda-codec: Enable Line-Out auto-muting on NID 0x%x\n", nid); 4036 snd_printdd("hda-codec: Enable Line-Out auto-muting on NID 0x%x\n", nid);
4025 snd_hda_jack_detect_enable_callback(codec, nid, 4037 snd_hda_jack_detect_enable_callback(codec, nid,
4026 HDA_GEN_FRONT_EVENT, 4038 HDA_GEN_FRONT_EVENT,
4027 spec->line_automute_hook ? 4039 call_line_automute);
4028 spec->line_automute_hook :
4029 snd_hda_gen_line_automute);
4030 spec->detect_lo = 1; 4040 spec->detect_lo = 1;
4031 } 4041 }
4032 spec->automute_lo_possible = spec->detect_hp; 4042 spec->automute_lo_possible = spec->detect_hp;
@@ -4068,9 +4078,7 @@ static bool auto_mic_check_imux(struct hda_codec *codec)
4068 snd_hda_jack_detect_enable_callback(codec, 4078 snd_hda_jack_detect_enable_callback(codec,
4069 spec->am_entry[i].pin, 4079 spec->am_entry[i].pin,
4070 HDA_GEN_MIC_EVENT, 4080 HDA_GEN_MIC_EVENT,
4071 spec->mic_autoswitch_hook ? 4081 call_mic_autoswitch);
4072 spec->mic_autoswitch_hook :
4073 snd_hda_gen_mic_autoswitch);
4074 return true; 4082 return true;
4075} 4083}
4076 4084
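
Note on the hda_generic refactor above: the "hook ? hook : default" ternaries at each registration site are replaced by call_hp_automute(), call_line_automute() and call_mic_autoswitch(), wrappers that themselves have the jack-callback signature, so one function can be registered directly and the jack argument is forwarded instead of being hard-coded to NULL. A stripped-down sketch of that dispatch shape; the types and names below are stand-ins, not the HDA API:

#include <stdio.h>

struct codec;
struct jack;

typedef void (*jack_cb)(struct codec *c, struct jack *j);

struct spec {
	jack_cb hp_automute_hook;	/* optional override */
};

struct codec {
	struct spec *spec;
};

static void default_hp_automute(struct codec *c, struct jack *j)
{
	(void)c; (void)j;
	puts("default hp automute");
}

/* Wrapper with the callback signature: usable both as the registered
 * jack callback and as a direct call with jack == NULL. */
static void call_hp_automute(struct codec *c, struct jack *j)
{
	if (c->spec->hp_automute_hook)
		c->spec->hp_automute_hook(c, j);
	else
		default_hp_automute(c, j);
}
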
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 54e665160379..76200314ee95 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -222,6 +222,7 @@ struct hda_gen_spec {
222 unsigned int multi_cap_vol:1; /* allow multiple capture xxx volumes */ 222 unsigned int multi_cap_vol:1; /* allow multiple capture xxx volumes */
223 unsigned int inv_dmic_split:1; /* inverted dmic w/a for conexant */ 223 unsigned int inv_dmic_split:1; /* inverted dmic w/a for conexant */
224 unsigned int own_eapd_ctl:1; /* set EAPD by own function */ 224 unsigned int own_eapd_ctl:1; /* set EAPD by own function */
225 unsigned int keep_eapd_on:1; /* don't turn off EAPD automatically */
225 unsigned int vmaster_mute_enum:1; /* add vmaster mute mode enum */ 226 unsigned int vmaster_mute_enum:1; /* add vmaster mute mode enum */
226 unsigned int indep_hp:1; /* independent HP supported */ 227 unsigned int indep_hp:1; /* independent HP supported */
227 unsigned int prefer_hp_amp:1; /* enable HP amp for speaker if any */ 228 unsigned int prefer_hp_amp:1; /* enable HP amp for speaker if any */
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index bd8d46cca2b3..cccaf9c7a7bb 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -58,6 +58,7 @@ enum {
58 CS420X_GPIO_23, 58 CS420X_GPIO_23,
59 CS420X_MBP101, 59 CS420X_MBP101,
60 CS420X_MBP81, 60 CS420X_MBP81,
61 CS420X_MBA42,
61 CS420X_AUTO, 62 CS420X_AUTO,
62 /* aliases */ 63 /* aliases */
63 CS420X_IMAC27_122 = CS420X_GPIO_23, 64 CS420X_IMAC27_122 = CS420X_GPIO_23,
@@ -346,6 +347,7 @@ static const struct hda_model_fixup cs420x_models[] = {
346 { .id = CS420X_APPLE, .name = "apple" }, 347 { .id = CS420X_APPLE, .name = "apple" },
347 { .id = CS420X_MBP101, .name = "mbp101" }, 348 { .id = CS420X_MBP101, .name = "mbp101" },
348 { .id = CS420X_MBP81, .name = "mbp81" }, 349 { .id = CS420X_MBP81, .name = "mbp81" },
350 { .id = CS420X_MBA42, .name = "mba42" },
349 {} 351 {}
350}; 352};
351 353
@@ -361,6 +363,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
361 SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81), 363 SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
362 SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122), 364 SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
363 SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101), 365 SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
366 SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42),
364 SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE), 367 SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
365 {} /* terminator */ 368 {} /* terminator */
366}; 369};
@@ -414,6 +417,20 @@ static const struct hda_pintbl mbp101_pincfgs[] = {
414 {} /* terminator */ 417 {} /* terminator */
415}; 418};
416 419
420static const struct hda_pintbl mba42_pincfgs[] = {
421 { 0x09, 0x012b4030 }, /* HP */
422 { 0x0a, 0x400000f0 },
423 { 0x0b, 0x90100120 }, /* speaker */
424 { 0x0c, 0x400000f0 },
425 { 0x0d, 0x90a00110 }, /* mic */
426 { 0x0e, 0x400000f0 },
427 { 0x0f, 0x400000f0 },
428 { 0x10, 0x400000f0 },
429 { 0x12, 0x400000f0 },
430 { 0x15, 0x400000f0 },
431 {} /* terminator */
432};
433
417static void cs420x_fixup_gpio_13(struct hda_codec *codec, 434static void cs420x_fixup_gpio_13(struct hda_codec *codec,
418 const struct hda_fixup *fix, int action) 435 const struct hda_fixup *fix, int action)
419{ 436{
@@ -482,6 +499,12 @@ static const struct hda_fixup cs420x_fixups[] = {
482 .chained = true, 499 .chained = true,
483 .chain_id = CS420X_GPIO_13, 500 .chain_id = CS420X_GPIO_13,
484 }, 501 },
502 [CS420X_MBA42] = {
503 .type = HDA_FIXUP_PINS,
504 .v.pins = mba42_pincfgs,
505 .chained = true,
506 .chain_id = CS420X_GPIO_13,
507 },
485}; 508};
486 509
487static struct cs_spec *cs_alloc_spec(struct hda_codec *codec, int vendor_nid) 510static struct cs_spec *cs_alloc_spec(struct hda_codec *codec, int vendor_nid)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 59d2e91a9ab6..403010c9e82e 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3483,6 +3483,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3483 SND_PCI_QUIRK(0x1028, 0x05ca, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 3483 SND_PCI_QUIRK(0x1028, 0x05ca, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
3484 SND_PCI_QUIRK(0x1028, 0x05cb, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 3484 SND_PCI_QUIRK(0x1028, 0x05cb, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
3485 SND_PCI_QUIRK(0x1028, 0x05de, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 3485 SND_PCI_QUIRK(0x1028, 0x05de, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
3486 SND_PCI_QUIRK(0x1028, 0x05e0, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
3486 SND_PCI_QUIRK(0x1028, 0x05e9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 3487 SND_PCI_QUIRK(0x1028, 0x05e9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
3487 SND_PCI_QUIRK(0x1028, 0x05ea, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 3488 SND_PCI_QUIRK(0x1028, 0x05ea, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
3488 SND_PCI_QUIRK(0x1028, 0x05eb, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 3489 SND_PCI_QUIRK(0x1028, 0x05eb, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -3493,6 +3494,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3493 SND_PCI_QUIRK(0x1028, 0x05f4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 3494 SND_PCI_QUIRK(0x1028, 0x05f4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
3494 SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 3495 SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
3495 SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 3496 SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
3497 SND_PCI_QUIRK(0x1028, 0x05f8, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
3498 SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
3499 SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
3500 SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
3496 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), 3501 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
3497 SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED), 3502 SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
3498 SND_PCI_QUIRK(0x103c, 0x1973, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1), 3503 SND_PCI_QUIRK(0x103c, 0x1973, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
@@ -3530,6 +3535,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3530 SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK), 3535 SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
3531 SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK), 3536 SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
3532 SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK), 3537 SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
3538 SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
3533 SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK), 3539 SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
3534 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), 3540 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
3535 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), 3541 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
@@ -3593,6 +3599,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
3593 {.id = ALC269_FIXUP_INV_DMIC, .name = "inv-dmic"}, 3599 {.id = ALC269_FIXUP_INV_DMIC, .name = "inv-dmic"},
3594 {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"}, 3600 {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
3595 {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"}, 3601 {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
3602 {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
3603 {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
3596 {} 3604 {}
3597}; 3605};
3598 3606
@@ -4272,6 +4280,7 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
4272 {.id = ALC662_FIXUP_ASUS_MODE7, .name = "asus-mode7"}, 4280 {.id = ALC662_FIXUP_ASUS_MODE7, .name = "asus-mode7"},
4273 {.id = ALC662_FIXUP_ASUS_MODE8, .name = "asus-mode8"}, 4281 {.id = ALC662_FIXUP_ASUS_MODE8, .name = "asus-mode8"},
4274 {.id = ALC662_FIXUP_INV_DMIC, .name = "inv-dmic"}, 4282 {.id = ALC662_FIXUP_INV_DMIC, .name = "inv-dmic"},
4283 {.id = ALC668_FIXUP_DELL_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
4275 {} 4284 {}
4276}; 4285};
4277 4286
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index e0dadcf2030d..e5245544eb52 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -136,6 +136,7 @@ static struct via_spec *via_new_spec(struct hda_codec *codec)
136 spec->codec_type = VT1708S; 136 spec->codec_type = VT1708S;
137 spec->no_pin_power_ctl = 1; 137 spec->no_pin_power_ctl = 1;
138 spec->gen.indep_hp = 1; 138 spec->gen.indep_hp = 1;
139 spec->gen.keep_eapd_on = 1;
139 spec->gen.pcm_playback_hook = via_playback_pcm_hook; 140 spec->gen.pcm_playback_hook = via_playback_pcm_hook;
140 return spec; 141 return spec;
141} 142}
@@ -231,9 +232,14 @@ static void vt1708_update_hp_work(struct hda_codec *codec)
231 232
232static void set_widgets_power_state(struct hda_codec *codec) 233static void set_widgets_power_state(struct hda_codec *codec)
233{ 234{
235#if 0 /* FIXME: the assumed connections don't match always with the
236 * actual routes by the generic parser, so better to disable
237 * the control for safety.
238 */
234 struct via_spec *spec = codec->spec; 239 struct via_spec *spec = codec->spec;
235 if (spec->set_widgets_power_state) 240 if (spec->set_widgets_power_state)
236 spec->set_widgets_power_state(codec); 241 spec->set_widgets_power_state(codec);
242#endif
237} 243}
238 244
239static void update_power_state(struct hda_codec *codec, hda_nid_t nid, 245static void update_power_state(struct hda_codec *codec, hda_nid_t nid,
@@ -478,7 +484,9 @@ static int via_suspend(struct hda_codec *codec)
478 /* Fix pop noise on headphones */ 484 /* Fix pop noise on headphones */
479 int i; 485 int i;
480 for (i = 0; i < spec->gen.autocfg.hp_outs; i++) 486 for (i = 0; i < spec->gen.autocfg.hp_outs; i++)
481 snd_hda_set_pin_ctl(codec, spec->gen.autocfg.hp_pins[i], 0); 487 snd_hda_codec_write(codec, spec->gen.autocfg.hp_pins[i],
488 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
489 0x00);
482 } 490 }
483 491
484 return 0; 492 return 0;
diff --git a/sound/pci/sis7019.c b/sound/pci/sis7019.c
index d59abe1682c5..748e82d4d257 100644
--- a/sound/pci/sis7019.c
+++ b/sound/pci/sis7019.c
@@ -1341,7 +1341,8 @@ static int sis_chip_create(struct snd_card *card,
1341 if (rc) 1341 if (rc)
1342 goto error_out; 1342 goto error_out;
1343 1343
1344 if (pci_set_dma_mask(pci, DMA_BIT_MASK(30)) < 0) { 1344 rc = pci_set_dma_mask(pci, DMA_BIT_MASK(30));
1345 if (rc < 0) {
1345 dev_err(&pci->dev, "architecture does not support 30-bit PCI busmaster DMA"); 1346 dev_err(&pci->dev, "architecture does not support 30-bit PCI busmaster DMA");
1346 goto error_out_enabled; 1347 goto error_out_enabled;
1347 } 1348 }
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 0f6f481cec09..987f728718c5 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -86,7 +86,7 @@ static const struct reg_default cs42l52_reg_defaults[] = {
86 { CS42L52_BEEP_VOL, 0x00 }, /* r1D Beep Volume off Time */ 86 { CS42L52_BEEP_VOL, 0x00 }, /* r1D Beep Volume off Time */
87 { CS42L52_BEEP_TONE_CTL, 0x00 }, /* r1E Beep Tone Cfg. */ 87 { CS42L52_BEEP_TONE_CTL, 0x00 }, /* r1E Beep Tone Cfg. */
88 { CS42L52_TONE_CTL, 0x00 }, /* r1F Tone Ctl */ 88 { CS42L52_TONE_CTL, 0x00 }, /* r1F Tone Ctl */
89 { CS42L52_MASTERA_VOL, 0x88 }, /* r20 Master A Volume */ 89 { CS42L52_MASTERA_VOL, 0x00 }, /* r20 Master A Volume */
90 { CS42L52_MASTERB_VOL, 0x00 }, /* r21 Master B Volume */ 90 { CS42L52_MASTERB_VOL, 0x00 }, /* r21 Master B Volume */
91 { CS42L52_HPA_VOL, 0x00 }, /* r22 Headphone A Volume */ 91 { CS42L52_HPA_VOL, 0x00 }, /* r22 Headphone A Volume */
92 { CS42L52_HPB_VOL, 0x00 }, /* r23 Headphone B Volume */ 92 { CS42L52_HPB_VOL, 0x00 }, /* r23 Headphone B Volume */
@@ -193,6 +193,8 @@ static DECLARE_TLV_DB_SCALE(mic_tlv, 1600, 100, 0);
193 193
194static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0); 194static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0);
195 195
196static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0);
197
196static const unsigned int limiter_tlv[] = { 198static const unsigned int limiter_tlv[] = {
197 TLV_DB_RANGE_HEAD(2), 199 TLV_DB_RANGE_HEAD(2),
198 0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0), 200 0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
@@ -225,7 +227,7 @@ static const char * const mic_bias_level_text[] = {
225}; 227};
226 228
227static const struct soc_enum mic_bias_level_enum = 229static const struct soc_enum mic_bias_level_enum =
228 SOC_ENUM_SINGLE(CS42L52_IFACE_CTL1, 0, 230 SOC_ENUM_SINGLE(CS42L52_IFACE_CTL2, 0,
229 ARRAY_SIZE(mic_bias_level_text), mic_bias_level_text); 231 ARRAY_SIZE(mic_bias_level_text), mic_bias_level_text);
230 232
231static const char * const cs42l52_mic_text[] = { "Single", "Differential" }; 233static const char * const cs42l52_mic_text[] = { "Single", "Differential" };
@@ -260,7 +262,7 @@ static const char * const hp_gain_num_text[] = {
260}; 262};
261 263
262static const struct soc_enum hp_gain_enum = 264static const struct soc_enum hp_gain_enum =
263 SOC_ENUM_SINGLE(CS42L52_PB_CTL1, 4, 265 SOC_ENUM_SINGLE(CS42L52_PB_CTL1, 5,
264 ARRAY_SIZE(hp_gain_num_text), hp_gain_num_text); 266 ARRAY_SIZE(hp_gain_num_text), hp_gain_num_text);
265 267
266static const char * const beep_pitch_text[] = { 268static const char * const beep_pitch_text[] = {
@@ -413,7 +415,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
413 SOC_ENUM("Headphone Analog Gain", hp_gain_enum), 415 SOC_ENUM("Headphone Analog Gain", hp_gain_enum),
414 416
415 SOC_DOUBLE_R_SX_TLV("Speaker Volume", CS42L52_SPKA_VOL, 417 SOC_DOUBLE_R_SX_TLV("Speaker Volume", CS42L52_SPKA_VOL,
416 CS42L52_SPKB_VOL, 7, 0x1, 0xff, hl_tlv), 418 CS42L52_SPKB_VOL, 0, 0x1, 0xff, hl_tlv),
417 419
418 SOC_DOUBLE_R_SX_TLV("Bypass Volume", CS42L52_PASSTHRUA_VOL, 420 SOC_DOUBLE_R_SX_TLV("Bypass Volume", CS42L52_PASSTHRUA_VOL,
419 CS42L52_PASSTHRUB_VOL, 6, 0x18, 0x90, pga_tlv), 421 CS42L52_PASSTHRUB_VOL, 6, 0x18, 0x90, pga_tlv),
@@ -441,7 +443,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
441 443
442 SOC_DOUBLE_R_SX_TLV("PCM Mixer Volume", 444 SOC_DOUBLE_R_SX_TLV("PCM Mixer Volume",
443 CS42L52_PCMA_MIXER_VOL, CS42L52_PCMB_MIXER_VOL, 445 CS42L52_PCMA_MIXER_VOL, CS42L52_PCMB_MIXER_VOL,
444 6, 0x7f, 0x19, hl_tlv), 446 0, 0x7f, 0x19, mix_tlv),
445 SOC_DOUBLE_R("PCM Mixer Switch", 447 SOC_DOUBLE_R("PCM Mixer Switch",
446 CS42L52_PCMA_MIXER_VOL, CS42L52_PCMB_MIXER_VOL, 7, 1, 1), 448 CS42L52_PCMA_MIXER_VOL, CS42L52_PCMB_MIXER_VOL, 7, 1, 1),
447 449
diff --git a/sound/soc/codecs/cs42l52.h b/sound/soc/codecs/cs42l52.h
index 60985c059071..4277012c4719 100644
--- a/sound/soc/codecs/cs42l52.h
+++ b/sound/soc/codecs/cs42l52.h
@@ -157,7 +157,7 @@
157#define CS42L52_PB_CTL1_INV_PCMA (1 << 2) 157#define CS42L52_PB_CTL1_INV_PCMA (1 << 2)
158#define CS42L52_PB_CTL1_MSTB_MUTE (1 << 1) 158#define CS42L52_PB_CTL1_MSTB_MUTE (1 << 1)
159#define CS42L52_PB_CTL1_MSTA_MUTE (1 << 0) 159#define CS42L52_PB_CTL1_MSTA_MUTE (1 << 0)
160#define CS42L52_PB_CTL1_MUTE_MASK 0xFFFD 160#define CS42L52_PB_CTL1_MUTE_MASK 0x03
161#define CS42L52_PB_CTL1_MUTE 3 161#define CS42L52_PB_CTL1_MUTE 3
162#define CS42L52_PB_CTL1_UNMUTE 0 162#define CS42L52_PB_CTL1_UNMUTE 0
163 163
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index ce0d36412c97..8d14a76c7249 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -2233,7 +2233,7 @@ static int max98090_probe(struct snd_soc_codec *codec)
2233 dev_dbg(codec->dev, "irq = %d\n", max98090->irq); 2233 dev_dbg(codec->dev, "irq = %d\n", max98090->irq);
2234 2234
2235 ret = request_threaded_irq(max98090->irq, NULL, 2235 ret = request_threaded_irq(max98090->irq, NULL,
2236 max98090_interrupt, IRQF_TRIGGER_FALLING, 2236 max98090_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
2237 "max98090_interrupt", codec); 2237 "max98090_interrupt", codec);
2238 if (ret < 0) { 2238 if (ret < 0) {
2239 dev_err(codec->dev, "request_irq failed: %d\n", 2239 dev_err(codec->dev, "request_irq failed: %d\n",
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 65d09d60b7c6..1514bf845e4b 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -187,14 +187,14 @@ static int snd_soc_dapm_put_volsw_aic3x(struct snd_kcontrol *kcontrol,
187 187
188 break; 188 break;
189 } 189 }
190
191 if (found)
192 snd_soc_dapm_sync(widget->dapm);
193 } 190 }
194 191
195 ret = snd_soc_update_bits(widget->codec, reg, val_mask, val);
196
197 mutex_unlock(&widget->codec->mutex); 192 mutex_unlock(&widget->codec->mutex);
193
194 if (found)
195 snd_soc_dapm_sync(widget->dapm);
196
197 ret = snd_soc_update_bits_locked(widget->codec, reg, val_mask, val);
198 return ret; 198 return ret;
199} 199}
200 200
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index e895d3939eef..100fdadda56a 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -1120,7 +1120,8 @@ SND_SOC_DAPM_AIF_IN("AIF3RX2", NULL, 0,
1120ARIZONA_DSP_WIDGETS(DSP1, "DSP1"), 1120ARIZONA_DSP_WIDGETS(DSP1, "DSP1"),
1121 1121
1122SND_SOC_DAPM_VALUE_MUX("AEC Loopback", ARIZONA_DAC_AEC_CONTROL_1, 1122SND_SOC_DAPM_VALUE_MUX("AEC Loopback", ARIZONA_DAC_AEC_CONTROL_1,
1123 ARIZONA_AEC_LOOPBACK_ENA, 0, &wm5102_aec_loopback_mux), 1123 ARIZONA_AEC_LOOPBACK_ENA_SHIFT, 0,
1124 &wm5102_aec_loopback_mux),
1124 1125
1125SND_SOC_DAPM_PGA_E("OUT1L", SND_SOC_NOPM, 1126SND_SOC_DAPM_PGA_E("OUT1L", SND_SOC_NOPM,
1126 ARIZONA_OUT1L_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev, 1127 ARIZONA_OUT1L_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev,
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 731884e04776..88ad7db52dde 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -190,7 +190,7 @@ ARIZONA_MIXER_CONTROLS("DSP2R", ARIZONA_DSP2RMIX_INPUT_1_SOURCE),
190ARIZONA_MIXER_CONTROLS("DSP3L", ARIZONA_DSP3LMIX_INPUT_1_SOURCE), 190ARIZONA_MIXER_CONTROLS("DSP3L", ARIZONA_DSP3LMIX_INPUT_1_SOURCE),
191ARIZONA_MIXER_CONTROLS("DSP3R", ARIZONA_DSP3RMIX_INPUT_1_SOURCE), 191ARIZONA_MIXER_CONTROLS("DSP3R", ARIZONA_DSP3RMIX_INPUT_1_SOURCE),
192ARIZONA_MIXER_CONTROLS("DSP4L", ARIZONA_DSP4LMIX_INPUT_1_SOURCE), 192ARIZONA_MIXER_CONTROLS("DSP4L", ARIZONA_DSP4LMIX_INPUT_1_SOURCE),
193ARIZONA_MIXER_CONTROLS("DSP5R", ARIZONA_DSP4RMIX_INPUT_1_SOURCE), 193ARIZONA_MIXER_CONTROLS("DSP4R", ARIZONA_DSP4RMIX_INPUT_1_SOURCE),
194 194
195ARIZONA_MIXER_CONTROLS("Mic", ARIZONA_MICMIX_INPUT_1_SOURCE), 195ARIZONA_MIXER_CONTROLS("Mic", ARIZONA_MICMIX_INPUT_1_SOURCE),
196ARIZONA_MIXER_CONTROLS("Noise", ARIZONA_NOISEMIX_INPUT_1_SOURCE), 196ARIZONA_MIXER_CONTROLS("Noise", ARIZONA_NOISEMIX_INPUT_1_SOURCE),
@@ -503,7 +503,8 @@ SND_SOC_DAPM_PGA("ASRC2R", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC2R_ENA_SHIFT, 0,
503 NULL, 0), 503 NULL, 0),
504 504
505SND_SOC_DAPM_VALUE_MUX("AEC Loopback", ARIZONA_DAC_AEC_CONTROL_1, 505SND_SOC_DAPM_VALUE_MUX("AEC Loopback", ARIZONA_DAC_AEC_CONTROL_1,
506 ARIZONA_AEC_LOOPBACK_ENA, 0, &wm5110_aec_loopback_mux), 506 ARIZONA_AEC_LOOPBACK_ENA_SHIFT, 0,
507 &wm5110_aec_loopback_mux),
507 508
508SND_SOC_DAPM_AIF_OUT("AIF1TX1", NULL, 0, 509SND_SOC_DAPM_AIF_OUT("AIF1TX1", NULL, 0,
509 ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX1_ENA_SHIFT, 0), 510 ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX1_ENA_SHIFT, 0),
@@ -976,6 +977,8 @@ static int wm5110_codec_probe(struct snd_soc_codec *codec)
976 if (ret != 0) 977 if (ret != 0)
977 return ret; 978 return ret;
978 979
980 arizona_init_spk(codec);
981
979 snd_soc_dapm_disable_pin(&codec->dapm, "HAPTICS"); 982 snd_soc_dapm_disable_pin(&codec->dapm, "HAPTICS");
980 983
981 priv->core.arizona->dapm = &codec->dapm; 984 priv->core.arizona->dapm = &codec->dapm;
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 1eb152cb1097..29e95f93d482 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -383,6 +383,8 @@ static int wm8994_get_drc_enum(struct snd_kcontrol *kcontrol,
383 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); 383 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
384 int drc = wm8994_get_drc(kcontrol->id.name); 384 int drc = wm8994_get_drc(kcontrol->id.name);
385 385
386 if (drc < 0)
387 return drc;
386 ucontrol->value.enumerated.item[0] = wm8994->drc_cfg[drc]; 388 ucontrol->value.enumerated.item[0] = wm8994->drc_cfg[drc];
387 389
388 return 0; 390 return 0;
@@ -488,6 +490,9 @@ static int wm8994_get_retune_mobile_enum(struct snd_kcontrol *kcontrol,
488 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); 490 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
489 int block = wm8994_get_retune_mobile_block(kcontrol->id.name); 491 int block = wm8994_get_retune_mobile_block(kcontrol->id.name);
490 492
493 if (block < 0)
494 return block;
495
491 ucontrol->value.enumerated.item[0] = wm8994->retune_mobile_cfg[block]; 496 ucontrol->value.enumerated.item[0] = wm8994->retune_mobile_cfg[block];
492 497
493 return 0; 498 return 0;
@@ -1031,7 +1036,7 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
1031{ 1036{
1032 struct snd_soc_codec *codec = w->codec; 1037 struct snd_soc_codec *codec = w->codec;
1033 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); 1038 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
1034 struct wm8994 *control = codec->control_data; 1039 struct wm8994 *control = wm8994->wm8994;
1035 int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA; 1040 int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA;
1036 int i; 1041 int i;
1037 int dac; 1042 int dac;
@@ -3831,8 +3836,14 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
3831 ret); 3836 ret);
3832 } else if (!(ret & WM1811_JACKDET_LVL)) { 3837 } else if (!(ret & WM1811_JACKDET_LVL)) {
3833 dev_dbg(codec->dev, "Ignoring removed jack\n"); 3838 dev_dbg(codec->dev, "Ignoring removed jack\n");
3834 return IRQ_HANDLED; 3839 goto out;
3835 } 3840 }
3841 } else if (!(reg & WM8958_MICD_STS)) {
3842 snd_soc_jack_report(wm8994->micdet[0].jack, 0,
3843 SND_JACK_MECHANICAL | SND_JACK_HEADSET |
3844 wm8994->btn_mask);
3845 wm8994->mic_detecting = true;
3846 goto out;
3836 } 3847 }
3837 3848
3838 if (wm8994->mic_detecting) 3849 if (wm8994->mic_detecting)
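
Note on the wm8994 get-handler hunks above: wm8994_get_drc() and wm8994_get_retune_mobile_block() derive an index from the control name and return a negative value when nothing matches, so the new guards stop that value from being used to index drc_cfg[] or retune_mobile_cfg[]. The defensive shape in general form; lookup() and table below are placeholders:

#include <errno.h>

#define TABLE_SIZE 4

static int table[TABLE_SIZE];

/* Returns an index >= 0 on success, or a negative errno on failure. */
static int lookup(const char *name)
{
	return (name && name[0]) ? 0 : -EINVAL;
}

static int read_config(const char *name, int *out)
{
	int idx = lookup(name);

	if (idx < 0)		/* propagate the error, never index with it */
		return idx;

	*out = table[idx];
	return 0;
}
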
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 56ecfc72f2e9..81490febac6d 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -631,7 +631,8 @@ static int davinci_config_channel_size(struct davinci_audio_dev *dev,
631 int word_length) 631 int word_length)
632{ 632{
633 u32 fmt; 633 u32 fmt;
634 u32 rotate = (word_length / 4) & 0x7; 634 u32 tx_rotate = (word_length / 4) & 0x7;
635 u32 rx_rotate = (32 - word_length) / 4;
635 u32 mask = (1ULL << word_length) - 1; 636 u32 mask = (1ULL << word_length) - 1;
636 637
637 /* 638 /*
@@ -655,9 +656,9 @@ static int davinci_config_channel_size(struct davinci_audio_dev *dev,
655 mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, 656 mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
656 TXSSZ(fmt), TXSSZ(0x0F)); 657 TXSSZ(fmt), TXSSZ(0x0F));
657 mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, 658 mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
658 TXROT(rotate), TXROT(7)); 659 TXROT(tx_rotate), TXROT(7));
659 mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG, 660 mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG,
660 RXROT(rotate), RXROT(7)); 661 RXROT(rx_rotate), RXROT(7));
661 mcasp_set_reg(dev->base + DAVINCI_MCASP_RXMASK_REG, 662 mcasp_set_reg(dev->base + DAVINCI_MCASP_RXMASK_REG,
662 mask); 663 mask);
663 } 664 }
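
Note on the McASP hunk above: the single rotate value is split, with the transmit side keeping (word_length / 4) & 0x7 and the receive side switching to (32 - word_length) / 4, i.e. the RX rotation is now computed relative to the 32-bit slot rather than the sample width. Worked numbers under those formulas: word_length 16 gives tx_rotate 4 and rx_rotate 4; 24 gives 6 and 2; 32 gives 0 ((32/4) & 0x7 = 0) and 0. A few standalone lines (not driver code) that reproduce the table, including the RX mask:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	static const int widths[] = { 16, 24, 32 };

	for (unsigned i = 0; i < sizeof(widths) / sizeof(widths[0]); i++) {
		int word_length = widths[i];
		uint32_t tx_rotate = (word_length / 4) & 0x7;
		uint32_t rx_rotate = (32 - word_length) / 4;
		uint64_t mask = (1ULL << word_length) - 1;

		printf("w=%2d tx_rot=%u rx_rot=%u mask=0x%llx\n",
		       word_length, tx_rotate, rx_rotate,
		       (unsigned long long)mask);
	}
	return 0;
}
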
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 3853f7eb3f28..06a8000aa07b 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -220,8 +220,12 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream,
220 goto err; 220 goto err;
221 } 221 }
222 222
223 snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK, 223 if (cstream->direction == SND_COMPRESS_PLAYBACK)
224 SND_SOC_DAPM_STREAM_START); 224 snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
225 SND_SOC_DAPM_STREAM_START);
226 else
227 snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_CAPTURE,
228 SND_SOC_DAPM_STREAM_START);
225 229
226 /* cancel any delayed stream shutdown that is pending */ 230 /* cancel any delayed stream shutdown that is pending */
227 rtd->pop_wait = 0; 231 rtd->pop_wait = 0;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index a80c883bb8be..c7051c457b75 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -55,7 +55,8 @@ static int dapm_up_seq[] = {
55 [snd_soc_dapm_clock_supply] = 1, 55 [snd_soc_dapm_clock_supply] = 1,
56 [snd_soc_dapm_micbias] = 2, 56 [snd_soc_dapm_micbias] = 2,
57 [snd_soc_dapm_dai_link] = 2, 57 [snd_soc_dapm_dai_link] = 2,
58 [snd_soc_dapm_dai] = 3, 58 [snd_soc_dapm_dai_in] = 3,
59 [snd_soc_dapm_dai_out] = 3,
59 [snd_soc_dapm_aif_in] = 3, 60 [snd_soc_dapm_aif_in] = 3,
60 [snd_soc_dapm_aif_out] = 3, 61 [snd_soc_dapm_aif_out] = 3,
61 [snd_soc_dapm_mic] = 4, 62 [snd_soc_dapm_mic] = 4,
@@ -92,7 +93,8 @@ static int dapm_down_seq[] = {
92 [snd_soc_dapm_value_mux] = 9, 93 [snd_soc_dapm_value_mux] = 9,
93 [snd_soc_dapm_aif_in] = 10, 94 [snd_soc_dapm_aif_in] = 10,
94 [snd_soc_dapm_aif_out] = 10, 95 [snd_soc_dapm_aif_out] = 10,
95 [snd_soc_dapm_dai] = 10, 96 [snd_soc_dapm_dai_in] = 10,
97 [snd_soc_dapm_dai_out] = 10,
96 [snd_soc_dapm_dai_link] = 11, 98 [snd_soc_dapm_dai_link] = 11,
97 [snd_soc_dapm_clock_supply] = 12, 99 [snd_soc_dapm_clock_supply] = 12,
98 [snd_soc_dapm_regulator_supply] = 12, 100 [snd_soc_dapm_regulator_supply] = 12,
@@ -419,7 +421,8 @@ static void dapm_set_path_status(struct snd_soc_dapm_widget *w,
419 case snd_soc_dapm_clock_supply: 421 case snd_soc_dapm_clock_supply:
420 case snd_soc_dapm_aif_in: 422 case snd_soc_dapm_aif_in:
421 case snd_soc_dapm_aif_out: 423 case snd_soc_dapm_aif_out:
422 case snd_soc_dapm_dai: 424 case snd_soc_dapm_dai_in:
425 case snd_soc_dapm_dai_out:
423 case snd_soc_dapm_hp: 426 case snd_soc_dapm_hp:
424 case snd_soc_dapm_mic: 427 case snd_soc_dapm_mic:
425 case snd_soc_dapm_spk: 428 case snd_soc_dapm_spk:
@@ -820,7 +823,7 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
 	switch (widget->id) {
 	case snd_soc_dapm_adc:
 	case snd_soc_dapm_aif_out:
-	case snd_soc_dapm_dai:
+	case snd_soc_dapm_dai_out:
 		if (widget->active) {
 			widget->outputs = snd_soc_dapm_suspend_check(widget);
 			return widget->outputs;
@@ -916,7 +919,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
 	switch (widget->id) {
 	case snd_soc_dapm_dac:
 	case snd_soc_dapm_aif_in:
-	case snd_soc_dapm_dai:
+	case snd_soc_dapm_dai_in:
 		if (widget->active) {
 			widget->inputs = snd_soc_dapm_suspend_check(widget);
 			return widget->inputs;
@@ -1135,16 +1138,6 @@ static int dapm_generic_check_power(struct snd_soc_dapm_widget *w)
 	return out != 0 && in != 0;
 }
 
-static int dapm_dai_check_power(struct snd_soc_dapm_widget *w)
-{
-	DAPM_UPDATE_STAT(w, power_checks);
-
-	if (w->active)
-		return w->active;
-
-	return dapm_generic_check_power(w);
-}
-
 /* Check to see if an ADC has power */
 static int dapm_adc_check_power(struct snd_soc_dapm_widget *w)
 {
@@ -2318,7 +2311,8 @@ static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
 	case snd_soc_dapm_clock_supply:
 	case snd_soc_dapm_aif_in:
 	case snd_soc_dapm_aif_out:
-	case snd_soc_dapm_dai:
+	case snd_soc_dapm_dai_in:
+	case snd_soc_dapm_dai_out:
 	case snd_soc_dapm_dai_link:
 		list_add(&path->list, &dapm->card->paths);
 		list_add(&path->list_sink, &wsink->sources);
@@ -3129,10 +3123,12 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
 		break;
 	case snd_soc_dapm_adc:
 	case snd_soc_dapm_aif_out:
+	case snd_soc_dapm_dai_out:
 		w->power_check = dapm_adc_check_power;
 		break;
 	case snd_soc_dapm_dac:
 	case snd_soc_dapm_aif_in:
+	case snd_soc_dapm_dai_in:
 		w->power_check = dapm_dac_check_power;
 		break;
 	case snd_soc_dapm_pga:
@@ -3152,9 +3148,6 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
 	case snd_soc_dapm_clock_supply:
 		w->power_check = dapm_supply_check_power;
 		break;
-	case snd_soc_dapm_dai:
-		w->power_check = dapm_dai_check_power;
-		break;
 	default:
 		w->power_check = dapm_always_on_check_power;
 		break;
@@ -3375,7 +3368,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
 	template.reg = SND_SOC_NOPM;
 
 	if (dai->driver->playback.stream_name) {
-		template.id = snd_soc_dapm_dai;
+		template.id = snd_soc_dapm_dai_in;
 		template.name = dai->driver->playback.stream_name;
 		template.sname = dai->driver->playback.stream_name;
 
@@ -3393,7 +3386,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
 	}
 
 	if (dai->driver->capture.stream_name) {
-		template.id = snd_soc_dapm_dai;
+		template.id = snd_soc_dapm_dai_out;
 		template.name = dai->driver->capture.stream_name;
 		template.sname = dai->driver->capture.stream_name;
 
@@ -3423,8 +3416,13 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
 
 	/* For each DAI widget... */
 	list_for_each_entry(dai_w, &card->widgets, list) {
-		if (dai_w->id != snd_soc_dapm_dai)
+		switch (dai_w->id) {
+		case snd_soc_dapm_dai_in:
+		case snd_soc_dapm_dai_out:
+			break;
+		default:
 			continue;
+		}
 
 		dai = dai_w->priv;
 
@@ -3433,8 +3431,13 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
 			if (w->dapm != dai_w->dapm)
 				continue;
 
-			if (w->id == snd_soc_dapm_dai)
+			switch (w->id) {
+			case snd_soc_dapm_dai_in:
+			case snd_soc_dapm_dai_out:
 				continue;
+			default:
+				break;
+			}
 
 			if (!w->sname)
 				continue;
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 73bb8eefa491..ccb6be4d658d 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -928,8 +928,13 @@ static int dpcm_add_paths(struct snd_soc_pcm_runtime *fe, int stream,
 	/* Create any new FE <--> BE connections */
 	for (i = 0; i < list->num_widgets; i++) {
 
-		if (list->widgets[i]->id != snd_soc_dapm_dai)
+		switch (list->widgets[i]->id) {
+		case snd_soc_dapm_dai_in:
+		case snd_soc_dapm_dai_out:
+			break;
+		default:
 			continue;
+		}
 
 		/* is there a valid BE rtd for this widget */
 		be = dpcm_get_be(card, list->widgets[i], stream);
@@ -2011,9 +2016,11 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
 		if (cpu_dai->driver->capture.channels_min)
 			capture = 1;
 	} else {
-		if (codec_dai->driver->playback.channels_min)
+		if (codec_dai->driver->playback.channels_min &&
+		    cpu_dai->driver->playback.channels_min)
 			playback = 1;
-		if (codec_dai->driver->capture.channels_min)
+		if (codec_dai->driver->capture.channels_min &&
+		    cpu_dai->driver->capture.channels_min)
 			capture = 1;
 	}
 
diff --git a/sound/usb/6fire/firmware.c b/sound/usb/6fire/firmware.c
index a1d9b0792a1e..b9defcdeb7ef 100644
--- a/sound/usb/6fire/firmware.c
+++ b/sound/usb/6fire/firmware.c
@@ -42,8 +42,8 @@ static const u8 ep_w_max_packet_size[] = {
 	0x94, 0x01, 0x5c, 0x02 /* alt 3: 404 EP2 and 604 EP6 (25 fpp) */
 };
 
-static const u8 known_fw_versions[][4] = {
-	{ 0x03, 0x01, 0x0b, 0x00 }
+static const u8 known_fw_versions[][2] = {
+	{ 0x03, 0x01 }
 };
 
 struct ihex_record {
@@ -343,7 +343,7 @@ static int usb6fire_fw_check(u8 *version)
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(known_fw_versions); i++)
-		if (!memcmp(version, known_fw_versions + i, 4))
+		if (!memcmp(version, known_fw_versions + i, 2))
 			return 0;
 
 	snd_printk(KERN_ERR PREFIX "invalid fimware version in device: %*ph. "
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 1a033177b83f..64952e2d3ed1 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -147,14 +147,32 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
 		return -EINVAL;
 	}
 
+	alts = &iface->altsetting[0];
+	altsd = get_iface_desc(alts);
+
+	/*
+	 * Android with both accessory and audio interfaces enabled gets the
+	 * interface numbers wrong.
+	 */
+	if ((chip->usb_id == USB_ID(0x18d1, 0x2d04) ||
+	     chip->usb_id == USB_ID(0x18d1, 0x2d05)) &&
+	    interface == 0 &&
+	    altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
+	    altsd->bInterfaceSubClass == USB_SUBCLASS_VENDOR_SPEC) {
+		interface = 2;
+		iface = usb_ifnum_to_if(dev, interface);
+		if (!iface)
+			return -EINVAL;
+		alts = &iface->altsetting[0];
+		altsd = get_iface_desc(alts);
+	}
+
 	if (usb_interface_claimed(iface)) {
 		snd_printdd(KERN_INFO "%d:%d:%d: skipping, already claimed\n",
 			    dev->devnum, ctrlif, interface);
 		return -EINVAL;
 	}
 
-	alts = &iface->altsetting[0];
-	altsd = get_iface_desc(alts);
 	if ((altsd->bInterfaceClass == USB_CLASS_AUDIO ||
 	     altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC) &&
 	    altsd->bInterfaceSubClass == USB_SUBCLASS_MIDISTREAMING) {
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index ca4739c3f650..d5438083fd6a 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -885,7 +885,9 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
 
 	case USB_ID(0x046d, 0x0808):
 	case USB_ID(0x046d, 0x0809):
+	case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
 	case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
+	case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
 	case USB_ID(0x046d, 0x0991):
 	/* Most audio usb devices lie about volume resolution.
 	 * Most Logitech webcams have res = 384.
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 7f1722f82c89..8b75bcf136f6 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -215,7 +215,13 @@
 	.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL
 },
 {
-	USB_DEVICE(0x046d, 0x0990),
+	.match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+		       USB_DEVICE_ID_MATCH_INT_CLASS |
+		       USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+	.idVendor = 0x046d,
+	.idProduct = 0x0990,
+	.bInterfaceClass = USB_CLASS_AUDIO,
+	.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
 	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
 		.vendor_name = "Logitech, Inc.",
 		.product_name = "QuickCam Pro 9000",
@@ -1792,7 +1798,11 @@ YAMAHA_DEVICE(0x7010, "UB99"),
 	USB_DEVICE_VENDOR_SPEC(0x0582, 0x0108),
 	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
 		.ifnum = 0,
-		.type = QUIRK_MIDI_STANDARD_INTERFACE
+		.type = QUIRK_MIDI_FIXED_ENDPOINT,
+		.data = & (const struct snd_usb_midi_endpoint_info) {
+			.out_cables = 0x0007,
+			.in_cables = 0x0007
+		}
 	}
 },
 {
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 9e9d34871195..fe702076ca46 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -2191,7 +2191,7 @@ int initialize_counters(int cpu_id)
 
 void allocate_output_buffer()
 {
-	output_buffer = calloc(1, (1 + topo.num_cpus) * 128);
+	output_buffer = calloc(1, (1 + topo.num_cpus) * 256);
 	outp = output_buffer;
 	if (outp == NULL) {
 		perror("calloc");